Changeset 1642 for XIOS/dev/branch_openmp/extern/remap/src/mapper.cpp

Timestamp: 01/23/19 10:31:44 (5 years ago)
File: 1 edited
XIOS/dev/branch_openmp/extern/remap/src/mapper.cpp
--- r1545
+++ r1642

 #include "mapper.hpp"
-using namespace ep_lib;
 
 namespace sphereRemap {
-
-extern CRemapGrid srcGrid;
-#pragma omp threadprivate(srcGrid)
-
-extern CRemapGrid tgtGrid;
-#pragma omp threadprivate(tgtGrid)
 
 /* A subdivition of an array into N sub-arays
…
-void Mapper::setSourceMesh(const double* boundsLon, const double* boundsLat, int nVertex, int nbCells, const double* pole, const long int* globalId)
+void Mapper::setSourceMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId)
 {
     srcGrid.pole = Coord(pole[0], pole[1], pole[2]);
 
     int mpiRank, mpiSize;
-    MPI_Comm_rank(communicator, &mpiRank);
-    MPI_Comm_size(communicator, &mpiSize);
+    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+    ep_lib::MPI_Comm_size(communicator, &mpiSize);
 
     sourceElements.reserve(nbCells);
…
     long int offset ;
     long int nb=nbCells ;
-    MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+    ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
     offset=offset-nb ;
     for(int i=0;i<nbCells;i++) sourceGlobalId[i]=offset+i ;
…
       sourceMesh.push_back(Node(elt.x, cptRadius(elt), &sourceElements.back()));
       cptEltGeom(sourceElements[i], Coord(pole[0], pole[1], pole[2]));
+      if (area!=NULL) sourceElements[i].given_area=area[i] ;
+      else sourceElements[i].given_area=sourceElements[i].area ;
     }
 
 }
 
-void Mapper::setTargetMesh(const double* boundsLon, const double* boundsLat, int nVertex, int nbCells, const double* pole, const long int* globalId)
+void Mapper::setTargetMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId)
 {
     tgtGrid.pole = Coord(pole[0], pole[1], pole[2]);
 
     int mpiRank, mpiSize;
-    MPI_Comm_rank(communicator, &mpiRank);
-    MPI_Comm_size(communicator, &mpiSize);
+    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+    ep_lib::MPI_Comm_size(communicator, &mpiSize);
 
     targetElements.reserve(nbCells);
…
     long int offset ;
     long int nb=nbCells ;
-    MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+    ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
     offset=offset-nb ;
     for(int i=0;i<nbCells;i++) targetGlobalId[i]=offset+i ;
…
       targetMesh.push_back(Node(elt.x, cptRadius(elt), &sourceElements.back()));
       cptEltGeom(targetElements[i], Coord(pole[0], pole[1], pole[2]));
+      if (area!=NULL) targetElements[i].given_area=area[i] ;
+      else targetElements[i].given_area=targetElements[i].area ;
     }
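The MPI_Scan idiom above is how each rank derives a contiguous block of global cell IDs when the caller supplies none: an inclusive prefix sum over the per-rank cell counts, minus the local count, yields the exclusive offset of the rank's first cell. A minimal standalone sketch of the same trick in plain MPI (the changeset routes the call through the ep_lib endpoint wrappers instead):

    // Sketch: contiguous global-ID blocks via an exclusive prefix sum.
    // Plain MPI is used here, not the ep_lib wrappers.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        long nbCells = 1000;  // local cell count (placeholder value)
        long offset = 0;

        // Inclusive scan: offset = sum of nbCells over ranks 0..myRank.
        MPI_Scan(&nbCells, &offset, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD);
        offset -= nbCells;    // make it exclusive: cells owned by lower ranks

        std::vector<long> globalId(nbCells);
        for (long i = 0; i < nbCells; i++) globalId[i] = offset + i;

        MPI_Finalize();
        return 0;
    }

With three ranks holding 3, 5 and 2 cells, MPI_Scan returns 3, 8 and 10, and the exclusive offsets become 0, 3 and 8.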
…
     vector<double> timings;
     int mpiSize, mpiRank;
-    MPI_Comm_size(communicator, &mpiSize);
-    MPI_Comm_rank(communicator, &mpiRank);
+    ep_lib::MPI_Comm_size(communicator, &mpiSize);
+    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 
     this->buildSSTree(sourceMesh, targetMesh);
…
     tic = cputime();
-    if (interpOrder == 2)
-    {
+    if (interpOrder == 2) {
       if (mpiRank == 0 && verbose) cout << "Computing grads ..." << endl;
       buildMeshTopology();
…
     /* Prepare computation of weights */
     /* compute number of intersections which for the first order case
-       corresponds to the number of edges in the remap matrix */
+        corresponds to the number of edges in the remap matrix */
     int nIntersections = 0;
     for (int j = 0; j < targetElements.size(); j++)
…
 {
     int mpiSize, mpiRank;
-    MPI_Comm_size(communicator, &mpiSize);
-    MPI_Comm_rank(communicator, &mpiRank);
+    ep_lib::MPI_Comm_size(communicator, &mpiSize);
+    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 
     /* create list of intersections (super mesh elements) for each rank */
…
       Elt& e = elements[j];
       for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++)
-      {
         elementList[(*it)->id.rank].insert(pair<int, Polyg *>((*it)->id.ind, *it));
-      }
     }
…
     double **recvValue = new double*[mpiSize];
     double **recvArea = new double*[mpiSize];
+    double **recvGivenArea = new double*[mpiSize];
     Coord **recvGrad = new Coord*[mpiSize];
     GloId **recvNeighIds = new GloId*[mpiSize]; /* ids of the of the source neighbours which also contribute through gradient */
…
       recvValue[rank] = new double[nbSendElement[rank]];
       recvArea[rank] = new double[nbSendElement[rank]];
+      recvGivenArea[rank] = new double[nbSendElement[rank]];
       if (order == 2)
       {
…
     /* communicate sizes of source elements to be sent (index lists and later values and gradients) */
     int *nbRecvElement = new int[mpiSize];
-    MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);
+    ep_lib::MPI_Alltoall(nbSendElement, 1, EP_INT, nbRecvElement, 1, EP_INT, communicator);
 
     /* communicate indices of source elements on other ranks whoes value and gradient we need (since intersection) */
…
     double **sendValue = new double*[mpiSize];
     double **sendArea = new double*[mpiSize];
+    double **sendGivenArea = new double*[mpiSize];
     Coord **sendGrad = new Coord*[mpiSize];
     GloId **sendNeighIds = new GloId*[mpiSize];
-    MPI_Request *sendRequest = new MPI_Request[4*mpiSize];
-    MPI_Request *recvRequest = new MPI_Request[4*mpiSize];
+    ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[5*mpiSize];
+    ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[5*mpiSize];
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendElement[rank] > 0)
       {
-        MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendElement[rank], nbSendElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
       }
…
         sendValue[rank] = new double[nbRecvElement[rank]];
         sendArea[rank] = new double[nbRecvElement[rank]];
+        sendGivenArea[rank] = new double[nbRecvElement[rank]];
         if (order == 2)
         {
…
           sendNeighIds[rank] = new GloId[nbRecvElement[rank]];
         }
-        MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvElement[rank], nbRecvElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
       }
     }
-    MPI_Status *status = new MPI_Status[4*mpiSize];
-
-    MPI_Waitall(nbSendRequest, sendRequest, status);
-    MPI_Waitall(nbRecvRequest, recvRequest, status);
+    ep_lib::MPI_Status *status = new ep_lib::MPI_Status[5*mpiSize];
+
+    ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+    ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
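Two things to note in the hunk above. First, the request arrays grow from 4*mpiSize to 5*mpiSize because the new given_area field adds a fifth in-flight message per peer (value, area, given area, gradients, neighbour ids). Second, the code follows the communication skeleton used throughout this file: exchange per-peer counts with MPI_Alltoall, then post one non-blocking send/receive per peer and wait on all requests. A reduced sketch of that skeleton in plain MPI (ep_lib mirrors these signatures; the int payload stands in for the real index lists):

    // Sketch: count exchange, then variable-size non-blocking exchange --
    // the communication skeleton used throughout mapper.cpp (plain MPI).
    #include <mpi.h>
    #include <vector>

    void exchange(MPI_Comm comm, const std::vector<std::vector<int> >& sendData)
    {
        int mpiSize;
        MPI_Comm_size(comm, &mpiSize);

        // 1) Tell every peer how many elements to expect from us.
        std::vector<int> nbSend(mpiSize), nbRecv(mpiSize);
        for (int r = 0; r < mpiSize; r++) nbSend[r] = sendData[r].size();
        MPI_Alltoall(nbSend.data(), 1, MPI_INT, nbRecv.data(), 1, MPI_INT, comm);

        // 2) Post non-blocking sends/receives only for non-zero counts.
        std::vector<std::vector<int> > recvData(mpiSize);
        std::vector<MPI_Request> requests;
        for (int r = 0; r < mpiSize; r++)
        {
            MPI_Request rq;
            if (nbSend[r] > 0)
            {
                MPI_Issend(sendData[r].data(), nbSend[r], MPI_INT, r, 0, comm, &rq);
                requests.push_back(rq);
            }
            if (nbRecv[r] > 0)
            {
                recvData[r].resize(nbRecv[r]);
                MPI_Irecv(recvData[r].data(), nbRecv[r], MPI_INT, r, 0, comm, &rq);
                requests.push_back(rq);
            }
        }

        // 3) Complete all requests before reading or reusing the buffers.
        MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
    }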
 
     /* for all indices that have been received from requesting ranks: pack values and gradients, then send */
…
           sendValue[rank][j] = sstree.localElements[recvElement[rank][j]].val;
           sendArea[rank][j] = sstree.localElements[recvElement[rank][j]].area;
+          sendGivenArea[rank][j] = sstree.localElements[recvElement[rank][j]].given_area;
           if (order == 2)
           {
             sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].grad;
+            // cout<<"grad "<<jj<<" "<<recvElement[rank][j]<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ;
             sendNeighIds[rank][jj] = sstree.localElements[recvElement[rank][j]].src_id;
             jj++;
…
             {
               sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].gradNeigh[i];
+              // cout<<"grad "<<jj<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ;
               sendNeighIds[rank][jj] = sstree.localElements[recvElement[rank][j]].neighId[i];
               jj++;
             }
           }
           else
             sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id;
         }
-        MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
-        nbSendRequest++;
-        MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 1, communicator, &sendRequest[nbSendRequest]);
-        nbSendRequest++;
-        if (order == 2)
-        {
-          MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), MPI_DOUBLE, rank, 2, communicator, &sendRequest[nbSendRequest]);
-          nbSendRequest++;
-          MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &sendRequest[nbSendRequest]);
-          //ym --> attention taille GloId
-          nbSendRequest++;
-        }
-        else
-        {
-          MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 4, communicator, &sendRequest[nbSendRequest]);
-          //ym --> attention taille GloId
-          nbSendRequest++;
-        }
+        ep_lib::MPI_Issend(sendValue[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        nbSendRequest++;
+        ep_lib::MPI_Issend(sendArea[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        nbSendRequest++;
+        ep_lib::MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        nbSendRequest++;
+        if (order == 2)
+        {
+          ep_lib::MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+          nbSendRequest++;
+          ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+          //ym --> attention taille GloId
+          nbSendRequest++;
+        }
+        else
+        {
+          ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+          //ym --> attention taille GloId
+          nbSendRequest++;
+        }
       }
       if (nbSendElement[rank] > 0)
       {
-        MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
-        nbRecvRequest++;
-        MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 1, communicator, &recvRequest[nbRecvRequest]);
-        nbRecvRequest++;
-        if (order == 2)
-        {
-          MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1), MPI_DOUBLE, rank, 2, communicator, &recvRequest[nbRecvRequest]);
-          nbRecvRequest++;
-          MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &recvRequest[nbRecvRequest]);
-          //ym --> attention taille GloId
-          nbRecvRequest++;
-        }
-        else
-        {
-          MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 4, communicator, &recvRequest[nbRecvRequest]);
-          //ym --> attention taille GloId
-          nbRecvRequest++;
-        }
+        ep_lib::MPI_Irecv(recvValue[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        nbRecvRequest++;
+        ep_lib::MPI_Irecv(recvArea[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        nbRecvRequest++;
+        ep_lib::MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        nbRecvRequest++;
+        if (order == 2)
+        {
+          ep_lib::MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1), EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+          nbRecvRequest++;
+          ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+          //ym --> attention taille GloId
+          nbRecvRequest++;
+        }
+        else
+        {
+          ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+          //ym --> attention taille GloId
+          nbRecvRequest++;
+        }
       }
     }
 
-    MPI_Waitall(nbSendRequest, sendRequest, status);
-    MPI_Waitall(nbRecvRequest, recvRequest, status);
+    ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+    ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
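A subtlety in the hunk above: r1545 distinguished the value, area, gradient and neighbour-id messages by MPI tag (0 through 4), whereas r1642 sends them all with tag 0. That is only safe because MPI's non-overtaking rule delivers messages with the same (source, tag, communicator) triple in posting order, so both sides must post the value/area/given-area operations in exactly the same sequence, as they do here. A two-rank sketch of the constraint (plain MPI, placeholder payloads):

    // Sketch: several same-tag messages between one pair of ranks are
    // matched in posting order, so sender and receiver must agree on it.
    #include <mpi.h>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        double value = 1.0, area = 2.0;   // stand-ins for the real payloads
        MPI_Request rq[2];

        if (rank == 0)
        {
            MPI_Issend(&value, 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &rq[0]);  // posted 1st
            MPI_Issend(&area,  1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &rq[1]);  // posted 2nd
        }
        else if (rank == 1)
        {
            MPI_Irecv(&value, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &rq[0]);   // matches 1st
            MPI_Irecv(&area,  1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &rq[1]);   // matches 2nd
        }
        if (rank < 2) MPI_Waitall(2, rq, MPI_STATUSES_IGNORE);

        MPI_Finalize();
        return 0;
    }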
 
     /* now that all values and gradients are available use them to computed interpolated values on target
-       and also to compute weights */
+        and also to compute weights */
     int i = 0;
     for (int j = 0; j < nbElements; j++)
…
     /* since for the 2nd order case source grid elements can contribute to a destination grid element over several "paths"
-       (step1: gradient is computed using neighbours on same grid, step2: intersection uses several elements on other grid)
-       accumulate them so that there is only one final weight between two elements */
+        (step1: gradient is computed using neighbours on same grid, step2: intersection uses several elements on other grid)
+        accumulate them so that there is only one final weight between two elements */
     map<GloId,double> wgt_map;
…
         double fk = recvValue[rank][n1];
         double srcArea = recvArea[rank][n1];
+        double srcGivenArea = recvGivenArea[rank][n1];
         double w = (*it)->area;
         if (quantity) w/=srcArea ;
+        else w=w*srcGivenArea/srcArea*e.area/e.given_area ;
 
         /* first order: src value times weight (weight = supermesh area), later divide by target area */
…
       double renorm=0;
       if (renormalize)
-        for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second / e.area;
+      {
+        if (quantity) for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second ;
+        else for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second / e.area;
+      }
       else renorm=1. ;
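The given_area machinery is the functional heart of this changeset: callers may now pass their own cell areas (for instance areas coming from the model grid rather than computed on the sphere), and every supermesh weight is rescaled by srcGivenArea/srcArea on the source side and e.area/e.given_area on the target side so the weights stay consistent with the supplied areas. A sketch of the correction as a standalone function (parameter names are illustrative; the real code reads these fields from Elt and Polyg):

    // Sketch: weight contributed by one supermesh (intersection) polygon,
    // with the r1642 correction for user-supplied ("given") cell areas.
    double supermeshWeight(double intersectionArea,
                           double srcArea, double srcGivenArea,
                           double tgtArea, double tgtGivenArea,
                           bool quantity)
    {
        double w = intersectionArea;
        if (quantity)
            w /= srcArea;                     // quantity mode: divide by source area only
        else
            w *= (srcGivenArea / srcArea)     // rescale the source contribution
               * (tgtArea / tgtGivenArea);    // and the target normalization
        return w;                             // first order: later divided by target area
    }

The renormalization hunk follows the same split: in quantity mode the accumulated weights are summed as-is, otherwise they are summed relative to the computed target area, and the final weights are divided by that sum when renormalize is set.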
…
       }
     }
-
-    //MPI_Barrier(communicator);
 
     /* free all memory allocated in this function */
…
       delete[] recvValue[rank];
       delete[] recvArea[rank];
+      delete[] recvGivenArea[rank];
       if (order == 2)
       {
…
       delete[] sendValue[rank];
       delete[] sendArea[rank];
+      delete[] sendGivenArea[rank];
       if (order == 2)
         delete[] sendGrad[rank];
…

(From here on, r1642 also re-indents entire function bodies; the hunks below keep a single copy of the unchanged code and mark only the substantive changes.)

 void Mapper::computeGrads()
 {
     /* array of pointers to collect local elements and elements received from other cpu */
     vector<Elt*> globalElements(sstree.nbLocalElements + nbNeighbourElements);
     int index = 0;
     for (int i = 0; i < sstree.nbLocalElements; i++, index++)
       globalElements[index] = &(sstree.localElements[i]);
     for (int i = 0; i < nbNeighbourElements; i++, index++)
       globalElements[index] = &neighbourElements[i];
 
     update_baryc(sstree.localElements, sstree.nbLocalElements);
     computeGradients(&globalElements[0], sstree.nbLocalElements);
 }
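computeGrads() gathers the local elements plus the halo of neighbour elements obtained in buildMeshTopology() because second-order remapping reconstructs the source field linearly around each cell barycentre, f(x) ≈ f_i + grad_i · (x − x_i), and estimating grad_i requires the neighbours' values. A toy sketch of the reconstruction step (illustrative types, not the actual XIOS ones):

    // Sketch: the linear (order-2) reconstruction that the gradients feed.
    struct Vec3 { double x, y, z; };

    static double dot(const Vec3& a, const Vec3& b)
    {
        return a.x * b.x + a.y * b.y + a.z * b.z;
    }

    // Field value at point p inside a source cell with mean value `val`,
    // barycentre `baryc` and gradient `grad` (first-order Taylor expansion).
    double reconstruct(double val, const Vec3& baryc, const Vec3& grad, const Vec3& p)
    {
        Vec3 d = { p.x - baryc.x, p.y - baryc.y, p.z - baryc.z };
        return val + dot(grad, d);
    }

This is also why the exchanges above size their gradient and neighbour-id buffers with a factor of NMAX+1: the cell's own gradient plus up to NMAX neighbour contributions.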
 void Mapper::buildMeshTopology()
 {
     int mpiSize, mpiRank;
-    MPI_Comm_size(communicator, &mpiSize);
-    MPI_Comm_rank(communicator, &mpiRank);
+    ep_lib::MPI_Comm_size(communicator, &mpiSize);
+    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 
     vector<Node> *routingList = new vector<Node>[mpiSize];
     vector<vector<int> > routes(sstree.localTree.leafs.size());
 
     sstree.routeIntersections(routes, sstree.localTree.leafs);
 
     for (int i = 0; i < routes.size(); ++i)
       for (int k = 0; k < routes[i].size(); ++k)
         routingList[routes[i][k]].push_back(sstree.localTree.leafs[i]);
     routingList[mpiRank].clear();
 
     CMPIRouting mpiRoute(communicator);
     mpiRoute.init(routes);
     int nRecv = mpiRoute.getTotalSourceElement();
+    // cout << mpiRank << " NRECV " << nRecv << "(" << routes.size() << ")"<< endl;
 
     int *nbSendNode = new int[mpiSize];
     int *nbRecvNode = new int[mpiSize];
     int *sendMessageSize = new int[mpiSize];
     int *recvMessageSize = new int[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       nbSendNode[rank] = routingList[rank].size();
       sendMessageSize[rank] = 0;
       for (size_t j = 0; j < routingList[rank].size(); j++)
       {
         Elt *elt = (Elt *) (routingList[rank][j].data);
         sendMessageSize[rank] += packedPolygonSize(*elt);
       }
     }
 
-    MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
-    MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+    ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+    ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
     char **sendBuffer = new char*[mpiSize];
     char **recvBuffer = new char*[mpiSize];
     int *pos = new int[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendNode[rank] > 0) sendBuffer[rank] = new char[sendMessageSize[rank]];
       if (nbRecvNode[rank] > 0) recvBuffer[rank] = new char[recvMessageSize[rank]];
     }
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       pos[rank] = 0;
       for (size_t j = 0; j < routingList[rank].size(); j++)
       {
         Elt *elt = (Elt *) (routingList[rank][j].data);
         packPolygon(*elt, sendBuffer[rank], pos[rank]);
       }
     }
     delete [] routingList;
 
     int nbSendRequest = 0;
     int nbRecvRequest = 0;
-    MPI_Request *sendRequest = new MPI_Request[mpiSize];
-    MPI_Request *recvRequest = new MPI_Request[mpiSize];
-    MPI_Status *status = new MPI_Status[mpiSize];
+    ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
+    ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
+    ep_lib::MPI_Status *status = new ep_lib::MPI_Status[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendNode[rank] > 0)
       {
-        MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendBuffer[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
       }
       if (nbRecvNode[rank] > 0)
       {
-        MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
       }
     }
 
-    MPI_Waitall(nbRecvRequest, recvRequest, status);
-    MPI_Waitall(nbSendRequest, sendRequest, status);
+    ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+    ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
 
     for (int rank = 0; rank < mpiSize; rank++)
       if (nbSendNode[rank] > 0) delete [] sendBuffer[rank];
     delete [] sendBuffer;
 
     char **sendBuffer2 = new char*[mpiSize];
     char **recvBuffer2 = new char*[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       nbSendNode[rank] = 0;
       sendMessageSize[rank] = 0;
 
       if (nbRecvNode[rank] > 0)
       {
         set<NodePtr> neighbourList;
         pos[rank] = 0;
         for (int j = 0; j < nbRecvNode[rank]; j++)
         {
           Elt elt;
           unpackPolygon(elt, recvBuffer[rank], pos[rank]);
           Node node(elt.x, cptRadius(elt), &elt);
           findNeighbour(sstree.localTree.root, &node, neighbourList);
         }
         nbSendNode[rank] = neighbourList.size();
         for (set<NodePtr>::iterator it = neighbourList.begin(); it != neighbourList.end(); it++)
         {
           Elt *elt = (Elt *) ((*it)->data);
           sendMessageSize[rank] += packedPolygonSize(*elt);
         }
 
         sendBuffer2[rank] = new char[sendMessageSize[rank]];
         pos[rank] = 0;
 
         for (set<NodePtr>::iterator it = neighbourList.begin(); it != neighbourList.end(); it++)
         {
           Elt *elt = (Elt *) ((*it)->data);
           packPolygon(*elt, sendBuffer2[rank], pos[rank]);
         }
       }
     }
     for (int rank = 0; rank < mpiSize; rank++)
       if (nbRecvNode[rank] > 0) delete [] recvBuffer[rank];
     delete [] recvBuffer;
 
-    MPI_Barrier(communicator);
-    MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
-    MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+    ep_lib::MPI_Barrier(communicator);
+    ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+    ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
     for (int rank = 0; rank < mpiSize; rank++)
       if (nbRecvNode[rank] > 0) recvBuffer2[rank] = new char[recvMessageSize[rank]];
 
     nbSendRequest = 0;
     nbRecvRequest = 0;
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendNode[rank] > 0)
       {
-        MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
       }
       if (nbRecvNode[rank] > 0)
       {
-        MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
       }
     }
 
-    MPI_Waitall(nbRecvRequest, recvRequest, status);
-    MPI_Waitall(nbSendRequest, sendRequest, status);
+    ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+    ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
 
     int nbNeighbourNodes = 0;
     for (int rank = 0; rank < mpiSize; rank++)
       nbNeighbourNodes += nbRecvNode[rank];
 
     neighbourElements = new Elt[nbNeighbourNodes];
     nbNeighbourElements = nbNeighbourNodes;
 
     int index = 0;
     for (int rank = 0; rank < mpiSize; rank++)
     {
       pos[rank] = 0;
       for (int j = 0; j < nbRecvNode[rank]; j++)
       {
         unpackPolygon(neighbourElements[index], recvBuffer2[rank], pos[rank]);
         neighbourElements[index].id.ind = sstree.localTree.leafs.size() + index;
         index++;
       }
     }
…
     /* re-compute on received elements to avoid having to send this information */
     neighbourNodes.resize(nbNeighbourNodes);
     setCirclesAndLinks(neighbourElements, neighbourNodes);
     cptAllEltsGeom(neighbourElements, nbNeighbourNodes, srcGrid.pole);
 
     /* the local SS tree must include nodes from other cpus if they are potential
        intersector of a local node */
     sstree.localTree.insertNodes(neighbourNodes);
 
     /* for every local element,
        use the SS-tree to find all elements (including neighbourElements)
        who are potential neighbours because their circles intersect,
        then check all canditates for common edges to build up connectivity information
     */
     for (int j = 0; j < sstree.localTree.leafs.size(); j++)
     {
       Node& node = sstree.localTree.leafs[j];
 
       /* find all leafs whoes circles that intersect node's circle and save into node->intersectors */
       node.search(sstree.localTree.root);
 
       Elt *elt = (Elt *)(node.data);
 
       for (int i = 0; i < elt->n; i++) elt->neighbour[i] = NOT_FOUND;
 
       /* for element `elt` loop through all nodes in the SS-tree
          whoes circles intersect with the circle around `elt` (the SS intersectors)
          and check if they are neighbours in the sense that the two elements share an edge.
          If they do, save this information for elt */
       for (list<NodePtr>::iterator it = (node.intersectors).begin(); it != (node.intersectors).end(); ++it)
       {
         Elt *elt2 = (Elt *)((*it)->data);
         set_neighbour(*elt, *elt2);
       }
 
       /*
       for (int i = 0; i < elt->n; i++)
       {
         if (elt->neighbour[i] == NOT_FOUND)
           error_exit("neighbour not found");
       }
       */
     }
 }
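Both rounds of the neighbour exchange above ship polygons as flat byte buffers through packPolygon(), unpackPolygon() and packedPolygonSize() (defined elsewhere in this library): fields are copied in a fixed order while an integer cursor advances in lockstep on the writing and reading side. A generic sketch of the idiom with a hypothetical payload struct:

    // Sketch: cursor-based serialization in the style of packPolygon()/
    // unpackPolygon(). `Cell` and its fields are hypothetical stand-ins.
    #include <cstring>

    struct Cell { int n; double area; double x[16]; };

    int packedCellSize(const Cell& c)
    {
        return sizeof(c.n) + sizeof(c.area) + c.n * sizeof(double);
    }

    void packCell(const Cell& c, char* buffer, int& pos)
    {
        std::memcpy(buffer + pos, &c.n, sizeof(c.n));         pos += sizeof(c.n);
        std::memcpy(buffer + pos, &c.area, sizeof(c.area));   pos += sizeof(c.area);
        std::memcpy(buffer + pos, c.x, c.n * sizeof(double)); pos += c.n * sizeof(double);
    }

    void unpackCell(Cell& c, const char* buffer, int& pos)
    {
        std::memcpy(&c.n, buffer + pos, sizeof(c.n));         pos += sizeof(c.n);
        std::memcpy(&c.area, buffer + pos, sizeof(c.area));   pos += sizeof(c.area);
        std::memcpy(c.x, buffer + pos, c.n * sizeof(double)); pos += c.n * sizeof(double);
    }

The sender sums packedCellSize() over all items to size each message, packs into the buffer, and the receiver unpacks with its own cursor until the buffer is exhausted — the same pattern as the sendMessageSize/pos bookkeeping above.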
 
 void Mapper::computeIntersection(Elt *elements, int nbElements)
 {
     int mpiSize, mpiRank;
-    MPI_Comm_size(communicator, &mpiSize);
-    MPI_Comm_rank(communicator, &mpiRank);
-
-    MPI_Barrier(communicator);
+    ep_lib::MPI_Comm_size(communicator, &mpiSize);
+    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+
+    ep_lib::MPI_Barrier(communicator);
 
     vector<Node> *routingList = new vector<Node>[mpiSize];
 
     vector<Node> routeNodes; routeNodes.reserve(nbElements);
     for (int j = 0; j < nbElements; j++)
     {
       elements[j].id.ind = j;
       elements[j].id.rank = mpiRank;
       routeNodes.push_back(Node(elements[j].x, cptRadius(elements[j]), &elements[j]));
     }
 
     vector<vector<int> > routes(routeNodes.size());
     sstree.routeIntersections(routes, routeNodes);
     for (int i = 0; i < routes.size(); ++i)
       for (int k = 0; k < routes[i].size(); ++k)
         routingList[routes[i][k]].push_back(routeNodes[i]);
 
     if (verbose >= 2)
     {
       cout << " --> rank " << mpiRank << " nbElements " << nbElements << " : ";
       for (int rank = 0; rank < mpiSize; rank++)
         cout << routingList[rank].size() << " ";
       cout << endl;
     }
-    MPI_Barrier(communicator);
+    ep_lib::MPI_Barrier(communicator);
 
     int *nbSendNode = new int[mpiSize];
     int *nbRecvNode = new int[mpiSize];
     int *sentMessageSize = new int[mpiSize];
     int *recvMessageSize = new int[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       nbSendNode[rank] = routingList[rank].size();
       sentMessageSize[rank] = 0;
       for (size_t j = 0; j < routingList[rank].size(); j++)
       {
         Elt *elt = (Elt *) (routingList[rank][j].data);
         sentMessageSize[rank] += packedPolygonSize(*elt);
       }
     }
 
-    MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
-    MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+    ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+    ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
     int total = 0;
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       total = total + nbRecvNode[rank];
     }
 
     if (verbose >= 2) cout << "---> rank " << mpiRank << " : compute intersection : total received nodes " << total << endl;
     char **sendBuffer = new char*[mpiSize];
     char **recvBuffer = new char*[mpiSize];
     int *pos = new int[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendNode[rank] > 0) sendBuffer[rank] = new char[sentMessageSize[rank]];
       if (nbRecvNode[rank] > 0) recvBuffer[rank] = new char[recvMessageSize[rank]];
     }
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       pos[rank] = 0;
       for (size_t j = 0; j < routingList[rank].size(); j++)
       {
         Elt* elt = (Elt *) (routingList[rank][j].data);
         packPolygon(*elt, sendBuffer[rank], pos[rank]);
       }
     }
     delete [] routingList;
 
     int nbSendRequest = 0;
     int nbRecvRequest = 0;
-    MPI_Request *sendRequest = new MPI_Request[mpiSize];
-    MPI_Request *recvRequest = new MPI_Request[mpiSize];
-    MPI_Status *status = new MPI_Status[mpiSize];
+    ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
+    ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
+    ep_lib::MPI_Status *status = new ep_lib::MPI_Status[mpiSize];
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendNode[rank] > 0)
       {
-        MPI_Issend(sendBuffer[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendBuffer[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
       }
       if (nbRecvNode[rank] > 0)
       {
-        MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
       }
     }
 
-    MPI_Waitall(nbRecvRequest, recvRequest, status);
-    MPI_Waitall(nbSendRequest, sendRequest, status);
+    ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+    ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
     char **sendBuffer2 = new char*[mpiSize];
     char **recvBuffer2 = new char*[mpiSize];
 
     double tic = cputime();
     for (int rank = 0; rank < mpiSize; rank++)
     {
       sentMessageSize[rank] = 0;
 
       if (nbRecvNode[rank] > 0)
       {
         Elt *recvElt = new Elt[nbRecvNode[rank]];
         pos[rank] = 0;
         for (int j = 0; j < nbRecvNode[rank]; j++)
         {
           unpackPolygon(recvElt[j], recvBuffer[rank], pos[rank]);
           cptEltGeom(recvElt[j], tgtGrid.pole);
           Node recvNode(recvElt[j].x, cptRadius(recvElt[j]), &recvElt[j]);
           recvNode.search(sstree.localTree.root);
           /* for a node holding an element of the target, loop throught candidates for intersecting source */
           for (list<NodePtr>::iterator it = (recvNode.intersectors).begin(); it != (recvNode.intersectors).end(); ++it)
           {
             Elt *elt2 = (Elt *) ((*it)->data);
             /* recvElt is target, elt2 is source */
             // intersect(&recvElt[j], elt2);
             intersect_ym(&recvElt[j], elt2);
           }
 
           if (recvElt[j].is.size() > 0) sentMessageSize[rank] += packIntersectionSize(recvElt[j]);
 
           // here recvNode goes out of scope
         }
 
         if (sentMessageSize[rank] > 0)
         {
           sentMessageSize[rank] += sizeof(int);
           sendBuffer2[rank] = new char[sentMessageSize[rank]];
           *((int *) sendBuffer2[rank]) = 0;
           pos[rank] = sizeof(int);
           for (int j = 0; j < nbRecvNode[rank]; j++)
           {
             packIntersection(recvElt[j], sendBuffer2[rank], pos[rank]);
             //FIXME should be deleted: recvElt[j].delete_intersections(); // intersection areas have been packed to buffer and won't be used any more
           }
         }
         delete [] recvElt;
       }
     }
     delete [] pos;
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbSendNode[rank] > 0) delete [] sendBuffer[rank];
       if (nbRecvNode[rank] > 0) delete [] recvBuffer[rank];
       nbSendNode[rank] = 0;
     }
 
     if (verbose >= 2) cout << "Rank " << mpiRank << " Compute (internal) intersection " << cputime() - tic << " s" << endl;
-    MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+    ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);
 
     for (int rank = 0; rank < mpiSize; rank++)
       if (recvMessageSize[rank] > 0)
         recvBuffer2[rank] = new char[recvMessageSize[rank]];
 
     nbSendRequest = 0;
     nbRecvRequest = 0;
 
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (sentMessageSize[rank] > 0)
       {
-        MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        ep_lib::MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
       }
       if (recvMessageSize[rank] > 0)
       {
-        MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
       }
     }
 
-    MPI_Waitall(nbRecvRequest, recvRequest, status);
-    MPI_Waitall(nbSendRequest, sendRequest, status);
+    ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+    ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
 
     delete [] sendRequest;
     delete [] recvRequest;
     delete [] status;
     for (int rank = 0; rank < mpiSize; rank++)
     {
       if (nbRecvNode[rank] > 0)
       {
         if (sentMessageSize[rank] > 0)
           delete [] sendBuffer2[rank];
       }
 
       if (recvMessageSize[rank] > 0)
       {
         unpackIntersection(elements, recvBuffer2[rank]);
         delete [] recvBuffer2[rank];
       }
     }
     delete [] sendBuffer2;
     delete [] recvBuffer2;
     delete [] sendBuffer;
     delete [] recvBuffer;
 
     delete [] nbSendNode;
     delete [] nbRecvNode;
     delete [] sentMessageSize;
     delete [] recvMessageSize;
 }
 
 Mapper::~Mapper()
 {
     delete [] remapMatrix;
     delete [] srcAddress;
     delete [] srcRank;
     delete [] dstAddress;
     if (neighbourElements) delete [] neighbourElements;
 }
 
 }