Changeset 1784 for XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation
- Timestamp: 12/12/19 18:15:14 (4 years ago)
- Location: XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation
- Files: 10 edited
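Every hunk below applies the same refactoring: communicator information that was previously reached through the CContextClient object (context->client, then clientRank, clientSize, intraComm) is now read directly from the context (intraCommRank_, intraCommSize_, intraComm_). The following is a minimal, self-contained sketch of that pattern; the struct layouts are stand-ins for the real XIOS classes, model only the members touched by these hunks, and mock the communicator as an int so the sketch builds without MPI.

    // Sketch of the access-path change made by r1784 (stand-in types only).
    #include <iostream>

    struct CContextClient          // old access path, removed by this changeset
    {
      int clientRank = 0;
      int clientSize = 1;
      int intraComm  = 0;          // stand-in for MPI_Comm
    };

    struct CContext                // new access path used after this changeset
    {
      CContextClient* client = nullptr;
      int intraCommRank_ = 0;
      int intraCommSize_ = 1;
      int intraComm_     = 0;      // stand-in for MPI_Comm
    };

    int main()
    {
      CContextClient client;
      CContext context;
      context.client = &client;

      // r1639 style: go through the CContextClient object.
      int oldRank = context.client->clientRank;
      int oldSize = context.client->clientSize;

      // r1784 style: read the intra-communicator information directly.
      int newRank = context.intraCommRank_;
      int newSize = context.intraCommSize_;

      std::cout << oldRank << oldSize << newRank << newSize << std::endl;
      return 0;
    }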
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/axis_algorithm_interpolate.cpp
r1639 → r1784

      CTimer::get("CAxisAlgorithmInterpolate::computeIndexSourceMapping_").resume() ;
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int nbClient = client->clientSize;
+     int nbClient = context->intraCommSize_;
      CArray<bool,1>& axisMask = axisSrc_->mask;
      int srcSize = axisSrc_->n_glo.getValue();
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int nbClient = client->clientSize;
+     int nbClient = context->intraCommSize_;

      int srcSize = axisSrc_->n_glo.getValue();
…
      int* recvCount=new int[nbClient];
-     MPI_Allgather(&numValue,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+     MPI_Allgather(&numValue,1,MPI_INT,recvCount,1,MPI_INT,context->intraComm_);

      int* displ=new int[nbClient];
…
      // Each client have enough global info of axis
-     MPI_Allgatherv(sendIndexBuff,numValue,MPI_INT,recvIndexBuff,recvCount,displ,MPI_INT,client->intraComm);
-     MPI_Allgatherv(sendValueBuff,numValue,MPI_DOUBLE,&(recvBuff[0]),recvCount,displ,MPI_DOUBLE,client->intraComm);
+     MPI_Allgatherv(sendIndexBuff,numValue,MPI_INT,recvIndexBuff,recvCount,displ,MPI_INT,context->intraComm_);
+     MPI_Allgatherv(sendValueBuff,numValue,MPI_DOUBLE,&(recvBuff[0]),recvCount,displ,MPI_DOUBLE,context->intraComm_);

      for (int idx = 0; idx < srcSize; ++idx)
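These hunks sit inside an exchange where every rank first publishes how many index/value entries it contributes, displacements are derived from those counts, and the variable-length buffers are then gathered on every rank. Below is a self-contained sketch of that MPI pattern; buffer names and per-rank counts are illustrative, and MPI_COMM_WORLD stands in for context->intraComm_.

    // Sketch: gather per-rank counts, build displacements, then gather
    // variable-length data with MPI_Allgatherv (compile with mpicxx).
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      MPI_Comm comm = MPI_COMM_WORLD;   // plays the role of context->intraComm_
      int rank, nbClient;
      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &nbClient);

      // Illustrative: each rank contributes a different number of values.
      int numValue = rank + 1;
      std::vector<double> sendValueBuff(numValue, static_cast<double>(rank));

      // 1) Every rank learns how much every other rank will send.
      std::vector<int> recvCount(nbClient);
      MPI_Allgather(&numValue, 1, MPI_INT, recvCount.data(), 1, MPI_INT, comm);

      // 2) Displacements are the exclusive prefix sum of the counts.
      std::vector<int> displ(nbClient, 0);
      for (int i = 1; i < nbClient; ++i) displ[i] = displ[i - 1] + recvCount[i - 1];
      int totalSize = displ[nbClient - 1] + recvCount[nbClient - 1];

      // 3) Gather the variable-length buffers so each rank holds the global view.
      std::vector<double> recvBuff(totalSize);
      MPI_Allgatherv(sendValueBuff.data(), numValue, MPI_DOUBLE,
                     recvBuff.data(), recvCount.data(), displ.data(), MPI_DOUBLE, comm);

      MPI_Finalize();
      return 0;
    }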
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/axis_algorithm_inverse.cpp
r1639 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int nbClient = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int nbClient = context->intraCommSize_;

      int niSrc = axisSrc_->n.getValue();
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalSrcIndex);
      CClientClientDHTInt::Index2VectorInfoTypeMap& computedGlobalIndexOnProc = dhtIndexProcRank.getInfoIndexMap();
…
      sendRankSizeMap[itIndex->first] = sendSize;
    }
-     MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+     MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,context->intraComm_);

      displ[0]=0 ;
…
      int* recvRankBuff=new int[recvSize];
      int* recvSizeBuff=new int[recvSize];
-     MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-     MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+     MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,context->intraComm_);
+     MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,context->intraComm_);
      for (int i = 0; i < nbClient; ++i)
      {
…
      requests.push_back(MPI_Request());
-     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
+     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, context->intraComm_, &requests.back());
    }
…
      // Send global index source and mask
      requests.push_back(MPI_Request());
-     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
+     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, context->intraComm_, &requests.back());
    }
…
      requests.push_back(MPI_Request());
-     MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
+     MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, context->intraComm_, &requests.back());
    }
…
      // Okie, now inform the destination which source index are masked
      requests.push_back(MPI_Request());
-     MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
+     MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, context->intraComm_, &requests.back());
    }
      status.resize(requests.size());
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/axis_algorithm_transformation.cpp
r1622 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;

      size_t globalIndex;
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalAxisIndex);
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/domain_algorithm_expand.cpp
r1622 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;

      int type = 1; // For edge
      CMesh mesh;
…
      switch (domainSource->type) {
        case CDomain::type_attr::unstructured:
-         mesh.getGlobalNghbFaces(type, client->intraComm, domainSource->i_index, bounds_lon_src, bounds_lat_src, neighborsSrc);
+         mesh.getGlobalNghbFaces(type, context->intraComm_, domainSource->i_index, bounds_lon_src, bounds_lat_src, neighborsSrc);
          updateUnstructuredDomainAttributes(domainDestination, domainSource, neighborsSrc);
          break;
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;

      int type = 1; // For edge
…
      switch (domainSource->type) {
        case CDomain::type_attr::unstructured:
-         mesh.getGlobalNghbFaces(type, client->intraComm, domainSource->i_index, bounds_lon_src, bounds_lat_src, neighborsSrc);
+         mesh.getGlobalNghbFaces(type, context->intraComm_, domainSource->i_index, bounds_lon_src, bounds_lat_src, neighborsSrc);
          updateUnstructuredDomainAttributes(domainDestination, domainSource, neighborsSrc);
          break;
…
      int iindexSrc, jindexSrc, globIndexSrc;
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;

      // First of all, "copy" all attributes of domain source to domain destination
…
    }

-     CClientClientDHTDouble dhtData(localData,client->intraComm);
+     CClientClientDHTDouble dhtData(localData,context->intraComm_);
      dhtData.computeIndexInfoMapping(globalIndexSrcOnDstDomain);
      CClientClientDHTDouble::Index2VectorInfoTypeMap& neighborData = dhtData.getInfoIndexMap();
…

      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;

      // First of all, "copy" all attributes of domain source to domain destination
…
    }

-     CClientClientDHTDouble dhtData(localData, client->intraComm);
+     CClientClientDHTDouble dhtData(localData, context->intraComm_);
      CArray<size_t,1> neighborInd(nbNeighbor);
      for (int idx = 0; idx < nbNeighbor; ++idx)
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/domain_algorithm_generate_rectilinear.cpp
r1639 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
+     int clientSize = context->intraCommSize_ ;
+     int clientRank = context->intraCommRank_ ;

      std::vector<CDomain*> domListSrcP = gridSrc->getDomains();
      std::vector<CAxis*> axisListSrcP = gridSrc->getAxis();
…
    {
      // First, find (roundly) distribution of associated axis (if any)
-     if (axisListSrcP.empty()) nbDomainDistributedPart_ = client->clientSize;
+     if (axisListSrcP.empty()) nbDomainDistributedPart_ = clientSize;
      else
      {
…
        HashXIOS<int> hashFunc;
        StdSize hashValue = hashFunc.hashVec(globalAxisIndex);
-       std::vector<StdSize> recvBuff(client->clientSize);
+       std::vector<StdSize> recvBuff(clientSize);
        MPI_Gather(&hashValue, 1, MPI_UNSIGNED_LONG,
                   &recvBuff[0], 1, MPI_UNSIGNED_LONG,
                   0,
-                  client->intraComm);
-       if (0 == client->clientRank)
+                  context->intraComm_);
+       if (0 == clientRank)
        {
          std::set<StdSize> setTmp;
…
        MPI_Bcast(&nbLocalAxis[0], nbAxis, MPI_INT,
-                 0, client->intraComm);
+                 0, context->intraComm_);
      }

      int nbAxisDistributedPart = 1;
      for (int j = 0; j < nbAxis; ++j) nbAxisDistributedPart *= nbLocalAxis[j];
-     nbDomainDistributedPart_ = client->clientSize/nbAxisDistributedPart;
+     nbDomainDistributedPart_ = clientSize/nbAxisDistributedPart;
    }
    }
…

      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     int modPart = (client->clientSize) % nbPartition;
+     int modPart = (context->intraCommSize_) % nbPartition;
      if (0 != modPart)
        ERROR("CDomainAlgorithmGenerateRectilinear::computeDistributionGridDestination(CGrid* gridDest)",
              << "The grid " <<gridDest->getId() << " is not well-distributed. There is an incompatibility between distribution of axis and domain.");
-     nbDomainDistributedPart_ = client->clientSize/nbPartition;
+     nbDomainDistributedPart_ = context->intraCommSize_/nbPartition;

    }
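The MPI_Gather/MPI_Bcast hunks above implement a gather-at-root, decide, then broadcast step: every rank sends a hash of its local axis index layout to rank 0, which derives a result from the gathered values and broadcasts it back. Below is a self-contained sketch of that pattern; the hash values and the derived quantity are illustrative, and MPI_COMM_WORLD stands in for context->intraComm_.

    // Sketch: gather one value per rank at the root, derive a result there,
    // then broadcast it back to every rank (compile with mpicxx).
    #include <mpi.h>
    #include <set>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      MPI_Comm comm = MPI_COMM_WORLD;   // plays the role of context->intraComm_
      int clientRank, clientSize;
      MPI_Comm_rank(comm, &clientRank);
      MPI_Comm_size(comm, &clientSize);

      // Illustrative stand-in for hashFunc.hashVec(globalAxisIndex).
      unsigned long hashValue = 17UL * static_cast<unsigned long>(clientRank % 3);

      std::vector<unsigned long> recvBuff(clientSize);
      MPI_Gather(&hashValue, 1, MPI_UNSIGNED_LONG,
                 recvBuff.data(), 1, MPI_UNSIGNED_LONG, 0, comm);

      int nbDistinct = 0;
      if (0 == clientRank)
      {
        std::set<unsigned long> setTmp(recvBuff.begin(), recvBuff.end());
        nbDistinct = static_cast<int>(setTmp.size());   // distinct local layouts
      }
      // Every rank ends up with the value computed on the root.
      MPI_Bcast(&nbDistinct, 1, MPI_INT, 0, comm);

      MPI_Finalize();
      return 0;
    }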
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/domain_algorithm_interpolate.cpp
r1639 → r1784

      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
+     int clientRank = context->intraCommRank_;
      int i, j, k, idx;
      std::vector<double> srcPole(3,0), dstPole(3,0);
…
      // Calculate weight index
-     Mapper mapper(client->intraComm);
+     Mapper mapper(context->intraComm_);
      mapper.setVerbosity(PROGRESS) ;
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;

      MPI_Comm poleComme(MPI_COMM_NULL);
-     MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+     MPI_Comm_split(context->intraComm_, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
      if (MPI_COMM_NULL != poleComme)
      {
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
+     int clientRank = context->intraCommRank_;

      this->transformationMapping_.resize(1);
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;

      size_t n_src = domainSrc_->ni_glo * domainSrc_->nj_glo;
      size_t n_dst = domainDest_->ni_glo * domainDest_->nj_glo;
…
    }

-     MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
-     MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
+     MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, context->intraComm_);
+     MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, context->intraComm_);

      if (0 == globalNbWeight)
…
      std::vector<StdSize> count(1, localNbWeight);

-     WriteNetCdf netCdfWriter(filename, client->intraComm);
+     WriteNetCdf netCdfWriter(filename, context->intraComm_);

      // Define some dimensions
…
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;
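The MPI_Allreduce/MPI_Scan pair above computes the total number of interpolation weights and each rank's position in the global weight array before the parallel NetCDF write. Below is a self-contained sketch of that offset computation; the local counts are illustrative, MPI_COMM_WORLD stands in for context->intraComm_, and the explicit subtraction that turns the inclusive scan into a start index is an assumption about how startIndex is used, not a copy of the XIOS code.

    // Sketch: global sum of per-rank counts plus a prefix sum that yields a
    // per-rank start offset (compile with mpicxx).
    #include <mpi.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      MPI_Comm comm = MPI_COMM_WORLD;   // plays the role of context->intraComm_
      int rank;
      MPI_Comm_rank(comm, &rank);

      long localNbWeight = 10L * (rank + 1);   // illustrative local weight count
      long globalNbWeight = 0, inclusiveSum = 0;

      // Total number of weights over all ranks, known by every rank.
      MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, comm);

      // Inclusive prefix sum; subtracting the local count gives the index at
      // which this rank's weights start in the global array.
      MPI_Scan(&localNbWeight, &inclusiveSum, 1, MPI_LONG, MPI_SUM, comm);
      long startIndex = inclusiveSum - localNbWeight;

      (void)globalNbWeight;
      (void)startIndex;
      MPI_Finalize();
      return 0;
    }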
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/domain_algorithm_transformation.cpp
r1622 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;

      int niGlob = domainSrc_->ni_glo.getValue();
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalDomainIndex);
      globalDomainIndexOnProc = dhtIndexProcRank.getInfoIndexMap();
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/generic_algorithm_transformation.cpp
r1639 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;

      computePositionElements(gridSrc, gridDst);
      std::vector<CScalar*> scalarListSrcP = gridSrc->getScalars();
…
      {
        distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ;
-       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
+       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, context->intraComm_) ;

      }
…
      {
        distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ;
-       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
+       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, context->intraComm_) ;
      }
      else //it's a scalar
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     int nbClient = client->clientSize;
+     int nbClient = context->intraCommSize_;

      typedef std::unordered_map<int, std::vector<std::pair<int,double> > > SrcToDstMap;
…
      int sendValue = (computeGlobalIndexOnProc) ? 1 : 0;
      int recvValue = 0;
-     MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm);
+     MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, context->intraComm_);
      computeGlobalIndexOnProc = (0 < recvValue);
…
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
+     int clientRank = context->intraCommRank_;

      std::vector<CDomain*> domainListSrcP = gridSrc->getDomains();
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;

      globalScalarIndexOnProc.rehash(std::ceil(clientSize/globalScalarIndexOnProc.max_load_factor()));
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;

      size_t globalIndex;
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      CArray<size_t,1> globalAxisIndex(axisDst->index.numElements());
      for (int idx = 0; idx < globalAxisIndex.numElements(); ++idx)
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;

      int niGlobSrc = domainSrc->ni_glo.getValue();
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalDomainIndex);
…
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     int nbClient = client->clientSize;
+     int nbClient = context->intraCommSize_;

      computePositionElements(gridDst, gridSrc);
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/grid_transformation.cpp
r1639 → r1784

      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;

      ListAlgoType::const_iterator itb = listAlgos_.begin(),
…
      CTimer::get("computeTransformationMappingConvert").resume();
      nbLocalIndexOnGridDest_.push_back(nbLocalIndexOnGridDest) ;
-     int clientRank=client->clientRank;
+     int clientRank=context->intraCommRank_ ;
      {
        SendingIndexGridSourceMap tmp;
…
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     int nbClient = client->clientSize;
-     int clientRank = client->clientRank;
+     int nbClient = context->intraCommSize_;
+     int clientRank = context->intraCommRank_;

      // Recalculate the distribution of grid destination
-     CDistributionClient distributionClientDest(client->clientRank, tmpGridDestination_);
+     CDistributionClient distributionClientDest(clientRank, tmpGridDestination_);
      CDistributionClient::GlobalLocalDataMap& globalLocalIndexGridDestSendToServer = distributionClientDest.getGlobalLocalDataSendToServer();
…
      sendRankSizeMap[itIndex->first] = sendSize;
    }
-     MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT, client->intraComm);
+     MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT, context->intraComm_);

      displ[0]=0 ;
…
      int* recvRankBuff=new int[recvSize];
      int* recvSizeBuff=new int[recvSize];
-     MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-     MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+     MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,context->intraComm_);
+     MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,context->intraComm_);
      for (int i = 0; i < nbClient; ++i)
      {
…
      requests.push_back(MPI_Request());
-     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
+     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, context->intraComm_, &requests.back());
      requests.push_back(MPI_Request());
-     MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
+     MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, context->intraComm_, &requests.back());
    }
…
      // Send global index source and mask
      requests.push_back(MPI_Request());
-     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
+     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, context->intraComm_, &requests.back());
      requests.push_back(MPI_Request());
-     MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
+     MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, context->intraComm_, &requests.back());
    }
…
      requests.push_back(MPI_Request());
-     MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+     MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, context->intraComm_, &requests.back());
    }

      // Ok, now we fill in local index of grid source (we even count for masked index)
-     CDistributionClient distributionClientSrc(client->clientRank, gridSource_);
+     CDistributionClient distributionClientSrc(clientRank, gridSource_);
      CDistributionClient::GlobalLocalDataMap& globalLocalIndexGridSrcSendToServer = distributionClientSrc.getGlobalLocalDataSendToServer();
      localIndexToSendFromGridSource_.push_back(SendingIndexGridSourceMap());
…
      // Okie, now inform the destination which source index are masked
      requests.push_back(MPI_Request());
-     MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+     MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, context->intraComm_, &requests.back());
    }
      status.resize(requests.size());
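The hunks above use a non-blocking exchange: an MPI_Irecv and MPI_Isend is posted per peer, the request handles are collected in a vector, and the code later waits on all of them (status.resize followed by a wait). Below is a self-contained sketch of that pattern; the peer list, buffer contents, and sizes are illustrative, MPI_COMM_WORLD stands in for context->intraComm_, and only the tag value 46 is taken from the hunks.

    // Sketch: post Irecv/Isend for every peer, keep the requests, wait on all
    // of them (compile with mpicxx).
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      MPI_Comm comm = MPI_COMM_WORLD;   // plays the role of context->intraComm_
      int rank, size;
      MPI_Comm_rank(comm, &rank);
      MPI_Comm_size(comm, &size);

      const int tag = 46;               // same tag value as the hunks above
      std::vector<MPI_Request> requests;
      std::vector<unsigned long> sendBuf(4, static_cast<unsigned long>(rank));
      std::vector<unsigned long> recvBuf(4 * size);

      // Post one receive per peer (including self), keeping every request.
      for (int peer = 0; peer < size; ++peer)
      {
        requests.push_back(MPI_Request());
        MPI_Irecv(&recvBuf[4 * peer], 4, MPI_UNSIGNED_LONG, peer, tag, comm,
                  &requests.back());
      }
      // Post the matching sends.
      for (int peer = 0; peer < size; ++peer)
      {
        requests.push_back(MPI_Request());
        MPI_Isend(sendBuf.data(), 4, MPI_UNSIGNED_LONG, peer, tag, comm,
                  &requests.back());
      }

      // Wait for every posted operation before reusing the buffers.
      std::vector<MPI_Status> status(requests.size());
      MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());

      MPI_Finalize();
      return 0;
    }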
XIOS/dev/dev_ym/XIOS_COUPLING/src/transformation/scalar_algorithm_transformation.cpp
r1622 → r1784

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-     int clientRank = client->clientRank;
-     int clientSize = client->clientSize;
+     int clientRank = context->intraCommRank_;
+     int clientSize = context->intraCommSize_;

      if (2 == elementSourceType) // Source is a domain
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalIndexElementSource);
      globalIndexElementSourceOnProc = dhtIndexProcRank.getInfoIndexMap();
…
    }

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalIndexElementSource);
      globalIndexElementSourceOnProc = dhtIndexProcRank.getInfoIndexMap();
…
      globalIndex2ProcRank[globalIndex][0] = clientRank;

-     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, client->intraComm);
+     CClientClientDHTInt dhtIndexProcRank(globalIndex2ProcRank, context->intraComm_);
      dhtIndexProcRank.computeIndexInfoMapping(globalIndexElementSource);
      globalIndexElementSourceOnProc = dhtIndexProcRank.getInfoIndexMap();