Changeset 2296


Timestamp:
02/03/22 16:59:25 (2 years ago)
Author:
ymipsl
Message:

Solve a problem arising when outputting a scalar.
The Jenkins hash method maps 0 to 0, which conflicts with the hash initialisation that is also set to 0.

YM
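
For illustration, the sketch below (toy code, not taken from the XIOS sources) reproduces the symptom described above: with a hash for which hash(0) == 0, a rank whose only global index is 0 (the single point of a scalar grid) produces the same hash sum, namely 0, as a rank that holds no data at all, so comparing hash sums alone cannot tell the two apart. The toyHash function and the per-rank contents are made-up stand-ins for hashGlobalIndex and the real index distribution; comparing the element counts as well, as the added sizeRank/allEqual checks in this changeset do, restores the distinction.

// Sketch only, not XIOS code: assumes a hash with hash(0) == 0, which the
// changeset message states is the case for the hash used here.
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy stand-in for hashGlobalIndex: any multiplicative hash maps 0 to 0.
uint64_t toyHash(uint64_t globalIndex) { return globalIndex * 0x9e3779b97f4a7c15ull; }

int main()
{
  // Hypothetical scalar grid: its single point has global index 0 and lives on rank 0 only.
  std::vector<uint64_t> rank0 = {0}; // holds the scalar
  std::vector<uint64_t> rank1 = {};  // holds nothing

  auto hashSum = [](const std::vector<uint64_t>& idx)
  {
    uint64_t h = 0;                        // same initialisation as hashRank in the diff
    for (uint64_t i : idx) h += toyHash(i);
    return h;
  };

  // Both hash sums are 0: the hash alone cannot tell the two ranks apart,
  // so the view would be wrongly classified as non-distributed.
  std::printf("hash  rank0=%llu rank1=%llu\n",
              (unsigned long long)hashSum(rank0), (unsigned long long)hashSum(rank1));

  // Comparing the element count as well (what the added sizeRank vector does)
  // restores the distinction: 1 element versus 0 elements.
  std::printf("size  rank0=%zu rank1=%zu\n", rank0.size(), rank1.size());
  return 0;
}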

File:
1 edited

  • XIOS/dev/dev_ym/XIOS_COUPLING/src/distribution/grid_remote_connector.cpp

    r2291 → r2296

@@ -48,4 +48,5 @@
     int nDst = dstView_.size() ;
     vector<size_t> hashRank(remoteSize_) ;
+    vector<size_t> sizeRank(remoteSize_) ;
     isDstViewDistributed_.resize(nDst) ;

@@ -56,4 +57,6 @@
       hashRank.assign(remoteSize_,0) ; // everybody ranks to 0 except rank of the remote view I have
                                        // that would be assign to my local hash
+      sizeRank.assign(remoteSize_,0) ;
+
       for(auto& it : globalIndexView)
       {
@@ -64,12 +67,15 @@
         for(size_t ind=0;ind<globalIndexSize;ind++) hashValue += hashGlobalIndex(globalIndex(ind)) ;
         hashRank[rank] += hashValue ;
+        sizeRank[rank] += globalIndexSize ;
       }
       // sum all the hash for every process of the local comm. The reduce is on the size of remote view (remoteSize_)
       // after that for each rank of the remote view, we get the hash
       MPI_Allreduce(MPI_IN_PLACE, hashRank.data(), remoteSize_, MPI_SIZE_T, MPI_SUM, localComm_) ;
+      MPI_Allreduce(MPI_IN_PLACE, sizeRank.data(), remoteSize_, MPI_SIZE_T, MPI_SUM, localComm_) ;
       size_t value = hashRank[0] ;
+      size_t size = sizeRank[0] ;
       isDstViewDistributed_[i]=false ;
       for(int j=0 ; j<remoteSize_ ; j++)
-        if (value != hashRank[j])
+        if (size!=sizeRank[j] || value != hashRank[j])
         {
           isDstViewDistributed_[i]=true ;
@@ -91,18 +97,19 @@
       hashRank.assign(commSize,0) ; // 0 for everybody except my rank
       size_t globalIndexSize = globalIndex.numElements() ;
+
+      size_t allEqual ;
+      MPI_Allreduce(&globalIndexSize, &allEqual, 1, MPI_SIZE_T, MPI_BXOR, localComm_) ;
+      if (allEqual!=0)
+      {
+        isSrcViewDistributed_[i]=true ;
+        break ;
+      }
+
+      // warning : jenkins hash : 0 --> 0 : need to compare number of element for each ranks
       size_t hashValue=0 ;
       for(size_t ind=0;ind<globalIndexSize;ind++) hashValue += hashGlobalIndex(globalIndex(ind)) ;
-      hashRank[commRank] += hashValue ;
-
-      // Same method than for remote view
-      MPI_Allreduce(MPI_IN_PLACE, hashRank.data(), commSize, MPI_SIZE_T, MPI_SUM, localComm_) ;
-      size_t value = hashRank[0] ;
-      isSrcViewDistributed_[i]=false ;
-      for(int j=0 ; j<commSize ; j++)
-        if (value != hashRank[j])
-        {
-          isSrcViewDistributed_[i]=true ;
-          break ;
-        }
+      MPI_Allreduce(&hashValue, &allEqual, 1, MPI_SIZE_T, MPI_BXOR, localComm_) ;
+      if (allEqual!=0) isSrcViewDistributed_[i]=true ;
+      else isSrcViewDistributed_[i]=false ;
     }

@@ -659,6 +666,9 @@
         hash=0 ;
         for(int i=0; i<globalIndex.numElements(); i++) hash+=hashGlobalIndex(globalIndex(i)) ;
-        if (hashRanks.count(rank)==0) hashRanks[rank]=hash ;
-        else hashRanks[rank]=hashGlobalIndex.hashCombine(hashRanks[rank],hash) ;
+        if (globalIndex.numElements()>0)
+        {
+          if (hashRanks.count(rank)==0) hashRanks[rank]=hash ;
+          else hashRanks[rank]=hashGlobalIndex.hashCombine(hashRanks[rank],hash) ;
+        }
       }
     // a hash is now computed for data block I will sent to the server.
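
For readers who want to see the destination-view check in isolation, here is a hedged, self-contained sketch of the pattern used above (not the XIOS code itself): every process fills the slot of the remote rank it knows about with an element count and a hash sum, both arrays are summed with MPI_Allreduce over the local communicator, and the view is declared distributed as soon as either the count or the hash of some remote rank differs from rank 0's. MPI_UINT64_T is used here instead of the MPI_SIZE_T alias that appears in the diff, and toyHash plus the one-index-per-rank layout are invented for the example.

// Sketch of the distributed-view check pattern; compile with mpic++, run with mpirun.
#include <cstdint>
#include <cstdio>
#include <mpi.h>
#include <vector>

// Toy stand-in for hashGlobalIndex (multiplicative hash, maps 0 to 0).
static uint64_t toyHash(uint64_t globalIndex) { return globalIndex * 0x9e3779b97f4a7c15ull; }

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int commRank, commSize;
  MPI_Comm_rank(MPI_COMM_WORLD, &commRank);
  MPI_Comm_size(MPI_COMM_WORLD, &commSize);

  // Invented layout: the "remote" view has as many ranks as the local communicator,
  // and local rank r contributes the single global index r of remote rank r.
  int remoteSize = commSize;
  std::vector<uint64_t> hashRank(remoteSize, 0), sizeRank(remoteSize, 0);
  std::vector<uint64_t> myGlobalIndex = { (uint64_t)commRank };

  uint64_t hashValue = 0;
  for (uint64_t idx : myGlobalIndex) hashValue += toyHash(idx);
  hashRank[commRank] += hashValue;
  sizeRank[commRank] += myGlobalIndex.size();

  // Sum the per-remote-rank hashes and element counts over the local communicator.
  MPI_Allreduce(MPI_IN_PLACE, hashRank.data(), remoteSize, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, sizeRank.data(), remoteSize, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);

  // Distributed as soon as either the element count or the hash differs from rank 0's,
  // mirroring the "size!=sizeRank[j] || value != hashRank[j]" test in the changeset.
  bool distributed = false;
  for (int j = 1; j < remoteSize; j++)
    if (sizeRank[j] != sizeRank[0] || hashRank[j] != hashRank[0]) { distributed = true; break; }

  if (commRank == 0) std::printf("view distributed: %s\n", distributed ? "yes" : "no");
  MPI_Finalize();
  return 0;
}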