Changeset 2296
- Timestamp: 02/03/22 16:59:25
- File: 1 edited
Legend:
- Unmodified: leading space (context lines)
- Added: prefixed with +
- Removed: prefixed with -
XIOS/dev/dev_ym/XIOS_COUPLING/src/distribution/grid_remote_connector.cpp
--- r2291
+++ r2296
@@ -48,4 +48,5 @@
     int nDst = dstView_.size() ;
     vector<size_t> hashRank(remoteSize_) ;
+    vector<size_t> sizeRank(remoteSize_) ;
     isDstViewDistributed_.resize(nDst) ;
 
@@ -56,4 +57,6 @@
       hashRank.assign(remoteSize_,0) ; // everybody ranks to 0 except rank of the remote view I have
                                        // that would be assign to my local hash
+      sizeRank.assign(remoteSize_,0) ;
+
       for(auto& it : globalIndexView)
       {
@@ -64,12 +67,15 @@
         for(size_t ind=0;ind<globalIndexSize;ind++) hashValue += hashGlobalIndex(globalIndex(ind)) ;
         hashRank[rank] += hashValue ;
+        sizeRank[rank] += globalIndexSize ;
 
       // sum all the hash for every process of the local comm. The reduce is on the size of remote view (remoteSize_)
       // after that for each rank of the remote view, we get the hash
       MPI_Allreduce(MPI_IN_PLACE, hashRank.data(), remoteSize_, MPI_SIZE_T, MPI_SUM, localComm_) ;
+      MPI_Allreduce(MPI_IN_PLACE, sizeRank.data(), remoteSize_, MPI_SIZE_T, MPI_SUM, localComm_) ;
       size_t value = hashRank[0] ;
+      size_t size = sizeRank[0] ;
       isDstViewDistributed_[i]=false ;
       for(int j=0 ; j<remoteSize_ ; j++)
-        if ( value != hashRank[j])
+        if (size!=sizeRank[j] || value != hashRank[j])
         {
           isDstViewDistributed_[i]=true ;
@@ -91,18 +97,19 @@
       hashRank.assign(commSize,0) ; // 0 for everybody except my rank
       size_t globalIndexSize = globalIndex.numElements() ;
+
+      size_t allEqual ;
+      MPI_Allreduce(&globalIndexSize, &allEqual, 1, MPI_SIZE_T, MPI_BXOR, localComm_) ;
+      if (allEqual!=0)
+      {
+        isSrcViewDistributed_[i]=true ;
+        break ;
+      }
+
+      // warning : jenkins hash : 0 --> 0 : need to compare number of element for each ranks
       size_t hashValue=0 ;
       for(size_t ind=0;ind<globalIndexSize;ind++) hashValue += hashGlobalIndex(globalIndex(ind)) ;
-      hashRank[commRank] += hashValue ;
-
-      // Same method than for remote view
-      MPI_Allreduce(MPI_IN_PLACE, hashRank.data(), commSize, MPI_SIZE_T, MPI_SUM, localComm_) ;
-      size_t value = hashRank[0] ;
-      isSrcViewDistributed_[i]=false ;
-      for(int j=0 ; j<commSize ; j++)
-        if (value != hashRank[j])
-        {
-          isSrcViewDistributed_[i]=true ;
-          break ;
-        }
+      MPI_Allreduce(&hashValue, &allEqual, 1, MPI_SIZE_T, MPI_BXOR, localComm_) ;
+      if (allEqual!=0) isSrcViewDistributed_[i]=true ;
+      else isSrcViewDistributed_[i]=false ;
     }
 
@@ -659,6 +666,9 @@
         hash=0 ;
         for(int i=0; i<globalIndex.numElements(); i++) hash+=hashGlobalIndex(globalIndex(i)) ;
-        if (hashRanks.count(rank)==0) hashRanks[rank]=hash ;
-        else hashRanks[rank]=hashGlobalIndex.hashCombine(hashRanks[rank],hash) ;
+        if (globalIndex.numElements()>0)
+        {
+          if (hashRanks.count(rank)==0) hashRanks[rank]=hash ;
+          else hashRanks[rank]=hashGlobalIndex.hashCombine(hashRanks[rank],hash) ;
+        }
       }
       // a hash is now computed for data block I will sent to the server.
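In outline, this changeset hardens the test that decides whether a grid view is replicated or genuinely distributed across MPI processes. Because the Jenkins hash used by hashGlobalIndex maps 0 to 0, an empty index block and a block whose indices hash to 0 were previously indistinguishable, so the element count is now exchanged alongside the hash; on the source side, the sum-and-compare reduction is replaced by a single MPI_BXOR all-reduce per quantity. The following minimal sketch is not XIOS code: it assumes a toy hash and MPI_UINT64_T in place of XIOS's hashGlobalIndex functor and MPI_SIZE_T macro, and only illustrates the BXOR technique.

#include <mpi.h>
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy stand-in for XIOS's hashGlobalIndex (hypothetical): like the Jenkins
// hash, it maps 0 to 0, which is why sizes must be compared as well.
static uint64_t toyHash(uint64_t x) { return x * 0x9e3779b97f4a7c15ULL; }

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Each rank owns a block of global indices; rank-dependent values make the
  // view distributed. Use identical values everywhere for the replicated case.
  std::vector<uint64_t> globalIndex = { uint64_t(rank) * 10, uint64_t(rank) * 10 + 1 };

  uint64_t hashValue = 0;
  for (uint64_t g : globalIndex) hashValue += toyHash(g);
  uint64_t count = globalIndex.size();

  // Source-side test after r2296: BXOR-reduce the element count and the hash.
  // Identical contributions cancel pairwise, so a zero result is consistent
  // with a replicated view; any nonzero result is treated as "distributed".
  uint64_t xorCount = 0, xorHash = 0;
  MPI_Allreduce(&count, &xorCount, 1, MPI_UINT64_T, MPI_BXOR, MPI_COMM_WORLD);
  MPI_Allreduce(&hashValue, &xorHash, 1, MPI_UINT64_T, MPI_BXOR, MPI_COMM_WORLD);
  bool isDistributed = (xorCount != 0) || (xorHash != 0);

  if (rank == 0)
    std::printf("view looks %s\n", isDistributed ? "distributed" : "replicated");

  MPI_Finalize();
  return 0;
}

One caveat of the XOR approach: with an odd number of ranks a common nonzero value survives the reduction, so a replicated view can be flagged as distributed; presumably this is acceptable here, since at worst it forgoes an optimization rather than producing wrong data.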
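On the destination side, the diff keeps the sum-and-compare reduction but extends it with a parallel size reduction. Below is a free-standing sketch of that logic under the same assumptions as above; the names dstViewIsDistributed and toyHash are hypothetical stand-ins for the members of CGridRemoteConnector.

#include <mpi.h>
#include <cstdint>
#include <map>
#include <vector>

static uint64_t toyHash(uint64_t x) { return x * 0x9e3779b97f4a7c15ULL; }

// Per remote rank r, every local process accumulates the hash and the element
// count of the index block it holds for r; after a SUM all-reduce each entry
// describes r's complete view. The view is replicated only if every remote
// rank ends up with the same (hash, size) pair; comparing sizes too is what
// distinguishes an empty block from one whose hashes happen to sum to 0.
bool dstViewIsDistributed(const std::map<int, std::vector<uint64_t>>& blocks,
                          int remoteSize, MPI_Comm localComm)
{
  std::vector<uint64_t> hashRank(remoteSize, 0), sizeRank(remoteSize, 0);
  for (const auto& block : blocks)
  {
    uint64_t h = 0;
    for (uint64_t g : block.second) h += toyHash(g);
    hashRank[block.first] += h;
    sizeRank[block.first] += block.second.size();
  }
  MPI_Allreduce(MPI_IN_PLACE, hashRank.data(), remoteSize, MPI_UINT64_T, MPI_SUM, localComm);
  MPI_Allreduce(MPI_IN_PLACE, sizeRank.data(), remoteSize, MPI_UINT64_T, MPI_SUM, localComm);
  for (int j = 1; j < remoteSize; j++)
    if (sizeRank[j] != sizeRank[0] || hashRank[j] != hashRank[0]) return true;
  return false;
}

The last hunk applies the same concern to the per-rank hashes of the data blocks sent to the server: an empty block is now skipped entirely, so hashCombine never mixes in the hash of zero elements.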