#include "grid_remote_connector.hpp" #include "client_client_dht_template.hpp" #include "mpi.hpp" namespace xios { CGridRemoteConnector::CGridRemoteConnector(vector& srcView, vector& dstView, MPI_Comm localComm, int remoteSize) : srcView_(srcView), dstView_(dstView), localComm_(localComm), remoteSize_(remoteSize) {} void CGridRemoteConnector::computeConnector(void) { computeGenericMethod() ; } void CGridRemoteConnector::computeGenericMethod(void) { // generic method, every element can be distributed int nDst = dstView_.size() ; vector dstSliceSize(nDst) ; dstSliceSize[0] = 1 ; for(int i=1; igetGlobalSize()*dstSliceSize[i-1] ; CClientClientDHTTemplate::Index2VectorInfoTypeMap dataInfo ; CClientClientDHTTemplate::Index2VectorInfoTypeMap info ; // info map for(int pos=0; pos> globalIndexView ; dstView_[pos]->getGlobalIndexView(globalIndexView) ; CClientClientDHTTemplate::Index2VectorInfoTypeMap lastInfo(info) ; if (pos>0) { CArray ranks(globalIndexView.size()) ; auto it=globalIndexView.begin() ; for(int i=0 ; ifirst ; CClientClientDHTTemplate dataRanks(info, localComm_) ; dataRanks.computeIndexInfoMapping(ranks) ; lastInfo = dataRanks.getInfoIndexMap() ; } info.clear() ; for(auto& it : globalIndexView) { int rank = it.first ; auto& globalIndex = it.second ; auto& inf = info[rank] ; if (pos==0) for(int i=0;i dataRanks(dataInfo, localComm_) ; // generate list of global index for src view int nSrc = srcView_.size() ; vector srcSliceSize(nSrc) ; srcSliceSize[0] = 1 ; for(int i=1; igetGlobalSize()*srcSliceSize[i-1] ; vector srcGlobalIndex ; size_t sliceIndex=0 ; srcView_[nSrc-1]->getGlobalIndex(srcGlobalIndex, sliceIndex, srcSliceSize.data(), srcView_.data(), nSrc-1) ; if (srcGlobalIndex.size()>0) { CArray srcGlobalIndexArray(srcGlobalIndex.data(), shape(srcGlobalIndex.size()),neverDeleteData) ; dataRanks.computeIndexInfoMapping(srcGlobalIndexArray) ; } else { CArray srcGlobalIndexArray ; dataRanks.computeIndexInfoMapping(srcGlobalIndexArray) ; } const auto& returnInfo = dataRanks.getInfoIndexMap() ; vector>> elements(nSrc) ; // internal representation of elements composing the grid for(auto& indRanks : returnInfo) { size_t gridIndexGlo=indRanks.first ; auto& ranks = indRanks.second ; for(int i=nSrc-1; i>=0; i--) { auto& element = elements[i] ; size_t localIndGlo = gridIndexGlo / srcSliceSize[i] ; gridIndexGlo = gridIndexGlo % srcSliceSize[i] ; for(int rank : ranks) element[rank].insert(localIndGlo) ; } } elements_.resize(nSrc) ; for(int i=0 ; i& indGlo = rankInd.second ; CArray& indGloArray = elements_[i][rank] ; indGloArray.resize(indGlo.size()) ; int j=0 ; for (auto index : indGlo) { indGloArray(j) = index ; j++; } } } // So what about when there is some server that have no data to receive // they must be inform they receive an event with no data. // So find remote servers with no data, and one client will take in charge // that it receive global index with no data (0-size) vector ranks(remoteSize_,0) ; for(auto& it : elements_[0]) ranks[it.first] = 1 ; MPI_Allreduce(MPI_IN_PLACE, ranks.data(), remoteSize_, MPI_INT, MPI_SUM, localComm_) ; int commRank, commSize ; MPI_Comm_rank(localComm_,&commRank) ; MPI_Comm_size(localComm_,&commSize) ; int pos=0 ; for(int i=0; i(0) ; pos++ ; } } }