/*!
   \file policy.cpp
   \author Ha NGUYEN
   \since 06 Oct 2015
   \date 06 Oct 2015

   \brief Some useful policies for templated classes
 */

#include "policy.hpp"
#include <cmath>

namespace xios
{

///*!
//  Calculate MPI communicator for each level of hierarchy.
//  \param[in] mpiCommRoot MPI communicator of level 0 (usually the communicator of all clients)
//  \param[in] levels number of levels in the hierarchy
//*/
//void DivideCommByTwo::computeMPICommLevel(const MPI_Comm& mpiCommRoot, int levels)
//{
//  int nbProc;
//  MPI_Comm_size(mpiCommRoot,&nbProc);
//  if (levels > nbProc) levels = std::log10(nbProc) * 3.3219; // log2(x) = log2(10) * log10(x); stupid C++98
//  else if (1 > levels) levels = 1;
//
//  commLevel_.push_back(mpiCommRoot);
//  divideMPICommLevel(mpiCommRoot, levels);
//}
//
///*!
//  Divide each MPI communicator into sub-communicators. Recursive function.
//  \param[in] mpiCommLevel MPI communicator of the current level
//  \param[in] level current level
//*/
//void DivideCommByTwo::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level)
//{
//  int clientRank;
//  MPI_Comm_rank(mpiCommLevel,&clientRank);
//
//  --level;
//  if (0 < level)
//  {
//    int color = clientRank % 2;
//    commLevel_.push_back(MPI_Comm());
//    MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
//    divideMPICommLevel(commLevel_.back(), level);
//  }
//}

/*!
  Construct the division policy on top of an existing MPI communicator.
  \param[in] mpiComm communicator whose processes will be organised into a hierarchy of groups
*/
DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm)
  : internalComm_(mpiComm), level_(0), groupBegin_(), nbInGroup_(), computed_(false)
{
}

/*!
  Compute the hierarchy of process groups adaptively from the communicator size.
  Each level splits the current group of ranks into at most maxChild contiguous
  sub-groups and records, for every level, the first rank and the size of each sub-group.
*/
void DivideAdaptiveComm::computeMPICommLevel()
{
  if (computed_) return;
  computed_ = true;

  int mpiSize, mpiRank;
  MPI_Comm_size(internalComm_,&mpiSize);
  MPI_Comm_rank(internalComm_,&mpiRank);

  // Smallest maxChild >= 2 such that maxChild^maxChild >= mpiSize
  int maxChild=1;
  int m;
  do
  {
    m=1;
    ++maxChild;
    for(int i=0;i<maxChild;++i) m=m*maxChild;
  } while(m<mpiSize);

  // Number of levels needed to cover mpiSize with groups of maxChild children
  int maxLevel=0;
  for(int size=1; size<=mpiSize; size*=maxChild) ++maxLevel;

  int pos, n, idx;
  level_=0;
  int begin=0;
  int end=mpiSize-1;
  int nb=end-begin+1;

  nbInGroup_ = groupBegin_= std::vector<int>(maxLevel);
  nbInGroupParents_ = groupParentsBegin_= std::vector<std::vector<int> >(maxLevel, std::vector<int>(maxChild));

  groupBegin_[level_] = begin;
  nbInGroup_[level_] = nb;
  ++level_;
  while (nb>2 && (level_<maxLevel))
  {
    n = 0; idx = 0;
    pos = begin;
    for(int i=0;i<maxChild && i<nb;i++)
    {
      if (i<nb%maxChild) n = nb/maxChild + 1;
      else n = nb/maxChild;

      // Remember the bounds of the sub-group containing this rank
      if (mpiRank>=pos && mpiRank<pos+n)
      {
        begin=pos;
        end=pos+n-1;
      }
      groupParentsBegin_[level_-1][idx] = pos;
      nbInGroupParents_[level_-1][idx] = n;
      ++idx;
      pos=pos+n;
    }
    groupBegin_[level_] = begin;
    nbInGroup_[level_] = nb = n;

    ++level_;
  }

  // On the last level each remaining rank forms its own group of size 1
  for (int i = 0; i < nbInGroup_[level_-1]; ++i)
  {
    groupParentsBegin_[level_-1][i] = groupBegin_[level_-1]+i;
    nbInGroupParents_[level_-1][i] = 1;
  }

//  parent=vector<int>(maxLevel+1);
//  child=vector<vector<int> >(maxLevel+1,vector<int>(maxChild));
//  nbChild=vector<int>(maxLevel+1);

//  do
//  {
//    n=0;
//    pos=begin;
//    nbChild[level_]=0;
//    parent[level_+1]=begin;
//    for(int i=0;i<maxChild && i<nb;i++)
//    {
//      if (i<nb%maxChild) n = nb/maxChild + 1;
//      else n = nb/maxChild;
//
//      if (mpiRank>=pos && mpiRank<pos+n)
//      {
//        begin=pos;
//        end=pos+n-1;
//      }
//      child[level_][i]=pos;
//      pos=pos+n;
//      nbChild[level_]++;
//    }
//    parent[level_+1]=begin;
//    level_=level_+1;
//    nb=end-begin+1;
//  } while (nb>1);
}

//void DivideAdaptiveComm::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int color, int level)
//{
////  int clientRank;
////  MPI_Comm_rank(mpiCommLevel,&clientRank);
//
//  --level;
//  if (0 < level)
//  {
//    int color = clientRank % 2;
//    commLevel_.push_back(MPI_Comm());
//    MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
//    divideMPICommLevel(commLevel_.back(), level);
//  }
//}

} // namespace xios
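
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the XIOS sources): expected behaviour of
// DivideAdaptiveComm::computeMPICommLevel() for a communicator of 16 ranks.
// maxChild is the smallest c >= 2 with c^c >= mpiSize, here c = 3 because
// 2^2 = 4 < 16 <= 3^3 = 27, and maxLevel counts the powers of maxChild not
// exceeding mpiSize (1, 3, 9 <= 16), giving maxLevel = 3. Level 0 is the full
// group of ranks 0-15; level 1 splits it into contiguous sub-groups of 6, 5
// and 5 ranks (0-5, 6-10, 11-15), each rank keeping the bounds of its own
// sub-group, and the splitting continues while the group size stays above 2
// and the level count stays below maxLevel.
// ---------------------------------------------------------------------------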