Changeset 720


Timestamp:
10/06/15 17:17:10
Author:
mhnguyen
Message:

First implementation of a hierarchical distributed hash table

+) Implement the DHT for int values indexed by size_t

Tests
+) Local
+) Works correctly
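
For reference, a minimal sketch of the CClientClientDHT interface as it is exercised by the call sites in this changeset. The method names and result types are taken from the diffs below; the exact signatures and includes are assumptions, since the added client_client_dht.hpp itself is not shown in this view.

    // Sketch only: reconstructed from call sites, not the committed header.
    #include <map>
    #include <vector>
    #include <boost/unordered_map.hpp>
    #include "array_new.hpp"   // CArray<size_t,1> (assumed XIOS header)
    #include "mpi.hpp"

    namespace xios
    {
      class CClientClientDHT
      {
        public:
          // Build the distributed global-index -> server-rank map over the
          // client intracommunicator.
          CClientClientDHT(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                          const MPI_Comm& clientIntraComm,
                          bool isDataDistributed);

          // Resolve, for each global index held by this client, the server
          // rank that owns it.
          void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient);

          // Result of computeServerIndexMapping: server rank -> global indexes.
          const std::map<int, std::vector<size_t> >& getGlobalIndexOnServer() const;

          // Local part of the global-index -> server-rank mapping.
          const boost::unordered_map<size_t,int>& getGlobalIndexServerMapping() const;
      };
    }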

Location:
XIOS/trunk
Files:
3 added
8 edited

  • XIOS/trunk/inputs/REMAP/iodef.xml

    r718 r720  
    1818     </file> 
    1919     <file id="output_dst" name="output_dst"> 
    20 <!--        <field id="test" field_ref="dst_field" name="field" />--> 
     20<!--        <field field_ref="dst_field" name="field" />--> 
    2121     </file> 
    2222     <file id="output_dst_regular" name="output_dst_regular" type="one_file"> 
  • XIOS/trunk/inputs/Version2/iodef.xml

    r718 r720  
    1919   <file_definition type="multiple_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE."> 
    2020     <file id="output" name="output" type="one_file"> 
    21 <!--        <field field_ref="field_A" />--> 
     21        <field field_ref="field_A" /> 
    2222     </file> 
    2323     <file id="output_Axis" name="output_Axis" type="one_file"> 
     
    3434     </file> 
    3535     <file id="output_Domain_transformed_interpolated" name="output_Domain_transformed_interpolated"> 
    36         <field field_ref="field_Domain_transformed_Interpolated" /> 
     36<!--        <field field_ref="field_Domain_transformed_Interpolated" />--> 
    3737     </file> 
    3838     <file id="output_Scalar" name="output_Scalar" type="one_file"> 
  • XIOS/trunk/inputs/iodef.xml

    r718 r720  
    1212 
    1313 
    14    <file_definition type="multiple_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE."> 
     14   <file_definition type="one_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE."> 
    1515     <file id="output" name="output"> 
    16         <field field_ref="field_A_zoom" name="field_A" /> 
     16        <field field_ref="field_A_zoom" name="field_A_zoom" /> 
    1717     </file> 
    1818   </file_definition> 
     
    2222     <axis id="axis_A"/> 
    2323     <axis id="axis_A_zoom" axis_ref="axis_A"> 
    24        <zoom_axis zoom_begin="1" zoom_size="2" /> 
     24       <zoom_axis zoom_begin="1" zoom_size="1" /> 
    2525     </axis> 
    2626   </axis_definition> 
  • XIOS/trunk/src/client_server_mapping_distributed.cpp

    r630 r720  
    1212#include <boost/functional/hash.hpp> 
    1313#include "utils.hpp" 
     14#include "client_client_dht.hpp" 
     15#include "mpi_tag.hpp" 
    1416 
    1517namespace xios 
     
    1921                                                                 const MPI_Comm& clientIntraComm, bool isDataDistributed) 
    2022  : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0), 
    21     indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_(), isDataDistributed_(isDataDistributed) 
     23    indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_(), isDataDistributed_(isDataDistributed), 
     24    ccDHT_(0) 
    2225{ 
    2326  clientIntraComm_ = clientIntraComm; 
     
    2528  MPI_Comm_rank(clientIntraComm,&clientRank_); 
    2629  computeHashIndex(); 
    27   computeDistributedServerIndex(globalIndexOfServer, clientIntraComm); 
     30 
     31  ccDHT_ = new CClientClientDHT(globalIndexOfServer, 
     32                                clientIntraComm, 
     33                                isDataDistributed); 
     34//  const boost::unordered_map<size_t,int>& globalIndexToServerMappingTmp = clientDht.getGlobalIndexServerMapping(); 
     35//  globalIndexToServerMapping_ = clientDht.getGlobalIndexServerMapping(); 
     36 
     37 
     38 
     39//  computeDistributedServerIndex(globalIndexOfServer, clientIntraComm); 
    2840} 
    2941 
    3042CClientServerMappingDistributed::~CClientServerMappingDistributed() 
    3143{ 
     44  if (0 != ccDHT_) delete ccDHT_; 
    3245} 
    3346 
     
    3851void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient) 
    3952{ 
     53  ccDHT_->computeServerIndexMapping(globalIndexOnClient); 
     54  indexGlobalOnServer_ = ccDHT_->getGlobalIndexOnServer(); 
     55 
     56/* 
    4057  size_t ssize = globalIndexOnClient.numElements(), hashedIndex; 
    4158 
     
    177194  delete [] sendBuff; 
    178195  delete [] recvBuff; 
     196*/ 
    179197} 
    180198 
     
    348366 
    349367  // Probing for global index 
    350   MPI_Iprobe(MPI_ANY_SOURCE, 15, clientIntraComm_, &flagIndexGlobal, &statusIndexGlobal); 
     368  MPI_Iprobe(MPI_ANY_SOURCE, MPI_DHT_INDEX_0, clientIntraComm_, &flagIndexGlobal, &statusIndexGlobal); 
    351369  if ((true == flagIndexGlobal) && (countIndexGlobal_ < recvNbIndexCount)) 
    352370  { 
     
    354372    indexGlobalBuffBegin_.insert(std::make_pair<int, unsigned long*>(statusIndexGlobal.MPI_SOURCE, recvIndexGlobalBuff+countIndexGlobal_)); 
    355373    MPI_Irecv(recvIndexGlobalBuff+countIndexGlobal_, count, MPI_UNSIGNED_LONG, 
    356               statusIndexGlobal.MPI_SOURCE, 15, clientIntraComm_, 
     374              statusIndexGlobal.MPI_SOURCE, MPI_DHT_INDEX_0, clientIntraComm_, 
    357375              &requestRecvIndexGlobal_[statusIndexGlobal.MPI_SOURCE]); 
    358376    countIndexGlobal_ += count; 
     
    373391 
    374392  // Probing for server index 
    375   MPI_Iprobe(MPI_ANY_SOURCE, 12, clientIntraComm_, &flagIndexServer, &statusIndexServer); 
     393  MPI_Iprobe(MPI_ANY_SOURCE, MPI_DHT_INFO_0, clientIntraComm_, &flagIndexServer, &statusIndexServer); 
    376394  if ((true == flagIndexServer) && (countIndexServer_ < recvNbIndexCount)) 
    377395  { 
     
    379397    indexServerBuffBegin_.insert(std::make_pair<int, int*>(statusIndexServer.MPI_SOURCE, recvIndexServerBuff+countIndexServer_)); 
    380398    MPI_Irecv(recvIndexServerBuff+countIndexServer_, count, MPI_INT, 
    381               statusIndexServer.MPI_SOURCE, 12, clientIntraComm_, 
     399              statusIndexServer.MPI_SOURCE, MPI_DHT_INFO_0, clientIntraComm_, 
    382400              &requestRecvIndexServer_[statusIndexServer.MPI_SOURCE]); 
    383401 
     
    400418  requestSendIndexGlobal.push_back(request); 
    401419  MPI_Isend(&(indexGlobal)[0], (indexGlobal).size(), MPI_UNSIGNED_LONG, 
    402             clientDestRank, 15, clientIntraComm, &(requestSendIndexGlobal.back())); 
     420            clientDestRank, MPI_DHT_INDEX_0, clientIntraComm, &(requestSendIndexGlobal.back())); 
    403421} 
    404422 
     
    417435  requestSendIndexServer.push_back(request); 
    418436  MPI_Isend(&(indexServer)[0], (indexServer).size(), MPI_INT, 
    419             clientDestRank, 12, clientIntraComm, &(requestSendIndexServer.back())); 
     437            clientDestRank, MPI_DHT_INFO_0, clientIntraComm, &(requestSendIndexServer.back())); 
    420438} 
    421439 
  • XIOS/trunk/src/client_server_mapping_distributed.hpp

    r620 r720  
    1717#include "mpi.hpp" 
    1818#include <boost/unordered_map.hpp> 
     19#include "client_client_dht.hpp" 
    1920 
    2021namespace xios 
     
    118119    //! Flag to specify whether data is distributed or not 
    119120    bool isDataDistributed_; 
     121 
     122 
     123    CClientClientDHT* ccDHT_; 
    120124}; 
    121125 
  • XIOS/trunk/src/node/domain.cpp

    r715 r720  
    272272        fillInRectilinearLonLat(); 
    273273        this->isRedistributed_ = true; 
    274         info <<"now, we are here " << std::endl; 
    275         info << "domain " << this->getId() << " ni " << ni.getValue() << " nj " << nj.getValue() << std::endl; 
    276274     } 
    277275   } 
  • XIOS/trunk/src/node/grid.cpp

    r718 r720  
    1818#include "grid_transformation.hpp" 
    1919#include "grid_generate.hpp" 
     20#include "client_client_dht.hpp" 
    2021 
    2122namespace xios { 
     
    158159   /*! 
    159160    * Test whether the data defined on the grid can be outputted in a compressed way. 
    160     *  
     161    * 
    161162    * \return true if and only if a mask was defined for this grid 
    162163    */ 
     
    395396                                                            clientDistribution_->isDataDistributed()); 
    396397 
     398     CClientClientDHT clientDht(serverDistributionDescription.getGlobalIndexRange(), 
     399                                client->intraComm, 
     400                                clientDistribution_->isDataDistributed()); 
     401     clientDht.computeServerIndexMapping(clientDistribution_->getGlobalIndex()); 
     402     const std::map<int, std::vector<size_t> >& globalIndexOnServer0 = clientDht.getGlobalIndexOnServer(); 
     403 
     404     std::map<int, std::vector<size_t> >::const_iterator itbTmp, itTmp, iteTmp; 
     405     itbTmp = globalIndexOnServer0.begin(); iteTmp = globalIndexOnServer0.end(); 
     406     for (itTmp = itbTmp; itTmp != iteTmp; ++itTmp) 
     407     { 
     408       const std::vector<size_t>& tmpVec = itTmp->second; info << "tmpVec0. Rank " << itTmp->first << ". Size = " << tmpVec.size() << ". "  ; 
     409       for (int i = 0; i < tmpVec.size(); ++i) info << tmpVec[i] << " "; 
     410       info << std::endl; 
     411     } 
     412// 
    397413     clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex()); 
    398414     const std::map<int, std::vector<size_t> >& globalIndexOnServer = clientServerMap_->getGlobalIndexOnServer(); 
     415 
     416     itbTmp = globalIndexOnServer.begin(); iteTmp = globalIndexOnServer.end(); 
     417     for (itTmp = itbTmp; itTmp != iteTmp; ++itTmp) 
     418     { 
     419       const std::vector<size_t>& tmpVec = itTmp->second; info << "tmpVec1. Rank " << itTmp->first << ". Size = " << tmpVec.size() << ". "  ; 
     420       for (int i = 0; i < tmpVec.size(); ++i) info << tmpVec[i] << " "; 
     421       info << std::endl; 
     422     } 
     423 
    399424     const std::vector<size_t>& globalIndexSendToServer = clientDistribution_->getGlobalDataIndexSendToServer(); 
    400425     std::map<int, std::vector<size_t> >::const_iterator iteGlobalMap, itbGlobalMap, itGlobalMap; 
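
    The two loops added above dump the DHT result (tmpVec0) and the legacy mapping result (tmpVec1) to the info log so they can be compared by eye. A hypothetical standalone helper (not part of XIOS) that performs the same check programmatically:

        // Hypothetical helper: verify the DHT mapping against the legacy one.
        #include <cstddef>
        #include <map>
        #include <vector>

        bool sameServerMapping(const std::map<int, std::vector<std::size_t> >& dhtResult,
                               const std::map<int, std::vector<std::size_t> >& legacyResult)
        {
          // std::map and std::vector compare element-wise, so equality means
          // the same server ranks with identical, identically ordered index lists.
          return dhtResult == legacyResult;
        }

    If the two code paths order the indexes per server differently, each vector would need to be sorted before the comparison.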
  • XIOS/trunk/src/test/test_client.f90

    r668 r720  
    1515  CHARACTER(len=15) :: calendar_type 
    1616  TYPE(xios_context) :: ctx_hdl 
    17   INTEGER,PARAMETER :: ni_glo=100 
    18   INTEGER,PARAMETER :: nj_glo=100 
     17  INTEGER,PARAMETER :: ni_glo=10 
     18  INTEGER,PARAMETER :: nj_glo=10 
    1919  INTEGER,PARAMETER :: llm=5 
    2020  DOUBLE PRECISION  :: lval(llm)=1 