Changeset 720
- Timestamp:
- 10/06/15 17:17:10 (9 years ago)
- Location:
- XIOS/trunk
- Files:
-
- 3 added
- 8 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/trunk/inputs/REMAP/iodef.xml
r718 r720 18 18 </file> 19 19 <file id="output_dst" name="output_dst"> 20 <!-- <field id="test"field_ref="dst_field" name="field" />-->20 <!-- <field field_ref="dst_field" name="field" />--> 21 21 </file> 22 22 <file id="output_dst_regular" name="output_dst_regular" type="one_file"> -
XIOS/trunk/inputs/Version2/iodef.xml
r718 r720 19 19 <file_definition type="multiple_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE."> 20 20 <file id="output" name="output" type="one_file"> 21 <!-- <field field_ref="field_A" />-->21 <field field_ref="field_A" /> 22 22 </file> 23 23 <file id="output_Axis" name="output_Axis" type="one_file"> … … 34 34 </file> 35 35 <file id="output_Domain_transformed_interpolated" name="output_Domain_transformed_interpolated"> 36 <field field_ref="field_Domain_transformed_Interpolated" />36 <!-- <field field_ref="field_Domain_transformed_Interpolated" />--> 37 37 </file> 38 38 <file id="output_Scalar" name="output_Scalar" type="one_file"> -
XIOS/trunk/inputs/iodef.xml
r718 r720 12 12 13 13 14 <file_definition type=" multiple_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE.">14 <file_definition type="one_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE."> 15 15 <file id="output" name="output"> 16 <field field_ref="field_A_zoom" name="field_A " />16 <field field_ref="field_A_zoom" name="field_A_zoom" /> 17 17 </file> 18 18 </file_definition> … … 22 22 <axis id="axis_A"/> 23 23 <axis id="axis_A_zoom" axis_ref="axis_A"> 24 <zoom_axis zoom_begin="1" zoom_size=" 2" />24 <zoom_axis zoom_begin="1" zoom_size="1" /> 25 25 </axis> 26 26 </axis_definition> -
XIOS/trunk/src/client_server_mapping_distributed.cpp
r630 r720 12 12 #include <boost/functional/hash.hpp> 13 13 #include "utils.hpp" 14 #include "client_client_dht.hpp" 15 #include "mpi_tag.hpp" 14 16 15 17 namespace xios … … 19 21 const MPI_Comm& clientIntraComm, bool isDataDistributed) 20 22 : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0), 21 indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_(), isDataDistributed_(isDataDistributed) 23 indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_(), isDataDistributed_(isDataDistributed), 24 ccDHT_(0) 22 25 { 23 26 clientIntraComm_ = clientIntraComm; … … 25 28 MPI_Comm_rank(clientIntraComm,&clientRank_); 26 29 computeHashIndex(); 27 computeDistributedServerIndex(globalIndexOfServer, clientIntraComm); 30 31 ccDHT_ = new CClientClientDHT(globalIndexOfServer, 32 clientIntraComm, 33 isDataDistributed); 34 // const boost::unordered_map<size_t,int>& globalIndexToServerMappingTmp = clientDht.getGlobalIndexServerMapping(); 35 // globalIndexToServerMapping_ = clientDht.getGlobalIndexServerMapping(); 36 37 38 39 // computeDistributedServerIndex(globalIndexOfServer, clientIntraComm); 28 40 } 29 41 30 42 CClientServerMappingDistributed::~CClientServerMappingDistributed() 31 43 { 44 if (0 != ccDHT_) delete ccDHT_; 32 45 } 33 46 … … 38 51 void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient) 39 52 { 53 ccDHT_->computeServerIndexMapping(globalIndexOnClient); 54 indexGlobalOnServer_ = ccDHT_->getGlobalIndexOnServer(); 55 56 /* 40 57 size_t ssize = globalIndexOnClient.numElements(), hashedIndex; 41 58 … … 177 194 delete [] sendBuff; 178 195 delete [] recvBuff; 196 */ 179 197 } 180 198 … … 348 366 349 367 // Probing for global index 350 MPI_Iprobe(MPI_ANY_SOURCE, 15, clientIntraComm_, &flagIndexGlobal, &statusIndexGlobal);368 MPI_Iprobe(MPI_ANY_SOURCE, MPI_DHT_INDEX_0, clientIntraComm_, &flagIndexGlobal, &statusIndexGlobal); 351 369 if ((true == 
flagIndexGlobal) && (countIndexGlobal_ < recvNbIndexCount)) 352 370 { … … 354 372 indexGlobalBuffBegin_.insert(std::make_pair<int, unsigned long*>(statusIndexGlobal.MPI_SOURCE, recvIndexGlobalBuff+countIndexGlobal_)); 355 373 MPI_Irecv(recvIndexGlobalBuff+countIndexGlobal_, count, MPI_UNSIGNED_LONG, 356 statusIndexGlobal.MPI_SOURCE, 15, clientIntraComm_,374 statusIndexGlobal.MPI_SOURCE, MPI_DHT_INDEX_0, clientIntraComm_, 357 375 &requestRecvIndexGlobal_[statusIndexGlobal.MPI_SOURCE]); 358 376 countIndexGlobal_ += count; … … 373 391 374 392 // Probing for server index 375 MPI_Iprobe(MPI_ANY_SOURCE, 12, clientIntraComm_, &flagIndexServer, &statusIndexServer);393 MPI_Iprobe(MPI_ANY_SOURCE, MPI_DHT_INFO_0, clientIntraComm_, &flagIndexServer, &statusIndexServer); 376 394 if ((true == flagIndexServer) && (countIndexServer_ < recvNbIndexCount)) 377 395 { … … 379 397 indexServerBuffBegin_.insert(std::make_pair<int, int*>(statusIndexServer.MPI_SOURCE, recvIndexServerBuff+countIndexServer_)); 380 398 MPI_Irecv(recvIndexServerBuff+countIndexServer_, count, MPI_INT, 381 statusIndexServer.MPI_SOURCE, 12, clientIntraComm_,399 statusIndexServer.MPI_SOURCE, MPI_DHT_INFO_0, clientIntraComm_, 382 400 &requestRecvIndexServer_[statusIndexServer.MPI_SOURCE]); 383 401 … … 400 418 requestSendIndexGlobal.push_back(request); 401 419 MPI_Isend(&(indexGlobal)[0], (indexGlobal).size(), MPI_UNSIGNED_LONG, 402 clientDestRank, 15, clientIntraComm, &(requestSendIndexGlobal.back()));420 clientDestRank, MPI_DHT_INDEX_0, clientIntraComm, &(requestSendIndexGlobal.back())); 403 421 } 404 422 … … 417 435 requestSendIndexServer.push_back(request); 418 436 MPI_Isend(&(indexServer)[0], (indexServer).size(), MPI_INT, 419 clientDestRank, 12, clientIntraComm, &(requestSendIndexServer.back()));437 clientDestRank, MPI_DHT_INFO_0, clientIntraComm, &(requestSendIndexServer.back())); 420 438 } 421 439 -
XIOS/trunk/src/client_server_mapping_distributed.hpp
r620 r720 17 17 #include "mpi.hpp" 18 18 #include <boost/unordered_map.hpp> 19 #include "client_client_dht.hpp" 19 20 20 21 namespace xios … … 118 119 //! Flag to specify whether data is distributed or not 119 120 bool isDataDistributed_; 121 122 123 CClientClientDHT* ccDHT_; 120 124 }; 121 125 -
XIOS/trunk/src/node/domain.cpp
r715 r720 272 272 fillInRectilinearLonLat(); 273 273 this->isRedistributed_ = true; 274 info <<"now, we are here " << std::endl;275 info << "domain " << this->getId() << " ni " << ni.getValue() << " nj " << nj.getValue() << std::endl;276 274 } 277 275 } -
XIOS/trunk/src/node/grid.cpp
r718 r720 18 18 #include "grid_transformation.hpp" 19 19 #include "grid_generate.hpp" 20 #include "client_client_dht.hpp" 20 21 21 22 namespace xios { … … 158 159 /*! 159 160 * Test whether the data defined on the grid can be outputted in a compressed way. 160 * 161 * 161 162 * \return true if and only if a mask was defined for this grid 162 163 */ … … 395 396 clientDistribution_->isDataDistributed()); 396 397 398 CClientClientDHT clientDht(serverDistributionDescription.getGlobalIndexRange(), 399 client->intraComm, 400 clientDistribution_->isDataDistributed()); 401 clientDht.computeServerIndexMapping(clientDistribution_->getGlobalIndex()); 402 const std::map<int, std::vector<size_t> >& globalIndexOnServer0 = clientDht.getGlobalIndexOnServer(); 403 404 std::map<int, std::vector<size_t> >::const_iterator itbTmp, itTmp, iteTmp; 405 itbTmp = globalIndexOnServer0.begin(); iteTmp = globalIndexOnServer0.end(); 406 for (itTmp = itbTmp; itTmp != iteTmp; ++itTmp) 407 { 408 const std::vector<size_t>& tmpVec = itTmp->second; info << "tmpVec0. Rank " << itTmp->first << ". Size = " << tmpVec.size() << ". " ; 409 for (int i = 0; i < tmpVec.size(); ++i) info << tmpVec[i] << " "; 410 info << std::endl; 411 } 412 // 397 413 clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex()); 398 414 const std::map<int, std::vector<size_t> >& globalIndexOnServer = clientServerMap_->getGlobalIndexOnServer(); 415 416 itbTmp = globalIndexOnServer.begin(); iteTmp = globalIndexOnServer.end(); 417 for (itTmp = itbTmp; itTmp != iteTmp; ++itTmp) 418 { 419 const std::vector<size_t>& tmpVec = itTmp->second; info << "tmpVec1. Rank " << itTmp->first << ". Size = " << tmpVec.size() << ". 
" ; 420 for (int i = 0; i < tmpVec.size(); ++i) info << tmpVec[i] << " "; 421 info << std::endl; 422 } 423 399 424 const std::vector<size_t>& globalIndexSendToServer = clientDistribution_->getGlobalDataIndexSendToServer(); 400 425 std::map<int, std::vector<size_t> >::const_iterator iteGlobalMap, itbGlobalMap, itGlobalMap; -
XIOS/trunk/src/test/test_client.f90
r668 r720 15 15 CHARACTER(len=15) :: calendar_type 16 16 TYPE(xios_context) :: ctx_hdl 17 INTEGER,PARAMETER :: ni_glo=10 018 INTEGER,PARAMETER :: nj_glo=10 017 INTEGER,PARAMETER :: ni_glo=10 18 INTEGER,PARAMETER :: nj_glo=10 19 19 INTEGER,PARAMETER :: llm=5 20 20 DOUBLE PRECISION :: lval(llm)=1
Note: See TracChangeset for help on using the changeset viewer.