Changeset 721
- Timestamp: 10/06/15 17:17:11 (9 years ago)
- Location: XIOS/trunk
- Files: 4 added, 1 deleted, 5 edited, 1 moved
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
XIOS/trunk/inputs/REMAP/iodef.xml
r720 → r721

   <field_definition level="1" >
     <field id="src_field" operation="instant" grid_ref="src_grid"/>
+    <field id="dst_field" operation="instant" field_ref="src_field" grid_ref="dst_grid"/>
     <field id="dst_field_regular" operation="instant" field_ref="tmp_field" grid_ref="dst_grid_regular"/>
     <field id="tmp_field" operation="instant" grid_ref="src_grid_regular"/>
… …
-  <file_definition type="multiple_file" par_access="collective" output_freq="1ts" output_level="10" enabled=".TRUE.">
+  <file_definition type="one_file" par_access="collective" output_freq="1ts" output_level="10" enabled=".TRUE.">
     <file id="output" name="output">
-      <!-- <field field_ref="src_field" name="field" />-->
+      <field field_ref="src_field" name="field" />
     </file>
     <file id="output_dst" name="output_dst">
-      <!-- <field field_ref="dst_field" name="field" />-->
+      <field field_ref="dst_field" name="field" />
     </file>
     <file id="output_dst_regular" name="output_dst_regular" type="one_file">
-      <field field_ref="dst_field_regular" name="field" />
+      <!-- <field field_ref="dst_field_regular" name="field" />-->
     </file>

     <file id="output_src_regular" name="output_src_regular" mode="read" type="multiple_file">
-      <field id="src_field_regular" name="field" grid_ref="src_grid_regular" operation="instant"/>
+      <!-- <field id="src_field_regular" name="field" grid_ref="src_grid_regular" operation="instant"/>-->
     </file>
     <file id="output_regular" name="output_regular" mode="write">
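Switching the file_definition from type="multiple_file" to type="one_file" (together with par_access="collective") asks the XIOS servers to write one shared file through collective parallel I/O instead of one file per server process. As a rough, generic illustration of what collective single-file output means at the MPI level (this is plain MPI-IO, not XIOS code; the file name and data layout are made up), each rank writes its own slice of the same file in a collective call:

    // Generic sketch of "one shared file, written collectively" -- not XIOS code.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      const int nLocal = 4;                     // values owned by this rank
      std::vector<double> field(nLocal, rank);  // dummy field data

      MPI_File fh;
      MPI_File_open(MPI_COMM_WORLD, "output_one_file.bin",
                    MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

      // Each rank targets a disjoint region of the same file, then all ranks write collectively.
      MPI_Offset offset = static_cast<MPI_Offset>(rank) * nLocal * sizeof(double);
      MPI_File_write_at_all(fh, offset, field.data(), nLocal, MPI_DOUBLE, MPI_STATUS_IGNORE);

      MPI_File_close(&fh);
      MPI_Finalize();
      return 0;
    }

Run with something like mpirun -np 4; all ranks contribute disjoint regions of output_one_file.bin. XIOS itself goes through parallel NetCDF rather than raw MPI-IO, so this only illustrates the access pattern, not the actual implementation.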
XIOS/trunk/src/client_client_dht_template_impl.hpp
r720 → r721 (the former client_client_dht.cpp becomes the implementation of the class template CClientClientDHTTemplate<T,H>)

 /*!
-   \file client_client_dht.cpp
+   \file client_client_dht_template_impl.hpp
    \author Ha NGUYEN
-   \since 15 Sep 2015
-   \date 15 Sep 2015
+   \since 05 Oct 2015
+   \date 05 Oct 2015

    \brief Distributed hashed table implementation.
 */
-#include "client_client_dht.hpp"
-#include <limits>
-#include <cmath>
-#include <boost/functional/hash.hpp>
+#include "client_client_dht_template.hpp"
 #include "utils.hpp"
 #include "mpi_tag.hpp"

 namespace xios
 {
-CClientClientDHT::CClientClientDHT(const boost::unordered_map<size_t,int>& indexInfoMap,
-                                   const MPI_Comm& clientIntraComm, bool isDataDistributed, int hierarLvl)
-  : intraCommRoot_(clientIntraComm), commLevel_(), isDataDistributed_(isDataDistributed),
-    nbLevel_(hierarLvl), globalIndexToServerMapping_(), globalIndexToInfoMappingLevel_()
-{
-  computeMPICommLevel(clientIntraComm);
-  int lvl = commLevel_.size() - 1;
-  computeDistributedIndex(indexInfoMap, commLevel_[lvl], lvl);
-}
-
-CClientClientDHT::~CClientClientDHT()
-{
-}
-
-/*!
-  Calculate MPI communicator for each level of hierarchy.
-  \param[in] mpiCommRoot MPI communicator of the level 0 (usually communicator of all clients)
-*/
-void CClientClientDHT::computeMPICommLevel(const MPI_Comm& mpiCommRoot)
-{
-  int nbProc;
-  MPI_Comm_size(mpiCommRoot,&nbProc);
-  if (nbLevel_ > nbProc) nbLevel_ = std::log(nbProc);
-  else if (1 > nbLevel_) nbLevel_ = 1;
-
-  commLevel_.push_back(mpiCommRoot);
-  divideMPICommLevel(mpiCommRoot, nbLevel_);
-}
-
-/*!
-  Divide each MPI communicator into sub-communicator. Recursive function
-  \param [in] mpiCommLevel MPI communicator of current level
-  \param [in] level current level
-*/
-void CClientClientDHT::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level)
-{
-  int clientRank;
-  MPI_Comm_rank(mpiCommLevel,&clientRank);
-
-  --level;
-  if (0 < level)
-  {
-    int color = clientRank % 2;
-    commLevel_.push_back(MPI_Comm());
-    MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
-    divideMPICommLevel(commLevel_.back(), level);
-  }
-}
+/*!
+  Constructor with initial distribution information and the corresponding index
+  Each client (process) holds a piece of information as well as the attached index, the index
+  will be redistributed (projected) into size_t space as long as the associated information.
+  \param [in] indexInfoMap initial index and information mapping
+  \param [in] clientIntraComm communicator of clients
+  \param [in] hierarLvl level of hierarchy
+*/
+template<typename T, typename H>
+CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const boost::unordered_map<size_t,T>& indexInfoMap,
+                                                        const MPI_Comm& clientIntraComm,
+                                                        int hierarLvl)
+  : index2InfoMapping_(), indexToInfoMappingLevel_()
+{
+  this->computeMPICommLevel(clientIntraComm, hierarLvl);
+  int lvl = this->commLevel_.size() - 1;
+  computeDistributedIndex(indexInfoMap, this->commLevel_[lvl], lvl);
+}
+
+template<typename T, typename H>
+CClientClientDHTTemplate<T,H>::~CClientClientDHTTemplate()
+{
+}
… …
-void CClientClientDHT::sendInfoToClients(int clientDestRank, std::vector<int>& info,
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, std::vector<T>& info,
                                                       const MPI_Comm& clientIntraComm,
                                                       std::list<MPI_Request>& requestSendInfo)
 {
   MPI_Request request;
   requestSendInfo.push_back(request);
-  MPI_Isend(&(info)[0], (info).size(), MPI_INT,
+
+  MPI_Isend(&(info)[0], info.size() * infoTypeSize, MPI_CHAR,
             clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
 }
… …
-    MPI_Get_count(&statusInfo, MPI_INT, &count);
-    infoBuffBegin.insert(std::make_pair<int, int*>(statusInfo.MPI_SOURCE, recvInfoBuff+countInfo));
-    MPI_Irecv(recvInfoBuff+countInfo, count, MPI_INT,
+    MPI_Get_count(&statusInfo, MPI_CHAR, &count);
+    infoBuffBegin.insert(std::make_pair<int, T*>(statusInfo.MPI_SOURCE, recvInfoBuff+countInfo));
+    MPI_Irecv(recvInfoBuff+countInfo, count, MPI_CHAR,
              statusInfo.MPI_SOURCE, MPI_DHT_INFO, intraComm,
              &requestRecvInfo[statusInfo.MPI_SOURCE]);

-    countInfo += count;
+    countInfo += count/infoTypeSize;
… …
-    MPI_Get_count(&status, MPI_INT, &count);
+    MPI_Get_count(&status, MPI_CHAR, &count);
   }

-  return count;
+  return (count/infoTypeSize);
 }

The hunks not shown above apply the same generalisation mechanically throughout the file: every remaining method definition gains the template<typename T, typename H> prefix and the CClientClientDHTTemplate<T,H>:: qualifier; computeServerIndexMapping becomes computeIndexInfoMapping, computeIndexMapping becomes computeIndexInfoMappingLevel, computeBuffCountIndexGlobal becomes computeBuffCountIndex and computeBuffCountIndexServer becomes computeBuffCountInfo; the members globalIndexToServerMapping_ and globalIndexToInfoMappingLevel_ are renamed index2InfoMapping_ and indexToInfoMappingLevel_; buffers and maps that previously held int server ranks are retyped to the template's InfoType (recvBuffInfo, client2ClientInfo, infoBuffBegin, indexToInfoMapping); several locals are renamed for clarity (nbIndexSendToOthers → nbIndexToSend, sendRequest → sendIndexRequest, nbIndexServerReceived → nbSendBuffInfoReceived); the constructor drops the isDataDistributed flag and delegates construction of the communicator hierarchy to this->computeMPICommLevel(clientIntraComm, hierarLvl), which is no longer defined in this file, with the recursive calls now going through this->commLevel_; and the debug info << ... statements as well as the commented-out processReceivedRequest() block are deleted.
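To make the listing above easier to follow: the DHT decides which client is responsible for a given global index by hashing the index (HashXIOS<size_t> hashGlobalIndex in the code) and locating the hash inside per-client ranges that computeHashIndex spreads over the whole size_t space. The sketch below is a minimal, self-contained rendering of that ownership rule; std::hash stands in for HashXIOS and the even split of the hash space is an assumption, not a copy of the XIOS code:

    // Sketch of range-based ownership over the size_t hash space -- illustrative only.
    #include <algorithm>
    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <limits>
    #include <vector>

    // boundaries[i] is the upper end of client i's slice of the hash space.
    std::vector<std::size_t> computeHashBoundaries(int nbClient)
    {
      std::vector<std::size_t> boundaries(nbClient);
      const std::size_t chunk = std::numeric_limits<std::size_t>::max() / nbClient;
      for (int i = 0; i < nbClient; ++i) boundaries[i] = chunk * static_cast<std::size_t>(i + 1);
      boundaries.back() = std::numeric_limits<std::size_t>::max();  // last client takes the remainder
      return boundaries;
    }

    // Rank of the client responsible for a global index: hash it, then find the
    // first boundary that is >= the hash value.
    int ownerOfIndex(std::size_t globalIndex, const std::vector<std::size_t>& boundaries)
    {
      const std::size_t h = std::hash<std::size_t>()(globalIndex);
      return static_cast<int>(std::lower_bound(boundaries.begin(), boundaries.end(), h)
                              - boundaries.begin());
    }

    int main()
    {
      const int nbClient = 4;
      const std::vector<std::size_t> boundaries = computeHashBoundaries(nbClient);
      const std::vector<std::size_t> samples = {0, 37, 123456789, 987654321};
      for (std::size_t idx : samples)
        std::cout << "global index " << idx << " -> client " << ownerOfIndex(idx, boundaries) << "\n";
      return 0;
    }

In the changeset the boundaries live in the hashedIndex vector and the lookup is repeated once per level of the communicator hierarchy, so a query is resolved through a few intermediate clients rather than by every client talking to every other one.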
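The most consequential change in this file is that the payload is no longer hard-wired to int/MPI_INT: the info buffers are typed with the template parameter (InfoType/T) and the MPI calls ship raw bytes as MPI_CHAR, multiplying and dividing counts by infoTypeSize (see sendInfoToClients, probeInfoMessageFromClients and computeBuffCountInfo above). A minimal stand-alone sketch of that pattern, assuming a trivially copyable info type and using sizeof(T) where the changeset uses infoTypeSize (PairInfo and the tag value are invented for the example):

    // Sketch of sending a generic POD payload as raw bytes over MPI_CHAR -- not XIOS code.
    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    struct PairInfo { int serverRank; int localIndex; };   // any trivially copyable info type

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      const int tag = 7;

      if (rank == 0)
      {
        std::vector<PairInfo> info = { {2, 10}, {5, 11}, {2, 12} };
        // Send the vector as a byte stream: count = elements * sizeof(element).
        MPI_Send(info.data(), static_cast<int>(info.size() * sizeof(PairInfo)), MPI_CHAR,
                 1, tag, MPI_COMM_WORLD);
      }
      else if (rank == 1)
      {
        MPI_Status status;
        MPI_Probe(0, tag, MPI_COMM_WORLD, &status);
        int nbBytes = 0;
        MPI_Get_count(&status, MPI_CHAR, &nbBytes);        // message size in bytes
        std::vector<PairInfo> info(nbBytes / sizeof(PairInfo));
        MPI_Recv(info.data(), nbBytes, MPI_CHAR, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        for (const PairInfo& p : info)
          std::printf("received (%d, %d)\n", p.serverRank, p.localIndex);
      }

      MPI_Finalize();
      return 0;
    }

Run with two ranks (e.g. mpirun -np 2). The byte-level trick only works for types without pointers or dynamic members; presumably the same restriction applies to the template's InfoType.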
XIOS/trunk/src/client_server_mapping_distributed.cpp
r720 → r721

   \author Ha NGUYEN
   \since 27 Feb 2015
-  \date 09 Mars 2015
+  \date 06 Oct 2015

   \brief Mapping between index client and server.
… …
 #include <boost/functional/hash.hpp>
 #include "utils.hpp"
-#include "client_client_dht.hpp"
 #include "mpi_tag.hpp"
… …
   computeHashIndex();

-  ccDHT_ = new CClientClientDHT(globalIndexOfServer,
-                                clientIntraComm,
-                                isDataDistributed);
-  // const boost::unordered_map<size_t,int>& globalIndexToServerMappingTmp = clientDht.getGlobalIndexServerMapping();
-  // globalIndexToServerMapping_ = clientDht.getGlobalIndexServerMapping();
+  ccDHT_ = new CClientClientDHTInt(globalIndexOfServer,
+                                   clientIntraComm,
+                                   isDataDistributed);

   // computeDistributedServerIndex(globalIndexOfServer, clientIntraComm);
… …
 void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient)
 {
-  ccDHT_->computeServerIndexMapping(globalIndexOnClient);
-  indexGlobalOnServer_ = ccDHT_->getGlobalIndexOnServer();
+  ccDHT_->computeIndexInfoMapping(globalIndexOnClient);
+  indexGlobalOnServer_ = (ccDHT_->getInfoIndexMap());

   /*
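The mapping class now delegates the heavy lifting to the DHT: computeIndexInfoMapping(globalIndexOnClient) resolves each queried global index to the rank of the server that holds it, and the result picked up with getInfoIndexMap() ends up in indexGlobalOnServer_, i.e. the global indices grouped by owning server rank (grid.cpp reads the same structure through getGlobalIndexOnServer()). The regrouping step itself is simple; a small self-contained sketch with stand-in containers (the variable names below are illustrative, not the XIOS members):

    // Sketch of regrouping resolved (index -> server rank) pairs by server -- illustrative only.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <unordered_map>
    #include <vector>

    int main()
    {
      // Result of the DHT lookup: global index -> rank of the server holding it.
      std::unordered_map<std::size_t, int> indexToServer = {
        {100, 0}, {101, 0}, {250, 1}, {251, 1}, {400, 2}
      };

      // Group the queried indices by server rank, as the template's computeIndexInfoMapping loop does.
      std::map<int, std::vector<std::size_t>> indexGlobalOnServer;
      for (const auto& kv : indexToServer)
        indexGlobalOnServer[kv.second].push_back(kv.first);

      for (const auto& kv : indexGlobalOnServer)
      {
        std::cout << "server " << kv.first << ":";
        for (std::size_t idx : kv.second) std::cout << " " << idx;
        std::cout << "\n";
      }
      return 0;
    }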
XIOS/trunk/src/client_server_mapping_distributed.hpp
r720 → r721

 #include "mpi.hpp"
 #include <boost/unordered_map.hpp>
-#include "client_client_dht.hpp"
+#include "client_client_dht_template.hpp"

 namespace xios
… …
   bool isDataDistributed_;

-  CClientClientDHT* ccDHT_;
+  // CClientClientDHTTemplate<int>* ccDHT_;
+  CClientClientDHTInt* ccDHT_;
 };
XIOS/trunk/src/node/grid.cpp
r720 → r721

 #include "grid_transformation.hpp"
 #include "grid_generate.hpp"
-#include "client_client_dht.hpp"

 namespace xios {
… …
                                                 clientDistribution_->isDataDistributed());

-  CClientClientDHT clientDht(serverDistributionDescription.getGlobalIndexRange(),
-                             client->intraComm,
-                             clientDistribution_->isDataDistributed());
-  clientDht.computeServerIndexMapping(clientDistribution_->getGlobalIndex());
-  const std::map<int, std::vector<size_t> >& globalIndexOnServer0 = clientDht.getGlobalIndexOnServer();
-
-  std::map<int, std::vector<size_t> >::const_iterator itbTmp, itTmp, iteTmp;
-  itbTmp = globalIndexOnServer0.begin(); iteTmp = globalIndexOnServer0.end();
-  for (itTmp = itbTmp; itTmp != iteTmp; ++itTmp)
-  {
-    const std::vector<size_t>& tmpVec = itTmp->second; info << "tmpVec0. Rank " << itTmp->first << ". Size = " << tmpVec.size() << ". " ;
-    for (int i = 0; i < tmpVec.size(); ++i) info << tmpVec[i] << " ";
-    info << std::endl;
-  }
-  //
   clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex());
   const std::map<int, std::vector<size_t> >& globalIndexOnServer = clientServerMap_->getGlobalIndexOnServer();
-
-  itbTmp = globalIndexOnServer.begin(); iteTmp = globalIndexOnServer.end();
-  for (itTmp = itbTmp; itTmp != iteTmp; ++itTmp)
-  {
-    const std::vector<size_t>& tmpVec = itTmp->second; info << "tmpVec1. Rank " << itTmp->first << ". Size = " << tmpVec.size() << ". " ;
-    for (int i = 0; i < tmpVec.size(); ++i) info << tmpVec[i] << " ";
-    info << std::endl;
-  }

   const std::vector<size_t>& globalIndexSendToServer = clientDistribution_->getGlobalDataIndexSendToServer();
XIOS/trunk/src/test/test_remap.f90
r715 → r721

   DO ts=1,1
-    CALL xios_recv_field("src_field_regular", tmp_field)
+    ! CALL xios_recv_field("src_field_regular", tmp_field)
     CALL xios_update_calendar(ts)
-    !CALL xios_send_field("src_field",src_field)
-    CALL xios_send_field("tmp_field",tmp_field)
+    CALL xios_send_field("src_field",src_field)
+    ! CALL xios_send_field("tmp_field",tmp_field)
     CALL wait_us(5000) ;
   ENDDO