Changeset 860
- Timestamp: 06/09/16 11:32:01 (9 years ago)
- Location: XIOS/trunk/src
- Files: 3 edited
XIOS/trunk/src/client_client_dht_template.hpp
(r843 → r860)

     typedef typename boost::unordered_map<InfoType, std::vector<size_t> > InfoType2IndexMap;
     typedef typename boost::unordered_map<size_t,InfoType> Index2InfoTypeMap;
+    typedef typename boost::unordered_map<size_t,std::vector<InfoType> > Index2VectorInfoTypeMap;

   public:
 ...
                              const MPI_Comm& clientIntraComm);

+    CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap,
+                             const MPI_Comm& clientIntraComm);
+
     void computeIndexInfoMapping(const CArray<size_t,1>& indices);

-    const Index2InfoTypeMap& getInfoIndexMap() const {return indexToInfoMappingLevel_; }
+    // const Index2InfoTypeMap& getInfoIndexMap() const {return indexToInfoMappingLevel_; }
+    const Index2VectorInfoTypeMap& getInfoIndexMap() const {return indexToInfoMappingLevel_; }

     int getNbClient() { return nbClient_; }
 ...
                                  const MPI_Comm& intraCommLevel,
                                  int level);
+
+    void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap,
+                                 const MPI_Comm& intraCommLevel,
+                                 int level);
+
     void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);
 ...
   protected:
     //! Mapping of global index to the corresponding client
-    Index2InfoTypeMap index2InfoMapping_;
+    // Index2InfoTypeMap index2InfoMapping_;
+    Index2VectorInfoTypeMap index2InfoMapping_;

     //! A mapping of index to the corresponding information in each level of hierarchy
-    Index2InfoTypeMap indexToInfoMappingLevel_;
+    // Index2InfoTypeMap indexToInfoMappingLevel_;
+    Index2VectorInfoTypeMap indexToInfoMappingLevel_;

     std::vector<std::vector<int> > sendRank_;
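The practical effect of the new Index2VectorInfoTypeMap typedef is that one global index can now carry several pieces of information (for example several server ranks) instead of exactly one. Below is a minimal standalone sketch of the conversion that the single-value constructor performs internally (see the implementation diff that follows); std::unordered_map stands in for boost::unordered_map and int stands in for InfoType, both as assumptions for illustration only:

```cpp
#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

// Stand-ins for the XIOS typedefs (InfoType = int here).
typedef std::unordered_map<std::size_t, int> Index2InfoTypeMap;
typedef std::unordered_map<std::size_t, std::vector<int> > Index2VectorInfoTypeMap;

// Mirror of the conversion done in the single-value constructor: each
// (index, info) pair becomes an (index, vector<info>) entry with one element.
Index2VectorInfoTypeMap toVectorMap(const Index2InfoTypeMap& indexInfoMap)
{
  Index2VectorInfoTypeMap indexToVecInfoMap;
  for (Index2InfoTypeMap::const_iterator it = indexInfoMap.begin();
       it != indexInfoMap.end(); ++it)
    indexToVecInfoMap[it->first].push_back(it->second);
  return indexToVecInfoMap;
}

int main()
{
  Index2InfoTypeMap single;
  single[42] = 3;              // global index 42 is held by rank 3
  single[7]  = 0;

  Index2VectorInfoTypeMap multi = toVectorMap(single);
  multi[42].push_back(5);      // with the vector form, index 42 may also map to rank 5

  for (Index2VectorInfoTypeMap::const_iterator it = multi.begin(); it != multi.end(); ++it)
  {
    std::cout << it->first << " ->";
    for (std::size_t k = 0; k < it->second.size(); ++k) std::cout << ' ' << it->second[k];
    std::cout << '\n';
  }
  return 0;
}
```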
XIOS/trunk/src/client_client_dht_template_impl.hpp
(r843 → r860)

  */
 template<typename T, typename H>
-CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const boost::unordered_map<size_t,T>& indexInfoMap,
+CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap,
+                                                        const MPI_Comm& clientIntraComm)
+: H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
+{
+  MPI_Comm_size(clientIntraComm, &nbClient_);
+  this->computeMPICommLevel();
+  int nbLvl = this->getNbLevel();
+  sendRank_.resize(nbLvl);
+  recvRank_.resize(nbLvl);
+  Index2VectorInfoTypeMap indexToVecInfoMap;
+  indexToVecInfoMap.rehash(std::ceil(indexInfoMap.size()/indexToVecInfoMap.max_load_factor()));
+  typename Index2InfoTypeMap::const_iterator it = indexInfoMap.begin(), ite = indexInfoMap.end();
+  for (; it != ite; ++it) indexToVecInfoMap[it->first].push_back(it->second);
+  computeDistributedIndex(indexToVecInfoMap, clientIntraComm, nbLvl-1);
+}
+
+/*!
+  Constructor with initial distribution information and the corresponding index
+  Each client (process) holds a piece of information as well as the attached index, the index
+  will be redistributed (projected) into size_t space as long as the associated information.
+  \param [in] indexInfoMap initial index and information mapping
+  \param [in] clientIntraComm communicator of clients
+  \param [in] hierarLvl level of hierarchy
+*/
+template<typename T, typename H>
+CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap,
                                                         const MPI_Comm& clientIntraComm)
 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
 ...
     indexToInfoMappingLevel_= (index2InfoMapping_);

-  typename Index2InfoTypeMap::const_iterator iteIndexToInfoMap = indexToInfoMappingLevel_.end(), itIndexToInfoMap;
+  typename Index2VectorInfoTypeMap::const_iterator iteIndexToInfoMap = indexToInfoMappingLevel_.end(), itIndexToInfoMap;
   std::vector<int> sendNbIndexOnReturn(nbRecvClient,0);
   currentIndex = 0;
 ...
     {
       itIndexToInfoMap = indexToInfoMappingLevel_.find(*(recvIndexBuff+currentIndex+i));
-      if (iteIndexToInfoMap != itIndexToInfoMap) ++sendNbIndexOnReturn[idx];
+      if (iteIndexToInfoMap != itIndexToInfoMap)
+        sendNbIndexOnReturn[idx] += itIndexToInfoMap->second.size();
+        // ++sendNbIndexOnReturn[idx];
     }
     currentIndex += recvNbIndexClientCount[idx];
 ...
       if (iteIndexToInfoMap != itIndexToInfoMap)
       {
-        client2ClientIndexOnReturn[rank][nb] = itIndexToInfoMap->first;
-        ProcessDHTElement<InfoType>::packElement(itIndexToInfoMap->second, tmpInfoPtr, infoIndex);
-        ++nb;
+        const std::vector<InfoType>& infoTmp = itIndexToInfoMap->second;
+        for (int k = 0; k < infoTmp.size(); ++k)
+        {
+          client2ClientIndexOnReturn[rank][nb] = itIndexToInfoMap->first;
+          ProcessDHTElement<InfoType>::packElement(infoTmp[k], tmpInfoPtr, infoIndex);
+          ++nb;
+        }
       }
     }
 ...
   MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);

-  boost::unordered_map<size_t,InfoType> indexToInfoMapping;
+  Index2VectorInfoTypeMap indexToInfoMapping;
   indexToInfoMapping.rehash(std::ceil(recvNbIndexCountOnReturn/indexToInfoMapping.max_load_factor()));
   int infoIndex = 0;
+  InfoType unpackedInfo;
   for (int idx = 0; idx < recvNbIndexCountOnReturn; ++idx)
   {
-    ProcessDHTElement<InfoType>::unpackElement(indexToInfoMapping[recvIndexBuffOnReturn[idx]], recvInfoBuffOnReturn, infoIndex);
+    ProcessDHTElement<InfoType>::unpackElement(unpackedInfo, recvInfoBuffOnReturn, infoIndex);
+    indexToInfoMapping[recvIndexBuffOnReturn[idx]].push_back(unpackedInfo);
+    // ProcessDHTElement<InfoType>::unpackElement(indexToInfoMapping[recvIndexBuffOnReturn[idx]], recvInfoBuffOnReturn, infoIndex);
   }
 ...
     delete [] it->second;
   }
+
+///*!
+//  Compute mapping between indices and information corresponding to these indices
+//  for each level of hierarchical DHT. Recursive function
+//  \param [in] indices indices a proc has
+//  \param [in] commLevel communicator of current level
+//  \param [in] level current level
+//*/
+//template<typename T, typename H>
+//void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
+//                                                                 const MPI_Comm& commLevel,
+//                                                                 int level)
+//{
+// ... (new lines 327-542: body of the previous single-value implementation, retained verbatim as a commented-out block) ...
+//}

 /*!
 ...
 */
 template<typename T, typename H>
-void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const boost::unordered_map<size_t,T>& indexInfoMap,
+void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap,
                                                             const MPI_Comm& commLevel,
                                                             int level)
 ...
   std::vector<size_t>::const_iterator itbClientHash = hashedIndex.begin(), itClientHash,
                                       iteClientHash = hashedIndex.end();
-  typename boost::unordered_map<size_t,InfoType>::const_iterator itb = indexInfoMap.begin(),it,
-                                                                 ite = indexInfoMap.end();
+  typename Index2VectorInfoTypeMap::const_iterator itb = indexInfoMap.begin(),it,
+                                                   ite = indexInfoMap.end();
   HashXIOS<size_t> hashGlobalIndex;
 ...
   for (it = itb; it != ite; ++it)
   {
-    size_t hashIndex = hashGlobalIndex(it->first);
-    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
-    {
-      int indexClient = std::distance(itbClientHash, itClientHash)-1;
-      {
-        ++sendNbIndexBuff[indexClient];
-      }
-    }
+    int infoVecSize = it->second.size();
+    for (int idx = 0; idx < infoVecSize; ++idx)
+    {
+      size_t hashIndex = hashGlobalIndex(it->first);
+      itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
+      {
+        int indexClient = std::distance(itbClientHash, itClientHash)-1;
+        {
+          ++sendNbIndexBuff[indexClient];
+        }
+      }
+    }
   }
 ...
   for (it = itb; it != ite; ++it)
   {
-    size_t hashIndex = hashGlobalIndex(it->first);
-    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
-    {
-      int indexClient = std::distance(itbClientHash, itClientHash)-1;
-      {
-        client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;;
-        ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
-        ++sendNbIndexBuff[indexClient];
-      }
-    }
+    const std::vector<InfoType>& infoTmp = it->second;
+    for (int idx = 0; idx < infoTmp.size(); ++idx)
+    {
+      size_t hashIndex = hashGlobalIndex(it->first);
+      itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
+      {
+        int indexClient = std::distance(itbClientHash, itClientHash)-1;
+        {
+          client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;;
+          // ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
+          ProcessDHTElement<InfoType>::packElement(infoTmp[idx], client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
+          ++sendNbIndexBuff[indexClient];
+        }
+      }
+    }
   }
 ...
   MPI_Waitall(request.size(), &request[0], &status[0]);

-  boost::unordered_map<size_t,InfoType> indexToInfoMapping;
+  Index2VectorInfoTypeMap indexToInfoMapping;
   indexToInfoMapping.rehash(std::ceil(currentIndex/indexToInfoMapping.max_load_factor()));
   currentIndex = 0;
 ...
     {
       ProcessDHTElement<InfoType>::unpackElement(infoValue, infoBuff, infoIndex);
-      indexToInfoMapping[*(recvIndexBuff+currentIndex+i)] = infoValue;
+      // indexToInfoMapping[*(recvIndexBuff+currentIndex+i)] = infoValue;
+      indexToInfoMapping[*(recvIndexBuff+currentIndex+i)].push_back(infoValue);
     }
     currentIndex += count;
 ...
   }
   else
-    index2InfoMapping_.swap(indexToInfoMapping); //index2InfoMapping_ = (indexToInfoMapping);
-}
+    index2InfoMapping_.swap(indexToInfoMapping);
+}
+
+///*!
+//  Compute distribution of global index for servers
+//  Each client already holds a piece of information and its associated index.
+//  This information will be redistributed among processes by projecting indices into size_t space,
+//  the corresponding information will be also distributed on size_t space.
+//  After the redistribution, each client holds rearranged index and its corresponding information.
+//  \param [in] indexInfoMap index and its corresponding info (usually server index)
+//  \param [in] commLevel communicator of current level
+//  \param [in] level current level
+//*/
+//template<typename T, typename H>
+//void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const boost::unordered_map<size_t,T>& indexInfoMap,
+//                                                            const MPI_Comm& commLevel,
+//                                                            int level)
+//{
+// ... (new lines 751-896: body of the previous single-value implementation, retained verbatim as a commented-out block) ...
+//}

 /*!
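The core change in the packing and unpacking loops above is that a global index can now occur more than once in the exchanged buffers: the send side emits one (index, info) pair per element of the vector, and the receive side rebuilds the map with push_back rather than assignment. Below is a simplified, MPI-free sketch of that round trip; std::unordered_map and int again stand in for the XIOS boost map and InfoType, and the buffer layout is only illustrative:

```cpp
#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

typedef std::unordered_map<std::size_t, std::vector<int> > Index2VectorInfoTypeMap;

int main()
{
  Index2VectorInfoTypeMap source;
  source[42].push_back(3);
  source[42].push_back(5);   // index 42 carries two pieces of information
  source[7].push_back(0);

  // "Pack": one (index, info) pair per vector element, mirroring the nested
  // loop over infoTmp in the new packing code.
  std::vector<std::size_t> indexBuff;
  std::vector<int> infoBuff;
  for (Index2VectorInfoTypeMap::const_iterator it = source.begin(); it != source.end(); ++it)
    for (std::size_t k = 0; k < it->second.size(); ++k)
    {
      indexBuff.push_back(it->first);
      infoBuff.push_back(it->second[k]);
    }

  // "Unpack": push_back instead of assignment, so a duplicated index
  // accumulates all of its information values (the unpackedInfo pattern above).
  Index2VectorInfoTypeMap rebuilt;
  for (std::size_t i = 0; i < indexBuff.size(); ++i)
    rebuilt[indexBuff[i]].push_back(infoBuff[i]);

  std::cout << "index 42 now maps to " << rebuilt[42].size() << " values\n";
  return 0;
}
```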
XIOS/trunk/src/client_server_mapping_distributed.cpp
(r843 → r860)

 {
   ccDHT_->computeIndexInfoMapping(globalIndexOnClient);
-  const boost::unordered_map<size_t,int>& infoIndexMap = (ccDHT_->getInfoIndexMap());
-  boost::unordered_map<size_t,int>::const_iterator itb = infoIndexMap.begin(), ite = infoIndexMap.end(), it;
+  const CClientClientDHTInt::Index2VectorInfoTypeMap& infoIndexMap = (ccDHT_->getInfoIndexMap());
+  CClientClientDHTInt::Index2VectorInfoTypeMap::const_iterator itb = infoIndexMap.begin(), ite = infoIndexMap.end(), it;
   std::vector<size_t> nbInfoIndex(ccDHT_->getNbClient(),0);

   for (it = itb; it != ite; ++it)
   {
-    ++nbInfoIndex[it->second];
+    ++nbInfoIndex[it->second[0]];
   }
 ...
   for (it = itb; it != ite; ++it)
   {
-    indexGlobalOnServer_[it->second][nbInfoIndex[it->second]] = (it->first);
-    ++nbInfoIndex[it->second];
+    indexGlobalOnServer_[it->second[0]][nbInfoIndex[it->second[0]]] = (it->first);
+    ++nbInfoIndex[it->second[0]];
   }
 }
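client_server_mapping_distributed.cpp keeps its original behaviour by reading only the first element of each vector (it->second[0]): it counts how many global indices fall on each server, sizes the per-server arrays, then fills them in a second pass. Below is a standalone sketch of that two-pass grouping with plain standard containers; the names, the fixed server count, and the int-for-server-rank choice are illustrative assumptions, not the XIOS API:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <vector>

typedef std::unordered_map<std::size_t, std::vector<int> > Index2VectorInfoTypeMap;

int main()
{
  const int nbServer = 4;
  Index2VectorInfoTypeMap infoIndexMap;          // global index -> server rank(s)
  infoIndexMap[10].push_back(1);
  infoIndexMap[11].push_back(1);
  infoIndexMap[20].push_back(3);

  // First pass: count indices per server, using only the first entry,
  // as the changed code does with it->second[0].
  std::vector<std::size_t> nbInfoIndex(nbServer, 0);
  for (Index2VectorInfoTypeMap::const_iterator it = infoIndexMap.begin();
       it != infoIndexMap.end(); ++it)
    ++nbInfoIndex[it->second[0]];

  // Size the per-server index arrays, then fill them in a second pass.
  std::vector<std::vector<std::size_t> > indexGlobalOnServer(nbServer);
  for (int s = 0; s < nbServer; ++s) indexGlobalOnServer[s].resize(nbInfoIndex[s]);

  std::fill(nbInfoIndex.begin(), nbInfoIndex.end(), 0);
  for (Index2VectorInfoTypeMap::const_iterator it = infoIndexMap.begin();
       it != infoIndexMap.end(); ++it)
    indexGlobalOnServer[it->second[0]][nbInfoIndex[it->second[0]]++] = it->first;

  for (int s = 0; s < nbServer; ++s)
    std::cout << "server " << s << " holds " << indexGlobalOnServer[s].size() << " indices\n";
  return 0;
}
```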