Changeset 833
- Timestamp: 04/08/16 15:00:15 (9 years ago)
- Location: XIOS/trunk/src
- Files: 16 edited
XIOS/trunk/src/client_client_dht_template.hpp (diff r830 → r833)
- The default hierarchy policy of the class template changes from DivideCommByTwo to DivideAdaptiveComm:
    template<typename T, class HierarchyPolicy = DivideAdaptiveComm> class CClientClientDHTTemplate;
- Two new member functions are declared:
    void computeSendRecvRank(int level, int rank);
    void sendRecvRank(int level,
                      const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
                      int& recvNbRank, int& recvNbElements);
- sendIndexToClients() now takes a raw buffer and its size instead of a std::vector<size_t>&:
    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
                            const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);
- The intra-communicator member intraCommRoot_ is removed and replaced by per-level rank lists:
    std::vector<std::vector<int> > sendRank_;
    std::vector<std::vector<int> > recvRank_;
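For context, both the old and the new code decide which client of a level owns a given global index the same way: the index is hashed and located in a sorted vector of per-client hash boundaries. The snippet below is a minimal, self-contained illustration of that idiom only; toyHash() and makeBoundaries() are hypothetical stand-ins for HashXIOS<size_t> and computeHashIndex(), whose actual definitions are not part of this changeset.

    #include <algorithm>
    #include <cstddef>
    #include <limits>
    #include <vector>

    // Hypothetical stand-in for HashXIOS<size_t>: any deterministic hash works here.
    std::size_t toyHash(std::size_t v) { return v * 2654435761u; }

    // Hypothetical stand-in for computeHashIndex(): nbClient+1 increasing
    // boundaries, so that client i owns hashes in [boundaries[i], boundaries[i+1]).
    std::vector<std::size_t> makeBoundaries(int nbClient)
    {
      std::size_t hashMax = std::numeric_limits<std::size_t>::max();
      std::vector<std::size_t> boundaries(nbClient + 1);
      for (int i = 0; i <= nbClient; ++i)
        boundaries[i] = (hashMax / nbClient) * i;
      boundaries[nbClient] = hashMax;
      return boundaries;
    }

    // Same upper_bound/distance idiom as computeIndexInfoMappingLevel(): the owner
    // of a global index is the client whose hash interval contains its hashed value.
    int ownerOfIndex(std::size_t globalIndex, const std::vector<std::size_t>& boundaries)
    {
      std::size_t hashedVal = toyHash(globalIndex);
      std::vector<std::size_t>::const_iterator it =
        std::upper_bound(boundaries.begin(), boundaries.end(), hashedVal);
      if (it == boundaries.end()) --it; // guard: hash equal to the last boundary
      return static_cast<int>(std::distance(boundaries.begin(), it)) - 1;
    }

In the real code the result is additionally offset by groupRankBegin, since a level only covers the ranks of the current group.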
XIOS/trunk/src/client_client_dht_template_impl.hpp (diff r832 → r833)
- Constructor: the hierarchy policy base is now initialized with the intra-communicator (": H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_()"), computeMPICommLevel() is called without arguments, sendRank_ and recvRank_ are resized to getNbLevel(), and the distribution starts with computeDistributedIndex(indexInfoMap, clientIntraComm, nbLvl-1) instead of using the old commLevel_ vector of split communicators.
- computeIndexInfoMapping() and the recursive calls at the end of computeIndexInfoMappingLevel() and computeDistributedIndex() now run on this->internalComm_ and getNbLevel() rather than on this->commLevel_.
- computeIndexInfoMappingLevel(): the number of clients of the level and the first rank of the group come from the policy (getNbInGroup()[level], getGroupBegin()[level]). client2ClientIndex becomes a std::map<int, size_t*>: the indices to send are first counted per destination (sendNbIndexBuff), then raw buffers are allocated and filled in a second pass. The two MPI_Allreduce calls that counted demanding clients and buffer sizes are replaced by sendRecvRank(level, sendBuff, sendNbIndexBuff, recvNbClient, recvNbIndexCount), and the raw index buffers are freed at the end of the function.
- computeDistributedIndex(): calls computeSendRecvRank(level, clientRank) first; client2ClientIndex and client2ClientInfo become raw size_t*/unsigned char* buffers sized from a counting pass and packed directly with ProcessDHTElement<InfoType>::packElement(); the MPI_Allreduce computation of recvNbClient/recvNbIndexCount is again replaced by sendRecvRank(); the temporary infoToSend/sendBuff/recvBuff arrays disappear and the per-destination buffers are deleted at the end.
- sendIndexToClients() follows the new signature and posts MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG, ...) on the raw buffer.
- New computeSendRecvRank(int level, int rank): from the hashed index interval [a,b] of the calling rank inside its group and the intervals [c,d] of the ranks of the parent groups, it stores in recvRank_[level] every parent rank whose interval intersects [a,b], and in sendRank_[level] the contiguous range of ranks of the current group whose intervals intersect the caller's interval [e,f] in its parent group (located with std::lower_bound/std::upper_bound on the hashed boundaries).
- New sendRecvRank(int level, sendNbRank, sendNbElements, recvNbRank, recvNbElements): each process sends the pair (number of ranks, number of elements) with MPI_Isend and tag MPI_DHT_INDEX_1 to every rank of sendRank_[level], receives the corresponding pairs from recvRank_[level] with MPI_Recv, accumulates them into recvNbRank/recvNbElements and completes the sends with MPI_Waitall.
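The point of these two new functions is to replace the collective MPI_Allreduce calls of r832, whose cost grows with the size of the communicator, by a handful of point-to-point messages between the ranks whose hashed intervals actually overlap. The sketch below shows only that exchange pattern; it is a hypothetical standalone helper (not XIOS code) in which explicit sendTo/recvFrom lists stand in for the precomputed sendRank_[level]/recvRank_[level].

    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    // Each rank sends one small (nbRank, nbElements) pair per partner in sendTo
    // and sums the pairs received from each partner in recvFrom.
    void exchangeCounts(const std::vector<int>& sendTo, const std::vector<int>& recvFrom,
                        const std::vector<int>& sendNbRank,     // one entry per rank in sendTo
                        const std::vector<int>& sendNbElements, // one entry per rank in sendTo
                        int& recvNbRank, int& recvNbElements, MPI_Comm comm)
    {
      const int tag = 27; // plays the role of MPI_DHT_INDEX_1
      std::vector<int> sendBuff(2 * sendTo.size());
      std::vector<MPI_Request> requests(sendTo.size());

      for (std::size_t i = 0; i < sendTo.size(); ++i)
      {
        sendBuff[2*i]     = sendNbRank[i];
        sendBuff[2*i + 1] = sendNbElements[i];
        MPI_Isend(&sendBuff[2*i], 2, MPI_INT, sendTo[i], tag, comm, &requests[i]);
      }

      recvNbRank = 0;
      recvNbElements = 0;
      int recvBuff[2];
      for (std::size_t i = 0; i < recvFrom.size(); ++i)
      {
        MPI_Recv(recvBuff, 2, MPI_INT, recvFrom[i], tag, comm, MPI_STATUS_IGNORE);
        recvNbRank     += recvBuff[0];
        recvNbElements += recvBuff[1];
      }

      if (!requests.empty())
        MPI_Waitall(static_cast<int>(requests.size()), &requests[0], MPI_STATUSES_IGNORE);
    }

The real sendRecvRank() does the same thing with sendBuff entries looked up by rank offset inside the group and a reusable 2-int receive buffer.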
XIOS/trunk/src/dht_data_types.hpp (diff r830 → r833)
- The commented-out PairIntInt struct is deleted; only the typedef remains:
    typedef std::pair<int,int> PairIntInt;
XIOS/trunk/src/distribution_client.cpp (diff r831 → r833)
- Commented-out references to globalDataSendToServer_ and localDataIndexSendToServer_ are removed from both constructor initializer lists and from the index-counting and index-assignment sections; globalLocalDataSendToServerMap_, localDataIndex_ and localMaskIndex_ are kept unchanged.
XIOS/trunk/src/io/netCdfInterface.cpp (diff r811 → r833)
- The error messages built when nc_inq_varname, nc_inq_dimname and nc_inq_attname fail now report the queried variable, dimension or attribute name in addition to its id, e.g.
    << "Unable to get variable name: " << varName << " given its id: " << varId << std::endl;
XIOS/trunk/src/io/netCdfInterface_impl.hpp (diff r686 → r833)
- The exception messages of ncGetAttType, ncPutAttType, ncGetVaraType and ncPutVaraType now look up the variable name with inqVarName(ncid, varId, varName) and report "the variable whose id: <varId> and name: <varName>" instead of the id alone.
XIOS/trunk/src/mpi_tag.hpp (diff r829 → r833)
- Two tags are added for the new point-to-point exchanges of the distributed hashed table:
    /* Tag for mpi communication to send and receive index in distributed hashed table version 1*/
    #define MPI_DHT_INDEX_1 27
    /* Tag for mpi communication to send and receive info in distributed hashed table version 1*/
    #define MPI_DHT_INFO_1 24
XIOS/trunk/src/policy.cpp (diff r721 → r833)
- Implements the new DivideAdaptiveComm policy. The constructor stores the intra-communicator:
    DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm)
      : internalComm_(mpiComm), level_(0), groupBegin_(), nbInGroup_(), computed_(false)
    {
    }
- computeMPICommLevel() (executed once, guarded by computed_) builds a hierarchy of rank groups without splitting communicators: it computes the smallest maxChild such that maxChild^maxChild >= mpiSize, derives maxLevel from it, then repeatedly divides the current rank range into at most maxChild nearly equal sub-ranges and keeps the one containing the calling rank. For each level it records groupBegin_[level] and nbInGroup_[level], plus, per parent group, groupParentsBegin_ and nbInGroupParents_; at the last level every remaining process forms its own parent group of size 1.
- The older MPI_Comm_split-based tree construction (divideMPICommLevel) survives only as commented-out code.
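The sizing logic at the top of computeMPICommLevel() is easy to miss in the diff: it looks for the smallest branching factor maxChild such that maxChild^maxChild covers the communicator, then counts how many times maxChild fits multiplicatively into mpiSize. The following standalone snippet reproduces only that arithmetic for illustration; it is not XIOS code.

    #include <iostream>

    // Reproduces the sizing arithmetic of DivideAdaptiveComm::computeMPICommLevel().
    void commLevelSizes(int mpiSize, int& maxChild, int& maxLevel)
    {
      maxChild = 1;
      int m;
      do
      {
        m = 1;
        ++maxChild;
        for (int i = 0; i < maxChild; ++i) m *= maxChild; // m = maxChild^maxChild
      } while (m < mpiSize);

      maxLevel = 0;
      for (int size = 1; size <= mpiSize; size *= maxChild) ++maxLevel;
    }

    int main()
    {
      int maxChild = 0, maxLevel = 0;
      commLevelSizes(16, maxChild, maxLevel);
      // With 16 processes: maxChild = 3 (3^3 = 27 >= 16), maxLevel = 3 (1, 3, 9 <= 16).
      std::cout << maxChild << " children per group, " << maxLevel << " levels" << std::endl;
      return 0;
    }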
XIOS/trunk/src/policy.hpp (diff r721 → r833)
- Declares the new policy class next to DivideCommByTwo:
    class DivideAdaptiveComm
    {
    protected:
      DivideAdaptiveComm(const MPI_Comm& mpiComm);

      void computeMPICommLevel();
      const std::vector<int>& getGroupBegin() { return groupBegin_; }
      const std::vector<int>& getNbInGroup() { return nbInGroup_; }
      const std::vector<std::vector<int> >& getGroupParentsBegin() { return groupParentsBegin_; }
      const std::vector<std::vector<int> >& getNbInGroupParents() { return nbInGroupParents_; }
      int getNbLevel() { return level_; }

    protected:
      const MPI_Comm& internalComm_;
      std::vector<std::vector<int> > groupParentsBegin_;
      std::vector<std::vector<int> > nbInGroupParents_;

      int level_;
      std::vector<int> groupBegin_; //! Rank beginning of a group
      std::vector<int> nbInGroup_;  //! Number of process in each group
      bool computed_;
    };
XIOS/trunk/src/transformation/axis_algorithm_interpolate.cpp (diff r831 → r833)
- computeWeightedValueAndMapping() now declares its local references with the new typedefs of the base class:
    TransformationIndexMap& transMap = this->transformationMapping_[transPos];
    TransformationWeightMap& transWeight = this->transformationWeight_[transPos];
XIOS/trunk/src/transformation/axis_algorithm_inverse.cpp (diff r829 → r833)
- A commented-out call to computeIndexSourceMapping() is removed from the constructor.
- transMap, transWeight and the iterator over transMap switch to the TransformationIndexMap / TransformationWeightMap typedefs.
XIOS/trunk/src/transformation/axis_algorithm_zoom.cpp (diff r827 → r833)
- Same typedef substitution: transMap and transWeight become TransformationIndexMap& / TransformationWeightMap&, and the zoom-mask loop iterates with TransformationIndexMap::const_iterator.
XIOS/trunk/src/transformation/domain_algorithm_interpolate.cpp (diff r829 → r833)
- A commented-out call to computeIndexSourceMapping() is removed from the constructor.
- transMap and transWeight in the weight/mapping computation use the TransformationIndexMap / TransformationWeightMap typedefs.
XIOS/trunk/src/transformation/domain_algorithm_zoom.cpp (diff r827 → r833)
- Same typedef substitution for transMap and transWeight, and for the TransformationIndexMap::const_iterator used when building the zoom mask.
XIOS/trunk/src/transformation/generic_algorithm_transformation.cpp (diff r831 → r833)
- The iterators over transformationMapping_[idxTrans], transformationWeight_[idxTrans] and the transformation position map are declared as TransformationIndexMap::const_iterator / TransformationWeightMap::const_iterator instead of spelling out the std::map types.
XIOS/trunk/src/transformation/generic_algorithm_transformation.hpp (diff r831 → r833)
- Adds protected typedefs and uses them for the member containers; note that the underlying container changes from std::map to boost::unordered_map:
    typedef boost::unordered_map<int, std::vector<int> >    TransformationIndexMap;
    typedef boost::unordered_map<int, std::vector<double> > TransformationWeightMap;
    typedef boost::unordered_map<int, std::vector<int> >    TransformationPositionMap;

    //! Map between global index of destination element and source element
    std::vector<TransformationIndexMap> transformationMapping_;
    //! Weight corresponding of source to destination
    std::vector<TransformationWeightMap> transformationWeight_;
    //! Map of global index of destination element and corresponding global index of other elements in the same grid
    //! By default, one index of an element corresponds to all index of remaining element in the grid. So it's empty
    std::vector<TransformationPositionMap> transformationPosition_;
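The typedefs only rename (and rehash) the containers; they are filled exactly as before. A hypothetical helper in the style of the axis/domain algorithms, shown for illustration only and not taken from the changeset:

    #include <boost/unordered_map.hpp>
    #include <vector>

    // Same typedefs as in CGenericAlgorithmTransformation, repeated so the
    // snippet is self-contained.
    typedef boost::unordered_map<int, std::vector<int> >    TransformationIndexMap;
    typedef boost::unordered_map<int, std::vector<double> > TransformationWeightMap;

    // A destination global index maps to parallel vectors of source indices and
    // interpolation weights, which is how transformationMapping_[i] and
    // transformationWeight_[i] are populated.
    void addMapping(TransformationIndexMap& transMap, TransformationWeightMap& transWeight,
                    int globalIndexDest, int globalIndexSrc, double weight)
    {
      transMap[globalIndexDest].push_back(globalIndexSrc);
      transWeight[globalIndexDest].push_back(weight);
    }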