- Timestamp: 04/01/15 17:52:58 (9 years ago)
- Location: XIOS/trunk/src
- Files: 4 edited
XIOS/trunk/src/client_server_mapping_distributed.cpp
(r584 → r585)

   CClientServerMappingDistributed::CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
-                                                                   const MPI_Comm& clientIntraComm)
+                                                                   const MPI_Comm& clientIntraComm, bool isDataDistributed)
     : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0),
-      indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_()
+      indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_(), isDataDistributed_(isDataDistributed)
   {
     clientIntraComm_ = clientIntraComm;
…
   Compute mapping global index of server which client sends to.
   \param [in] globalIndexOnClient global index client has
-  */
- //void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient)
- //{
- //  int ssize = globalIndexOnClient.numElements();
- //  CArray<int,1>* localIndexOnClient = new CArray<int,1>(ssize);
- //  for (int i = 0; i < ssize; ++i) (*localIndexOnClient)(i) = i;
- //
- //  this->computeServerIndexMapping(globalIndexOnClient, *localIndexOnClient);
- //  delete localIndexOnClient;
- //}
-
- /*!
- Compute mapping global index of server which client sends to.
- \param [in] globalIndexOnClient global index client has
   \param [in] localIndexOnClient local index on client
   */
…
     std::map<int, std::vector<size_t> > client2ClientIndexGlobal;
     std::map<int, std::vector<int> > client2ClientIndexServer;
-    // std::map<int, std::vector<int> > clientLocalIndex;

     // Number of global index whose mapping server can be found out thanks to index-server mapping
…
     {
       (indexGlobalOnServer_[globalIndexToServerMapping_[globalIndexClient]]).push_back(globalIndexClient);
-      // (localIndexSend2Server_[globalIndexToServerMapping_[globalIndexClient]]).push_back(localIndexOnClient(i));
       ++nbIndexAlreadyOnClient;
     }
…
     {
       client2ClientIndexGlobal[indexClient].push_back(globalIndexClient);
-      // clientLocalIndex[indexClient].push_back(i);
       ++nbIndexSendToOthers;
     }
…
     sendIndexGlobalToClients(it->first, it->second, clientIntraComm_, sendRequest);

+    int nbDemandingClient = recvBuff[clientRank_], nbIndexServerReceived = 0;
     // Receiving demand as well as the responds from other clients
     // The demand message contains global index; meanwhile the responds have server index information
…
     int maxNbIndexDemandedFromOthers = (nbIndexAlreadyOnClient >= globalIndexToServerMapping_.size())
                                        ? 0 : (globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient);
+    if (!isDataDistributed_) maxNbIndexDemandedFromOthers = nbDemandingClient * globalIndexToServerMapping_.size(); // Not very optimal but it's general
+
     if (0 != maxNbIndexDemandedFromOthers)
       recvBuffIndexGlobal = new unsigned long[maxNbIndexDemandedFromOthers];
…
     int* recvBuffIndexServer = 0;
     int nbIndexReceivedFromOthers = nbIndexSendToOthers;
+    // int nbIndexReceivedFromOthers = globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient;
     if (0 != nbIndexReceivedFromOthers)
       recvBuffIndexServer = new int[nbIndexReceivedFromOthers];

-    resetReceivingRequestAndCount();
     std::map<int, MPI_Request>::iterator itRequest;
     std::vector<int> demandAlreadyReceived, repondAlreadyReceived;
-    int nbDemandingClient = recvBuff[clientRank_], nbIndexServerReceived = 0;

     resetReceivingRequestAndCount();
…
     int* beginBuff = indexServerBuffBegin_[clientSourceRank];
     std::vector<size_t>& globalIndexTmp = client2ClientIndexGlobal[clientSourceRank];
-    // std::vector<int>& localIndexTmp = clientLocalIndex[clientSourceRank];
     for (int i = 0; i < count; ++i)
     {
       (indexGlobalOnServer_[*(beginBuff+i)]).push_back(globalIndexTmp[i]);
-      // (localIndexSend2Server_[*(beginBuff+i)]).push_back(localIndexOnClient(localIndexTmp[i]));
     }
     nbIndexServerReceived += count;
…
   Compute distribution of global index for servers
   Each client already holds a piece of information about global index and the corresponding server.
-  This information is redistributed into size_t s ipace in which each client possesses a specific range of index.
-  After h the redistribution, each client as longas its range of index contains all necessary information about server.
+  This information is redistributed into size_t space in which each client possesses a specific range of index.
+  After the redistribution, each client as well as its range of index contains all necessary information about server.
   \param [in] globalIndexOfServer global index and the corresponding server
   \param [in] clientIntraComm client joining distribution process.
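The substantive change in this file is the new isDataDistributed_ flag: when the data is not distributed, every demanding client may ask this client for its whole index-to-server mapping, so the receive buffer is sized for that worst case ("not very optimal but it's general", as the in-line comment says) rather than only for the indices still missing locally. Below is a minimal standalone sketch of that sizing rule, with hypothetical stand-in variables (mappingSize, nbIndexAlreadyOnClient, nbDemandingClient) replacing the members used in the changeset:

#include <cstddef>
#include <iostream>

// Worst-case number of global indices other clients may demand from this client.
std::size_t maxIndexDemandedFromOthers(std::size_t mappingSize,            // size of globalIndexToServerMapping_
                                       std::size_t nbIndexAlreadyOnClient, // indices whose server is already known locally
                                       std::size_t nbDemandingClient,      // clients that announced a demand
                                       bool isDataDistributed)
{
  // Distributed data: at most the indices this client does not already hold.
  std::size_t maxNb = (nbIndexAlreadyOnClient >= mappingSize)
                        ? 0 : (mappingSize - nbIndexAlreadyOnClient);

  // Non-distributed data: every demanding client may request the full mapping,
  // so the buffer is sized for all of them (the "not very optimal" general case).
  if (!isDataDistributed)
    maxNb = nbDemandingClient * mappingSize;

  return maxNb;
}

int main()
{
  std::cout << maxIndexDemandedFromOthers(100, 40, 3, true)  << std::endl;  // 60
  std::cout << maxIndexDemandedFromOthers(100, 40, 3, false) << std::endl;  // 300
  return 0;
}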
XIOS/trunk/src/client_server_mapping_distributed.hpp
(r584 → r585)

     /** Default constructor */
     CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
-                                    const MPI_Comm& clientIntraComm);
+                                    const MPI_Comm& clientIntraComm, bool isDataDistributed = true);

     virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer);
…
     //! Mapping client rank and the begining position of receiving buffer for message of server index from this client
     std::map<int, int*> indexServerBuffBegin_;
+
+    //! Flag to specify whether data is distributed or not
+    bool isDataDistributed_;
   };
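Because the new parameter defaults to true, call sites that still pass only two arguments keep the previous (distributed) behaviour; within this changeset, grid.cpp below is the only caller updated to pass the flag explicitly.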
XIOS/trunk/src/node/domain.cpp
(r584 → r585)

     for (int i = 0; i < nb; ++i)
     {
-      if (std::binary_search(itbVec, iteVec, globalIndexDomainZoom(i)))
+      if (iteVec != std::find(itbVec, iteVec, globalIndexDomainZoom(i)))
       {
         indSrv_[rank].push_back(globalIndexDomainZoom(i));
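This one-line fix swaps std::binary_search for std::find: binary_search only gives a reliable answer when the range [itbVec, iteVec) is sorted, whereas find scans linearly and is correct for any ordering, at O(n) cost per lookup. A minimal standalone sketch of the difference, using a hypothetical unsorted vector of indices in place of the real iterator range:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  // An unsorted set of global indices, standing in for the real data.
  std::vector<std::size_t> vec;
  vec.push_back(7); vec.push_back(2); vec.push_back(9); vec.push_back(4);

  std::vector<std::size_t>::iterator itbVec = vec.begin(), iteVec = vec.end();
  std::size_t wanted = 7;

  // Unreliable on an unsorted range: may report "not found" although 7 is present.
  bool foundBinary = std::binary_search(itbVec, iteVec, wanted);

  // Correct regardless of ordering, but linear in the range size.
  bool foundLinear = (iteVec != std::find(itbVec, iteVec, wanted));

  std::cout << "binary_search: " << foundBinary
            << ", find: " << foundLinear << std::endl;
  return 0;
}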
XIOS/trunk/src/node/grid.cpp
(r584 → r585)

     // Finally, compute index mapping between client(s) and server(s)
     clientServerMap_ = new CClientServerMappingDistributed(serverDistributionDescription_->getGlobalIndexRange(),
-                                                           client->intraComm);
+                                                           client->intraComm,
+                                                           clientDistribution_->isDataDistributed());

     clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex());
…
     if (!doGridHaveDataDistributed())
     {
-      // if (0 == client->clientRank)
-      // {
-      //   for (int ns = 0; itGlobal != iteMap; ++itGlobal, ++itLocal, ++ns)
-      //   {
-      //     rank = itGlobal->first;
-      //     int nb = (itGlobal->second).size();
-      //
-      //     CArray<size_t, 1> outGlobalIndexOnServer(nb);
-      //     CArray<int, 1> outLocalIndexToServer(nb);
-      //     for (int k = 0; k < nb; ++k)
-      //     {
-      //       outGlobalIndexOnServer(k) = itGlobal->second.at(k);
-      //       outLocalIndexToServer(k) = itLocal->second.at(k);
-      //     }
-      //
-      //     storeIndex_toSrv.insert( pair<int,CArray<int,1>* >(rank,new CArray<int,1>(outLocalIndexToServer) ));
-      //     listOutIndex.push_back(new CArray<size_t,1>(outGlobalIndexOnServer));
-      //
-      //     list_msg.push_back(shared_ptr<CMessage>(new CMessage));
-      //     *list_msg.back()<<getId()<<*listOutIndex.back();
-      //     event.push(rank, 1, *list_msg.back());
-      //   }
-      //   client->sendEvent(event);
-      // } else client->sendEvent(event);
+      if (0 == client->clientRank)
+      {
+        for (rank = 0; rank < client->serverSize; ++rank)
+        {
+          int nb = 0;
+          if (globalIndexTmp.end() != globalIndexTmp.find(rank))
+            nb = globalIndexTmp[rank].size();
+
+          CArray<size_t, 1> outGlobalIndexOnServer(nb);
+          CArray<int, 1> outLocalIndexToServer(nb);
+          for (int k = 0; k < nb; ++k)
+          {
+            outGlobalIndexOnServer(k) = globalIndexTmp[rank].at(k);
+            outLocalIndexToServer(k) = localIndexTmp[rank].at(k);
+          }
+
+          storeIndex_toSrv.insert( pair<int,CArray<int,1>* >(rank,new CArray<int,1>(outLocalIndexToServer) ));
+          listOutIndex.push_back(new CArray<size_t,1>(outGlobalIndexOnServer));
+
+          list_msg.push_back(shared_ptr<CMessage>(new CMessage));
+          *list_msg.back()<<getId()<<*listOutIndex.back();
+
+          event.push(rank, 1, *list_msg.back());
+        }
+        client->sendEvent(event);
+      }
+      else
+        client->sendEvent(event);
     }
     else
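The second hunk enables (and reworks) the previously commented-out path for grids whose data is not distributed: only client rank 0 fills the per-server index arrays and pushes one message per server rank, while every client rank still calls client->sendEvent(event). A detail worth noting is the guarded lookup of globalIndexTmp.find(rank) before taking the size; the sketch below, using a hypothetical std::map in place of the real globalIndexTmp, shows why that guard matters:

#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

int main()
{
  // Hypothetical per-server index map: only server rank 1 has indices.
  std::map<int, std::vector<std::size_t> > globalIndexTmp;
  globalIndexTmp[1].push_back(10);
  globalIndexTmp[1].push_back(11);
  globalIndexTmp[1].push_back(12);

  int serverSize = 3;
  for (int rank = 0; rank < serverSize; ++rank)
  {
    // Same pattern as the changeset: look the rank up first, default to 0 if absent.
    // Calling globalIndexTmp[rank].size() directly would insert an empty vector
    // for every server rank that has nothing to receive.
    std::size_t nb = 0;
    if (globalIndexTmp.end() != globalIndexTmp.find(rank))
      nb = globalIndexTmp[rank].size();
    std::cout << "server rank " << rank << ": " << nb << " indices" << std::endl;
  }

  // Still a single entry: operator[] was only used for ranks known to be present.
  std::cout << "map entries: " << globalIndexTmp.size() << std::endl;
  return 0;
}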