Changeset 1847 for XIOS/dev/dev_ym/XIOS_COUPLING
- Timestamp:
- 01/06/20 14:46:09 (4 years ago)
- Location:
- XIOS/dev/dev_ym/XIOS_COUPLING/src
- Files:
-
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_ym/XIOS_COUPLING/src/io/nc4_data_output.cpp
r1639 r1847 1639 1639 1640 1640 CArray<int, 1> indexes(grid->getNumberWrittenIndexes()); 1641 indexes = grid->localIndexToWriteOnServer ;1641 indexes = grid->localIndexToWriteOnServer_; 1642 1642 1643 1643 switch (SuperClass::type) -
XIOS/dev/dev_ym/XIOS_COUPLING/src/node/axis.hpp
r1639 r1847 169 169 // std::map<int, vector<int> > indWrittenSrv_; // Global written index of each client sent to server 170 170 std::unordered_map<size_t,size_t> globalLocalIndexMap_; 171 std::vector<int> indexesToWrite;172 171 std::map<int,int> numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 173 172 std::map<int, CArray<int, 1> > compressedIndexToWriteOnServer; -
XIOS/dev/dev_ym/XIOS_COUPLING/src/node/domain.hpp
r1639 r1847 169 169 // std::map<int, vector<int> > indWrittenSrv_; // Global written index of each client sent to server 170 170 std::unordered_map<size_t,size_t> globalLocalIndexMap_; 171 std::vector<int> indexesToWrite;172 171 std::map<int,int> numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 173 172 std::map<int, CArray<int, 1> > compressedIndexToWriteOnServer; -
229 */ 215 230 std::map<int, std::unordered_map<int, vector<size_t> > > indSrv_; 216 // std::map<CContextClient*, std::map<int, vector<int> > > indWrittenSrv_; // Global written index of each client sent to server 217 std::vector<int> indexesToWrite; 231 232 /** make the mapping between the global index (the key) and the local index 233 globalLocalIndexMap_[global_index] --> get the local index 234 */ 235 std::unordered_map<size_t,size_t> globalLocalIndexMap_; 236 237 238 /** only on server side : get the rank of each clients which participate to a received message 239 * recvClientRanks_[num_receiver] : client rank of the receiver "num_receiver" 240 */ 218 241 std::vector<int> recvClientRanks_; 242 219 243 std::map<int,int> numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 220 244 std::map<int, CArray<int, 1> > compressedIndexToWriteOnServer; 221 std::map<int, std::map<int,size_t> > connectedDataSize_;222 245 std::map<int, std::vector<int> > connectedServerRank_; 223 246 … … 227 250 TransMapTypes transformationMap_; 228 251 bool isUnstructed_; 229 std::unordered_map<size_t,size_t> globalLocalIndexMap_; 252 230 253 231 254 private: -
XIOS/dev/dev_ym/XIOS_COUPLING/src/node/field.cpp
r1794 r1847 1627 1627 TRY 1628 1628 { 1629 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient ;1630 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer ;1629 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient_; 1630 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer_; 1631 1631 for (size_t idx = 0; idx < outIndexServer.numElements(); ++idx) 1632 1632 { … … 1639 1639 TRY 1640 1640 { 1641 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient ;1642 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer ;1641 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient_; 1642 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer_; 1643 1643 for (size_t idx = 0; idx < outIndexServer.numElements(); ++idx) 1644 1644 { … … 1651 1651 TRY 1652 1652 { 1653 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient ;1654 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer ;1653 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient_; 1654 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer_; 1655 1655 for (size_t idx = 0; idx < outIndexServer.numElements(); ++idx) 1656 1656 { -
XIOS/dev/dev_ym/XIOS_COUPLING/src/node/grid.cpp
r1794 r1847 622 622 int writtenIndex = 0; 623 623 624 localIndexToWriteOnClient .resize(nbWritten);625 localIndexToWriteOnServer .resize(nbWritten);626 localIndexToWriteOnServer (0) = writtenIndex;627 localIndexToWriteOnClient (0) = writtenIndex;624 localIndexToWriteOnClient_.resize(nbWritten); 625 localIndexToWriteOnServer_.resize(nbWritten); 626 localIndexToWriteOnServer_(0) = writtenIndex; 627 localIndexToWriteOnClient_(0) = writtenIndex; 628 628 629 629 return; … … 643 643 } 644 644 645 localIndexToWriteOnClient .resize(nbWritten);646 localIndexToWriteOnServer .resize(nbWritten);645 localIndexToWriteOnClient_.resize(nbWritten); 646 localIndexToWriteOnServer_.resize(nbWritten); 647 647 648 648 { … … 650 650 if (isDataDistributed_) 651 651 { 652 CContext Server* server = CContext::getCurrent()->server;653 MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);654 MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);652 CContext* context = CContext::getCurrent(); 653 MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, context->intraComm_); 654 MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, context->intraComm_); 655 655 offsetWrittenIndexes_ -= numberWrittenIndexes_; 656 656 } … … 666 666 if (itSrve != itSrv) 667 667 { 668 localIndexToWriteOnServer (nbWritten) = itSrv->second;669 localIndexToWriteOnClient (nbWritten) = it->second;668 localIndexToWriteOnServer_(nbWritten) = itSrv->second; 669 localIndexToWriteOnClient_(nbWritten) = it->second; 670 670 ++nbWritten; 671 671 } … … 1743 1743 for (int i = 0; i < nSize.size(); ++i) 1744 1744 dataSize *= nSize[i]; 1745 serverDistribution_ = new CDistributionServer( server->intraCommRank,1745 serverDistribution_ = new CDistributionServer(context->intraCommRank_, 1746 1746 globalIndex, axis_domain_order, 1747 1747 nBegin, nSize, nBeginGlobal, nGlob); -
XIOS/dev/dev_ym/XIOS_COUPLING/src/node/grid.hpp
r1794 r1847 315 315 // Maybe we need a flag to determine whether a client wants to write. TODO " 316 316 317 /** Map storing received data on server side. This map is the equivalent of storeIndex_client, but for data not received from model318 * instead that f or client. This map is used to concatenate data received from several clients into a single array on server side317 /** Map storing received data on server side. This map is the equivalent to the storeIndex_client, but for data received from client 318 * instead that from model. This map is used to concatenate data received from several clients into a single array on server side 319 319 * which match the local workflow grid. 320 320 * outLocalIndexStoreOnClient_[client_rank] -> Array of index from client of rank "client_rank" 321 321 * outLocalIndexStoreOnClient_[client_rank](index of buffer from client) -> local index of the workflow grid 322 322 * The map is created in CGrid::computeClientIndex and filled upon receiving data in CField::recvUpdateData(). 323 * Symetrically it is also used to send data from a server to sev ral client for reading case. */323 * Symetrically it is also used to send data from a server to several client for reading case. */ 324 324 map<int, CArray<size_t, 1> > outLocalIndexStoreOnClient_; 325 325 … … 327 327 /** Indexes calculated based on server-like distribution. 328 328 * They are used for writing/reading data and only calculated for server level that does the writing/reading. 329 * Along with localIndexToWriteOnClient, these indexes are used to correctly place incoming data. */ 330 CArray<size_t,1> localIndexToWriteOnServer; 329 * Along with localIndexToWriteOnClient, these indexes are used to correctly place incoming data. 
330 * size of the array : numberWrittenIndexes_ : number of index written in a compressed way 331 * localIndexToWriteOnServer_(compressed_written_index) : -> local uncompressed index that will be written in the file */ 332 CArray<size_t,1> localIndexToWriteOnServer_; 331 333 332 334 /** Indexes calculated based on client-like distribution. 333 335 * They are used for writing/reading data and only calculated for server level that does the writing/reading. 334 * Along with localIndexToWriteOnServer, these indexes are used to correctly place incoming data. */ 335 CArray<size_t,1> localIndexToWriteOnClient; 336 337 CArray<size_t,1> indexFromClients; 338 336 * Along with localIndexToWriteOnServer, these indexes are used to correctly place incoming data. 337 * size of the array : numberWrittenIndexes_ 338 * localIndexToWriteOnClient_(compressed_written_index) -> local index of the workflow grid*/ 339 CArray<size_t,1> localIndexToWriteOnClient_; 339 340 340 341 private: … … 344 345 std::set<CContextClient*> clientsSet; 345 346 346 /** Map storing received indexes. Key = sender rank, value = index array. */ 347 map<int, CArray<size_t, 1> > outGlobalIndexFromClient_; 347 /** Map storing received indexes on server side sent by clients. Key = sender rank, value = global index array. 348 Later, the global indexes received will be mapped onto local index computed with the local distribution. 349 outGlobalIndexFromClient_[rank] -> array of global index send by client of rank "rank" 350 outGlobalIndexFromClient_[rank](n) -> global index of datav n sent by client 351 */ 352 map<int, CArray<size_t, 1> > outGlobalIndexFromClient_; 348 353 349 354 bool isChecked; … … 394 399 bool hasTransform_; 395 400 396 /** Map storing global indexes of server-like (band-wise) distribution for sending to receivers. 397 * Key = size of receiver's intracomm. 401 /** Map storing global indexes of server-like (band-wise) distribution for sending to receivers (client side). 
402 * Key = size of receiver's intracomm (i.e. number of servers) 403 * ~ map<int, umap<int, std::vector<size_t> >> globalIndexOnServer_ 404 * globalIndexOnServer_[servers_size] -> map for a distribution of size "servers_size" (number of servers) 405 * globalIndexOnServer_[servers_size][server_rank] -> array of global index managed by server of rank "server_rank" 406 * globalIndexOnServer_[servers_size][server_rank][n] -> global index of data to be send to the server by client based on sub element of the grid. 407 * -> grid masking is not included. 398 408 */ 399 409 // std::map<CContextClient*, CClientServerMapping::GlobalIndexMap> globalIndexOnServer_;
Note: See TracChangeset
for help on using the changeset viewer.