Changeset 1460 for XIOS/dev/branch_openmp/src/node
- Timestamp:
- 03/22/18 10:43:20
- Location:
- XIOS/dev/branch_openmp/src/node
- Files:
- 10 added
- 24 edited
XIOS/dev/branch_openmp/src/node/axis.cpp
--- XIOS/dev/branch_openmp/src/node/axis.cpp (r1334)
+++ XIOS/dev/branch_openmp/src/node/axis.cpp (r1460)

 #include "context_server.hpp"
 #include "xios_spl.hpp"
-#include "inverse_axis.hpp"
-#include "zoom_axis.hpp"
-#include "interpolate_axis.hpp"
 #include "server_distribution_description.hpp"
 #include "client_server_mapping_distributed.hpp"
 #include "distribution_client.hpp"

+using namespace ep_lib;
+
 namespace xios {

-   /// ////////////////////// Définitions ////////////////////// ///
+   /// ////////////////////// Definitions ////////////////////// ///

    CAxis::CAxis(void)
       : CObjectTemplate<CAxis>()
-      , CAxisAttributes(), isChecked(false), relFiles(), areClientAttributesChecked_(false)
+      , CAxisAttributes(), isChecked(false), relFiles()
+      , areClientAttributesChecked_(false)
       , isClientAfterTransformationChecked(false)
-      , isDistributed_(false), hasBounds_(false), isCompressible_(false)
-      , numberWrittenIndexes_(0), totalNumberWrittenIndexes_(0), offsetWrittenIndexes_(0)
-      , transformationMap_(), hasValue(false), hasLabel(false)
+      , hasBounds(false), isCompressible_(false)
+      , numberWrittenIndexes_(), totalNumberWrittenIndexes_(), offsetWrittenIndexes_()
+      , transformationMap_()
+      , hasValue(false), hasLabel(false)
+      , computedWrittenIndex_(false)
+      , clients()
    {
    }

    CAxis::CAxis(const StdString & id)
       : CObjectTemplate<CAxis>(id)
-      , CAxisAttributes(), isChecked(false), relFiles(), areClientAttributesChecked_(false)
+      , CAxisAttributes(), isChecked(false), relFiles()
+      , areClientAttributesChecked_(false)
       , isClientAfterTransformationChecked(false)
-      , isDistributed_(false), hasBounds_(false), isCompressible_(false)
-      , numberWrittenIndexes_(0), totalNumberWrittenIndexes_(0), offsetWrittenIndexes_(0)
-      , transformationMap_(), hasValue(false), hasLabel(false)
+      , hasBounds(false), isCompressible_(false)
+      , numberWrittenIndexes_(), totalNumberWrittenIndexes_(), offsetWrittenIndexes_()
+      , transformationMap_()
+      , hasValue(false), hasLabel(false)
+      , computedWrittenIndex_(false)
+      , clients()
    {
    }
…
      m["inverse_axis"] = TRANS_INVERSE_AXIS;
      m["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_AXIS;
+     m["reduce_axis"] = TRANS_REDUCE_AXIS_TO_AXIS;
      m["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS;
+     m["temporal_splitting"] = TRANS_TEMPORAL_SPLITTING;
+     m["duplicate_scalar"] = TRANS_DUPLICATE_SCALAR_TO_AXIS;
…
      (*CAxis::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_AXIS;
      (*CAxis::transformationMapList_ptr)["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS;
+     (*CAxis::transformationMapList_ptr)["reduce_axis"] = TRANS_REDUCE_AXIS_TO_AXIS;
+     (*CAxis::transformationMapList_ptr)["temporal_splitting"] = TRANS_TEMPORAL_SPLITTING;
+     (*CAxis::transformationMapList_ptr)["duplicate_scalar"] = TRANS_DUPLICATE_SCALAR_TO_AXIS;
…
    bool CAxis::isDistributed(void) const
    {
-      return isDistributed_;
+      bool distributed = (!this->begin.isEmpty() && !this->n.isEmpty() && (this->begin + this->n < this->n_glo)) ||
+                         (!this->n.isEmpty() && (this->n != this->n_glo));
+      // A same stupid condition to make sure that if there is only one client, axis
+      // should be considered to be distributed. This should be a temporary solution
+      distributed |= (1 == CContext::getCurrent()->client->clientSize);
+      return distributed;
    }
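isDistributed() now derives the answer from the attributes themselves instead of a cached flag: the axis is distributed when its local slice is a strict subset of the global extent. A rough standalone sketch of the same test, with illustrative names only (std::optional stands in for an attribute's isEmpty() state; the single-client special case is left out):

{{{
#!cpp
#include <optional>

// Hypothetical stand-ins for the axis attributes used by isDistributed().
struct AxisExtent
{
    std::optional<int> begin, n; // local slice, possibly unset
    int n_glo;                   // global size
};

// Distributed when the local slice covers less than [0, n_glo).
bool isDistributed(const AxisExtent& a)
{
    bool byRange = a.begin.has_value() && a.n.has_value() && (*a.begin + *a.n < a.n_glo);
    bool bySize  = a.n.has_value() && (*a.n != a.n_glo);
    return byRange || bySize;
}
}}}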
…
-   const std::vector<int>& CAxis::getIndexesToWrite(void) const
-   {
-     return indexesToWrite;
-   }
-
    /*!
      Returns the number of indexes written by each server.
      \return the number of indexes written by each server
    */
-   int CAxis::getNumberWrittenIndexes() const
-   {
-     return numberWrittenIndexes_;
+   int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
+   {
+     int writtenSize;
+     ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
+     return numberWrittenIndexes_[writtenSize];
    }

    /*!
      Returns the total number of indexes written by the servers.
      \return the total number of indexes written by the servers
    */
-   int CAxis::getTotalNumberWrittenIndexes() const
-   {
-     return totalNumberWrittenIndexes_;
+   int CAxis::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom)
+   {
+     int writtenSize;
+     ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
+     return totalNumberWrittenIndexes_[writtenSize];
    }

    /*!
      Returns the offset of indexes written by each server.
      \return the offset of indexes written by each server
    */
-   int CAxis::getOffsetWrittenIndexes() const
-   {
-     return offsetWrittenIndexes_;
+   int CAxis::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom)
+   {
+     int writtenSize;
+     ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
+     return offsetWrittenIndexes_[writtenSize];
+   }
+
+   CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom)
+   {
+     int writtenSize;
+     ep_lib::MPI_Comm_size(writtenCom, &writtenSize);
+     return compressedIndexToWriteOnServer[writtenSize];
    }
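The getters above now key their cached results on the size of the communicator passed in, so one axis can answer for several server pools at once. A minimal runnable sketch of that lookup pattern, assuming plain MPI and a std::map (getNumberWritten and numberWritten are made-up names, not the XIOS API, and the real code goes through the ep_lib wrappers):

{{{
#!cpp
#include <mpi.h>
#include <map>

// One cached entry per communicator size, mirroring how
// numberWrittenIndexes_ becomes a map keyed by MPI_Comm_size.
std::map<int, int> numberWritten;

int getNumberWritten(MPI_Comm writtenCom, int localCount)
{
    int writtenSize;
    MPI_Comm_size(writtenCom, &writtenSize);
    // First call for this pool size caches; later calls reuse.
    if (numberWritten.find(writtenSize) == numberWritten.end())
        numberWritten[writtenSize] = localCount;
    return numberWritten[writtenSize];
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int n = getNumberWritten(MPI_COMM_WORLD, 42);
    (void)n;
    MPI_Finalize();
    return 0;
}
}}}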
…
    /*!
     * Compute the minimum buffer size required to send the attributes to the server(s).
     *
     * \return A map associating the server rank with its minimum buffer size.
     */
-   std::map<int, StdSize> CAxis::getAttributesBufferSize()
-   {
-     CContextClient* client = CContext::getCurrent()->client;
-
-     std::map<int, StdSize> attributesSizes = getMinimumBufferSizeForAttributes();
-
-     bool isNonDistributed = (n == n_glo);
+   std::map<int, StdSize> CAxis::getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid,
+                                                         CServerDistributionDescription::ServerDistributionType distType)
+   {
+     std::map<int, StdSize> attributesSizes = getMinimumBufferSizeForAttributes(client);
+
+     // bool isNonDistributed = (n_glo == n);
+     bool isDistributed = (orderPositionInGrid == CServerDistributionDescription::defaultDistributedDimension(globalDim.size(), distType))
+                          || (index.numElements() != n_glo);

      if (client->isServerLeader())
      {
        size_t size = 6 * sizeof(size_t);
        // size estimation for sendNonDistributedValue
-       if (isNonDistributed)
-         size = std::max(size, CArray<double,1>::size(n_glo) + (isCompressible_ ? CArray<int,1>::size(n_glo) : 0));
+       if (!isDistributed)
+       {
+         // size = std::max(size, CArray<double,1>::size(n_glo) + (isCompressible_ ? CArray<int,1>::size(n_glo) : 0));
+         size += CArray<int,1>::size(n_glo);
+         size += CArray<int,1>::size(n_glo);
+         size += CArray<bool,1>::size(n_glo);
+         size += CArray<double,1>::size(n_glo);
+         if (hasBounds)
+           size += CArray<double,2>::size(2*n_glo);
+         if (hasLabel)
+           size += CArray<StdString,1>::size(n_glo);
+       }
        size += CEventClient::headerSize + getId().size() + sizeof(size_t);
…
          attributesSizes[*itRank] = size;
        }
+       const std::list<int>& ranksNonLeaders = client->getRanksServerNotLeader();
+       for (std::list<int>::const_iterator itRank = ranksNonLeaders.begin(), itRankEnd = ranksNonLeaders.end(); itRank != itRankEnd; ++itRank)
+       {
+         if (size > attributesSizes[*itRank])
+           attributesSizes[*itRank] = size;
+       }
      }

-     if (!isNonDistributed)
+     if (isDistributed)
      {
        // size estimation for sendDistributedValue
-       std::map<int, std::vector<size_t> >::const_iterator it, ite = indSrv_.end();
-       for (it = indSrv_.begin(); it != ite; ++it)
+       boost::unordered_map<int, vector<size_t> >::const_iterator it, ite = indSrv_[client->serverSize].end();
+       for (it = indSrv_[client->serverSize].begin(); it != ite; ++it)
        {
-         size_t sizeIndexEvent = CArray<int,1>::size(it->second.size());
-         if (isCompressible_)
-           sizeIndexEvent += CArray<int,1>::size(indWrittenSrv_[it->first].size());
-
-         size_t sizeValEvent = CArray<double,1>::size(it->second.size());
-         if (hasBounds_)
-           sizeValEvent += CArray<double,2>::size(2 * it->second.size());
-
-         if (hasLabel)
-           sizeValEvent += CArray<StdString,1>::size(it->second.size());
-
-         size_t size = CEventClient::headerSize + getId().size() + sizeof(size_t) + std::max(sizeIndexEvent, sizeValEvent);
+         size_t size = 6 * sizeof(size_t);
+         size += CArray<int,1>::size(it->second.size());
+         size += CArray<int,1>::size(it->second.size());
+         size += CArray<bool,1>::size(it->second.size());
+         size += CArray<double,1>::size(it->second.size());
+         if (hasBounds)
+           size += CArray<double,2>::size(2 * it->second.size());
+         if (hasLabel)
+           size += CArray<StdString,1>::size(it->second.size());
+
+         size += CEventClient::headerSize + getId().size() + sizeof(size_t);
          if (size > attributesSizes[it->first])
            attributesSizes[it->first] = size;
…
-   void CAxis::fillInValues(const CArray<double,1>& values)
-   {
-     this->value = values;
-   }
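The estimation in getAttributesBufferSize() adds one contribution per array that will travel in the event. A compilable sketch of the same accounting with made-up helper names (arrayBytes and estimateAxisMessageBytes are not XIOS functions; CArray<T,1>::size() is approximated here by a length word plus the payload):

{{{
#!cpp
#include <cstddef>
#include <iostream>
#include <string>

// A serialized 1-D array costs a length word plus n elements.
template <typename T>
std::size_t arrayBytes(std::size_t n) { return sizeof(std::size_t) + n * sizeof(T); }

std::size_t estimateAxisMessageBytes(const std::string& id, std::size_t n, bool hasBounds)
{
    std::size_t size = 6 * sizeof(std::size_t);       // envelope of scalar attributes
    size += arrayBytes<int>(n);                       // global indexes
    size += arrayBytes<int>(n);                       // data indexes
    size += arrayBytes<bool>(n);                      // mask
    size += arrayBytes<double>(n);                    // values
    if (hasBounds) size += arrayBytes<double>(2 * n); // 2 x n bounds
    size += id.size() + sizeof(std::size_t);          // message header: id + its length
    return size;
}

int main()
{
    std::cout << estimateAxisMessageBytes("axis_A", 1000, true) << " bytes\n";
    return 0;
}
}}}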
…
+   /*!
+     Check common attributes of an axis.
+     This check should be done at the very beginning of the workflow.
+   */
    void CAxis::checkAttributes(void)
    {
…
+     // Remove this check because it doesn't make sense in case of a hole or overlapping axes
      if (!this->value.isEmpty())
      {
-       StdSize true_size = value.numElements();
-       if (this->n.getValue() != true_size)
-         ERROR("CAxis::checkAttributes(void)",
-               << "[ id = '" << getId() << "' , context = '" << CObjectFactory::GetCurrentContextId() << "' ] "
-               << "The axis is wrongly defined, attribute 'value' has a different size (" << true_size << ") than the one defined by the \'size\' attribute (" << n.getValue() << ").");
+       // StdSize true_size = value.numElements();
+       // if (this->n.getValue() != true_size)
+       //   ERROR("CAxis::checkAttributes(void)",
+       //         << "[ id = '" << getId() << "' , context = '" << CObjectFactory::GetCurrentContextId() << "' ] "
+       //         << "The axis is wrongly defined, attribute 'value' has a different size (" << true_size << ") than the one defined by the \'size\' attribute (" << n.getValue() << ").");
        this->hasValue = true;
      }

-     this->checkData();
-     this->checkZoom();
-     this->checkMask();
      this->checkBounds();
-     this->checkLabel();
-
-     isDistributed_ = (!this->begin.isEmpty() && !this->n.isEmpty() && (this->begin + this->n < this->n_glo)) ||
-                      (!this->n.isEmpty() && (this->n != this->n_glo));
-
-     // A same stupid condition to make sure that if there is only one client, axis
-     // should be considered to be distributed. This should be a temporary solution
-     isDistributed_ |= (1 == CContext::getCurrent()->client->clientSize);
-   }
+
+     CContext* context=CContext::getCurrent();
+     if (context->hasClient)
+     {
+       this->checkData();
+       this->checkZoom();
+       this->checkMask();
+       this->checkLabel();
+     }
+   }

+   /*!
+     Check the validity of data and fill in values if any.
+   */
    void CAxis::checkData()
    {
…
+   /*!
+     Check validity of zoom info and fill in values if any.
+   */
    void CAxis::checkZoom(void)
    {
      if (global_zoom_begin.isEmpty()) global_zoom_begin.setValue(0);
      if (global_zoom_n.isEmpty()) global_zoom_n.setValue(n_glo.getValue());
+     if (zoom_index.isEmpty())
+     {
+       zoom_index.setValue(index.getValue());
+     }
+     if (zoom_n.isEmpty()) zoom_n.setValue(n);
+     if (zoom_begin.isEmpty()) zoom_begin.setValue(begin);
    }
+
+   size_t CAxis::getGlobalWrittenSize(void)
+   {
+     if (zoomByIndex()) return global_zoom_index.numElements();
+     else return global_zoom_n ;
+   }
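getGlobalWrittenSize() and checkZoom() distinguish the two ways a zoom can be specified: an explicit list of global indexes (zoomByIndex()) or a contiguous window of length global_zoom_n. A small sketch of that decision with plain containers in place of CArray/CAttribute (all names illustrative):

{{{
#!cpp
#include <cstddef>
#include <vector>

// Zoom given either as an explicit index list or as a contiguous window.
struct Zoom
{
    std::vector<int> globalZoomIndex; // non-empty => zoom by index
    std::size_t globalZoomN;          // otherwise: window length
};

bool zoomByIndex(const Zoom& z) { return !z.globalZoomIndex.empty(); }

// Written size is the index count in index mode, the window length otherwise.
std::size_t globalWrittenSize(const Zoom& z)
{
    return zoomByIndex(z) ? z.globalZoomIndex.size() : z.globalZoomN;
}
}}}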
…
+   /*!
+     Check validity of mask info and fill in values if any.
+   */
    void CAxis::checkMask()
    {
…
+   /*!
+     Check validity of bounds info and fill in values if any.
+   */
    void CAxis::checkBounds()
    {
      if (!bounds.isEmpty())
      {
        if (bounds.extent(0) != 2 || bounds.extent(1) != n)
          ERROR("CAxis::checkAttributes(void)",
                << "The bounds array of the axis [ id = '" << getId() << "' , context = '" << CObjectFactory::GetCurrentContextId() << "' ] must be of dimension 2 x axis size." << std::endl
                << "Axis size is " << n.getValue() << "." << std::endl
                << "Bounds size is " << bounds.extent(0) << " x " << bounds.extent(1) << ".");
-       hasBounds_ = true;
-     }
-     else hasBounds_ = false;
+       hasBounds = true;
+     }
+     else hasBounds = false;
    }

    void CAxis::checkLabel()
    {
…
      else hasLabel = false;
    }

+   /*!
+     Check whether we can do compressed output.
+   */
    void CAxis::checkEligibilityForCompressedOutput()
    {
…
+   /*
+     Check whether we do zooming by indexing.
+     Return true if zooming is done by index.
+   */
    bool CAxis::zoomByIndex()
    {
…
+   /*!
+     Dispatch event from the lower communication layer then process event according to its type.
+   */
    bool CAxis::dispatchEvent(CEventServer& event)
    {
      if (SuperClass::dispatchEvent(event)) return true;
      else
      {
        switch(event.type)
        {
-         case EVENT_ID_SERVER_ATTRIBUT :
-           recvServerAttribut(event);
-           return true;
-           break;
-         case EVENT_ID_INDEX:
-           recvIndex(event);
+         case EVENT_ID_DISTRIBUTION_ATTRIBUTE :
+           recvDistributionAttribute(event);
            return true;
            break;
-         case EVENT_ID_DISTRIBUTED_VALUE:
-           recvDistributedValue(event);
+         case EVENT_ID_NON_DISTRIBUTED_ATTRIBUTES:
+           recvNonDistributedAttributes(event);
            return true;
            break;
-         case EVENT_ID_NON_DISTRIBUTED_VALUE:
-           recvNonDistributedValue(event);
+         case EVENT_ID_DISTRIBUTED_ATTRIBUTES:
+           recvDistributedAttributes(event);
            return true;
            break;
          default :
            ERROR("bool CAxis::dispatchEvent(CEventServer& event)",
                  << "Unknown Event");
            return false;
        }
      }
    }

+   /*!
+     Check attributes on client side (is this name still adequate?)
+   */
    void CAxis::checkAttributesOnClient()
    {
      if (this->areClientAttributesChecked_) return;

-     this->checkAttributes();
+     CContext* context=CContext::getCurrent();
+     if (context->hasClient && !context->hasServer) this->checkAttributes();

      this->areClientAttributesChecked_ = true;
    }
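The reworked dispatchEvent() above maps each event id from the lower communication layer onto its handler. The shape of that dispatch, reduced to a toy compilable form (the event ids are the real enumerator names, but the Event struct and the inlined handler comments are stand-ins; the real handlers take a CEventServer&):

{{{
#!cpp
#include <iostream>

enum EventType
{
    EVENT_ID_DISTRIBUTION_ATTRIBUTE,
    EVENT_ID_NON_DISTRIBUTED_ATTRIBUTES,
    EVENT_ID_DISTRIBUTED_ATTRIBUTES
};

struct Event { EventType type; };

bool dispatchEvent(const Event& event)
{
    switch (event.type)
    {
        case EVENT_ID_DISTRIBUTION_ATTRIBUTE:     /* recvDistributionAttribute(event) */    return true;
        case EVENT_ID_NON_DISTRIBUTED_ATTRIBUTES: /* recvNonDistributedAttributes(event) */ return true;
        case EVENT_ID_DISTRIBUTED_ATTRIBUTES:     /* recvDistributedAttributes(event) */    return true;
        default:
            std::cerr << "Unknown Event\n"; // mirrors the ERROR(...) branch
            return false;
    }
}

int main() { Event e = { EVENT_ID_DISTRIBUTED_ATTRIBUTES }; return dispatchEvent(e) ? 0 : 1; }
}}}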
…
+   /*
+     The (spatial) transformation sometimes can change attributes of an axis (e.g. zoom can change mask or generate can change whole attributes).
+     Therefore, we should recheck them.
+   */
    void CAxis::checkAttributesOnClientAfterTransformation(const std::vector<int>& globalDim, int orderPositionInGrid,
                                                           CServerDistributionDescription::ServerDistributionType distType)
    {
…
      if (this->isClientAfterTransformationChecked) return;
      if (context->hasClient)
      {
-       if (n.getValue() != n_glo.getValue()) computeConnectedServer(globalDim, orderPositionInGrid, distType);
+       if (orderPositionInGrid == CServerDistributionDescription::defaultDistributedDimension(globalDim.size(), distType))
+         computeConnectedClients(globalDim, orderPositionInGrid, distType);
+       else if (index.numElements() != n_glo) computeConnectedClients(globalDim, orderPositionInGrid, CServerDistributionDescription::ROOT_DISTRIBUTION);
      }
…
-   // Send all checked attributes to server
+   /*
+     Send all checked attributes to server? (We don't have the notion of server any more, so client==server)
+     \param [in] globalDim global dimension of grid containing this axis
+     \param [in] orderPositionInGrid the relative order of this axis in the grid (e.g. grid composed of domain+axis -> orderPositionInGrid is 2)
+     \param [in] distType distribution type of the server. For now, we only have band distribution.
+   */
    void CAxis::sendCheckedAttributes(const std::vector<int>& globalDim, int orderPositionInGrid,
                                      CServerDistributionDescription::ServerDistributionType distType)
    {
…
      if (this->isChecked) return;
-     if (context->hasClient)
-     {
-       sendServerAttribut(globalDim, orderPositionInGrid, distType);
-       if (hasValue) sendValue();
-     }
+     if (context->hasClient) sendAttributes(globalDim, orderPositionInGrid, distType);

      this->isChecked = true;
    }

-   void CAxis::sendValue()
-   {
-     if (n.getValue() == n_glo.getValue())
-       sendNonDistributedValue();
-     else
-       sendDistributedValue();
-   }
+   /*!
+     Send attributes from one client to other clients
+     \param[in] globalDim global dimension of grid which contains this axis
+     \param[in] order
+   */
+   void CAxis::sendAttributes(const std::vector<int>& globalDim, int orderPositionInGrid,
+                              CServerDistributionDescription::ServerDistributionType distType)
+   {
+     sendDistributionAttribute(globalDim, orderPositionInGrid, distType);
+
+     // if (index.numElements() == n_glo.getValue())
+     if ((orderPositionInGrid == CServerDistributionDescription::defaultDistributedDimension(globalDim.size(), distType))
+         || (index.numElements() != n_glo))
+     {
+       sendDistributedAttributes();
+     }
+     else
+     {
+       sendNonDistributedAttributes();
+     }
+   }
…
+   /*
+     Compute the connection between group of clients (or clients/servers).
+     (E.g: Suppose we have 2 groups of clients in two models: A (client role) connects to B (server role);
+     this function calculates the number of clients of B that one client of A connects to.)
+     \param [in] globalDim global dimension of grid containing this axis
+     \param [in] orderPositionInGrid the relative order of this axis in the grid (e.g. grid composed of domain+axis -> orderPositionInGrid is 2)
+     \param [in] distType distribution type of the server. For now, we only have band distribution.
+   */
-   void CAxis::computeConnectedServer(const std::vector<int>& globalDim, int orderPositionInGrid,
-                                      CServerDistributionDescription::ServerDistributionType distType)
+   void CAxis::computeConnectedClients(const std::vector<int>& globalDim, int orderPositionInGrid,
+                                       CServerDistributionDescription::ServerDistributionType distType)
    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     int nbServer = client->serverSize;
-     int range, clientSize = client->clientSize;
-     int rank = client->clientRank;
-
-     size_t ni = this->n.getValue();
-     size_t ibegin = this->begin.getValue();
-     size_t zoom_end = global_zoom_begin+global_zoom_n-1;
-     size_t nZoomCount = 0;
-     size_t nbIndex = index.numElements();
-
-     int end = (0 == n) ? begin : begin + n - 1;
-     int zoom_size = zoomByIndex() ? global_zoom_index.numElements() : global_zoom_n;
-     int minInd = min(index);
-     int maxInd = max(index);
-     for (size_t idx = 0; idx < zoom_size; ++idx)
-     {
-       size_t globalZoomIndex = zoomByIndex() ? global_zoom_index(idx) : global_zoom_begin + idx;
-       if (globalZoomIndex >= minInd && globalZoomIndex <= maxInd) ++nZoomCount;
-     }
-
-     /* for (size_t idx = 0; idx < nbIndex; ++idx)
-     {
-       size_t globalIndex = index(idx);
-       if (globalIndex >= global_zoom_begin && globalIndex <= zoom_end) ++nZoomCount;
-     }*/
+     int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1;
+
+     connectedServerRank_.clear();
+     nbSenders.clear();
+
+     for (int p = 0; p < nbSrvPools; ++p)
+     {
+       CContextClient* client = (0 != context->clientPrimServer.size()) ? context->clientPrimServer[p] : context->client;
+       int nbServer = client->serverSize;
+       int range, clientSize = client->clientSize;
+       int rank = client->clientRank;
+
+       if (connectedServerRank_.find(nbServer) == connectedServerRank_.end())
+       {
+         size_t ni = this->n.getValue();
+         size_t ibegin = this->begin.getValue();
+         size_t global_zoom_end = global_zoom_begin+global_zoom_n-1;
+         size_t nZoomCount = 0;
+         size_t nbIndex = index.numElements();
+
+         // First of all, we should compute the mapping of the global index and local index of the current client
+         if (globalLocalIndexMap_.empty())
+         {
+           for (size_t idx = 0; idx < nbIndex; ++idx)
+           {
+             globalLocalIndexMap_[index(idx)] = idx;
+           }
+         }
+
+         // Calculate the compressed index if any
+         std::set<int> writtenInd;
+         if (isCompressible_)
+         {
+           for (int idx = 0; idx < data_index.numElements(); ++idx)
+           {
+             int ind = CDistributionClient::getAxisIndex(data_index(idx), data_begin, ni);
+
+             if (ind >= 0 && ind < ni && mask(ind))
+             {
+               ind += ibegin;
+               if (ind >= global_zoom_begin && ind <= global_zoom_end)
+                 writtenInd.insert(ind);
+             }
+           }
+         }
+
+         // Compute the global index the current client (process) holds
+         std::vector<int> nGlobAxis(1);
+         nGlobAxis[0] = n_glo.getValue();
+
+         size_t globalSizeIndex = 1, indexBegin, indexEnd;
+         for (int i = 0; i < nGlobAxis.size(); ++i) globalSizeIndex *= nGlobAxis[i];
+         indexBegin = 0;
+         if (globalSizeIndex <= clientSize)
+         {
+           indexBegin = rank%globalSizeIndex;
+           indexEnd = indexBegin;
+         }
+         else
+         {
+           for (int i = 0; i < clientSize; ++i)
+           {
+             range = globalSizeIndex / clientSize;
+             if (i < (globalSizeIndex%clientSize)) ++range;
+             if (i == client->clientRank) break;
+             indexBegin += range;
+           }
+           indexEnd = indexBegin + range - 1;
+         }
+
+         CArray<size_t,1> globalIndex(index.numElements());
+         for (size_t idx = 0; idx < globalIndex.numElements(); ++idx)
+           globalIndex(idx) = index(idx);
+
+         // Describe the distribution of server side
+         CServerDistributionDescription serverDescription(nGlobAxis, nbServer, distType);
+
+         std::vector<int> serverZeroIndex;
+         serverZeroIndex = serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 0);
+
+         std::list<int> serverZeroIndexLeader;
+         std::list<int> serverZeroIndexNotLeader;
+         CContextClient::computeLeader(client->clientRank, client->clientSize, serverZeroIndex.size(), serverZeroIndexLeader, serverZeroIndexNotLeader);
+
+         for (std::list<int>::iterator it = serverZeroIndexLeader.begin(); it != serverZeroIndexLeader.end(); ++it)
+           *it = serverZeroIndex[*it];
+
+         // Find out the connection between client and server side
+         CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), client->intraComm);
+         clientServerMap->computeServerIndexMapping(globalIndex, nbServer);
+         CClientServerMapping::GlobalIndexMap& globalIndexAxisOnServer = clientServerMap->getGlobalIndexOnServer();
+
+         indSrv_[nbServer].swap(globalIndexAxisOnServer);
+
+         if (distType==CServerDistributionDescription::ROOT_DISTRIBUTION)
+         {
+           for(int i=1; i<nbServer; ++i)
+           {
+             indSrv_[nbServer].insert(pair<int, vector<size_t> >(i,indSrv_[nbServer][0]) ) ;
+           }
+
+           serverZeroIndexLeader.clear() ;
+         }
+
+         CClientServerMapping::GlobalIndexMap::const_iterator it = indSrv_[nbServer].begin(),
+                                                              ite = indSrv_[nbServer].end();
+
+         for (it = indSrv_[nbServer].begin(); it != ite; ++it) connectedServerRank_[nbServer].push_back(it->first);
+
+         for (std::list<int>::const_iterator it = serverZeroIndexLeader.begin(); it != serverZeroIndexLeader.end(); ++it)
+           connectedServerRank_[nbServer].push_back(*it);
+
+         // Even if a client has no index, it must connect to at least one server and
+         // send an "empty" data to this server
+         if (connectedServerRank_[nbServer].empty())
+           connectedServerRank_[nbServer].push_back(client->clientRank % client->serverSize);
+
+         nbSenders[nbServer] = CClientServerMapping::computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_[nbServer]);
+
+         delete clientServerMap;
+       }
+     }
+   }
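The range [indexBegin, indexEnd] used above comes from a band decomposition: N global indexes over clientSize ranks, with the first N % clientSize ranks taking one extra element. A standalone sketch of that computation (ownedRange is a made-up name; the real code also special-cases N <= clientSize by giving rank r the single index r % N):

{{{
#!cpp
#include <cstddef>
#include <iostream>
#include <utility>

// Returns the inclusive [begin, end] band owned by `rank`.
std::pair<std::size_t, std::size_t> ownedRange(std::size_t globalSize, int clients, int rank)
{
    std::size_t begin = 0, range = 0;
    for (int i = 0; i <= rank; ++i)
    {
        range = globalSize / clients;
        if (static_cast<std::size_t>(i) < globalSize % clients) ++range;
        if (i == rank) break;
        begin += range;
    }
    return std::make_pair(begin, begin + range - 1);
}

int main()
{
    for (int r = 0; r < 4; ++r)
    {
        std::pair<std::size_t, std::size_t> p = ownedRange(10, 4, r);
        std::cout << "rank " << r << ": [" << p.first << ", " << p.second << "]\n";
    }
    return 0; // 10 indexes over 4 ranks -> [0,2] [3,5] [6,7] [8,9]
}
}}}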
+
+   /*
+     Compute the index of data to write into file.
+     (Different from the previous version, this version of XIOS allows data to be written into file (classical role)
+     or transferred to other clients.)
+   */
+   void CAxis::computeWrittenIndex()
+   {
+     if (computedWrittenIndex_) return;
+     computedWrittenIndex_ = true;
+
+     CContext* context=CContext::getCurrent();
+     CContextServer* server = context->server;
+
+     // We describe the distribution of client (server) on which data are written
+     std::vector<int> nBegin(1), nSize(1), nBeginGlobal(1), nGlob(1);
+     nBegin[0] = zoom_begin;
+     nSize[0] = zoom_n;
+     nBeginGlobal[0] = 0;
+     nGlob[0] = n_glo;
+     CDistributionServer srvDist(server->intraCommSize, nBegin, nSize, nBeginGlobal, nGlob);
+     const CArray<size_t,1>& writtenGlobalIndex = srvDist.getGlobalIndex();
+
+     // Because all written data are local on a client,
+     // we need to compute the local index on the server from its corresponding global index
+     size_t nbWritten = 0, indGlo;
+     boost::unordered_map<size_t,size_t>::const_iterator itb = globalLocalIndexMap_.begin(),
+                                                         ite = globalLocalIndexMap_.end(), it;
+     CArray<size_t,1>::const_iterator itSrvb = writtenGlobalIndex.begin(),
+                                      itSrve = writtenGlobalIndex.end(), itSrv;
+     if (!zoomByIndex())
+     {
+       for (itSrv = itSrvb; itSrv != itSrve; ++itSrv)
+       {
+         indGlo = *itSrv;
+         if (ite != globalLocalIndexMap_.find(indGlo))
+         {
+           ++nbWritten;
+         }
+       }
+
+       localIndexToWriteOnServer.resize(writtenGlobalIndex.numElements());
+       // localIndexToWriteOnServer.resize(nbWritten);
+
+       nbWritten = 0;
+       for (itSrv = itSrvb; itSrv != itSrve; ++itSrv)
+       {
+         indGlo = *itSrv;
+         if (ite != globalLocalIndexMap_.find(indGlo))
+         {
+           localIndexToWriteOnServer(nbWritten) = globalLocalIndexMap_[indGlo];
+           ++nbWritten;
+         }
+       }
+     }
+     else
+     {
+       nbWritten = 0;
+       boost::unordered_map<size_t,size_t>::const_iterator itb = globalLocalIndexMap_.begin(),
+                                                           ite = globalLocalIndexMap_.end(), it;
+       for (int i = 0; i < zoom_index.numElements(); ++i)
+       {
+         if (ite != globalLocalIndexMap_.find(zoom_index(i)))
+           ++nbWritten;
+       }
+
+       localIndexToWriteOnServer.resize(nbWritten);
+
+       nbWritten = 0;
+       for (int i = 0; i < zoom_index.numElements(); ++i)
+       {
+         if (ite != globalLocalIndexMap_.find(zoom_index(i)))
+         {
+           localIndexToWriteOnServer(nbWritten) = globalLocalIndexMap_[zoom_index(i)];
+           ++nbWritten;
+         }
+       }
+     }
+   }
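computeWrittenIndex() keeps only the written global indexes this process actually holds and translates them to local positions through globalLocalIndexMap_. The core of that filter, sketched with plain containers (the real code does a counting pass first so it can size a fixed CArray; a vector with reserve collapses the two passes):

{{{
#!cpp
#include <cstddef>
#include <unordered_map>
#include <vector>

std::vector<std::size_t> localWrittenIndexes(
    const std::vector<std::size_t>& writtenGlobalIndex,               // what must be written
    const std::unordered_map<std::size_t, std::size_t>& globalToLocal) // what we hold
{
    std::vector<std::size_t> local;
    local.reserve(writtenGlobalIndex.size());
    for (std::size_t i = 0; i < writtenGlobalIndex.size(); ++i)
    {
        std::unordered_map<std::size_t, std::size_t>::const_iterator it =
            globalToLocal.find(writtenGlobalIndex[i]);
        if (it != globalToLocal.end())   // held locally -> record its local position
            local.push_back(it->second);
    }
    return local;
}
}}}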
+
+   void CAxis::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm)
+   {
+     int writtenCommSize;
+     ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize);
+     if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end())
+       return;
+
+     if (isCompressible())
+     {
+       size_t nbWritten = 0, indGlo;
+       CContext* context=CContext::getCurrent();
+       CContextServer* server = context->server;
+
+       // We describe the distribution of client (server) on which data are written
+       std::vector<int> nBegin(1), nSize(1), nBeginGlobal(1), nGlob(1);
+       nBegin[0] = zoom_begin;
+       nSize[0] = zoom_n;
+       nBeginGlobal[0] = 0;
+       nGlob[0] = n_glo;
+       CDistributionServer srvDist(server->intraCommSize, nBegin, nSize, nBeginGlobal, nGlob);
+       const CArray<size_t,1>& writtenGlobalIndex = srvDist.getGlobalIndex();
+       boost::unordered_map<size_t,size_t>::const_iterator itb = globalLocalIndexMap_.begin(),
+                                                           ite = globalLocalIndexMap_.end(), it;
+
+       CArray<size_t,1>::const_iterator itSrvb = writtenGlobalIndex.begin(),
+                                        itSrve = writtenGlobalIndex.end(), itSrv;
+       boost::unordered_map<size_t,size_t> localGlobalIndexMap;
+       for (itSrv = itSrvb; itSrv != itSrve; ++itSrv)
+       {
+         indGlo = *itSrv;
+         if (ite != globalLocalIndexMap_.find(indGlo))
+         {
+           localGlobalIndexMap[localIndexToWriteOnServer(nbWritten)] = indGlo;
+           ++nbWritten;
+         }
+       }
+
+       nbWritten = 0;
+       for (int idx = 0; idx < data_index.numElements(); ++idx)
+       {
+         if (localGlobalIndexMap.end() != localGlobalIndexMap.find(data_index(idx)))
+         {
+           ++nbWritten;
+         }
+       }
+
+       compressedIndexToWriteOnServer[writtenCommSize].resize(nbWritten);
+       nbWritten = 0;
+       for (int idx = 0; idx < data_index.numElements(); ++idx)
+       {
+         if (localGlobalIndexMap.end() != localGlobalIndexMap.find(data_index(idx)))
+         {
+           compressedIndexToWriteOnServer[writtenCommSize](nbWritten) = localGlobalIndexMap[data_index(idx)];
+           ++nbWritten;
+         }
+       }
+
+       numberWrittenIndexes_[writtenCommSize] = nbWritten;
+       if (isDistributed())
+       {
+         ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);
+         ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);
+         offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize];
+       }
+       else
+         totalNumberWrittenIndexes_[writtenCommSize] = numberWrittenIndexes_[writtenCommSize];
+     }
+   }
+
+   /*!
+     Send distribution information from a group of client (client role) to another group of client (server role).
+     The distribution of a group of client (server role) is imposed by the group of client (client role).
+     \param [in] globalDim global dimension of grid containing this axis
+     \param [in] orderPositionInGrid the relative order of this axis in the grid (e.g. grid composed of domain+axis -> orderPositionInGrid is 2)
+     \param [in] distType distribution type of the server. For now, we only have band distribution.
+   */
+   void CAxis::sendDistributionAttribute(const std::vector<int>& globalDim, int orderPositionInGrid,
+                                         CServerDistributionDescription::ServerDistributionType distType)
+   {
+     std::list<CContextClient*>::iterator it;
+     for (it=clients.begin(); it!=clients.end(); ++it)
+     {
+       CContextClient* client = *it;
+       int nbServer = client->serverSize;
+
+       CServerDistributionDescription serverDescription(globalDim, nbServer);
+       serverDescription.computeServerDistribution();
+
+       std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin();
+       std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes();
+
+       CEventClient event(getType(),EVENT_ID_DISTRIBUTION_ATTRIBUTE);
+       if (client->isServerLeader())
+       {
+         std::list<CMessage> msgs;
+
+         const std::list<int>& ranks = client->getRanksServerLeader();
+         for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         {
+           // Use const int to ensure CMessage holds a copy of the value instead of just a reference
+           const int begin = serverIndexBegin[*itRank][orderPositionInGrid];
+           const int ni = serverDimensionSizes[*itRank][orderPositionInGrid];
+           const int end = begin + ni - 1;
+
+           msgs.push_back(CMessage());
+           CMessage& msg = msgs.back();
+           msg << this->getId();
+           msg << ni << begin << end;
+           msg << isCompressible_;
+
+           event.push(*itRank,1,msg);
+         }
+         client->sendEvent(event);
+       }
+       else client->sendEvent(event);
+     }
+   }
+
+   /*
+     Receive distribution attribute from another client
+     \param [in] event event containing data of these attributes
+   */
+   void CAxis::recvDistributionAttribute(CEventServer& event)
+   {
+     CBufferIn* buffer = event.subEvents.begin()->buffer;
+     string axisId;
+     *buffer >> axisId;
+     get(axisId)->recvDistributionAttribute(*buffer);
+   }
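The Allreduce/Scan pair in computeWrittenCompressedIndex() above is the standard way to obtain, per rank, both the global total and an exclusive write offset. A minimal runnable MPI program showing the same pattern (plain MPI here, where the changeset goes through the ep_lib wrappers):

{{{
#!cpp
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int nbWritten = rank + 1;        // stand-in for this rank's written-index count
    int total = 0, offset = 0;
    MPI_Allreduce(&nbWritten, &total, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    MPI_Scan(&nbWritten, &offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    offset -= nbWritten;             // inclusive scan -> exclusive offset

    printf("rank %d: offset %d of %d\n", rank, offset, total);
    MPI_Finalize();
    return 0;
}
}}}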
+
+   /*
+     Receive distribution attribute from another client
+     \param [in] buffer buffer containing data of these attributes
+   */
+   void CAxis::recvDistributionAttribute(CBufferIn& buffer)
+   {
+     int ni_srv, begin_srv, end_srv;
+     int global_zoom_end, zoom_end;
+     bool zoomIndex = zoomByIndex();
-     CArray<size_t,1> globalIndexAxis(nbIndex);
-     for (size_t idx = 0; idx < nbIndex; ++idx)
-     {
-       globalIndexAxis(idx) = (size_t)index(idx);
-     }
-
-     std::vector<size_t> globalAxisZoom(nZoomCount);
-     nZoomCount = 0;
-     for (size_t idx = 0; idx < zoom_size; ++idx)
-     {
-       size_t globalZoomIndex = zoomByIndex() ? global_zoom_index(idx) : global_zoom_begin + idx;
-       if (globalZoomIndex >= minInd && globalZoomIndex <= maxInd)
-       {
-         globalAxisZoom[nZoomCount] = globalZoomIndex;
-         ++nZoomCount;
-       }
-     }
-
-     std::set<int> writtenInd;
-     if (isCompressible_)
-     {
+     std::vector<int> zoom_index_tmp;
+     std::vector<int>::iterator itZoomBegin, itZoomEnd, itZoom;
+
+     buffer >> ni_srv >> begin_srv >> end_srv;
+     buffer >> isCompressible_;
+
+     // Set up new local size of axis on the receiving clients
+     n.setValue(ni_srv);
+     begin.setValue(begin_srv);
+
+     // If we have zoom by index then process it
+     if (zoomIndex)
+     {
+       zoom_index_tmp.resize(global_zoom_index.numElements());
+       std::copy(global_zoom_index.begin(), global_zoom_index.end(), zoom_index_tmp.begin());
+       std::sort(zoom_index_tmp.begin(), zoom_index_tmp.end());
+       itZoomBegin = std::lower_bound(zoom_index_tmp.begin(), zoom_index_tmp.end(), begin_srv);
+       itZoomEnd = std::upper_bound(zoom_index_tmp.begin(), zoom_index_tmp.end(), end_srv);
+       int sz = std::distance(itZoomBegin, itZoomEnd);
+       zoom_index.resize(sz);
+       itZoom = itZoomBegin;
+       for (int i = 0; i < sz; ++i, ++itZoom)
+       {
+         zoom_index(i) = *(itZoom);
+       }
+     }
+
+     global_zoom_begin = zoomIndex ? 0 : global_zoom_begin ;
+     global_zoom_n = zoomIndex ? zoom_index_tmp.size() : global_zoom_n;
+     global_zoom_end = global_zoom_begin + global_zoom_n - 1;
+
+     zoom_begin = zoomIndex ? std::distance(zoom_index_tmp.begin(), itZoomBegin)
+                            : global_zoom_begin > begin_srv ? global_zoom_begin : begin_srv ;
+     zoom_end = zoomIndex ? std::distance(zoom_index_tmp.begin(), itZoomEnd) - 1
+                          : global_zoom_end < end_srv ? global_zoom_end : end_srv ;
+     zoom_n = zoom_end - zoom_begin + 1;
+
+     if (zoom_n<=0)
+     {
+       zoom_n = 0; zoom_begin=global_zoom_begin; //0; zoom_begin = 0;
+     }
+
+     if (n_glo == n)
+     {
+       zoom_begin = zoomIndex ? std::distance(itZoomBegin, zoom_index_tmp.begin())
+                              : global_zoom_begin;
+       zoom_n = zoomIndex ? zoom_index_tmp.size() : global_zoom_n;
+     }
+   }
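recvDistributionAttribute() clips the sorted zoom index list to the receiving client's slice [begin_srv, end_srv] with a lower_bound/upper_bound pair. The same clipping as a standalone helper (zoomOnSlice is an illustrative name):

{{{
#!cpp
#include <algorithm>
#include <vector>

// Returns the zoom indexes that fall inside the slice [beginSrv, endSrv].
std::vector<int> zoomOnSlice(std::vector<int> zoomIndex, int beginSrv, int endSrv)
{
    std::sort(zoomIndex.begin(), zoomIndex.end());
    std::vector<int>::iterator first =
        std::lower_bound(zoomIndex.begin(), zoomIndex.end(), beginSrv);
    std::vector<int>::iterator last =
        std::upper_bound(zoomIndex.begin(), zoomIndex.end(), endSrv);
    return std::vector<int>(first, last);
}
}}}

The offsets std::distance(zoom_index_tmp.begin(), first) and std::distance(zoom_index_tmp.begin(), last) - 1 then give zoom_begin and zoom_end directly, which is how the received attributes are reset above.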
-     for (int idx = 0; idx < data_index.numElements(); ++idx)
-     {
-       int ind = CDistributionClient::getAxisIndex(data_index(idx), data_begin, ni);
-
-       if (ind >= 0 && ind < ni && mask(ind))
-       {
-         ind += ibegin;
-         if (ind >= global_zoom_begin && ind <= zoom_end)
-           writtenInd.insert(ind);
-       }
-     }
-   }
-
-   CServerDistributionDescription serverDescriptionGlobal(globalDim, nbServer);
-   int distributedDimensionOnServer = serverDescriptionGlobal.getDimensionDistributed();
-   CClientServerMapping::GlobalIndexMap globalIndexAxisOnServer;
-   if (distributedDimensionOnServer == orderPositionInGrid) // So we have distributed axis on client side and also on server side
-   {
-     std::vector<int> nGlobAxis(1);
-     nGlobAxis[0] = n_glo.getValue();
-
-     size_t globalSizeIndex = 1, indexBegin, indexEnd;
-     for (int i = 0; i < nGlobAxis.size(); ++i) globalSizeIndex *= nGlobAxis[i];
-     indexBegin = 0;
-     if (globalSizeIndex <= clientSize)
-     {
-       indexBegin = rank%globalSizeIndex;
-       indexEnd = indexBegin;
-     }
-     else
-     {
-       for (int i = 0; i < clientSize; ++i)
-       {
-         range = globalSizeIndex / clientSize;
-         if (i < (globalSizeIndex%clientSize)) ++range;
-         if (i == client->clientRank) break;
-         indexBegin += range;
-       }
-       indexEnd = indexBegin + range - 1;
-     }
-
-     CServerDistributionDescription serverDescription(nGlobAxis, nbServer);
-     serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd));
-     CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), client->intraComm);
-     clientServerMap->computeServerIndexMapping(globalIndexAxis);
-     globalIndexAxisOnServer = clientServerMap->getGlobalIndexOnServer();
-     delete clientServerMap;
-   }
-   else
-   {
-     std::vector<size_t> globalIndexServer(n_glo.getValue());
-     for (size_t idx = 0; idx < n_glo.getValue(); ++idx)
-     {
-       globalIndexServer[idx] = idx;
-     }
-
-     for (int idx = 0; idx < nbServer; ++idx)
-     {
-       globalIndexAxisOnServer[idx] = globalIndexServer;
-     }
-   }
-
-   CClientServerMapping::GlobalIndexMap::const_iterator it = globalIndexAxisOnServer.begin(),
-                                                        ite = globalIndexAxisOnServer.end();
-   std::vector<size_t>::const_iterator itbVec = (globalAxisZoom).begin(),
-                                       iteVec = (globalAxisZoom).end();
-   indSrv_.clear();
-   indWrittenSrv_.clear();
-   for (; it != ite; ++it)
-   {
-     int rank = it->first;
-     const std::vector<size_t>& globalIndexTmp = it->second;
-     int nb = globalIndexTmp.size();
-
-     for (int i = 0; i < nb; ++i)
-     {
-       if (std::binary_search(itbVec, iteVec, globalIndexTmp[i]))
-       {
-         indSrv_[rank].push_back(globalIndexTmp[i]);
-       }
-
-       if (writtenInd.count(globalIndexTmp[i]))
-       {
-         indWrittenSrv_[rank].push_back(globalIndexTmp[i]);
-       }
-     }
-   }
-
-   connectedServerRank_.clear();
-   for (it = globalIndexAxisOnServer.begin(); it != ite; ++it) {
-     connectedServerRank_.push_back(it->first);
-   }
-
-   if (!indSrv_.empty())
-   {
-     std::map<int, vector<size_t> >::const_iterator itIndSrv = indSrv_.begin(),
-                                                    iteIndSrv = indSrv_.end();
-     connectedServerRank_.clear();
-     for (; itIndSrv != iteIndSrv; ++itIndSrv)
-       connectedServerRank_.push_back(itIndSrv->first);
-   }
-   nbConnectedClients_ = CClientServerMapping::computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_);
- }
+
+   /*
+     Send attributes of axis from a group of client to other group of clients/servers
+     on supposing that these attributes are not distributed among the sending group.
+     In the future, if new attributes are added, they should also be processed in this function.
+   */
+   void CAxis::sendNonDistributedAttributes()
+   {
+     std::list<CContextClient*>::iterator it;
+     for (it=clients.begin(); it!=clients.end(); ++it)
+     {
+       CContextClient* client = *it;
+
+       CEventClient event(getType(), EVENT_ID_NON_DISTRIBUTED_ATTRIBUTES);
+       size_t nbIndex = index.numElements();
+       size_t nbDataIndex = 0;
+
+       for (int idx = 0; idx < data_index.numElements(); ++idx)
+       {
+         int ind = data_index(idx);
+         if (ind >= 0 && ind < nbIndex) ++nbDataIndex;
+       }
+
+       CArray<int,1> dataIndex(nbDataIndex);
+       nbDataIndex = 0;
+       for (int idx = 0; idx < data_index.numElements(); ++idx)
+       {
+         int ind = data_index(idx);
+         if (ind >= 0 && ind < nbIndex)
+         {
+           dataIndex(nbDataIndex) = ind;
+           ++nbDataIndex;
+         }
+       }
+
+       if (client->isServerLeader())
+       {
+         std::list<CMessage> msgs;
+
+         const std::list<int>& ranks = client->getRanksServerLeader();
+         for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         {
+           msgs.push_back(CMessage());
+           CMessage& msg = msgs.back();
+           msg << this->getId();
+           msg << index.getValue() << dataIndex << mask.getValue();
+           msg << hasValue;
+           if (hasValue) msg << value.getValue();
+           msg << hasBounds;
+           if (hasBounds) msg << bounds.getValue();
+           msg << hasLabel;
+           if (hasLabel) msg << label.getValue();
+
+           event.push(*itRank, 1, msg);
+         }
+         client->sendEvent(event);
+       }
+       else client->sendEvent(event);
+     }
+   }
-
-   void CAxis::sendNonDistributedValue()
-   {
-     CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     CEventClient event(getType(), EVENT_ID_NON_DISTRIBUTED_VALUE);
-
-     int zoom_end = global_zoom_begin + global_zoom_n - 1;
-     int nb = 0;
-     /* for (size_t idx = 0; idx < n; ++idx)
-     {
-       size_t globalIndex = begin + idx;
-       if (globalIndex >= global_zoom_begin && globalIndex <= zoom_end) ++nb;
-     }*/
-
-     int end = (0 == n) ? begin : begin + n - 1;
-     int zoom_size = zoomByIndex() ? global_zoom_index.numElements() : global_zoom_n;
-     for (size_t idx = 0; idx < zoom_size; ++idx)
-     {
-       size_t globalZoomIndex = zoomByIndex() ? global_zoom_index(idx) : global_zoom_begin + idx;
-       if (globalZoomIndex >= begin && globalZoomIndex <= end) ++nb;
-     }
-
-     int nbWritten = 0;
-     if (isCompressible_)
-     {
-       for (int idx = 0; idx < data_index.numElements(); ++idx)
-       {
-         int ind = CDistributionClient::getAxisIndex(data_index(idx), data_begin, n);
-
-         if (ind >= 0 && ind < n && mask(ind))
-         {
-           ind += begin;
-           if (ind >= global_zoom_begin && ind <= zoom_end)
-             ++nbWritten;
-         }
-       }
-     }
-
-     CArray<double,1> val(nb);
-     nb = 0;
-     /* for (size_t idx = 0; idx < n; ++idx)
-     {
-       size_t globalIndex = begin + idx;
-       if (globalIndex >= global_zoom_begin && globalIndex <= zoom_end)
-       {
-         val(nb) = value(idx);
-         ++nb;
-       }
-     }*/
-
-     for (size_t idx = 0; idx < zoom_size; ++idx)
-     {
-       size_t globalZoomIndex = zoomByIndex() ? global_zoom_index(idx) : global_zoom_begin + idx;
-       if (globalZoomIndex >= begin && globalZoomIndex <= end)
-       {
-         val(nb) = value(globalZoomIndex-begin);
-         ++nb;
-       }
-     }
-
-     CArray<int, 1> writtenInd(nbWritten);
-     nbWritten = 0;
-     if (isCompressible_)
-     {
-       for (int idx = 0; idx < data_index.numElements(); ++idx)
-       {
-         int ind = CDistributionClient::getAxisIndex(data_index(idx), data_begin, n);
-
-         if (ind >= 0 && ind < n && mask(ind))
-         {
-           ind += begin;
-           if (ind >= global_zoom_begin && ind <= zoom_end)
-           {
-             writtenInd(nbWritten) = ind;
-             ++nbWritten;
-           }
-         }
-       }
-     }
-
-     if (client->isServerLeader())
-     {
-       std::list<CMessage> msgs;
-
-       const std::list<int>& ranks = client->getRanksServerLeader();
-       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       {
-         msgs.push_back(CMessage());
-         CMessage& msg = msgs.back();
-         msg << this->getId();
-         msg << val;
-         if (isCompressible_)
-           msg << writtenInd;
-         event.push(*itRank, 1, msg);
-       }
-       client->sendEvent(event);
-     }
-     else client->sendEvent(event);
-   }
-
-   void CAxis::sendDistributedValue(void)
-   {
-     int ns, n, i, j, ind, nv, idx;
-     CContext* context = CContext::getCurrent();
-     CContextClient* client=context->client;
-
-     // send value for each connected server
-     CEventClient eventIndex(getType(), EVENT_ID_INDEX);
-     CEventClient eventVal(getType(), EVENT_ID_DISTRIBUTED_VALUE);
-
-     list<CMessage> list_msgsIndex, list_msgsVal;
-     list<CArray<int,1> > list_indi;
-     list<CArray<int,1> > list_writtenInd;
-     list<CArray<double,1> > list_val;
-     list<CArray<double,2> > list_bounds;
-     list<CArray<StdString,1> > list_label;
-
-     std::map<int, std::vector<size_t> >::const_iterator it, iteMap;
-     iteMap = indSrv_.end();
-     for (int k = 0; k < connectedServerRank_.size(); ++k)
-     {
-       int nbData = 0;
-       int rank = connectedServerRank_[k];
-       it = indSrv_.find(rank);
-       if (iteMap != it)
-         nbData = it->second.size();
-
-       list_indi.push_back(CArray<int,1>(nbData));
-       list_val.push_back(CArray<double,1>(nbData));
-
-       if (hasBounds_)
-       {
-         list_bounds.push_back(CArray<double,2>(2,nbData));
-       }
-
-       if (hasLabel)
-       {
-         list_label.push_back(CArray<StdString,1>(nbData));
-       }
-
-       CArray<int,1>& indi = list_indi.back();
-       CArray<double,1>& val = list_val.back();
-
-       for (n = 0; n < nbData; ++n)
-       {
-         idx = static_cast<int>(it->second[n]);
-         ind = idx - begin;
-
-         val(n) = value(ind);
-         indi(n) = idx;
-
-         if (hasBounds_)
-         {
-           CArray<double,2>& boundsVal = list_bounds.back();
-           boundsVal(0, n) = bounds(0,n);
-           boundsVal(1, n) = bounds(1,n);
-         }
-
-         if (hasLabel)
-         {
-           CArray<StdString,1>& labelVal = list_label.back();
-           labelVal(n) = label(n);
-         }
-       }
-
-       list_msgsIndex.push_back(CMessage());
-       list_msgsIndex.back() << this->getId() << list_indi.back();
-
-       if (isCompressible_)
-       {
-         std::vector<int>& writtenIndSrc = indWrittenSrv_[rank];
-         list_writtenInd.push_back(CArray<int,1>(writtenIndSrc.size()));
-         CArray<int,1>& writtenInd = list_writtenInd.back();
-
-         for (n = 0; n < writtenInd.numElements(); ++n)
-           writtenInd(n) = writtenIndSrc[n];
-
-         list_msgsIndex.back() << writtenInd;
-       }
-
-       list_msgsVal.push_back(CMessage());
-       list_msgsVal.back() << this->getId() << list_val.back();
-
-       if (hasBounds_)
-       {
-         list_msgsVal.back() << list_bounds.back();
-       }
-
-       if (hasLabel)
-       {
-         list_msgsVal.back() << list_label.back();
-       }
-
-       eventIndex.push(rank, nbConnectedClients_[rank], list_msgsIndex.back());
-       eventVal.push(rank, nbConnectedClients_[rank], list_msgsVal.back());
-     }
-
-     client->sendEvent(eventIndex);
-     client->sendEvent(eventVal);
-   }
-
-   void CAxis::recvIndex(CEventServer& event)
-   {
-     CAxis* axis;
-
-     list<CEventServer::SSubEvent>::iterator it;
-     for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it)
-     {
-       CBufferIn* buffer = it->buffer;
-       string axisId;
-       *buffer >> axisId;
-       axis = get(axisId);
-       axis->recvIndex(it->rank, *buffer);
-     }
-
-     if (axis->isCompressible_)
-     {
-       std::sort(axis->indexesToWrite.begin(), axis->indexesToWrite.end());
-
-       CContextServer* server = CContext::getCurrent()->server;
-       axis->numberWrittenIndexes_ = axis->indexesToWrite.size();
-       MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
-       MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
-       axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_;
-     }
-   }
-
-   void CAxis::recvIndex(int rank, CBufferIn& buffer)
-   {
-     buffer >> indiSrv_[rank];
-
-     if (isCompressible_)
-     {
-       CArray<int, 1> writtenIndexes;
-       buffer >> writtenIndexes;
-       indexesToWrite.reserve(indexesToWrite.size() + writtenIndexes.numElements());
-       for (int i = 0; i < writtenIndexes.numElements(); ++i)
-         indexesToWrite.push_back(writtenIndexes(i));
-     }
-   }
+
+   /*
+     Receive the non-distributed attributes from another group of clients
+     \param [in] event event containing data of these attributes
+   */
+   void CAxis::recvNonDistributedAttributes(CEventServer& event)
+   {
+     list<CEventServer::SSubEvent>::iterator it;
+     for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it)
+     {
+       CBufferIn* buffer = it->buffer;
+       string axisId;
+       *buffer >> axisId;
+       get(axisId)->recvNonDistributedAttributes(it->rank, *buffer);
+     }
+   }
+
+   /*
+     Receive the non-distributed attributes from another group of clients
+     \param [in] rank rank of the sender
+     \param [in] buffer buffer containing data sent from the sender
+   */
+   void CAxis::recvNonDistributedAttributes(int rank, CBufferIn& buffer)
+   {
+     CArray<int,1> tmp_index, tmp_data_index, tmp_zoom_index;
+     CArray<bool,1> tmp_mask;
+     CArray<double,1> tmp_val;
+     CArray<double,2> tmp_bnds;
+     CArray<string,1> tmp_label;
+
+     buffer >> tmp_index;
+     index.reference(tmp_index);
+     buffer >> tmp_data_index;
+     data_index.reference(tmp_data_index);
+     buffer >> tmp_mask;
+     mask.reference(tmp_mask);
+
+     buffer >> hasValue;
+     if (hasValue)
+     {
+       buffer >> tmp_val;
+       value.reference(tmp_val);
+     }
+
+     buffer >> hasBounds;
+     if (hasBounds)
+     {
+       buffer >> tmp_bnds;
+       bounds.reference(tmp_bnds);
+     }
+
+     buffer >> hasLabel;
+     if (hasLabel)
+     {
+       buffer >> tmp_label;
+       label.reference(tmp_label);
+     }
+
+     // Some value should be reset here
+     data_begin.setValue(0);
+     globalLocalIndexMap_.rehash(std::ceil(index.numElements()/globalLocalIndexMap_.max_load_factor()));
+     // for (int idx = 0; idx < index.numElements(); ++idx) globalLocalIndexMap_[idx] = index(idx);
+     for (int idx = 0; idx < index.numElements(); ++idx) globalLocalIndexMap_[index(idx)] = idx;
+   }
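After receiving, the code rehashes globalLocalIndexMap_ to the expected element count before the bulk fill, so the hash table is sized once rather than growing repeatedly. A sketch of that idiom with std::unordered_map in place of boost::unordered_map (buildGlobalToLocal is an illustrative name):

{{{
#!cpp
#include <cmath>
#include <unordered_map>
#include <vector>

std::unordered_map<int, std::size_t> buildGlobalToLocal(const std::vector<int>& index)
{
    std::unordered_map<int, std::size_t> map;
    // Reserve enough buckets so the bulk insert below never rehashes.
    map.rehash(static_cast<std::size_t>(
        std::ceil(index.size() / map.max_load_factor())));
    for (std::size_t idx = 0; idx < index.size(); ++idx)
        map[index[idx]] = idx;   // key is the global index, value the local one
    return map;
}
}}}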
+
+   /*
+     Send attributes of axis from a group of client to other group of clients/servers
+     on supposing that these attributes are distributed among the clients of the sending group.
+     In the future, if new attributes are added, they should also be processed in this function.
+   */
+   void CAxis::sendDistributedAttributes(void)
+   {
+     int ns, n, i, j, ind, nv, idx;
+     std::list<CContextClient*>::iterator it;
+
+     for (it=clients.begin(); it!=clients.end(); ++it)
+     {
+       CContextClient* client = *it;
+       int nbServer = client->serverSize;
+
+       CEventClient eventData(getType(), EVENT_ID_DISTRIBUTED_ATTRIBUTES);
+
+       list<CMessage> listData;
+       list<CArray<int,1> > list_indi, list_dataInd, list_zoomInd;
+       list<CArray<bool,1> > list_mask;
+       list<CArray<double,1> > list_val;
+       list<CArray<double,2> > list_bounds;
+       list<CArray<string,1> > list_label;
+
+       int nbIndex = index.numElements();
+       CArray<int,1> dataIndex(nbIndex);
+       dataIndex = -1;
+       for (idx = 0; idx < data_index.numElements(); ++idx)
+       {
+         if (0 <= data_index(idx) && data_index(idx) < nbIndex)
+           dataIndex(idx) = 1;
+       }
+
+       boost::unordered_map<int, std::vector<size_t> >::const_iterator it, iteMap;
+       iteMap = indSrv_[nbServer].end();
+       for (int k = 0; k < connectedServerRank_[nbServer].size(); ++k)
+       {
+         int nbData = 0;
+         int rank = connectedServerRank_[nbServer][k];
+         it = indSrv_[nbServer].find(rank);
+         if (iteMap != it)
+           nbData = it->second.size();
+
+         list_indi.push_back(CArray<int,1>(nbData));
+         list_dataInd.push_back(CArray<int,1>(nbData));
+         list_mask.push_back(CArray<bool,1>(nbData));
+
+         if (hasValue)
+           list_val.push_back(CArray<double,1>(nbData));
+
+         if (hasBounds)
+           list_bounds.push_back(CArray<double,2>(2,nbData));
+
+         if (hasLabel)
+           list_label.push_back(CArray<string,1>(nbData));
+
+         CArray<int,1>& indi = list_indi.back();
+         CArray<int,1>& dataIndi = list_dataInd.back();
+         CArray<bool,1>& maskIndi = list_mask.back();
+
+         for (n = 0; n < nbData; ++n)
+         {
+           idx = static_cast<int>(it->second[n]);
+           indi(n) = idx;
+
+           ind = globalLocalIndexMap_[idx];
+           dataIndi(n) = dataIndex(ind);
+           maskIndi(n) = mask(ind);
+
+           if (hasValue)
+           {
+             CArray<double,1>& val = list_val.back();
+             val(n) = value(ind);
+           }
+
+           if (hasBounds)
+           {
+             CArray<double,2>& boundsVal = list_bounds.back();
+             boundsVal(0, n) = bounds(0,ind);
+             boundsVal(1, n) = bounds(1,ind);
+           }
+
+           if (hasLabel)
+           {
+             CArray<string,1>& labelVal = list_label.back();
+             labelVal(n) = label(ind);
+           }
+         }
+
+         listData.push_back(CMessage());
+         listData.back() << this->getId()
+                         << list_indi.back() << list_dataInd.back() << list_mask.back();
+
+         listData.back() << hasValue;
+         if (hasValue)
+           listData.back() << list_val.back();
+
+         listData.back() << hasBounds;
+         if (hasBounds)
+           listData.back() << list_bounds.back();
+
+         listData.back() << hasLabel;
+         if (hasLabel)
+           listData.back() << list_label.back();
+
+         eventData.push(rank, nbSenders[nbServer][rank], listData.back());
+       }
+
+       client->sendEvent(eventData);
+     }
+   }
+
+   /*
+     Receive the distributed attributes from another group of clients
+     \param [in] event event containing data of these attributes
+   */
+   void CAxis::recvDistributedAttributes(CEventServer& event)
+   {
+     string axisId;
+     vector<int> ranks;
+     vector<CBufferIn*> buffers;
+
+     list<CEventServer::SSubEvent>::iterator it;
+     for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it)
+     {
+       ranks.push_back(it->rank);
+       CBufferIn* buffer = it->buffer;
+       *buffer >> axisId;
+       buffers.push_back(buffer);
+     }
+     get(axisId)->recvDistributedAttributes(ranks, buffers);
+   }
-
-   void CAxis::recvDistributedValue(CEventServer& event)
-   {
-     list<CEventServer::SSubEvent>::iterator it;
-     for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it)
-     {
-       CBufferIn* buffer = it->buffer;
-       string axisId;
-       *buffer >> axisId;
-       get(axisId)->recvDistributedValue(it->rank, *buffer);
-     }
-   }
-
-   void CAxis::recvDistributedValue(int rank, CBufferIn& buffer)
-   {
-     CArray<int,1> &indi = indiSrv_[rank];
-     CArray<double,1> val;
-     CArray<double,2> boundsVal;
-     CArray<StdString,1> labelVal;
-
-     buffer >> val;
-     if (hasBounds_) buffer >> boundsVal;
-     if (hasLabel) buffer >> labelVal;
-
-     int i, j, ind_srv;
-     for (int ind = 0; ind < indi.numElements(); ++ind)
-     {
-       i = indi(ind);
-       ind_srv = i - zoom_begin_srv;
-       value_srv(ind_srv) = val(ind);
-       if (hasBounds_)
-       {
-         bound_srv(0,ind_srv) = boundsVal(0, ind);
-         bound_srv(1,ind_srv) = boundsVal(1, ind);
-       }
-       if (hasLabel)
-       {
-         label_srv(ind_srv) = labelVal(ind);
-       }
-     }
-   }
+
+   /*
+     Receive the distributed attributes from another group of clients
+     \param [in] ranks ranks of the senders
+     \param [in] buffers buffers containing data sent from the senders
+   */
+   void CAxis::recvDistributedAttributes(vector<int>& ranks, vector<CBufferIn*> buffers)
+   {
+     int nbReceived = ranks.size(), idx, ind, gloInd, locInd;
+     vector<CArray<int,1> > vec_indi(nbReceived), vec_dataInd(nbReceived), vec_zoomInd(nbReceived);
+     vector<CArray<bool,1> > vec_mask(nbReceived);
+     vector<CArray<double,1> > vec_val(nbReceived);
+     vector<CArray<double,2> > vec_bounds(nbReceived);
+     vector<CArray<string,1> > vec_label(nbReceived);
+
+     for (idx = 0; idx < nbReceived; ++idx)
+     {
+       CBufferIn& buffer = *buffers[idx];
+       buffer >> vec_indi[idx];
+       buffer >> vec_dataInd[idx];
+       buffer >> vec_mask[idx];
+
+       buffer >> hasValue;
+       if (hasValue)
+         buffer >> vec_val[idx];
+
+       buffer >> hasBounds;
+       if (hasBounds)
+         buffer >> vec_bounds[idx];
+
+       buffer >> hasLabel;
-
-   void CAxis::recvNonDistributedValue(CEventServer& event)
-   {
-     CAxis* axis;
-
-     list<CEventServer::SSubEvent>::iterator it;
-     for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it)
-     {
-       CBufferIn* buffer = it->buffer;
-       string axisId;
-       *buffer >> axisId;
-       axis = get(axisId);
-       axis->recvNonDistributedValue(it->rank, *buffer);
-     }
-
-     if (axis->isCompressible_)
-     {
-       std::sort(axis->indexesToWrite.begin(), axis->indexesToWrite.end());
-
-       axis->numberWrittenIndexes_ = axis->totalNumberWrittenIndexes_ = axis->indexesToWrite.size();
-       axis->offsetWrittenIndexes_ = 0;
-     }
-   }
-
-   void CAxis::recvNonDistributedValue(int rank, CBufferIn& buffer)
-   {
-     CArray<double,1> val;
-     buffer >> val;
-
-     for (int ind = 0; ind < val.numElements(); ++ind)
-     {
-       value_srv(ind) = val(ind);
-       if (hasBounds_)
-       {
-         bound_srv(0,ind) = bounds(0,ind);
-         bound_srv(1,ind) = bounds(1,ind);
-       }
-       if (hasLabel)
-       {
-         label_srv(ind) = label(ind);
-       }
-     }
-
-     if (isCompressible_)
-     {
-       CArray<int, 1> writtenIndexes;
-       buffer >> writtenIndexes;
-       indexesToWrite.reserve(indexesToWrite.size() + writtenIndexes.numElements());
-       for (int i = 0; i < writtenIndexes.numElements(); ++i)
-         indexesToWrite.push_back(writtenIndexes(i));
-     }
-   }
-
-   void CAxis::sendServerAttribut(const std::vector<int>& globalDim, int orderPositionInGrid,
-                                  CServerDistributionDescription::ServerDistributionType distType)
-   {
-     CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
-     int nbServer = client->serverSize;
-
-     CServerDistributionDescription serverDescription(globalDim, nbServer);
-     serverDescription.computeServerDistribution();
-
-     std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin();
-     std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes();
-
-     CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT);
-     if (client->isServerLeader())
-     {
-       std::list<CMessage> msgs;
-
-       const std::list<int>& ranks = client->getRanksServerLeader();
-       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       {
-         // Use const int to ensure CMessage holds a copy of the value instead of just a reference
-         const int begin = serverIndexBegin[*itRank][orderPositionInGrid];
-         const int ni = serverDimensionSizes[*itRank][orderPositionInGrid];
-         const int end = begin + ni - 1;
-         const bool zoomIndex = zoomByIndex();
-
-         msgs.push_back(CMessage());
-         CMessage& msg = msgs.back();
-         msg << this->getId();
-         msg << ni << begin << end;
-         msg << global_zoom_begin.getValue() << global_zoom_n.getValue();
-         msg << isCompressible_;
-         msg << zoomIndex;
-         if (zoomIndex)
-           msg << global_zoom_index.getValue();
-
-         event.push(*itRank,1,msg);
-       }
-       client->sendEvent(event);
-     }
-     else client->sendEvent(event);
-   }
-
-   void CAxis::recvServerAttribut(CEventServer& event)
-   {
-     CBufferIn* buffer = event.subEvents.begin()->buffer;
-     string axisId;
-     *buffer >> axisId;
-     get(axisId)->recvServerAttribut(*buffer);
-   }
-
-   void CAxis::recvServerAttribut(CBufferIn& buffer)
-   {
-     int ni_srv, begin_srv, end_srv, global_zoom_begin_tmp, global_zoom_n_tmp;
-     bool zoomIndex;
-     CArray<int,1> zoom_index_recv;
-     std::vector<int> zoom_index_tmp;
-     std::vector<int>::iterator itZoomBeginSrv, itZoomEndSrv, itZoomSrv;
-
-     buffer >> ni_srv >> begin_srv >> end_srv;
-     buffer >> global_zoom_begin_tmp >> global_zoom_n_tmp;
-     buffer >> isCompressible_;
-     buffer >> zoomIndex;
-     if (zoomIndex)
-     {
-       buffer >> zoom_index_recv;
-       global_zoom_index.reference(zoom_index_recv);
-       zoom_index_tmp.resize(global_zoom_index.numElements());
-       std::copy(global_zoom_index.begin(), global_zoom_index.end(), zoom_index_tmp.begin());
-       std::sort(zoom_index_tmp.begin(), zoom_index_tmp.end());
zoom_index_tmp.end()); 1016 itZoomBeginSrv = std::lower_bound(zoom_index_tmp.begin(), zoom_index_tmp.end(), begin_srv); 1017 itZoomEndSrv = std::upper_bound(zoom_index_tmp.begin(), zoom_index_tmp.end(), end_srv); 1018 int sz = std::distance(itZoomBeginSrv, itZoomEndSrv); 1019 zoom_index_srv.resize(sz); 1020 itZoomSrv = itZoomBeginSrv; 1021 for (int i = 0; i < sz; ++i, ++itZoomSrv) 1022 { 1023 zoom_index_srv(i) = *(itZoomSrv); 1024 } 1025 } 1026 1027 global_zoom_begin = global_zoom_begin_tmp; 1028 global_zoom_n = global_zoom_n_tmp; 1029 int global_zoom_end = global_zoom_begin + global_zoom_n - 1; 1030 1031 zoom_begin_srv = zoomIndex ? std::distance(itZoomBeginSrv, zoom_index_tmp.begin()) 1032 : global_zoom_begin > begin_srv ? global_zoom_begin : begin_srv ; 1033 zoom_end_srv = zoomIndex ? std::distance(zoom_index_tmp.begin(), itZoomEndSrv) - 1 1034 : global_zoom_end < end_srv ? global_zoom_end : end_srv ; 1035 zoom_size_srv = zoom_end_srv - zoom_begin_srv + 1; 1036 1037 global_zoom_begin_srv = zoomIndex ? 0 : global_zoom_begin ; 1038 global_zoom_size_srv = zoomIndex ? zoom_index_tmp.size() : global_zoom_n; 1039 1040 if (zoom_size_srv<=0) 1041 { 1042 zoom_begin_srv = 0; zoom_end_srv = 0; zoom_size_srv = 0; 1043 } 1044 1045 if (n_glo == n) 1046 { 1047 zoom_begin_srv = zoomIndex ? std::distance(itZoomBeginSrv, zoom_index_tmp.begin()) 1048 : global_zoom_begin; 1049 zoom_size_srv = zoomIndex ? zoom_index_tmp.size() 1050 : global_zoom_n; 1051 } 1249 buffer >> vec_label[idx]; 1250 } 1251 1252 // Estimate size of index array 1253 int nbIndexGlob = 0; 1254 for (idx = 0; idx < nbReceived; ++idx) 1255 { 1256 nbIndexGlob += vec_indi[idx].numElements(); 1257 } 1258 1259 // Recompute global index 1260 // Take account of the overlapped index 1261 index.resize(nbIndexGlob); 1262 globalLocalIndexMap_.rehash(std::ceil(index.numElements()/globalLocalIndexMap_.max_load_factor())); 1263 nbIndexGlob = 0; 1264 for (idx = 0; idx < nbReceived; ++idx) 1265 { 1266 CArray<int,1>& tmp = vec_indi[idx]; 1267 for (ind = 0; ind < tmp.numElements(); ++ind) 1268 { 1269 gloInd = tmp(ind); 1270 if (0 == globalLocalIndexMap_.count(gloInd)) 1271 { 1272 index(nbIndexGlob) = gloInd % n_glo; 1273 globalLocalIndexMap_[gloInd] = nbIndexGlob; 1274 ++nbIndexGlob; 1275 } 1276 } 1277 } 1278 1279 // Resize index to its real size 1280 if (nbIndexGlob==0) index.resize(nbIndexGlob) ; 1281 else index.resizeAndPreserve(nbIndexGlob); 1282 1283 int nbData = nbIndexGlob; 1284 CArray<int,1> nonCompressedData(nbData); 1285 nonCompressedData = -1; 1286 mask.resize(nbData); 1052 1287 if (hasValue) 1053 { 1054 value_srv.resize(zoom_size_srv); 1055 if (hasBounds_) bound_srv.resize(2,zoom_size_srv); 1056 if (hasLabel) label_srv.resize(zoom_size_srv); 1057 } 1288 value.resize(nbData); 1289 if (hasBounds) 1290 bounds.resize(2,nbData); 1291 if (hasLabel) 1292 label.resize(nbData); 1293 1294 nbData = 0; 1295 for (idx = 0; idx < nbReceived; ++idx) 1296 { 1297 CArray<int,1>& indi = vec_indi[idx]; 1298 CArray<int,1>& dataIndi = vec_dataInd[idx]; 1299 CArray<bool,1>& maskIndi = vec_mask[idx]; 1300 int nb = indi.numElements(); 1301 for (int n = 0; n < nb; ++n) 1302 { 1303 locInd = globalLocalIndexMap_[size_t(indi(n))]; 1304 1305 nonCompressedData(locInd) = (-1 == nonCompressedData(locInd)) ? 
dataIndi(n) : nonCompressedData(locInd); 1306 1307 if (!mask(locInd)) // Only rewrite mask if it's not true 1308 mask(locInd) = maskIndi(n); 1309 1310 if (hasValue) 1311 value(locInd) = vec_val[idx](n); 1312 1313 if (hasBounds) 1314 { 1315 bounds(0,locInd) = vec_bounds[idx](0,n); 1316 bounds(1,locInd) = vec_bounds[idx](1,n); 1317 } 1318 1319 if (hasLabel) 1320 label(locInd) = vec_label[idx](n); 1321 } 1322 } 1323 1324 int nbCompressedData = 0; 1325 for (idx = 0; idx < nonCompressedData.numElements(); ++idx) 1326 { 1327 if (0 <= nonCompressedData(idx)) 1328 ++nbCompressedData; 1329 } 1330 1331 data_index.resize(nbCompressedData); 1332 nbCompressedData = 0; 1333 for (idx = 0; idx < nonCompressedData.numElements(); ++idx) 1334 { 1335 if (0 <= nonCompressedData(idx)) 1336 { 1337 data_index(nbCompressedData) = idx % n; 1338 ++nbCompressedData; 1339 } 1340 } 1341 1342 data_begin.setValue(0); 1058 1343 } 1059 1344 … 1090 1375 } 1091 1376 1377 /* 1378 Add a transformation to the axis. This function only serves the Fortran interface 1379 \param [in] transType transformation type 1380 \param [in] id identifier of the transformation object 1381 */ 1092 1382 CTransformation<CAxis>* CAxis::addTransformation(ETranformationType transType, const StdString& id) 1093 1383 { … 1096 1386 } 1097 1387 1388 /* 1389 Check whether an axis has a (spatial) transformation 1390 */ 1098 1391 bool CAxis::hasTransformation() 1099 1392 { … 1101 1394 } 1102 1395 1396 /* 1397 Set transformations 1398 \param [in] axisTrans transformations to set 1399 */ 1103 1400 void CAxis::setTransformations(const TransMapTypes& axisTrans) 1104 1401 { … 1106 1403 } 1107 1404 1405 /* 1406 Return all transformations held by the axis 1407 \return the transformations the axis holds 1408 */ 1108 1409 CAxis::TransMapTypes CAxis::getAllTransformations(void) 1109 1410 { … 1111 1412 } 1112 1413 1414 /* 1415 Duplicate the transformations of another axis 1416 \param [in] src axis whose transformations are copied 1417 */ 1113 1418 void CAxis::duplicateTransformation(CAxis* src) 1114 1419 { … 1140 1445 } 1141 1446 1447 void CAxis::setContextClient(CContextClient* contextClient) 1448 { 1449 if (clientsSet.find(contextClient)==clientsSet.end()) 1450 { 1451 clients.push_back(contextClient) ; 1452 clientsSet.insert(contextClient); 1453 } 1454 } 1455 1142 1456 void CAxis::parse(xml::CXMLNode & node) 1143 1457 { -
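Note on the receive path above: CAxis::recvDistributedAttributes merges the index lists sent by several clients, and the same global index can arrive from more than one client. The globalLocalIndexMap_ hash map gives each global index a local position the first time it is seen, so overlapped indexes are stored only once. The following standalone sketch is illustrative only (it is not part of the changeset and uses std::map and std::vector in place of boost::unordered_map and CArray):

#include <iostream>
#include <map>
#include <vector>

int main()
{
  // Index lists received from two hypothetical clients; index 2 overlaps.
  std::vector<std::vector<int> > received(2);
  received[0].push_back(0); received[0].push_back(1); received[0].push_back(2);
  received[1].push_back(2); received[1].push_back(3);

  std::map<int, int> globalLocalIndexMap; // global index -> local position
  std::vector<int> index;                 // plays the role of CAxis::index

  for (size_t i = 0; i < received.size(); ++i)
    for (size_t j = 0; j < received[i].size(); ++j)
    {
      int gloInd = received[i][j];
      if (globalLocalIndexMap.count(gloInd) == 0) // keep the first occurrence only
      {
        globalLocalIndexMap[gloInd] = static_cast<int>(index.size());
        index.push_back(gloInd);
      }
    }

  for (size_t i = 0; i < index.size(); ++i) std::cout << index[i] << ' ';
  std::cout << std::endl; // prints: 0 1 2 3
  return 0;
}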
XIOS/dev/branch_openmp/src/node/axis.hpp
r1331 r1460 16 16 #include "transformation.hpp" 17 17 #include "transformation_enum.hpp" 18 19 #include "mpi_std.hpp" 18 20 19 21 namespace xios { … … 38 40 , public CAxisAttributes 39 41 { 42 /// typedef /// 43 typedef CObjectTemplate<CAxis> SuperClass; 44 typedef CAxisAttributes SuperClassAttribute; 45 46 public: 40 47 enum EEventId 41 48 { 42 EVENT_ID_SERVER_ATTRIBUT, 43 EVENT_ID_INDEX, 49 EVENT_ID_DISTRIBUTION_ATTRIBUTE, 44 50 EVENT_ID_DISTRIBUTED_VALUE, 45 EVENT_ID_NON_DISTRIBUTED_VALUE 51 EVENT_ID_NON_DISTRIBUTED_VALUE, 52 EVENT_ID_NON_DISTRIBUTED_ATTRIBUTES, 53 EVENT_ID_DISTRIBUTED_ATTRIBUTES 46 54 } ; 47 55 48 /// typedef /// 49 typedef CObjectTemplate<CAxis> SuperClass; 50 typedef CAxisAttributes SuperClassAttribute; 51 52 public : 53 56 public: 54 57 typedef CAxisAttributes RelAttributes; 55 58 typedef CAxisGroup RelGroup; … … 68 71 const std::set<StdString> & getRelFiles(void) const; 69 72 70 const std::vector<int>& getIndexesToWrite(void) const;71 int get NumberWrittenIndexes() const;72 int get TotalNumberWrittenIndexes() const;73 int getOffsetWrittenIndexes() const;73 int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 74 int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 75 int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 76 CArray<int, 1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 74 77 75 std::map<int, StdSize> getAttributesBufferSize(); 78 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid, 79 CServerDistributionDescription::ServerDistributionType disType = CServerDistributionDescription::BAND_DISTRIBUTION); 76 80 77 81 /// Test /// … … 93 97 virtual void parse(xml::CXMLNode & node); 94 98 99 void setContextClient(CContextClient* contextClient); 100 95 101 /// Accesseurs statiques /// 96 102 static StdString GetName(void); … … 98 104 static ENodeType GetType(void); 99 105 100 void sendServerAttribut(const std::vector<int>& globalDim, int orderPositionInGrid, 101 CServerDistributionDescription::ServerDistributionType distType); 102 static bool dispatchEvent(CEventServer& event); 103 static void recvServerAttribut(CEventServer& event); 104 void recvServerAttribut(CBufferIn& buffer) ; 106 static bool dispatchEvent(CEventServer& event); 107 105 108 void checkAttributesOnClient(); 106 109 void checkAttributesOnClientAfterTransformation(const std::vector<int>& globalDim, int orderPositionInGrid, … … 110 113 111 114 void checkEligibilityForCompressedOutput(); 115 size_t getGlobalWrittenSize(void) ; 112 116 117 void computeWrittenIndex(); 118 void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 113 119 bool hasTransformation(); 114 120 void solveInheritanceTransformation(); 115 TransMapTypes getAllTransformations(); 116 void fillInValues(const CArray<double,1>& values); 121 TransMapTypes getAllTransformations(); 117 122 void duplicateTransformation(CAxis*); 118 123 CTransformation<CAxis>* addTransformation(ETranformationType transType, const StdString& id=""); 119 124 bool isEqual(CAxis* axis); 125 bool zoomByIndex(); 120 126 121 public: 122 int zoom_begin_srv, zoom_end_srv, zoom_size_srv; 123 int ni_srv, begin_srv, end_srv; 124 int global_zoom_begin_srv, global_zoom_end_srv, global_zoom_size_srv; 125 CArray<double,1> value_srv; 126 CArray<double,2> bound_srv; 127 CArray<StdString,1> label_srv; 128 CArray<int,1> zoom_index_srv; 129 bool hasValue; 127 public: 128 bool hasValue; 129 CArray<size_t,1> localIndexToWriteOnServer; 130 130 131 131 private: … … 
134 134 void checkZoom(); 135 135 void checkBounds(); 136 void checkLabel(); 137 void sendValue(); 138 void computeConnectedServer(const std::vector<int>& globalDim, int orderPositionInGrid, 136 checkLabel(); 137 void sendAttributes(const std::vector<int>& globalDim, int orderPositionInGrid, 138 CServerDistributionDescription::ServerDistributionType distType); 139 void sendDistributionAttribute(const std::vector<int>& globalDim, int orderPositionInGrid, 140 CServerDistributionDescription::ServerDistributionType distType); 141 void computeConnectedClients(const std::vector<int>& globalDim, int orderPositionInGrid, 139 142 CServerDistributionDescription::ServerDistributionType distType); 140 void sendDistributedValue(); 141 void sendNonDistributedValue(); 142 bool zoomByIndex(); 143 144 static void recvIndex(CEventServer& event); 145 static void recvDistributedValue(CEventServer& event); 146 static void recvNonDistributedValue(CEventServer& event); 147 void recvIndex(int rank, CBufferIn& buffer); 148 void recvDistributedValue(int rank, CBufferIn& buffer); 149 void recvNonDistributedValue(int rank, CBufferIn& buffer); 144 void sendNonDistributedAttributes(void); 145 void sendDistributedAttributes(void); 146 147 static void recvNonDistributedAttributes(CEventServer& event); 148 static void recvDistributedAttributes(CEventServer& event); 149 static void recvDistributionAttribute(CEventServer& event); 150 void recvNonDistributedAttributes(int rank, CBufferIn& buffer); 151 void recvDistributedAttributes(vector<int>& rank, vector<CBufferIn*> buffers); 152 void recvDistributionAttribute(CBufferIn& buffer); 150 153 151 154 void setTransformations(const TransMapTypes&); 152 155 153 156 private: 157 158 /** Clients that have to send an axis. There can be multiple clients in case of secondary server, otherwise only one client. */ 159 std::list<CContextClient*> clients; 160 std::set<CContextClient*> clientsSet; 161 154 162 bool isChecked; 155 163 bool areClientAttributesChecked_; 156 164 bool isClientAfterTransformationChecked; 157 165 std::set<StdString> relFiles, relFilesCompressed; 158 TransMapTypes transformationMap_; 159 bool isDistributed_; 166 TransMapTypes transformationMap_; 160 167 //!
True if and only if the data defined on the axis can be outputted in a compressed way 161 168 bool isCompressible_; 162 std::map<int,int> nbConnectedClients_; // Mapping of number of communicating client to a server 163 std::map<int, vector<size_t> > indSrv_; // Global index of each client sent to server 164 std::map<int, vector<int> > indWrittenSrv_; // Global written index of each client sent to server 169 std::map<int, map<int,int> > nbSenders; // Mapping of number of communicating client to a server 170 std::map<int, boost::unordered_map<int, vector<size_t> > > indSrv_; // Global index of each client sent to server 171 // std::map<int, vector<int> > indWrittenSrv_; // Global written index of each client sent to server 172 boost::unordered_map<size_t,size_t> globalLocalIndexMap_; 165 173 std::vector<int> indexesToWrite; 166 int numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 167 std::vector<int> connectedServerRank_; 168 std::map<int, CArray<int,1> > indiSrv_; 169 bool hasBounds_; 170 bool hasLabel; 174 std::map<int,int> numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 175 std::map<int, CArray<int, 1> > compressedIndexToWriteOnServer; 176 std::map<int, std::vector<int> > connectedServerRank_; 177 bool hasBounds; 178 bool hasLabel; 179 bool computedWrittenIndex_; 171 180 172 181 private: -
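Note on the clients/clientsSet pair declared above: CAxis::setContextClient keeps the registered context clients in a std::list (there can be several of them when a secondary server is used) and mirrors them in a std::set so that the same client is never registered twice. A minimal standalone sketch of this idiom, with a placeholder Client type standing in for CContextClient:

#include <iostream>
#include <list>
#include <set>

struct Client { int rank; };                // stand-in for CContextClient

std::list<Client*> clients;                 // preserves registration order
std::set<Client*> clientsSet;               // guards against double registration

void setContextClient(Client* contextClient)
{
  if (clientsSet.find(contextClient) == clientsSet.end())
  {
    clients.push_back(contextClient);
    clientsSet.insert(contextClient);
  }
}

int main()
{
  Client a, b;
  setContextClient(&a);
  setContextClient(&b);
  setContextClient(&a);                     // ignored: already registered
  std::cout << clients.size() << std::endl; // prints 2
  return 0;
}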
XIOS/dev/branch_openmp/src/node/context.cpp
r1342 r1460 1 2 1 #include "context.hpp" 3 2 #include "attribute_template.hpp" … 17 16 #include "timer.hpp" 18 17 #include "memtrack.hpp" 18 #include <limits> 19 #include <fstream> 20 #include "server.hpp" 21 #include "distribute_file_server2.hpp" 19 22 20 23 using namespace ep_lib; 21 24 22 23 25 namespace xios { 24 26 25 27 boost::shared_ptr<CContextGroup> * CContext::root_ptr = 0; 26 28 27 /// ////////////////////// Définitions ////////////////////// /// 29 /// ////////////////////// Définitions ////////////////////// /// 28 30 29 31 CContext::CContext(void) 30 32 : CObjectTemplate<CContext>(), CContextAttributes() 31 , calendar(), hasClient(false), hasServer(false), isPostProcessed(false), finalized(false) 33 , calendar(), hasClient(false), hasServer(false) 34 , isPostProcessed(false), finalized(false) 32 35 , idServer_(), client(0), server(0) 36 , allProcessed(false), countChildCtx_(0) 33 37 { /* Ne rien faire de plus */ } 34 38 35 39 CContext::CContext(const StdString & id) 36 40 : CObjectTemplate<CContext>(id), CContextAttributes() 37 , calendar(), hasClient(false), hasServer(false), isPostProcessed(false), finalized(false) 41 , calendar(), hasClient(false), hasServer(false) 42 , isPostProcessed(false), finalized(false) 38 43 , idServer_(), client(0), server(0) 44 , allProcessed(false), countChildCtx_(0) 39 45 { /* Ne rien faire de plus */ } 40 46 … 43 49 delete client; 44 50 delete server; 51 for (std::vector<CContextClient*>::iterator it = clientPrimServer.begin(); it != clientPrimServer.end(); it++) delete *it; 52 for (std::vector<CContextServer*>::iterator it = serverPrimServer.begin(); it != serverPrimServer.end(); it++) delete *it; 53 45 54 } 46 55 … 182 191 if (!this->hasChild()) 183 192 { 184 //oss << "<!-- No definition -->" << std::endl; // fait planter l'incrémentation 193 //oss << "<!-- No definition -->" << std::endl; // fait planter l'incrémentation 185 194 } 186 195 else … 243 252 void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 244 253 { 245 hasClient=true; 246 client = new CContextClient(this,intraComm, interComm, cxtServer); 247 registryIn=new CRegistry(intraComm); 248 registryIn->setPath(getId()) ; 249 if (client->clientRank==0) registryIn->fromFile("xios_registry.bin") ; 250 registryIn->bcastRegistry() ; 251 252 registryOut=new CRegistry(intraComm) ; 253 registryOut->setPath(getId()) ; 254 254 255 hasClient = true; 255 256 ep_lib::MPI_Comm intraCommServer, interCommServer; 256 if (cxtServer) // Attached mode 257 { 258 intraCommServer = intraComm; 259 interCommServer = interComm; 257 258 259 if (CServer::serverLevel != 1) 260 // initClient is called by client 261 { 262 client = new CContextClient(this, intraComm, interComm, cxtServer); 263 if (cxtServer) // Attached mode 264 { 265 intraCommServer = intraComm; 266 interCommServer = interComm; 267 } 268 else 269 { 270 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 271 comms.push_back(intraCommServer); 272 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 273 comms.push_back(interCommServer); 274 } 275 /* for registry take the id of client context */ 276 /* for servers, suppress the _server_ from id */ 277 string contextRegistryId=getId() ; 278 size_t pos=contextRegistryId.find("_server_") ; 279 if (pos!=std::string::npos) contextRegistryId=contextRegistryId.substr(0,pos) ; 280 281 registryIn=new CRegistry(intraComm); 282 registryIn->setPath(contextRegistryId) ; 283 if (client->clientRank==0) registryIn->fromFile("xios_registry.bin") ; 284
registryIn->bcastRegistry() ; 285 registryOut=new CRegistry(intraComm) ; 286 287 registryOut->setPath(contextRegistryId) ; 288 289 server = new CContextServer(this, intraCommServer, interCommServer); 260 290 } 261 291 else 262 { 292 // initClient is called by primary server 293 { 294 clientPrimServer.push_back(new CContextClient(this, intraComm, interComm)); 263 295 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 264 296 comms.push_back(intraCommServer); 265 297 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 266 298 comms.push_back(interCommServer); 267 } 268 server = new CContextServer(this,intraCommServer,interCommServer); 269 } 270 271 void CContext::setClientServerBuffer() 272 { 273 // Estimated minimum event size for small events (10 is an arbitrary constant just for safety) 299 serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer)); 300 } 301 } 302 303 /*! 304 Sets client buffers. 305 \param [in] contextClient 306 \param [in] bufferForWriting True if buffers are used for sending data for writing 307 This flag is only true for client and server-1 for communication with server-2 308 */ 309 void CContext::setClientServerBuffer(CContextClient* contextClient, bool bufferForWriting) 310 { 311 // Estimated minimum event size for small events (10 is an arbitrary constant just for safety) 274 312 const size_t minEventSize = CEventClient::headerSize + getIdServer().size() + 10 * sizeof(int); 275 // Ensure there is at least some room for 20 of such events in the buffers 276 size_t minBufferSize = std::max(CXios::minBufferSize, 20 * minEventSize); 313 314 // Ensure there is at least some room for 20 of such events in the buffers 315 size_t minBufferSize = std::max(CXios::minBufferSize, 20 * minEventSize); 316 277 317 #define DECLARE_NODE(Name_, name_) \ 278 318 if (minBufferSize < sizeof(C##Name_##Definition)) minBufferSize = sizeof(C##Name_##Definition); … … 284 324 // Compute the buffer sizes needed to send the attributes and data corresponding to fields 285 325 std::map<int, StdSize> maxEventSize; 286 std::map<int, StdSize> bufferSize = getAttributesBufferSize(maxEventSize );287 std::map<int, StdSize> dataBufferSize = getDataBufferSize(maxEventSize );326 std::map<int, StdSize> bufferSize = getAttributesBufferSize(maxEventSize, contextClient, bufferForWriting); 327 std::map<int, StdSize> dataBufferSize = getDataBufferSize(maxEventSize, contextClient, bufferForWriting); 288 328 289 329 std::map<int, StdSize>::iterator it, ite = dataBufferSize.end(); … … 291 331 if (it->second > bufferSize[it->first]) bufferSize[it->first] = it->second; 292 332 293 // Apply the buffer size factor and check that we are above the minimum buffersize333 // Apply the buffer size factor, check that we are above the minimum buffer size and below the maximum size 294 334 ite = bufferSize.end(); 295 335 for (it = bufferSize.begin(); it != ite; ++it) … … 297 337 it->second *= CXios::bufferSizeFactor; 298 338 if (it->second < minBufferSize) it->second = minBufferSize; 339 if (it->second > CXios::maxBufferSize) it->second = CXios::maxBufferSize; 299 340 } 300 341 301 342 // Leaders will have to send some control events so ensure there is some room for those in the buffers 302 if (c lient->isServerLeader())303 { 304 const std::list<int>& ranks = c lient->getRanksServerLeader();343 if (contextClient->isServerLeader()) 344 { 345 const std::list<int>& ranks = contextClient->getRanksServerLeader(); 305 346 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); 
itRank != itRankEnd; ++itRank) 306 347 { … 312 353 } 313 354 } 314 315 client->setBufferSize(bufferSize, maxEventSize); 355 contextClient->setBufferSize(bufferSize, maxEventSize); 356 316 357 } 317 358 … 322 363 } 323 364 324 //! Initialize server 325 365 void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 326 366 { … 328 368 server = new CContextServer(this,intraComm,interComm); 329 369 370 /* for registry take the id of client context */ 371 /* for servers, suppress the _server_ from id */ 372 string contextRegistryId=getId() ; 373 size_t pos=contextRegistryId.find("_server_") ; 374 if (pos!=std::string::npos) contextRegistryId=contextRegistryId.substr(0,pos) ; 375 330 376 registryIn=new CRegistry(intraComm); 331 registryIn->setPath( getId()) ; 377 registryIn->setPath(contextRegistryId) ; 332 378 if (server->intraCommRank==0) registryIn->fromFile("xios_registry.bin") ; 333 379 registryIn->bcastRegistry() ; 334 380 registryOut=new CRegistry(intraComm) ; 335 registryOut->setPath( getId()) ; 381 registryOut->setPath(contextRegistryId) ; 336 382 337 383 ep_lib::MPI_Comm intraCommClient, interCommClient; … 352 398 353 399 //! Try to send the buffers and receive possible answers 354 bool CContext::checkBuffersAndListen(void) 355 { 356 client->checkBuffers(); 357 358 bool hasTmpBufferedEvent = client->hasTemporarilyBufferedEvent(); 359 if (hasTmpBufferedEvent) 360 hasTmpBufferedEvent = !client->sendTemporarilyBufferedEvent(); 361 362 // Don't process events if there is a temporarily buffered event 363 return server->eventLoop(!hasTmpBufferedEvent); 364 } 400 bool CContext::checkBuffersAndListen(bool enableEventsProcessing /*= true*/) 401 { 402 bool clientReady, serverFinished; 403 404 // Only classical servers are non-blocking 405 if (CServer::serverLevel == 0) 406 { 407 client->checkBuffers(); 408 bool hasTmpBufferedEvent = client->hasTemporarilyBufferedEvent(); 409 if (hasTmpBufferedEvent) 410 hasTmpBufferedEvent = !client->sendTemporarilyBufferedEvent(); 411 // Don't process events if there is a temporarily buffered event 412 return server->eventLoop(!hasTmpBufferedEvent || !enableEventsProcessing); 413 } 414 else if (CServer::serverLevel == 1) 415 { 416 if (!finalized) 417 client->checkBuffers(); 418 bool serverFinished = true; 419 if (!finalized) 420 serverFinished = server->eventLoop(enableEventsProcessing); 421 bool serverPrimFinished = true; 422 for (int i = 0; i < clientPrimServer.size(); ++i) 423 { 424 if (!finalized) 425 clientPrimServer[i]->checkBuffers(); 426 if (!finalized) 427 serverPrimFinished *= serverPrimServer[i]->eventLoop(enableEventsProcessing); 428 } 429 return ( serverFinished && serverPrimFinished); 430 } 431 432 else if (CServer::serverLevel == 2) 433 { 434 client->checkBuffers(); 435 return server->eventLoop(enableEventsProcessing); 436 } 437 } 365 438 366 439 //!
Terminate a context 367 440 void CContext::finalize(void) 368 441 { 369 if ( !finalized) 442 if (hasClient && !hasServer) // For now we only use server level 1 to read data 370 443 { 371 finalized = true; 372 if (hasClient) sendRegistry() ; 373 client->finalize(); 374 while (!server->hasFinished()) 375 { 376 server->eventLoop(); 377 } 378 379 if (hasServer) 380 { 381 closeAllFile(); 382 registryOut->hierarchicalGatherRegistry() ; 383 if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 384 } 385 386 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 387 ep_lib::MPI_Comm_free(&(*it)); 388 comms.clear(); 444 doPreTimestepOperationsForEnabledReadModeFiles(); 389 445 } 446 // Send registry upon calling the function the first time 447 if (countChildCtx_ == 0) 448 if (hasClient) sendRegistry() ; 449 450 // Client: 451 // (1) blocking send context finalize to its server 452 // (2) blocking receive context finalize from its server 453 // (3) some memory deallocations 454 if (CXios::isClient) 455 { 456 // Make sure that client (model) enters the loop only once 457 if (countChildCtx_ < 1) 458 { 459 ++countChildCtx_; 460 461 client->finalize(); 462 while (client->havePendingRequests()) 463 client->checkBuffers(); 464 465 while (!server->hasFinished()) 466 server->eventLoop(); 467 468 if (hasServer) // Mode attaché 469 { 470 closeAllFile(); 471 registryOut->hierarchicalGatherRegistry() ; 472 if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 473 } 474 475 //! Deallocate client buffers 476 client->releaseBuffers(); 477 478 //! Free internally allocated communicators 479 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 480 ep_lib::MPI_Comm_free(&(*it)); 481 comms.clear(); 482 483 #pragma omp critical (_output) 484 info(20)<<"CContext: Context <"<<getId()<<"> is finalized."<<endl; 485 } 486 } 487 else if (CXios::isServer) 488 { 489 // First context finalize message received from a model 490 // Send context finalize to its child contexts (if any) 491 if (countChildCtx_ == 0) 492 for (int i = 0; i < clientPrimServer.size(); ++i) 493 clientPrimServer[i]->finalize(); 494 495 // (Last) context finalized message received 496 if (countChildCtx_ == clientPrimServer.size()) 497 { 498 // Blocking send of context finalize message to its client (e.g. primary server or model) 499 #pragma omp critical (_output) 500 info(100)<<"DEBUG: context "<<getId()<<" Send client finalize<<"<<endl ; 501 client->finalize(); 502 bool bufferReleased; 503 do 504 { 505 client->checkBuffers(); 506 bufferReleased = !client->havePendingRequests(); 507 } while (!bufferReleased); 508 finalized = true; 509 510 closeAllFile(); // Moved here to make sure that server level 1 can close files 511 if (hasServer && !hasClient) 512 { 513 registryOut->hierarchicalGatherRegistry() ; 514 if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 515 } 516 517 //! Deallocate client buffers 518 client->releaseBuffers(); 519 for (int i = 0; i < clientPrimServer.size(); ++i) 520 clientPrimServer[i]->releaseBuffers(); 521 522 //! Free internally allocated communicators 523 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 524 ep_lib::MPI_Comm_free(&(*it)); 525 comms.clear(); 526 527 #pragma omp critical (_output) 528 info(20)<<"CContext: Context <"<<getId()<<"> is finalized."<<endl; 529 } 530 531 ++countChildCtx_; 532 } 533 } 534 535 //!
Free internally allocated communicators 536 void CContext::freeComms(void) 537 { 538 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 539 ep_lib::MPI_Comm_free(&(*it)); 540 comms.clear(); 541 } 542 543 //! Deallocate buffers allocated by clientContexts 544 void CContext::releaseClientBuffers(void) 545 { 546 client->releaseBuffers(); 547 for (int i = 0; i < clientPrimServer.size(); ++i) 548 clientPrimServer[i]->releaseBuffers(); 549 } 550 551 void CContext::postProcessingGlobalAttributes() 552 { 553 if (allProcessed) return; 554 555 // After the xml is parsed, there is some more work to do in post-processing 556 postProcessing(); 557 558 // Check grid and calculate its distribution 559 checkGridEnabledFields(); 560 561 // Distribute files between secondary servers according to the data size 562 distributeFiles(); 563 564 setClientServerBuffer(client, (hasClient && !hasServer)); 565 for (int i = 0; i < clientPrimServer.size(); ++i) 566 setClientServerBuffer(clientPrimServer[i], true); 567 568 if (hasClient) 569 { 570 // Send all attributes of current context to server 571 this->sendAllAttributesToServer(); 572 573 // Send all attributes of current calendar 574 CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer(); 575 576 // We have enough information to send to server 577 // First of all, send all enabled files 578 sendEnabledFiles(this->enabledWriteModeFiles); 579 // We only use server-level 1 (for now) to read data 580 if (!hasServer) 581 sendEnabledFiles(this->enabledReadModeFiles); 582 583 // Then, send all enabled fields 584 sendEnabledFieldsInFiles(this->enabledWriteModeFiles); 585 if (!hasServer) 586 sendEnabledFieldsInFiles(this->enabledReadModeFiles); 587 588 // Then, check whether we have domain_ref, axis_ref or scalar_ref attached to the enabled fields 589 // If any, send them to the server 590 sendRefDomainsAxisScalars(this->enabledWriteModeFiles); 591 if (!hasServer) 592 sendRefDomainsAxisScalars(this->enabledReadModeFiles); 593 594 // Check whether enabled fields have grid_ref, if any, send this info to server 595 sendRefGrid(this->enabledFiles); 596 // This code may be useful in the future when we want to separate completely read and write 597 // sendRefGrid(this->enabledWriteModeFiles); 598 // if (!hasServer) 599 // sendRefGrid(this->enabledReadModeFiles); 600 601 // A grid of enabled fields may be composed of several components; these must be checked and their 602 // checked attributes sent to the server 603 sendGridComponentEnabledFieldsInFiles(this->enabledFiles); // This code can be separated in two (one for reading, another for writing) 604 605 // We have an xml tree on the server side and now it should also be processed 606 sendPostProcessing(); 607 608 // Finally, we send information of grid itself to server 609 sendGridEnabledFieldsInFiles(this->enabledWriteModeFiles); 610 if (!hasServer) 611 sendGridEnabledFieldsInFiles(this->enabledReadModeFiles); 612 } 613 allProcessed = true; 614 } 615 616 void CContext::sendPostProcessingGlobalAttributes() 617 { 618 // Use correct context client to send message 619 // int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1; 620 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 621 for (int i = 0; i < nbSrvPools; ++i) 622 { 623 CContextClient* contextClientTmp = (0 != clientPrimServer.size()) ?
clientPrimServer[i] : client; 624 CEventClient event(getType(),EVENT_ID_POST_PROCESS_GLOBAL_ATTRIBUTES); 625 626 if (contextClientTmp->isServerLeader()) 627 { 628 CMessage msg; 629 if (hasServer) 630 msg<<this->getIdServer(i); 631 else 632 msg<<this->getIdServer(); 633 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 634 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 635 event.push(*itRank,1,msg); 636 contextClientTmp->sendEvent(event); 637 } 638 else contextClientTmp->sendEvent(event); 639 } 640 } 641 642 void CContext::recvPostProcessingGlobalAttributes(CEventServer& event) 643 { 644 CBufferIn* buffer=event.subEvents.begin()->buffer; 645 string id; 646 *buffer>>id; 647 get(id)->recvPostProcessingGlobalAttributes(*buffer); 648 } 649 650 void CContext::recvPostProcessingGlobalAttributes(CBufferIn& buffer) 651 { 652 postProcessingGlobalAttributes(); 390 653 } 391 654 … 401 664 void CContext::closeDefinition(void) 402 665 { 403 CTimer::get("Context : close definition").resume() ; 404 // There is nothing the client needs to send to the server 405 if (hasClient) 406 { 407 // After the xml is parsed, there is some more work to do in post-processing 408 postProcessing(); 409 } 410 setClientServerBuffer(); 411 666 CTimer::get("Context : close definition").resume() ; 667 postProcessingGlobalAttributes(); 668 669 if (hasClient) sendPostProcessingGlobalAttributes(); 670 671 // There is some processing that should be done after all of the above. For example: check mask or index 672 this->buildFilterGraphOfEnabledFields(); 673 412 674 if (hasClient && !hasServer) 413 { 414 // Send all attributes of current context to server 415 this->sendAllAttributesToServer(); 416 417 // Send all attributes of current calendar 418 CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer(); 419 420 // We have enough information to send to server 421 // First of all, send all enabled files 422 sendEnabledFiles(); 423 424 // Then, send all enabled fields 425 sendEnabledFields(); 426 427 // At last, we have all info of domain and axis, then send them 428 sendRefDomainsAxis(); 429 430 // After that, send all grid (if any) 431 sendRefGrid(); 675 { 676 buildFilterGraphOfFieldsWithReadAccess(); 677 postProcessFilterGraph(); 432 678 } 433 434 // We have an xml tree on the server side and now it should also be processed 435 if (hasClient && !hasServer) sendPostProcessing(); 436 437 // There is some processing that should be done after all of the above. For example: check mask or index 438 if (hasClient) 439 { 440 this->buildFilterGraphOfEnabledFields(); 441 buildFilterGraphOfFieldsWithReadAccess(); 442 this->solveAllRefOfEnabledFields(true); 443 } 444 445 // Now tell server that it can process all messages from client 446 if (hasClient && !hasServer) this->sendCloseDefinition(); 679 680 checkGridEnabledFields(); 681 682 if (hasClient) this->sendProcessingGridOfEnabledFields(); 683 if (hasClient) this->sendCloseDefinition(); 447 684 448 685 // Nettoyage de l'arborescence 449 if (hasClient && !hasServer) CleanTree(); // Only on client side?? 686 if (hasClient) CleanTree(); // Only on client side??
450 687 451 688 if (hasClient) 452 689 { 453 690 sendCreateFileHeader(); 454 455 startPrefetchingOfEnabledReadModeFiles(); 691 if (!hasServer) startPrefetchingOfEnabledReadModeFiles(); 456 692 } 457 693 CTimer::get("Context : close definition").suspend() ; 458 694 } 459 695 460 void CContext::findAllEnabledFields(void) 461 { 462 for (unsigned int i = 0; i < this->enabledFiles.size(); i++) 463 (void)this->enabledFiles[i]->getEnabledFields(); 464 } 465 466 void CContext::findAllEnabledFieldsInReadModeFiles(void) 467 { 468 for (unsigned int i = 0; i < this->enabledReadModeFiles.size(); ++i) 469 (void)this->enabledReadModeFiles[i]->getEnabledFields(); 696 void CContext::findAllEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles) 697 { 698 for (unsigned int i = 0; i < activeFiles.size(); i++) 699 (void)activeFiles[i]->getEnabledFields(); 470 700 } 471 701 … 476 706 } 477 707 708 void CContext::sendGridComponentEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles) 709 { 710 int size = activeFiles.size(); 711 for (int i = 0; i < size; ++i) 712 { 713 activeFiles[i]->sendGridComponentOfEnabledFields(); 714 } 715 } 716 717 /*! 718 Send active (enabled) fields in file from a client to others 719 \param [in] activeFiles files containing enabled fields to send 720 */ 721 void CContext::sendGridEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles) 722 { 723 int size = activeFiles.size(); 724 for (int i = 0; i < size; ++i) 725 { 726 activeFiles[i]->sendGridOfEnabledFields(); 727 } 728 } 729 730 void CContext::checkGridEnabledFields() 731 { 732 int size = enabledFiles.size(); 733 for (int i = 0; i < size; ++i) 734 { 735 enabledFiles[i]->checkGridOfEnabledFields(); 736 } 737 } 738 739 /*! 740 Check grid of active (enabled) fields in file 741 \param [in] activeFiles files containing enabled fields whose grids need checking 742 */ 743 void CContext::checkGridEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles) 744 { 745 int size = activeFiles.size(); 746 for (int i = 0; i < size; ++i) 747 { 748 activeFiles[i]->checkGridOfEnabledFields(); 749 } 750 } 751 752 /*! 753 Go up the hierarchical tree via field_ref and check the attributes of fields 754 This can be done in a client; all computed information will then be sent from this client to the others 755 \param [in] sendToServer Flag to indicate whether calculated information will be sent 756 */ 478 757 void CContext::solveOnlyRefOfEnabledFields(bool sendToServer) 479 758 { … 490 769 } 491 770 492 void CContext::solveAllRefOfEnabledFields(bool sendToServer) 771 /*! 772 Go up the hierarchical tree via field_ref and check the attributes of fields. 773 The transformation can be done in this step. 774 All computed information will be sent from this client to others.
775 \param [in] sendToServer Flag to indicate whether calculated information will be sent 776 */ 777 void CContext::solveAllRefOfEnabledFieldsAndTransform(bool sendToServer) 493 778 { 494 779 int size = this->enabledFiles.size(); 495 780 for (int i = 0; i < size; ++i) 496 781 { 497 this->enabledFiles[i]->solveAllRefOfEnabledFields (sendToServer); 782 this->enabledFiles[i]->solveAllRefOfEnabledFieldsAndTransform(sendToServer); 498 783 } 499 784 } … 508 793 } 509 794 795 void CContext::postProcessFilterGraph() 796 { 797 int size = enabledFiles.size(); 798 for (int i = 0; i < size; ++i) 799 { 800 enabledFiles[i]->postProcessFilterGraph(); 801 } 802 } 803 510 804 void CContext::startPrefetchingOfEnabledReadModeFiles() 511 805 { … 514 808 { 515 809 enabledReadModeFiles[i]->prefetchEnabledReadModeFields(); 810 } 811 } 812 813 void CContext::doPreTimestepOperationsForEnabledReadModeFiles() 814 { 815 int size = enabledReadModeFiles.size(); 816 for (int i = 0; i < size; ++i) 817 { 818 enabledReadModeFiles[i]->doPreTimestepOperationsForEnabledReadModeFields(); 516 819 } 517 820 } … 553 856 } 554 857 555 void CContext::solveAllInheritance(bool apply) // default : apply = true 556 { 557 858 void CContext::solveAllInheritance(bool apply) 859 { 860 // Résolution des héritages descendants (càd des héritages de groupes) 861 // Résolution des héritages descendants (càd des héritages de groupes) 559 861 // pour chacun des contextes. 560 862 solveDescInheritance(apply); 561 863 562 // Résolution des héritages par référence au niveau des fichiers. 864 // Résolution des héritages par référence au niveau des fichiers. 563 865 const vector<CFile*> allFiles=CFile::getAll(); 564 866 const vector<CGrid*> allGrids= CGrid::getAll(); 565 867 566 //if (hasClient && !hasServer) 567 if (hasClient) 868 if (hasClient && !hasServer) 869 //if (hasClient) 568 870 { 569 871 for (unsigned int i = 0; i < allFiles.size(); i++) … 583 886 584 887 for (unsigned int i = 0; i < allFiles.size(); i++) 585 if (!allFiles[i]->enabled.isEmpty()) // Si l'attribut 'enabled' est défini. 888 if (!allFiles[i]->enabled.isEmpty()) // Si l'attribut 'enabled' est défini. 586 889 { 587 if (allFiles[i]->enabled.getValue()) // Si l'attribut 'enabled' est fixé à vrai. 890 if (allFiles[i]->enabled.getValue()) // Si l'attribut 'enabled' est fixé à vrai.
588 891 { 589 892 if ((initDate + allFiles[i]->output_freq.getValue()) < (initDate + this->getCalendar()->getTimeStep())) … … 610 913 611 914 if (enabledFiles.size() == 0) 612 DEBUG(<<"Aucun fichier ne va tre sorti dans le contexte nomm\""915 DEBUG(<<"Aucun fichier ne va être sorti dans le contexte nommé \"" 613 916 << getId() << "\" !"); 614 } 615 616 void CContext::findEnabledReadModeFiles(void) 917 918 } 919 920 void CContext::distributeFiles(void) 921 { 922 bool distFileMemory=false ; 923 distFileMemory=CXios::getin<bool>("server2_dist_file_memory", distFileMemory); 924 925 if (distFileMemory) distributeFileOverMemoryBandwith() ; 926 else distributeFileOverBandwith() ; 927 } 928 929 930 void CContext::distributeFileOverBandwith(void) 931 { 932 double eps=std::numeric_limits<double>::epsilon()*10 ; 933 934 // If primary server 935 if (hasServer && hasClient) 936 { 937 std::ofstream ofs(("distribute_file_"+getId()+".dat").c_str(), std::ofstream::out); 938 int nbPools = clientPrimServer.size(); 939 940 // (1) Find all enabled files in write mode 941 // for (int i = 0; i < this->enabledFiles.size(); ++i) 942 // { 943 // if (enabledFiles[i]->mode.isEmpty() || (!enabledFiles[i]->mode.isEmpty() && enabledFiles[i]->mode.getValue() == CFile::mode_attr::write )) 944 // enabledWriteModeFiles.push_back(enabledFiles[i]); 945 // } 946 947 // (2) Estimate the data volume for each file 948 int size = this->enabledWriteModeFiles.size(); 949 std::vector<std::pair<double, CFile*> > dataSizeMap; 950 double dataPerPool = 0; 951 int nfield=0 ; 952 ofs<<size<<endl ; 953 for (size_t i = 0; i < size; ++i) 954 { 955 CFile* file = this->enabledWriteModeFiles[i]; 956 ofs<<file->getId()<<endl ; 957 StdSize dataSize=0; 958 std::vector<CField*> enabledFields = file->getEnabledFields(); 959 size_t numEnabledFields = enabledFields.size(); 960 ofs<<numEnabledFields<<endl ; 961 for (size_t j = 0; j < numEnabledFields; ++j) 962 { 963 dataSize += enabledFields[j]->getGlobalWrittenSize() ; 964 ofs<<enabledFields[j]->grid->getId()<<endl ; 965 ofs<<enabledFields[j]->getGlobalWrittenSize()<<endl ; 966 } 967 double outFreqSec = (Time)(calendar->getCurrentDate()+file->output_freq)-(Time)(calendar->getCurrentDate()) ; 968 double dataSizeSec= dataSize/ outFreqSec; 969 ofs<<dataSizeSec<<endl ; 970 nfield++ ; 971 // add epsilon*nField to dataSizeSec in order to preserve reproductive ordering when sorting 972 dataSizeMap.push_back(make_pair(dataSizeSec + dataSizeSec * eps * nfield , file)); 973 dataPerPool += dataSizeSec; 974 } 975 dataPerPool /= nbPools; 976 std::sort(dataSizeMap.begin(), dataSizeMap.end()); 977 978 // (3) Assign contextClient to each enabled file 979 980 std::multimap<double,int> poolDataSize ; 981 // multimap is not garanty to preserve stable sorting in c++98 but it seems it does for c++11 982 983 int j; 984 double dataSize ; 985 for (j = 0 ; j < nbPools ; ++j) poolDataSize.insert(std::pair<double,int>(0.,j)) ; 986 987 for (int i = dataSizeMap.size()-1; i >= 0; --i) 988 { 989 dataSize=(*poolDataSize.begin()).first ; 990 j=(*poolDataSize.begin()).second ; 991 dataSizeMap[i].second->setContextClient(clientPrimServer[j]); 992 dataSize+=dataSizeMap[i].first; 993 poolDataSize.erase(poolDataSize.begin()) ; 994 poolDataSize.insert(std::pair<double,int>(dataSize,j)) ; 995 } 996 997 for (std::multimap<double,int>:: iterator it=poolDataSize.begin() ; it!=poolDataSize.end(); ++it) 998 { 999 #pragma omp critical (_output) 1000 info(30)<<"Load Balancing for servers (perfect=1) : "<<it->second<<" : ratio 
"<<it->first*1./dataPerPool<<endl ; 1001 } 1002 1003 for (int i = 0; i < this->enabledReadModeFiles.size(); ++i) 1004 { 1005 enabledReadModeFiles[i]->setContextClient(client); 1006 } 1007 } 1008 else 1009 { 1010 for (int i = 0; i < this->enabledFiles.size(); ++i) 1011 enabledFiles[i]->setContextClient(client); 1012 } 1013 } 1014 1015 void CContext::distributeFileOverMemoryBandwith(void) 1016 { 1017 // If primary server 1018 if (hasServer && hasClient) 1019 { 1020 int nbPools = clientPrimServer.size(); 1021 double ratio=0.5 ; 1022 ratio=CXios::getin<double>("server2_dist_file_memory_ratio", ratio); 1023 1024 int nFiles = this->enabledWriteModeFiles.size(); 1025 vector<SDistFile> files(nFiles); 1026 vector<SDistGrid> grids; 1027 map<string,int> gridMap ; 1028 string gridId; 1029 int gridIndex=0 ; 1030 1031 for (size_t i = 0; i < nFiles; ++i) 1032 { 1033 StdSize dataSize=0; 1034 CFile* file = this->enabledWriteModeFiles[i]; 1035 std::vector<CField*> enabledFields = file->getEnabledFields(); 1036 size_t numEnabledFields = enabledFields.size(); 1037 1038 files[i].id_=file->getId() ; 1039 files[i].nbGrids_=numEnabledFields; 1040 files[i].assignedGrid_ = new int[files[i].nbGrids_] ; 1041 1042 for (size_t j = 0; j < numEnabledFields; ++j) 1043 { 1044 gridId=enabledFields[j]->grid->getId() ; 1045 if (gridMap.find(gridId)==gridMap.end()) 1046 { 1047 gridMap[gridId]=gridIndex ; 1048 SDistGrid newGrid; 1049 grids.push_back(newGrid) ; 1050 gridIndex++ ; 1051 } 1052 files[i].assignedGrid_[j]=gridMap[gridId] ; 1053 grids[files[i].assignedGrid_[j]].size_=enabledFields[j]->getGlobalWrittenSize() ; 1054 dataSize += enabledFields[j]->getGlobalWrittenSize() ; // usefull 1055 } 1056 double outFreqSec = (Time)(calendar->getCurrentDate()+file->output_freq)-(Time)(calendar->getCurrentDate()) ; 1057 files[i].bandwith_= dataSize/ outFreqSec ; 1058 } 1059 1060 double bandwith=0 ; 1061 double memory=0 ; 1062 1063 for(int i=0; i<nFiles; i++) bandwith+=files[i].bandwith_ ; 1064 for(int i=0; i<nFiles; i++) files[i].bandwith_ = files[i].bandwith_/bandwith * ratio ; 1065 1066 for(int i=0; i<grids.size(); i++) memory+=grids[i].size_ ; 1067 for(int i=0; i<grids.size(); i++) grids[i].size_ = grids[i].size_ / memory * (1.0-ratio) ; 1068 1069 distributeFileOverServer2(nbPools, grids.size(), &grids[0], nFiles, &files[0]) ; 1070 1071 vector<double> memorySize(nbPools,0.) ; 1072 vector< set<int> > serverGrids(nbPools) ; 1073 vector<double> bandwithSize(nbPools,0.) 
; 1074 1075 for (size_t i = 0; i < nFiles; ++i) 1076 { 1077 bandwithSize[files[i].assignedServer_] += files[i].bandwith_* bandwith /ratio ; 1078 for(int j=0 ; j<files[i].nbGrids_;j++) 1079 { 1080 if (serverGrids[files[i].assignedServer_].find(files[i].assignedGrid_[j]) == serverGrids[files[i].assignedServer_].end()) 1081 { 1082 memorySize[files[i].assignedServer_]+= grids[files[i].assignedGrid_[j]].size_ * memory / (1.0-ratio); 1083 serverGrids[files[i].assignedServer_].insert(files[i].assignedGrid_[j]) ; 1084 } 1085 } 1086 enabledWriteModeFiles[i]->setContextClient(clientPrimServer[files[i].assignedServer_]) ; 1087 delete [] files[i].assignedGrid_ ; 1088 } 1089 1090 for (int i = 0; i < nbPools; ++i) 1091 { 1092 #pragma omp critical (_output) 1093 info(100)<<"Pool server level2 "<<i<<" assigned file bandwith "<<bandwithSize[i]*86400.*4./1024/1024.<<" Mb / days"<<endl ; 1094 } 1095 for (int i = 0; i < nbPools; ++i) 1096 { 1097 #pragma omp critical (_output) 1098 info(100)<<"Pool server level2 "<<i<<" assigned grid memory "<<memorySize[i]*100/1024./1024.<<" Mb"<<endl ; 1099 } 1100 1101 for (int i = 0; i < this->enabledReadModeFiles.size(); ++i) 1102 { 1103 enabledReadModeFiles[i]->setContextClient(client); 1104 } 1105 1106 } 1107 else 1108 { 1109 for (int i = 0; i < this->enabledFiles.size(); ++i) 1110 enabledFiles[i]->setContextClient(client); 1111 } 1112 } 1113 1114 1115 1116 /*! 1117 Find all files in write mode 1118 */ 1119 void CContext::findEnabledWriteModeFiles(void) 617 1120 { 618 1121 int size = this->enabledFiles.size(); 619 1122 for (int i = 0; i < size; ++i) 620 1123 { 1124 if (enabledFiles[i]->mode.isEmpty() || 1125 (!enabledFiles[i]->mode.isEmpty() && enabledFiles[i]->mode.getValue() == CFile::mode_attr::write )) 1126 enabledWriteModeFiles.push_back(enabledFiles[i]); 1127 } 1128 } 1129 1130 /*! 1131 Find all files in read mode 1132 */ 1133 void CContext::findEnabledReadModeFiles(void) 1134 { 1135 int size = this->enabledFiles.size(); 1136 for (int i = 0; i < size; ++i) 1137 { 621 1138 if (!enabledFiles[i]->mode.isEmpty() && enabledFiles[i]->mode.getValue() == CFile::mode_attr::read) 622 1139 enabledReadModeFiles.push_back(enabledFiles[i]); … … 631 1148 for (; it != end; it++) 632 1149 { 1150 #pragma omp critical (_output) 633 1151 info(30)<<"Closing File : "<<(*it)->getId()<<endl; 634 1152 (*it)->close(); … … 669 1187 recvRegistry(event); 670 1188 return true; 671 break; 672 1189 break; 1190 case EVENT_ID_POST_PROCESS_GLOBAL_ATTRIBUTES: 1191 recvPostProcessingGlobalAttributes(event); 1192 return true; 1193 break; 1194 case EVENT_ID_PROCESS_GRID_ENABLED_FIELDS: 1195 recvProcessingGridOfEnabledFields(event); 1196 return true; 1197 break; 673 1198 default : 674 1199 ERROR("bool CContext::dispatchEvent(CEventServer& event)", … … 682 1207 void CContext::sendCloseDefinition(void) 683 1208 { 684 CEventClient event(getType(),EVENT_ID_CLOSE_DEFINITION); 685 if (client->isServerLeader()) 686 { 687 CMessage msg; 688 msg<<this->getIdServer(); 689 const std::list<int>& ranks = client->getRanksServerLeader(); 690 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 691 event.push(*itRank,1,msg); 692 client->sendEvent(event); 693 } 694 else client->sendEvent(event); 1209 // Use correct context client to send message 1210 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 1211 for (int i = 0; i < nbSrvPools; ++i) 1212 { 1213 CContextClient* contextClientTmp = (hasServer) ? 
clientPrimServer[i] : client; 1214 CEventClient event(getType(),EVENT_ID_CLOSE_DEFINITION); 1215 if (contextClientTmp->isServerLeader()) 1216 { 1217 CMessage msg; 1218 if (hasServer) 1219 msg<<this->getIdServer(i); 1220 else 1221 msg<<this->getIdServer(); 1222 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1223 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1224 event.push(*itRank,1,msg); 1225 contextClientTmp->sendEvent(event); 1226 } 1227 else contextClientTmp->sendEvent(event); 1228 } 695 1229 } 696 1230 … … 698 1232 void CContext::recvCloseDefinition(CEventServer& event) 699 1233 { 700 701 1234 CBufferIn* buffer=event.subEvents.begin()->buffer; 702 1235 string id; … … 708 1241 void CContext::sendUpdateCalendar(int step) 709 1242 { 710 if (!hasServer) 711 { 1243 // Use correct context client to send message 1244 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 1245 for (int i = 0; i < nbSrvPools; ++i) 1246 { 1247 CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client; 712 1248 CEventClient event(getType(),EVENT_ID_UPDATE_CALENDAR); 713 if (client->isServerLeader()) 714 { 715 CMessage msg; 716 msg<<this->getIdServer()<<step; 717 const std::list<int>& ranks = client->getRanksServerLeader(); 718 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 719 event.push(*itRank,1,msg); 720 client->sendEvent(event); 721 } 722 else client->sendEvent(event); 1249 1250 if (contextClientTmp->isServerLeader()) 1251 { 1252 CMessage msg; 1253 if (hasServer) 1254 msg<<this->getIdServer(i)<<step; 1255 else 1256 msg<<this->getIdServer()<<step; 1257 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1258 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1259 event.push(*itRank,1,msg); 1260 contextClientTmp->sendEvent(event); 1261 } 1262 else contextClientTmp->sendEvent(event); 723 1263 } 724 1264 } … … 739 1279 buffer>>step; 740 1280 updateCalendar(step); 1281 if (hasClient && hasServer) 1282 { 1283 sendUpdateCalendar(step); 1284 } 741 1285 } 742 1286 … … 744 1288 void CContext::sendCreateFileHeader(void) 745 1289 { 746 CEventClient event(getType(),EVENT_ID_CREATE_FILE_HEADER); 747 if (client->isServerLeader()) 748 { 749 CMessage msg; 750 msg<<this->getIdServer(); 751 const std::list<int>& ranks = client->getRanksServerLeader(); 752 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 753 event.push(*itRank,1,msg) ; 754 client->sendEvent(event); 755 } 756 else client->sendEvent(event); 1290 // Use correct context client to send message 1291 // int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1; 1292 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 1293 for (int i = 0; i < nbSrvPools; ++i) 1294 { 1295 CContextClient* contextClientTmp = (hasServer) ? 
clientPrimServer[i] : client; 1296 CEventClient event(getType(),EVENT_ID_CREATE_FILE_HEADER); 1297 1298 if (contextClientTmp->isServerLeader()) 1299 { 1300 CMessage msg; 1301 if (hasServer) 1302 msg<<this->getIdServer(i); 1303 else 1304 msg<<this->getIdServer(); 1305 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1306 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1307 event.push(*itRank,1,msg) ; 1308 contextClientTmp->sendEvent(event); 1309 } 1310 else contextClientTmp->sendEvent(event); 1311 } 757 1312 } 758 1313 … … 769 1324 void CContext::recvCreateFileHeader(CBufferIn& buffer) 770 1325 { 771 createFileHeader(); 1326 if (!hasClient && hasServer) 1327 createFileHeader(); 1328 } 1329 1330 //! Client side: Send a message to do some post processing on server 1331 void CContext::sendProcessingGridOfEnabledFields() 1332 { 1333 // Use correct context client to send message 1334 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 1335 for (int i = 0; i < nbSrvPools; ++i) 1336 { 1337 CContextClient* contextClientTmp = (0 != clientPrimServer.size()) ? clientPrimServer[i] : client; 1338 CEventClient event(getType(),EVENT_ID_PROCESS_GRID_ENABLED_FIELDS); 1339 1340 if (contextClientTmp->isServerLeader()) 1341 { 1342 CMessage msg; 1343 if (hasServer) 1344 msg<<this->getIdServer(i); 1345 else 1346 msg<<this->getIdServer(); 1347 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1348 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1349 event.push(*itRank,1,msg); 1350 contextClientTmp->sendEvent(event); 1351 } 1352 else contextClientTmp->sendEvent(event); 1353 } 1354 } 1355 1356 //! Server side: Receive a message to do some post processing 1357 void CContext::recvProcessingGridOfEnabledFields(CEventServer& event) 1358 { 1359 CBufferIn* buffer=event.subEvents.begin()->buffer; 1360 string id; 1361 *buffer>>id; 772 1362 } 773 1363 … … 775 1365 void CContext::sendPostProcessing() 776 1366 { 777 if (!hasServer) 778 { 1367 // Use correct context client to send message 1368 // int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1; 1369 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 1370 for (int i = 0; i < nbSrvPools; ++i) 1371 { 1372 CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client; 779 1373 CEventClient event(getType(),EVENT_ID_POST_PROCESS); 780 if (c lient->isServerLeader())1374 if (contextClientTmp->isServerLeader()) 781 1375 { 782 1376 CMessage msg; 783 msg<<this->getIdServer(); 784 const std::list<int>& ranks = client->getRanksServerLeader(); 1377 if (hasServer) 1378 msg<<this->getIdServer(i); 1379 else 1380 msg<<this->getIdServer(); 1381 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 785 1382 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 786 787 c lient->sendEvent(event);788 } 789 else c lient->sendEvent(event);1383 event.push(*itRank,1,msg); 1384 contextClientTmp->sendEvent(event); 1385 } 1386 else contextClientTmp->sendEvent(event); 790 1387 } 791 1388 } … … 818 1415 } 819 1416 1417 const StdString& CContext::getIdServer(const int i) 1418 { 1419 idServer_ = this->getId(); 1420 idServer_ += "_server_"; 1421 idServer_ += boost::lexical_cast<string>(i); 1422 return idServer_; 1423 } 1424 1425 820 1426 /*! 
821 1427 \brief Do some simple post processing after parsing the xml file … 839 1445 this->solveAllInheritance(); 840 1446 1447 // ShowTree(info(10)); 1448 841 1449 // Check if some axis, domains or grids are eligible for compressed indexed output. 842 1450 // Warning: This must be done after solving the inheritance and before the rest of post-processing 843 checkAxisDomainsGridsEligibilityForCompressedOutput(); 1451 checkAxisDomainsGridsEligibilityForCompressedOutput(); 844 1452 845 1453 // Check if some automatic time series should be generated 846 // Warning: This must be done after solving the inheritance and before the rest of post-processing 847 prepareTimeseries(); 848 849 //Initialisation du vecteur 'enabledFiles' contenant la liste des fichiers à sortir. 850 this->findEnabledFiles(); 851 this->findEnabledReadModeFiles(); 852 853 // Find all enabled fields of each file 854 this->findAllEnabledFields(); 855 this->findAllEnabledFieldsInReadModeFiles(); 856 857 if (hasClient && !hasServer) 858 { 859 // Try to read attributes of fields in file then fill in corresponding grid (or domain, axis) 860 this->readAttributesOfEnabledFieldsInReadModeFiles(); 861 } 1454 // Warning: This must be done after solving the inheritance and before the rest of post-processing 1455 1456 // The timeseries should only be prepared in client 1457 if (hasClient && !hasServer) prepareTimeseries(); 1458 1459 //Initialisation du vecteur 'enabledFiles' contenant la liste des fichiers à sortir. 1460 findEnabledFiles(); 1461 findEnabledWriteModeFiles(); 1462 findEnabledReadModeFiles(); 1463 1464 // For now, only read files with client and only one level server 1465 // if (hasClient && !hasServer) findEnabledReadModeFiles(); 1466 1467 // Find all enabled fields of each file 1468 findAllEnabledFieldsInFiles(this->enabledWriteModeFiles); 1469 findAllEnabledFieldsInFiles(this->enabledReadModeFiles); 1470 1471 // For now, only read files with client and only one level server 1472 // if (hasClient && !hasServer) 1473 // findAllEnabledFieldsInFiles(this->enabledReadModeFiles); 1474 1475 if (hasClient && !hasServer) 1476 { 1477 initReadFiles(); 1478 // Try to read attributes of fields in file then fill in corresponding grid (or domain, axis) 1479 this->readAttributesOfEnabledFieldsInReadModeFiles(); 1480 } 862 1481 863 1482 // Only search and rebuild all reference objects of enabled fields, don't transform 864 1483 this->solveOnlyRefOfEnabledFields(false); 865 1484 866 // Search and rebuild all reference objects of enabled fields 867 this->solveAllRefOfEnabledFields (false); 1485 // Search and rebuild all reference objects of enabled fields, and transform 1486 this->solveAllRefOfEnabledFieldsAndTransform(false); 868 1487 869 1488 // Find all fields with read access from the public API 870 findFieldsWithReadAccess(); 1489 if (hasClient && !hasServer) findFieldsWithReadAccess(); 871 1490 // and solve all the references for them 872 solveAllRefOfFieldsWithReadAccess(); 1491 if (hasClient && !hasServer) solveAllRefOfFieldsWithReadAccess(); 873 1492 874 1493 isPostProcessed = true; … 877 1496 /*! 878 1497 * Compute the required buffer size to send the attributes (mostly those grid related). 
879 *880 1498 * \param maxEventSize [in/out] the size of the bigger event for each connected server 1499 * \param [in] contextClient 1500 * \param [in] bufferForWriting True if buffers are used for sending data for writing 1501 This flag is only true for client and server-1 for communication with server-2 881 1502 */ 882 std::map<int, StdSize> CContext::getAttributesBufferSize(std::map<int, StdSize>& maxEventSize) 883 { 884 std::map<int, StdSize> attributesSize; 885 886 if (hasClient) 887 { 888 size_t numEnabledFiles = this->enabledFiles.size(); 889 for (size_t i = 0; i < numEnabledFiles; ++i) 890 { 891 CFile* file = this->enabledFiles[i]; 892 1503 std::map<int, StdSize> CContext::getAttributesBufferSize(std::map<int, StdSize>& maxEventSize, 1504 CContextClient* contextClient, bool bufferForWriting /*= false*/) 1505 { 1506 // As calendar attributes are sent even if there are no active files or fields, maps are initialized according to the size of calendar attributes 1507 std::map<int, StdSize> attributesSize = CCalendarWrapper::get(CCalendarWrapper::GetDefName())->getMinimumBufferSizeForAttributes(contextClient); 1508 maxEventSize = CCalendarWrapper::get(CCalendarWrapper::GetDefName())->getMinimumBufferSizeForAttributes(contextClient); 1509 1510 std::vector<CFile*>& fileList = this->enabledFiles; 1511 size_t numEnabledFiles = fileList.size(); 1512 for (size_t i = 0; i < numEnabledFiles; ++i) 1513 { 1514 // CFile* file = this->enabledWriteModeFiles[i]; 1515 CFile* file = fileList[i]; 1516 std::vector<CField*> enabledFields = file->getEnabledFields(); 1517 size_t numEnabledFields = enabledFields.size(); 1518 for (size_t j = 0; j < numEnabledFields; ++j) 1519 { 1520 const std::map<int, StdSize> mapSize = enabledFields[j]->getGridAttributesBufferSize(contextClient, bufferForWriting); 1521 std::map<int, StdSize>::const_iterator it = mapSize.begin(), itE = mapSize.end(); 1522 for (; it != itE; ++it) 1523 { 1524 // If attributesSize[it->first] does not exist, it will be zero-initialized 1525 // so we can use it safely without checking for its existence 1526 if (attributesSize[it->first] < it->second) 1527 attributesSize[it->first] = it->second; 1528 1529 if (maxEventSize[it->first] < it->second) 1530 maxEventSize[it->first] = it->second; 1531 } 1532 } 1533 } 1534 return attributesSize; 1535 } 1536 1537 /*! 1538 * Compute the required buffer size to send the fields data. 1539 * \param maxEventSize [in/out] the size of the bigger event for each connected server 1540 * \param [in] contextClient 1541 * \param [in] bufferForWriting True if buffers are used for sending data for writing 1542 This flag is only true for client and server-1 for communication with server-2 1543 */ 1544 std::map<int, StdSize> CContext::getDataBufferSize(std::map<int, StdSize>& maxEventSize, 1545 CContextClient* contextClient, bool bufferForWriting /*= false*/) 1546 { 1547 std::map<int, StdSize> dataSize; 1548 1549 // Find all reference domain and axis of all active fields 1550 std::vector<CFile*>& fileList = bufferForWriting ?
this->enabledWriteModeFiles : this->enabledReadModeFiles; 1551 size_t numEnabledFiles = fileList.size(); 1552 for (size_t i = 0; i < numEnabledFiles; ++i) 1553 { 1554 // CFile* file = this->enabledFiles[i]; 1555 CFile* file = fileList[i]; 1556 if (file->getContextClient() == contextClient) 1557 { 893 1558 std::vector<CField*> enabledFields = file->getEnabledFields(); 894 1559 size_t numEnabledFields = enabledFields.size(); 895 1560 for (size_t j = 0; j < numEnabledFields; ++j) 896 1561 { 897 const std::map<int, StdSize> mapSize = enabledFields[j]->getGridAttributesBufferSize(); 898 std::map<int, StdSize>::const_iterator it = mapSize.begin(), itE = mapSize.end(); 899 for (; it != itE; ++it) 900 { 901 // If attributesSize[it->first] does not exist, it will be zero-initialized 902 // so we can use it safely without checking for its existence 903 if (attributesSize[it->first] < it->second) 904 attributesSize[it->first] = it->second; 905 906 if (maxEventSize[it->first] < it->second) 907 maxEventSize[it->first] = it->second; 908 } 909 } 910 } 911 } 912 913 return attributesSize; 914 } 915 916 /*! 917 * Compute the required buffer size to send the fields data. 918 * 919 * \param maxEventSize [in/out] the size of the bigger event for each connected server 920 */ 921 std::map<int, StdSize> CContext::getDataBufferSize(std::map<int, StdSize>& maxEventSize) 922 { 923 CFile::mode_attr::t_enum mode = hasClient ? CFile::mode_attr::write : CFile::mode_attr::read; 924 925 std::map<int, StdSize> dataSize; 926 927 // Find all reference domain and axis of all active fields 928 size_t numEnabledFiles = this->enabledFiles.size(); 929 for (size_t i = 0; i < numEnabledFiles; ++i) 930 { 931 CFile* file = this->enabledFiles[i]; 932 CFile::mode_attr::t_enum fileMode = file->mode.isEmpty() ? CFile::mode_attr::write : file->mode.getValue(); 933 934 if (fileMode == mode) 935 { 936 std::vector<CField*> enabledFields = file->getEnabledFields(); 937 size_t numEnabledFields = enabledFields.size(); 938 for (size_t j = 0; j < numEnabledFields; ++j) 939 { 940 const std::map<int, StdSize> mapSize = enabledFields[j]->getGridDataBufferSize(); 1562 // const std::vector<std::map<int, StdSize> > mapSize = enabledFields[j]->getGridDataBufferSize(contextClient); 1563 const std::map<int, StdSize> mapSize = enabledFields[j]->getGridDataBufferSize(contextClient,bufferForWriting); 941 1564 std::map<int, StdSize>::const_iterator it = mapSize.begin(), itE = mapSize.end(); 942 1565 for (; it != itE; ++it) … … 944 1567 // If dataSize[it->first] does not exist, it will be zero-initialized 945 1568 // so we can use it safely without checking for its existence 946 1569 if (CXios::isOptPerformance) 947 1570 dataSize[it->first] += it->second; 948 1571 else if (dataSize[it->first] < it->second) 949 1572 dataSize[it->first] = it->second; 950 1573 951 1574 if (maxEventSize[it->first] < it->second) 952 1575 maxEventSize[it->first] = it->second; 953 1576 } … … 955 1578 } 956 1579 } 957 958 1580 return dataSize; 959 1581 } 960 1582 961 1583 //! Client side: Send information of active files (files enabled for writing) 962 void CContext::sendEnabledFiles( )963 { 964 int size = this->enabledFiles.size();1584 void CContext::sendEnabledFiles(const std::vector<CFile*>& activeFiles) 1585 { 1586 int size = activeFiles.size(); 965 1587 966 1588 // In a context, each type has a root definition, e.g: axis, domain, field.
… … 972 1594 for (int i = 0; i < size; ++i) 973 1595 { 974 cfgrpPtr->sendCreateChild(this->enabledFiles[i]->getId()); 975 this->enabledFiles[i]->sendAllAttributesToServer(); 976 this->enabledFiles[i]->sendAddAllVariables(); 1596 CFile* f = activeFiles[i]; 1597 cfgrpPtr->sendCreateChild(f->getId(),f->getContextClient()); 1598 f->sendAllAttributesToServer(f->getContextClient()); 1599 f->sendAddAllVariables(f->getContextClient()); 977 1600 } 978 1601 } 979 1602 980 1603 //! Client side: Send information of active fields (the ones written to files) 981 void CContext::sendEnabledFields ()982 { 983 int size = this->enabledFiles.size();1604 void CContext::sendEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles) 1605 { 1606 int size = activeFiles.size(); 984 1607 for (int i = 0; i < size; ++i) 985 1608 { 986 this->enabledFiles[i]->sendEnabledFields();1609 activeFiles[i]->sendEnabledFields(activeFiles[i]->getContextClient()); 987 1610 } 988 1611 } … … 1107 1730 1108 1731 //! Client side: Send information of reference grid of active fields 1109 void CContext::sendRefGrid( )1732 void CContext::sendRefGrid(const std::vector<CFile*>& activeFiles) 1110 1733 { 1111 1734 std::set<StdString> gridIds; 1112 int sizeFile = this->enabledFiles.size();1735 int sizeFile = activeFiles.size(); 1113 1736 CFile* filePtr(NULL); 1114 1737 1116 1739 for (int i = 0; i < sizeFile; ++i) 1117 1740 { 1118 filePtr = this->enabledFiles[i];1741 filePtr = activeFiles[i]; 1119 1742 std::vector<CField*> enabledFields = filePtr->getEnabledFields(); 1120 1743 int sizeField = enabledFields.size(); … … 1140 1763 } 1141 1764 1142 1143 //! Client side: Send information of reference domains and axes of active fields 1144 void CContext::sendRefDomainsAxis() 1765 //! Client side: Send information of reference domains, axes and scalars of active fields 1766 void CContext::sendRefDomainsAxisScalars(const std::vector<CFile*>& activeFiles) 1145 1767 { 1146 1768 std::set<StdString> domainIds, axisIds, scalarIds; 1147 1769 1148 1770 // Find all reference domain and axis of all active fields 1149 int numEnabledFiles = this->enabledFiles.size();1771 int numEnabledFiles = activeFiles.size(); 1150 1772 for (int i = 0; i < numEnabledFiles; ++i) 1151 1773 { 1152 std::vector<CField*> enabledFields = this->enabledFiles[i]->getEnabledFields();1774 std::vector<CField*> enabledFields = activeFiles[i]->getEnabledFields(); 1153 1775 int numEnabledFields = enabledFields.size(); 1154 1776 for (int j = 0; j < numEnabledFields; ++j) … … 1205 1827 void CContext::updateCalendar(int step) 1206 1828 { 1207 #pragma omp critical (_output) 1208 info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 1209 calendar->update(step); 1210 #pragma omp critical (_output) 1211 info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1212 #ifdef XIOS_MEMTRACK_LIGHT 1213 #pragma omp critical (_output) 1214 info(50) << " Current memory used by XIOS : "<< MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; 1215 #endif 1216 if (hasClient) 1829 int prevStep = calendar->getStep(); 1830 1831 if (prevStep < step) 1217 1832 { 1218 doPostTimestepOperationsForEnabledReadModeFiles(); 1219 garbageCollector.invalidate(calendar->getCurrentDate()); 1833 if (hasClient && !hasServer) // For now we only use server level 1 to read data 1834 { 1835 doPreTimestepOperationsForEnabledReadModeFiles(); 1836 } 1837 1838 #pragma omp critical (_output) 1839 info(50) << "updateCalendar :
before : " << calendar->getCurrentDate() << endl; 1840 calendar->update(step); 1841 #pragma omp critical (_output) 1842 info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1843 #ifdef XIOS_MEMTRACK_LIGHT 1844 #pragma omp critical (_output) 1845 info(50) << " Current memory used by XIOS : "<< MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; 1846 #endif 1847 1848 if (hasClient && !hasServer) // For now we only use server level 1 to read data 1849 { 1850 doPostTimestepOperationsForEnabledReadModeFiles(); 1851 garbageCollector.invalidate(calendar->getCurrentDate()); 1852 } 1220 1853 } 1854 else if (prevStep == step) 1855 { 1856 #pragma omp critical (_output) 1857 info(50) << "updateCalendar: already at step " << step << ", no operation done." << endl; 1858 } 1859 else // if (prevStep > step) 1860 ERROR("void CContext::updateCalendar(int step)", 1861 << "Illegal calendar update: previous step was " << prevStep << ", new step " << step << "is in the past!") 1862 } 1863 1864 void CContext::initReadFiles(void) 1865 { 1866 vector<CFile*>::const_iterator it; 1867 1868 for (it=enabledReadModeFiles.begin(); it != enabledReadModeFiles.end(); it++) 1869 { 1870 (*it)->initRead(); 1871 } 1221 1872 } 1222 1873 1223 1874 //! Server side: Create header of netcdf file 1224 void CContext::createFileHeader(void 1875 void CContext::createFileHeader(void) 1225 1876 { 1226 1877 vector<CFile*>::const_iterator it; 1227 1878 1228 1879 for (it=enabledFiles.begin(); it != enabledFiles.end(); it++) 1880 // for (it=enabledWriteModeFiles.begin(); it != enabledWriteModeFiles.end(); it++) 1229 1881 { 1230 (*it)->init File();1882 (*it)->initWrite(); 1231 1883 } 1232 1884 } … … 1269 1921 return (context); 1270 1922 } 1271 1272 1923 1273 1924 … … 1295 1946 registryOut->hierarchicalGatherRegistry() ; 1296 1947 1297 CEventClient event(CContext::GetType(), CContext::EVENT_ID_SEND_REGISTRY); 1298 if (client->isServerLeader()) 1948 // Use correct context client to send message 1949 int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1; 1950 for (int i = 0; i < nbSrvPools; ++i) 1299 1951 { 1300 CMessage msg ; 1301 msg<<this->getIdServer(); 1302 if (client->clientRank==0) msg<<*registryOut ; 1303 const std::list<int>& ranks = client->getRanksServerLeader(); 1304 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1305 event.push(*itRank,1,msg); 1306 client->sendEvent(event); 1307 } 1308 else client->sendEvent(event); 1952 CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client; 1953 CEventClient event(CContext::GetType(), CContext::EVENT_ID_SEND_REGISTRY); 1954 if (contextClientTmp->isServerLeader()) 1955 { 1956 CMessage msg ; 1957 if (hasServer) 1958 msg<<this->getIdServer(i); 1959 else 1960 msg<<this->getIdServer(); 1961 if (contextClientTmp->clientRank==0) msg<<*registryOut ; 1962 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1963 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1964 event.push(*itRank,1,msg); 1965 contextClientTmp->sendEvent(event); 1966 } 1967 else contextClientTmp->sendEvent(event); 1968 } 1309 1969 } 1310 1970 1971 /*! 1972 * \fn bool CContext::isFinalized(void) 1973 * Context is finalized if it received context post finalize event. 
1974 */ 1975 bool CContext::isFinalized(void) 1976 { 1977 return finalized; 1978 } 1979 1311 1980 } // namespace xios -
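Every client-side send method in this file (sendCloseDefinition, sendCreateFileHeader, sendProcessingGridOfEnabledFields, sendPostProcessing, sendRegistry) now follows the same dispatch pattern: loop over the secondary-server pools when they exist, otherwise fall back to the single client, and let only the pool leader push the message to its server leader ranks while every rank still takes part in the send. A minimal sketch of that pattern, using hypothetical stand-in types rather than the real CContextClient/CEventClient API:

    #include <cstddef>
    #include <list>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for CContextClient, for illustration only.
    struct PoolClient
    {
      bool isLeader;                    // true if this rank leads the pool
      std::list<int> serverLeaderRanks; // ranks the leader pushes the event to
      void push(int /*rank*/, const std::string& /*msg*/) { /* enqueue */ }
      void send() { /* flush event buffers; collective over the pool */ }
    };

    // One pass per secondary-server pool (server-1 talking to server-2),
    // or a single pass through 'client' in the classic one-level setup.
    void sendToAllPools(std::vector<PoolClient*>& primPools, PoolClient& client,
                        bool hasServer, const std::string& contextId)
    {
      std::size_t nbSrvPools = hasServer ? primPools.size() : 1;
      for (std::size_t i = 0; i < nbSrvPools; ++i)
      {
        PoolClient& target = primPools.empty() ? client : *primPools[i];
        // Secondary pools receive the pool-specific id "<id>_server_<i>",
        // which is what getIdServer(const int) builds above.
        std::string msg = hasServer ? contextId + "_server_" + std::to_string(i)
                                    : contextId;
        if (target.isLeader)
          for (std::list<int>::const_iterator it = target.serverLeaderRanks.begin();
               it != target.serverLeaderRanks.end(); ++it)
            target.push(*it, msg);
        target.send(); // non-leaders must still enter the collective send
      }
    }

This is why every branch above ends with contextClientTmp->sendEvent(event): the send is collective over the pool, so ranks that push nothing still have to call it.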
XIOS/dev/branch_openmp/src/node/context.hpp
r1334 r1460 14 14 #include "garbage_collector.hpp" 15 15 #include "registry.hpp" 16 //#include "mpi.hpp" 16 17 17 18 … … 51 52 EVENT_ID_CLOSE_DEFINITION,EVENT_ID_UPDATE_CALENDAR, 52 53 EVENT_ID_CREATE_FILE_HEADER,EVENT_ID_CONTEXT_FINALIZE, 53 EVENT_ID_POST_PROCESS, EVENT_ID_SEND_REGISTRY 54 EVENT_ID_POST_PROCESS, EVENT_ID_SEND_REGISTRY, 55 EVENT_ID_POST_PROCESS_GLOBAL_ATTRIBUTES, 56 EVENT_ID_PROCESS_GRID_ENABLED_FIELDS 54 57 }; 55 58 … … 88 91 public : 89 92 // Initialize server or client 93 void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 90 94 void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 91 void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0);92 95 bool isInitialized(void); 93 96 94 97 // Put sever or client into loop state 95 bool checkBuffersAndListen( void);98 bool checkBuffersAndListen(bool enableEventsProcessing=true); 96 99 97 100 // Finalize a context 98 101 void finalize(void); 102 bool isFinalized(void); 103 99 104 void closeDefinition(void); 100 105 101 106 // Some functions to process context 102 void findAllEnabledFields(void); 103 void findAllEnabledFieldsInReadModeFiles(void); 107 void findAllEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles); 108 // void findAllEnabledFields(void); 109 // void findAllEnabledFieldsInReadModeFiles(void); 104 110 void readAttributesOfEnabledFieldsInReadModeFiles(); 105 111 void solveAllInheritance(bool apply=true); 106 112 void findEnabledFiles(void); 113 void findEnabledWriteModeFiles(void); 107 114 void findEnabledReadModeFiles(void); 108 115 void closeAllFile(void); 109 116 void updateCalendar(int step); 110 void createFileHeader(void ); 117 void createFileHeader(void); 118 void initReadFiles(void); 111 119 void checkAxisDomainsGridsEligibilityForCompressedOutput(); 112 120 void prepareTimeseries(void); 113 void solveOnlyRefOfEnabledFields(bool sendToServer); 114 void solveAllRefOfEnabledFields(bool sendToServer); 121 void solveOnlyRefOfEnabledFields(bool sendToServer); 115 122 void buildFilterGraphOfEnabledFields(); 123 void postProcessFilterGraph(); 116 124 void startPrefetchingOfEnabledReadModeFiles(); 125 void doPreTimestepOperationsForEnabledReadModeFiles(); 117 126 void doPostTimestepOperationsForEnabledReadModeFiles(); 118 127 void findFieldsWithReadAccess(void); … … 120 129 void buildFilterGraphOfFieldsWithReadAccess(); 121 130 void postProcessing(); 122 123 std::map<int, StdSize> getAttributesBufferSize(std::map<int, StdSize>& maxEventSize); 124 std::map<int, StdSize> getDataBufferSize(std::map<int, StdSize>& maxEventSize); 125 void setClientServerBuffer(); 131 void postProcessingGlobalAttributes(); 132 133 void solveAllRefOfEnabledFieldsAndTransform(bool sendToServer); 134 void checkGridEnabledFields(); 135 void checkGridEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles); 136 void sendGridEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles); 137 void sendGridComponentEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles) ; 138 139 std::map<int, StdSize> getAttributesBufferSize(std::map<int, StdSize>& maxEventSize, CContextClient* contextClient, bool bufferForWriting = false); 140 std::map<int, StdSize> getDataBufferSize(std::map<int, StdSize>& maxEventSize, CContextClient* contextClient, bool bufferForWriting = false); 141 void setClientServerBuffer(CContextClient* contextClient, bool bufferForWriting = false); 142 143 // Distribute files (in write 
mode) among secondary-server pools according to the estimated data flux 144 void distributeFiles(void); 145 void distributeFileOverBandwith() ; 146 void distributeFileOverMemoryBandwith() ; 147 126 148 127 149 // Send context close definition … … 130 152 void sendUpdateCalendar(int step); 131 153 void sendCreateFileHeader(void); 132 void sendEnabledFiles( );133 void sendEnabledFields ();134 void sendRefDomainsAxis ();135 void sendRefGrid( );154 void sendEnabledFiles(const std::vector<CFile*>& activeFiles); 155 void sendEnabledFieldsInFiles(const std::vector<CFile*>& activeFiles); 156 void sendRefDomainsAxisScalars(const std::vector<CFile*>& activeFiles); 157 void sendRefGrid(const std::vector<CFile*>& activeFiles); 136 158 void sendPostProcessing(); 137 void sendRegistry(void) ; //!< after being gathered to the root process of the context, the merged registry is sent to the root process of the servers 159 void sendPostProcessingGlobalAttributes(); 160 void sendProcessingGridOfEnabledFields(); 161 //!< after being gathered to the root process of the context, the merged registry is sent to the root process of the servers 162 void sendRegistry(void) ; 163 138 164 const StdString& getIdServer(); 165 const StdString& getIdServer(const int srvPoolNb); 139 166 140 167 // Client side: Receive and process messages … … 148 175 static void recvPostProcessing(CEventServer& event); 149 176 void recvPostProcessing(CBufferIn& buffer); 177 static void recvProcessingGridOfEnabledFields(CEventServer& event); 178 static void recvPostProcessingGlobalAttributes(CEventServer& event); 179 void recvPostProcessingGlobalAttributes(CBufferIn& buffer); 150 180 static void recvRegistry(CEventServer& event) ; 151 void recvRegistry(CBufferIn& buffer) ; //!< registry is received by the root process of the servers 181 void recvRegistry(CBufferIn& buffer) ; //!< registry is received by the servers 182 183 void freeComms(void); //!< Free internally allocated communicators 184 void releaseClientBuffers(void); //! Deallocate buffers allocated by clientContexts 152 185 153 186 // dispatch event … … 192 225 virtual bool hasChild(void) const; 193 226 227 194 228 public : 195 229 // Calendar of context … … 200 234 // List of all enabled files in read mode (files on which fields are read) 201 235 std::vector<CFile*> enabledReadModeFiles; 236 // List of all enabled files in write mode 237 std::vector<CFile*> enabledWriteModeFiles; 202 238 203 239 // List of all enabled fields whose instant data is accessible from the public API … … 215 251 bool hasServer; 216 252 217 //Concrete context server218 CContextServer* server;219 220 // Concrete context client221 CContextClient* client; 222 CRegistry* registryIn ; //!< input registry which is read from file223 CRegistry* registryOut ; //!< output registry which will be written to file at the finalize 253 CContextServer* server; //!< Concrete context server 254 CContextClient* client; //!< Concrete context client 255 std::vector<CContextServer*> serverPrimServer; 256 std::vector<CContextClient*> clientPrimServer; 257 258 CRegistry* registryIn ; //!< input registry which is read from file 259 CRegistry* registryOut ; //!< output registry which will be written into a file at finalize 224 260 225 261 private: 226 262 bool isPostProcessed; 263 bool allProcessed; 227 264 bool finalized; 265 int countChildCtx_; //!< Counter of child contexts (for now it is the number of secondary server pools) 228 266 StdString idServer_; 229 267 CGarbageCollector garbageCollector; -
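The getAttributesBufferSize/getDataBufferSize pair declared above reduces the per-field size maps returned by each field into a single per-server-rank map that setClientServerBuffer then uses to allocate buffers. Attribute sizes are always merged with a maximum; data sizes are summed when XIOS runs in performance-optimised mode (buffers large enough for all fields at once) and merged with a maximum otherwise. A self-contained sketch of that merge rule, with invented names standing in for the XIOS internals:

    #include <cstddef>
    #include <map>

    typedef std::size_t StdSize;

    // Fold one field's per-server-rank sizes into the running buffer sizes.
    // 'optPerformance' plays the role of CXios::isOptPerformance.
    void mergeBufferSizes(const std::map<int, StdSize>& fieldSize,
                          std::map<int, StdSize>& bufferSize,
                          std::map<int, StdSize>& maxEventSize,
                          bool optPerformance)
    {
      for (std::map<int, StdSize>::const_iterator it = fieldSize.begin();
           it != fieldSize.end(); ++it)
      {
        StdSize& current = bufferSize[it->first]; // zero-initialized on first use
        if (optPerformance)
          current += it->second;                  // room for every field at once
        else if (current < it->second)
          current = it->second;                   // room for the largest field

        StdSize& maxEvent = maxEventSize[it->first];
        if (maxEvent < it->second)                // a single event is never split,
          maxEvent = it->second;                  // so track the biggest one too
      }
    }

Summing trades memory for throughput; taking the maximum keeps the footprint small at the cost of more frequent buffer flushes.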
XIOS/dev/branch_openmp/src/node/domain.cpp
r1334 r1460 18 18 #include "server_distribution_description.hpp" 19 19 #include "client_server_mapping_distributed.hpp" 20 #include "zoom_domain.hpp" 21 #include "interpolate_domain.hpp" 22 #include "generate_rectilinear_domain.hpp" 20 21 using namespace ep_lib; 23 22 24 23 #include <algorithm> … … 30 29 CDomain::CDomain(void) 31 30 : CObjectTemplate<CDomain>(), CDomainAttributes() 32 , isChecked(false), relFiles(), isClientChecked(false), nb ConnectedClients_(), indSrv_(), connectedServerRank_()33 , hasBounds(false), hasArea(false), is Distributed_(false), isCompressible_(false), isUnstructed_(false)31 , isChecked(false), relFiles(), isClientChecked(false), nbSenders(), indSrv_(), connectedServerRank_() 32 , hasBounds(false), hasArea(false), isCompressible_(false), isUnstructed_(false) 34 33 , isClientAfterTransformationChecked(false), hasLonLat(false) 35 , lonvalue_client(), latvalue_client(), bounds_lon_client(), bounds_lat_client() 36 , isRedistributed_(false), hasPole(false) 34 , isRedistributed_(false), hasPole(false), doZoomByIndex_(false) 35 , lonvalue(), latvalue(), bounds_lonvalue(), bounds_latvalue() 36 , globalLocalIndexMap_(), computedWrittenIndex_(false) 37 , clients() 37 38 { 38 39 } … … 40 41 CDomain::CDomain(const StdString & id) 41 42 : CObjectTemplate<CDomain>(id), CDomainAttributes() 42 , isChecked(false), relFiles(), isClientChecked(false), nb ConnectedClients_(), indSrv_(), connectedServerRank_()43 , hasBounds(false), hasArea(false), is Distributed_(false), isCompressible_(false), isUnstructed_(false)43 , isChecked(false), relFiles(), isClientChecked(false), nbSenders(), indSrv_(), connectedServerRank_() 44 , hasBounds(false), hasArea(false), isCompressible_(false), isUnstructed_(false) 44 45 , isClientAfterTransformationChecked(false), hasLonLat(false) 45 , lonvalue_client(), latvalue_client(), bounds_lon_client(), bounds_lat_client() 46 , isRedistributed_(false), hasPole(false) 47 { 48 } 46 , isRedistributed_(false), hasPole(false), doZoomByIndex_(false) 47 , lonvalue(), latvalue(), bounds_lonvalue(), bounds_latvalue() 48 , globalLocalIndexMap_(), computedWrittenIndex_(false) 49 , clients() 50 { 51 } 49 52 50 53 CDomain::~CDomain(void) … … 75 78 m["compute_connectivity_domain"] = TRANS_COMPUTE_CONNECTIVITY_DOMAIN; 76 79 m["expand_domain"] = TRANS_EXPAND_DOMAIN; 80 m["reorder_domain"] = TRANS_REORDER_DOMAIN; 77 81 } 78 82 … … 85 89 (*CDomain::transformationMapList_ptr)["compute_connectivity_domain"] = TRANS_COMPUTE_CONNECTIVITY_DOMAIN; 86 90 (*CDomain::transformationMapList_ptr)["expand_domain"] = TRANS_EXPAND_DOMAIN; 91 (*CDomain::transformationMapList_ptr)["reorder_domain"] = TRANS_REORDER_DOMAIN; 87 92 } 88 93 … … 92 97 } 93 98 94 95 const std::vector<int>& CDomain::getIndexesToWrite(void) const96 {97 return indexesToWrite;98 }99 99 100 100 /*! 
… … 102 102 \return the number of indexes written by each server 103 103 */ 104 int CDomain::getNumberWrittenIndexes() const 105 { 106 return numberWrittenIndexes_; 104 int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 105 { 106 int writtenSize; 107 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 108 return numberWrittenIndexes_[writtenSize]; 107 109 } 108 110 … … 111 113 \return the total number of indexes written by the servers 112 114 */ 113 int CDomain::getTotalNumberWrittenIndexes() const 114 { 115 return totalNumberWrittenIndexes_; 115 int CDomain::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 116 { 117 int writtenSize; 118 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 119 return totalNumberWrittenIndexes_[writtenSize]; 116 120 } 117 121 … … 120 124 \return the offset of indexes written by each server 121 125 */ 122 int CDomain::getOffsetWrittenIndexes() const 123 { 124 return offsetWrittenIndexes_; 126 int CDomain::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 127 { 128 int writtenSize; 129 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 130 return offsetWrittenIndexes_[writtenSize]; 131 } 132 133 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 134 { 135 int writtenSize; 136 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 137 return compressedIndexToWriteOnServer[writtenSize]; 125 138 } 126 139 … … 132 145 * \return A map associating the server rank with its minimum buffer size. 133 146 */ 134 std::map<int, StdSize> CDomain::getAttributesBufferSize() 135 { 136 CContextClient* client = CContext::getCurrent()->client; 137 138 std::map<int, StdSize> attributesSizes = getMinimumBufferSizeForAttributes(); 147 std::map<int, StdSize> CDomain::getAttributesBufferSize(CContextClient* client, bool bufferForWriting /*= false*/) 148 { 149 150 std::map<int, StdSize> attributesSizes = getMinimumBufferSizeForAttributes(client); 139 151 140 152 if (client->isServerLeader()) 141 153 { 142 // size estimation for send ServerAttribut154 // size estimation for sendDistributionAttribut 143 155 size_t size = 11 * sizeof(size_t); 144 156 … … 151 163 } 152 164 153 std::map<int, std::vector<size_t> >::const_iterator itIndexEnd = indSrv_.end();154 std::map<int, std::vector<int> >::const_iterator itWrittenIndexEnd = indWrittenSrv_.end();155 for (size_t k = 0; k < connectedServerRank_ .size(); ++k)156 { 157 int rank = connectedServerRank_[ k];158 std::map<int, std::vector<size_t> >::const_iterator it = indSrv_.find(rank);165 boost::unordered_map<int, vector<size_t> >::const_iterator itIndexEnd = indSrv_[client->serverSize].end(); 166 // std::map<int, std::vector<int> >::const_iterator itWrittenIndexEnd = indWrittenSrv_.end(); 167 for (size_t k = 0; k < connectedServerRank_[client->serverSize].size(); ++k) 168 { 169 int rank = connectedServerRank_[client->serverSize][k]; 170 boost::unordered_map<int, std::vector<size_t> >::const_iterator it = indSrv_[client->serverSize].find(rank); 159 171 size_t idxCount = (it != itIndexEnd) ? it->second.size() : 0; 160 172 161 173 // size estimation for sendIndex (and sendArea which is always smaller or equal) 162 174 size_t sizeIndexEvent = 2 * sizeof(size_t) + 2 * CArray<int,1>::size(idxCount); 163 if (isCompressible_)164 {165 std::map<int, std::vector<int> >::const_iterator itWritten = indWrittenSrv_.find(rank);166 size_t writtenIdxCount = (itWritten != itWrittenIndexEnd) ? 
itWritten->second.size() : 0;167 sizeIndexEvent += CArray<int,1>::size(writtenIdxCount);168 }175 // if (isCompressible_) 176 // { 177 // std::map<int, std::vector<int> >::const_iterator itWritten = indWrittenSrv_.find(rank); 178 // size_t writtenIdxCount = (itWritten != itWrittenIndexEnd) ? itWritten->second.size() : 0; 179 // sizeIndexEvent += CArray<int,1>::size(writtenIdxCount); 180 // } 169 181 170 182 // size estimation for sendLonLat … … 185 197 bool CDomain::isEmpty(void) const 186 198 { 187 return ((this->zoom_ ni_srv == 0) ||188 (this->zoom_nj_srv == 0)); 199 return ((this->zoom_i_index.isEmpty()) || (0 == this->zoom_i_index.numElements())); 200 189 201 } 190 202 … … 205 217 bool CDomain::isDistributed(void) const 206 218 { 207 return isDistributed_; 219 bool distributed = !((!ni.isEmpty() && (ni == ni_glo) && !nj.isEmpty() && (nj == nj_glo)) || 220 (!i_index.isEmpty() && i_index.numElements() == ni_glo*nj_glo)); 221 distributed |= (1 == CContext::getCurrent()->client->clientSize); 222 223 return distributed; 208 224 } 209 225 … … 254 270 255 271 /*! 256 Redistribute RECTILINEAR domain with a number of local domains.272 Redistribute RECTILINEAR or CURVILINEAR domain with a number of local domains. 257 273 All attributes ni,nj,ibegin,jbegin (if defined) will be rewritten 258 274 The optional attributes lonvalue, latvalue will be added. Because this function only serves (for now) … … 266 282 this->isRedistributed_ = true; 267 283 CContext* context = CContext::getCurrent(); 268 CContextClient* client = context->client; 284 // For now the assumption is that secondary server pools consist of the same number of procs. 285 // CHANGE the line below if the assumption changes. 286 CContextClient* client = (0 != context->clientPrimServer.size()) ? context->clientPrimServer[0] : context->client; 269 287 int rankClient = client->clientRank; 270 288 int rankOnDomain = rankClient%nbLocalDomain; … … 532 550 for (int jdx = 0; jdx < nj; ++jdx) 533 551 for (int idx = 0; idx < ni; ++idx) 534 lonvalue_2d(idx,jdx) = lonvalue_curvilinear_read_from_file(idx +ibegin, jdx+jbegin);552 lonvalue_2d(idx,jdx) = lonvalue_curvilinear_read_from_file(idx, jdx); 535 553 536 554 lonvalue_curvilinear_read_from_file.free(); … … 542 560 for (int jdx = 0; jdx < nj; ++jdx) 543 561 for (int idx = 0; idx < ni; ++idx) 544 latvalue_2d(idx,jdx) = latvalue_curvilinear_read_from_file(idx+ibegin, jdx+jbegin);562 latvalue_2d(idx,jdx) = latvalue_curvilinear_read_from_file(idx, jdx); 545 563 546 564 latvalue_curvilinear_read_from_file.free(); … … 553 571 for (int idx = 0; idx < ni; ++idx) 554 572 for (int ndx = 0; ndx < nvertex; ++ndx) 555 bounds_lon_2d(ndx,idx,jdx) = bounds_lonvalue_curvilinear_read_from_file(ndx,idx+ibegin, jdx+jbegin);573 bounds_lon_2d(ndx,idx,jdx) = bounds_lonvalue_curvilinear_read_from_file(ndx,idx, jdx); 556 574 557 575 bounds_lonvalue_curvilinear_read_from_file.free(); … … 564 582 for (int idx = 0; idx < ni; ++idx) 565 583 for (int ndx = 0; ndx < nvertex; ++ndx) 566 bounds_lat_2d(ndx,idx,jdx) = bounds_latvalue_curvilinear_read_from_file(ndx,idx +ibegin, jdx+jbegin);584 bounds_lat_2d(ndx,idx,jdx) = bounds_latvalue_curvilinear_read_from_file(ndx,idx, jdx); 567 585 568 586 bounds_latvalue_curvilinear_read_from_file.free(); … … 587 605 lonvalue_1d.resize(ni); 588 606 for (int idx = 0; idx < ni; ++idx) 589 lonvalue_1d(idx) = lonvalue_unstructured_read_from_file(i _index(idx));607 lonvalue_1d(idx) = lonvalue_unstructured_read_from_file(idx); 590 608 591 609 // We dont need these values anymore, so just delete them 
… … 597 615 latvalue_1d.resize(ni); 598 616 for (int idx = 0; idx < ni; ++idx) 599 latvalue_1d(idx) = latvalue_unstructured_read_from_file(i _index(idx));617 latvalue_1d(idx) = latvalue_unstructured_read_from_file(idx); 600 618 601 619 // We dont need these values anymore, so just delete them … … 609 627 for (int idx = 0; idx < ni; ++idx) 610 628 for (int jdx = 0; jdx < nbVertex; ++jdx) 611 bounds_lon_1d(jdx,idx) = bounds_lonvalue_unstructured_read_from_file(jdx, i _index(idx));629 bounds_lon_1d(jdx,idx) = bounds_lonvalue_unstructured_read_from_file(jdx, idx); 612 630 613 631 // We dont need these values anymore, so just delete them 614 lonvalue_unstructured_read_from_file.free();632 bounds_lonvalue_unstructured_read_from_file.free(); 615 633 } 616 634 … … 621 639 for (int idx = 0; idx < ni; ++idx) 622 640 for (int jdx = 0; jdx < nbVertex; ++jdx) 623 bounds_lat_1d(jdx,idx) = bounds_latvalue_unstructured_read_from_file(jdx, i _index(idx));641 bounds_lat_1d(jdx,idx) = bounds_latvalue_unstructured_read_from_file(jdx, idx); 624 642 625 643 // We dont need these values anymore, so just delete them 626 lonvalue_unstructured_read_from_file.free();644 bounds_latvalue_unstructured_read_from_file.free(); 627 645 } 628 646 } … … 633 651 void CDomain::AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat, CArray<double,1>& lon_g, CArray<double,1>& lat_g) 634 652 { 635 CContext* context = CContext::getCurrent(); 636 CContextClient* client = context->client; 637 lon_g.resize(ni_glo) ; 638 lat_g.resize(nj_glo) ; 639 640 641 int* ibegin_g = new int[client->clientSize] ; 642 int* jbegin_g = new int[client->clientSize] ; 643 int* ni_g = new int[client->clientSize] ; 644 int* nj_g = new int[client->clientSize] ; 645 int v ; 646 v=ibegin ; 647 MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 648 v=jbegin ; 649 MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 650 v=ni ; 651 MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 652 v=nj ; 653 MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 654 655 MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 656 MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 653 CContext* context = CContext::getCurrent(); 654 // For now the assumption is that secondary server pools consist of the same number of procs. 655 // CHANGE the line below if the assumption changes. 656 CContextClient* client = (0 != context->clientPrimServer.size()) ? 
context->clientPrimServer[0] : context->client; 657 lon_g.resize(ni_glo) ; 658 lat_g.resize(nj_glo) ; 659 660 661 int* ibegin_g = new int[client->clientSize] ; 662 int* jbegin_g = new int[client->clientSize] ; 663 int* ni_g = new int[client->clientSize] ; 664 int* nj_g = new int[client->clientSize] ; 665 int v ; 666 v=ibegin ; 667 ep_lib::MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 668 v=jbegin ; 669 ep_lib::MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 670 v=ni ; 671 ep_lib::MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 672 v=nj ; 673 ep_lib::MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 674 675 ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 676 ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 657 677 658 678 delete[] ibegin_g ; … … 764 784 } 765 785 786 /* 787 General check of the domain to verify its mandatory attributes 788 */ 766 789 void CDomain::checkDomain(void) 767 790 { … … 776 799 if (type == type_attr::gaussian) 777 800 { 778 779 780 781 782 801 hasPole=true ; 802 type.setValue(type_attr::unstructured) ; 803 } 804 else if (type == type_attr::rectilinear) hasPole=true ; 805 783 806 if (type == type_attr::unstructured) 784 807 { … … 857 880 858 881 checkZoom(); 859 860 isDistributed_ = !((!ni.isEmpty() && (ni == ni_glo) && !nj.isEmpty() && (nj == nj_glo)) ||861 (!i_index.isEmpty() && i_index.numElements() == ni_glo*nj_glo));862 863 // A stupid condition to make sure that if there is only one client, domain864 // should be considered to be distributed. This should be a temporary solution865 isDistributed_ |= (1 == CContext::getCurrent()->client->clientSize);866 882 } 867 883 … … 878 894 if (global_zoom_nj.isEmpty()) 879 895 global_zoom_nj.setValue(nj_glo); 880 } 881 896 if (zoom_i_index.isEmpty()) zoom_i_index.setValue(i_index.getValue()); 897 if (zoom_j_index.isEmpty()) zoom_j_index.setValue(j_index.getValue()); 898 if (zoom_ibegin.isEmpty()) zoom_ibegin.setValue(ibegin); 899 if (zoom_ni.isEmpty()) zoom_ni.setValue(ni); 900 if (zoom_jbegin.isEmpty()) zoom_jbegin.setValue(jbegin); 901 if (zoom_nj.isEmpty()) zoom_nj.setValue(nj); 902 } 903 904 size_t CDomain::getGlobalWrittenSize(void) 905 { 906 return global_zoom_ni*global_zoom_nj ; 907 } 882 908 //---------------------------------------------------------------- 883 909 … … 1026 1052 if (!mask_2d.isEmpty()) 1027 1053 { 1028 mask_1d.resize(mask_2d.extent(0) * mask_2d.extent(1));1054 domainMask.resize(mask_2d.extent(0) * mask_2d.extent(1)); 1029 1055 for (int j = 0; j < nj; ++j) 1030 for (int i = 0; i < ni; ++i) mask_1d(i+j*ni) = mask_2d(i,j);1031 mask_2d.reset();1056 for (int i = 0; i < ni; ++i) domainMask(i+j*ni) = mask_2d(i,j); 1057 // mask_2d.reset(); 1032 1058 } 1033 1059 else if (mask_1d.isEmpty()) 1034 1060 { 1035 mask_1d.resize(i_index.numElements()); 1036 for (int i = 0; i < i_index.numElements(); ++i) mask_1d(i) = true; 1037 } 1061 domainMask.resize(i_index.numElements()); 1062 for (int i = 0; i < i_index.numElements(); ++i) domainMask(i) = true; 1063 } 1064 else 1065 { 1066 domainMask.resize(mask_1d.numElements()); 1067 domainMask=mask_1d ; 1068 } 1038 1069 } 1039 1070 … … 1154 1185 void CDomain::computeLocalMask(void) 1155 1186 { 1156 localMask.resize( ni*nj) ;1187 localMask.resize(i_index.numElements()) ; 1157 1188 localMask=false ; 1158 size_t zoom_ibegin=global_zoom_ibegin ;1159 size_t 
zoom_iend=global_zoom_ibegin+global_zoom_ni-1 ;1160 size_t zoom_jbegin=global_zoom_jbegin ;1161 size_t zoom_jend=global_zoom_jbegin+global_zoom_nj-1 ;1162 1163 1189 1164 1190 size_t dn=data_i_index.numElements() ; … … 1172 1198 i=data_i_index(k)+data_ibegin ; 1173 1199 j=data_j_index(k)+data_jbegin ; 1200 if (i>=0 && i<ni && j>=0 && j<nj) 1201 { 1202 ind=j*ni+i ; 1203 localMask(ind)=domainMask(ind) ; 1204 } 1174 1205 } 1175 1206 else 1176 1207 { 1177 i=(data_i_index(k)+data_ibegin)%ni ; 1178 j=(data_i_index(k)+data_ibegin)/ni ; 1208 i=data_i_index(k)+data_ibegin ; 1209 if (i>=0 && i<i_index.numElements()) 1210 { 1211 ind=i ; 1212 localMask(ind)=domainMask(ind) ; 1213 } 1179 1214 } 1180 1181 if (i>=0 && i<ni && j>=0 && j<nj) 1182 if (i+ibegin>=zoom_ibegin && i+ibegin<=zoom_iend && j+jbegin>=zoom_jbegin && j+jbegin<=zoom_jend) 1183 { 1184 ind=i+ni*j ; 1185 localMask(ind)=mask_1d(ind) ; 1186 } 1187 } 1188 } 1189 1190 1191 1192 1193 1194 1215 } 1216 } 1195 1217 1196 1218 void CDomain::checkEligibilityForCompressedOutput(void) … … 1202 1224 //---------------------------------------------------------------- 1203 1225 1226 /* 1227 Fill in longitude and latitude value from clients (or models) into internal values lonvalue, latvalue which 1228 will be used by XIOS. 1229 */ 1204 1230 void CDomain::completeLonLatClient(void) 1205 1231 { 1206 if (!lonvalue_2d.isEmpty()) 1207 { 1208 lonvalue_client.resize(ni * nj); 1209 latvalue_client.resize(ni * nj); 1232 bool lonlatValueExisted = (0 != lonvalue.numElements()) || (0 != latvalue.numElements()); 1233 checkBounds() ; 1234 if (!lonvalue_2d.isEmpty() && !lonlatValueExisted) 1235 { 1236 lonvalue.resize(ni * nj); 1237 latvalue.resize(ni * nj); 1210 1238 if (hasBounds) 1211 1239 { 1212 bounds_lon _client.resize(nvertex, ni * nj);1213 bounds_lat _client.resize(nvertex, ni * nj);1240 bounds_lonvalue.resize(nvertex, ni * nj); 1241 bounds_latvalue.resize(nvertex, ni * nj); 1214 1242 } 1215 1243 … … 1220 1248 int k = j * ni + i; 1221 1249 1222 lonvalue _client(k) = lonvalue_2d(i,j);1223 latvalue _client(k) = latvalue_2d(i,j);1250 lonvalue(k) = lonvalue_2d(i,j); 1251 latvalue(k) = latvalue_2d(i,j); 1224 1252 1225 1253 if (hasBounds) … … 1227 1255 for (int n = 0; n < nvertex; ++n) 1228 1256 { 1229 bounds_lon _client(n,k) = bounds_lon_2d(n,i,j);1230 bounds_lat _client(n,k) = bounds_lat_2d(n,i,j);1257 bounds_lonvalue(n,k) = bounds_lon_2d(n,i,j); 1258 bounds_latvalue(n,k) = bounds_lat_2d(n,i,j); 1231 1259 } 1232 1260 } … … 1234 1262 } 1235 1263 } 1236 else if (!lonvalue_1d.isEmpty() )1264 else if (!lonvalue_1d.isEmpty() && !lonlatValueExisted) 1237 1265 { 1238 1266 if (type_attr::rectilinear == type) … … 1240 1268 if (ni == lonvalue_1d.numElements() && nj == latvalue_1d.numElements()) 1241 1269 { 1242 lonvalue _client.resize(ni * nj);1243 latvalue _client.resize(ni * nj);1270 lonvalue.resize(ni * nj); 1271 latvalue.resize(ni * nj); 1244 1272 if (hasBounds) 1245 1273 { 1246 bounds_lon _client.resize(nvertex, ni * nj);1247 bounds_lat _client.resize(nvertex, ni * nj);1274 bounds_lonvalue.resize(nvertex, ni * nj); 1275 bounds_latvalue.resize(nvertex, ni * nj); 1248 1276 } 1249 1277 … … 1254 1282 int k = j * ni + i; 1255 1283 1256 lonvalue _client(k) = lonvalue_1d(i);1257 latvalue _client(k) = latvalue_1d(j);1284 lonvalue(k) = lonvalue_1d(i); 1285 latvalue(k) = latvalue_1d(j); 1258 1286 1259 1287 if (hasBounds) … … 1261 1289 for (int n = 0; n < nvertex; ++n) 1262 1290 { 1263 bounds_lon _client(n,k) = bounds_lon_1d(n,i);1264 bounds_lat _client(n,k) = bounds_lat_1d(n,j);1291 
bounds_lonvalue(n,k) = bounds_lon_1d(n,i); 1292 bounds_latvalue(n,k) = bounds_lat_1d(n,j); 1265 1293 } 1266 1294 } … … 1268 1296 } 1269 1297 } 1270 else if (i_index.numElements() == lonvalue_1d.numElements() && j_index.numElements() == latvalue_1d.numElements() )1298 else if (i_index.numElements() == lonvalue_1d.numElements() && j_index.numElements() == latvalue_1d.numElements() && !lonlatValueExisted) 1271 1299 { 1272 lonvalue _client.reference(lonvalue_1d);1273 latvalue _client.reference(latvalue_1d);1300 lonvalue.reference(lonvalue_1d); 1301 latvalue.reference(latvalue_1d); 1274 1302 if (hasBounds) 1275 1303 { 1276 bounds_lon _client.reference(bounds_lon_1d);1277 bounds_lat _client.reference(bounds_lat_1d);1304 bounds_lonvalue.reference(bounds_lon_1d); 1305 bounds_latvalue.reference(bounds_lat_1d); 1278 1306 } 1279 1307 } … … 1287 1315 << i_index.numElements() << " and " << j_index.numElements() << "."); 1288 1316 } 1289 else if (type == type_attr::curvilinear || type == type_attr::unstructured )1317 else if (type == type_attr::curvilinear || type == type_attr::unstructured && !lonlatValueExisted) 1290 1318 { 1291 lonvalue _client.reference(lonvalue_1d);1292 latvalue _client.reference(latvalue_1d);1319 lonvalue.reference(lonvalue_1d); 1320 latvalue.reference(latvalue_1d); 1293 1321 if (hasBounds) 1294 1322 { 1295 bounds_lon _client.reference(bounds_lon_1d);1296 bounds_lat _client.reference(bounds_lat_1d);1323 bounds_lonvalue.reference(bounds_lon_1d); 1324 bounds_latvalue.reference(bounds_lat_1d); 1297 1325 } 1298 1326 } … … 1300 1328 } 1301 1329 1330 /* 1331 Convert internal longitude latitude value used by XIOS to "lonvalue_*" which can be retrieved with Fortran interface 1332 */ 1333 void CDomain::convertLonLatValue(void) 1334 { 1335 bool lonlatValueExisted = (0 != lonvalue.numElements()) || (0 != latvalue.numElements()); 1336 if (!lonvalue_2d.isEmpty() && lonlatValueExisted) 1337 { 1338 lonvalue_2d.resize(ni,nj); 1339 latvalue_2d.resize(ni,nj); 1340 if (hasBounds) 1341 { 1342 bounds_lon_2d.resize(nvertex, ni, nj); 1343 bounds_lat_2d.resize(nvertex, ni, nj); 1344 } 1345 1346 for (int j = 0; j < nj; ++j) 1347 { 1348 for (int i = 0; i < ni; ++i) 1349 { 1350 int k = j * ni + i; 1351 1352 lonvalue_2d(i,j) = lonvalue(k); 1353 latvalue_2d(i,j) = latvalue(k); 1354 1355 if (hasBounds) 1356 { 1357 for (int n = 0; n < nvertex; ++n) 1358 { 1359 bounds_lon_2d(n,i,j) = bounds_lonvalue(n,k); 1360 bounds_lat_2d(n,i,j) = bounds_latvalue(n,k); 1361 } 1362 } 1363 } 1364 } 1365 } 1366 else if (!lonvalue_1d.isEmpty() && lonlatValueExisted) 1367 { 1368 if (type_attr::rectilinear == type) 1369 { 1370 if (ni == lonvalue_1d.numElements() && nj == latvalue_1d.numElements()) 1371 { 1372 lonvalue.resize(ni * nj); 1373 latvalue.resize(ni * nj); 1374 if (hasBounds) 1375 { 1376 bounds_lonvalue.resize(nvertex, ni * nj); 1377 bounds_latvalue.resize(nvertex, ni * nj); 1378 } 1379 1380 for (int j = 0; j < nj; ++j) 1381 { 1382 for (int i = 0; i < ni; ++i) 1383 { 1384 int k = j * ni + i; 1385 1386 lonvalue(k) = lonvalue_1d(i); 1387 latvalue(k) = latvalue_1d(j); 1388 1389 if (hasBounds) 1390 { 1391 for (int n = 0; n < nvertex; ++n) 1392 { 1393 bounds_lonvalue(n,k) = bounds_lon_1d(n,i); 1394 bounds_latvalue(n,k) = bounds_lat_1d(n,j); 1395 } 1396 } 1397 } 1398 } 1399 } 1400 else if (i_index.numElements() == lonvalue_1d.numElements() && j_index.numElements() == latvalue_1d.numElements() && !lonlatValueExisted) 1401 { 1402 lonvalue.reference(lonvalue_1d); 1403 latvalue.reference(latvalue_1d); 1404 if (hasBounds) 1405 { 1406 
bounds_lonvalue.reference(bounds_lon_1d); 1407 bounds_latvalue.reference(bounds_lat_1d); 1408 } 1409 } 1410 else 1411 ERROR("CDomain::completeLonClient(void)", 1412 << "[ id = " << this->getId() << " , context = '" << CObjectFactory::GetCurrentContextId() << " ] " 1413 << "'lonvalue_1d' and 'latvalue_1d' do not have the same size as the local domain." << std::endl 1414 << "'lonvalue_1d' size is " << lonvalue_1d.numElements() 1415 << " and 'latvalue_1d' size is " << latvalue_1d.numElements() << std::endl 1416 << " They should be correspondingly " << ni.getValue() << " and " << nj.getValue() << " or " << std::endl 1417 << i_index.numElements() << " and " << j_index.numElements() << "."); 1418 } 1419 else if (type == type_attr::curvilinear || type == type_attr::unstructured && !lonlatValueExisted) 1420 { 1421 lonvalue.reference(lonvalue_1d); 1422 latvalue.reference(latvalue_1d); 1423 if (hasBounds) 1424 { 1425 bounds_lonvalue.reference(bounds_lon_1d); 1426 bounds_latvalue.reference(bounds_lat_1d); 1427 } 1428 } 1429 } 1430 } 1431 1432 1302 1433 void CDomain::checkBounds(void) 1303 1434 { 1304 if (!nvertex.isEmpty() && nvertex > 0) 1435 bool hasBoundValues = (0 != bounds_lonvalue.numElements()) || (0 != bounds_latvalue.numElements()); 1436 if (!nvertex.isEmpty() && nvertex > 0 && !hasBoundValues) 1305 1437 { 1306 1438 if (!bounds_lon_1d.isEmpty() && !bounds_lon_2d.isEmpty()) … … 1381 1513 hasBounds = true; 1382 1514 } 1515 else if (hasBoundValues) 1516 { 1517 hasBounds = true; 1518 } 1383 1519 else 1384 1520 { 1385 1521 hasBounds = false; 1386 nvertex = 0;1387 1522 } 1388 1523 } … … 1390 1525 void CDomain::checkArea(void) 1391 1526 { 1527 bool hasAreaValue = (!areavalue.isEmpty() && 0 != areavalue.numElements()); 1392 1528 hasArea = !area.isEmpty(); 1393 if (hasArea )1529 if (hasArea && !hasAreaValue) 1394 1530 { 1395 1531 if (area.extent(0) != ni || area.extent(1) != nj) … … 1401 1537 << "Area size is " << area.extent(0) << " x " << area.extent(1) << "."); 1402 1538 } 1539 if (areavalue.isEmpty()) 1540 { 1541 areavalue.resize(ni*nj); 1542 for (int j = 0; j < nj; ++j) 1543 { 1544 for (int i = 0; i < ni; ++i) 1545 { 1546 int k = j * ni + i; 1547 areavalue(k) = area(i,j); 1548 } 1549 } 1550 } 1403 1551 } 1404 1552 } … … 1406 1554 void CDomain::checkLonLat() 1407 1555 { 1408 hasLonLat = (!latvalue_1d.isEmpty() && !lonvalue_1d.isEmpty()) || 1409 (!latvalue_2d.isEmpty() && !lonvalue_2d.isEmpty()); 1410 if (hasLonLat) 1556 if (!hasLonLat) hasLonLat = (!latvalue_1d.isEmpty() && !lonvalue_1d.isEmpty()) || 1557 (!latvalue_2d.isEmpty() && !lonvalue_2d.isEmpty()); 1558 bool hasLonLatValue = (0 != lonvalue.numElements()) || (0 != latvalue.numElements()); 1559 if (hasLonLat && !hasLonLatValue) 1560 { 1561 if (!lonvalue_1d.isEmpty() && !lonvalue_2d.isEmpty()) … … 1471 1620 if (context->hasClient) 1472 1621 { 1473 this->checkMask(); 1474 if (hasLonLat || hasArea || isCompressible_) this->computeConnectedServer(); 1475 if (hasLonLat) this->completeLonLatClient(); 1622 this->computeConnectedClients(); 1623 if (hasLonLat) 1624 if (!context->hasServer) 1625 this->completeLonLatClient(); 1476 1626 } 1477 1627 … … 1487 1637 CContext* context=CContext::getCurrent(); 1488 1638 1489 this->checkDomain(); 1490 this->checkBounds(); 1491 this->checkArea(); 1492 this->checkLonLat(); 1493 1494 if (context->hasClient) 1495 { // Client side only 1639 if (context->hasClient && !context->hasServer) 1640 { 1641 this->checkDomain(); 1642 this->checkBounds(); 1643 this->checkArea(); 1644 this->checkLonLat();
1645 } 1646 1647 if (context->hasClient && !context->hasServer) 1648 { // Client side only 1496 1649 this->checkMask(); 1497 1650 this->checkDomainData(); … … 1500 1653 } 1501 1654 else 1502 { // Server side only 1655 { // Server side only 1503 1656 } 1504 1657 … … 1516 1669 if (context->hasClient) 1517 1670 { 1518 sendServerAttribut(); 1519 if (hasLonLat || hasArea || isCompressible_) sendLonLatArea(); 1671 sendAttributes(); 1520 1672 } 1521 1673 this->isChecked = true; … … 1533 1685 1534 1686 if (context->hasClient) 1535 1687 { // Client side only 1687 { // Client side only 1536 1688 this->checkMask(); 1537 1689 this->checkDomainData(); … … 1541 1693 } 1542 1694 else 1543 { // Server side only 1695 { // Server side only 1544 1696 } 1545 1697 1546 1698 if (context->hasClient) 1547 1699 { 1548 this->computeConnectedServer();1700 this->computeConnectedClients(); 1549 1701 this->completeLonLatClient(); 1550 this->sendServerAttribut();1551 this->sendLonLatArea();1552 1702 } 1553 1703 … … 1555 1705 } 1556 1706 1557 void CDomain::sendServerAttribut(void) 1558 { 1559 CContext* context = CContext::getCurrent(); 1560 CContextClient* client = context->client; 1561 int nbServer = client->serverSize; 1562 1563 CServerDistributionDescription serverDescription(getNbGlob(), nbServer); 1564 if (isUnstructed_) serverDescription.computeServerDistribution(false, 0); 1565 else serverDescription.computeServerDistribution(false, 1); 1566 1567 std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin(); 1568 std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes(); 1569 1570 CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT); 1571 if (client->isServerLeader()) 1707 /*! 1708 Compute the connection of a client to other clients to determine which clients to send attributes to. 1709 The sending clients are supposed to already know the distribution of the receiving clients (in simple cases, it is a band distribution). 1710 The connection among clients is calculated by using global indexes. 1711 A client connects to the other clients that hold the same global indexes. 1712 */ 1713 void CDomain::computeConnectedClients() 1714 { 1715 CContext* context=CContext::getCurrent() ; 1716 1717 // This line should be changed soon. 1718 int nbSrvPools = (context->hasServer) ? (context->hasClient ?
context->clientPrimServer.size() : 0) : 1; 1719 1720 nbSenders.clear(); 1721 connectedServerRank_.clear(); 1722 1723 for (int p = 0; p < nbSrvPools; ++p) 1572 1724 { 1573 std::list<CMessage> msgs; 1574 1575 const std::list<int>& ranks = client->getRanksServerLeader(); 1576 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1577 { 1578 // Use const int to ensure CMessage holds a copy of the value instead of just a reference 1579 const int ibegin_srv = serverIndexBegin[*itRank][0]; 1580 const int jbegin_srv = serverIndexBegin[*itRank][1]; 1581 const int ni_srv = serverDimensionSizes[*itRank][0]; 1582 const int nj_srv = serverDimensionSizes[*itRank][1]; 1583 const int iend_srv = ibegin_srv + ni_srv - 1; 1584 const int jend_srv = jbegin_srv + nj_srv - 1; 1585 1586 msgs.push_back(CMessage()); 1587 CMessage& msg = msgs.back(); 1588 msg << this->getId() ; 1589 msg << ni_srv << ibegin_srv << iend_srv << nj_srv << jbegin_srv << jend_srv; 1590 msg << global_zoom_ni.getValue() << global_zoom_ibegin.getValue() << global_zoom_nj.getValue() << global_zoom_jbegin.getValue(); 1591 msg << isCompressible_; 1592 1593 event.push(*itRank,1,msg); 1594 } 1595 client->sendEvent(event); 1596 } 1597 else client->sendEvent(event); 1598 } 1599 1600 std::vector<int> CDomain::getNbGlob() 1601 { 1602 std::vector<int> nbGlob(2); 1603 nbGlob[0] = ni_glo.getValue(); 1604 nbGlob[1] = nj_glo.getValue(); 1605 1606 return nbGlob; 1607 } 1608 1609 void CDomain::computeConnectedServer(void) 1610 { 1611 CContext* context=CContext::getCurrent() ; 1612 CContextClient* client=context->client ; 1613 int nbServer=client->serverSize; 1614 int rank = client->clientRank; 1615 bool doComputeGlobalIndexServer = true; 1616 1617 int i,j,i_ind,j_ind, nbIndex; 1618 int global_zoom_iend=global_zoom_ibegin+global_zoom_ni-1 ; 1619 int global_zoom_jend=global_zoom_jbegin+global_zoom_nj-1 ; 1620 1621 // Precompute number of index 1622 int globalIndexCountZoom = 0; 1623 nbIndex = i_index.numElements(); 1624 for (i = 0; i < nbIndex; ++i) 1725 CContextClient* client = (0 != context->clientPrimServer.size()) ? 
context->clientPrimServer[p] : context->client; 1726 int nbServer = client->serverSize; 1727 int nbClient = client->clientSize; 1728 int rank = client->clientRank; 1729 bool doComputeGlobalIndexServer = true; 1730 1731 if (connectedServerRank_.find(nbServer) == connectedServerRank_.end()) 1732 { 1733 1734 if (indSrv_.find(nbServer) == indSrv_.end()) 1735 { 1736 int i,j,i_ind,j_ind, nbIndex, nbIndexZoom; 1737 int global_zoom_iend=global_zoom_ibegin+global_zoom_ni-1; 1738 int global_zoom_jend=global_zoom_jbegin+global_zoom_nj-1; 1739 1740 // Precompute number of index 1741 int globalIndexCountZoom = 0; 1742 nbIndex = i_index.numElements(); 1743 1744 if (doZoomByIndex_) 1745 { 1746 globalIndexCountZoom = zoom_i_index.numElements(); 1747 } 1748 else 1749 { 1750 for (i = 0; i < nbIndex; ++i) 1751 { 1752 i_ind=i_index(i); 1753 j_ind=j_index(i); 1754 1755 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1756 { 1757 ++globalIndexCountZoom; 1758 } 1759 } 1760 } 1761 1762 // Fill in index 1763 CArray<size_t,1> globalIndexDomainZoom(globalIndexCountZoom); 1764 CArray<size_t,1> localIndexDomainZoom(globalIndexCountZoom); 1765 CArray<size_t,1> globalIndexDomain(nbIndex); 1766 size_t globalIndex; 1767 int globalIndexCount = 0; 1768 1769 for (i = 0; i < nbIndex; ++i) 1770 { 1771 i_ind=i_index(i); 1772 j_ind=j_index(i); 1773 globalIndex = i_ind + j_ind * ni_glo; 1774 globalIndexDomain(i) = globalIndex; 1775 } 1776 1777 if (globalLocalIndexMap_.empty()) 1778 { 1779 for (i = 0; i < nbIndex; ++i) 1780 globalLocalIndexMap_[globalIndexDomain(i)] = i; 1781 } 1782 1783 globalIndexCountZoom = 0; 1784 if (doZoomByIndex_) 1785 { 1786 int nbIndexZoom = zoom_i_index.numElements(); 1787 1788 for (i = 0; i < nbIndexZoom; ++i) 1789 { 1790 i_ind=zoom_i_index(i); 1791 j_ind=zoom_j_index(i); 1792 globalIndex = i_ind + j_ind * ni_glo; 1793 globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1794 ++globalIndexCountZoom; 1795 } 1796 } 1797 else 1798 { 1799 int global_zoom_iend=global_zoom_ibegin+global_zoom_ni-1; 1800 int global_zoom_jend=global_zoom_jbegin+global_zoom_nj-1; 1801 for (i = 0; i < nbIndex; ++i) 1802 { 1803 i_ind=i_index(i); 1804 j_ind=j_index(i); 1805 globalIndex = i_ind + j_ind * ni_glo; 1806 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1807 { 1808 globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1809 ++globalIndexCountZoom; 1810 } 1811 } 1812 1813 int iend = ibegin + ni -1; 1814 int jend = jbegin + nj -1; 1815 zoom_ibegin = global_zoom_ibegin > ibegin ? global_zoom_ibegin : ibegin; 1816 int zoom_iend = global_zoom_iend < iend ? zoom_iend : iend ; 1817 zoom_ni = zoom_iend-zoom_ibegin+1 ; 1818 1819 zoom_jbegin = global_zoom_jbegin > jbegin ? global_zoom_jbegin : jbegin ; 1820 int zoom_jend = global_zoom_jend < jend ? 
zoom_jend : jend; 1821 zoom_nj = zoom_jend-zoom_jbegin+1; 1822 1823 1824 size_t globalSizeIndex = 1, indexBegin, indexEnd; 1825 int range, clientSize = client->clientSize; 1826 std::vector<int> nGlobDomain(2); 1827 nGlobDomain[0] = this->ni_glo; 1828 nGlobDomain[1] = this->nj_glo; 1829 for (int i = 0; i < nGlobDomain.size(); ++i) globalSizeIndex *= nGlobDomain[i]; 1830 indexBegin = 0; 1831 if (globalSizeIndex <= clientSize) 1832 { 1833 indexBegin = rank%globalSizeIndex; 1834 indexEnd = indexBegin; 1835 } 1836 else 1837 { 1838 for (int i = 0; i < clientSize; ++i) 1839 { 1840 range = globalSizeIndex / clientSize; 1841 if (i < (globalSizeIndex%clientSize)) ++range; 1842 if (i == client->clientRank) break; 1843 indexBegin += range; 1844 } 1845 indexEnd = indexBegin + range - 1; 1846 } 1847 1848 // Even if servers have no index, they must receive something from the client 1849 // We use only a few clients to send "empty" messages to these servers 1850 CServerDistributionDescription serverDescription(nGlobDomain, nbServer); 1851 std::vector<int> serverZeroIndex; 1852 if (isUnstructed_) serverZeroIndex = serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 0); 1853 else serverZeroIndex = serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 1); 1854 1855 std::list<int> serverZeroIndexLeader; 1856 std::list<int> serverZeroIndexNotLeader; 1857 CContextClient::computeLeader(client->clientRank, client->clientSize, serverZeroIndex.size(), serverZeroIndexLeader, serverZeroIndexNotLeader); 1858 for (std::list<int>::iterator it = serverZeroIndexLeader.begin(); it != serverZeroIndexLeader.end(); ++it) 1859 *it = serverZeroIndex[*it]; 1860 1861 CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), 1862 client->intraComm); 1863 clientServerMap->computeServerIndexMapping(globalIndexDomain, nbServer); 1864 CClientServerMapping::GlobalIndexMap& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer(); 1865 1866 CClientServerMapping::GlobalIndexMap::const_iterator it = globalIndexDomainOnServer.begin(), 1867 ite = globalIndexDomainOnServer.end(); 1868 indSrv_[nbServer].swap(globalIndexDomainOnServer); 1869 connectedServerRank_[nbServer].clear(); 1870 for (it = indSrv_[nbServer].begin(); it != ite; ++it) 1871 connectedServerRank_[nbServer].push_back(it->first); 1872 1873 for (std::list<int>::const_iterator it = serverZeroIndexLeader.begin(); it != serverZeroIndexLeader.end(); ++it) 1874 connectedServerRank_[nbServer].push_back(*it); 1875 1876 // Even if a client has no index, it must connect to at least one server and 1877 // send "empty" data to this server 1878 if (connectedServerRank_[nbServer].empty()) 1879 connectedServerRank_[nbServer].push_back(client->clientRank % client->serverSize); 1880 1881 nbSenders[nbServer] = clientServerMap->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_[nbServer]); 1882 delete clientServerMap; 1883 } 1884 } 1885 } 1886 } 1887 1888 /*! 1889 Compute the indexes used to write data. Data is written only on the zoomed region, so there must 1890 be a mapping between the complete grid and the reduced grid where data is written. 1891 Using global indexes makes this mapping straightforward.
1892 */ 1893 void CDomain::computeWrittenIndex() 1894 { 1895 if (computedWrittenIndex_) return; 1896 computedWrittenIndex_ = true; 1897 1898 CContext* context=CContext::getCurrent(); 1899 CContextServer* server = context->server; 1900 1901 std::vector<int> nBegin(2), nSize(2), nBeginGlobal(2), nGlob(2); 1902 nBegin[0] = zoom_ibegin; nBegin[1] = zoom_jbegin; 1903 nSize[0] = zoom_ni; nSize[1] = zoom_nj; 1904 nBeginGlobal[0] = 0; nBeginGlobal[1] = 0; 1905 nGlob[0] = ni_glo; nGlob[1] = nj_glo; 1906 CDistributionServer srvDist(server->intraCommSize, nBegin, nSize, nBeginGlobal, nGlob); 1907 const CArray<size_t,1>& writtenGlobalIndex = srvDist.getGlobalIndex(); 1908 1909 size_t nbWritten = 0, indGlo; 1910 boost::unordered_map<size_t,size_t>::const_iterator itb = globalLocalIndexMap_.begin(), 1911 ite = globalLocalIndexMap_.end(), it; 1912 CArray<size_t,1>::const_iterator itSrvb = writtenGlobalIndex.begin(), 1913 itSrve = writtenGlobalIndex.end(), itSrv; 1914 1915 // for (itSrv = itSrvb; itSrv != itSrve; ++itSrv) 1916 // { 1917 // indGlo = *itSrv; 1918 // if (ite != globalLocalIndexMap_.find(indGlo)) 1919 // { 1920 // ++nbWritten; 1921 // } 1922 // } 1923 1924 // localIndexToWriteOnServer.resize(nbWritten); 1925 localIndexToWriteOnServer.resize(writtenGlobalIndex.numElements()); 1926 1927 nbWritten = 0; 1928 for (itSrv = itSrvb; itSrv != itSrve; ++itSrv) 1929 { 1930 indGlo = *itSrv; 1931 if (ite != globalLocalIndexMap_.find(indGlo)) 1932 { 1933 localIndexToWriteOnServer(nbWritten) = globalLocalIndexMap_[indGlo]; 1934 ++nbWritten; 1935 } 1936 else 1937 { 1938 localIndexToWriteOnServer(nbWritten) = 0; 1939 ++nbWritten; 1940 } 1941 } 1942 1943 // if (isCompressible()) 1944 // { 1945 // nbWritten = 0; 1946 // boost::unordered_map<size_t,size_t> localGlobalIndexMap; 1947 // for (itSrv = itSrvb; itSrv != itSrve; ++itSrv) 1948 // { 1949 // indGlo = *itSrv; 1950 // if (ite != globalLocalIndexMap_.find(indGlo)) 1951 // { 1952 // localGlobalIndexMap[localIndexToWriteOnServer(nbWritten)] = indGlo; 1953 // ++nbWritten; 1954 // } 1955 // } 1956 1957 // nbWritten = 0; 1958 // for (int idx = 0; idx < data_i_index.numElements(); ++idx) 1959 // { 1960 // if (localGlobalIndexMap.end() != localGlobalIndexMap.find(data_i_index(idx))) 1961 // { 1962 // ++nbWritten; 1963 // } 1964 // } 1965 1966 // compressedIndexToWriteOnServer.resize(nbWritten); 1967 // nbWritten = 0; 1968 // for (int idx = 0; idx < data_i_index.numElements(); ++idx) 1969 // { 1970 // if (localGlobalIndexMap.end() != localGlobalIndexMap.find(data_i_index(idx))) 1971 // { 1972 // compressedIndexToWriteOnServer(nbWritten) = localGlobalIndexMap[data_i_index(idx)]; 1973 // ++nbWritten; 1974 // } 1975 // } 1976 1977 // numberWrittenIndexes_ = nbWritten; 1978 // if (isDistributed()) 1979 // { 1980 // ep_lib::MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 1981 // ep_lib::MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 1982 // offsetWrittenIndexes_ -= numberWrittenIndexes_; 1983 // } 1984 // else 1985 // totalNumberWrittenIndexes_ = numberWrittenIndexes_; 1986 // } 1987 } 1988 1989 void CDomain::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 1990 { 1991 int writtenCommSize; 1992 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 1993 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 1994 return; 1995 1996 if (isCompressible()) 1625 1997 { 1626 i_ind=i_index(i); 1627 j_ind=j_index(i); 
1628 1629 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1630 { 1631 ++globalIndexCountZoom; 1632 } 1633 } 1634 1635 int globalIndexWrittenCount = 0; 1636 if (isCompressible_) 1637 { 1638 for (i = 0; i < data_i_index.numElements(); ++i) 1639 { 1640 i_ind = CDistributionClient::getDomainIndex(data_i_index(i), data_j_index(i), 1641 data_ibegin, data_jbegin, data_dim, ni, 1642 j_ind); 1643 if (i_ind >= 0 && i_ind < ni && j_ind >= 0 && j_ind < nj && mask_1d(i_ind + j_ind * ni)) 1644 { 1645 i_ind += ibegin; 1646 j_ind += jbegin; 1647 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1648 ++globalIndexWrittenCount; 1649 } 1650 } 1651 } 1652 1653 // Fill in index 1654 CArray<size_t,1> globalIndexDomainZoom(globalIndexCountZoom); 1655 CArray<size_t,1> localIndexDomainZoom(globalIndexCountZoom); 1656 CArray<size_t,1> globalIndexDomain(nbIndex); 1657 size_t globalIndex; 1658 int globalIndexCount = 0; 1659 globalIndexCountZoom = 0; 1660 1661 for (i = 0; i < nbIndex; ++i) 1662 { 1663 i_ind=i_index(i); 1664 j_ind=j_index(i); 1665 globalIndex = i_ind + j_ind * ni_glo; 1666 globalIndexDomain(globalIndexCount) = globalIndex; 1667 ++globalIndexCount; 1668 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1669 { 1670 globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1671 localIndexDomainZoom(globalIndexCountZoom) = i; 1672 ++globalIndexCountZoom; 1673 } 1674 } 1675 1676 CArray<int,1> globalIndexWrittenDomain(globalIndexWrittenCount); 1677 if (isCompressible_) 1678 { 1679 globalIndexWrittenCount = 0; 1680 for (i = 0; i < data_i_index.numElements(); ++i) 1681 { 1682 i_ind = CDistributionClient::getDomainIndex(data_i_index(i), data_j_index(i), 1683 data_ibegin, data_jbegin, data_dim, ni, 1684 j_ind); 1685 if (i_ind >= 0 && i_ind < ni && j_ind >= 0 && j_ind < nj && mask_1d(i_ind + j_ind * ni)) 1686 { 1687 i_ind += ibegin; 1688 j_ind += jbegin; 1689 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1690 { 1691 globalIndexWrittenDomain(globalIndexWrittenCount) = i_ind + j_ind * ni_glo; 1692 ++globalIndexWrittenCount; 1693 } 1694 } 1695 } 1696 } 1697 1698 size_t globalSizeIndex = 1, indexBegin, indexEnd; 1699 int range, clientSize = client->clientSize; 1700 for (int i = 0; i < getNbGlob().size(); ++i) globalSizeIndex *= getNbGlob()[i]; 1701 indexBegin = 0; 1702 if (globalSizeIndex <= clientSize) 1703 { 1704 indexBegin = rank%globalSizeIndex; 1705 indexEnd = indexBegin; 1706 } 1707 else 1708 { 1709 for (int i = 0; i < clientSize; ++i) 1710 { 1711 range = globalSizeIndex / clientSize; 1712 if (i < (globalSizeIndex%clientSize)) ++range; 1713 if (i == client->clientRank) break; 1714 indexBegin += range; 1715 } 1716 indexEnd = indexBegin + range - 1; 1717 } 1718 1719 CServerDistributionDescription serverDescription(getNbGlob(), nbServer); 1720 if (isUnstructed_) serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 0); 1721 else serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 1); 1722 1723 CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), 1724 client->intraComm); 1725 clientServerMap->computeServerIndexMapping(globalIndexDomain); 1726 const 
CClientServerMapping::GlobalIndexMap& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer(); 1727 1728 CClientServerMapping::GlobalIndexMap::const_iterator it = globalIndexDomainOnServer.begin(), 1729 ite = globalIndexDomainOnServer.end(); 1730 typedef XIOSBinarySearchWithIndex<size_t> BinarySearch; 1731 std::vector<int>::iterator itVec; 1732 1733 indSrv_.clear(); 1734 indWrittenSrv_.clear(); 1735 for (; it != ite; ++it) 1736 { 1737 int rank = it->first; 1738 int indexSize = it->second.size(); 1739 std::vector<int> permutIndex(indexSize); 1740 XIOSAlgorithms::fillInIndex(indexSize, permutIndex); 1741 XIOSAlgorithms::sortWithIndex<size_t, CVectorStorage>(it->second, permutIndex); 1742 BinarySearch binSearch(it->second); 1743 int nb = globalIndexDomainZoom.numElements(); 1744 for (int i = 0; i < nb; ++i) 1745 { 1746 if (binSearch.search(permutIndex.begin(), permutIndex.end(), globalIndexDomainZoom(i), itVec)) 1747 { 1748 indSrv_[rank].push_back(localIndexDomainZoom(i)); 1749 } 1750 } 1751 for (int i = 0; i < globalIndexWrittenDomain.numElements(); ++i) 1752 { 1753 if (binSearch.search(permutIndex.begin(), permutIndex.end(), globalIndexWrittenDomain(i), itVec)) 1754 { 1755 indWrittenSrv_[rank].push_back(globalIndexWrittenDomain(i)); 1756 } 1757 } 1758 } 1759 1760 connectedServerRank_.clear(); 1761 for (it = globalIndexDomainOnServer.begin(); it != ite; ++it) { 1762 connectedServerRank_.push_back(it->first); 1763 } 1764 1765 nbConnectedClients_ = clientServerMap->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_); 1766 1767 delete clientServerMap; 1768 } 1769 1770 const std::map<int, vector<size_t> >& CDomain::getIndexServer() const 1771 { 1772 return indSrv_; 1998 size_t nbWritten = 0, indGlo; 1999 CContext* context=CContext::getCurrent(); 2000 CContextServer* server = context->server; 2001 2002 std::vector<int> nBegin(2), nSize(2), nBeginGlobal(2), nGlob(2); 2003 nBegin[0] = zoom_ibegin; nBegin[1] = zoom_jbegin; 2004 nSize[0] = zoom_ni; nSize[1] = zoom_nj; 2005 nBeginGlobal[0] = 0; nBeginGlobal[1] = 0; 2006 nGlob[0] = ni_glo; nGlob[1] = nj_glo; 2007 CDistributionServer srvDist(server->intraCommSize, nBegin, nSize, nBeginGlobal, nGlob); 2008 const CArray<size_t,1>& writtenGlobalIndex = srvDist.getGlobalIndex(); 2009 2010 boost::unordered_map<size_t,size_t>::const_iterator itb = globalLocalIndexMap_.begin(), 2011 ite = globalLocalIndexMap_.end(), it; 2012 CArray<size_t,1>::const_iterator itSrvb = writtenGlobalIndex.begin(), 2013 itSrve = writtenGlobalIndex.end(), itSrv; 2014 boost::unordered_map<size_t,size_t> localGlobalIndexMap; 2015 for (itSrv = itSrvb; itSrv != itSrve; ++itSrv) 2016 { 2017 indGlo = *itSrv; 2018 if (ite != globalLocalIndexMap_.find(indGlo)) 2019 { 2020 localGlobalIndexMap[localIndexToWriteOnServer(nbWritten)] = indGlo; 2021 ++nbWritten; 2022 } 2023 } 2024 2025 nbWritten = 0; 2026 for (int idx = 0; idx < data_i_index.numElements(); ++idx) 2027 { 2028 if (localGlobalIndexMap.end() != localGlobalIndexMap.find(data_i_index(idx))) 2029 { 2030 ++nbWritten; 2031 } 2032 } 2033 2034 compressedIndexToWriteOnServer[writtenCommSize].resize(nbWritten); 2035 nbWritten = 0; 2036 for (int idx = 0; idx < data_i_index.numElements(); ++idx) 2037 { 2038 if (localGlobalIndexMap.end() != localGlobalIndexMap.find(data_i_index(idx))) 2039 { 2040 compressedIndexToWriteOnServer[writtenCommSize](nbWritten) = localGlobalIndexMap[data_i_index(idx)]; 2041 ++nbWritten; 2042 } 2043 } 2044 2045 numberWrittenIndexes_[writtenCommSize] = 
nbWritten; 2046 if (isDistributed()) 2047 { 2048 2049 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 2050 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 2051 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 2052 } 2053 else 2054 totalNumberWrittenIndexes_[writtenCommSize] = numberWrittenIndexes_[writtenCommSize]; 2055 } 1773 2056 } 1774 2057 1775 2058 /*! 1776 Send index from client to server(s) 2059 Send all attributes from client to connected clients 2060 The attributes will be rebuilt on receiving side 2061 */ 2062 void CDomain::sendAttributes() 2063 { 2064 sendDistributionAttributes(); 2065 sendIndex(); 2066 sendMask(); 2067 sendLonLat(); 2068 sendArea(); 2069 sendDataIndex(); 2070 } 2071 2072 /*! 2073 Send global index and zoom index from client to connected client(s) 2074 zoom index can be smaller than global index 1777 2075 */ 1778 2076 void CDomain::sendIndex() 1779 2077 { 1780 2078 int ns, n, i, j, ind, nv, idx; 1781 CContext* context = CContext::getCurrent(); 1782 CContextClient* client=context->client; 1783 1784 CEventClient eventIndex(getType(), EVENT_ID_INDEX); 1785 1786 list<CMessage> list_msgsIndex; 1787 list<CArray<int,1> > list_indi, list_indj, list_writtenInd; 1788 1789 std::map<int, std::vector<size_t> >::const_iterator it, iteMap; 1790 iteMap = indSrv_.end(); 1791 for (int k = 0; k < connectedServerRank_.size(); ++k) 2079 std::list<CContextClient*>::iterator it; 2080 for (it=clients.begin(); it!=clients.end(); ++it) 1792 2081 { 1793 int nbData = 0; 1794 int rank = connectedServerRank_[k]; 1795 it = indSrv_.find(rank); 1796 if (iteMap != it) 1797 nbData = it->second.size(); 1798 1799 list_indi.push_back(CArray<int,1>(nbData)); 1800 list_indj.push_back(CArray<int,1>(nbData)); 1801 1802 CArray<int,1>& indi = list_indi.back(); 1803 CArray<int,1>& indj = list_indj.back(); 1804 const std::vector<size_t>& temp = it->second; 1805 for (n = 0; n < nbData; ++n) 1806 { 1807 idx = static_cast<int>(it->second[n]); 1808 indi(n) = i_index(idx); 1809 indj(n) = j_index(idx); 1810 } 1811 1812 list_msgsIndex.push_back(CMessage()); 1813 1814 list_msgsIndex.back() << this->getId() << (int)type; // enum ne fonctionne pour les message => ToFix 1815 list_msgsIndex.back() << isCurvilinear; 1816 list_msgsIndex.back() << list_indi.back() << list_indj.back(); 1817 1818 if (isCompressible_) 1819 { 1820 std::vector<int>& writtenIndSrc = indWrittenSrv_[rank]; 1821 list_writtenInd.push_back(CArray<int,1>(writtenIndSrc.size())); 1822 CArray<int,1>& writtenInd = list_writtenInd.back(); 1823 1824 for (n = 0; n < writtenInd.numElements(); ++n) 1825 writtenInd(n) = writtenIndSrc[n]; 1826 1827 list_msgsIndex.back() << writtenInd; 1828 } 1829 1830 eventIndex.push(rank, nbConnectedClients_[rank], list_msgsIndex.back()); 1831 } 1832 1833 client->sendEvent(eventIndex); 2082 CContextClient* client = *it; 2083 2084 int serverSize = client->serverSize; 2085 CEventClient eventIndex(getType(), EVENT_ID_INDEX); 2086 2087 list<CMessage> list_msgsIndex; 2088 list<CArray<int,1> > list_indZoom, list_writtenInd, list_indGlob; 2089 2090 boost::unordered_map<int, vector<size_t> >::const_iterator itIndex, iteIndex; 2091 iteIndex = indSrv_[serverSize].end(); 2092 for (int k = 0; k < connectedServerRank_[serverSize].size(); ++k) 2093 { 2094 int nbIndGlob = 0; 2095 int rank = connectedServerRank_[serverSize][k]; 
2096 itIndex = indSrv_[serverSize].find(rank); 2097 if (iteIndex != itIndex) 2098 nbIndGlob = itIndex->second.size(); 2099 2100 list_indGlob.push_back(CArray<int,1>(nbIndGlob)); 2101 2102 CArray<int,1>& indGlob = list_indGlob.back(); 2103 for (n = 0; n < nbIndGlob; ++n) 2104 { 2105 indGlob(n) = static_cast<int>(itIndex->second[n]); 2106 } 2107 2108 list_msgsIndex.push_back(CMessage()); 2109 list_msgsIndex.back() << this->getId() << (int)type; // enum ne fonctionne pour les message => ToFix 2110 list_msgsIndex.back() << isCurvilinear; 2111 list_msgsIndex.back() << list_indGlob.back(); //list_indi.back() << list_indj.back(); 2112 2113 eventIndex.push(rank, nbSenders[serverSize][rank], list_msgsIndex.back()); 2114 } 2115 2116 client->sendEvent(eventIndex); 2117 } 1834 2118 } 1835 2119 1836 2120 /*! 1837 Send area from client to server(s) 2121 Send distribution from client to other clients 2122 Because a client in a level knows correctly the grid distribution of client on the next level 2123 it calculates this distribution then sends it to the corresponding clients on the next level 2124 */ 2125 void CDomain::sendDistributionAttributes(void) 2126 { 2127 std::list<CContextClient*>::iterator it; 2128 for (it=clients.begin(); it!=clients.end(); ++it) 2129 { 2130 CContextClient* client = *it; 2131 int nbServer = client->serverSize; 2132 std::vector<int> nGlobDomain(2); 2133 nGlobDomain[0] = this->ni_glo; 2134 nGlobDomain[1] = this->nj_glo; 2135 2136 CServerDistributionDescription serverDescription(nGlobDomain, nbServer); 2137 if (isUnstructed_) serverDescription.computeServerDistribution(false, 0); 2138 else serverDescription.computeServerDistribution(false, 1); 2139 2140 std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin(); 2141 std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes(); 2142 2143 CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT); 2144 if (client->isServerLeader()) 2145 { 2146 std::list<CMessage> msgs; 2147 2148 const std::list<int>& ranks = client->getRanksServerLeader(); 2149 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 2150 { 2151 // Use const int to ensure CMessage holds a copy of the value instead of just a reference 2152 const int ibegin_srv = serverIndexBegin[*itRank][0]; 2153 const int jbegin_srv = serverIndexBegin[*itRank][1]; 2154 const int ni_srv = serverDimensionSizes[*itRank][0]; 2155 const int nj_srv = serverDimensionSizes[*itRank][1]; 2156 2157 msgs.push_back(CMessage()); 2158 CMessage& msg = msgs.back(); 2159 msg << this->getId() ; 2160 msg << isUnstructed_; 2161 msg << ni_srv << ibegin_srv << nj_srv << jbegin_srv; 2162 msg << global_zoom_ni.getValue() << global_zoom_ibegin.getValue() << global_zoom_nj.getValue() << global_zoom_jbegin.getValue(); 2163 msg << isCompressible_; 2164 2165 event.push(*itRank,1,msg); 2166 } 2167 client->sendEvent(event); 2168 } 2169 else client->sendEvent(event); 2170 } 2171 } 2172 2173 /*! 
2174 Send mask index from client to connected client(s)
2175 */
2176 void CDomain::sendMask()
2177 {
2178 int ns, n, i, j, ind, nv, idx;
2179 std::list<CContextClient*>::iterator it;
2180 for (it=clients.begin(); it!=clients.end(); ++it)
2181 {
2182 CContextClient* client = *it;
2183 int serverSize = client->serverSize;
2184
2185 // send mask for each connected server
2186 CEventClient eventMask(getType(), EVENT_ID_MASK);
2187
2188 list<CMessage> list_msgsMask;
2189 list<CArray<bool,1> > list_mask;
2190
2191 boost::unordered_map<int, vector<size_t> >::const_iterator it, iteMap;
2192 iteMap = indSrv_[serverSize].end();
2193 for (int k = 0; k < connectedServerRank_[serverSize].size(); ++k)
2194 {
2195 int nbData = 0;
2196 int rank = connectedServerRank_[serverSize][k];
2197 it = indSrv_[serverSize].find(rank);
2198 if (iteMap != it)
2199 nbData = it->second.size();
2200 list_mask.push_back(CArray<bool,1>(nbData));
2201
2202 const std::vector<size_t>& temp = it->second;
2203 for (n = 0; n < nbData; ++n)
2204 {
2205 idx = static_cast<int>(it->second[n]);
2206 list_mask.back()(n) = domainMask(globalLocalIndexMap_[idx]);
2207 }
2208
2209 list_msgsMask.push_back(CMessage());
2210 list_msgsMask.back() << this->getId() << list_mask.back();
2211 eventMask.push(rank, nbSenders[serverSize][rank], list_msgsMask.back());
2212 }
2213 client->sendEvent(eventMask);
2214 }
2215 }
2216
2217 /*!
2218 Send area from client to connected client(s)
1838 2219 */
1839 2220 void CDomain::sendArea()
…
1842 2223
1843 2224 int ns, n, i, j, ind, nv, idx;
1844 2225 CContext* context = CContext::getCurrent();
1845 CContextClient* client=context->client;
1846
1847 // send area for each connected server
1848 CEventClient eventArea(getType(), EVENT_ID_AREA);
1849
1850 list<CMessage> list_msgsArea;
1851 list<CArray<double,1> > list_area;
1852
1853 std::map<int, std::vector<size_t> >::const_iterator it, iteMap;
1854 iteMap = indSrv_.end();
1855 for (int k = 0; k < connectedServerRank_.size(); ++k)
2225 std::list<CContextClient*>::iterator it;
2226
2227 for (it=clients.begin(); it!=clients.end(); ++it)
1856 2228 {
1857 int nbData = 0;
1858 int rank = connectedServerRank_[k];
1859 it = indSrv_.find(rank);
1860 if (iteMap != it)
1861 nbData = it->second.size();
1862 list_area.push_back(CArray<double,1>(nbData));
1863
1864 const std::vector<size_t>& temp = it->second;
1865 for (n = 0; n < nbData; ++n)
1866 {
1867 idx = static_cast<int>(it->second[n]);
1868 i = i_index(idx);
1869 j = j_index(idx);
1870 if (hasArea)
1871 list_area.back()(n) = area(i - ibegin, j - jbegin);
1872 }
1873
1874 list_msgsArea.push_back(CMessage());
1875 list_msgsArea.back() << this->getId() << list_area.back();
1876 eventArea.push(rank, nbConnectedClients_[rank], list_msgsArea.back());
1877 }
1878 client->sendEvent(eventArea);
2229 CContextClient* client = *it;
2230 int serverSize = client->serverSize;
2231
2232 // send area for each connected server
2233 CEventClient eventArea(getType(), EVENT_ID_AREA);
2234
2235 list<CMessage> list_msgsArea;
2236 list<CArray<double,1> > list_area;
2237
2238 boost::unordered_map<int, vector<size_t> >::const_iterator it, iteMap;
2239 iteMap = indSrv_[serverSize].end();
2240 for (int k = 0; k < connectedServerRank_[serverSize].size(); ++k)
2241 {
2242 int nbData = 0;
2243 int rank = connectedServerRank_[serverSize][k];
2244 it = indSrv_[serverSize].find(rank);
2245 if (iteMap != it)
2246 nbData = it->second.size();
2247 list_area.push_back(CArray<double,1>(nbData));
2248
2249 const std::vector<size_t>& temp = it->second;
2250
for (n = 0; n < nbData; ++n) 2251 { 2252 idx = static_cast<int>(it->second[n]); 2253 list_area.back()(n) = areavalue(globalLocalIndexMap_[idx]); 2254 } 2255 2256 list_msgsArea.push_back(CMessage()); 2257 list_msgsArea.back() << this->getId() << hasArea; 2258 list_msgsArea.back() << list_area.back(); 2259 eventArea.push(rank, nbSenders[serverSize][rank], list_msgsArea.back()); 2260 } 2261 client->sendEvent(eventArea); 2262 } 1879 2263 } 1880 2264 1881 2265 /*! 1882 2266 Send longitude and latitude from client to servers 1883 Each client send long and lat information to corresponding connected server(s).2267 Each client send long and lat information to corresponding connected clients(s). 1884 2268 Because longitude and latitude are optional, this function only called if latitude and longitude exist 1885 2269 */ … … 1889 2273 1890 2274 int ns, n, i, j, ind, nv, idx; 1891 CContext* context = CContext::getCurrent(); 1892 CContextClient* client=context->client; 1893 1894 // send lon lat for each connected server 1895 CEventClient eventLon(getType(), EVENT_ID_LON); 1896 CEventClient eventLat(getType(), EVENT_ID_LAT); 1897 1898 list<CMessage> list_msgsLon, list_msgsLat; 1899 list<CArray<double,1> > list_lon, list_lat; 1900 list<CArray<double,2> > list_boundslon, list_boundslat; 1901 1902 std::map<int, std::vector<size_t> >::const_iterator it, iteMap; 1903 iteMap = indSrv_.end(); 1904 for (int k = 0; k < connectedServerRank_.size(); ++k) 2275 std::list<CContextClient*>::iterator it; 2276 for (it=clients.begin(); it!=clients.end(); ++it) 1905 2277 { 1906 int nbData = 0;1907 int rank = connectedServerRank_[k];1908 it = indSrv_.find(rank); 1909 if (iteMap != it)1910 nbData = it->second.size();1911 1912 list_lon.push_back(CArray<double,1>(nbData)); 1913 list _lat.push_back(CArray<double,1>(nbData));1914 1915 if (hasBounds)1916 { 1917 list_boundslon.push_back(CArray<double,2>(nvertex, nbData));1918 list_boundslat.push_back(CArray<double,2>(nvertex, nbData));1919 }1920 1921 CArray<double,1>& lon = list_lon.back();1922 CArray<double,1>& lat = list_lat.back();1923 const std::vector<size_t>& temp = it->second;1924 for (n = 0; n < nbData; ++n)1925 {1926 idx = static_cast<int>(it->second[n]); 1927 l on(n) = lonvalue_client(idx);1928 l at(n) = latvalue_client(idx);2278 CContextClient* client = *it; 2279 int serverSize = client->serverSize; 2280 2281 // send lon lat for each connected server 2282 CEventClient eventLon(getType(), EVENT_ID_LON); 2283 CEventClient eventLat(getType(), EVENT_ID_LAT); 2284 2285 list<CMessage> list_msgsLon, list_msgsLat; 2286 list<CArray<double,1> > list_lon, list_lat; 2287 list<CArray<double,2> > list_boundslon, list_boundslat; 2288 2289 boost::unordered_map<int, vector<size_t> >::const_iterator it, iteMap; 2290 iteMap = indSrv_[serverSize].end(); 2291 for (int k = 0; k < connectedServerRank_[serverSize].size(); ++k) 2292 { 2293 int nbData = 0; 2294 int rank = connectedServerRank_[serverSize][k]; 2295 it = indSrv_[serverSize].find(rank); 2296 if (iteMap != it) 2297 nbData = it->second.size(); 2298 2299 list_lon.push_back(CArray<double,1>(nbData)); 2300 list_lat.push_back(CArray<double,1>(nbData)); 1929 2301 1930 2302 if (hasBounds) 1931 2303 { 1932 CArray<double,2>& boundslon = list_boundslon.back(); 1933 CArray<double,2>& boundslat = list_boundslat.back(); 1934 1935 for (nv = 0; nv < nvertex; ++nv) 2304 list_boundslon.push_back(CArray<double,2>(nvertex, nbData)); 2305 list_boundslat.push_back(CArray<double,2>(nvertex, nbData)); 2306 } 2307 2308 CArray<double,1>& lon = 
list_lon.back(); 2309 CArray<double,1>& lat = list_lat.back(); 2310 const std::vector<size_t>& temp = it->second; 2311 for (n = 0; n < nbData; ++n) 2312 { 2313 idx = static_cast<int>(it->second[n]); 2314 int localInd = globalLocalIndexMap_[idx]; 2315 lon(n) = lonvalue(localInd); 2316 lat(n) = latvalue(localInd); 2317 2318 if (hasBounds) 1936 2319 { 1937 boundslon(nv, n) = bounds_lon_client(nv, idx); 1938 boundslat(nv, n) = bounds_lat_client(nv, idx); 2320 CArray<double,2>& boundslon = list_boundslon.back(); 2321 CArray<double,2>& boundslat = list_boundslat.back(); 2322 2323 for (nv = 0; nv < nvertex; ++nv) 2324 { 2325 boundslon(nv, n) = bounds_lonvalue(nv, localInd); 2326 boundslat(nv, n) = bounds_latvalue(nv, localInd); 2327 } 1939 2328 } 1940 2329 } 1941 } 1942 1943 list_msgsLon.push_back(CMessage()); 1944 list_msgsLat.push_back(CMessage()); 1945 1946 list_msgsLon.back() << this->getId() << list_lon.back(); 1947 list_msgsLat.back() << this->getId() << list_lat.back(); 1948 1949 if (hasBounds) 1950 { 1951 list_msgsLon.back() << list_boundslon.back(); 1952 list_msgsLat.back() << list_boundslat.back(); 1953 } 1954 1955 eventLon.push(rank, nbConnectedClients_[rank], list_msgsLon.back()); 1956 eventLat.push(rank, nbConnectedClients_[rank], list_msgsLat.back()); 1957 } 1958 1959 client->sendEvent(eventLon); 1960 client->sendEvent(eventLat); 2330 2331 list_msgsLon.push_back(CMessage()); 2332 list_msgsLat.push_back(CMessage()); 2333 2334 list_msgsLon.back() << this->getId() << hasLonLat; 2335 if (hasLonLat) 2336 list_msgsLon.back() << list_lon.back(); 2337 list_msgsLon.back() << hasBounds; 2338 if (hasBounds) 2339 { 2340 list_msgsLon.back() << list_boundslon.back(); 2341 } 2342 2343 list_msgsLat.back() << this->getId() << hasLonLat; 2344 if (hasLonLat) 2345 list_msgsLat.back() << list_lat.back(); 2346 list_msgsLat.back() << hasBounds; 2347 if (hasBounds) 2348 { 2349 list_msgsLat.back() << list_boundslat.back(); 2350 } 2351 2352 eventLon.push(rank, nbSenders[serverSize][rank], list_msgsLon.back()); 2353 eventLat.push(rank, nbSenders[serverSize][rank], list_msgsLat.back()); 2354 } 2355 client->sendEvent(eventLon); 2356 client->sendEvent(eventLat); 2357 } 1961 2358 } 1962 2359 1963 2360 /*! 1964 Send some optional information to server(s) 1965 In the future, this function can be extended with more optional information to send 2361 Send data index to corresponding connected clients. 2362 Data index can be compressed however, we always send decompressed data index 2363 and they will be compressed on receiving. 2364 The compressed index are represented with 1 and others are represented with -1 1966 2365 */ 1967 void CDomain::sendLonLatArea(void) 1968 { 1969 sendIndex(); 1970 sendLonLat(); 1971 sendArea(); 1972 } 1973 2366 void CDomain::sendDataIndex() 2367 { 2368 int ns, n, i, j, ind, nv, idx; 2369 std::list<CContextClient*>::iterator it; 2370 for (it=clients.begin(); it!=clients.end(); ++it) 2371 { 2372 CContextClient* client = *it; 2373 2374 int serverSize = client->serverSize; 2375 2376 // send area for each connected server 2377 CEventClient eventDataIndex(getType(), EVENT_ID_DATA_INDEX); 2378 2379 list<CMessage> list_msgsDataIndex; 2380 list<CArray<int,1> > list_data_i_index, list_data_j_index; 2381 2382 int nbIndex = i_index.numElements(); 2383 int niByIndex = max(i_index) - min(i_index) + 1; 2384 int njByIndex = max(j_index) - min(j_index) + 1; 2385 int dataIindexBound = (1 == data_dim) ? (niByIndex * njByIndex) : niByIndex; 2386 int dataJindexBound = (1 == data_dim) ? 
(niByIndex * njByIndex) : njByIndex; 2387 2388 2389 CArray<int,1> dataIIndex(nbIndex), dataJIndex(nbIndex); 2390 dataIIndex = -1; 2391 dataJIndex = -1; 2392 ind = 0; 2393 2394 for (idx = 0; idx < data_i_index.numElements(); ++idx) 2395 { 2396 int dataIidx = data_i_index(idx) + data_ibegin; 2397 int dataJidx = data_j_index(idx) + data_jbegin; 2398 if ((0 <= dataIidx) && (dataIidx < dataIindexBound) && 2399 (0 <= dataJidx) && (dataJidx < dataJindexBound)) 2400 { 2401 dataIIndex((1 == data_dim) ? dataIidx : dataJidx * ni + dataIidx) = 1; //i_index(dataIidx);//dataIidx; 2402 dataJIndex((1 == data_dim) ? dataIidx : dataJidx * ni + dataIidx) = 1; //j_index(dataJidx);// 2403 } 2404 } 2405 2406 boost::unordered_map<int, vector<size_t> >::const_iterator it, iteMap; 2407 iteMap = indSrv_[serverSize].end(); 2408 for (int k = 0; k < connectedServerRank_[serverSize].size(); ++k) 2409 { 2410 int nbData = 0; 2411 int rank = connectedServerRank_[serverSize][k]; 2412 it = indSrv_[serverSize].find(rank); 2413 if (iteMap != it) 2414 nbData = it->second.size(); 2415 list_data_i_index.push_back(CArray<int,1>(nbData)); 2416 list_data_j_index.push_back(CArray<int,1>(nbData)); 2417 2418 const std::vector<size_t>& temp = it->second; 2419 for (n = 0; n < nbData; ++n) 2420 { 2421 idx = static_cast<int>(it->second[n]); 2422 i = globalLocalIndexMap_[idx]; 2423 list_data_i_index.back()(n) = dataIIndex(i); 2424 list_data_j_index.back()(n) = dataJIndex(i); 2425 } 2426 2427 list_msgsDataIndex.push_back(CMessage()); 2428 list_msgsDataIndex.back() << this->getId(); 2429 list_msgsDataIndex.back() << list_data_i_index.back() << list_data_j_index.back(); 2430 eventDataIndex.push(rank, nbSenders[serverSize][rank], list_msgsDataIndex.back()); 2431 } 2432 client->sendEvent(eventDataIndex); 2433 } 2434 } 2435 1974 2436 bool CDomain::dispatchEvent(CEventServer& event) 1975 2437 { … … 1980 2442 { 1981 2443 case EVENT_ID_SERVER_ATTRIBUT: 1982 recv ServerAttribut(event);2444 recvDistributionAttributes(event); 1983 2445 return true; 1984 2446 break; 1985 2447 case EVENT_ID_INDEX: 1986 2448 recvIndex(event); 2449 return true; 2450 break; 2451 case EVENT_ID_MASK: 2452 recvMask(event); 1987 2453 return true; 1988 2454 break; … … 1998 2464 recvArea(event); 1999 2465 return true; 2466 break; 2467 case EVENT_ID_DATA_INDEX: 2468 recvDataIndex(event); 2469 return true; 2000 2470 break; 2001 2471 default: … … 2008 2478 2009 2479 /*! 2480 Receive index event from clients(s) 2481 \param[in] event event contain info about rank and associated index 2482 */ 2483 void CDomain::recvIndex(CEventServer& event) 2484 { 2485 string domainId; 2486 std::map<int, CBufferIn*> rankBuffers; 2487 2488 list<CEventServer::SSubEvent>::iterator it; 2489 for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it) 2490 { 2491 CBufferIn* buffer = it->buffer; 2492 *buffer >> domainId; 2493 rankBuffers[it->rank] = buffer; 2494 } 2495 get(domainId)->recvIndex(rankBuffers); 2496 } 2497 2498 /*! 2499 Receive index information from client(s). We use the global index for mapping index between 2500 sending clients and receiving clients. 
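A small sketch of the decoding applied below to each received global index
(standalone; the parameter names are illustrative and mirror the domain attributes
that play these roles in the function):

  // Recover the local storage offset of a flat global index, given the global
  // width ni_glo, the local origin (ibegin, jbegin) and the local width ni.
  // Negative components are clamped to 0, as in the receiving loop below.
  inline int localOffset(size_t index, int ni_glo, int ibegin, int jbegin, int ni)
  {
    int iIndex = static_cast<int>(index % ni_glo) - ibegin; if (iIndex < 0) iIndex = 0;
    int jIndex = static_cast<int>(index / ni_glo) - jbegin; if (jIndex < 0) jIndex = 0;
    return iIndex + ni * jIndex;
  }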
2501 \param[in] rankBuffers rank of sending client and the corresponding receive buffer 2502 */ 2503 void CDomain::recvIndex(std::map<int, CBufferIn*>& rankBuffers) 2504 { 2505 int nbReceived = rankBuffers.size(), i, ind, index, type_int, iIndex, jIndex; 2506 recvClientRanks_.resize(nbReceived); 2507 2508 std::map<int, CBufferIn*>::iterator it = rankBuffers.begin(), ite = rankBuffers.end(); 2509 ind = 0; 2510 for (ind = 0; it != ite; ++it, ++ind) 2511 { 2512 recvClientRanks_[ind] = it->first; 2513 CBufferIn& buffer = *(it->second); 2514 buffer >> type_int >> isCurvilinear >> indGlob_[it->first]; 2515 type.setValue((type_attr::t_enum)type_int); // probleme des type enum avec les buffers : ToFix 2516 } 2517 int nbIndGlob = 0; 2518 for (i = 0; i < nbReceived; ++i) 2519 { 2520 nbIndGlob += indGlob_[recvClientRanks_[i]].numElements(); 2521 } 2522 2523 globalLocalIndexMap_.rehash(std::ceil(nbIndGlob/globalLocalIndexMap_.max_load_factor())); 2524 i_index.resize(nbIndGlob); 2525 j_index.resize(nbIndGlob); 2526 int nbIndexGlobMax = nbIndGlob, nbIndLoc; 2527 2528 nbIndGlob = 0; 2529 for (i = 0; i < nbReceived; ++i) 2530 { 2531 CArray<int,1>& tmp = indGlob_[recvClientRanks_[i]]; 2532 for (ind = 0; ind < tmp.numElements(); ++ind) 2533 { 2534 index = tmp(ind); 2535 if (0 == globalLocalIndexMap_.count(index)) 2536 { 2537 iIndex = (index%ni_glo)-ibegin; 2538 iIndex = (iIndex < 0) ? 0 : iIndex; 2539 jIndex = (index/ni_glo)-jbegin; 2540 jIndex = (jIndex < 0) ? 0 : jIndex; 2541 nbIndLoc = iIndex + ni * jIndex; 2542 if (nbIndLoc < nbIndexGlobMax) 2543 { 2544 i_index(nbIndLoc) = index % ni_glo; 2545 j_index(nbIndLoc) = index / ni_glo; 2546 globalLocalIndexMap_[index] = nbIndLoc; 2547 ++nbIndGlob; 2548 } 2549 // i_index(nbIndGlob) = index % ni_glo; 2550 // j_index(nbIndGlob) = index / ni_glo; 2551 // globalLocalIndexMap_[index] = nbIndGlob; 2552 // ++nbIndGlob; 2553 } 2554 } 2555 } 2556 2557 if (nbIndGlob==0) 2558 { 2559 i_index.resize(nbIndGlob); 2560 j_index.resize(nbIndGlob); 2561 } 2562 else 2563 { 2564 i_index.resizeAndPreserve(nbIndGlob); 2565 j_index.resizeAndPreserve(nbIndGlob); 2566 } 2567 } 2568 2569 /*! 
2010 2570 Receive attributes event from clients(s) 2011 2571 \param[in] event event contain info about rank and associated attributes 2012 2572 */ 2013 void CDomain::recv ServerAttribut(CEventServer& event)2573 void CDomain::recvDistributionAttributes(CEventServer& event) 2014 2574 { 2015 2575 CBufferIn* buffer=event.subEvents.begin()->buffer; 2016 2576 string domainId ; 2017 2577 *buffer>>domainId ; 2018 get(domainId)->recv ServerAttribut(*buffer);2578 get(domainId)->recvDistributionAttributes(*buffer); 2019 2579 } 2020 2580 … … 2024 2584 \param[in] buffer message containing attributes info 2025 2585 */ 2026 void CDomain::recvServerAttribut(CBufferIn& buffer) 2027 { 2586 void CDomain::recvDistributionAttributes(CBufferIn& buffer) 2587 { 2588 int ni_tmp, ibegin_tmp, nj_tmp, jbegin_tmp; 2028 2589 int global_zoom_ni_tmp, global_zoom_ibegin_tmp, global_zoom_nj_tmp, global_zoom_jbegin_tmp; 2029 buffer >> ni_srv >> ibegin_srv >> iend_srv >> nj_srv >> jbegin_srv >> jend_srv2030 >> global_zoom_ni_tmp >> global_zoom_ibegin_tmp >> global_zoom_nj_tmp >> global_zoom_jbegin_tmp 2590 buffer >> isUnstructed_ >> ni_tmp >> ibegin_tmp >> nj_tmp >> jbegin_tmp 2591 >> global_zoom_ni_tmp >> global_zoom_ibegin_tmp >> global_zoom_nj_tmp >> global_zoom_jbegin_tmp 2031 2592 >> isCompressible_; 2593 ni.setValue(ni_tmp); 2594 ibegin.setValue(ibegin_tmp); 2595 nj.setValue(nj_tmp); 2596 jbegin.setValue(jbegin_tmp); 2032 2597 2033 2598 global_zoom_ni.setValue(global_zoom_ni_tmp); … … 2036 2601 global_zoom_jbegin.setValue(global_zoom_jbegin_tmp); 2037 2602 2038 int zoom_iend = global_zoom_ibegin + global_zoom_ni - 1; 2039 int zoom_jend = global_zoom_jbegin + global_zoom_nj - 1; 2040 2041 zoom_ibegin_srv = global_zoom_ibegin > ibegin_srv ? global_zoom_ibegin : ibegin_srv ; 2042 zoom_iend_srv = zoom_iend < iend_srv ? zoom_iend : iend_srv ; 2043 zoom_ni_srv=zoom_iend_srv-zoom_ibegin_srv+1 ; 2044 2045 zoom_jbegin_srv = global_zoom_jbegin > jbegin_srv ? global_zoom_jbegin : jbegin_srv ; 2046 zoom_jend_srv = zoom_jend < jend_srv ? zoom_jend : jend_srv ; 2047 zoom_nj_srv=zoom_jend_srv-zoom_jbegin_srv+1 ; 2048 2049 if (zoom_ni_srv<=0 || zoom_nj_srv<=0) 2603 int iend = ibegin + ni - 1; 2604 int jend = jbegin + nj - 1; 2605 int zoom_iend_glob = global_zoom_ibegin + global_zoom_ni - 1; 2606 int zoom_jend_glob = global_zoom_jbegin + global_zoom_nj - 1; 2607 2608 zoom_ibegin.setValue(global_zoom_ibegin > ibegin ? global_zoom_ibegin : ibegin); 2609 int zoom_iend = zoom_iend_glob < iend ? zoom_iend_glob : iend ; 2610 zoom_ni.setValue(zoom_iend-zoom_ibegin+1); 2611 2612 zoom_jbegin.setValue(global_zoom_jbegin > jbegin ? global_zoom_jbegin : jbegin); 2613 int zoom_jend = zoom_jend_glob < jend ? zoom_jend_glob : jend ; 2614 zoom_nj.setValue(zoom_jend-zoom_jbegin+1); 2615 2616 if (zoom_ni<=0 || zoom_nj<=0) 2050 2617 { 2051 zoom_ibegin_srv=0 ; zoom_iend_srv=0 ; zoom_ni_srv=0 ; 2052 zoom_jbegin_srv=0 ; zoom_jend_srv=0 ; zoom_nj_srv=0 ; 2053 } 2054 lonvalue_srv.resize(zoom_ni_srv*zoom_nj_srv) ; 2055 lonvalue_srv = 0. ; 2056 latvalue_srv.resize(zoom_ni_srv*zoom_nj_srv) ; 2057 latvalue_srv = 0. ; 2058 if (hasBounds) 2059 { 2060 bounds_lon_srv.resize(nvertex,zoom_ni_srv*zoom_nj_srv) ; 2061 bounds_lon_srv = 0. ; 2062 bounds_lat_srv.resize(nvertex,zoom_ni_srv*zoom_nj_srv) ; 2063 bounds_lat_srv = 0. 
; 2064 } 2065 2066 if (hasArea) 2067 { 2068 area_srv.resize(zoom_ni_srv * zoom_nj_srv); 2069 area_srv = 0.; 2618 zoom_ni=0 ; zoom_ibegin=global_zoom_ibegin ; //=0; zoom_iend=0 ; 2619 zoom_nj=0 ; zoom_jbegin=global_zoom_jbegin ; //=0; zoom_jend=0 ; 2070 2620 } 2071 2621 … … 2073 2623 2074 2624 /*! 2075 Receive indexevent from clients(s)2076 \param[in] event event contain info about rank and associated index2625 Receive area event from clients(s) 2626 \param[in] event event contain info about rank and associated area 2077 2627 */ 2078 void CDomain::recvIndex(CEventServer& event) 2079 { 2080 CDomain* domain; 2628 void CDomain::recvMask(CEventServer& event) 2629 { 2630 string domainId; 2631 std::map<int, CBufferIn*> rankBuffers; 2081 2632 2082 2633 list<CEventServer::SSubEvent>::iterator it; 2083 2634 for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it) 2635 { 2636 CBufferIn* buffer = it->buffer; 2637 *buffer >> domainId; 2638 rankBuffers[it->rank] = buffer; 2639 } 2640 get(domainId)->recvMask(rankBuffers); 2641 } 2642 2643 2644 /*! 2645 Receive mask information from client(s) 2646 \param[in] rankBuffers rank of sending client and the corresponding receive buffer 2647 */ 2648 void CDomain::recvMask(std::map<int, CBufferIn*>& rankBuffers) 2649 { 2650 int nbReceived = rankBuffers.size(), i, ind, index, lInd; 2651 if (nbReceived != recvClientRanks_.size()) 2652 ERROR("void CDomain::recvMask(std::map<int, CBufferIn*>& rankBuffers)", 2653 << "The number of sending clients is not correct." 2654 << "Expected number: " << recvClientRanks_.size() << " but received " << nbReceived); 2655 2656 vector<CArray<bool,1> > recvMaskValue(nbReceived); 2657 for (i = 0; i < recvClientRanks_.size(); ++i) 2084 2658 { 2085 CBufferIn* buffer = it->buffer; 2086 string domainId; 2087 *buffer >> domainId; 2088 domain = get(domainId); 2089 domain->recvIndex(it->rank, *buffer); 2090 } 2091 2092 if (domain->isCompressible_) 2659 int rank = recvClientRanks_[i]; 2660 CBufferIn& buffer = *(rankBuffers[rank]); 2661 buffer >> recvMaskValue[i]; 2662 } 2663 2664 int nbMaskInd = 0; 2665 for (i = 0; i < nbReceived; ++i) 2093 2666 { 2094 std::sort(domain->indexesToWrite.begin(), domain->indexesToWrite.end()); 2095 2096 CContextServer* server = CContext::getCurrent()->server; 2097 domain->numberWrittenIndexes_ = domain->indexesToWrite.size(); 2098 MPI_Allreduce(&domain->numberWrittenIndexes_, &domain->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 2099 MPI_Scan(&domain->numberWrittenIndexes_, &domain->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 2100 domain->offsetWrittenIndexes_ -= domain->numberWrittenIndexes_; 2101 } 2102 } 2103 2104 /*! 2105 Receive index information from client(s) 2106 \param[in] rank rank of client source 2107 \param[in] buffer message containing index info 2108 */ 2109 void CDomain::recvIndex(int rank, CBufferIn& buffer) 2110 { 2111 int type_int; 2112 buffer >> type_int >> isCurvilinear >> indiSrv[rank] >> indjSrv[rank]; 2113 type.setValue((type_attr::t_enum)type_int); // probleme des type enum avec les buffers : ToFix 2114 2115 if (isCompressible_) 2667 nbMaskInd += recvMaskValue[i].numElements(); 2668 } 2669 2670 if (nbMaskInd != globalLocalIndexMap_.size()) 2671 info (0) << "If the domain " << this->getDomainOutputName() <<" does not have overlapped region between processes." 
2672 << "Something must be wrong with mask index "<< std::endl; 2673 2674 nbMaskInd = globalLocalIndexMap_.size(); 2675 mask_1d.resize(nbMaskInd); 2676 domainMask.resize(nbMaskInd); 2677 mask_1d = false; 2678 2679 for (i = 0; i < nbReceived; ++i) 2116 2680 { 2117 CArray<int, 1> writtenIndexes; 2118 buffer >> writtenIndexes; 2119 indexesToWrite.reserve(indexesToWrite.size() + writtenIndexes.numElements()); 2120 for (int i = 0; i < writtenIndexes.numElements(); ++i) 2121 indexesToWrite.push_back(writtenIndexes(i)); 2122 } 2681 CArray<int,1>& tmpInd = indGlob_[recvClientRanks_[i]]; 2682 CArray<bool,1>& tmp = recvMaskValue[i]; 2683 for (ind = 0; ind < tmp.numElements(); ++ind) 2684 { 2685 lInd = globalLocalIndexMap_[size_t(tmpInd(ind))]; 2686 if (!mask_1d(lInd)) // Only rewrite mask_1d if it's not true 2687 mask_1d(lInd) = tmp(ind); 2688 } 2689 } 2690 domainMask=mask_1d ; 2123 2691 } 2124 2692 … … 2129 2697 void CDomain::recvLon(CEventServer& event) 2130 2698 { 2699 string domainId; 2700 std::map<int, CBufferIn*> rankBuffers; 2701 2131 2702 list<CEventServer::SSubEvent>::iterator it; 2132 2703 for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it) 2133 { 2704 { 2134 2705 CBufferIn* buffer = it->buffer; 2135 string domainId;2136 2706 *buffer >> domainId; 2137 get(domainId)->recvLon(it->rank, *buffer); 2138 } 2707 rankBuffers[it->rank] = buffer; 2708 } 2709 get(domainId)->recvLon(rankBuffers); 2139 2710 } 2140 2711 2141 2712 /*! 2142 2713 Receive longitude information from client(s) 2143 \param[in] rank rank of client source 2144 \param[in] buffer message containing longitude info 2714 \param[in] rankBuffers rank of sending client and the corresponding receive buffer 2145 2715 */ 2146 void CDomain::recvLon(int rank, CBufferIn& buffer) 2147 { 2148 CArray<int,1> &indi = indiSrv[rank], &indj = indjSrv[rank]; 2149 CArray<double,1> lon; 2150 CArray<double,2> boundslon; 2151 2152 buffer >> lon; 2153 2154 if (hasBounds) buffer >> boundslon; 2155 2156 int i, j, ind_srv; 2157 for (int ind = 0; ind < indi.numElements(); ind++) 2716 void CDomain::recvLon(std::map<int, CBufferIn*>& rankBuffers) 2717 { 2718 int nbReceived = rankBuffers.size(), i, ind, index, iindex, jindex, lInd; 2719 if (nbReceived != recvClientRanks_.size()) 2720 ERROR("void CDomain::recvLon(std::map<int, CBufferIn*>& rankBuffers)", 2721 << "The number of sending clients is not correct." 2722 << "Expected number: " << recvClientRanks_.size() << " but received " << nbReceived); 2723 2724 vector<CArray<double,1> > recvLonValue(nbReceived); 2725 vector<CArray<double,2> > recvBoundsLonValue(nbReceived); 2726 for (i = 0; i < recvClientRanks_.size(); ++i) 2158 2727 { 2159 i = indi(ind); j = indj(ind); 2160 ind_srv = (i - zoom_ibegin_srv) + (j - zoom_jbegin_srv) * zoom_ni_srv; 2161 lonvalue_srv(ind_srv) = lon(ind); 2728 int rank = recvClientRanks_[i]; 2729 CBufferIn& buffer = *(rankBuffers[rank]); 2730 buffer >> hasLonLat; 2731 if (hasLonLat) 2732 buffer >> recvLonValue[i]; 2733 buffer >> hasBounds; 2162 2734 if (hasBounds) 2163 { 2164 for (int nv = 0; nv < nvertex; ++nv) 2165 bounds_lon_srv(nv, ind_srv) = boundslon(nv, ind); 2166 } 2735 buffer >> recvBoundsLonValue[i]; 2736 } 2737 2738 if (hasLonLat) 2739 { 2740 int nbLonInd = 0; 2741 for (i = 0; i < nbReceived; ++i) 2742 { 2743 nbLonInd += recvLonValue[i].numElements(); 2744 } 2745 2746 if (nbLonInd != globalLocalIndexMap_.size()) 2747 info (0) << "If the domain " << this->getDomainOutputName() <<" does not have overlapped region between processes." 
2748 << "Something must be wrong with longitude index "<< std::endl; 2749 2750 nbLonInd = globalLocalIndexMap_.size(); 2751 lonvalue.resize(nbLonInd); 2752 if (hasBounds) 2753 { 2754 bounds_lonvalue.resize(nvertex,nbLonInd); 2755 bounds_lonvalue = 0.; 2756 } 2757 2758 nbLonInd = 0; 2759 for (i = 0; i < nbReceived; ++i) 2760 { 2761 CArray<int,1>& tmpInd = indGlob_[recvClientRanks_[i]]; 2762 CArray<double,1>& tmp = recvLonValue[i]; 2763 for (ind = 0; ind < tmp.numElements(); ++ind) 2764 { 2765 lInd = globalLocalIndexMap_[size_t(tmpInd(ind))]; 2766 lonvalue(lInd) = tmp(ind); 2767 if (hasBounds) 2768 { 2769 for (int nv = 0; nv < nvertex; ++nv) 2770 bounds_lonvalue(nv, lInd) = recvBoundsLonValue[i](nv, ind); 2771 } 2772 } 2773 } 2167 2774 } 2168 2775 } … … 2174 2781 void CDomain::recvLat(CEventServer& event) 2175 2782 { 2783 string domainId; 2784 std::map<int, CBufferIn*> rankBuffers; 2785 2176 2786 list<CEventServer::SSubEvent>::iterator it; 2177 2787 for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it) 2178 { 2788 { 2179 2789 CBufferIn* buffer = it->buffer; 2180 string domainId;2181 2790 *buffer >> domainId; 2182 get(domainId)->recvLat(it->rank, *buffer); 2183 } 2791 rankBuffers[it->rank] = buffer; 2792 } 2793 get(domainId)->recvLat(rankBuffers); 2184 2794 } 2185 2795 2186 2796 /*! 2187 2797 Receive latitude information from client(s) 2188 \param[in] rank rank of client source 2189 \param[in] buffer message containing latitude info 2798 \param[in] rankBuffers rank of sending client and the corresponding receive buffer 2190 2799 */ 2191 void CDomain::recvLat( int rank, CBufferIn& buffer)2192 { 2193 CArray<int,1> &indi = indiSrv[rank], &indj = indjSrv[rank];2194 CArray<double,1> lat;2195 CArray<double,2> boundslat;2196 2197 buffer >> lat;2198 if (hasBounds) buffer >> boundslat; 2199 2200 int i, j, ind_srv;2201 for (i nt ind = 0; ind < indi.numElements(); ind++)2800 void CDomain::recvLat(std::map<int, CBufferIn*>& rankBuffers) 2801 { 2802 int nbReceived = rankBuffers.size(), i, ind, index, iindex, jindex, lInd; 2803 if (nbReceived != recvClientRanks_.size()) 2804 ERROR("void CDomain::recvLat(std::map<int, CBufferIn*>& rankBuffers)", 2805 << "The number of sending clients is not correct." 2806 << "Expected number: " << recvClientRanks_.size() << " but received " << nbReceived); 2807 2808 vector<CArray<double,1> > recvLatValue(nbReceived); 2809 vector<CArray<double,2> > recvBoundsLatValue(nbReceived); 2810 for (i = 0; i < recvClientRanks_.size(); ++i) 2202 2811 { 2203 i = indi(ind); j = indj(ind); 2204 ind_srv = (i - zoom_ibegin_srv) + (j - zoom_jbegin_srv) * zoom_ni_srv; 2205 latvalue_srv(ind_srv) = lat(ind); 2812 int rank = recvClientRanks_[i]; 2813 CBufferIn& buffer = *(rankBuffers[rank]); 2814 buffer >> hasLonLat; 2815 if (hasLonLat) 2816 buffer >> recvLatValue[i]; 2817 buffer >> hasBounds; 2206 2818 if (hasBounds) 2207 { 2208 for (int nv = 0; nv < nvertex; nv++) 2209 bounds_lat_srv(nv, ind_srv) = boundslat(nv, ind); 2210 } 2819 buffer >> recvBoundsLatValue[i]; 2820 } 2821 2822 if (hasLonLat) 2823 { 2824 int nbLatInd = 0; 2825 for (i = 0; i < nbReceived; ++i) 2826 { 2827 nbLatInd += recvLatValue[i].numElements(); 2828 } 2829 2830 if (nbLatInd != globalLocalIndexMap_.size()) 2831 info (0) << "If the domain " << this->getDomainOutputName() <<" does not have overlapped region between processes." 
2832 << "Something must be wrong with latitude index "<< std::endl; 2833 2834 nbLatInd = globalLocalIndexMap_.size(); 2835 latvalue.resize(nbLatInd); 2836 if (hasBounds) 2837 { 2838 bounds_latvalue.resize(nvertex,nbLatInd); 2839 bounds_latvalue = 0. ; 2840 } 2841 2842 nbLatInd = 0; 2843 for (i = 0; i < nbReceived; ++i) 2844 { 2845 CArray<int,1>& tmpInd = indGlob_[recvClientRanks_[i]]; 2846 CArray<double,1>& tmp = recvLatValue[i]; 2847 for (ind = 0; ind < tmp.numElements(); ++ind) 2848 { 2849 lInd = globalLocalIndexMap_[size_t(tmpInd(ind))]; 2850 latvalue(lInd) = tmp(ind); 2851 if (hasBounds) 2852 { 2853 CArray<double,2>& boundslat = recvBoundsLatValue[i]; 2854 for (int nv = 0; nv < nvertex; ++nv) 2855 bounds_latvalue(nv, lInd) = boundslat(nv, ind); 2856 } 2857 ++nbLatInd; 2858 } 2859 } 2211 2860 } 2212 2861 } … … 2218 2867 void CDomain::recvArea(CEventServer& event) 2219 2868 { 2869 string domainId; 2870 std::map<int, CBufferIn*> rankBuffers; 2871 2220 2872 list<CEventServer::SSubEvent>::iterator it; 2221 2873 for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it) 2222 { 2874 { 2223 2875 CBufferIn* buffer = it->buffer; 2224 string domainId;2225 2876 *buffer >> domainId; 2226 get(domainId)->recvArea(it->rank, *buffer); 2227 } 2877 rankBuffers[it->rank] = buffer; 2878 } 2879 get(domainId)->recvArea(rankBuffers); 2228 2880 } 2229 2881 2230 2882 /*! 2231 2883 Receive area information from client(s) 2232 \param[in] rank rank of client source 2233 \param[in] buffer message containing area info 2884 \param[in] rankBuffers rank of sending client and the corresponding receive buffer 2234 2885 */ 2235 void CDomain::recvArea(int rank, CBufferIn& buffer) 2236 { 2237 CArray<int,1> &indi = indiSrv[rank], &indj = indjSrv[rank]; 2238 CArray<double,1> clientArea; 2239 2240 buffer >> clientArea; 2241 2242 int i, j, ind_srv; 2243 for (int ind = 0; ind < indi.numElements(); ind++) 2886 void CDomain::recvArea(std::map<int, CBufferIn*>& rankBuffers) 2887 { 2888 int nbReceived = rankBuffers.size(), i, ind, index, lInd; 2889 if (nbReceived != recvClientRanks_.size()) 2890 ERROR("void CDomain::recvArea(std::map<int, CBufferIn*>& rankBuffers)", 2891 << "The number of sending clients is not correct." 2892 << "Expected number: " << recvClientRanks_.size() << " but received " << nbReceived); 2893 2894 vector<CArray<double,1> > recvAreaValue(nbReceived); 2895 for (i = 0; i < recvClientRanks_.size(); ++i) 2244 2896 { 2245 i = indi(ind); j = indj(ind); 2246 ind_srv = (i - zoom_ibegin_srv) + (j - zoom_jbegin_srv) * zoom_ni_srv; 2247 area_srv(ind_srv) = clientArea(ind); 2897 int rank = recvClientRanks_[i]; 2898 CBufferIn& buffer = *(rankBuffers[rank]); 2899 buffer >> hasArea; 2900 if (hasArea) 2901 buffer >> recvAreaValue[i]; 2902 } 2903 2904 if (hasArea) 2905 { 2906 int nbAreaInd = 0; 2907 for (i = 0; i < nbReceived; ++i) 2908 { 2909 nbAreaInd += recvAreaValue[i].numElements(); 2910 } 2911 2912 if (nbAreaInd != globalLocalIndexMap_.size()) 2913 info (0) << "If the domain " << this->getDomainOutputName() <<" does not have overlapped region between processes." 
2914 << "Something must be wrong with area index "<< std::endl; 2915 2916 nbAreaInd = globalLocalIndexMap_.size(); 2917 areavalue.resize(nbAreaInd); 2918 nbAreaInd = 0; 2919 for (i = 0; i < nbReceived; ++i) 2920 { 2921 CArray<int,1>& tmpInd = indGlob_[recvClientRanks_[i]]; 2922 CArray<double,1>& tmp = recvAreaValue[i]; 2923 for (ind = 0; ind < tmp.numElements(); ++ind) 2924 { 2925 lInd = globalLocalIndexMap_[size_t(tmpInd(ind))]; 2926 areavalue(lInd) = tmp(ind); 2927 } 2928 } 2929 2248 2930 } 2249 2931 } … … 2280 2962 } 2281 2963 2964 /*! 2965 Receive data index event from clients(s) 2966 \param[in] event event contain info about rank and associated index 2967 */ 2968 void CDomain::recvDataIndex(CEventServer& event) 2969 { 2970 string domainId; 2971 std::map<int, CBufferIn*> rankBuffers; 2972 2973 list<CEventServer::SSubEvent>::iterator it; 2974 for (it = event.subEvents.begin(); it != event.subEvents.end(); ++it) 2975 { 2976 CBufferIn* buffer = it->buffer; 2977 *buffer >> domainId; 2978 rankBuffers[it->rank] = buffer; 2979 } 2980 get(domainId)->recvDataIndex(rankBuffers); 2981 } 2982 2983 /*! 2984 Receive data index information from client(s) 2985 A client receives data index from different clients to rebuild its own data index. 2986 Because we use global index + mask info to calculate the sending data to client(s), 2987 this data index must be updated with mask info (maybe it will change in the future) 2988 Because the data index is local, to rebuild data index of received client, we should use global index along with. 2989 2990 \param[in] rankBuffers rank of sending client and the corresponding receive buffer 2991 */ 2992 void CDomain::recvDataIndex(std::map<int, CBufferIn*>& rankBuffers) 2993 { 2994 int nbReceived = rankBuffers.size(), i, ind, index, indexI, indexJ, type_int, lInd; 2995 if (nbReceived != recvClientRanks_.size()) 2996 ERROR("void CDomain::recvDataIndex(std::map<int, CBufferIn*>& rankBuffers)", 2997 << "The number of sending clients is not correct." 2998 << "Expected number: " << recvClientRanks_.size() << " but received " << nbReceived); 2999 3000 vector<CArray<int,1> > recvDataIIndex(nbReceived),recvDataJIndex(nbReceived); 3001 for (i = 0; i < recvClientRanks_.size(); ++i) 3002 { 3003 int rank = recvClientRanks_[i]; 3004 CBufferIn& buffer = *(rankBuffers[rank]); 3005 buffer >> recvDataIIndex[i]; 3006 buffer >> recvDataJIndex[i]; 3007 } 3008 3009 int nbIndex = i_index.numElements(); 3010 CArray<int,1> dataIIndex(nbIndex), dataJIndex(nbIndex); 3011 dataIIndex = -1; dataJIndex = -1; 3012 3013 nbIndex = 0; 3014 for (i = 0; i < nbReceived; ++i) 3015 { 3016 CArray<int,1>& tmpInd = indGlob_[recvClientRanks_[i]]; 3017 CArray<int,1>& tmpI = recvDataIIndex[i]; 3018 CArray<int,1>& tmpJ = recvDataJIndex[i]; 3019 if ((tmpI.numElements() != tmpInd.numElements()) || (tmpJ.numElements() != tmpInd.numElements())) 3020 ERROR("void CDomain::recvDataIndex(std::map<int, CBufferIn*>& rankBuffers)", 3021 << "The number of global received index is not coherent with the number of received data index." 3022 << "Expected number of global index: " << tmpI.numElements() << " but received " << tmpInd.numElements()); 3023 3024 for (ind = 0; ind < tmpI.numElements(); ++ind) 3025 { 3026 lInd = globalLocalIndexMap_[size_t(tmpInd(ind))]; 3027 dataIIndex(lInd) = (-1 == dataIIndex(lInd)) ? tmpI(ind) : dataIIndex(lInd); // Only fill in dataIndex if there is no data 3028 dataJIndex(lInd) = (-1 == dataJIndex(lInd)) ? 
tmpJ(ind) : dataJIndex(lInd); 3029 3030 if (!domainMask(lInd)) // Include mask info into data index on the RECEIVE getServerDimensionSizes 3031 { 3032 dataIIndex(lInd) = dataJIndex(lInd) = -1; 3033 } 3034 } 3035 } 3036 3037 int nbCompressedData = 0; 3038 for (ind = 0; ind < dataIIndex.numElements(); ++ind) 3039 { 3040 indexI = dataIIndex(ind); indexJ = dataJIndex(ind); 3041 if ((0 <= indexI) && (0 <= indexJ)) 3042 ++nbCompressedData; 3043 } 3044 3045 data_i_index.resize(nbCompressedData); 3046 data_j_index.resize(nbCompressedData); 3047 3048 nbCompressedData = 0; 3049 for (ind = 0; ind < dataIIndex.numElements(); ++ind) 3050 { 3051 indexI = dataIIndex(ind); indexJ = dataJIndex(ind); 3052 if ((0 <= indexI) && (0 <= indexJ)) 3053 { 3054 data_i_index(nbCompressedData) = (1 == data_dim) ? ind : ind % ni; 3055 data_j_index(nbCompressedData) = (1 == data_dim) ? 0 : ind / ni; 3056 ++nbCompressedData; 3057 } 3058 } 3059 3060 // Reset data_ibegin, data_jbegin 3061 data_ibegin.setValue(0); 3062 data_jbegin.setValue(0); 3063 } 3064 2282 3065 CTransformation<CDomain>* CDomain::addTransformation(ETranformationType transType, const StdString& id) 2283 3066 { … … 2342 3125 } 2343 3126 3127 void CDomain::setContextClient(CContextClient* contextClient) 3128 { 3129 if (clientsSet.find(contextClient)==clientsSet.end()) 3130 { 3131 clients.push_back(contextClient) ; 3132 clientsSet.insert(contextClient); 3133 } 3134 } 3135 2344 3136 /*! 2345 3137 Parse children nodes of a domain in xml file. -
XIOS/dev/branch_openmp/src/node/domain.hpp
r1331 r1460 16 16 #include "transformation.hpp" 17 17 #include "transformation_enum.hpp" 18 18 #include "server_distribution_description.hpp" 19 #include "mpi_std.hpp" 19 20 #include "mesh.hpp" 20 21 … … 42 43 , public CDomainAttributes 43 44 { 44 45 /// typedef /// 46 typedef CObjectTemplate<CDomain> SuperClass; 47 typedef CDomainAttributes SuperClassAttribute; 48 public: 45 49 enum EEventId 46 50 { 47 EVENT_ID_SERVER_ATTRIBUT, EVENT_ID_INDEX, EVENT_ID_LON, EVENT_ID_LAT, EVENT_ID_AREA 51 EVENT_ID_INDEX, EVENT_ID_LON, EVENT_ID_LAT, 52 EVENT_ID_AREA, EVENT_ID_MASK, 53 EVENT_ID_DATA_INDEX, EVENT_ID_SERVER_ATTRIBUT 48 54 } ; 49 50 /// typedef ///51 typedef CObjectTemplate<CDomain> SuperClass;52 typedef CDomainAttributes SuperClassAttribute;53 55 54 56 public: … … 71 73 virtual void parse(xml::CXMLNode & node); 72 74 75 void setContextClient(CContextClient* contextClient); 76 73 77 /// Vérifications /// 74 78 void checkAttributes(void); 75 76 79 void checkAttributesOnClient(); 77 80 void checkAttributesOnClientAfterTransformation(); 78 79 81 void checkEligibilityForCompressedOutput(void); 80 82 … … 92 94 bool IsWritten(const StdString & filename) const; 93 95 bool isWrittenCompressed(const StdString& filename) const; 94 95 const std::vector<int>& getIndexesToWrite(void) const;96 int get NumberWrittenIndexes() const;97 int get TotalNumberWrittenIndexes() const;98 int getOffsetWrittenIndexes() const;99 100 std::map<int, StdSize> getAttributesBufferSize( );96 97 int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 98 int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 99 int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 100 CArray<int,1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 101 102 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); 101 103 102 104 bool isEmpty(void) const; 103 105 bool isDistributed(void) const; 104 bool isCompressible(void) const; 105 bool distributionAttributesHaveValue() const; 106 107 int ni_srv,ibegin_srv,iend_srv ; 108 int zoom_ni_srv,zoom_ibegin_srv,zoom_iend_srv ; 109 110 int nj_srv,jbegin_srv,jend_srv ; 111 int zoom_nj_srv,zoom_jbegin_srv,zoom_jend_srv ; 112 113 CArray<double, 1> lonvalue_srv, latvalue_srv ; 114 CArray<double, 2> bounds_lon_srv, bounds_lat_srv ; 115 CArray<double, 1> lonvalue_client, latvalue_client; 116 CArray<double, 2> bounds_lon_client, bounds_lat_client; 117 CArray<double, 1> area_srv; 118 119 vector<int> connectedServer ; // list of connected server 120 vector<int> nbSenders ; // for each communication with a server, number of communicating client 121 vector<int> nbDataSrv ; // size of data to send to each server 122 vector< vector<int> > i_indSrv ; // for each server, i global index to send 123 vector< vector<int> > j_indSrv ; // for each server, j global index to send 124 106 bool isCompressible(void) const; 107 125 108 std::vector<int> getNbGlob(); 126 109 bool isEqual(CDomain* domain); 110 111 static bool dispatchEvent(CEventServer& event); 112 127 113 public: 128 114 /// Mutateur /// 129 115 void addRelFile(const StdString & filename); 130 void addRelFileCompressed(const StdString& filename); 131 void completeLonLatClient(void); 132 void sendServerAttribut(void) ; 133 void sendLonLatArea(void); 134 void computeConnectedServer(void) ; 135 void computeLocalMask(void) ; 136 116 void addRelFileCompressed(const StdString& filename); 117 118 void computeWrittenIndex(); 119 void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 120 137 121 void 
AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat, 138 122 CArray<double,1>& lon_g, CArray<double,1>& lat_g); … … 142 126 143 127 void fillInLonLat(); 144 145 static bool dispatchEvent(CEventServer& event); 146 static void recvServerAttribut(CEventServer& event); 147 static void recvIndex(CEventServer& event); 148 static void recvLon(CEventServer& event); 149 static void recvLat(CEventServer& event); 150 static void recvArea(CEventServer& event); 151 void recvServerAttribut(CBufferIn& buffer); 152 void recvIndex(int rank, CBufferIn& buffer); 153 void recvLon(int rank, CBufferIn& buffer); 154 void recvLat(int rank, CBufferIn& buffer); 155 void recvArea(int rank, CBufferIn& buffer); 156 128 bool distributionAttributesHaveValue() const; 129 130 size_t getGlobalWrittenSize() ; 157 131 /// Destructeur /// 158 132 virtual ~CDomain(void); … … 162 136 static StdString GetDefName(void); 163 137 164 static ENodeType GetType(void); 165 const std::map<int, vector<size_t> >& getIndexServer() const; 166 CArray<bool, 1> localMask; 138 static ENodeType GetType(void); 139 140 public: 141 CArray<double, 1> lonvalue, latvalue; 142 CArray<double, 2> bounds_lonvalue, bounds_latvalue; 143 CArray<double, 1> areavalue; 144 145 CArray<size_t,1> localIndexToWriteOnServer; 146 147 CArray<bool, 1> domainMask; // mask_1d, mask_2d -> domainMask 148 CArray<bool, 1> localMask; // domainMask + indexing 167 149 bool isCurvilinear ; 168 150 bool hasBounds ; … … 171 153 bool hasPole ; 172 154 155 void computeLocalMask(void) ; 173 156 private: 174 157 void checkDomain(void); … … 183 166 void checkArea(void); 184 167 void checkLonLat(); 185 void checkZoom(void); 186 187 void setTransformations(const TransMapTypes&); 168 void checkZoom(void); 169 170 void setTransformations(const TransMapTypes&); 188 171 void computeNGlobDomain(); 189 172 void sendAttributes(); 190 173 void sendIndex(); 174 void sendDistributionAttributes(); 175 void sendMask(); 191 176 void sendArea(); 192 void sendLonLat(); 193 177 void sendLonLat(); 178 void sendDataIndex(); 179 void convertLonLatValue(); 194 180 void fillInRectilinearLonLat(); 195 181 void fillInCurvilinearLonLat(); 196 182 void fillInUnstructuredLonLat(); 183 184 static void recvDistributionAttributes(CEventServer& event); 185 static void recvIndex(CEventServer& event); 186 static void recvIndexZoom(CEventServer& event); 187 static void recvMask(CEventServer& event); 188 static void recvLon(CEventServer& event); 189 static void recvLat(CEventServer& event); 190 static void recvArea(CEventServer& event); 191 static void recvDataIndex(CEventServer& event); 192 void recvDistributionAttributes(CBufferIn& buffer); 193 void recvIndex(std::map<int, CBufferIn*>& rankBuffers); 194 void recvMask(std::map<int, CBufferIn*>& rankBuffers); 195 void recvLon(std::map<int, CBufferIn*>& rankBuffers); 196 void recvLat(std::map<int, CBufferIn*>& rankBuffers); 197 void recvArea(std::map<int, CBufferIn*>& rankBuffers); 198 void recvDataIndex(std::map<int, CBufferIn*>& rankBuffers); 199 200 void completeLonLatClient(void); 201 void computeConnectedClients(); 202 197 203 private: 198 bool isChecked; 204 205 /** Clients that have to send a domain. There can be multiple clients in case of secondary server, otherwise only one client. 
*/ 206 std::list<CContextClient*> clients; 207 std::set<CContextClient*> clientsSet; 208 209 bool doZoomByIndex_; 210 bool isChecked, computedWrittenIndex_; 199 211 std::set<StdString> relFiles, relFilesCompressed; 200 212 bool isClientChecked; // Verify whether all attributes of domain on the client side are good 201 213 bool isClientAfterTransformationChecked; 202 std::map<int, CArray<int,1> > indiSrv, indjSrv; 203 std::map<int,int> nbConnectedClients_; // Mapping of number of communicating client to a server 204 std::map<int, vector<size_t> > indSrv_; // Global index of each client sent to server 205 std::map<int, vector<int> > indWrittenSrv_; // Global written index of each client sent to server 214 std::map<int, CArray<int,1> > indGlob_; 215 std::map<int, map<int,int> > nbSenders; // Mapping of number of communicating client to a server 216 217 /** Global index of each client sent to server: map<serverSize, map<serverRank, indexes>> */ 218 std::map<int, boost::unordered_map<int, vector<size_t> > > indSrv_; 219 // std::map<CContextClient*, std::map<int, vector<int> > > indWrittenSrv_; // Global written index of each client sent to server 206 220 std::vector<int> indexesToWrite; 207 int numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 208 std::vector<int> connectedServerRank_; 209 bool isDistributed_; 221 std::vector<int> recvClientRanks_; 222 std::map<int,int> numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 223 std::map<int, CArray<int, 1> > compressedIndexToWriteOnServer; 224 std::map<int, std::map<int,size_t> > connectedDataSize_; 225 std::map<int, std::vector<int> > connectedServerRank_; 226 210 227 //! True if and only if the data defined on the domain can be outputted in a compressed way 211 228 bool isCompressible_; … … 213 230 TransMapTypes transformationMap_; 214 231 bool isUnstructed_; 232 boost::unordered_map<size_t,size_t> globalLocalIndexMap_; 215 233 216 234 private: -
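Note on the domain.hpp hunks above: the written-index bookkeeping changes from scalars to maps (numberWrittenIndexes_, totalNumberWrittenIndexes_ and offsetWrittenIndexes_ become std::map<int,int>, compressedIndexToWriteOnServer becomes std::map<int, CArray<int,1> >), and the accessors now take an ep_lib::MPI_Comm. A minimal standalone sketch of that scheme follows, assuming the map key is the size of the communicator handed to the accessor; WrittenIndexCache and its members are illustrative names, not the XIOS API, and plain MPI is used in place of the ep_lib wrappers so the sketch is self-contained.

    #include <map>
    #include <mpi.h>

    // Per-communicator-size cache mirroring the std::map<int,int> members
    // declared in domain.hpp. All names here are hypothetical.
    struct WrittenIndexCache
    {
      std::map<int, int> numberWritten; // comm size -> indexes written locally
      std::map<int, int> offsetWritten; // comm size -> offset of this rank

      int getNumberWritten(MPI_Comm writtenCom)
      {
        int writtenSize;
        MPI_Comm_size(writtenCom, &writtenSize);
        return numberWritten[writtenSize]; // value-initialized to 0 if absent
      }
    };

The apparent motivation is that a domain can now be written through sub-communicators of different sizes (file.cpp below splits one communicator per file), so a single scalar per quantity no longer identifies the layout.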
XIOS/dev/branch_openmp/src/node/field.cpp
r1342 r1460 23 23 #include "temporal_filter.hpp" 24 24 #include "spatial_transform_filter.hpp" 25 26 #include <stdio.h> 25 #include "file_server_writer_filter.hpp" 27 26 28 27 namespace xios{ 29 28 30 /// ////////////////////// Dfinitions ////////////////////// /// 31 32 CField* CField::my_getDirectFieldReference(void) const 33 { 34 // if (this->field_ref.isEmpty()) 35 // ERROR("C" #type "* C" #type "::getDirect" #type "Reference(void)", 36 // << "The " #name_ " with id = '" << getId() << "'" 37 // << " has no " #name_ "_ref."); 38 39 // if (!C##type::has(this->name_##_ref)) 40 // ERROR("C" #type "* C" #type "::getDirect" #type "Reference(void)", 41 // << this->name_##_ref 42 // << " refers to an unknown " #name_ " id."); 43 44 return CField::get(this->field_ref); 45 } 46 29 /// ////////////////////// Définitions ////////////////////// /// 47 30 48 31 CField::CField(void) … … 52 35 , nstep(0), nstepMax(0) 53 36 , hasOutputFile(false) 54 , domAxisScalarIds_(vector<StdString>(3,"")), areAllReferenceSolved(false), isReferenceSolved(false) 37 , domAxisScalarIds_(vector<StdString>(3,"")) 38 , areAllReferenceSolved(false), isReferenceSolved(false), isReferenceSolvedAndTransformed(false) 39 , isGridChecked(false) 55 40 , useCompressedOutput(false) 56 41 , hasTimeInstant(false) … … 58 43 , wasDataRequestedFromServer(false) 59 44 , wasDataAlreadyReceivedFromServer(false) 60 , isEOF(false) 45 , mustAutoTrigger(false) 46 , isEOF(false), nstepMaxRead(false) 61 47 { setVirtualVariableGroup(CVariableGroup::create(getId() + "_virtual_variable_group")); } 62 48 … … 67 53 , nstep(0), nstepMax(0) 68 54 , hasOutputFile(false) 69 , domAxisScalarIds_(vector<StdString>(3,"")), areAllReferenceSolved(false), isReferenceSolved(false) 55 , domAxisScalarIds_(vector<StdString>(3,"")) 56 , areAllReferenceSolved(false), isReferenceSolved(false), isReferenceSolvedAndTransformed(false) 57 , isGridChecked(false) 70 58 , useCompressedOutput(false) 71 59 , hasTimeInstant(false) … … 73 61 , wasDataRequestedFromServer(false) 74 62 , wasDataAlreadyReceivedFromServer(false) 75 , isEOF(false) 63 , mustAutoTrigger(false) 64 , isEOF(false), nstepMaxRead(false) 76 65 { setVirtualVariableGroup(CVariableGroup::create(getId() + "_virtual_variable_group")); } 77 66 … … 148 137 149 138 CContext* context = CContext::getCurrent(); 150 CContextClient* client = context->client; 139 CContextClient* client = (!context->hasServer) ? 
context->client : this->file->getContextClient(); 140 int receiverSize = client->serverSize; 151 141 152 142 CEventClient event(getType(), EVENT_ID_UPDATE_DATA); … … 156 146 list<CArray<double,1> > list_data; 157 147 158 if (!grid->doGridHaveDataDistributed( ))148 if (!grid->doGridHaveDataDistributed(client)) 159 149 { 160 150 if (client->isServerLeader()) 161 151 { 162 for (it = grid->storeIndex_toSrv .begin(); it != grid->storeIndex_toSrv.end(); it++)152 for (it = grid->storeIndex_toSrv[client].begin(); it != grid->storeIndex_toSrv[client].end(); it++) 163 153 { 164 154 int rank = it->first; … … 175 165 } 176 166 client->sendEvent(event); 177 }178 167 } 168 else client->sendEvent(event); 179 169 } 180 170 else 181 171 { 182 for (it = grid->storeIndex_toSrv .begin(); it != grid->storeIndex_toSrv.end(); it++)172 for (it = grid->storeIndex_toSrv[client].begin(); it != grid->storeIndex_toSrv[client].end(); it++) 183 173 { 184 174 int rank = it->first; … … 192 182 193 183 list_msg.back() << getId() << data_tmp; 194 event.push(rank, grid->nbSenders[r ank], list_msg.back());184 event.push(rank, grid->nbSenders[receiverSize][rank], list_msg.back()); 195 185 } 196 186 client->sendEvent(event); … … 202 192 void CField::recvUpdateData(CEventServer& event) 203 193 { 204 vector<int> ranks; 205 vector<CBufferIn*> buffers; 194 std::map<int,CBufferIn*> rankBuffers; 206 195 207 196 list<CEventServer::SSubEvent>::iterator it; … … 213 202 CBufferIn* buffer = it->buffer; 214 203 *buffer >> fieldId; 215 ranks.push_back(rank); 216 buffers.push_back(buffer); 217 } 218 get(fieldId)->recvUpdateData(ranks,buffers); 204 rankBuffers[rank] = buffer; 205 } 206 get(fieldId)->recvUpdateData(rankBuffers); 219 207 CTimer::get("Field : recv data").suspend(); 220 208 } 221 209 222 void CField::recvUpdateData( vector<int>& ranks, vector<CBufferIn*>& buffers)210 void CField::recvUpdateData(std::map<int,CBufferIn*>& rankBuffers) 223 211 { 224 if (data_srv.empty()) 225 { 226 for (map<int, CArray<size_t, 1> >::iterator it = grid->outIndexFromClient.begin(); it != grid->outIndexFromClient.end(); ++it) 212 CContext* context = CContext::getCurrent(); 213 214 size_t sizeData = 0; 215 if (0 == recvDataSrv.numElements()) 216 { 217 CArray<int,1>& storeClient = grid->storeIndex_client; 218 219 // Gather all data from different clients 220 recvDataSrv.resize(storeClient.numElements()); 221 recvFoperationSrv = boost::shared_ptr<func::CFunctor>(new func::CInstant(recvDataSrv)); 222 } 223 224 CArray<double,1> recv_data_tmp(recvDataSrv.numElements()); 225 const CDate& currDate = context->getCalendar()->getCurrentDate(); 226 const CDate opeDate = (last_operation_srv + context->getCalendar()->getTimeStep()) +freq_op + freq_operation_srv - freq_op - context->getCalendar()->getTimeStep(); 227 228 if (opeDate <= currDate) 229 { 230 for (map<int, CArray<size_t, 1> >::iterator it = grid->outLocalIndexStoreOnClient.begin(); it != grid->outLocalIndexStoreOnClient.end(); ++it) 227 231 { 228 int rank = it->first; 229 data_srv.insert(std::make_pair(rank, CArray<double,1>(it->second.numElements()))); 230 foperation_srv.insert(pair<int,boost::shared_ptr<func::CFunctor> >(rank,boost::shared_ptr<func::CFunctor>(new func::CInstant(data_srv[rank])))); 231 } 232 } 233 232 CArray<double,1> tmp; 233 CArray<size_t,1>& indexTmp = it->second; 234 *(rankBuffers[it->first]) >> tmp; 235 for (int idx = 0; idx < indexTmp.numElements(); ++idx) 236 { 237 recv_data_tmp(indexTmp(idx)) = tmp(idx); 238 } 239 } 240 } 241 242 this->setData(recv_data_tmp); 243 // delete 
incomming flux for server only 244 recvFoperationSrv.reset() ; 245 recvDataSrv.reset() ; 246 } 247 248 void CField::writeUpdateData(const CArray<double,1>& data) 249 { 234 250 CContext* context = CContext::getCurrent(); 251 235 252 const CDate& currDate = context->getCalendar()->getCurrentDate(); 236 const CDate opeDate = last_operation_srv +freq_op + freq_operation_srv - freq_op;237 const CDate writeDate = last_Write_srv+ freq_write_srv;253 const CDate opeDate = (last_operation_srv + context->getCalendar()->getTimeStep()) + freq_op + freq_operation_srv - freq_op - context->getCalendar()->getTimeStep(); 254 const CDate writeDate = last_Write_srv + freq_write_srv; 238 255 239 256 if (opeDate <= currDate) 240 257 { 241 for (int n = 0; n < ranks.size(); n++) 242 { 243 CArray<double,1> data_tmp; 244 *buffers[n] >> data_tmp; 245 (*foperation_srv[ranks[n]])(data_tmp); 246 } 258 (*recvFoperationSrv)(data); 247 259 last_operation_srv = currDate; 248 260 } … … 250 262 if (writeDate < (currDate + freq_operation_srv)) 251 263 { 252 for (int n = 0; n < ranks.size(); n++) 253 { 254 this->foperation_srv[ranks[n]]->final(); 255 } 256 264 recvFoperationSrv->final(); 257 265 last_Write_srv = writeDate; 266 grid->computeWrittenIndex(); 258 267 writeField(); 259 268 lastlast_Write_srv = last_Write_srv; … … 263 272 void CField::writeField(void) 264 273 { 265 if (!getRelFile()-> allDomainEmpty)274 if (!getRelFile()->isEmptyZone()) 266 275 { 267 276 if (grid->doGridHaveDataToWrite() || getRelFile()->type == CFile::type_attr::one_file) 268 277 { 269 getRelFile()->check File();278 getRelFile()->checkWriteFile(); 270 279 this->incrementNStep(); 271 280 getRelFile()->getDataOutput()->writeFieldData(CField::get(this)); … … 274 283 } 275 284 285 /* 286 Send a request for reading data. 287 Client sends a request to server for demanding server to read data and send back to it. 288 For now, this function is called only by client 289 In the future, it can be called by level-1 servers 290 \param [in] tsDataRequested timestamp when the call is made 291 */ 276 292 bool CField::sendReadDataRequest(const CDate& tsDataRequested) 277 293 { 278 294 CContext* context = CContext::getCurrent(); 279 CContextClient* client = context->client; 295 // CContextClient* client = context->client; 296 297 // This code is for future: If we want to read file with level-2 servers 298 CContextClient* client = (!context->hasServer) ? context->client : this->file->getContextClient(); 280 299 281 300 lastDataRequestedFromServer = tsDataRequested; … … 323 342 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 324 343 } 344 325 345 dataRequested |= sendReadDataRequest(lastDataRequestedFromServer + file->output_freq); 326 346 } … … 337 357 } 338 358 359 /*! 360 Receive data request sent from client and process it 361 Every time server receives this request, it will try to read data and sent read data back to client 362 At the moment, this function is called by server level 1 363 In the future, this should (only) be done by the last level servers. 
364 */ 339 365 void CField::recvReadDataRequest(void) 340 366 { … … 345 371 std::list<CMessage> msgs; 346 372 347 boolhasData = readField();373 EReadField hasData = readField(); 348 374 349 375 map<int, CArray<double,1> >::iterator it; 350 if (!grid->doGridHaveDataDistributed( ))376 if (!grid->doGridHaveDataDistributed(client)) 351 377 { 352 378 if (client->isServerLeader()) 353 379 { 354 if (!data_srv.empty()) 355 { 356 it = data_srv.begin(); 380 if (0 != recvDataSrv.numElements()) 381 { 357 382 const std::list<int>& ranks = client->getRanksServerLeader(); 358 383 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) … … 361 386 CMessage& msg = msgs.back(); 362 387 msg << getId(); 363 if (hasData) 364 msg << getNStep() - 1 << it->second; 365 else 366 msg << int(-1); 388 switch (hasData) 389 { 390 case RF_DATA: 391 msg << getNStep() - 1 << recvDataSrv; 392 break; 393 case RF_NODATA: 394 msg << int(-2) << recvDataSrv; 395 break; 396 case RF_EOF: 397 default: 398 msg << int(-1); 399 break; 400 } 401 367 402 event.push(*itRank, 1, msg); 368 403 } 369 404 } 370 405 client->sendEvent(event); 371 } 372 else 373 { 374 // if (!data_srv.empty()) 375 // { 376 // it = data_srv.begin(); 377 // const std::list<int>& ranks = client->getRanksServerNotLeader(); 378 // for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 379 // { 380 // msgs.push_back(CMessage()); 381 // CMessage& msg = msgs.back(); 382 // msg << getId(); 383 // if (hasData) 384 // msg << getNStep() - 1 << it->second; 385 // else 386 // msg << int(-1); 387 // event.push(*itRank, 1, msg); 388 // } 389 // } 406 } 407 else 408 { 390 409 client->sendEvent(event); 391 410 } … … 393 412 else 394 413 { 395 for (it = data_srv.begin(); it != data_srv.end(); it++) 414 for (map<int, CArray<size_t, 1> >::iterator it = grid->outLocalIndexStoreOnClient.begin(); 415 it != grid->outLocalIndexStoreOnClient.end(); ++it) 396 416 { 417 CArray<size_t,1>& indexTmp = it->second; 418 CArray<double,1> tmp(indexTmp.numElements()); 419 for (int idx = 0; idx < indexTmp.numElements(); ++idx) 420 { 421 tmp(idx) = recvDataSrv(indexTmp(idx)); 422 } 423 397 424 msgs.push_back(CMessage()); 398 425 CMessage& msg = msgs.back(); 399 426 msg << getId(); 400 if (hasData) 401 msg << getNStep() - 1 << it->second; 402 else 403 msg << int(-1); 404 event.push(it->first, grid->nbSenders[it->first], msg); 427 switch (hasData) 428 { 429 case RF_DATA: 430 msg << getNStep() - 1 << tmp; 431 break; 432 case RF_NODATA: 433 msg << int(-2) << tmp; 434 break; 435 case RF_EOF: 436 default: 437 msg << int(-1); 438 break; 439 } 440 441 event.push(it->first, grid->nbReadSenders[client][it->first], msg); 405 442 } 406 443 client->sendEvent(event); … … 408 445 } 409 446 410 bool CField::readField(void) 447 /*! 448 Read field from a file. 
449 A field is read with the distribution of data on the server side 450 \return State of field can be read from a file 451 */ 452 CField::EReadField CField::readField(void) 411 453 { 412 if (!getRelFile()->allDomainEmpty) 413 { 414 if (grid->doGridHaveDataToWrite() || getRelFile()->type == CFile::type_attr::one_file) 454 CContext* context = CContext::getCurrent(); 455 grid->computeWrittenIndex(); 456 getRelFile()->initRead(); 457 EReadField readState = RF_DATA; 458 459 if (!getRelFile()->isEmptyZone()) 460 { 461 if (grid->doGridHaveDataToWrite() || getRelFile()->type == CFile::type_attr::one_file) 415 462 { 416 if ( data_srv.empty())417 { 418 for (map<int, CArray<size_t, 1> >::iterator it = grid->outIndexFromClient.begin(); it != grid->outIndexFromClient.end(); ++it)419 data_srv.insert(std::make_pair(it->first, CArray<double,1>(it->second.numElements())));463 if (0 == recvDataSrv.numElements()) 464 { 465 CArray<int,1>& storeClient = grid->storeIndex_client; 466 recvDataSrv.resize(storeClient.numElements()); 420 467 } 421 422 getRelFile()->checkFile(); 468 469 getRelFile()->checkReadFile(); 470 423 471 if (!nstepMax) 424 472 { … … 429 477 430 478 if (getNStep() > nstepMax && (getRelFile()->cyclic.isEmpty() || !getRelFile()->cyclic) ) 431 return false; 432 433 getRelFile()->getDataInput()->readFieldData(CField::get(this)); 434 } 435 } 436 437 return true; 479 readState = RF_EOF; 480 481 if (RF_EOF != readState) 482 getRelFile()->getDataInput()->readFieldData(CField::get(this)); 483 } 484 } 485 else 486 { 487 this->incrementNStep(); 488 if (getNStep() > nstepMax && (getRelFile()->cyclic.isEmpty() || !getRelFile()->cyclic) ) 489 readState = RF_EOF; 490 else 491 readState = RF_NODATA; 492 493 if (!nstepMaxRead) // This can be a bug if we try to read field from zero time record 494 readState = RF_NODATA; 495 } 496 497 if (!nstepMaxRead) 498 { 499 //MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm); 500 MPI_Allreduce(&nstepMax, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm); 501 nstepMaxRead = true; 502 } 503 504 return readState; 438 505 } 439 506 507 /* 508 Receive read data from server. 509 At the moment, this function is called in the client side. 510 In the future, this function can be called hiearachically (server n-1, server n -2, ..., client) 511 \param event event containing read data 512 */ 440 513 void CField::recvReadDataReady(CEventServer& event) 441 514 { … … 455 528 } 456 529 530 /*! 531 Receive read data from server 532 \param [in] ranks Ranks of sending processes 533 \param [in] buffers buffers containing read data 534 */ 457 535 void CField::recvReadDataReady(vector<int> ranks, vector<CBufferIn*> buffers) 458 536 { … … 499 577 500 578 // Check if data previously requested has been received as expected 501 if (wasDataRequestedFromServer && (!isEOF || currentDate <= dateEOF))579 if (wasDataRequestedFromServer && !isEOF) 502 580 { 503 581 CTimer timer("CField::checkForLateDataFromServer"); … … 507 585 { 508 586 const CDate nextDataDue = wasDataAlreadyReceivedFromServer ? (lastDataReceivedFromServer + file->output_freq) : context->getCalendar()->getInitDate(); 509 isDataLate = nextDataDue < currentDate;587 isDataLate = (nextDataDue <= currentDate); 510 588 511 589 if (isDataLate) … … 526 604 } 527 605 606 void CField::checkIfMustAutoTrigger(void) 607 { 608 mustAutoTrigger = serverSourceFilter ? 
serverSourceFilter->mustAutoTrigger() : false; 609 } 610 611 void CField::autoTriggerIfNeeded(void) 612 { 613 if (mustAutoTrigger) 614 serverSourceFilter->trigger(CContext::getCurrent()->getCalendar()->getCurrentDate()); 615 } 616 528 617 //---------------------------------------------------------------- 529 618 … … 579 668 { 580 669 this->nstepMax = 0; 670 nstepMaxRead = false; 581 671 } 582 672 … … 629 719 //---------------------------------------------------------------- 630 720 631 void CField::solveOnlyReferenceEnabledField(bool doSending2Server)632 {633 CContext* context = CContext::getCurrent();634 if (!isReferenceSolved)635 {636 isReferenceSolved = true;637 638 if (context->hasClient)639 {640 solveRefInheritance(true);641 if (hasDirectFieldReference()) getDirectFieldReference()->solveOnlyReferenceEnabledField(false);642 }643 else if (context->hasServer)644 solveServerOperation();645 646 solveGridReference();647 648 if (context->hasClient)649 {650 solveGenerateGrid();651 buildGridTransformationGraph();652 }653 }654 }655 656 721 /*! 657 722 Build up graph of grids which plays role of destination and source in grid transformation … … 661 726 { 662 727 CContext* context = CContext::getCurrent(); 663 if (context->hasClient )728 if (context->hasClient && !context->hasServer) 664 729 { 665 730 if (grid && !grid->isTransformed() && hasDirectFieldReference() && grid != getDirectFieldReference()->grid) … … 676 741 { 677 742 CContext* context = CContext::getCurrent(); 678 if (context->hasClient )743 if (context->hasClient && !context->hasServer) 679 744 { 680 745 std::map<CGrid*,std::pair<bool,StdString> >& gridSrcMap = grid->getTransGridSource(); … … 751 816 } 752 817 } 753 818 819 /*! 820 Solve reference of all enabled fields even the source fields . 821 In this step, we do transformations. 
822 */ 823 void CField::solveAllEnabledFieldsAndTransform() 824 { 825 CContext* context = CContext::getCurrent(); 826 bool hasClient = context->hasClient; 827 bool hasServer = context->hasServer; 828 829 if (!isReferenceSolvedAndTransformed) 830 { 831 isReferenceSolvedAndTransformed = true; 832 833 if (hasClient && !hasServer) 834 { 835 solveRefInheritance(true); 836 if (hasDirectFieldReference()) getDirectFieldReference()->solveAllEnabledFieldsAndTransform(); 837 } 838 839 if (hasServer) 840 solveServerOperation(); 841 842 solveGridReference(); 843 844 if (hasClient && !hasServer) 845 { 846 solveGenerateGrid(); 847 buildGridTransformationGraph(); 848 } 849 850 solveGridDomainAxisRef(false); 851 852 if (hasClient && !hasServer) 853 { 854 solveTransformedGrid(); 855 } 856 857 solveGridDomainAxisRef(false); 858 } 859 } 860 861 void CField::checkGridOfEnabledFields() 862 { 863 if (!isGridChecked) 864 { 865 isGridChecked = true; 866 solveCheckMaskIndex(false); 867 } 868 } 869 870 void CField::sendGridComponentOfEnabledFields() 871 { 872 solveGridDomainAxisRef(true); 873 // solveCheckMaskIndex(true); 874 } 875 876 void CField::sendGridOfEnabledFields() 877 { 878 // solveGridDomainAxisRef(true); 879 solveCheckMaskIndex(true); 880 } 881 882 void CField::solveOnlyReferenceEnabledField(bool doSending2Server) 883 { 884 CContext* context = CContext::getCurrent(); 885 if (!isReferenceSolved) 886 { 887 isReferenceSolved = true; 888 889 if (context->hasClient && !context->hasServer) 890 { 891 solveRefInheritance(true); 892 if (hasDirectFieldReference()) getDirectFieldReference()->solveOnlyReferenceEnabledField(false); 893 } 894 895 if (context->hasServer) 896 solveServerOperation(); 897 898 solveGridReference(); 899 grid->solveDomainAxisRefInheritance(true); // make it again to solve grid reading from file 900 901 if (context->hasClient && !context->hasServer) 902 { 903 solveGenerateGrid(); 904 buildGridTransformationGraph(); 905 } 906 } 907 } 908 754 909 void CField::solveAllReferenceEnabledField(bool doSending2Server) 755 910 { … … 757 912 solveOnlyReferenceEnabledField(doSending2Server); 758 913 759 //std::cout<<"Field "<<this->getId()<<" areAllReferenceSolved = "<<areAllReferenceSolved<<std::endl;760 761 914 if (!areAllReferenceSolved) 762 915 { 763 916 areAllReferenceSolved = true; 764 //std::cout<<"Field "<<this->getId()<<" all reference solved"<<std::endl; 765 if (context->hasClient )917 918 if (context->hasClient && !context->hasServer) 766 919 { 767 920 solveRefInheritance(true); … … 776 929 solveGridDomainAxisRef(doSending2Server); 777 930 778 if (context->hasClient )931 if (context->hasClient && !context->hasServer) 779 932 { 780 933 solveTransformedGrid(); … … 784 937 } 785 938 786 std::map<int, StdSize> CField::getGridAttributesBufferSize() 787 { 788 return grid->getAttributesBufferSize(); 789 } 790 791 std::map<int, StdSize> CField::getGridDataBufferSize() 792 { 793 return grid->getDataBufferSize(getId()); 939 std::map<int, StdSize> CField::getGridAttributesBufferSize(CContextClient* client, bool bufferForWriting /*= "false"*/) 940 { 941 return grid->getAttributesBufferSize(client, bufferForWriting); 942 } 943 944 std::map<int, StdSize> CField::getGridDataBufferSize(CContextClient* client, bool bufferForWriting /*= "false"*/) 945 { 946 return grid->getDataBufferSize(client, getId(), bufferForWriting); 947 } 948 949 size_t CField::getGlobalWrittenSize() 950 { 951 return grid->getGlobalWrittenSize(); 794 952 } 795 953 … … 851 1009 */ 852 1010 void CField::buildFilterGraph(CGarbageCollector& 
gc, bool enableOutput) 853 { 854 if (!areAllReferenceSolved) solveAllReferenceEnabledField(false); 1011 { 1012 if (!isReferenceSolvedAndTransformed) solveAllEnabledFieldsAndTransform(); 1013 if (!isGridChecked) checkGridOfEnabledFields(); 855 1014 856 1015 const bool detectMissingValues = (!detect_missing_value.isEmpty() && !default_value.isEmpty() && detect_missing_value == true); 857 1016 const double defaultValue = detectMissingValues ? default_value : (!default_value.isEmpty() ? default_value : 0.0); 858 1017 859 // Start by building a filter which can provide the field's instant data 860 if (!instantDataFilter) 861 { 862 // Check if we have an expression to parse 863 if (hasExpression()) 864 { 865 boost::scoped_ptr<IFilterExprNode> expr(parseExpr(getExpression() + '\0')); 866 boost::shared_ptr<COutputPin> filter = expr->reduce(gc, *this); 867 868 // Check if a spatial transformation is needed 869 if (!field_ref.isEmpty()) 1018 CContext* context = CContext::getCurrent(); 1019 bool hasWriterServer = context->hasServer && !context->hasClient; 1020 bool hasIntermediateServer = context->hasServer && context->hasClient; 1021 1022 if (hasWriterServer) 1023 { 1024 if (!instantDataFilter) 1025 instantDataFilter = clientSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid,true)); 1026 1027 1028 // If the field data is to be read by the client or/and written to a file 1029 if (enableOutput && !storeFilter && !fileWriterFilter) 1030 { 1031 if (file && (file->mode.isEmpty() || file->mode == CFile::mode_attr::write)) 870 1032 { 871 CGrid* gridRef = CField::get(field_ref)->grid; 872 873 if (grid && grid != gridRef && grid->hasTransform()) 1033 fileServerWriterFilter = boost::shared_ptr<CFileServerWriterFilter>(new CFileServerWriterFilter(gc, this)); 1034 instantDataFilter->connectOutput(fileServerWriterFilter, 0); 1035 } 1036 } 1037 } 1038 else if (hasIntermediateServer) 1039 { 1040 if (!instantDataFilter) 1041 instantDataFilter = clientSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, true)); 1042 1043 // If the field data is to be read by the client or/and written to a file 1044 if (enableOutput && !storeFilter && !fileWriterFilter) 1045 { 1046 if (file && (file->mode.isEmpty() || file->mode == CFile::mode_attr::write)) 1047 { 1048 fileWriterFilter = boost::shared_ptr<CFileWriterFilter>(new CFileWriterFilter(gc, this)); 1049 instantDataFilter->connectOutput(fileWriterFilter, 0); 1050 } 1051 } 1052 } 1053 else 1054 { 1055 // Start by building a filter which can provide the field's instant data 1056 if (!instantDataFilter) 1057 { 1058 // Check if we have an expression to parse 1059 if (hasExpression()) 1060 { 1061 boost::scoped_ptr<IFilterExprNode> expr(parseExpr(getExpression() + '\0')); 1062 boost::shared_ptr<COutputPin> filter = expr->reduce(gc, *this); 1063 1064 // Check if a spatial transformation is needed 1065 if (!field_ref.isEmpty()) 874 1066 { 875 std::pair<boost::shared_ptr<CFilter>, boost::shared_ptr<CFilter> > filters = CSpatialTransformFilter::buildFilterGraph(gc, gridRef, grid, detectMissingValues, defaultValue); 876 877 filter->connectOutput(filters.first, 0); 878 filter = filters.second; 1067 CGrid* gridRef = CField::get(field_ref)->grid; 1068 1069 if (grid && grid != gridRef && grid->hasTransform()) 1070 { 1071 std::pair<boost::shared_ptr<CFilter>, boost::shared_ptr<CFilter> > filters = CSpatialTransformFilter::buildFilterGraph(gc, gridRef, grid, detectMissingValues, defaultValue); 1072 1073 filter->connectOutput(filters.first, 0); 
1074 filter = filters.second; 1075 } 879 1076 } 1077 1078 instantDataFilter = filter; 880 1079 } 881 882 instantDataFilter = filter; 883 } 884 // Check if we have a reference on another field 885 else if (!field_ref.isEmpty()) 886 instantDataFilter = getFieldReference(gc); 887 // Check if the data is to be read from a file 888 else if (file && !file->mode.isEmpty() && file->mode == CFile::mode_attr::read) 889 { 890 checkAttributes(); 891 instantDataFilter = serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, freq_offset, true, 892 detectMissingValues, defaultValue)); 893 } 894 else // The data might be passed from the model 895 { 896 if (check_if_active.isEmpty()) check_if_active = false; 897 instantDataFilter = clientSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, NoneDu, false, 898 detectMissingValues, defaultValue)); 899 } 900 } 901 902 // If the field data is to be read by the client or/and written to a file 903 if (enableOutput && !storeFilter && !fileWriterFilter) 904 { 905 if (!read_access.isEmpty() && read_access) 906 { 907 storeFilter = boost::shared_ptr<CStoreFilter>(new CStoreFilter(gc, CContext::getCurrent(), grid, 908 detectMissingValues, defaultValue)); 909 instantDataFilter->connectOutput(storeFilter, 0); 910 } 911 912 if (file && (file->mode.isEmpty() || file->mode == CFile::mode_attr::write)) 913 { 914 fileWriterFilter = boost::shared_ptr<CFileWriterFilter>(new CFileWriterFilter(gc, this)); 915 getTemporalDataFilter(gc, file->output_freq)->connectOutput(fileWriterFilter, 0); 1080 // Check if we have a reference on another field 1081 else if (!field_ref.isEmpty()) 1082 instantDataFilter = getFieldReference(gc); 1083 // Check if the data is to be read from a file 1084 else if (file && !file->mode.isEmpty() && file->mode == CFile::mode_attr::read) 1085 { 1086 checkTimeAttributes(); 1087 instantDataFilter = serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, true, freq_offset, true, 1088 detectMissingValues, defaultValue)); 1089 } 1090 else // The data might be passed from the model 1091 { 1092 if (check_if_active.isEmpty()) check_if_active = false; 1093 instantDataFilter = clientSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, false, NoneDu, false, 1094 detectMissingValues, defaultValue)); 1095 } 1096 } 1097 1098 // If the field data is to be read by the client or/and written to a file 1099 if (enableOutput && !storeFilter && !fileWriterFilter) 1100 { 1101 if (!read_access.isEmpty() && read_access) 1102 { 1103 storeFilter = boost::shared_ptr<CStoreFilter>(new CStoreFilter(gc, CContext::getCurrent(), grid, 1104 detectMissingValues, defaultValue)); 1105 instantDataFilter->connectOutput(storeFilter, 0); 1106 } 1107 1108 if (file && (file->mode.isEmpty() || file->mode == CFile::mode_attr::write)) 1109 { 1110 fileWriterFilter = boost::shared_ptr<CFileWriterFilter>(new CFileWriterFilter(gc, this)); 1111 getTemporalDataFilter(gc, file->output_freq)->connectOutput(fileWriterFilter, 0); 1112 } 916 1113 } 917 1114 } … … 974 1171 if (!serverSourceFilter) 975 1172 { 976 check Attributes();977 serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, freq_offset, true,1173 checkTimeAttributes(); 1174 serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, true, freq_offset, true, 978 1175 detectMissingValues, defaultValue)); 979 1176 } … … 984 1181 { 985 1182 CField* fieldRef = CField::get(field_ref); 986 
fieldRef->buildFilterGraph(gc, false); 1183 fieldRef->buildFilterGraph(gc, false); 987 1184 selfReferenceFilter = fieldRef->getInstantDataFilter(); 988 1185 } … … 992 1189 { 993 1190 if (check_if_active.isEmpty()) check_if_active = false; 994 clientSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, NoneDu, false,1191 clientSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, true, NoneDu, false, 995 1192 detectMissingValues, defaultValue)); 996 1193 } … … 1022 1219 << "An operation must be defined for field \"" << getId() << "\"."); 1023 1220 1024 check Attributes();1025 1026 const bool detectMissingValues = (!detect_missing_value.isEmpty() && !default_value.isEmpty()&& detect_missing_value == true);1221 checkTimeAttributes(&outFreq); 1222 1223 const bool detectMissingValues = (!detect_missing_value.isEmpty() && detect_missing_value == true); 1027 1224 boost::shared_ptr<CTemporalFilter> temporalFilter(new CTemporalFilter(gc, operation, 1028 1225 CContext::getCurrent()->getCalendar()->getInitDate(), 1029 freq_op, freq_offset, outFreq, 1030 detectMissingValues, detectMissingValues ? default_value : 0.0)); 1226 freq_op, freq_offset, outFreq, detectMissingValues)); 1227 1031 1228 instantDataFilter->connectOutput(temporalFilter, 0); 1032 1229 … … 1060 1257 << "An operation must be defined for field \"" << getId() << "\"."); 1061 1258 1062 check Attributes();1063 1064 const bool detectMissingValues = (!detect_missing_value.isEmpty() && !default_value.isEmpty() &&detect_missing_value == true);1259 checkTimeAttributes(&outFreq); 1260 1261 const bool detectMissingValues = (!detect_missing_value.isEmpty() && detect_missing_value == true); 1065 1262 boost::shared_ptr<CTemporalFilter> temporalFilter(new CTemporalFilter(gc, operation, 1066 1263 CContext::getCurrent()->getCalendar()->getInitDate(), 1067 freq_op, freq_offset, outFreq, 1068 detectMissingValues, detectMissingValues ? 
default_value : 0.0)); 1264 freq_op, freq_offset, outFreq, detectMissingValues)); 1265 1069 1266 selfReferenceFilter->connectOutput(temporalFilter, 0); 1070 1267 return temporalFilter ; … … 1231 1428 { 1232 1429 // Temporarily deactivate the self-transformation of grid 1233 // grid->transformGrid(grid);1430 // grid->transformGrid(grid); 1234 1431 } 1235 1432 } … … 1278 1475 void CField::scaleFactorAddOffset(double scaleFactor, double addOffset) 1279 1476 { 1280 map<int, CArray<double,1> >::iterator it; 1281 for (it = data_srv.begin(); it != data_srv.end(); it++) it->second = (it->second - addOffset) / scaleFactor; 1477 recvDataSrv = (recvDataSrv - addOffset) / scaleFactor; 1282 1478 } 1283 1479 1284 1480 void CField::invertScaleFactorAddOffset(double scaleFactor, double addOffset) 1285 1481 { 1286 map<int, CArray<double,1> >::iterator it;1287 for (it = data_srv.begin(); it != data_srv.end(); it++) it->second = it->second * scaleFactor + addOffset;1288 } 1289 1290 void CField::outputField(CArray<double,3>& fieldOut)1291 {1292 map<int, CArray<double,1> >::iterator it;1293 for ( it = data_srv.begin(); it != data_srv.end(); it++)1482 recvDataSrv = recvDataSrv * scaleFactor + addOffset; 1483 } 1484 1485 void CField::outputField(CArray<double,1>& fieldOut) 1486 { 1487 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient; 1488 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer; 1489 for (size_t idx = 0; idx < outIndexServer.numElements(); ++idx) 1294 1490 { 1295 grid->outputField(it->first, it->second, fieldOut.dataFirst()); 1296 } 1297 } 1298 1299 void CField::outputField(CArray<double,2>& fieldOut) 1300 { 1301 map<int, CArray<double,1> >::iterator it; 1302 for(it=data_srv.begin();it!=data_srv.end();it++) 1491 fieldOut(outIndexServer(idx)) = recvDataSrv(outIndexClient(idx)); 1492 } 1493 } 1494 1495 void CField::inputField(CArray<double,1>& fieldIn) 1496 { 1497 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient; 1498 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer; 1499 for (size_t idx = 0; idx < outIndexServer.numElements(); ++idx) 1303 1500 { 1304 grid->outputField(it->first, it->second, fieldOut.dataFirst()); 1305 } 1306 } 1307 1308 void CField::outputField(CArray<double,1>& fieldOut) 1309 { 1310 map<int, CArray<double,1> >::iterator it; 1311 1312 for (it = data_srv.begin(); it != data_srv.end(); it++) 1501 recvDataSrv(outIndexClient(idx)) = fieldIn(outIndexServer(idx)); 1502 } 1503 1504 } 1505 1506 void CField::outputCompressedField(CArray<double,1>& fieldOut) 1507 { 1508 CArray<size_t,1>& outIndexClient = grid->localIndexToWriteOnClient; 1509 CArray<size_t,1>& outIndexServer = grid->localIndexToWriteOnServer; 1510 for (size_t idx = 0; idx < outIndexServer.numElements(); ++idx) 1313 1511 { 1314 grid->outputField(it->first, it->second, fieldOut.dataFirst()); 1315 } 1316 } 1317 1318 void CField::inputField(CArray<double,3>& fieldOut) 1319 { 1320 map<int, CArray<double,1> >::iterator it; 1321 for (it = data_srv.begin(); it != data_srv.end(); it++) 1512 fieldOut((idx)) = recvDataSrv(outIndexClient(idx)); 1513 } 1514 } 1515 1516 ///------------------------------------------------------------------- 1517 1518 void CField::parse(xml::CXMLNode& node) 1519 { 1520 string newContent ; 1521 SuperClass::parse(node); 1522 if (node.goToChildElement()) 1322 1523 { 1323 grid->inputField(it->first, fieldOut.dataFirst(), it->second); 1324 } 1325 } 1326 1327 void CField::inputField(CArray<double,2>& fieldOut) 1328 { 1329 map<int, CArray<double,1> 
>::iterator it; 1330 for(it = data_srv.begin(); it != data_srv.end(); it++) 1331 { 1332 grid->inputField(it->first, fieldOut.dataFirst(), it->second); 1333 } 1334 } 1335 1336 void CField::inputField(CArray<double,1>& fieldOut) 1337 { 1338 map<int, CArray<double,1> >::iterator it; 1339 for (it = data_srv.begin(); it != data_srv.end(); it++) 1340 { 1341 grid->inputField(it->first, fieldOut.dataFirst(), it->second); 1342 } 1343 } 1344 1345 void CField::outputCompressedField(CArray<double,1>& fieldOut) 1346 { 1347 map<int, CArray<double,1> >::iterator it; 1348 1349 for (it = data_srv.begin(); it != data_srv.end(); it++) 1350 { 1351 grid->outputCompressedField(it->first, it->second, fieldOut.dataFirst()); 1352 } 1353 } 1354 1355 ///------------------------------------------------------------------- 1356 1357 void CField::parse(xml::CXMLNode& node) 1358 { 1359 SuperClass::parse(node); 1360 if (!node.getContent(this->content)) 1361 { 1362 if (node.goToChildElement()) 1524 do 1363 1525 { 1364 do 1365 { 1366 if (node.getElementName() == "variable" || node.getElementName() == "variable_group") this->getVirtualVariableGroup()->parseChild(node); 1367 } while (node.goToNextElement()); 1368 node.goToParentElement(); 1369 } 1370 } 1526 if (node.getElementName() == "variable" || node.getElementName() == "variable_group") this->getVirtualVariableGroup()->parseChild(node); 1527 else if (node.getElementName() == "expr") if (node.getContent(newContent)) content+=newContent ; 1528 } while (node.goToNextElement()); 1529 node.goToParentElement(); 1530 } 1531 if (node.getContent(newContent)) content=newContent ; 1371 1532 } 1372 1533 … … 1416 1577 } 1417 1578 1418 void CField::sendAddAllVariables() 1579 void CField::setContextClient(CContextClient* contextClient) 1580 { 1581 CContext* context = CContext::getCurrent(); 1582 client = contextClient; 1583 if (context->hasClient) 1584 { 1585 // A grid is sent by a client (both for read or write) or by primary server (write only) 1586 if (context->hasServer) 1587 { 1588 if (file->mode.isEmpty() || (!file->mode.isEmpty() && file->mode == CFile::mode_attr::write)) 1589 grid->setContextClient(contextClient); 1590 } 1591 else 1592 grid->setContextClient(contextClient); 1593 } 1594 } 1595 1596 CContextClient* CField::getContextClient() 1597 { 1598 return client; 1599 } 1600 1601 void CField::sendAddAllVariables(CContextClient* client) 1419 1602 { 1420 1603 std::vector<CVariable*> allVar = getAllVariables(); … … 1424 1607 for (; it != itE; ++it) 1425 1608 { 1426 this->sendAddVariable((*it)->getId()); 1427 (*it)->sendAllAttributesToServer(); 1428 (*it)->sendValue(); 1429 } 1430 } 1431 1432 void CField::sendAddVariable(const string& id) 1433 { 1434 CContext* context = CContext::getCurrent(); 1435 1436 if (!context->hasServer) 1437 { 1438 CContextClient* client = context->client; 1439 1440 CEventClient event(this->getType(),EVENT_ID_ADD_VARIABLE); 1441 if (client->isServerLeader()) 1442 { 1443 CMessage msg; 1444 msg << this->getId(); 1445 msg << id; 1446 const std::list<int>& ranks = client->getRanksServerLeader(); 1447 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1448 event.push(*itRank,1,msg); 1449 client->sendEvent(event); 1450 } 1451 else client->sendEvent(event); 1452 } 1453 } 1454 1455 void CField::sendAddVariableGroup(const string& id) 1456 { 1457 CContext* context = CContext::getCurrent(); 1458 if (!context->hasServer) 1459 { 1460 CContextClient* client = context->client; 1461 1462 CEventClient 
event(this->getType(),EVENT_ID_ADD_VARIABLE_GROUP); 1463 if (client->isServerLeader()) 1464 { 1465 CMessage msg; 1466 msg << this->getId(); 1467 msg << id; 1468 const std::list<int>& ranks = client->getRanksServerLeader(); 1469 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1470 event.push(*itRank,1,msg); 1471 client->sendEvent(event); 1472 } 1473 else client->sendEvent(event); 1474 } 1609 this->sendAddVariable((*it)->getId(), client); 1610 (*it)->sendAllAttributesToServer(client); 1611 (*it)->sendValue(client); 1612 } 1613 } 1614 1615 void CField::sendAddVariable(const string& id, CContextClient* client) 1616 { 1617 sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE, client); 1618 } 1619 1620 void CField::sendAddVariableGroup(const string& id, CContextClient* client) 1621 { 1622 sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE_GROUP, client); 1475 1623 } 1476 1624 … … 1510 1658 * Check on freq_off and freq_op attributes. 1511 1659 */ 1512 void CField::checkAttributes(void) 1513 { 1514 bool isFieldRead = file && !file->mode.isEmpty() && file->mode == CFile::mode_attr::read; 1660 void CField::checkTimeAttributes(CDuration* freqOp) 1661 { 1662 bool isFieldRead = file && !file->mode.isEmpty() && file->mode == CFile::mode_attr::read; 1663 bool isFieldWrite = file && ( file->mode.isEmpty() || file->mode == CFile::mode_attr::write); 1515 1664 if (isFieldRead && operation.getValue() != "instant") 1516 ERROR("void CField::check Attributes(void)",1665 ERROR("void CField::checkTimeAttributes(void)", 1517 1666 << "Unsupported operation for field '" << getFieldOutputName() << "'." << std::endl 1518 1667 << "Currently only \"instant\" is supported for fields read from file.") … … 1521 1670 { 1522 1671 if (operation.getValue() == "instant") 1523 freq_op.setValue(file->output_freq.getValue()); 1672 { 1673 if (isFieldRead || isFieldWrite) freq_op.setValue(file->output_freq.getValue()); 1674 else freq_op=*freqOp ; 1675 } 1524 1676 else 1525 1677 freq_op.setValue(TimeStep); … … 1549 1701 } 1550 1702 1703 1551 1704 DEFINE_REF_FUNC(Field,field) 1552 1705 } // namespace xios -
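Note on the recvUpdateData rework in field.cpp above: instead of keeping one data_srv array per client rank, the server now scatters every sender's chunk into the single contiguous recvDataSrv through grid->outLocalIndexStoreOnClient. A reduced sketch of that gather, with std containers standing in for CArray and CBufferIn (gatherChunks and both parameter names are hypothetical):

    #include <cstddef>
    #include <map>
    #include <vector>

    // Scatter each sending rank's chunk into one contiguous server-side
    // array through a rank -> local-index map, as recvUpdateData does with
    // outLocalIndexStoreOnClient.
    void gatherChunks(const std::map<int, std::vector<std::size_t> >& indexPerRank,
                      const std::map<int, std::vector<double> >& dataPerRank,
                      std::vector<double>& recvData)
    {
      std::map<int, std::vector<std::size_t> >::const_iterator it;
      for (it = indexPerRank.begin(); it != indexPerRank.end(); ++it)
      {
        const std::vector<std::size_t>& idx = it->second;
        const std::vector<double>& chunk = dataPerRank.find(it->first)->second;
        for (std::size_t i = 0; i < idx.size(); ++i)
          recvData[idx[i]] = chunk[i]; // idx[i] is the local position of element i
      }
    }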
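One hunk in readField deserves a caution: the commented-out MPI_IN_PLACE variant is replaced by MPI_Allreduce(&nstepMax, &nstepMax, ...), which passes the same address as send and receive buffer. The MPI standard forbids that aliasing in collectives. If the reason is that the ep_lib wrappers do not support MPI_IN_PLACE, a separate send variable keeps the call conforming, as in this sketch (reduceNStepMax is a hypothetical helper; comm stands for the intra-communicator used at that point):

    #include <mpi.h>

    // Portable form of the nstepMax reduction: a distinct send buffer
    // instead of aliased pointers.
    void reduceNStepMax(int& nstepMax, MPI_Comm comm)
    {
      int nstepMaxLocal = nstepMax;
      MPI_Allreduce(&nstepMaxLocal, &nstepMax, 1, MPI_INT, MPI_MAX, comm);
    }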
XIOS/dev/branch_openmp/src/node/field.hpp
r1328 r1460 16 16 #include "transformation_enum.hpp" 17 17 #include "variable.hpp" 18 #include "context_client.hpp" 18 19 19 20 … … 36 37 class CStoreFilter; 37 38 class CFileWriterFilter; 39 class CFileServerWriterFilter; 38 40 39 41 ///-------------------------------------------------------------- … … 56 58 typedef CFieldAttributes SuperClassAttribute; 57 59 60 enum EReadField 61 { 62 RF_NODATA, RF_EOF, RF_DATA 63 }; 64 58 65 public: 59 66 … … 93 100 void resetNStepMax(); 94 101 95 std::map<int, StdSize> getGridAttributesBufferSize(); 96 std::map<int, StdSize> getGridDataBufferSize(); 102 std::map<int, StdSize> getGridAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); 103 // Grid data buffer size for each connection of contextclient 104 std::map<int, StdSize> getGridDataBufferSize(CContextClient* client, bool bufferForWriting = false); 105 106 void setContextClient(CContextClient* newContextClient); 107 CContextClient* getContextClient(); 97 108 98 109 public: … … 120 131 void solveGridDomainAxisBaseRef(); 121 132 133 void solveAllEnabledFieldsAndTransform(); 134 void checkGridOfEnabledFields(); 135 void sendGridOfEnabledFields(); 136 void sendGridComponentOfEnabledFields(); 137 138 /// Vérifications /// 139 void checkTimeAttributes(CDuration* freqOp=NULL); 140 122 141 void buildFilterGraph(CGarbageCollector& gc, bool enableOutput); 142 size_t getGlobalWrittenSize(void) ; 143 144 123 145 boost::shared_ptr<COutputPin> getFieldReference(CGarbageCollector& gc); 124 146 boost::shared_ptr<COutputPin> getSelfReference(CGarbageCollector& gc); … … 140 162 static bool dispatchEvent(CEventServer& event); 141 163 void sendUpdateData(const CArray<double,1>& data); 164 void sendUpdateData(const CArray<double,1>& data, CContextClient* client); 142 165 static void recvUpdateData(CEventServer& event); 143 void recvUpdateData( vector<int>& ranks, vector<CBufferIn*>& buffers);166 void recvUpdateData(std::map<int,CBufferIn*>& rankBuffers); 144 167 void writeField(void); 145 168 bool sendReadDataRequest(const CDate& tsDataRequested); … … 147 170 static void recvReadDataRequest(CEventServer& event); 148 171 void recvReadDataRequest(void); 149 boolreadField(void);172 EReadField readField(void); 150 173 static void recvReadDataReady(CEventServer& event); 151 174 void recvReadDataReady(vector<int> ranks, vector<CBufferIn*> buffers); 152 175 void checkForLateDataFromServer(void); 176 void checkIfMustAutoTrigger(void); 177 void autoTriggerIfNeeded(void); 153 178 void outputField(CArray<double,3>& fieldOut); 154 179 void outputField(CArray<double,2>& fieldOut); … … 168 193 169 194 CVariable* addVariable(const string& id = ""); 170 CVariableGroup* addVariableGroup(const string& id = ""); 171 void sendAddVariable(const string& id = "");172 void sendAddVariableGroup(const string& id = "");195 CVariableGroup* addVariableGroup(const string& id = ""); 196 void sendAddVariable(const string& id, CContextClient* client); 197 void sendAddVariableGroup(const string& id, CContextClient* client); 173 198 static void recvAddVariable(CEventServer& event); 174 199 void recvAddVariable(CBufferIn& buffer); 175 200 static void recvAddVariableGroup(CEventServer& event); 176 void recvAddVariableGroup(CBufferIn& buffer); 177 void sendAddAllVariables(); 178 179 /// Vérifications /// 180 void checkAttributes(void); 201 void recvAddVariableGroup(CBufferIn& buffer); 202 void sendAddAllVariables(CContextClient* client); 203 void writeUpdateData(const CArray<double,1>& data); 181 204 182 205 const 
std::vector<StdString>& getRefDomainAxisIds(); … … 184 207 const string& getExpression(void); 185 208 bool hasExpression(void) const; 186 187 CField* my_getDirectFieldReference(void) const;188 189 190 209 191 210 public: … … 204 223 CDate lastDataRequestedFromServer, lastDataReceivedFromServer, dateEOF; 205 224 bool wasDataRequestedFromServer, wasDataAlreadyReceivedFromServer; 225 bool mustAutoTrigger; 206 226 207 227 map<int,boost::shared_ptr<func::CFunctor> > foperation_srv; 208 228 209 map<int, CArray<double,1> > data_srv; 229 // map<int, CArray<double,1> > data_srv; 230 CArray<double,1> recvDataSrv; 231 232 boost::shared_ptr<func::CFunctor> recvFoperationSrv; 210 233 string content; 234 235 std::vector<StdString> domAxisScalarIds_; 236 bool useCompressedOutput; 237 238 // Two variables to identify the time_counter meta data written in file, which has no time_counter 239 bool hasTimeInstant; 240 bool hasTimeCentered; 241 242 243 DECLARE_REF_FUNC(Field,field) 244 245 private: 246 CContextClient* client; 211 247 212 248 bool areAllReferenceSolved; 213 249 bool isReferenceSolved; 214 std::vector<StdString> domAxisScalarIds_; 215 bool useCompressedOutput; 216 217 // Two variable to identify the time_counter meta data written in file, which has no time_counter 218 bool hasTimeInstant; 219 bool hasTimeCentered; 220 221 DECLARE_REF_FUNC(Field,field) 250 bool isReferenceSolvedAndTransformed; 251 bool isGridChecked; 252 bool nstepMaxRead; 222 253 223 254 private: … … 239 270 //! The terminal filter which writes the data to file 240 271 boost::shared_ptr<CFileWriterFilter> fileWriterFilter; 272 //! The terminal filter which writes data to file 273 boost::shared_ptr<CFileServerWriterFilter> fileServerWriterFilter; 241 274 }; // class CField 242 275 -
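Note on the EReadField tri-state (RF_NODATA, RF_EOF, RF_DATA) that readField now returns instead of a bool: the corresponding wire encoding is visible in the recvReadDataRequest hunks of field.cpp above, restated here as a small helper (encodeReadState is a hypothetical name for this sketch):

    enum EReadField { RF_NODATA, RF_EOF, RF_DATA };

    // Header value placed in front of the reply message, per the switch in
    // CField::recvReadDataRequest.
    int encodeReadState(EReadField state, int nstep)
    {
      switch (state)
      {
        case RF_DATA:   return nstep - 1; // record index; the data array follows
        case RF_NODATA: return -2;        // a data array follows but holds no new record
        case RF_EOF:
        default:        return -1;        // end of file; nothing follows
      }
    }

This lets a reading client distinguish "no record at this step" from a true end of file, which the old boolean return could not express.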
XIOS/dev/branch_openmp/src/node/field_impl.hpp
r1205 r1460
 20 20       if (clientSourceFilter)
 21 21       {
 22             if ( !check_if_active || isActive(true))
    22          if (check_if_active.isEmpty() || (!check_if_active.isEmpty() && (!check_if_active) || isActive(true)))
 23 23           clientSourceFilter->streamData(CContext::getCurrent()->getCalendar()->getCurrentDate(), _data);
 24 24       }
-
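Note on the rewritten guard above: it leans on && binding tighter than ||. Fully parenthesized it reads empty || ((!empty && !check_if_active) || isActive), i.e. data is streamed when check_if_active is unset, set to false, or the field is currently active; only an explicit check_if_active = true with an inactive field skips the send. A self-contained restatement of the truth table, with shouldStream and its parameters as illustrative names:

    #include <cassert>

    // Same truth table as the new condition in field_impl.hpp.
    bool shouldStream(bool attrEmpty, bool checkIfActive, bool active)
    {
      return attrEmpty || (!attrEmpty && !checkIfActive) || active;
    }

    int main()
    {
      assert(shouldStream(true,  false, false)); // attribute unset: always stream
      assert(shouldStream(false, false, false)); // set to false: always stream
      assert(shouldStream(false, true,  true));  // true, but field active: stream
      assert(!shouldStream(false, true, false)); // true and inactive: skip
      return 0;
    }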
XIOS/dev/branch_openmp/src/node/file.cpp
r1338 r1460 17 17 #include "mpi.hpp" 18 18 #include "timer.hpp" 19 19 #include "server.hpp" 20 20 21 21 namespace xios { … … 26 26 : CObjectTemplate<CFile>(), CFileAttributes() 27 27 , vFieldGroup(), data_out(), enabledFields() 28 , allDomainEmpty(false), isOpen(false)28 , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false) 29 29 { 30 30 setVirtualFieldGroup(CFieldGroup::create(getId() + "_virtual_field_group")); … … 35 35 : CObjectTemplate<CFile>(id), CFileAttributes() 36 36 , vFieldGroup(), data_out(), enabledFields() 37 , allDomainEmpty(false), isOpen(false)37 , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false) 38 38 { 39 39 setVirtualFieldGroup(CFieldGroup::create(getId() + "_virtual_field_group")); … … 208 208 209 209 //! Initialize a file in order to write into it 210 void CFile::init File(void)210 void CFile::initWrite(void) 211 211 { 212 212 CContext* context = CContext::getCurrent(); … … 218 218 if (!split_freq.isEmpty()) 219 219 { 220 StdString keySuffix("C Context_"+CContext::getCurrent()->getId()+"::CFile_"+getFileOutputName()+"::") ;220 StdString keySuffix("CFile::"+getFileOutputName()+"::") ; 221 221 if (context->registryIn->foundKey(keySuffix+"splitStart") && context->registryIn->foundKey(keySuffix+"splitEnd")) 222 222 { … … 229 229 } 230 230 } 231 isOpen = false; 232 233 allDomainEmpty = true; 231 isOpen = false; 234 232 235 233 // if (!record_offset.isEmpty() && record_offset < 0) … … 238 236 const int recordOffset = record_offset.isEmpty() ? 0 : record_offset; 239 237 240 // set<CAxis*> setAxis;241 // set<CDomain*> setDomains;242 238 set<StdString> setAxis; 243 239 set<StdString> setDomains; 244 240 245 241 std::vector<CField*>::iterator it, end = this->enabledFields.end(); 246 242 for (it = this->enabledFields.begin(); it != end; it++) 247 243 { 248 CField* field = *it; 249 allDomainEmpty &= !field->grid->doGridHaveDataToWrite(); 244 CField* field = *it; 250 245 std::vector<CAxis*> vecAxis = field->grid->getAxis(); 251 246 for (size_t i = 0; i < vecAxis.size(); ++i) 252 setAxis.insert(vecAxis[i]->getAxisOutputName()); 253 // setAxis.insert(vecAxis[i]); 247 setAxis.insert(vecAxis[i]->getAxisOutputName()); 254 248 std::vector<CDomain*> vecDomains = field->grid->getDomains(); 255 249 for (size_t i = 0; i < vecDomains.size(); ++i) 256 setDomains.insert(vecDomains[i]->getDomainOutputName()); 257 // setDomains.insert(vecDomains[i]); 250 setDomains.insert(vecDomains[i]->getDomainOutputName()); 258 251 259 252 field->resetNStep(recordOffset); … … 263 256 264 257 // create sub communicator for file 265 int color = allDomainEmpty ? 0 : 1; 266 ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 267 if (allDomainEmpty) ep_lib::MPI_Comm_free(&fileComm); 258 createSubComFile(); 268 259 269 260 // if (time_counter.isEmpty()) time_counter.setValue(time_counter_attr::centered); … … 271 262 } 272 263 273 //! Verify state of a file274 void CFile:: checkFile(void)264 //! 
Initialize a file in order to write into it 265 void CFile::initRead(void) 275 266 { 276 if (mode.isEmpty() || mode.getValue() == mode_attr::write) 277 { 278 CTimer::get("Files : create headers").resume(); 279 if (!isOpen) createHeader(); 280 CTimer::get("Files : create headers").suspend(); 281 checkSync(); 282 } 283 else 284 { 285 CTimer::get("Files : open headers").resume(); 286 if (!isOpen) openInReadMode(); 287 CTimer::get("Files : open headers").suspend(); 288 } 289 checkSplit(); 267 if (checkRead) return; 268 createSubComFile(); 269 checkRead = true; 270 } 271 272 /*! 273 Create a sub communicator in which processes participate in reading/opening file 274 */ 275 void CFile::createSubComFile() 276 { 277 CContext* context = CContext::getCurrent(); 278 CContextServer* server = context->server; 279 280 // create sub communicator for file 281 allZoneEmpty = true; 282 std::vector<CField*>::iterator it, end = this->enabledFields.end(); 283 for (it = this->enabledFields.begin(); it != end; it++) 284 { 285 CField* field = *it; 286 bool nullGrid = (0 == field->grid); 287 allZoneEmpty &= nullGrid ? false : !field->grid->doGridHaveDataToWrite(); 288 } 289 290 int color = allZoneEmpty ? 0 : 1; 291 ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 292 if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm); 293 } 294 295 /* 296 Check condition to write into a file 297 For now, we only use the level-2 server to write files (if this mode is activated) 298 or classical server to do this job. 299 */ 300 void CFile::checkWriteFile(void) 301 { 302 CContext* context = CContext::getCurrent(); 303 // Done by classical server or secondary server 304 // This condition should be changed soon 305 if (CServer::serverLevel == 0 || CServer::serverLevel == 2) 306 { 307 if (mode.isEmpty() || mode.getValue() == mode_attr::write) 308 { 309 CTimer::get("Files : create headers").resume(); 310 if (!isOpen) createHeader(); 311 CTimer::get("Files : create headers").suspend(); 312 checkSync(); 313 } 314 checkSplit(); // REally need this? 315 } 316 } 317 318 /* 319 Check condition to read from a file 320 For now, we only use the level-1 server to write files (if this mode is activated) 321 or classical server to do this job. 322 This function can be used by client for reading metadata 323 */ 324 void CFile::checkReadFile(void) 325 { 326 CContext* context = CContext::getCurrent(); 327 // Done by classical server or secondary server 328 // TODO: This condition should be changed soon. It only works with maximum number of level as 2 329 if (CServer::serverLevel == 0 || CServer::serverLevel == 1) 330 { 331 if (!mode.isEmpty() && mode.getValue() == mode_attr::read) 332 { 333 CTimer::get("Files : open headers").resume(); 334 335 if (!isOpen) openInReadMode(); 336 337 CTimer::get("Files : open headers").suspend(); 338 } 339 //checkSplit(); // Really need for reading? 340 } 341 } 342 343 /*! 
344 Verify if a process participates in an opening-file communicator 345 \return true if the process doesn't participate in opening file 346 */ 347 bool CFile::isEmptyZone() 348 { 349 return allZoneEmpty; 290 350 } 291 351 … … 352 412 CContextServer* server = context->server; 353 413 354 if (!all DomainEmpty)414 if (!allZoneEmpty) 355 415 { 356 416 StdString filename = getFileOutputName(); … … 381 441 if (pos2!=std::string::npos) 382 442 { 383 middlePart=filename.substr(pos1,pos2-pos1) ; 443 middlePart=filename.substr(pos1,pos2-pos1) ; 384 444 pos2+=strEndDate.size() ; 385 445 lastPart=filename.substr(pos2,filename.size()-pos2) ; … … 434 494 oss << lastPart ; 435 495 436 StdString keySuffix("C Context_"+CContext::getCurrent()->getId()+"::CFile_"+getFileOutputName()+"::") ;496 StdString keySuffix("CFile::"+getFileOutputName()+"::") ; 437 497 context->registryOut->setKey(keySuffix+"splitStart", lastSplit); 438 498 context->registryOut->setKey(keySuffix+"splitEnd", splitEnd); … … 534 594 \brief Open an existing NetCDF file in read-only mode 535 595 */ 536 void CFile::openInReadMode( void)596 void CFile::openInReadMode() 537 597 { 538 598 CContext* context = CContext::getCurrent(); 539 599 CContextServer* server = context->server; 540 541 if (!allDomainEmpty) 600 ep_lib::MPI_Comm readComm = this->fileComm; 601 602 if (!allZoneEmpty) 542 603 { 543 604 StdString filename = getFileOutputName(); … … 578 639 { 579 640 int commSize, commRank; 580 ep_lib::MPI_Comm_size( fileComm, &commSize);581 ep_lib::MPI_Comm_rank( fileComm, &commRank);641 ep_lib::MPI_Comm_size(readComm, &commSize); 642 ep_lib::MPI_Comm_rank(readComm, &commRank); 582 643 583 644 if (server->intraCommSize > 1) … … 597 658 bool isCollective = par_access.isEmpty() || par_access == par_access_attr::collective; 598 659 #ifdef _usingEP 599 //printf("multifile was %d\n", multifile);600 //multifile = true;601 660 if (isOpen) data_out->closeFile(); 602 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective));603 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name));661 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), readComm, multifile, isCollective)); 662 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), readComm, multifile, isCollective, time_counter_name)); 604 663 isOpen = true; 605 664 #elif _usingMPI 606 665 if (isOpen) data_out->closeFile(); 607 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective));608 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name));666 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), readComm, multifile, isCollective)); 667 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), readComm, multifile, isCollective, time_counter_name)); 609 668 isOpen = true; 610 669 #endif … … 615 674 void CFile::close(void) 616 675 { 617 if (!all DomainEmpty)676 if (!allZoneEmpty) 618 677 if (isOpen) 619 678 { … … 622 681 else 623 682 this->data_in->closeFile(); 683 isOpen = false; 624 684 } 625 // if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 626 //if (fileComm.mpi_comm != ::MPI_COMM_NULL) MPI_Comm_free(&fileComm); 685 //if (fileComm != MPI_COMM_NULL) 
MPI_Comm_free(&fileComm); 627 686 } 628 687 //---------------------------------------------------------------- … … 633 692 634 693 // Just check file and try to open it 635 CContext* context = CContext::getCurrent();636 CContextClient* client=context->client;637 638 // It would probably be better to call initFile() somehow639 ep_lib::MPI_Comm_dup(client->intraComm, &fileComm);640 694 if (time_counter_name.isEmpty()) time_counter_name = "time_counter"; 641 695 642 checkFile();696 checkReadFile(); 643 697 644 698 for (int idx = 0; idx < enabledFields.size(); ++idx) … 655 709 // Read necessary value from file 656 710 #pragma omp critical (_func) 657 { 658 this->data_in->readFieldAttributesValues(enabledFields[idx]); 659 } 711 this->data_in->readFieldAttributesValues(enabledFields[idx]); 712 660 713 // Fill attributes for base reference 661 714 enabledFields[idx]->solveGridDomainAxisBaseRef(); … … 737 790 { 738 791 this->enabledFields[i]->solveOnlyReferenceEnabledField(sendToServer); 739 // this->enabledFields[i]->buildGridTransformationGraph(); 792 } 793 } 794 795 void CFile::checkGridOfEnabledFields() 796 { 797 int size = this->enabledFields.size(); 798 for (int i = 0; i < size; ++i) 799 { 800 this->enabledFields[i]->checkGridOfEnabledFields(); 801 } 802 } 803 804 void CFile::sendGridComponentOfEnabledFields() 805 { 806 int size = this->enabledFields.size(); 807 for (int i = 0; i < size; ++i) 808 { 809 this->enabledFields[i]->sendGridComponentOfEnabledFields(); 810 } 811 } 812 813 void CFile::sendGridOfEnabledFields() 814 { 815 int size = this->enabledFields.size(); 816 for (int i = 0; i < size; ++i) 817 { 818 this->enabledFields[i]->sendGridOfEnabledFields(); 740 819 } 741 820 } … … 758 837 \param [in] sendToServer: Send all info to server (true) or only a part of it (false) 759 838 */ 760 void CFile::solveAllRefOfEnabledFields(bool sendToServer)839 void CFile::solveAllRefOfEnabledFieldsAndTransform(bool sendToServer) 761 840 { 762 841 int size = this->enabledFields.size(); 763 842 for (int i = 0; i < size; ++i) 764 843 { 765 this->enabledFields[i]->solveAllReferenceEnabledField(sendToServer);844 this->enabledFields[i]->solveAllEnabledFieldsAndTransform(); 766 845 } 767 846 } … … 782 861 783 862 /*! 863 * Post-process the filter graph for each active field. 864 */ 865 void CFile::postProcessFilterGraph() 866 { 867 int size = this->enabledFields.size(); 868 for (int i = 0; i < size; ++i) 869 { 870 this->enabledFields[i]->checkIfMustAutoTrigger(); 871 } 872 } 873 874 /*! 784 875 Prefetching the data for enabled fields read from file. 785 876 */ … … 795 886 796 887 /*! 888 Do all pre timestep operations for enabled fields in read mode: 889 - Check that the data expected from the server has been received 890 - Check if some filters must auto-trigger 891 */ 892 void CFile::doPreTimestepOperationsForEnabledReadModeFields(void) 893 { 894 if (mode.isEmpty() || mode.getValue() != mode_attr::read) 895 return; 896 897 int size = this->enabledFields.size(); 898 for (int i = 0; i < size; ++i) 899 { 900 this->enabledFields[i]->checkForLateDataFromServer(); 901 this->enabledFields[i]->autoTriggerIfNeeded(); 902 } 903 } 904 905 /*!
797 906 Do all post timestep operations for enabled fields in read mode: 798 907 - Prefetch the data read from file when needed 799 - Check that the data excepted from server has been received800 908 */ 801 909 void CFile::doPostTimestepOperationsForEnabledReadModeFields(void) … … 807 915 for (int i = 0; i < size; ++i) 808 916 { 809 this->enabledFields[i]->checkForLateDataFromServer();810 917 this->enabledFields[i]->sendReadDataRequestIfNeeded(); 811 918 } … … 874 981 } 875 982 983 void CFile::setContextClient(CContextClient* newContextClient) 984 { 985 client = newContextClient; 986 size_t size = this->enabledFields.size(); 987 for (size_t i = 0; i < size; ++i) 988 { 989 this->enabledFields[i]->setContextClient(newContextClient); 990 } 991 } 992 993 CContextClient* CFile::getContextClient() 994 { 995 return client; 996 } 997 998 void CFile::setReadContextClient(CContextClient* readContextclient) 999 { 1000 read_client = readContextclient; 1001 } 1002 1003 CContextClient* CFile::getReadContextClient() 1004 { 1005 return read_client; 1006 } 1007 876 1008 /*! 877 1009 \brief Send a message to create a field on server side 878 1010 \param[in] id String identity of field that will be created on server 879 1011 */ 880 void CFile::sendAddField(const string& id) 881 { 882 CContext* context = CContext::getCurrent(); 883 884 if (! context->hasServer ) 885 { 886 CContextClient* client = context->client; 887 888 CEventClient event(this->getType(),EVENT_ID_ADD_FIELD); 889 if (client->isServerLeader()) 890 { 891 CMessage msg; 892 msg << this->getId(); 893 msg << id; 894 const std::list<int>& ranks = client->getRanksServerLeader(); 895 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 896 event.push(*itRank,1,msg); 897 client->sendEvent(event); 898 } 899 else client->sendEvent(event); 900 } 901 1012 void CFile::sendAddField(const string& id, CContextClient* client) 1013 { 1014 sendAddItem(id, EVENT_ID_ADD_FIELD, client); 902 1015 } 903 1016 … … 906 1019 \param[in] id String identity of field group that will be created on server 907 1020 */ 908 void CFile::sendAddFieldGroup(const string& id) 909 { 910 CContext* context = CContext::getCurrent(); 911 if (! context->hasServer ) 912 { 913 CContextClient* client = context->client; 914 915 CEventClient event(this->getType(),EVENT_ID_ADD_FIELD_GROUP); 916 if (client->isServerLeader()) 917 { 918 CMessage msg; 919 msg << this->getId(); 920 msg << id; 921 const std::list<int>& ranks = client->getRanksServerLeader(); 922 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 923 event.push(*itRank,1,msg); 924 client->sendEvent(event); 925 } 926 else client->sendEvent(event); 927 } 928 1021 void CFile::sendAddFieldGroup(const string& id, CContextClient* client) 1022 { 1023 sendAddItem(id, (int)EVENT_ID_ADD_FIELD_GROUP, client); 929 1024 } 930 1025 … … 983 1078 is to duplicate this value on server, too. 984 1079 */ 985 void CFile::sendAddAllVariables( )1080 void CFile::sendAddAllVariables(CContextClient* client) 986 1081 { 987 1082 std::vector<CVariable*> allVar = getAllVariables(); … … 991 1086 for (; it != itE; ++it) 992 1087 { 993 this->sendAddVariable((*it)->getId()); 994 (*it)->sendAllAttributesToServer(); 995 (*it)->sendValue(); 996 } 997 } 998 999 /*! 
1000 \brief Send a message to create a variable on server side 1001 A variable always belongs to a variable group 1002 \param[in] id String identity of variable that will be created on server 1003 */ 1004 void CFile::sendAddVariable(const string& id) 1005 { 1006 CContext* context = CContext::getCurrent(); 1007 1008 if (! context->hasServer ) 1009 { 1010 CContextClient* client = context->client; 1011 1012 CEventClient event(this->getType(),EVENT_ID_ADD_VARIABLE); 1013 if (client->isServerLeader()) 1014 { 1015 CMessage msg; 1016 msg << this->getId(); 1017 msg << id; 1018 const std::list<int>& ranks = client->getRanksServerLeader(); 1019 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1020 event.push(*itRank,1,msg); 1021 client->sendEvent(event); 1022 } 1023 else client->sendEvent(event); 1024 } 1025 1088 this->sendAddVariable((*it)->getId(), client); 1089 (*it)->sendAllAttributesToServer(client); 1090 (*it)->sendValue(client); 1091 } 1026 1092 } 1027 1093 … … 1029 1095 \brief Send a message to create a variable group on server side 1030 1096 \param[in] id String identity of variable group that will be created on server 1031 */ 1032 void CFile::sendAddVariableGroup(const string& id) 1033 { 1034 CContext* context = CContext::getCurrent(); 1035 if (! context->hasServer ) 1036 { 1037 CContextClient* client = context->client; 1038 1039 CEventClient event(this->getType(),EVENT_ID_ADD_VARIABLE_GROUP); 1040 if (client->isServerLeader()) 1041 { 1042 CMessage msg; 1043 msg << this->getId(); 1044 msg << id; 1045 const std::list<int>& ranks = client->getRanksServerLeader(); 1046 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1047 event.push(*itRank,1,msg); 1048 client->sendEvent(event); 1049 } 1050 else client->sendEvent(event); 1051 } 1052 1097 \param [in] client client to which we will send this adding action 1098 */ 1099 void CFile::sendAddVariableGroup(const string& id, CContextClient* client) 1100 { 1101 sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE_GROUP, client); 1102 } 1103 1104 /* 1105 Send message to add a variable into a file within a certain client 1106 \param [in] id String identity of a variable 1107 \param [in] client client to which we will send this adding action 1108 */ 1109 void CFile::sendAddVariable(const string& id, CContextClient* client) 1110 { 1111 sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE, client); 1053 1112 } 1054 1113 … … 1108 1167 Remark: This function must be called AFTER all active (enabled) files have been created on the server side 1109 1168 */ 1110 void CFile::sendEnabledFields( )1169 void CFile::sendEnabledFields(CContextClient* client) 1111 1170 { 1112 1171 size_t size = this->enabledFields.size(); … … 1114 1173 { 1115 1174 CField* field = this->enabledFields[i]; 1116 this->sendAddField(field->getId()); 1117 field->checkAttributes(); 1118 field->sendAllAttributesToServer(); 1119 field->sendAddAllVariables(); 1120 } 1121 } 1175 this->sendAddField(field->getId(), client); 1176 field->checkTimeAttributes(); 1177 field->sendAllAttributesToServer(client); 1178 field->sendAddAllVariables(client); 1179 } 1180 } 1181 1122 1182 1123 1183 /*! -
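The createSubComFile() method shown in the file.cpp hunks above follows the classic MPI color-split pattern: every process of the server intra-communicator takes part in the collective MPI_Comm_split, and the processes with no data to write (allZoneEmpty) free their half of the split right away. Below is a minimal standalone sketch of that pattern, using plain MPI instead of the ep_lib wrappers; all names here (hasDataForFile, fileComm) are illustrative, not XIOS API.

#include <mpi.h>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Stand-in for !allZoneEmpty: does this process hold data for the file?
  bool hasDataForFile = (rank % 2 == 0);

  // Collective over all processes: equal colors end up in the same
  // sub-communicator, ordered inside it by the key (here the original rank).
  MPI_Comm fileComm;
  MPI_Comm_split(MPI_COMM_WORLD, hasDataForFile ? 1 : 0, rank, &fileComm);

  if (!hasDataForFile)
  {
    // Non-participating processes drop their communicator immediately,
    // as createSubComFile() does when allZoneEmpty is true.
    MPI_Comm_free(&fileComm);
  }
  else
  {
    // ... open and access the file collectively over fileComm ...
    MPI_Comm_free(&fileComm);
  }

  MPI_Finalize();
  return 0;
}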
XIOS/dev/branch_openmp/src/node/file.hpp
r1328 r1460 12 12 #include "attribute_enum.hpp" 13 13 #include "attribute_enum_impl.hpp" 14 #include "context_client.hpp" 15 //#include "mpi.hpp" 14 16 15 17 namespace xios { … … 86 88 bool checkSplit(void); 87 89 bool checkSync(void); 88 void checkFile(void); 89 void initFile(void); 90 void checkWriteFile(void); 91 void checkReadFile(void); 92 void initWrite(void); 93 void initRead(void); 94 bool isEmptyZone(); 90 95 91 96 /// Mutateurs /// … … 104 109 void solveOnlyRefOfEnabledFields(bool sendToServer); 105 110 void generateNewTransformationGridDest(); 106 void solveAllRefOfEnabledFields(bool sendToServer);111 107 112 void buildFilterGraphOfEnabledFields(CGarbageCollector& gc); 113 void postProcessFilterGraph(); 108 114 void prefetchEnabledReadModeFields(); 115 void doPreTimestepOperationsForEnabledReadModeFields(); 109 116 void doPostTimestepOperationsForEnabledReadModeFields(); 117 118 void solveAllRefOfEnabledFieldsAndTransform(bool sendToServer); 119 void checkGridOfEnabledFields(); 120 void sendGridOfEnabledFields(); 121 void sendGridComponentOfEnabledFields(); 110 122 111 123 // Add component into file … … 114 126 CVariable* addVariable(const string& id = ""); 115 127 CVariableGroup* addVariableGroup(const string& id = ""); 116 117 // Send info to serever 118 void sendEnabledFields(); 119 void sendAddField(const string& id = ""); 120 void sendAddFieldGroup(const string& id = ""); 121 void sendAddAllVariables(); 122 void sendAddVariable(const string& id = ""); 123 void sendAddVariableGroup(const string& id = ""); 124 128 void setContextClient(CContextClient* newContextClient); 129 CContextClient* getContextClient(); 130 131 void setReadContextClient(CContextClient* newContextClient); 132 CContextClient* getReadContextClient(); 133 134 // Send info to server 135 void sendEnabledFields(CContextClient* client); 136 void sendAddField(const string& id, CContextClient* client); 137 void sendAddFieldGroup(const string& id, CContextClient* client); 138 void sendAddVariable(const string& id, CContextClient* client); 139 void sendAddVariableGroup(const string& id, CContextClient* client); 140 void sendAddAllVariables(CContextClient* client); 141 125 142 // Receive info from client 126 143 static void recvAddField(CEventServer& event); … … 154 171 CDate lastSplit; 155 172 int nbAxis, nbDomains; 156 bool isOpen; 157 bool allDomainEmpty; 173 bool isOpen; 158 174 ep_lib::MPI_Comm fileComm; 159 175 176 private: 177 void createSubComFile(); 178 bool checkRead; 179 bool allZoneEmpty; 180 160 181 private : 161 182 /// Propriétés privées /// 183 CContextClient* client; 184 CContextClient* read_client; // Context client for reading (channel between server 1 and client) 162 185 CFieldGroup* vFieldGroup; 163 186 CVariableGroup* vVariableGroup; … … 166 189 std::vector<CField*> enabledFields; 167 190 191 168 192 public: 169 193 // virtual void toBinary (StdOStream& os) const; -
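The reworked header above makes every send explicit about its channel: the old parameterless sendAddField()/sendEnabledFields() overloads are replaced by variants taking a CContextClient*, and the file now keeps both a write client and a read_client. The following is a hedged usage sketch based only on these declarations; registerFileOnServer, file, writeClient and readClient are hypothetical names, not part of the changeset.

#include "file.hpp"

// Bind a file to its two channels and register its contents server-side.
void registerFileOnServer(xios::CFile* file,
                          xios::CContextClient* writeClient,
                          xios::CContextClient* readClient)
{
  // setContextClient also propagates the client to the enabled fields
  // (see CFile::setContextClient in the file.cpp hunks above).
  file->setContextClient(writeClient);

  // Separate channel between the level-1 server and the client, used
  // when reading data back.
  file->setReadContextClient(readClient);

  // Each "add" event is routed through an explicitly chosen client
  // instead of the former implicit context->client.
  file->sendAddAllVariables(file->getContextClient());
  file->sendEnabledFields(file->getContextClient());
}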
XIOS/dev/branch_openmp/src/node/grid.cpp
r1334 r1460 18 18 #include "grid_transformation.hpp" 19 19 #include "grid_generate.hpp" 20 #include "server.hpp" 20 21 21 22 namespace xios { … … 31 32 , clientDistribution_(0), isIndexSent(false) , serverDistribution_(0), clientServerMap_(0) 32 33 , writtenDataSize_(0), numberWrittenIndexes_(0), totalNumberWrittenIndexes_(0), offsetWrittenIndexes_(0) 33 , connectedDataSize_(), connectedServerRank_(), isDataDistributed_(true), isCompressible_(false) 34 , connectedDataSize_(), connectedServerRank_(), connectedServerRankRead_(), connectedDataSizeRead_() 35 , isDataDistributed_(true), isCompressible_(false) 34 36 , transformations_(0), isTransformed_(false) 35 37 , axisPositionInGrid_(), hasDomainAxisBaseRef_(false) 36 38 , gridSrc_(), hasTransform_(false), isGenerated_(false), order_(), globalIndexOnServer_() 39 , computedWrittenIndex_(false) 40 , clients() 37 41 { 38 42 setVirtualDomainGroup(CDomainGroup::create(getId() + "_virtual_domain_group")); … … 49 53 , clientDistribution_(0), isIndexSent(false) , serverDistribution_(0), clientServerMap_(0) 50 54 , writtenDataSize_(0), numberWrittenIndexes_(0), totalNumberWrittenIndexes_(0), offsetWrittenIndexes_(0) 51 , connectedDataSize_(), connectedServerRank_(), isDataDistributed_(true), isCompressible_(false) 55 , connectedDataSize_(), connectedServerRank_(), connectedServerRankRead_(), connectedDataSizeRead_() 56 , isDataDistributed_(true), isCompressible_(false) 52 57 , transformations_(0), isTransformed_(false) 53 58 , axisPositionInGrid_(), hasDomainAxisBaseRef_(false) 54 59 , gridSrc_(), hasTransform_(false), isGenerated_(false), order_(), globalIndexOnServer_() 60 , computedWrittenIndex_(false) 61 , clients() 55 62 { 56 63 setVirtualDomainGroup(CDomainGroup::create(getId() + "_virtual_domain_group")); … … 87 94 { 88 95 std::vector<int> dataNindex = clientDistribution_->getDataNIndex(); 89 for (int i = 0; i < dataNindex.size(); ++i) retvalue *= dataNindex[i]; 96 for (int i = 0; i < dataNindex.size(); ++i) retvalue *= dataNindex[i]; 90 97 } 91 98 return retvalue; … … 96 103 * 97 104 * \return A map associating the server rank with its minimum buffer size. 
105 * TODO: Refactor code 98 106 */ 99 std::map<int, StdSize> CGrid::getAttributesBufferSize( )100 { 101 std::map<int, StdSize> attributesSizes = getMinimumBufferSizeForAttributes( );107 std::map<int, StdSize> CGrid::getAttributesBufferSize(CContextClient* client, bool bufferForWriting) 108 { 109 std::map<int, StdSize> attributesSizes = getMinimumBufferSizeForAttributes(client); 102 110 103 111 // The grid indexes require a similar size as the actual data 104 std::map<int, StdSize> dataSizes = getDataBufferSize( );112 std::map<int, StdSize> dataSizes = getDataBufferSize(client, "", bufferForWriting); 105 113 std::map<int, StdSize>::iterator it, itE = dataSizes.end(); 106 114 for (it = dataSizes.begin(); it != itE; ++it) … … 110 118 attributesSizes[it->first] = it->second; 111 119 } 112 120 113 121 // Account for the axis attributes 114 122 std::vector<CAxis*> axisList = getAxis(); 115 123 for (size_t i = 0; i < axisList.size(); ++i) 116 124 { 117 std::map<int, StdSize> axisAttBuffSize = axisList[i]->getAttributesBufferSize( );125 std::map<int, StdSize> axisAttBuffSize = axisList[i]->getAttributesBufferSize(client, getGlobalDimension(),axisPositionInGrid_[i]); 118 126 for (it = axisAttBuffSize.begin(), itE = axisAttBuffSize.end(); it != itE; ++it) 119 127 { 128 it->second += 2 * sizeof(bool); 120 129 if (it->second > attributesSizes[it->first]) 121 130 attributesSizes[it->first] = it->second; … … 127 136 for (size_t i = 0; i < domList.size(); ++i) 128 137 { 129 std::map<int, StdSize> domAttBuffSize = domList[i]->getAttributesBufferSize( );138 std::map<int, StdSize> domAttBuffSize = domList[i]->getAttributesBufferSize(client); 130 139 for (it = domAttBuffSize.begin(), itE = domAttBuffSize.end(); it != itE; ++it) 131 140 { 141 it->second += 2 * sizeof(bool); 132 142 if (it->second > attributesSizes[it->first]) 133 143 attributesSizes[it->first] = it->second; … … 136 146 137 147 return attributesSizes; 138 148 } 139 149 140 150 /*! 141 * Compute the minimum buffer size required to send the data to the server(s).142 * 151 * Compute the minimum buffer size required to send the data. 152 * \param client contextClient used to determine the size of connected receivers 143 153 * \param id the id used to tag the data 144 * \return A map associating the server rank with its minimum buffer size. 154 * \param bufferForWriting flag indicating if a buffer is used to send data for writing 155 * \return A map associating the sender rank with its minimum buffer size. 145 156 */ 146 std::map<int, StdSize> CGrid::getDataBufferSize(const std::string& id /*= ""*/) 147 { 148 std::map<int, StdSize> dataSizes; 157 std::map<int, StdSize> CGrid::getDataBufferSize(CContextClient* client, const std::string& id /*= ""*/, bool bufferForWriting /*= "false"*/) 158 { 149 159 // The record index is sometimes sent along with the data but we always 150 160 // include it in the size calculation for the sake of simplicity 151 const size_t extraSize = CEventClient::headerSize + (id.empty() ? getId() : id).size() + 2 * sizeof(size_t); 152 153 std::map<int, size_t>::const_iterator itEnd = connectedDataSize_.end(); 154 for (size_t k = 0; k < connectedServerRank_.size(); ++k) 161 const size_t extraSize = CEventClient::headerSize + (id.empty() ? getId() : id).size() 162 + 2 * sizeof(size_t) 163 + sizeof(size_t); 164 165 std::map<int, StdSize> dataSizes; 166 int receiverSize = client->serverSize; 167 std::map<int,size_t>& dataSizeMap = bufferForWriting ? 
connectedDataSize_[receiverSize]: connectedDataSizeRead_; 168 std::vector<int>& connectedServerRanks = bufferForWriting ? connectedServerRank_[receiverSize] : connectedServerRankRead_; 169 170 std::map<int, size_t>::const_iterator itEnd = dataSizeMap.end(); 171 for (size_t k = 0; k < connectedServerRanks.size(); ++k) 155 172 { 156 int rank = connectedServerRank_[k]; 157 std::map<int, size_t>::const_iterator it = connectedDataSize_.find(rank); 173 int rank = connectedServerRanks[k]; 174 std::map<int, size_t>::const_iterator it = dataSizeMap.find(rank); 158 175 size_t count = (it != itEnd) ? it->second : 0; 159 176 … 164 181 } 165 182 183 size_t CGrid::getGlobalWrittenSize(void) 184 { 185 std::vector<CDomain*> domainP = this->getDomains(); 186 std::vector<CAxis*> axisP = this->getAxis(); 187 188 size_t globalGridSize=1 ; 189 for (std::vector<CDomain*>::iterator it=domainP.begin(); it!=domainP.end();++it) globalGridSize*=(*it)->getGlobalWrittenSize() ; 190 for (std::vector<CAxis*>::iterator it=axisP.begin(); it!=axisP.end();++it) globalGridSize*=(*it)->getGlobalWrittenSize() ; 191 return globalGridSize ; 192 } 193 194 166 195 void CGrid::checkAttributesAfterTransformation() 167 196 { … … 225 254 226 255 //--------------------------------------------------------------- 227 256 /* 257 Find all references of the grid's components and inherit attributes if necessary 258 */ 228 259 void CGrid::solveDomainAxisRef(bool areAttributesChecked) 229 260 { … … 236 267 } 237 268 269 /* 270 Go up the reference hierarchy and fill in the base reference with the attributes of the children. 271 This function should only be used after reading the components' attributes from a file 272 */ 238 273 void CGrid::solveDomainAxisBaseRef() 239 274 { … … 272 307 { 273 308 CContext* context = CContext::getCurrent(); 274 CContextClient* client=context->client; 275 276 if (isScalarGrid()) 277 { 278 if (context->hasClient) 279 if (this->isChecked && doSendingIndex && !isIndexSent) { sendIndexScalarGrid(); this->isIndexSent = true; } 280 281 if (this->isChecked) return; 282 283 if (context->hasClient) 284 { 285 this->computeIndexScalarGrid(); 309 int nbSrvPools = (context->hasServer) ? (context->hasClient ?
context->clientPrimServer.size() : 0) : 1; 310 nbSrvPools = 1; 311 for (int p = 0; p < nbSrvPools; ++p) 312 { 313 if (context->hasClient && this->isChecked && doSendingIndex && !isIndexSent) 314 { 315 if (isScalarGrid()) 316 sendIndexScalarGrid(); 317 else 318 sendIndex(); 319 this->isIndexSent = true; 286 320 } 287 321 288 if (!(this->hasTransform() && !this->isTransformed())) 289 this->isChecked = true; 290 return; 322 // Not sure about this 323 //if (!(this->hasTransform() && !this->isTransformed())) 324 // this->isChecked = true; 325 //return; 291 326 } 292 293 if (context->hasClient) 294 if (this->isChecked && doSendingIndex && !isIndexSent) { sendIndex(); this->isIndexSent = true; } 295 327 296 328 if (this->isChecked) return; 297 298 if (context->hasClient) 299 { 300 this->checkAttributesAfterTransformation(); 301 this->checkMask(); 302 this->computeIndex(); 303 } 304 305 if (!(this->hasTransform() && !this->isTransformed())) 329 this->checkAttributesAfterTransformation(); 330 331 // TODO: Transfer grid attributes 332 //if (!context->hasClient && context->hasServer) this->createMask(); 333 this->computeIndex(); 334 335 if (!(this->hasTransform() && !this->isTransformed())) 306 336 this->isChecked = true; 307 337 308 338 if (!(this->hasTransform() && (!this->isGenerated()))) 309 this->isChecked = true; 310 339 this->isChecked = true; 340 } 341 342 /* 343 Create the grid mask from the masks of its components 344 */ 312 345 void CGrid::createMask(void) 313 346 { … … 318 351 319 352 std::vector<CArray<bool,1>* > domainMasks(domainP.size()); 320 for (int i = 0; i < domainMasks.size(); ++i) domainMasks[i] = &(domainP[i]->mask_1d);353 for (int i = 0; i < domainMasks.size(); ++i) domainMasks[i] = &(domainP[i]->domainMask); 321 354 std::vector<CArray<bool,1>* > axisMasks(axisP.size()); 322 355 for (int i = 0; i < axisMasks.size(); ++i) axisMasks[i] = &(axisP[i]->mask); … … 349 382 } 350 384 /* 385 Check the validity of the grid mask against the masks of its components 386 */ 351 387 void CGrid::checkMask(void) 352 388 { … … 357 393 358 394 std::vector<CArray<bool,1>* > domainMasks(domainP.size()); 359 for (int i = 0; i < domainMasks.size(); ++i) domainMasks[i] = &(domainP[i]->mask_1d);395 for (int i = 0; i < domainMasks.size(); ++i) domainMasks[i] = &(domainP[i]->domainMask); 360 396 std::vector<CArray<bool,1>* > axisMasks(axisP.size()); 361 397 for (int i = 0; i < axisMasks.size(); ++i) axisMasks[i] = &(axisP[i]->mask); … … 388 424 } 389 425 390 void CGrid::modifyMask(const CArray<int,1>& indexToModify) 426 427 /*! 428 A grid can have multiple dimensions, and so can its mask, in the form of a multi-dimensional array. 429 Storing a separate multi-dimensional array for every mask is impractical, 430 so the mask is flattened into a one-dimensional array on which all processing takes place.
431 \param [out] localMask the grid mask flattened into a one-dimensional array 432 */ 433 434 void CGrid::getLocalMask(CArray<bool,1>& localMask) 435 { 436 std::vector<CDomain*> domainP = this->getDomains(); 437 std::vector<CAxis*> axisP = this->getAxis(); 438 int dim = domainP.size() * 2 + axisP.size(); 439 440 switch (dim) 441 { 442 case 0: 443 getLocalMask(mask_0d, localMask); 444 break; 445 case 1: 446 getLocalMask(mask_1d, localMask); 447 break; 448 case 2: 449 getLocalMask(mask_2d, localMask); 450 break; 451 case 3: 452 getLocalMask(mask_3d, localMask); 453 break; 454 case 4: 455 getLocalMask(mask_4d, localMask); 456 break; 457 case 5: 458 getLocalMask(mask_5d, localMask); 459 break; 460 case 6: 461 getLocalMask(mask_6d, localMask); 462 break; 463 case 7: 464 getLocalMask(mask_7d, localMask); 465 break; 466 default: 467 break; 468 } 469 } 470 471 /* 472 Modify the mask values at certain indexes. 473 This function can be used to correct the grid mask after it has been constructed with createMask 474 \param [in] indexToModify 475 \param [in] modifyValue 476 */ 477 void CGrid::modifyMask(const CArray<int,1>& indexToModify, bool modifyValue) 391 478 { 392 479 using namespace std; … 396 483 397 484 switch (dim) { 485 case 0: 486 modifyGridMask(mask_0d, indexToModify, modifyValue); 487 break; 398 488 case 1: 399 modifyGridMask(mask_1d, indexToModify);489 modifyGridMask(mask_1d, indexToModify, modifyValue); 400 490 break; 401 491 case 2: 402 modifyGridMask(mask_2d, indexToModify);492 modifyGridMask(mask_2d, indexToModify, modifyValue); 403 493 break; 404 494 case 3: 405 modifyGridMask(mask_3d, indexToModify);495 modifyGridMask(mask_3d, indexToModify, modifyValue); 406 496 break; 407 497 case 4: 408 modifyGridMask(mask_1d, indexToModify);498 modifyGridMask(mask_4d, indexToModify, modifyValue); 409 499 break; 410 500 case 5: 411 modifyGridMask(mask_2d, indexToModify);501 modifyGridMask(mask_5d, indexToModify, modifyValue); 412 502 break; 413 503 case 6: 414 modifyGridMask(mask_3d, indexToModify);504 modifyGridMask(mask_6d, indexToModify, modifyValue); 415 505 break; 416 506 case 7: 417 modifyGridMask(mask_3d, indexToModify); 507 modifyGridMask(mask_7d, indexToModify, modifyValue); 508 break; 509 default: 510 break; 511 } 512 } 513 514 /* 515 516 Change the mask size. This function is used when reconstructing the mask on the server side 517 \param [in] newDimensionSize 518 \param [in] newValue 519 */ 520 void CGrid::modifyMaskSize(const std::vector<int>& newDimensionSize, bool newValue) 521 { 522 std::vector<CDomain*> domainP = this->getDomains(); 523 std::vector<CAxis*> axisP = this->getAxis(); 524 int dim = domainP.size() * 2 + axisP.size(); 525 526 switch (dim) { 527 case 0: 528 modifyGridMaskSize(mask_0d, newDimensionSize, newValue); 529 break; 530 case 1: 531 modifyGridMaskSize(mask_1d, newDimensionSize, newValue); 532 break; 533 case 2: 534 modifyGridMaskSize(mask_2d, newDimensionSize, newValue); 535 break; 536 case 3: 537 modifyGridMaskSize(mask_3d, newDimensionSize, newValue); 538 break; 539 case 4: 540 modifyGridMaskSize(mask_4d, newDimensionSize, newValue); 541 break; 542 case 5: 543 modifyGridMaskSize(mask_5d, newDimensionSize, newValue); 544 break; 545 case 6: 546 modifyGridMaskSize(mask_6d, newDimensionSize, newValue); 547 break; 548 case 7: 549 modifyGridMaskSize(mask_7d, newDimensionSize, newValue); 418 550 break; 419 551 default: … … 486 617 } 487 618 488 std::vector<int> CGrid::getAxisPositionInGrid() const 489 { 490 return axisPositionInGrid_; 619 /*!
620 Compute the indexes used to write data into a file 621 */ 622 void CGrid::computeWrittenIndex() 623 { 624 if (computedWrittenIndex_) return; 625 computedWrittenIndex_ = true; 626 627 if (isScalarGrid()) 628 { 629 size_t nbWritten = 1; 630 int writtenIndex = 0; 631 632 localIndexToWriteOnClient.resize(nbWritten); 633 localIndexToWriteOnServer.resize(nbWritten); 634 localIndexToWriteOnServer(0) = writtenIndex; 635 localIndexToWriteOnClient(0) = writtenIndex; 636 637 return; 638 } 639 640 size_t nbWritten = 0, indGlo; 641 CDistributionClient::GlobalLocalDataMap& globalDataIndex = clientDistribution_->getGlobalDataIndexOnClient(); 642 CDistributionClient::GlobalLocalDataMap::const_iterator itb = globalDataIndex.begin(), 643 ite = globalDataIndex.end(), it; 644 const CDistributionServer::GlobalLocalMap& globalLocalIndex = serverDistribution_->getGlobalLocalIndex(); 645 CDistributionServer::GlobalLocalMap::const_iterator itSrvb = globalLocalIndex.begin(), 646 itSrve = globalLocalIndex.end(), itSrv; 647 for (it = itb; it != ite; ++it) 648 { 649 indGlo = it->first; 650 if (globalLocalIndex.end() != globalLocalIndex.find(indGlo)) ++nbWritten; 651 } 652 653 localIndexToWriteOnClient.resize(nbWritten); 654 localIndexToWriteOnServer.resize(nbWritten); 655 656 { 657 numberWrittenIndexes_ = nbWritten; 658 if (isDataDistributed_) 659 { 660 CContextServer* server = CContext::getCurrent()->server; 661 MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 662 MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 663 offsetWrittenIndexes_ -= numberWrittenIndexes_; 664 } 665 else 666 totalNumberWrittenIndexes_ = numberWrittenIndexes_; 667 } 668 669 nbWritten = 0; 670 for (it = itb; it != ite; ++it) 671 { 672 indGlo = it->first; 673 itSrv = globalLocalIndex.find(indGlo); 674 if (itSrve != itSrv) 675 { 676 localIndexToWriteOnServer(nbWritten) = itSrv->second; 677 localIndexToWriteOnClient(nbWritten) = it->second; 678 ++nbWritten; 679 } 680 } 491 681 } 492 682 493 683 //--------------------------------------------------------------- 684 685 /* 686 Compute the global index and its local index, taking into account the mask and data index.
687 These global indexes will be used to compute the connection of this client (sender) to its servers (receivers) 688 (via function computeConnectedClient) 689 These global indexes also correspond to data sent to servers (if any) 690 */ 691 void CGrid::computeClientIndex() 692 { 693 CContext* context = CContext::getCurrent(); 694 695 CContextClient* client = context->client; // Here it's not important which contextClient to recuperate 696 int rank = client->clientRank; 697 698 clientDistribution_ = new CDistributionClient(rank, this); 699 // Get local data index on client 700 storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().size()); 701 int nbStoreIndex = storeIndex_client.numElements(); 702 for (int idx = 0; idx < nbStoreIndex; ++idx) storeIndex_client(idx) = (clientDistribution_->getLocalDataIndexOnClient())[idx]; 703 704 if (0 == serverDistribution_) isDataDistributed_= clientDistribution_->isDataDistributed(); 705 else 706 { 707 // Mapping global index received from clients to the storeIndex_client 708 CDistributionClient::GlobalLocalDataMap& globalDataIndex = clientDistribution_->getGlobalDataIndexOnClient(); 709 CDistributionClient::GlobalLocalDataMap::const_iterator itGloe = globalDataIndex.end(); 710 map<int, CArray<size_t, 1> >::iterator itb = outGlobalIndexFromClient.begin(), 711 ite = outGlobalIndexFromClient.end(), it; 712 713 for (it = itb; it != ite; ++it) 714 { 715 int rank = it->first; 716 CArray<size_t,1>& globalIndex = outGlobalIndexFromClient[rank]; 717 outLocalIndexStoreOnClient.insert(make_pair(rank, CArray<size_t,1>(globalIndex.numElements()))); 718 CArray<size_t,1>& localIndex = outLocalIndexStoreOnClient[rank]; 719 size_t nbIndex = 0; 720 721 // Keep this code for this moment but it should be removed (or moved to DEBUG) to improve performance 722 for (size_t idx = 0; idx < globalIndex.numElements(); ++idx) 723 { 724 if (itGloe != globalDataIndex.find(globalIndex(idx))) 725 { 726 ++nbIndex; 727 } 728 } 729 730 if (doGridHaveDataDistributed(client) && (nbIndex != localIndex.numElements())) 731 ERROR("void CGrid::computeClientIndex()", 732 << "Number of local index on client is different from number of received global index" 733 << "Rank of sent client " << rank <<"." 734 << "Number of local index " << nbIndex << ". " 735 << "Number of received global index " << localIndex.numElements() << "."); 736 737 nbIndex = 0; 738 for (size_t idx = 0; idx < globalIndex.numElements(); ++idx) 739 { 740 if (itGloe != globalDataIndex.find(globalIndex(idx))) 741 { 742 localIndex(idx) = globalDataIndex[globalIndex(idx)]; 743 } 744 } 745 } 746 } 747 } 748 749 /*! 750 Compute connected receivers and indexes to be sent to these receivers. 751 */ 752 void CGrid::computeConnectedClients() 753 { 754 CContext* context = CContext::getCurrent(); 755 int nbSrvPools = (context->clientPrimServer.size() == 0) ? 1 : context->clientPrimServer.size(); 756 connectedServerRank_.clear(); 757 connectedDataSize_.clear(); 758 globalIndexOnServer_.clear(); 759 nbSenders.clear(); 760 761 for (int p = 0; p < nbSrvPools; ++p) 762 { 763 CContextClient* client = (context->clientPrimServer.size() == 0) ? 
context->client : context->clientPrimServer[p]; 764 int receiverSize = client->serverSize; 765 // connectedServerRank_[client].clear(); 766 767 if (connectedServerRank_.find(receiverSize) == connectedServerRank_.end()) 768 { 769 if (!doGridHaveDataDistributed(client)) 770 { 771 if (client->isServerLeader()) 772 { 773 size_t ssize = clientDistribution_->getLocalDataIndexOnClient().size(); 774 const std::list<int>& ranks = client->getRanksServerLeader(); 775 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 776 { 777 connectedServerRank_[receiverSize].push_back(*itRank); 778 connectedDataSize_[receiverSize][*itRank] = ssize; 779 } 780 } 781 return; 782 } 783 784 // Compute mapping between client and server 785 std::vector<boost::unordered_map<size_t,std::vector<int> > > indexServerOnElement; 786 CServerDistributionDescription serverDistributionDescription(getGlobalDimension(), client->serverSize); 787 std::vector<int> serverZeroIndex = serverDistributionDescription.computeServerGlobalByElement(indexServerOnElement, 788 client->clientRank, 789 client->clientSize, 790 axis_domain_order, 791 getDistributedDimension()); 792 793 // Even if servers have no index, they must receive something from the client 794 // We use only a few clients to send "empty" messages to these servers 795 std::list<int> serverZeroIndexLeader; 796 std::list<int> serverZeroIndexNotLeader; 797 CContextClient::computeLeader(client->clientRank, client->clientSize, serverZeroIndex.size(), serverZeroIndexLeader, serverZeroIndexNotLeader); 798 for (std::list<int>::iterator it = serverZeroIndexLeader.begin(); it != serverZeroIndexLeader.end(); ++it) 799 *it = serverZeroIndex[*it]; 800 801 if (globalIndexOnServer_.find(receiverSize) == globalIndexOnServer_.end()) 802 computeIndexByElement(indexServerOnElement, client, globalIndexOnServer_[receiverSize]); 803 804 const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer(); 805 CDistributionClient::GlobalLocalDataMap::const_iterator iteGlobalLocalIndexMap = globalLocalIndexSendToServer.end(), itGlobalLocalIndexMap; 806 CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itbGlobalMap, itGlobalMap; 807 itbGlobalMap = globalIndexOnServer_[receiverSize].begin(); 808 iteGlobalMap = globalIndexOnServer_[receiverSize].end(); 809 810 for (itGlobalMap = itbGlobalMap; itGlobalMap != iteGlobalMap; ++itGlobalMap) 811 { 812 int serverRank = itGlobalMap->first; 813 int indexSize = itGlobalMap->second.size(); 814 const std::vector<size_t>& indexVec = itGlobalMap->second; 815 for (int idx = 0; idx < indexSize; ++idx) 816 { 817 itGlobalLocalIndexMap = globalLocalIndexSendToServer.find(indexVec[idx]); 818 if (iteGlobalLocalIndexMap != itGlobalLocalIndexMap) 819 { 820 if (connectedDataSize_[receiverSize].end() == connectedDataSize_[receiverSize].find(serverRank)) 821 connectedDataSize_[receiverSize][serverRank] = 1; 822 else 823 ++connectedDataSize_[receiverSize][serverRank]; 824 } 825 } 826 } 827 828 // Connected servers which really have index 829 for (itGlobalMap = itbGlobalMap; itGlobalMap != iteGlobalMap; ++itGlobalMap) { 830 connectedServerRank_[receiverSize].push_back(itGlobalMap->first); 831 } 832 833 // Connected servers which have no index at all 834 for (std::list<int>::iterator it = serverZeroIndexLeader.begin(); it != serverZeroIndexLeader.end(); ++it) 835 connectedServerRank_[receiverSize].push_back(*it); 836 837 // Even if a client has
no index, it must connect to at least one server and 838 // send an "empty" data to this server 839 if (connectedServerRank_[receiverSize].empty()) 840 connectedServerRank_[receiverSize].push_back(client->clientRank % client->serverSize); 841 842 nbSenders[receiverSize] = clientServerMap_->computeConnectedClients(receiverSize, client->clientSize, client->intraComm, connectedServerRank_[receiverSize]); 843 } 844 } 845 } 494 846 495 847 /*! … … 503 855 { 504 856 CContext* context = CContext::getCurrent(); 505 CContextClient* client = context->client; 506 507 // First of all, compute distribution on client side 508 clientDistribution_ = new CDistributionClient(client->clientRank, this); 509 // Get local data index on client 510 storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().size()); 511 int nbStoreIndex = storeIndex_client.numElements(); 512 for (int idx = 0; idx < nbStoreIndex; ++idx) storeIndex_client(idx) = (clientDistribution_->getLocalDataIndexOnClient())[idx]; 513 isDataDistributed_= clientDistribution_->isDataDistributed(); 514 515 connectedServerRank_.clear(); 516 517 if (!doGridHaveDataDistributed()) 857 if (isScalarGrid()) 518 858 { 519 if (client->isServerLeader()) 520 { 521 size_t ssize = clientDistribution_->getLocalDataIndexOnClient().size(); 522 const std::list<int>& ranks = client->getRanksServerLeader(); 523 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 524 { 525 connectedServerRank_.push_back(*itRank); 526 connectedDataSize_[*itRank] = ssize; 527 } 528 } 529 return; 530 } 531 532 // Compute mapping between client and server 533 std::vector<boost::unordered_map<size_t,std::vector<int> > > indexServerOnElement; 534 CServerDistributionDescription serverDistributionDescription(getGlobalDimension(), client->serverSize); 535 serverDistributionDescription.computeServerGlobalByElement(indexServerOnElement, 536 client->clientRank, 537 client->clientSize, 538 axis_domain_order, 539 getDistributedDimension()); 540 computeIndexByElement(indexServerOnElement, globalIndexOnServer_); 541 542 const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer(); 543 CDistributionClient::GlobalLocalDataMap::const_iterator iteGlobalLocalIndexMap = globalLocalIndexSendToServer.end(), itGlobalLocalIndexMap; 544 CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itbGlobalMap, itGlobalMap; 545 itGlobalMap = itbGlobalMap = globalIndexOnServer_.begin(); 546 iteGlobalMap = globalIndexOnServer_.end(); 547 548 for (; itGlobalMap != iteGlobalMap; ++itGlobalMap) 549 { 550 int serverRank = itGlobalMap->first; 551 int indexSize = itGlobalMap->second.size(); 552 const std::vector<size_t>& indexVec = itGlobalMap->second; 553 for (int idx = 0; idx < indexSize; ++idx) 859 computeClientIndexScalarGrid(); 860 if (context->hasClient) 554 861 { 555 itGlobalLocalIndexMap = globalLocalIndexSendToServer.find(indexVec[idx]); 556 if (iteGlobalLocalIndexMap != itGlobalLocalIndexMap) 557 { 558 if (connectedDataSize_.end() == connectedDataSize_.find(serverRank)) 559 connectedDataSize_[serverRank] = 1; 560 else 561 ++connectedDataSize_[serverRank]; 562 } 862 computeConnectedClientsScalarGrid(); 563 863 } 564 864 } 565 566 for (itGlobalMap = itbGlobalMap; itGlobalMap != iteGlobalMap; ++itGlobalMap) { 567 connectedServerRank_.push_back(itGlobalMap->first); 865 else 866 { 867 computeClientIndex(); 868 if (context->hasClient) 869 { 870 
computeConnectedClients(); 871 } 568 872 } 569 570 nbSenders = clientServerMap_->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_); 873 if (CServer::serverLevel==2) 874 { 875 computeWrittenIndex() ; 876 if (serverDistribution_!=0) serverDistribution_->partialClear() ; 877 if (clientDistribution_!=0) clientDistribution_->partialClear() ; 878 outGlobalIndexFromClient.clear() ; 879 } 571 880 } 572 881 … … 577 886 on each element whose size is much smaller than one of whole grid. 578 887 \param [in] indexServerOnElement global index of each element and the rank of server associated with these index 888 \param [in] client contextClient 579 889 \param [out] globalIndexOnServer global index of grid and its corresponding rank of server. 580 890 */ 581 891 void CGrid::computeIndexByElement(const std::vector<boost::unordered_map<size_t,std::vector<int> > >& indexServerOnElement, 892 const CContextClient* client, 582 893 CClientServerMapping::GlobalIndexMap& globalIndexOnServer) 583 894 { 584 CContext* context = CContext::getCurrent();585 CContextClient* client = context->client;586 895 int serverSize = client->serverSize; 896 587 897 std::vector<CDomain*> domList = getDomains(); 588 898 std::vector<CAxis*> axisList = getAxis(); … … 667 977 } 668 978 669 nbIndexOnServer = 0; 670 for (it = itb; it != ite; ++it) 979 nbIndexOnServer = 0; 980 for (size_t j = 0; j < globalIndexElementOnServerMap.size(); ++j) 981 { 982 it = globalIndexElementOnServerMap.find(globalIndexElementOnClient(j)); 983 if (it != ite) 671 984 { 672 985 const std::vector<int>& tmp = it->second; … … 683 996 } 684 997 } 998 } 685 999 686 1000 // Determine server which contain global source index … … 931 1245 } 932 1246 1247 /* 933 1248 void CGrid::outputField(int rank, const CArray<double, 1>& stored, double* field) 934 1249 { … … 960 1275 } 961 1276 } 962 1277 */ 963 1278 //---------------------------------------------------------------- 964 1279 … … 978 1293 } 979 1294 980 void CGrid::computeIndexScalarGrid() 1295 void CGrid::uncompressField_arr(const double* const data, CArray<double, 1>& out) const 1296 { 1297 const std::vector<int>& localMaskedDataIndex = clientDistribution_->getLocalMaskedDataIndexOnClient(); 1298 const int size = localMaskedDataIndex.size(); 1299 1300 for(int i = 0; i < size; ++i) out(localMaskedDataIndex[i]) = data[i]; 1301 } 1302 1303 1304 void CGrid::computeClientIndexScalarGrid() 1305 { 1306 CContext* context = CContext::getCurrent(); 1307 // int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; // This should be changed soon 1308 // for (int p = 0; p < nbSrvPools; ++p) 1309 { 1310 // CContextClient* client = (context->hasServer) ? (context->hasClient ? 
context->clientPrimServer[p] : context->client) 1311 // : context->client; 1312 CContextClient* client = context->client; 1313 1314 int rank = client->clientRank; 1315 1316 clientDistribution_ = new CDistributionClient(rank, this); 1317 1318 storeIndex_client.resize(1); 1319 storeIndex_client(0) = 0; 1320 1321 if (0 != serverDistribution_) 1322 { 1323 map<int, CArray<size_t, 1> >::iterator itb = outGlobalIndexFromClient.begin(), 1324 ite = outGlobalIndexFromClient.end(), it; 1325 for (it = itb; it != ite; ++it) 1326 { 1327 int rank = it->first; 1328 CArray<size_t,1>& globalIndex = outGlobalIndexFromClient[rank]; 1329 outLocalIndexStoreOnClient.insert(make_pair(rank, CArray<size_t,1>(globalIndex.numElements()))); 1330 CArray<size_t,1>& localIndex = outLocalIndexStoreOnClient[rank]; 1331 if (1 != globalIndex.numElements()) 1332 ERROR("void CGrid::computeClientIndexScalarGrid()", 1333 << "Something wrong happened. " 1334 << "Number of received global index on scalar grid should equal to 1" 1335 << "Number of received global index " << globalIndex.numElements() << "."); 1336 1337 localIndex(0) = globalIndex(0); 1338 } 1339 } 1340 } 1341 } 1342 1343 void CGrid::computeConnectedClientsScalarGrid() 1344 { 1345 CContext* context = CContext::getCurrent(); 1346 int nbSrvPools = (context->clientPrimServer.size()==0) ? 1 : context->clientPrimServer.size(); 1347 connectedServerRank_.clear(); 1348 connectedDataSize_.clear(); 1349 nbSenders.clear(); 1350 1351 for (int p = 0; p < nbSrvPools; ++p) 1352 { 1353 CContextClient* client = (context->clientPrimServer.size()==0) ? context->client : context->clientPrimServer[p]; 1354 int receiverSize = client->serverSize; 1355 1356 // connectedServerRank_[client].clear(); 1357 1358 if (connectedServerRank_.find(receiverSize)==connectedServerRank_.end()) 1359 { 1360 if (client->isServerLeader()) 1361 { 1362 const std::list<int>& ranks = client->getRanksServerLeader(); 1363 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1364 { 1365 int rank = *itRank; 1366 int nb = 1; 1367 connectedServerRank_[receiverSize].push_back(rank); 1368 connectedDataSize_[receiverSize][rank] = nb; 1369 nbSenders[receiverSize][rank] = nb; 1370 } 1371 } 1372 else 1373 { 1374 const std::list<int>& ranks = client->getRanksServerNotLeader(); 1375 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1376 { 1377 int rank = *itRank; 1378 int nb = 1; 1379 connectedServerRank_[receiverSize].push_back(rank); 1380 connectedDataSize_[receiverSize][rank] = nb; 1381 nbSenders[receiverSize][rank] = nb; 1382 } 1383 } 1384 } 1385 isDataDistributed_ = false; 1386 } 1387 } 1388 1389 void CGrid::sendIndexScalarGrid() 981 1390 { 982 1391 CContext* context = CContext::getCurrent(); 983 CContextClient* client=context->client; 984 985 storeIndex_client.resize(1); 986 storeIndex_client(0) = 0; 987 988 connectedServerRank_.clear(); 989 990 if (0 == client->clientRank) 991 { 992 for (int rank = 0; rank < client->serverSize; ++rank) 993 { 994 connectedServerRank_.push_back(rank); 995 connectedDataSize_[rank] = 1; 996 nbSenders[rank] = 1; 997 } 998 } 999 isDataDistributed_ = false; 1000 } 1001 1002 void CGrid::computeCompressedIndex() 1003 { 1004 std::map<size_t, size_t> indexes; 1005 1006 { 1007 std::map<int, CArray<size_t,1> >::const_iterator it = outIndexFromClient.begin(); 1008 std::map<int, CArray<size_t,1> >::const_iterator itEnd = outIndexFromClient.end(); 1009 for (; it != itEnd; 
++it) 1010 { 1011 for (int i = 0; i < it->second.numElements(); ++i) 1012 indexes.insert(std::make_pair(it->second(i), 0)); 1013 1014 compressedOutIndexFromClient[it->first].resize(it->second.numElements()); 1015 } 1016 } 1017 1018 { 1019 std::map<size_t, size_t>::iterator it = indexes.begin(); 1020 std::map<size_t, size_t>::iterator itEnd = indexes.end(); 1021 for (size_t i = 0; it != itEnd; ++it, ++i) 1022 it->second = i; 1023 } 1024 1025 { 1026 std::map<int, CArray<size_t,1> >::iterator it = compressedOutIndexFromClient.begin(); 1027 std::map<int, CArray<size_t,1> >::iterator itEnd = compressedOutIndexFromClient.end(); 1028 for (; it != itEnd; ++it) 1029 { 1030 const CArray<size_t,1>& outIndex = outIndexFromClient[it->first]; 1031 for (int i = 0; i < it->second.numElements(); ++i) 1032 it->second(i) = indexes[outIndex(i)]; 1033 } 1034 } 1035 } 1036 1037 void CGrid::sendIndexScalarGrid() 1038 { 1039 CContext* context = CContext::getCurrent(); 1040 CContextClient* client = context->client; 1041 1042 CEventClient event(getType(), EVENT_ID_INDEX); 1043 list<CMessage> listMsg; 1044 list<CArray<size_t,1> > listOutIndex; 1045 1046 if (client->isServerLeader()) 1047 { 1048 const std::list<int>& ranks = client->getRanksServerLeader(); 1049 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1050 { 1051 int rank = *itRank; 1052 int nb = 1; 1053 storeIndex_toSrv.insert(std::make_pair(rank, CArray<int,1>(nb))); 1054 listOutIndex.push_back(CArray<size_t,1>(nb)); 1055 1056 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[rank]; 1057 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back(); 1058 1059 for (int k = 0; k < nb; ++k) 1060 { 1061 outGlobalIndexOnServer(k) = 0; 1062 outLocalIndexToServer(k) = 0; 1063 } 1064 1065 storeIndex_fromSrv.insert(std::make_pair(rank, CArray<int,1>(outLocalIndexToServer))); 1066 listMsg.push_back(CMessage()); 1067 listMsg.back() << getId( )<< isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1068 1069 event.push(rank, 1, listMsg.back()); 1070 } 1071 client->sendEvent(event); 1072 } 1073 else 1074 { 1075 const std::list<int>& ranks = client->getRanksServerNotLeader(); 1076 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1077 { 1078 int rank = *itRank; 1079 int nb = 1; 1080 storeIndex_fromSrv.insert(std::make_pair(rank, CArray<int,1>(nb))); 1081 CArray<int, 1>& outLocalIndexToServer = storeIndex_fromSrv[rank]; 1082 for (int k = 0; k < nb; ++k) 1083 { 1084 outLocalIndexToServer(k) = 0; 1085 } 1086 } 1087 client->sendEvent(event); 1088 } 1089 } 1090 1091 void CGrid::sendIndex(void) 1092 { 1093 CContext* context = CContext::getCurrent(); 1094 CContextClient* client = context->client; 1095 1096 CEventClient event(getType(), EVENT_ID_INDEX); 1097 int rank; 1098 list<CMessage> listMsg; 1099 list<CArray<size_t,1> > listOutIndex; 1100 const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer(); 1101 CDistributionClient::GlobalLocalDataMap::const_iterator itIndex = globalLocalIndexSendToServer.begin(), 1102 iteIndex = globalLocalIndexSendToServer.end(); 1103 1104 if (!doGridHaveDataDistributed()) 1105 { 1392 storeIndex_toSrv.clear(); 1393 std::list<CContextClient*>::iterator it; 1394 1395 for (it=clients.begin(); it!=clients.end(); ++it) 1396 { 1397 CContextClient* client = *it; 1398 int receiverSize = client->serverSize; 1399 1400 CEventClient 
event(getType(), EVENT_ID_INDEX); 1401 list<CMessage> listMsg; 1402 list<CArray<size_t,1> > listOutIndex; 1403 1106 1404 if (client->isServerLeader()) 1107 1405 { 1108 int indexSize = globalLocalIndexSendToServer.size();1109 CArray<size_t,1> outGlobalIndexOnServer(indexSize);1110 CArray<int,1> outLocalIndexToServer(indexSize);1111 for (int idx = 0; itIndex != iteIndex; ++itIndex, ++idx)1112 {1113 outGlobalIndexOnServer(idx) = itIndex->first;1114 outLocalIndexToServer(idx) = itIndex->second;1115 }1116 1117 1406 const std::list<int>& ranks = client->getRanksServerLeader(); 1118 1407 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1119 1408 { 1120 storeIndex_toSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1121 storeIndex_fromSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1122 listOutIndex.push_back(CArray<size_t,1>(outGlobalIndexOnServer)); 1123 1409 int rank = *itRank; 1410 int nb = 1; 1411 storeIndex_toSrv[client].insert(std::make_pair(rank, CArray<int,1>(nb))); 1412 listOutIndex.push_back(CArray<size_t,1>(nb)); 1413 1414 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[client][rank]; 1415 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back(); 1416 1417 for (int k = 0; k < nb; ++k) 1418 { 1419 outGlobalIndexOnServer(k) = 0; 1420 outLocalIndexToServer(k) = 0; 1421 } 1422 1423 if (context->hasClient && !context->hasServer) 1424 storeIndex_fromSrv.insert(std::make_pair(rank, CArray<int,1>(outLocalIndexToServer))); 1425 1426 listMsg.push_back(CMessage()); 1427 listMsg.back() << getId( )<< isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1428 1429 event.push(rank, 1, listMsg.back()); 1430 } 1431 client->sendEvent(event); 1432 } 1433 else 1434 { 1435 const std::list<int>& ranks = client->getRanksServerNotLeader(); 1436 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1437 { 1438 int rank = *itRank; 1439 int nb = 1; 1440 CArray<int, 1> outLocalIndexToServer(nb); 1441 for (int k = 0; k < nb; ++k) 1442 { 1443 outLocalIndexToServer(k) = 0; 1444 } 1445 1446 if (context->hasClient && !context->hasServer) 1447 storeIndex_fromSrv.insert(std::make_pair(rank, CArray<int,1>(outLocalIndexToServer))); 1448 } 1449 client->sendEvent(event); 1450 } 1451 } 1452 } 1453 1454 void CGrid::sendIndex(void) 1455 { 1456 CContext* context = CContext::getCurrent(); 1457 storeIndex_toSrv.clear(); 1458 std::list<CContextClient*>::iterator it; 1459 1460 for (it=clients.begin(); it!=clients.end(); ++it) 1461 { 1462 CContextClient* client = *it; 1463 int receiverSize = client->serverSize; 1464 1465 CEventClient event(getType(), EVENT_ID_INDEX); 1466 int rank; 1467 list<CMessage> listMsg; 1468 list<CArray<size_t,1> > listOutIndex; 1469 const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer(); 1470 CDistributionClient::GlobalLocalDataMap::const_iterator itbIndex = globalLocalIndexSendToServer.begin(), itIndex, 1471 iteIndex = globalLocalIndexSendToServer.end(); 1472 itIndex = itbIndex; 1473 1474 if (!doGridHaveDataDistributed(client)) 1475 { 1476 if (client->isServerLeader()) 1477 { 1478 int indexSize = globalLocalIndexSendToServer.size(); 1479 CArray<size_t,1> outGlobalIndexOnServer(indexSize); 1480 CArray<int,1> outLocalIndexToServer(indexSize); 1481 for (int idx = 0; itIndex != iteIndex; ++itIndex, ++idx) 1482 { 1483 
outGlobalIndexOnServer(idx) = itIndex->first; 1484 outLocalIndexToServer(idx) = itIndex->second; 1485 } 1486 1487 const std::list<int>& ranks = client->getRanksServerLeader(); 1488 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1489 { 1490 storeIndex_toSrv[client].insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1491 if (context->hasClient && !context->hasServer) 1492 storeIndex_fromSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1493 1494 listOutIndex.push_back(CArray<size_t,1>(outGlobalIndexOnServer)); 1495 1496 listMsg.push_back(CMessage()); 1497 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1498 1499 event.push(*itRank, 1, listMsg.back()); 1500 } 1501 client->sendEvent(event); 1502 } 1503 else 1504 { 1505 int indexSize = globalLocalIndexSendToServer.size(); 1506 CArray<int,1> outLocalIndexToServer(indexSize); 1507 for (int idx = 0; itIndex != iteIndex; ++itIndex, ++idx) 1508 { 1509 outLocalIndexToServer(idx) = itIndex->second; 1510 } 1511 1512 const std::list<int>& ranks = client->getRanksServerNotLeader(); 1513 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1514 { 1515 storeIndex_fromSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1516 } 1517 client->sendEvent(event); 1518 } 1519 } 1520 else 1521 { 1522 CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itGlobalMap; 1523 itGlobalMap = globalIndexOnServer_[receiverSize].begin(); 1524 iteGlobalMap = globalIndexOnServer_[receiverSize].end(); 1525 1526 std::map<int,std::vector<int> >localIndexTmp; 1527 std::map<int,std::vector<size_t> > globalIndexTmp; 1528 for (; itGlobalMap != iteGlobalMap; ++itGlobalMap) 1529 { 1530 int serverRank = itGlobalMap->first; 1531 int indexSize = itGlobalMap->second.size(); 1532 const std::vector<size_t>& indexVec = itGlobalMap->second; 1533 for (int idx = 0; idx < indexSize; ++idx) 1534 { 1535 itIndex = globalLocalIndexSendToServer.find(indexVec[idx]); 1536 if (iteIndex != itIndex) 1537 { 1538 globalIndexTmp[serverRank].push_back(itIndex->first); 1539 localIndexTmp[serverRank].push_back(itIndex->second); 1540 } 1541 } 1542 } 1543 1544 for (int ns = 0; ns < connectedServerRank_[receiverSize].size(); ++ns) 1545 { 1546 rank = connectedServerRank_[receiverSize][ns]; 1547 int nb = 0; 1548 if (globalIndexTmp.end() != globalIndexTmp.find(rank)) 1549 nb = globalIndexTmp[rank].size(); 1550 1551 storeIndex_toSrv[client].insert(make_pair(rank, CArray<int,1>(nb))); 1552 listOutIndex.push_back(CArray<size_t,1>(nb)); 1553 1554 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[client][rank]; 1555 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back(); 1556 1557 for (int k = 0; k < nb; ++k) 1558 { 1559 outGlobalIndexOnServer(k) = globalIndexTmp[rank].at(k); 1560 outLocalIndexToServer(k) = localIndexTmp[rank].at(k); 1561 } 1562 1563 storeIndex_fromSrv.insert(make_pair(rank, CArray<int,1>(outLocalIndexToServer))); 1124 1564 listMsg.push_back(CMessage()); 1125 1565 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1126 1566 1127 event.push(*itRank, 1, listMsg.back()); 1128 } 1567 event.push(rank, nbSenders[receiverSize][rank], listMsg.back()); 1568 } 1569 1129 1570 client->sendEvent(event); 1130 1571 } 1131 else1132 {1133 int indexSize = globalLocalIndexSendToServer.size();1134 CArray<int,1> 
outLocalIndexToServer(indexSize);1135 for (int idx = 0; itIndex != iteIndex; ++itIndex, ++idx)1136 {1137 outLocalIndexToServer(idx) = itIndex->second;1138 }1139 1140 const std::list<int>& ranks = client->getRanksServerNotLeader();1141 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)1142 {1143 storeIndex_fromSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer)));1144 }1145 client->sendEvent(event);1146 }1147 }1148 else1149 {1150 CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itGlobalMap;1151 itGlobalMap = globalIndexOnServer_.begin();1152 iteGlobalMap = globalIndexOnServer_.end();1153 1154 std::map<int,std::vector<int> >localIndexTmp;1155 std::map<int,std::vector<size_t> > globalIndexTmp;1156 for (; itGlobalMap != iteGlobalMap; ++itGlobalMap)1157 {1158 int serverRank = itGlobalMap->first;1159 int indexSize = itGlobalMap->second.size();1160 const std::vector<size_t>& indexVec = itGlobalMap->second;1161 for (int idx = 0; idx < indexSize; ++idx)1162 {1163 itIndex = globalLocalIndexSendToServer.find(indexVec[idx]);1164 if (iteIndex != itIndex)1165 {1166 globalIndexTmp[serverRank].push_back(itIndex->first);1167 localIndexTmp[serverRank].push_back(itIndex->second);1168 }1169 }1170 }1171 1172 for (int ns = 0; ns < connectedServerRank_.size(); ++ns)1173 {1174 rank = connectedServerRank_[ns];1175 int nb = 0;1176 if (globalIndexTmp.end() != globalIndexTmp.find(rank))1177 nb = globalIndexTmp[rank].size();1178 1179 storeIndex_toSrv.insert(make_pair(rank, CArray<int,1>(nb)));1180 listOutIndex.push_back(CArray<size_t,1>(nb));1181 1182 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[rank];1183 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back();1184 1185 for (int k = 0; k < nb; ++k)1186 {1187 outGlobalIndexOnServer(k) = globalIndexTmp[rank].at(k);1188 outLocalIndexToServer(k) = localIndexTmp[rank].at(k);1189 }1190 1191 storeIndex_fromSrv.insert(make_pair(rank, CArray<int,1>(outLocalIndexToServer)));1192 listMsg.push_back(CMessage());1193 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back();1194 1195 event.push(rank, nbSenders[rank], listMsg.back());1196 }1197 1198 client->sendEvent(event);1199 1572 } 1200 1573 } … … 1220 1593 { 1221 1594 CContext* context = CContext::getCurrent(); 1222 CContextServer* server = context->server; 1223 numberWrittenIndexes_ = totalNumberWrittenIndexes_ = offsetWrittenIndexes_ = 0; 1224 connectedServerRank_ = ranks; 1225 1226 for (int n = 0; n < ranks.size(); n++) 1227 { 1228 int rank = ranks[n]; 1229 CBufferIn& buffer = *buffers[n]; 1230 1231 buffer >> isDataDistributed_ >> isCompressible_; 1232 size_t dataSize = 0; 1233 1234 if (0 == serverDistribution_) 1235 { 1236 int idx = 0, numElement = axis_domain_order.numElements(); 1237 int ssize = numElement; 1238 std::vector<int> indexMap(numElement); 1239 for (int i = 0; i < numElement; ++i) 1240 { 1241 indexMap[i] = idx; 1242 if (2 == axis_domain_order(i)) 1595 connectedServerRankRead_ = ranks; 1596 1597 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; 1598 nbSrvPools = 1; 1599 nbReadSenders.clear(); 1600 for (int p = 0; p < nbSrvPools; ++p) 1601 { 1602 CContextServer* server = (!context->hasClient) ? context->server : context->serverPrimServer[p]; 1603 CContextClient* client = context->client; //(!context->hasClient) ? 
context->client : context->clientPrimServer[p]; 1604 1605 int idx = 0, numElement = axis_domain_order.numElements(); 1606 int ssize = numElement; 1607 std::vector<int> indexMap(numElement); 1608 for (int i = 0; i < numElement; ++i) 1609 { 1610 indexMap[i] = idx; 1611 if (2 == axis_domain_order(i)) 1612 { 1613 ++ssize; 1614 idx += 2; 1615 } 1616 else 1617 ++idx; 1618 } 1619 1620 for (int n = 0; n < ranks.size(); n++) 1621 { 1622 int rank = ranks[n]; 1623 CBufferIn& buffer = *buffers[n]; 1624 1625 buffer >> isDataDistributed_ >> isCompressible_; 1626 size_t dataSize = 0; 1627 1628 if (0 == serverDistribution_) 1629 { 1630 int axisId = 0, domainId = 0, scalarId = 0, globalSize = 1; 1631 std::vector<CDomain*> domainList = getDomains(); 1632 std::vector<CAxis*> axisList = getAxis(); 1633 std::vector<int> nZoomBegin(ssize), nZoomSize(ssize), nGlob(ssize), nZoomBeginGlobal(ssize), nGlobElement(numElement); 1634 std::vector<CArray<int,1> > globalZoomIndex(numElement); 1635 for (int i = 0; i < numElement; ++i) 1243 1636 { 1244 ++ssize; 1245 idx += 2; 1637 nGlobElement[i] = globalSize; 1638 if (2 == axis_domain_order(i)) //domain 1639 { 1640 nZoomBegin[indexMap[i]] = domainList[domainId]->zoom_ibegin; 1641 nZoomSize[indexMap[i]] = domainList[domainId]->zoom_ni; 1642 nZoomBeginGlobal[indexMap[i]] = domainList[domainId]->global_zoom_ibegin; 1643 nGlob[indexMap[i]] = domainList[domainId]->ni_glo; 1644 1645 nZoomBegin[indexMap[i] + 1] = domainList[domainId]->zoom_jbegin; 1646 nZoomSize[indexMap[i] + 1] = domainList[domainId]->zoom_nj; 1647 nZoomBeginGlobal[indexMap[i] + 1] = domainList[domainId]->global_zoom_jbegin; 1648 nGlob[indexMap[i] + 1] = domainList[domainId]->nj_glo; 1649 1650 { 1651 int count = 0; 1652 globalZoomIndex[i].resize(nZoomSize[indexMap[i]]*nZoomSize[indexMap[i]+1]); 1653 for (int jdx = 0; jdx < nZoomSize[indexMap[i]+1]; ++jdx) 1654 for (int idx = 0; idx < nZoomSize[indexMap[i]]; ++idx) 1655 { 1656 globalZoomIndex[i](count) = (nZoomBegin[indexMap[i]] + idx) + (nZoomBegin[indexMap[i]+1] + jdx) * nGlob[indexMap[i]]; 1657 ++count; 1658 } 1659 } 1660 1661 ++domainId; 1662 } 1663 else if (1 == axis_domain_order(i)) // axis 1664 { 1665 nZoomBegin[indexMap[i]] = axisList[axisId]->zoom_begin; 1666 nZoomSize[indexMap[i]] = axisList[axisId]->zoom_n; 1667 nZoomBeginGlobal[indexMap[i]] = axisList[axisId]->global_zoom_begin; 1668 nGlob[indexMap[i]] = axisList[axisId]->n_glo; 1669 if (axisList[axisId]->zoomByIndex()) 1670 { 1671 globalZoomIndex[i].reference(axisList[axisId]->zoom_index); 1672 } 1673 else 1674 { 1675 globalZoomIndex[i].resize(nZoomSize[indexMap[i]]); 1676 for (int idx = 0; idx < nZoomSize[indexMap[i]]; ++idx) 1677 globalZoomIndex[i](idx) = nZoomBegin[indexMap[i]] + idx; 1678 } 1679 1680 ++axisId; 1681 } 1682 else // scalar 1683 { 1684 nZoomBegin[indexMap[i]] = 0; 1685 nZoomSize[indexMap[i]] = 1; 1686 nZoomBeginGlobal[indexMap[i]] = 0; 1687 nGlob[indexMap[i]] = 1; 1688 globalZoomIndex[i].resize(1); 1689 globalZoomIndex[i](0) = 0; 1690 ++scalarId; 1691 } 1246 1692 } 1247 else 1248 ++idx; 1249 } 1250 1251 int axisId = 0, domainId = 0, scalarId = 0; 1693 dataSize = 1; 1694 1695 for (int i = 0; i < nZoomSize.size(); ++i) 1696 dataSize *= nZoomSize[i]; 1697 serverDistribution_ = new CDistributionServer(server->intraCommRank, 1698 globalZoomIndex, axis_domain_order, 1699 nZoomBegin, nZoomSize, nZoomBeginGlobal, nGlob); 1700 } 1701 1702 CArray<size_t,1> outIndex; 1703 buffer >> outIndex; 1704 outGlobalIndexFromClient.insert(std::make_pair(rank, outIndex)); 1705 connectedDataSizeRead_[rank] 
= outIndex.numElements(); 1706 1707 if (doGridHaveDataDistributed(client)) 1708 {} 1709 else 1710 { 1711 // THE PROBLEM HERE IS THAT DATA CAN BE NONDISTRIBUTED ON CLIENT AND DISTRIBUTED ON SERVER 1712 // BELOW IS THE TEMPORARY FIX only for a single type of element (domain, axis, scalar) 1713 dataSize = serverDistribution_->getGridSize(); 1714 } 1715 writtenDataSize_ += dataSize; 1716 } 1717 1718 1719 // Compute mask of the current grid 1720 { 1721 int axisId = 0, domainId = 0, scalarId = 0, globalSize = 1; 1252 1722 std::vector<CDomain*> domainList = getDomains(); 1253 1723 std::vector<CAxis*> axisList = getAxis(); 1254 std::vector<int> nZoomBegin(ssize), nZoomSize(ssize), nGlob(ssize), nZoomBeginGlobal(ssize);1724 int dimSize = 2 * domainList.size() + axisList.size(); 1725 std::vector<int> nBegin(dimSize), nSize(dimSize), nGlob(dimSize), nBeginGlobal(dimSize); 1256 1726 for (int i = 0; i < numElement; ++i) 1257 1727 { 1258 1728 if (2 == axis_domain_order(i)) //domain 1259 1729 { 1260 n ZoomBegin[indexMap[i]] = domainList[domainId]->zoom_ibegin_srv;1261 n ZoomSize[indexMap[i]] = domainList[domainId]->zoom_ni_srv;1262 n ZoomBeginGlobal[indexMap[i]] = domainList[domainId]->global_zoom_ibegin;1730 nBegin[indexMap[i]] = domainList[domainId]->ibegin; 1731 nSize[indexMap[i]] = domainList[domainId]->ni; 1732 nBeginGlobal[indexMap[i]] = 0; 1263 1733 nGlob[indexMap[i]] = domainList[domainId]->ni_glo; 1264 1734 1265 n ZoomBegin[indexMap[i] + 1] = domainList[domainId]->zoom_jbegin_srv;1266 n ZoomSize[indexMap[i] + 1] = domainList[domainId]->zoom_nj_srv;1267 n ZoomBeginGlobal[indexMap[i] + 1] = domainList[domainId]->global_zoom_jbegin;1735 nBegin[indexMap[i] + 1] = domainList[domainId]->jbegin; 1736 nSize[indexMap[i] + 1] = domainList[domainId]->nj; 1737 nBeginGlobal[indexMap[i] + 1] = 0; 1268 1738 nGlob[indexMap[i] + 1] = domainList[domainId]->nj_glo; 1269 1739 1270 {1271 int count = 0;1272 globalZoomIndex[i].resize(nZoomSize[indexMap[i]]*nZoomSize[indexMap[i]+1]);1273 for (int jdx = 0; jdx < nZoomSize[indexMap[i]+1]; ++jdx)1274 for (int idx = 0; idx < nZoomSize[indexMap[i]]; ++idx)1275 {1276 globalZoomIndex[i](count) = (nZoomBegin[indexMap[i]] + idx) + (nZoomBegin[indexMap[i]+1] + jdx) * nGlob[indexMap[i]];1277 ++count;1278 }1279 }1280 1740 ++domainId; 1281 1741 } 1282 1742 else if (1 == axis_domain_order(i)) // axis 1283 1743 { 1284 nZoomBegin[indexMap[i]] = axisList[axisId]->zoom_begin_srv; 1285 nZoomSize[indexMap[i]] = axisList[axisId]->zoom_size_srv; 1286 nZoomBeginGlobal[indexMap[i]] = axisList[axisId]->global_zoom_begin_srv; 1287 nGlob[indexMap[i]] = axisList[axisId]->n_glo; 1288 if (!axisList[axisId]->global_zoom_index.isEmpty()) 1289 { 1290 globalZoomIndex[i].reference(axisList[axisId]->zoom_index_srv); 1291 } 1292 else 1293 { 1294 globalZoomIndex[i].resize(nZoomSize[indexMap[i]]); 1295 for (int idx = 0; idx < nZoomSize[indexMap[i]]; ++idx) 1296 globalZoomIndex[i](idx) = nZoomBegin[indexMap[i]] + idx; 1297 } 1298 1744 nBegin[indexMap[i]] = axisList[axisId]->begin; 1745 nSize[indexMap[i]] = axisList[axisId]->n; 1746 nBeginGlobal[indexMap[i]] = 0; 1747 nGlob[indexMap[i]] = axisList[axisId]->n_glo; 1299 1748 ++axisId; 1300 1749 } 1301 1750 else // scalar 1751 { 1752 } 1753 } 1754 1755 if (nSize.empty()) // Scalar grid 1756 { 1757 nBegin.push_back(0); 1758 nSize.push_back(1); 1759 nBeginGlobal.push_back(0); 1760 nGlob.push_back(1); 1761 } 1762 1763 modifyMaskSize(nSize, false); 1764 1765 // The code below is reserved for future use
1766 CDistributionServer srvDist(server->intraCommRank, nBegin, nSize, nBeginGlobal, nGlob); 1767 map<int, CArray<size_t, 1> >::iterator itb = outGlobalIndexFromClient.begin(), 1768 ite = outGlobalIndexFromClient.end(), it; 1769 const CDistributionServer::GlobalLocalMap& globalLocalMask = srvDist.getGlobalLocalIndex(); 1770 CDistributionServer::GlobalLocalMap::const_iterator itSrv; 1771 size_t nb = 0; 1772 for (it = itb; it != ite; ++it) 1773 { 1774 CArray<size_t,1>& globalInd = it->second; 1775 for (size_t idx = 0; idx < globalInd.numElements(); ++idx) 1302 1776 { 1303 nZoomBegin[indexMap[i]] = 0; 1304 nZoomSize[indexMap[i]] = 1; 1305 nZoomBeginGlobal[indexMap[i]] = 0; 1306 nGlob[indexMap[i]] = 1; 1307 globalZoomIndex[i].resize(1); 1308 globalZoomIndex[i](0) = 0; 1309 ++scalarId; 1777 if (globalLocalMask.end() != globalLocalMask.find(globalInd(idx))) ++nb; 1310 1778 } 1311 1779 } 1312 dataSize = 1; 1313 for (int i = 0; i < nZoomSize.size(); ++i) 1314 dataSize *= nZoomSize[i]; 1315 1316 /* serverDistribution_ = new CDistributionServer(server->intraCommRank, nZoomBegin, nZoomSize, 1317 nZoomBeginGlobal, nGlob);*/ 1318 serverDistribution_ = new CDistributionServer(server->intraCommRank, 1319 globalZoomIndex, axis_domain_order, 1320 nZoomBegin, nZoomSize, nZoomBeginGlobal, nGlob); 1321 } 1322 1323 CArray<size_t,1> outIndex; 1324 buffer >> outIndex; 1325 if (isDataDistributed_) 1326 serverDistribution_->computeLocalIndex(outIndex); 1327 else 1328 { 1329 dataSize = outIndex.numElements(); 1330 for (int i = 0; i < outIndex.numElements(); ++i) outIndex(i) = i; 1331 } 1332 writtenDataSize_ += dataSize; 1333 1334 outIndexFromClient.insert(std::make_pair(rank, outIndex)); 1335 connectedDataSize_[rank] = outIndex.numElements(); 1336 numberWrittenIndexes_ += outIndex.numElements(); 1337 } 1338 1339 // if (isScalarGrid()) return; 1340 1341 if (isDataDistributed_) 1342 { 1343 MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 1344 MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 1345 offsetWrittenIndexes_ -= numberWrittenIndexes_; 1346 } 1347 else 1348 totalNumberWrittenIndexes_ = numberWrittenIndexes_; 1349 1350 nbSenders = CClientServerMappingDistributed::computeConnectedClients(context->client->serverSize, context->client->clientSize, context->client->intraComm, ranks); 1780 1781 CArray<int,1> indexToModify(nb); 1782 nb = 0; 1783 for (it = itb; it != ite; ++it) 1784 { 1785 CArray<size_t,1>& globalInd = it->second; 1786 for (size_t idx = 0; idx < globalInd.numElements(); ++idx) 1787 { 1788 itSrv = globalLocalMask.find(globalInd(idx)); 1789 if (globalLocalMask.end() != itSrv) 1790 { 1791 indexToModify(nb) = itSrv->second; 1792 ++nb; 1793 } 1794 } 1795 } 1796 1797 modifyMask(indexToModify, true); 1798 } 1799 1800 if (isScalarGrid()) return; 1801 1802 nbReadSenders[client] = CClientServerMappingDistributed::computeConnectedClients(context->client->serverSize, context->client->clientSize, context->client->intraComm, ranks); 1803 } 1351 1804 } 1352 1805 … … 1366 1819 const CArray<int,1>& axisDomainOrder) 1367 1820 { 1368 globalDim.resize(domains.size()*2+axis.size()+scalars.size()); 1821 // globalDim.resize(domains.size()*2+axis.size()+scalars.size()); 1822 globalDim.resize(domains.size()*2+axis.size()); 1369 1823 int positionDimensionDistributed = 1; 1370 1824 int idx = 0, idxDomain = 0, idxAxis = 0, idxScalar = 0; … … 1396 1850 else 1397 1851 { 1398 globalDim[idx] = 1;1852 // globalDim[idx] = 1; 1399 
1853 ++idxScalar; 1400 ++idx;1854 // ++idx; 1401 1855 } 1402 1856 } … … 1484 1938 } 1485 1939 1486 bool CGrid::doGridHaveDataDistributed( )1940 bool CGrid::doGridHaveDataDistributed(CContextClient* client) 1487 1941 { 1488 1942 if (isScalarGrid()) return false; 1943 else if (0 != client) 1944 { 1945 return (isDataDistributed_ || (1 != client->clientSize) || (1 != client->serverSize)); 1946 } 1489 1947 else 1490 return isDataDistributed_; 1948 return isDataDistributed_; 1491 1949 } 1492 1950 … … 1583 2041 void CGrid::sendAddDomain(const string& id) 1584 2042 { 1585 CContext* context=CContext::getCurrent(); 1586 1587 if (! context->hasServer ) 1588 { 1589 CContextClient* client=context->client; 1590 1591 CEventClient event(this->getType(),EVENT_ID_ADD_DOMAIN); 1592 if (client->isServerLeader()) 1593 { 1594 CMessage msg; 1595 msg<<this->getId(); 1596 msg<<id; 1597 const std::list<int>& ranks = client->getRanksServerLeader(); 1598 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1599 event.push(*itRank,1,msg); 1600 client->sendEvent(event); 1601 } 1602 else client->sendEvent(event); 1603 } 2043 sendAddItem(id, (int)EVENT_ID_ADD_DOMAIN); 1604 2044 } 1605 2045 … … 1610 2050 void CGrid::sendAddAxis(const string& id) 1611 2051 { 1612 CContext* context=CContext::getCurrent(); 1613 1614 if (! context->hasServer ) 1615 { 1616 CContextClient* client=context->client; 1617 1618 CEventClient event(this->getType(),EVENT_ID_ADD_AXIS); 1619 if (client->isServerLeader()) 1620 { 1621 CMessage msg; 1622 msg<<this->getId(); 1623 msg<<id; 1624 const std::list<int>& ranks = client->getRanksServerLeader(); 1625 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1626 event.push(*itRank,1,msg); 1627 client->sendEvent(event); 1628 } 1629 else client->sendEvent(event); 1630 } 2052 sendAddItem(id, (int)EVENT_ID_ADD_AXIS); 1631 2053 } 1632 2054 … … 1637 2059 void CGrid::sendAddScalar(const string& id) 1638 2060 { 1639 CContext* context=CContext::getCurrent(); 1640 1641 if (! 
context->hasServer ) 1642 { 1643 CContextClient* client=context->client; 1644 1645 CEventClient event(this->getType(),EVENT_ID_ADD_SCALAR); 1646 if (client->isServerLeader()) 1647 { 1648 CMessage msg; 1649 msg<<this->getId(); 1650 msg<<id; 1651 const std::list<int>& ranks = client->getRanksServerLeader(); 1652 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1653 event.push(*itRank,1,msg); 1654 client->sendEvent(event); 1655 } 1656 else client->sendEvent(event); 1657 } 2061 sendAddItem(id, (int)EVENT_ID_ADD_SCALAR); 1658 2062 } 1659 2063 … … 1746 2150 { 1747 2151 CDomain* pDom = CDomain::get(*it); 1748 if (context->hasClient )2152 if (context->hasClient && !context->hasServer) 1749 2153 { 1750 2154 pDom->solveRefInheritance(apply); … … 1758 2162 { 1759 2163 CAxis* pAxis = CAxis::get(*it); 1760 if (context->hasClient )2164 if (context->hasClient && !context->hasServer) 1761 2165 { 1762 2166 pAxis->solveRefInheritance(apply); … … 1770 2174 { 1771 2175 CScalar* pScalar = CScalar::get(*it); 1772 if (context->hasClient )2176 if (context->hasClient && !context->hasServer) 1773 2177 { 1774 2178 pScalar->solveRefInheritance(apply); … … 2145 2549 } 2146 2550 2551 void CGrid::setContextClient(CContextClient* contextClient) 2552 { 2553 if (clientsSet.find(contextClient)==clientsSet.end()) 2554 { 2555 clients.push_back(contextClient) ; 2556 clientsSet.insert(contextClient); 2557 } 2558 for (int i=0; i<this->getDomains().size(); i++) 2559 this->getDomains()[i]->setContextClient(contextClient); 2560 for (int i=0; i<this->getAxis().size(); i++) 2561 this->getAxis()[i]->setContextClient(contextClient); 2562 } 2563 2147 2564 /*! 2148 2565 Parse a grid, for now, it contains only domain, axis and scalar -
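The zoom bookkeeping in recvIndex above boils down to one piece of arithmetic: a rectangular zoom window of a two-dimensional domain is enumerated point by point, and each (i, j) pair is collapsed into a single global offset with i + j * ni_glo, the i index varying fastest. A minimal standalone sketch of that flattening, with illustrative names rather than the actual XIOS types:

    #include <cstddef>
    #include <vector>

    // Sketch of the arithmetic used to fill globalZoomIndex in recvIndex:
    // enumerate the global 1D offsets covered by a 2D zoom window.
    std::vector<size_t> flattenZoom(int zoomIBegin, int zoomNi,
                                    int zoomJBegin, int zoomNj,
                                    int niGlo)
    {
      std::vector<size_t> globalIndex;
      globalIndex.reserve(static_cast<size_t>(zoomNi) * zoomNj);
      for (int jdx = 0; jdx < zoomNj; ++jdx)
        for (int idx = 0; idx < zoomNi; ++idx)
          // Global offset of point (i, j) is i + j * ni_glo.
          globalIndex.push_back(static_cast<size_t>(zoomIBegin + idx)
                                + static_cast<size_t>(zoomJBegin + jdx) * niGlo);
      return globalIndex;
    }

For a 2x2 window with zoom_ibegin = 2 and zoom_jbegin = 1 on a domain with ni_glo = 10, this produces 12, 13, 22, 23, which is exactly the order the nested jdx/idx loops above generate.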
XIOS/dev/branch_openmp/src/node/grid.hpp
r1078 r1460 19 19 namespace xios { 20 20 21 /// ////////////////////// D éclarations ////////////////////// ///21 /// ////////////////////// Déclarations ////////////////////// /// 22 22 23 23 class CGridGroup; … 91 91 StdSize getDataSize(void) const; 92 92 93 /// Entr ées-sorties de champs ///93 /// Entrées-sorties de champs 94 94 template <int n> 95 95 void inputField(const CArray<double,n>& field, CArray<double,1>& stored) const; 96 96 template <int n> 97 void outputField(const CArray<double,1>& stored, CArray<double,n>& field) const; 98 99 void outputField(int rank, const CArray<double,1>& stored, double* field); 100 101 void inputField(int rank, const double* const field, CArray<double,1>& stored); 102 103 void outputCompressedField(int rank, const CArray<double,1>& stored, double* field); 97 void outputField(const CArray<double,1>& stored, CArray<double,n>& field) const; 98 template <int n> 99 void uncompressField(const CArray<double,n>& data, CArray<double,1>& outData) const; 103 100 104 101 virtual void parse(xml::CXMLNode& node); … 129 126 static CGrid* cloneGrid(const StdString& idNewGrid, CGrid* gridSrc); 130 127 131 public: 132 133 /// Entrées-sorties de champs (interne) /// 134 void storeField_arr(const double* const data, CArray<double,1>& stored) const; 135 void restoreField_arr(const CArray<double,1>& stored, double* const data) const; 136 137 /// Traitements protégés /// 128 public: 138 129 void computeIndexServer(void); 139 130 void computeIndex(void); 140 131 void computeIndexScalarGrid(); 141 void compute CompressedIndex();132 void computeWrittenIndex(); 142 133 143 134 void solveDomainRef(bool checkAtt); … 171 162 void sendIndexScalarGrid(); 172 163 164 void setContextClient(CContextClient* contextClient); 165 173 166 void computeDomConServer(); 174 167 std::map<int, int> getDomConServerSide(); 175 std::map<int, StdSize> getAttributesBufferSize( );168 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); 176 std::map<int, StdSize> getDataBufferSize( const std::string& id = "");169 std::map<int, StdSize> getDataBufferSize(CContextClient* client, const std::string& id = "", bool bufferForWriting = false); 177 170 std::vector<StdString> getDomainList(); 178 171 std::vector<StdString> getAxisList(); … 186 179 std::vector<int> getAxisOrder(); 187 180 std::vector<int> getGlobalDimension(); 188 bool isScalarGrid() const; 189 std::vector<int> getAxisPositionInGrid() const; 181 bool isScalarGrid() const; 190 182 191 183 bool doGridHaveDataToWrite(); 192 bool doGridHaveDataDistributed( );184 bool doGridHaveDataDistributed(CContextClient* client = 0); 193 185 size_t getWrittenDataSize() const; 194 186 int getNumberWrittenIndexes() const; … 210 202 std::map<CGrid*, std::pair<bool,StdString> >& getTransGridSource(); 211 203 bool hasTransform(); 212 204 size_t getGlobalWrittenSize(void) ; 205 void getLocalMask(CArray<bool,1>& localMask) ; 206 template<int N> 207 void getLocalMask(const CArray<bool,N>& gridMask, CArray<bool,1>& localMask) ; 213 208 public: 214 215 /// Propriétés privées ///216 bool isChecked;217 bool isDomainAxisChecked;218 bool isIndexSent;219 220 209 CArray<int, 1> storeIndex_client; 221 210 222 map<int, CArray<int, 1> > storeIndex_toSrv; 223 map<int, CArray<int, 1> > storeIndex_fromSrv; 224 map<int,int> nbSenders; 225 226 map<int, CArray<size_t, 1> > outIndexFromClient, compressedOutIndexFromClient; 211 /** Map containing indexes that will be sent in sendIndex().
*/ 212 std::map<CContextClient*, map<int, CArray<int, 1> > > storeIndex_toSrv; 213 214 /** Map storing the number of senders. Key = size of receiver's intracomm */ 215 std::map<int, std::map<int,int> > nbSenders; 216 217 std::map<CContextClient*, std::map<int,int> > nbReadSenders; 218 219 map<int, CArray<int, 1> > storeIndex_fromSrv; // Support, for now, reading with level-1 server 220 221 map<int, CArray<size_t, 1> > outIndexFromClient; // Deprecated 222 223 map<int, CArray<size_t, 1> > compressedOutIndexFromClient; 224 225 /** Map storing received indexes. Key = sender rank, value = index array. */ 226 map<int, CArray<size_t, 1> > outGlobalIndexFromClient; 227 228 // Manh Ha's comment: " A client receives global index from other clients (via recvIndex) 229 // then does mapping these index into local index of STORE_CLIENTINDEX 230 // In this way, store_clientIndex can be used as an input of a source filter 231 // Maybe we need a flag to determine whether a client wants to write. TODO " 232 233 /** Map storing received data. Key = sender rank, value = data array. 234 * The map is created in CGrid::computeClientIndex and filled upon receiving data in CField::recvUpdateData() */ 235 map<int, CArray<size_t, 1> > outLocalIndexStoreOnClient; 236 237 /** Indexes calculated based on server-like distribution. 238 * They are used for writing/reading data and only calculated for server level that does the writing/reading. 239 * Along with localIndexToWriteOnClient, these indexes are used to correctly place incoming data. */ 240 CArray<size_t,1> localIndexToWriteOnServer; 241 242 /** Indexes calculated based on client-like distribution. 243 * They are used for writing/reading data and only calculated for server level that does the writing/reading. 244 * Along with localIndexToWriteOnServer, these indexes are used to correctly place incoming data. 
*/ 245 CArray<size_t,1> localIndexToWriteOnClient; 246 247 CArray<size_t,1> indexFromClients; 248 227 249 void checkMask(void); 228 250 void createMask(void); 229 void modifyMask(const CArray<int,1>& indexToModify); 251 void modifyMask(const CArray<int,1>& indexToModify, bool valueToModify = false); 252 void modifyMaskSize(const std::vector<int>& newDimensionSize, bool newValue = false); 253 254 void computeGridGlobalDimension(const std::vector<CDomain*>& domains, 255 const std::vector<CAxis*>& axis, 256 const std::vector<CScalar*>& scalars, 257 const CArray<int,1>& axisDomainOrder); 230 258 231 259 private: … 237 265 bool createMask = false); 238 266 template<int N> 239 void modifyGridMask(CArray<bool,N>& gridMask, const CArray<int,1>& indexToModify); 267 void modifyGridMask(CArray<bool,N>& gridMask, const CArray<int,1>& indexToModify, bool valueToModify); 268 269 template<int N> 270 void modifyGridMaskSize(CArray<bool,N>& gridMask, const std::vector<int>& eachDimSize, bool newValue); 271 272 void storeField_arr(const double* const data, CArray<double, 1>& stored) const; 273 void restoreField_arr(const CArray<double, 1>& stored, double* const data) const; 274 void uncompressField_arr(const double* const data, CArray<double, 1>& outData) const; 240 275 241 276 void setVirtualDomainGroup(CDomainGroup* newVDomainGroup); … 254 289 void setTransformationAlgorithms(); 255 290 void computeIndexByElement(const std::vector<boost::unordered_map<size_t,std::vector<int> > >& indexServerOnElement, 291 const CContextClient* client, 256 292 CClientServerMapping::GlobalIndexMap& globalIndexOnServer); 257 258 293 int computeGridGlobalDimension(std::vector<int>& globalDim, 259 294 const std::vector<CDomain*> domains, … 261 296 const std::vector<CScalar*> scalars, 262 297 const CArray<int,1>& axisDomainOrder); 263 264 298 int getDistributedDimension(); 299 300 void computeClientIndex(); 301 void computeConnectedClients(); 302 void computeClientIndexScalarGrid(); 303 void computeConnectedClientsScalarGrid(); 304 265 305 private: 306 307 /** Clients that have to send a grid. There can be multiple clients in case of secondary server, otherwise only one client. */ 308 std::list<CContextClient*> clients; 309 std::set<CContextClient*> clientsSet; 310 311 bool isChecked; 312 bool isDomainAxisChecked; 313 bool isIndexSent; 314 266 315 CDomainGroup* vDomainGroup_; 267 316 CAxisGroup* vAxisGroup_; … 270 319 bool isAxisListSet, isDomListSet, isScalarListSet; 271 320 321 /** Client-like distribution calculated based on the knowledge of the entire grid */ 272 322 CDistributionClient* clientDistribution_; 323 324 /** Server-like distribution calculated upon receiving indexes */ 273 325 CDistributionServer* serverDistribution_; 326 274 327 CClientServerMapping* clientServerMap_; 275 328 size_t writtenDataSize_; 276 329 int numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 277 std::map<int,size_t> connectedDataSize_; 278 std::vector<int> connectedServerRank_; 279 bool isDataDistributed_; 280 330 331 /** Map storing local ranks of connected receivers. Key = size of receiver's intracomm. 332 * It is calculated in computeConnectedClients(). */ 333 std::map<int, std::vector<int> > connectedServerRank_; 334 335 /** Map storing the size of data to be sent. Key = size of receiver's intracomm 336 * It is calculated in computeConnectedClients(). */ 337 std::map<int, std::map<int,size_t> > connectedDataSize_; 338 339 /** Ranks of connected receivers in case of reading. It is calculated in recvIndex().
*/ 340 std::vector<int> connectedServerRankRead_; 341 342 /** Size of data to be sent in case of reading. It is calculated in recvIndex(). */ 343 std::map<int,size_t> connectedDataSizeRead_; 344 345 bool isDataDistributed_; 281 346 //! True if and only if the data defined on the grid can be outputted in a compressed way 282 347 bool isCompressible_; 283 348 std::set<std::string> relFilesCompressed; 284 349 285 bool isTransformed_ ;286 bool isGenerated_;350 bool isTransformed_, isGenerated_; 351 bool computedWrittenIndex_; 287 352 std::vector<int> axisPositionInGrid_; 288 353 CGridTransformation* transformations_; … 290 355 std::map<CGrid*, std::pair<bool,StdString> > gridSrc_; 291 356 bool hasTransform_; 292 CClientServerMapping::GlobalIndexMap globalIndexOnServer_; 293 // List order of axis and domain in a grid, if there is a domain, it will take value 1 (true), axis 0 (false) 357 358 /** Map storing global indexes of server-like (band-wise) distribution for sending to receivers. 359 * Key = size of receiver's intracomm. 360 */ 361 // std::map<CContextClient*, CClientServerMapping::GlobalIndexMap> globalIndexOnServer_; 362 std::map<int, CClientServerMapping::GlobalIndexMap> globalIndexOnServer_; 363 364 365 /** List order of axis and domain in a grid, if there is a domain, it will take value 1 (true), axis 0 (false) */ 294 366 std::vector<int> order_; 367 295 368 }; // class CGrid 296 369 … 300 373 void CGrid::inputField(const CArray<double,n>& field, CArray<double,1>& stored) const 301 374 { 375 //#ifdef __XIOS_DEBUG 302 376 if (this->getDataSize() != field.numElements()) 303 377 ERROR("void CGrid::inputField(const CArray<double,n>& field, CArray<double,1>& stored) const", … 305 379 << "Received data size = " << field.numElements() << " ] " 306 380 << "The data array does not have the right size! " 307 << "Grid = " << this->GetName()) 381 << "Grid = " << this->getId()) 382 //#endif 308 383 this->storeField_arr(field.dataFirst(), stored); 309 384 } … 312 387 void CGrid::outputField(const CArray<double,1>& stored, CArray<double,n>& field) const 313 388 { 389 //#ifdef __XIOS_DEBUG 314 390 if (this->getDataSize() != field.numElements()) 315 391 ERROR("void CGrid::outputField(const CArray<double,1>& stored, CArray<double,n>& field) const", … 317 393 << "Output data size = " << field.numElements() << " ] " 318 394 << "The output array does not have the right size! " 319 << "Grid = " << this->GetName()) 395 << "Grid = " << this->getId()) 396 //#endif 320 397 this->restoreField_arr(stored, field.dataFirst()); 398 } 399 400 /*! 401 This function removes the effect of mask on received data on the server. 402 This function only serves for checking purposes.
TODO: Something must be done to separate mask and data_index from each other in received data 403 \param [in] data received data with masking effect on the server 404 \param [out] outData data without masking effect 405 */ 406 template <int N> 407 void CGrid::uncompressField(const CArray<double,N>& data, CArray<double,1>& outData) const 408 { 409 uncompressField_arr(data.dataFirst(), outData); 321 410 } 322 411 … 332 421 int dim = domainMasks.size() * 2 + axisMasks.size(); 333 422 std::vector<CDomain*> domainP = this->getDomains(); 423 std::vector<CAxis*> axisP = this->getAxis(); 334 424 335 425 std::vector<int> idxLoop(dim,0), indexMap(numElement), eachDimSize(dim); … 345 435 } 346 436 else if (1 == axisDomainOrder(i)) { 347 eachDimSize[indexMap[i]] = axisMasks[idxAxis]->numElements(); 437 // eachDimSize[indexMap[i]] = axisMasks[idxAxis]->numElements(); 438 eachDimSize[indexMap[i]] = axisP[idxAxis]->n; 348 439 ++idx; ++idxAxis; 349 440 } … 360 451 << "Local size of dimension " << i << " is " << eachDimSize[i] << "." << std::endl 361 452 << "Mask size for dimension " << i << " is " << gridMask.extent(i) << "." << std::endl 362 << "Grid = " << this-> GetName())453 << "Grid = " << this->getId()) 363 454 } 364 455 } … 388 479 if (2 == axisDomainOrder(i)) 389 480 { 390 maskValue = maskValue && (*domainMasks[idxDomain])(idxLoop[indexMap[i]] + idxLoop[indexMap[i]+1] * eachDimSize[indexMap[i]]); 481 int idxTmp = idxLoop[indexMap[i]] + idxLoop[indexMap[i]+1] * eachDimSize[indexMap[i]]; 482 if (idxTmp < (*domainMasks[idxDomain]).numElements()) 483 maskValue = maskValue && (*domainMasks[idxDomain])(idxTmp); 484 else 485 maskValue = false; 391 486 ++idxDomain; 392 487 } 393 488 else if (1 == axisDomainOrder(i)) 394 489 { 395 maskValue = maskValue && (*axisMasks[idxAxis])(idxLoop[indexMap[i]]); 490 int idxTmp = idxLoop[indexMap[i]]; 491 if (idxTmp < (*axisMasks[idxAxis]).numElements()) 492 maskValue = maskValue && (*axisMasks[idxAxis])(idxTmp); 493 else 494 maskValue = false; 495 396 496 ++idxAxis; 397 497 } … 413 513 } 414 514 515 template<int N> 516 void CGrid::modifyGridMaskSize(CArray<bool,N>& gridMask, 517 const std::vector<int>& eachDimSize, 518 bool newValue) 519 { 520 if (N != eachDimSize.size()) 521 { 522 // ERROR("CGrid::modifyGridMaskSize(CArray<bool,N>& gridMask, 523 // const std::vector<int>& eachDimSize, 524 // bool newValue)", 525 // << "Dimension size of the mask is different from input dimension size." << std::endl 526 // << "Mask dimension is " << N << "." << std::endl 527 // << "Input dimension is " << eachDimSize.size() << "." << std::endl 528 // << "Grid = " << this->getId()) 529 } 530 CArrayBoolTraits<CArray<bool,N> >::resizeArray(gridMask,eachDimSize); 531 gridMask = newValue; 532 } 533 534 415 535 /*! 416 536 Modify the current mask of the grid; the local indexes to be modified will take the value false … 419 539 */ 420 540 template<int N> 421 void CGrid::modifyGridMask(CArray<bool,N>& gridMask, const CArray<int,1>& indexToModify) 422 { 423 bool valueToModify = false; 541 void CGrid::modifyGridMask(CArray<bool,N>& gridMask, const CArray<int,1>& indexToModify, bool valueToModify) 542 { 424 543 int num = indexToModify.numElements(); 425 544 for (int idx = 0; idx < num; ++idx) … 430 549 ///-------------------------------------------------------------- 431 550 551 552 /*! 553 A grid can have multiple dimensions, and so can its mask, in the form of a multi-dimensional array. 554 It is not a good idea to store all the multi-dimensional arrays corresponding to each mask.
555 One way is to convert this array into a one-dimensional one on which all processing takes place. 556 \param [in] gridMask multi-dimensional grid mask 557 */ 558 template<int N> 559 void CGrid::getLocalMask(const CArray<bool,N>& gridMask, CArray<bool,1>& localMask) 560 { 561 if (gridMask.isEmpty()) return ; 562 int dim = gridMask.dimensions(); 563 std::vector<int> dimensionSizes(dim); 564 for (int i = 0; i < dim; ++i) dimensionSizes[i] = gridMask.extent(i); 565 566 std::vector<int> idxLoop(dim,0); 567 int ssize = gridMask.numElements(), idx = 0; 568 localMask.resize(ssize); 569 while (idx < ssize) 570 { 571 for (int i = 0; i < dim-1; ++i) 572 { 573 if (idxLoop[i] == dimensionSizes[i]) 574 { 575 idxLoop[i] = 0; 576 ++idxLoop[i+1]; 577 } 578 } 579 580 int maskIndex = idxLoop[0]; 581 int mulDim = 1; 582 for (int k = 1; k < dim; ++k) 583 { 584 mulDim *= dimensionSizes[k-1]; 585 maskIndex += idxLoop[k]*mulDim; 586 } 587 localMask(maskIndex) = *(gridMask.dataFirst()+maskIndex); 588 589 ++idxLoop[0]; 590 ++idx; 591 } 592 } 593 594 432 595 // Declare/Define CGridGroup and CGridDefinition 433 596 DECLARE_GROUP(CGrid); -
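The getLocalMask template above is the core of the new mask handling: it walks an N-dimensional mask with an odometer-style index vector and recomputes the flat offset as idxLoop[0] + idxLoop[1]*dim0 + idxLoop[2]*dim0*dim1 + ..., the first dimension varying fastest. A self-contained sketch of the same traversal over a plain buffer (hypothetical names, assuming mask.size() equals the product of the extents):

    #include <vector>

    // Sketch of the odometer loop in CGrid::getLocalMask: visit every cell of
    // an N-d array stored with the first index varying fastest.
    void flattenMask(const std::vector<bool>& mask,  // N-d data in flat storage
                     const std::vector<int>& dims,   // extent of each dimension
                     std::vector<bool>& localMask)   // 1-d output
    {
      const int dim = static_cast<int>(dims.size());
      std::vector<int> idxLoop(dim, 0);              // the "odometer"
      const int ssize = static_cast<int>(mask.size());
      localMask.resize(ssize);

      for (int idx = 0; idx < ssize; ++idx)
      {
        // Propagate any digit that overflowed its dimension extent.
        for (int i = 0; i < dim - 1; ++i)
          if (idxLoop[i] == dims[i]) { idxLoop[i] = 0; ++idxLoop[i + 1]; }

        // Flat offset: idxLoop[0] + idxLoop[1]*dims[0] + idxLoop[2]*dims[0]*dims[1] + ...
        int maskIndex = idxLoop[0];
        int mulDim = 1;
        for (int k = 1; k < dim; ++k)
        {
          mulDim *= dims[k - 1];
          maskIndex += idxLoop[k] * mulDim;
        }

        localMask[maskIndex] = mask[maskIndex];
        ++idxLoop[0];
      }
    }

Because the offset is rebuilt from the odometer each time, the cells are visited in plain storage order 0, 1, 2, ...; the loop is effectively a copy, which is why the original can dereference gridMask.dataFirst() + maskIndex directly.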
XIOS/dev/branch_openmp/src/node/interpolate_domain.cpp
r1014 r1460 48 48 } 49 49 50 bool detect_missing_value=false ; 51 if (!this->detect_missing_value.isEmpty()) detect_missing_value = this->detect_missing_value.getValue(); 52 else this->detect_missing_value.setValue(detect_missing_value); 53 54 bool renormalize=false ; 55 if (!this->renormalize.isEmpty()) renormalize = this->renormalize.getValue(); 56 else this->renormalize.setValue(renormalize); 57 58 bool quantity=false ; 59 if (!this->quantity.isEmpty()) quantity = this->quantity.getValue(); 60 else this->quantity.setValue(quantity); 61 50 62 if (this->mode.isEmpty()) this->mode.setValue(mode_attr::compute); 51 63 if (this->write_weight.isEmpty()) this->write_weight.setValue(false); 52 64 53 StdString weightFile; 54 switch (this->mode) 55 { 56 case mode_attr::read: 57 if (this->weight_filename.isEmpty()) 58 { 59 if (!this->write_weight) 60 ERROR("void CInterpolateDomain::checkValid(CDomain* domainSrc)", 61 << "Read mode is activated but there is no file specified." << std::endl 62 << "Please define a correct file containing interpolation weights with option 'file'. "); 63 } 64 else 65 { 66 weightFile = this->weight_filename; 67 ifstream f(weightFile.c_str()); 68 if (!f.good()) 69 ERROR("void CInterpolateDomain::checkValid(CDomain* domainSrc)", 70 << "Read mode is activated but file " << weightFile << " doesn't exist." << std::endl 71 << "Please check this file "); 72 } 73 break; 74 case mode_attr::compute: 75 break; 76 case mode_attr::read_or_compute: 77 if (!this->weight_filename.isEmpty() && !this->write_weight) 78 { 79 weightFile = this->weight_filename; 80 ifstream f(weightFile.c_str()); 81 if (!f.good()) 82 ERROR("void CInterpolateDomain::checkValid(CDomain* domainSrc)", 83 << "read_or_compute mode is activated but file " << weightFile << " doesn't exist." << std::endl 84 << "Please check this file "); 85 } 86 break; 87 default: 88 break; 89 } 65 if (this->read_write_convention.isEmpty()) this->read_write_convention.setValue(read_write_convention_attr::fortran); 90 66 91 67 } -
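The rewritten checkValid above replaces the old weight-file validation with a recurring XIOS idiom: read an attribute if the user set it, otherwise write the default back into the attribute so that later passes see a definite value. A reduced sketch of the idiom with a hypothetical optional-style wrapper standing in for the real attribute classes:

    #include <optional>

    // Hypothetical stand-in for an XIOS attribute: empty until explicitly set.
    template <typename T>
    struct Attr
    {
      std::optional<T> value;
      bool isEmpty() const { return !value.has_value(); }
      T getValue() const { return *value; }
      void setValue(const T& v) { value = v; }
    };

    // Read-or-initialize, as done for detect_missing_value, renormalize and
    // quantity above: after the call the attribute is guaranteed non-empty.
    template <typename T>
    T resolveDefault(Attr<T>& attr, const T& fallback)
    {
      if (!attr.isEmpty()) return attr.getValue();
      attr.setValue(fallback);
      return fallback;
    }

A caller would write, for example, bool renorm = resolveDefault(renormalizeAttr, false); leaving the attribute populated for any later validation pass.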
XIOS/dev/branch_openmp/src/node/node_enum.hpp
r976 r1460 29 29 eReduceAxisToScalar, 30 30 eReduceDomainToAxis, 31 eReduceAxisToAxis, 31 32 eExtractDomainToAxis, 32 33 eComputeConnectivityDomain, 33 34 eExpandDomain, 34 35 eExtractAxisToScalar, 35 eReduceDomainToScalar 36 37 } ENodeType; 36 eReduceDomainToScalar, 37 eTemporalSplitting, 38 eDuplicateScalarToAxis, 39 eReduceScalarToScalar, 40 eReorderDomain 41 } ENodeType; 38 42 39 43 } // namespace xios -
XIOS/dev/branch_openmp/src/node/node_type.hpp
r976 r1460 19 19 #include "reduce_axis_to_scalar.hpp" 20 20 #include "reduce_domain_to_axis.hpp" 21 #include "reduce_axis_to_axis.hpp" 21 22 #include "extract_domain_to_axis.hpp" 22 23 #include "compute_connectivity_domain.hpp" … … 24 25 #include "extract_axis_to_scalar.hpp" 25 26 #include "reduce_domain_to_scalar.hpp" 26 27 #include "temporal_splitting.hpp" 28 #include "duplicate_scalar_to_axis.hpp" 29 #include "reduce_scalar_to_scalar.hpp" 30 #include "reorder_domain.hpp" 27 31 28 32 -
XIOS/dev/branch_openmp/src/node/reduce_domain_to_axis.cpp
r980 r1460 62 62 << "Domain source " <<domainSrc->getId() << std::endl 63 63 << "Axis destination " << axisDst->getId()); 64 if (this->local.isEmpty()) local=false ; 64 65 65 if (this->direction.isEmpty())66 ERROR("CReduceDomainToAxis::checkValid(CAxis* axisDst, CDomain* domainSrc)",67 << "A direction to apply the operation must be defined. It should be: 'iDir' or 'jDir'"68 << "Domain source " <<domainSrc->getId() << std::endl69 << "Axis destination " << axisDst->getId());70 71 72 66 switch (direction) 73 67 { -
XIOS/dev/branch_openmp/src/node/reduce_domain_to_scalar.cpp
r976 r1460 39 39 void CReduceDomainToScalar::checkValid(CScalar* scalarDst, CDomain* domainSrc) 40 40 { 41 if (this->local.isEmpty()) local=false ; 41 42 } 42 43 -
XIOS/dev/branch_openmp/src/node/scalar.cpp
r1328 r1460 35 35 m["extract_axis"] = TRANS_EXTRACT_AXIS_TO_SCALAR; 36 36 m["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR; 37 m["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; 37 38 } 38 39 … … 43 44 (*CScalar::transformationMapList_ptr)["extract_axis"] = TRANS_EXTRACT_AXIS_TO_SCALAR; 44 45 (*CScalar::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR; 46 (*CScalar::transformationMapList_ptr)["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; 45 47 } 46 48 -
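Both initialization paths above fill the same name-to-enum table, so that an XML tag such as reduce_scalar can later be translated into its ETranformationType value. A minimal sketch of that lookup, assuming only the mapping visible in this changeset (the enum name is spelled as in transformation_enum.hpp):

    #include <map>
    #include <stdexcept>
    #include <string>

    enum ETranformationType { TRANS_REDUCE_SCALAR_TO_SCALAR = 16 };

    // Illustrative lookup against a registration table such as CScalar's
    // transformationMapList_: unknown tags are rejected explicitly.
    ETranformationType lookupTransformation(
        const std::map<std::string, ETranformationType>& table,
        const std::string& name)
    {
      std::map<std::string, ETranformationType>::const_iterator it = table.find(name);
      if (it == table.end())
        throw std::runtime_error("unknown transformation: " + name);
      return it->second;
    }

After m["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; has run, lookupTransformation(m, "reduce_scalar") returns that enum value, and a typo in the tag fails loudly instead of silently mapping to a default.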
XIOS/dev/branch_openmp/src/node/scalar.hpp
r1331 r1460 10 10 #include "attribute_enum.hpp" 11 11 #include "attribute_enum_impl.hpp" 12 #include "attribute_array.hpp" 12 13 #include "transformation.hpp" 13 14 #include "transformation_enum.hpp" … … 82 83 TransMapTypes transformationMap_; 83 84 84 private: 85 void setTransformations(const TransMapTypes&); 85 void setTransformations(const TransMapTypes&); 86 86 87 87 private: -
XIOS/dev/branch_openmp/src/node/transformation.hpp
r1334 r1460 66 66 if (0 == transformationCreationCallBacks_) 67 67 transformationCreationCallBacks_ = new CallBackMap(); 68 68 69 return (*transformationCreationCallBacks_).insert(make_pair(transType, createFn)).second; 69 70 } -
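registerTransformation above is a classic self-registering factory: each transformation type inserts its creation function into a lazily allocated callback map, and the boolean returned by insert reports whether the type was already registered. A compact standalone sketch of the pattern with hypothetical names:

    #include <map>
    #include <utility>

    enum TransType { TRANS_A, TRANS_B };
    struct Transformation { virtual ~Transformation() {} };

    typedef Transformation* (*CreateFn)();
    typedef std::map<TransType, CreateFn> CallBackMap;

    // Allocated lazily so registration is safe during static initialization,
    // whatever the translation-unit order.
    static CallBackMap* callbacks = 0;

    bool registerTransformation(TransType t, CreateFn fn)
    {
      if (callbacks == 0) callbacks = new CallBackMap();
      // insert(...).second is false when the type was already registered.
      return callbacks->insert(std::make_pair(t, fn)).second;
    }

    Transformation* createTransformation(TransType t)
    {
      CallBackMap::iterator it = callbacks->find(t);
      return (it != callbacks->end()) ? (it->second)() : 0;
    }

Each concrete transformation then registers itself once, typically through a static bool initialized with the result of registerTransformation.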
XIOS/dev/branch_openmp/src/node/transformation_enum.hpp
r976 r1460 20 20 TRANS_EXTRACT_AXIS_TO_SCALAR = 11, 21 21 TRANS_REDUCE_DOMAIN_TO_SCALAR = 12, 22 TRANS_TEMPORAL_SPLITTING = 13, 23 TRANS_REDUCE_AXIS_TO_AXIS = 14, 24 TRANS_DUPLICATE_SCALAR_TO_AXIS = 15, 25 TRANS_REDUCE_SCALAR_TO_SCALAR = 16, 26 TRANS_REORDER_DOMAIN = 17 22 27 } ETranformationType; 23 28 -
XIOS/dev/branch_openmp/src/node/variable.cpp
r1075 r1460 97 97 { 98 98 CContext* context=CContext::getCurrent() ; 99 if (!context->hasServer) 99 100 if (context->hasClient) 100 101 { 101 CContextClient* client=context->client ;102 103 CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE);104 if (client->isServerLeader())102 // Use correct context client to send message 103 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 104 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 105 for (int i = 0; i < nbSrvPools; ++i) 105 106 { 106 CMessage msg ; 107 msg<<this->getId() ; 108 msg<<content ; 109 const std::list<int>& ranks = client->getRanksServerLeader(); 110 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 111 event.push(*itRank,1,msg); 112 client->sendEvent(event) ; 113 } 114 else client->sendEvent(event) ; 115 } 107 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer 108 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] 109 : context->client; 110 111 CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE) ; 112 if (contextClientTmp->isServerLeader()) 113 { 114 CMessage msg ; 115 msg<<this->getId() ; 116 msg<<content ; 117 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 118 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 119 event.push(*itRank,1,msg); 120 contextClientTmp->sendEvent(event) ; 121 } 122 else contextClientTmp->sendEvent(event) ; 123 } 124 } 125 } 126 127 void CVariable::sendValue(CContextClient* client, bool clientPrim /*= false*/) 128 { 129 CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE) ; 130 if (client->isServerLeader()) 131 { 132 CMessage msg ; 133 msg<<this->getId() ; 134 msg<<content ; 135 const std::list<int>& ranks = client->getRanksServerLeader(); 136 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 137 event.push(*itRank,1,msg); 138 client->sendEvent(event) ; 139 } 140 else client->sendEvent(event) ; 116 141 } 117 142 -
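The refactored sendValue hinges on one ternary that decides how many client pools must receive the value: a plain client has exactly one, an intermediate client-and-server process has one per primary-server pool, and a pure server sends nothing. The rule, extracted as a standalone helper with a hypothetical signature (nbPrimServerPools stands in for context->clientPrimServer.size()):

    // Illustrative only: the pool-selection rule used in CVariable::sendValue.
    int numberOfSendPools(bool hasClient, bool hasServer, int nbPrimServerPools)
    {
      if (!hasServer) return 1;                  // plain client: the default client only
      return hasClient ? nbPrimServerPools : 0;  // intermediate level, or pure server
    }

Inside the loop, each iteration then picks clientPrimServer[i] when the process is also a server and falls back to the default client otherwise, so the leader-rank broadcast body stays identical across pools.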
XIOS/dev/branch_openmp/src/node/variable.hpp
r1041 r1460 32 32 , public CVariableAttributes 33 33 { 34 /// typedef /// 35 typedef CObjectTemplate<CVariable> SuperClass; 36 typedef CVariableAttributes SuperClassAttribute; 37 38 public : 34 39 enum EEventId 35 40 { 36 41 EVENT_ID_VARIABLE_VALUE 37 42 }; 38 39 /// typedef ///40 typedef CObjectTemplate<CVariable> SuperClass;41 typedef CVariableAttributes SuperClassAttribute;42 43 43 44 friend class CVariableGroup; … … 79 80 //! Sending a request to set up variable data 80 81 void sendValue(); 82 void sendValue(CContextClient* client, bool clientPrim = false); 81 83 82 84 static void recvValue(CEventServer& event) ;