Changeset 1099
- Timestamp:
- 04/14/17 16:53:56 (8 years ago)
- Location:
- XIOS/dev/dev_olga
- Files:
-
- 19 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_olga/bld.cfg
r1054 r1099 37 37 #bld::target test_new_features.exe test_unstruct_complete.exe 38 38 #bld::target test_client.exe test_complete.exe 39 #bld::target test_client.exe39 bld::target test_client.exe 40 40 bld::exe_dep 41 41 -
XIOS/dev/dev_olga/inputs/iodef.xml
r1025 r1099 16 16 <file_definition type="one_file" par_access="collective" output_freq="1h" output_level="10" enabled=".TRUE."> 17 17 <file id="output" name="output"> 18 <field field_ref="field_Domain" name="field_A" /> 18 <!-- <field field_ref="field_Domain" name="field_A" /> --> 19 <field field_ref="field_A" name="field_A" /> 20 <field field_ref="field_A" name="field_B" /> 21 </file> 22 <file id="output1" name="output1"> 23 <!-- <field field_ref="field_Domain" name="field_A" /> --> 24 <field field_ref="field_A" name="field_A" /> 19 25 </file> 20 26 </file_definition> … … 24 30 <axis id="axis_A"/> 25 31 <axis id="axis_A_zoom" axis_ref="axis_A"> 26 <zoom_axis begin=" 1" n="2" />32 <zoom_axis begin="0" n="1" /> 27 33 </axis> 28 34 </axis_definition> … … 30 36 <domain_definition> 31 37 <domain id="domain_A" /> 38 32 39 </domain_definition> 33 40 … … 44 51 </context> 45 52 46 <context id="toto" >47 </context>48 49 <context id="titi">50 </context>51 52 <context id="tata">53 </context>54 55 53 <context id="xios"> 56 54 <variable_definition> 55 <variable_group id="server"> 56 <variable id="using_server2" type="bool">true</variable> 57 <variable id="ratio_server2" type="int">50</variable> 58 </variable_group> 59 57 60 <variable_group id="buffer"> 58 61 <variable id="optimal_buffer_size" type="string">performance</variable> 59 <variable id="buffer_size_factor" type="double">1 .0</variable>62 <variable id="buffer_size_factor" type="double">10.0</variable> 60 63 </variable_group> 61 64 -
XIOS/dev/dev_olga/src/config/axis_attribute_private.conf
r1025 r1099 2 2 DECLARE_ATTRIBUTE_PRIVATE(int, global_zoom_n) 3 3 4 DECLARE_ATTRIBUTE_PRIVATE(int, zoom_begin) 5 DECLARE_ATTRIBUTE_PRIVATE(int, zoom_n) 6 4 7 /* LOCAL DATA*/ 5 8 DECLARE_ARRAY_PRIVATE(int , 1 , zoom_index) -
XIOS/dev/dev_olga/src/config/domain_attribute_private.conf
r1025 r1099 19 19 DECLARE_ATTRIBUTE_PRIVATE(int, global_zoom_nj) 20 20 21 // Local zoom information 22 DECLARE_ATTRIBUTE_PRIVATE(int, zoom_ibegin) 23 DECLARE_ATTRIBUTE_PRIVATE(int, zoom_ni) 24 DECLARE_ATTRIBUTE_PRIVATE(int, zoom_jbegin) 25 DECLARE_ATTRIBUTE_PRIVATE(int, zoom_nj) 26 27 21 28 DECLARE_ARRAY_PRIVATE(int , 1 , zoom_i_index, false) 22 29 DECLARE_ARRAY_PRIVATE(int , 1 , zoom_j_index, false) 23 //DECLARE_ARRAY_PRIVATE(double , 1 , areavalue, false)24 //DECLARE_ARRAY_PRIVATE(int , 1 , lonvalue, false)25 //DECLARE_ARRAY_PRIVATE(int , 1 , latvalue, false)26 //DECLARE_ARRAY_PRIVATE(int , 2 , bounds_lonvalue, false)27 //DECLARE_ARRAY_PRIVATE(int , 2 , bounds_latvalue, false)28 -
XIOS/dev/dev_olga/src/declare_ref_func.hpp
r1009 r1099 71 71 bool C##type::hasDirect##type##Reference(void) const \ 72 72 { \ 73 return !this->name_##_ref.isEmpty(); \ 73 return (!this->name_##_ref.isEmpty() && \ 74 C##type::has(this->name_##_ref)); \ 74 75 } \ 75 76 \ -
XIOS/dev/dev_olga/src/distribution_server.cpp
r1030 r1099 175 175 void CDistributionServer::computeLocalIndex(CArray<size_t,1>& globalIndex) 176 176 { 177 int ssize = globalIndex.numElements(); 178 CArray<size_t,1> localIndex(ssize); 177 size_t ssize = globalIndex.numElements(); 178 size_t localIndexSize = std::min(globalIndex_.numElements(), ssize); 179 CArray<size_t,1> localIndex(localIndexSize); 179 180 GlobalLocalMap::const_iterator ite = globalLocalIndexMap_.end(), it; 180 for (int idx = 0; idx < ssize; ++idx) 181 int i = 0; 182 for (size_t idx = 0; idx < ssize; ++idx) 181 183 { 182 184 it = globalLocalIndexMap_.find(globalIndex(idx)); 183 185 if (ite != it) 184 localIndex(idx) = it->second; 186 { 187 localIndex(i) = it->second; 188 ++i; 189 } 185 190 } 186 191 … … 201 206 } 202 207 208 /*! 209 Get the size of grid index in server (e.g.: sizeGrid *= size of each dimension) 210 */ 211 int CDistributionServer::getGridSize() const 212 { 213 return globalLocalIndexMap_.size(); 214 } 203 215 204 216 const std::vector<int>& CDistributionServer::getZoomBeginGlobal() const -
XIOS/dev/dev_olga/src/distribution_server.hpp
r1054 r1099 42 42 const GlobalLocalMap& getGlobalLocalIndex() const { return globalLocalIndexMap_; } 43 43 const std::vector<CArray<int,1> >& getGlobalIndexEachDimension() const {return globalIndexEachDimension_;} 44 int getGridSize() const; 44 45 45 46 virtual CArray<size_t,1> computeLocalIndex(const CArray<size_t,1>& globalIndex); -
XIOS/dev/dev_olga/src/io/nc4_data_output.cpp
r1054 r1099 74 74 else setWrittenDomain(domid); 75 75 76 const std::vector<int>& local_size_write = domain->getLocalWriteSize();77 const std::vector<int>& global_size_write = domain->getGlobalWriteSize();78 const std::vector<int>& start_write = domain->getStartWriteIndex();79 const std::vector<int>& count_write = domain->getCountWriteIndex();76 // const std::vector<int>& local_size_write = domain->getLocalWriteSize(); 77 // const std::vector<int>& global_size_write = domain->getGlobalWriteSize(); 78 // const std::vector<int>& start_write = domain->getStartWriteIndex(); 79 // const std::vector<int>& count_write = domain->getCountWriteIndex(); 80 80 int nvertex = (domain->nvertex.isEmpty()) ? 0 : domain->nvertex; 81 81 … … 140 140 bounds_latid = StdString("bounds_lat").append(appendDomid); 141 141 142 SuperClassWriter::addDimension(dimXid, local_size_write[0]);143 SuperClassWriter::addDimension(dimYid, local_size_write[1]);142 SuperClassWriter::addDimension(dimXid, domain->zoom_ni); 143 SuperClassWriter::addDimension(dimYid, domain->zoom_nj); 144 144 145 145 if (domain->hasBounds) … … 148 148 if (server->intraCommSize > 1) 149 149 { 150 this->writeLocalAttributes(0, count_write[0], 151 0, count_write[1], 150 this->writeLocalAttributes(domain->zoom_ibegin, 151 domain->zoom_ni, 152 domain->zoom_jbegin, 153 domain->zoom_nj, 152 154 appendDomid); 153 155 154 156 if (singleDomain) 155 157 this->writeLocalAttributes_IOIPSL(dimXid, dimYid, 156 0, count_write[0], 157 0, count_write[1], 158 domain->zoom_ibegin, 159 domain->zoom_ni, 160 domain->zoom_jbegin, 161 domain->zoom_nj, 158 162 domain->ni_glo,domain->nj_glo, 159 163 server->intraCommRank,server->intraCommSize); … … 228 232 break; 229 233 case CDomain::type_attr::rectilinear : 230 CArray<double,1> lat = domain->latvalue(Range(fromStart,toEnd, local_size_write[0])) ;234 CArray<double,1> lat = domain->latvalue(Range(fromStart,toEnd,domain->zoom_ni)) ; 231 235 SuperClassWriter::writeData(CArray<double,1>(lat.copy()), 
latid, isCollective, 0); 232 CArray<double,1> lon = domain->lonvalue(Range(0, local_size_write[1])) ;236 CArray<double,1> lon = domain->lonvalue(Range(0,domain->zoom_ni-1)) ; 233 237 // CArray<double,1> lon = domain->lonvalue(Range(0,local_size_write[1]-1)) ; 234 238 SuperClassWriter::writeData(CArray<double,1>(lon.copy()), lonid, isCollective, 0); … … 252 256 case (ONE_FILE) : 253 257 { 254 SuperClassWriter::addDimension(dimXid, global_size_write[0]);255 SuperClassWriter::addDimension(dimYid, global_size_write[1]);258 SuperClassWriter::addDimension(dimXid, domain->global_zoom_ni); 259 SuperClassWriter::addDimension(dimYid, domain->global_zoom_nj); 256 260 257 261 if (domain->hasBounds) … … 327 331 else 328 332 { 329 // start[1]=domain->zoom_ibegin_srv-domain->global_zoom_ibegin;330 // start[0]=domain->zoom_jbegin_srv-domain->global_zoom_jbegin;331 // count[1]=domain->zoom_ni_srv ; count[0]=domain->zoom_nj_srv;332 start[1]= start_write[0];333 start[0]= start_write[1];334 count[1]= count_write[0];335 count[0]= count_write[1];333 start[1]=domain->zoom_ibegin-domain->global_zoom_ibegin; 334 start[0]=domain->zoom_jbegin-domain->global_zoom_jbegin; 335 count[1]=domain->zoom_ni ; count[0]=domain->zoom_nj ; 336 // start[1]= start_write[0]; 337 // start[0]= start_write[1]; 338 // count[1]= count_write[0]; 339 // count[0]= count_write[1]; 336 340 } 337 341 … … 359 363 else 360 364 { 361 start[0]= start_write[1]; 362 count[0]= count_write[1]; 363 CArray<double,1> lat = domain->latvalue(Range(fromStart,toEnd,count_write[0])) ; 365 // start[0]= start_write[1]; 366 // count[0]= count_write[1]; 367 start[0]=domain->zoom_jbegin-domain->global_zoom_jbegin; 368 count[0]=domain->zoom_nj; 369 CArray<double,1> lat = domain->latvalue(Range(fromStart,toEnd,domain->zoom_ni)) ; 364 370 SuperClassWriter::writeData(CArray<double,1>(lat.copy()), latid, isCollective, 0,&start,&count); 365 371 366 start[0]= start_write[0]; 367 count[0]= count_write[0]; 368 CArray<double,1> 
lon=domain->lonvalue(Range(0,count_write[1])) ; 372 // start[0]= start_write[0]; 373 // count[0]= count_write[0]; 374 start[0]=domain->zoom_ibegin-domain->global_zoom_ibegin; 375 count[0]=domain->zoom_ni; 376 CArray<double,1> lon=domain->lonvalue(Range(0,domain->zoom_ni-1)) ; 369 377 SuperClassWriter::writeData(CArray<double,1>(lon.copy()), lonid, isCollective, 0,&start,&count); 370 378 } … … 386 394 { 387 395 start[2] = 0; 388 start[1] = start_write[0];389 start[0] = start_write[1];390 count[2] = nvertex;391 count[1] = count_write[0];392 count[0] = count_write[1];396 start[1] = domain->zoom_ibegin - domain->global_zoom_ibegin; 397 start[0] = domain->zoom_jbegin - domain->global_zoom_jbegin; 398 count[2] = domain->nvertex; 399 count[1] = domain->zoom_ni; 400 count[0] = domain->zoom_nj; 393 401 } 394 402 … … 409 417 else 410 418 { 411 // start[1] = domain->zoom_ibegin_srv- domain->global_zoom_ibegin;412 // start[0] = domain->zoom_jbegin_srv- domain->global_zoom_jbegin;413 // count[1] = domain->zoom_ni_srv;414 // count[0] = domain->zoom_nj_srv;415 416 start[1]= start_write[0];417 start[0]= start_write[1];418 count[1]= count_write[0];419 count[0]= count_write[1];419 start[1] = domain->zoom_ibegin - domain->global_zoom_ibegin; 420 start[0] = domain->zoom_jbegin - domain->global_zoom_jbegin; 421 count[1] = domain->zoom_ni; 422 count[0] = domain->zoom_nj; 423 424 // start[1]= start_write[0]; 425 // start[0]= start_write[1]; 426 // count[1]= count_write[0]; 427 // count[0]= count_write[1]; 420 428 } 421 429 … … 1048 1056 axis->checkAttributes(); 1049 1057 1050 int local_size_write = axis->getLocalWriteSize(); 1051 int global_size_write = axis->getGlobalWriteSize(); 1052 int start_write = axis->getStartWriteIndex(); 1053 int count_write = axis->getCountWriteIndex(); 1054 1055 if ((0 == local_size_write) && (MULTI_FILE == SuperClass::type)) return; 1058 // int local_size_write = axis->getLocalWriteSize(); 1059 // int global_size_write = axis->getGlobalWriteSize(); 1060 // 
int start_write = axis->getStartWriteIndex(); 1061 // int count_write = axis->getCountWriteIndex(); 1062 1063 1064 // if ((0 == local_size_write) && (MULTI_FILE == SuperClass::type)) return; 1065 1066 int zoom_size = (MULTI_FILE == SuperClass::type) ? axis->zoom_n 1067 : axis->global_zoom_n; 1068 int zoom_begin = (MULTI_FILE == SuperClass::type) ? axis->zoom_begin 1069 : axis->global_zoom_begin; 1070 1071 if ((0 == axis->zoom_n) && (MULTI_FILE == SuperClass::type)) return; 1056 1072 1057 1073 std::vector<StdString> dims; … … 1062 1078 try 1063 1079 { 1064 SuperClassWriter::addDimension(axisid, global_size_write);1080 SuperClassWriter::addDimension(axisid, zoom_size); 1065 1081 if (axis->hasValue) 1066 1082 { … … 1102 1118 case MULTI_FILE: 1103 1119 { 1104 CArray<double,1> axis_value( local_size_write);1105 for (int i = 0; i < local_size_write; i++) axis_value(i) = axis->value(i);1120 CArray<double,1> axis_value(axis->zoom_n); 1121 for (int i = 0; i < axis->zoom_n; i++) axis_value(i) = axis->value(i); 1106 1122 SuperClassWriter::writeData(axis_value, axisid, isCollective, 0); 1107 1123 … … 1115 1131 case ONE_FILE: 1116 1132 { 1117 CArray<double,1> axis_value( count_write);1133 CArray<double,1> axis_value(axis->zoom_n); 1118 1134 axis_value = axis->value; 1119 1120 1135 std::vector<StdSize> start(1), startBounds(2) ; 1121 1136 std::vector<StdSize> count(1), countBounds(2) ; 1122 start[0] = startBounds[0] = start_write;1123 count[0] = countBounds[0] = count_write;1137 start[0] = startBounds[0] = zoom_begin - axis->global_zoom_begin; 1138 count[0] = countBounds[0] = zoom_size; 1124 1139 startBounds[1] = 0; 1125 1140 countBounds[1] = 2; … … 2295 2310 // start.push_back(nZoomBeginServer[idx] - nZoomBeginGlobal[idx]); 2296 2311 // count.push_back(nZoomSizeServer[idx]); 2297 start.push_back((domain->getStartWriteIndex())[idx]); 2298 count.push_back((domain->getCountWriteIndex())[idx]); 2312 // start.push_back((domain->getStartWriteIndex())[idx]); 2313 // 
count.push_back((domain->getCountWriteIndex())[idx]); 2314 start.push_back(domain->zoom_jbegin - domain->global_zoom_jbegin); 2315 count.push_back(domain->zoom_nj); 2299 2316 } 2300 2317 --idx ; 2301 2318 // start.push_back(nZoomBeginServer[idx] - nZoomBeginGlobal[idx]); 2302 2319 // count.push_back(nZoomSizeServer[idx]); 2303 start.push_back((domain->getStartWriteIndex())[idx]); 2304 count.push_back((domain->getCountWriteIndex())[idx]); 2320 // start.push_back((domain->getStartWriteIndex())[idx]); 2321 // count.push_back((domain->getCountWriteIndex())[idx]); 2322 start.push_back(domain->zoom_ibegin - domain->global_zoom_ibegin); 2323 count.push_back(domain->zoom_ni); 2305 2324 --idx ; 2306 2325 --idxDomain; … … 2309 2328 { 2310 2329 CAxis* axis = CAxis::get(axisList[idxAxis]); 2311 start.push_back(axis->getStartWriteIndex()); 2312 count.push_back(axis->getCountWriteIndex()); 2330 // start.push_back(axis->getStartWriteIndex()); 2331 // count.push_back(axis->getCountWriteIndex()); 2332 start.push_back(axis->zoom_begin - axis->global_zoom_begin); 2333 count.push_back(axis->zoom_n); 2313 2334 --idx; 2314 2335 --idxAxis; -
XIOS/dev/dev_olga/src/node/axis.cpp
r1054 r1099 27 27 , isDistributed_(false), hasBounds_(false), isCompressible_(false) 28 28 , numberWrittenIndexes_(0), totalNumberWrittenIndexes_(0), offsetWrittenIndexes_(0) 29 , transformationMap_(), hasValue(false) 29 , transformationMap_(), hasValue(false), doZoomByIndex_(false) 30 30 { 31 31 } … … 37 37 , isDistributed_(false), hasBounds_(false), isCompressible_(false) 38 38 , numberWrittenIndexes_(0), totalNumberWrittenIndexes_(0), offsetWrittenIndexes_(0) 39 , transformationMap_(), hasValue(false) 39 , transformationMap_(), hasValue(false), doZoomByIndex_(false) 40 40 { 41 41 } … … 334 334 zoom_index.setValue(index.getValue()); 335 335 } 336 if (zoom_n.isEmpty()) zoom_n.setValue(n); 337 if (zoom_begin.isEmpty()) zoom_begin.setValue(begin); 336 338 } 337 339 … … 384 386 switch(event.type) 385 387 { 386 // case EVENT_ID_SERVER_ATTRIBUT:387 // recvServerAttribut(event);388 //return true;389 //break;388 case EVENT_ID_DISTRIBUTION_ATTRIBUTE : 389 recvDistributionAttribute(event); 390 return true; 391 break; 390 392 // case EVENT_ID_INDEX: 391 393 // recvIndex(event); … … 440 442 441 443 if (this->isChecked) return; 442 if (context->hasClient) sendAttributes( );444 if (context->hasClient) sendAttributes(globalDim, orderPositionInGrid, distType); 443 445 444 446 this->isChecked = true; 445 447 } 446 448 447 void CAxis::sendAttributes() 449 void CAxis::sendAttributes(const std::vector<int>& globalDim, int orderPositionInGrid, 450 CServerDistributionDescription::ServerDistributionType distType) 448 451 { 449 452 if (index.numElements() == n_glo.getValue()) 450 453 sendNonDistributedAttributes(); 451 454 else 455 { 452 456 sendDistributedAttributes(); 457 sendDistributionAttribute(globalDim, orderPositionInGrid, distType); 458 } 453 459 } 454 460 … … 457 463 { 458 464 CContext* context = CContext::getCurrent(); 465 459 466 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 460 467 int nbSrvPools = (context->hasServer) ? 
(context->hasClient ? context->clientPrimServer.size() : 1) : 1; … … 466 473 int rank = client->clientRank; 467 474 468 //size_t ni = this->n.getValue();469 //size_t ibegin = this->begin.getValue();470 // size_tzoom_end = global_zoom_begin+global_zoom_n-1;471 //size_t nZoomCount = 0;475 size_t ni = this->n.getValue(); 476 size_t ibegin = this->begin.getValue(); 477 size_t global_zoom_end = global_zoom_begin+global_zoom_n-1; 478 size_t nZoomCount = 0; 472 479 size_t nbIndex = index.numElements(); 473 for (size_t idx = 0; idx < nbIndex; ++idx) 474 { 475 globalLocalIndexMap_[index(idx)] = idx; 476 // size_t globalIndex = index(idx); 477 // if (globalIndex >= global_zoom_begin && globalIndex <= zoom_end) ++nZoomCount; 478 } 479 480 // CArray<size_t,1> globalIndexAxis(nbIndex); 481 // std::vector<size_t> globalAxisZoom(nZoomCount); 482 // nZoomCount = 0; 483 // for (size_t idx = 0; idx < nbIndex; ++idx) 484 // { 485 // size_t globalIndex = index(idx); 486 // globalIndexAxis(idx) = globalIndex; 487 // if (globalIndex >= global_zoom_begin && globalIndex <= zoom_end) 488 // { 489 // globalAxisZoom[nZoomCount] = globalIndex; 490 // ++nZoomCount; 491 // } 492 // } 493 494 // std::set<int> writtenInd; 495 // if (isCompressible_) 496 // { 497 // for (int idx = 0; idx < data_index.numElements(); ++idx) 498 // { 499 // int ind = CDistributionClient::getAxisIndex(data_index(idx), data_begin, ni); 500 501 // if (ind >= 0 && ind < ni && mask(ind)) 502 // { 503 // ind += ibegin; 504 // if (ind >= global_zoom_begin && ind <= zoom_end) 505 // writtenInd.insert(ind); 506 // } 507 // } 508 // } 480 481 if (doZoomByIndex_) 482 { 483 nZoomCount = zoom_index.numElements(); 484 } 485 else 486 { 487 for (size_t idx = 0; idx < nbIndex; ++idx) 488 { 489 globalLocalIndexMap_[index(idx)] = idx; 490 size_t globalIndex = index(idx); 491 if (globalIndex >= global_zoom_begin && globalIndex <= global_zoom_end) ++nZoomCount; 492 } 493 } 494 495 496 CArray<size_t,1> globalIndexAxis(nbIndex); 497 
std::vector<size_t> globalAxisZoom(nZoomCount); 498 nZoomCount = 0; 499 if (doZoomByIndex_) 500 { 501 int nbIndexZoom = zoom_index.numElements(); 502 for (int i = 0; i < nbIndexZoom; ++i) 503 { 504 globalIndexAxis(i) = zoom_index(i); 505 } 506 } 507 else 508 { 509 for (size_t idx = 0; idx < nbIndex; ++idx) 510 { 511 size_t globalIndex = index(idx); 512 globalIndexAxis(idx) = globalIndex; 513 if (globalIndex >= global_zoom_begin && globalIndex <= global_zoom_end) 514 { 515 globalAxisZoom[nZoomCount] = globalIndex; 516 ++nZoomCount; 517 } 518 } 519 520 int end = begin + n -1; 521 zoom_begin = global_zoom_begin > begin ? global_zoom_begin : begin; 522 int zoom_end = global_zoom_end < end ? zoom_end : end; 523 zoom_n = zoom_end-zoom_begin+1; 524 } 525 526 std::set<int> writtenInd; 527 if (isCompressible_) 528 { 529 for (int idx = 0; idx < data_index.numElements(); ++idx) 530 { 531 int ind = CDistributionClient::getAxisIndex(data_index(idx), data_begin, ni); 532 533 if (ind >= 0 && ind < ni && mask(ind)) 534 { 535 ind += ibegin; 536 if (ind >= global_zoom_begin && ind <= global_zoom_end) 537 writtenInd.insert(ind); 538 } 539 } 540 } 509 541 510 542 CServerDistributionDescription serverDescriptionGlobal(globalDim, nbServer, distType); … … 608 640 } 609 641 642 643 void CAxis::sendDistributionAttribute(const std::vector<int>& globalDim, int orderPositionInGrid, 644 CServerDistributionDescription::ServerDistributionType distType) 645 { 646 CContext* context = CContext::getCurrent(); 647 648 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 649 for (int i = 0; i < nbSrvPools; ++i) 650 { 651 CContextClient* contextClientTmp = (context->hasServer) ? 
context->clientPrimServer[i] 652 : context->client; 653 int nbServer = contextClientTmp->serverSize; 654 655 CServerDistributionDescription serverDescription(globalDim, nbServer); 656 serverDescription.computeServerDistribution(); 657 658 std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin(); 659 std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes(); 660 661 globalDimGrid.resize(globalDim.size()); 662 for (int idx = 0; idx < globalDim.size(); ++idx) globalDimGrid(idx) = globalDim[idx]; 663 664 CEventClient event(getType(),EVENT_ID_DISTRIBUTION_ATTRIBUTE); 665 if (contextClientTmp->isServerLeader()) 666 { 667 std::list<CMessage> msgs; 668 669 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 670 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 671 { 672 // Use const int to ensure CMessage holds a copy of the value instead of just a reference 673 const int begin = serverIndexBegin[*itRank][orderPositionInGrid]; 674 const int ni = serverDimensionSizes[*itRank][orderPositionInGrid]; 675 const int end = begin + ni - 1; 676 677 msgs.push_back(CMessage()); 678 CMessage& msg = msgs.back(); 679 msg << this->getId(); 680 msg << ni << begin << end; 681 msg << global_zoom_begin.getValue() << global_zoom_n.getValue(); 682 msg << isCompressible_; 683 msg << orderPositionInGrid; 684 msg << globalDimGrid; 685 686 event.push(*itRank,1,msg); 687 } 688 contextClientTmp->sendEvent(event); 689 } 690 else contextClientTmp->sendEvent(event); 691 } 692 } 610 693 611 694 // void CAxis::computeConnectedServer(const std::vector<int>& globalDim, int orderPositionInGrid, … … 753 836 { 754 837 CContext* context = CContext::getCurrent(); 838 755 839 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 756 840 int nbSrvPools = (context->hasServer) ? (context->hasClient ? 
context->clientPrimServer.size() : 1) : 1; … … 859 943 int ns, n, i, j, ind, nv, idx; 860 944 CContext* context = CContext::getCurrent(); 945 861 946 //int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 862 947 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; … … 894 979 895 980 list_indi.push_back(CArray<int,1>(nbData)); 896 list_dataInd.push_back(CArray<int,1>(nbData)); 897 list_zoomInd.push_back(CArray<int,1>(nbData)); 981 list_dataInd.push_back(CArray<int,1>(nbData)); 898 982 list_mask.push_back(CArray<bool,1>(nbData)); 899 983 984 if (doZoomByIndex_) 985 list_zoomInd.push_back(CArray<int,1>(nbData)); 900 986 901 987 if (hasValue) … … 908 994 909 995 CArray<int,1>& indi = list_indi.back(); 910 CArray<int,1>& dataIndi = list_dataInd.back(); 911 CArray<int,1>& zoomIndi = list_zoomInd.back(); 996 CArray<int,1>& dataIndi = list_dataInd.back(); 912 997 CArray<bool,1>& maskIndi = list_mask.back(); 913 998 … … 920 1005 dataIndi(n) = dataIndex(ind); 921 1006 maskIndi(n) = mask(ind); 922 zoomIndi(n) = zoom_index(ind); 1007 1008 if (doZoomByIndex_) 1009 { 1010 CArray<int,1>& zoomIndi = list_zoomInd.back(); 1011 zoomIndi(n) = zoom_index(ind); 1012 } 923 1013 924 1014 if (hasValue) … … 938 1028 listData.push_back(CMessage()); 939 1029 listData.back() << this->getId() 940 << list_indi.back() << list_dataInd.back() << list_zoomInd.back() << list_mask.back() 941 << hasValue; 1030 << list_indi.back() << list_dataInd.back() << list_mask.back(); 1031 1032 listData.back() << doZoomByIndex_; 1033 if (doZoomByIndex_) 1034 listData.back() << list_zoomInd.back(); 1035 1036 listData.back() << hasValue; 942 1037 if (hasValue) 943 1038 listData.back() << list_val.back(); 1039 944 1040 listData.back() << hasBounds_; 945 1041 if (hasBounds_) … … 982 1078 CBufferIn& buffer = *buffers[idx]; 983 1079 buffer >> vec_indi[idx]; 984 buffer >> vec_dataInd[idx]; 985 buffer >> vec_zoomInd[idx]; 1080 buffer >> 
vec_dataInd[idx]; 986 1081 buffer >> vec_mask[idx]; 1082 1083 buffer >> doZoomByIndex_; 1084 if (doZoomByIndex_) 1085 buffer >> vec_zoomInd[idx]; 987 1086 988 1087 buffer >> hasValue; 989 1088 if (hasValue) 990 1089 buffer >> vec_val[idx]; 1090 991 1091 buffer >> hasBounds_; 992 1092 if (hasBounds_) … … 1050 1150 } 1051 1151 1052 int nbZoomIndex = 0; 1053 for (int idx = 0; idx < nbReceived; ++idx) 1054 { 1055 nbZoomIndex += vec_zoomInd[idx].numElements(); 1056 } 1057 1058 zoom_index.resize(nbZoomIndex); 1059 nbZoomIndex = 0; 1060 CArray<int,1>& zoom_Index_Tmp = this->zoom_index; 1061 for (int idx = 0; idx < nbReceived; ++idx) 1062 { 1063 CArray<int,1> tmp = zoom_Index_Tmp(Range(nbZoomIndex, nbZoomIndex + vec_zoomInd[idx].numElements()-1)); 1064 tmp = vec_zoomInd[idx]; 1065 1066 nbZoomIndex += vec_zoomInd[idx].numElements(); 1067 } 1068 1069 1070 { 1071 CContextServer* server = CContext::getCurrent()->server; 1072 count_write_index_ = zoom_index.numElements(); 1073 MPI_Scan(&count_write_index_, &start_write_index_, 1, MPI_INT, MPI_SUM, server->intraComm); 1074 global_write_size_ = start_write_index_; 1075 start_write_index_ -= count_write_index_; 1076 local_write_size_ = count_write_index_; 1077 } 1078 } 1079 1080 // void CAxis::sendServerAttribut(const std::vector<int>& globalDim, int orderPositionInGrid, 1081 // CServerDistributionDescription::ServerDistributionType distType) 1082 // { 1083 // CContext* context = CContext::getCurrent(); 1084 1085 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? 
context->clientPrimServer 1086 // : context->client; 1087 1088 1089 // int nbServer = contextClientTmp->serverSize; 1090 1091 // CServerDistributionDescription serverDescription(globalDim, nbServer); 1092 // serverDescription.computeServerDistribution(); 1093 1094 // std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin(); 1095 // std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes(); 1096 1097 // globalDimGrid.resize(globalDim.size()); 1098 // for (int idx = 0; idx < globalDim.size(); ++idx) globalDimGrid(idx) = globalDim[idx]; 1099 1100 // CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT); 1101 // if (contextClientTmp->isServerLeader()) 1102 // { 1103 // std::list<CMessage> msgs; 1104 1105 // const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1106 // for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1107 // { 1108 // // Use const int to ensure CMessage holds a copy of the value instead of just a reference 1109 // const int begin = serverIndexBegin[*itRank][orderPositionInGrid]; 1110 // const int ni = serverDimensionSizes[*itRank][orderPositionInGrid]; 1111 // const int end = begin + ni - 1; 1112 1113 // msgs.push_back(CMessage()); 1114 // CMessage& msg = msgs.back(); 1115 // msg << this->getId(); 1116 // msg << ni << begin << end; 1117 // msg << global_zoom_begin.getValue() << global_zoom_n.getValue(); 1118 // msg << isCompressible_; 1119 // msg << orderPositionInGrid; 1120 // msg << globalDimGrid; 1121 1122 // event.push(*itRank,1,msg); 1123 // } 1124 // contextClientTmp->sendEvent(event); 1125 // } 1126 // else contextClientTmp->sendEvent(event); 1127 // } 1128 1129 // void CAxis::recvServerAttribut(CEventServer& event) 1130 // { 1131 // CBufferIn* buffer = event.subEvents.begin()->buffer; 1132 // string axisId; 1133 // *buffer >> axisId; 1134 // get(axisId)->recvServerAttribut(*buffer); 1135 
1136 // CContext* context = CContext::getCurrent(); 1137 // if (context->hasClient && context->hasServer) 1138 // { 1139 // std::vector<int> globalDim(get(axisId)->globalDimGrid.numElements()); 1140 // for (int idx = 0; idx < globalDim.size(); ++idx) globalDim[idx] = get(axisId)->globalDimGrid(idx); 1141 // get(axisId)->sendServerAttribut(globalDim, get(axisId)->orderPosInGrid, 1142 // CServerDistributionDescription::BAND_DISTRIBUTION); 1143 // } 1144 // } 1145 1146 // void CAxis::recvServerAttribut(CBufferIn& buffer) 1147 // { 1148 // int ni_srv, begin_srv, end_srv, global_zoom_begin_tmp, global_zoom_n_tmp; 1149 1150 // buffer >> ni_srv >> begin_srv >> end_srv; 1151 // buffer >> global_zoom_begin_tmp >> global_zoom_n_tmp; 1152 // buffer >> isCompressible_; 1153 // buffer >> orderPosInGrid; 1154 // buffer >> globalDimGrid; 1155 1156 // global_zoom_begin = global_zoom_begin_tmp; 1157 // global_zoom_n = global_zoom_n_tmp; 1158 // int global_zoom_end = global_zoom_begin + global_zoom_n - 1; 1159 1160 // zoom_begin_srv = global_zoom_begin > begin_srv ? global_zoom_begin : begin_srv ; 1161 // zoom_end_srv = global_zoom_end < end_srv ? 
global_zoom_end : end_srv ; 1162 // zoom_size_srv = zoom_end_srv - zoom_begin_srv + 1; 1163 1164 // if (zoom_size_srv<=0) 1165 // { 1166 // zoom_begin_srv = 0; zoom_end_srv = 0; zoom_size_srv = 0; 1167 // } 1168 1169 // if (n_glo == n) 1170 // { 1171 // zoom_begin_srv = global_zoom_begin; 1172 // zoom_end_srv = global_zoom_end; //zoom_end; 1173 // zoom_size_srv = zoom_end_srv - zoom_begin_srv + 1; 1174 // } 1175 // if (hasValue) 1176 // { 1177 // value_srv.resize(zoom_size_srv); 1178 // if (hasBounds_) bound_srv.resize(2,zoom_size_srv); 1179 // } 1180 // } 1152 if (doZoomByIndex_) 1153 { 1154 int nbZoomIndex = 0; 1155 for (int idx = 0; idx < nbReceived; ++idx) 1156 { 1157 nbZoomIndex += vec_zoomInd[idx].numElements(); 1158 } 1159 1160 zoom_index.resize(nbZoomIndex); 1161 nbZoomIndex = 0; 1162 for (int idx = 0; idx < nbReceived; ++idx) 1163 { 1164 CArray<int,1>& tmp = vec_zoomInd[idx]; 1165 for (int i = 0; i < tmp.size(); ++i) 1166 { 1167 zoom_index(nbZoomIndex) = tmp(i); 1168 ++nbZoomIndex; 1169 } 1170 } 1171 } 1172 1173 1174 // { 1175 // CContextServer* server = CContext::getCurrent()->server; 1176 // count_write_index_ = zoom_index.numElements(); 1177 // MPI_Scan(&count_write_index_, &start_write_index_, 1, MPI_INT, MPI_SUM, server->intraComm); 1178 // global_write_size_ = start_write_index_; 1179 // start_write_index_ -= count_write_index_; 1180 // local_write_size_ = count_write_index_; 1181 // } 1182 } 1183 1184 void CAxis::recvDistributionAttribute(CEventServer& event) 1185 { 1186 CBufferIn* buffer = event.subEvents.begin()->buffer; 1187 string axisId; 1188 *buffer >> axisId; 1189 get(axisId)->recvDistributionAttribute(*buffer); 1190 } 1191 1192 void CAxis::recvDistributionAttribute(CBufferIn& buffer) 1193 { 1194 int ni_srv, begin_srv, end_srv, global_zoom_begin_tmp, global_zoom_n_tmp; 1195 1196 buffer >> ni_srv >> begin_srv >> end_srv; 1197 buffer >> global_zoom_begin_tmp >> global_zoom_n_tmp; 1198 buffer >> isCompressible_; 1199 buffer >> orderPosInGrid; 
1200 buffer >> globalDimGrid; 1201 1202 n.setValue(ni_srv); 1203 begin.setValue(begin_srv); 1204 global_zoom_begin = global_zoom_begin_tmp; 1205 global_zoom_n = global_zoom_n_tmp; 1206 int global_zoom_end = global_zoom_begin + global_zoom_n - 1; 1207 1208 zoom_begin = global_zoom_begin > begin_srv ? global_zoom_begin : begin_srv ; 1209 zoom_end_srv = global_zoom_end < end_srv ? global_zoom_end : end_srv ; 1210 zoom_n = zoom_end_srv - zoom_begin_srv + 1; 1211 1212 if (zoom_n<=0) 1213 { 1214 zoom_begin = 0; zoom_end_srv = 0; zoom_n = 0; 1215 } 1216 1217 if (n_glo == n) 1218 { 1219 zoom_begin = global_zoom_begin; 1220 zoom_end_srv = global_zoom_end; //zoom_end; 1221 zoom_n = zoom_end_srv - zoom_begin + 1; 1222 } 1223 } 1224 1181 1225 1182 1226 CTransformation<CAxis>* CAxis::addTransformation(ETranformationType transType, const StdString& id) -
XIOS/dev/dev_olga/src/node/axis.hpp
r1025 r1099 45 45 enum EEventId 46 46 { 47 EVENT_ID_ SERVER_ATTRIBUT,47 EVENT_ID_DISTRIBUTION_ATTRIBUTE, 48 48 EVENT_ID_INDEX, 49 49 EVENT_ID_DISTRIBUTED_VALUE, … … 108 108 static ENodeType GetType(void); 109 109 110 // void sendServerAttribut(const std::vector<int>& globalDim, int orderPositionInGrid,111 // CServerDistributionDescription::ServerDistributionType distType);112 110 static bool dispatchEvent(CEventServer& event); 113 static void recv ServerAttribut(CEventServer& event);114 // void recvServerAttribut(CBufferIn& buffer) ;111 static void recvDistributionAttribute(CEventServer& event); 112 void recvDistributionAttribute(CBufferIn& buffer) ; 115 113 void checkAttributesOnClient(); 116 114 void checkAttributesOnClientAfterTransformation(const std::vector<int>& globalDim, int orderPositionInGrid, … … 142 140 void checkZoom(); 143 141 void checkBounds(); 144 void sendAttributes(); 142 void sendAttributes(const std::vector<int>& globalDim, int orderPositionInGrid, 143 CServerDistributionDescription::ServerDistributionType distType); 144 void sendDistributionAttribute(const std::vector<int>& globalDim, int orderPositionInGrid, 145 CServerDistributionDescription::ServerDistributionType distType); 145 146 void computeConnectedServer(const std::vector<int>& globalDim, int orderPositionInGrid, 146 147 CServerDistributionDescription::ServerDistributionType distType); … … 180 181 int global_write_size_; 181 182 183 bool doZoomByIndex_; 184 182 185 private: 183 186 static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); -
XIOS/dev/dev_olga/src/node/context.cpp
r1077 r1099 300 300 std::map<int, StdSize> dataBufferSize = getDataBufferSize(maxEventSize); 301 301 302 302 303 std::map<int, StdSize>::iterator it, ite = dataBufferSize.end(); 303 304 for (it = dataBufferSize.begin(); it != ite; ++it) … … 475 476 if (allProcessed) return; 476 477 477 if (hasClient)478 {478 // if (hasClient) 479 // { 479 480 // After xml is parsed, there are some more works with post processing 480 481 postProcessing(); 481 } 482 483 // Check grid and calculate its distribution 484 checkGridEnabledFields(); 485 486 //} 487 488 482 489 483 490 setClientServerBuffer(); … … 502 509 503 510 // After that, send all grid (if any) 504 sendRefGrid(); 511 sendRefGrid(); 505 512 506 513 // We have a xml tree on the server side and now, it should be also processed 507 514 sendPostProcessing(); 508 } 515 516 sendGridEnabledFields(); 517 } 518 509 519 allProcessed = true; 510 520 } … … 523 533 { 524 534 CMessage msg; 525 msg<<this->getIdServer(); 535 if (hasServer) 536 msg<<this->getIdServer(i); 537 else 538 msg<<this->getIdServer(); 526 539 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 527 540 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) … … 538 551 string id; 539 552 *buffer>>id; 540 //get(id)->recvPostProcessingGlobalAttributes(*buffer);553 get(id)->recvPostProcessingGlobalAttributes(*buffer); 541 554 } 542 555 543 556 void CContext::recvPostProcessingGlobalAttributes(CBufferIn& buffer) 544 { 545 // CCalendarWrapper::get(CCalendarWrapper::GetDefName())->createCalendar(); 557 { 546 558 postProcessingGlobalAttributes(); 547 559 } … … 570 582 } 571 583 572 // if (hasClient) this->solveAllRefOfEnabledFields(true); 573 this->processGridEnabledFields(); 584 // if (hasClient) this->solveAllRefOfEnabledFields(true); 585 checkGridEnabledFields(); 586 // sendGridEnabledFields(); 587 574 588 if (hasClient) this->sendProcessingGridOfEnabledFields(); 575 589 if (hasClient) 
this->sendCloseDefinition(); … … 617 631 } 618 632 619 void CContext:: processGridEnabledFields()633 void CContext::sendGridEnabledFields() 620 634 { 621 635 int size = this->enabledFiles.size(); 622 636 for (int i = 0; i < size; ++i) 623 { 624 this->enabledFiles[i]->checkGridOfEnabledFields(); 637 { 625 638 this->enabledFiles[i]->sendGridOfEnabledFields(); 639 } 640 } 641 642 void CContext::checkGridEnabledFields() 643 { 644 int size = this->enabledFiles.size(); 645 for (int i = 0; i < size; ++i) 646 { 647 this->enabledFiles[i]->checkGridOfEnabledFields(); 626 648 } 627 649 } … … 993 1015 { 994 1016 CMessage msg; 995 msg<<this->getIdServer(); 1017 if (hasServer) 1018 msg<<this->getIdServer(i); 1019 else 1020 msg<<this->getIdServer(); 996 1021 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 997 1022 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) … … 1009 1034 string id; 1010 1035 *buffer>>id; 1011 // get(id)->processGridEnabledFields();1036 //get(id)->solveOnlyRefOfEnabledFields(false); 1012 1037 } 1013 1038 … … 1140 1165 1141 1166 // Only search and rebuild all reference objects of enable fields, don't transform 1142 if (hasClient) this->solveOnlyRefOfEnabledFields(false); 1143 1144 // Search and rebuild all reference object of enabled fields 1145 // if (hasClient) this->solveAllRefOfEnabledFields(false); 1167 this->solveOnlyRefOfEnabledFields(false); 1168 1169 // Search and rebuild all reference object of enabled fields, and transform 1170 this->solveAllEnabledFields(); 1171 1172 // // Check grid and calculate its distribution 1173 // if (hasClient) checkGridEnabledFields(); 1146 1174 1147 1175 // Find all fields with read access from the public API … … 1216 1244 for (size_t j = 0; j < numEnabledFields; ++j) 1217 1245 { 1218 const std::map<int, StdSize> mapSize = enabledFields[j]->getGridDataBufferSize(); 1219 std::map<int, StdSize>::const_iterator it = mapSize.begin(), 
itE = mapSize.end(); 1220 for (; it != itE; ++it) 1246 const std::vector<std::map<int, StdSize> > mapSize = enabledFields[j]->getGridDataBufferSize(); 1247 for (size_t c = 0; c < mapSize.size(); ++c) 1221 1248 { 1222 // If dataSize[it->first] does not exist, it will be zero-initialized 1223 // so we can use it safely without checking for its existance 1224 if (CXios::isOptPerformance) 1225 dataSize[it->first] += it->second; 1226 else if (dataSize[it->first] < it->second) 1227 dataSize[it->first] = it->second; 1228 1229 if (maxEventSize[it->first] < it->second) 1230 maxEventSize[it->first] = it->second; 1249 std::map<int, StdSize>::const_iterator it = mapSize[c].begin(), itE = mapSize[c].end(); 1250 for (; it != itE; ++it) 1251 { 1252 // If dataSize[it->first] does not exist, it will be zero-initialized 1253 // so we can use it safely without checking for its existance 1254 if (CXios::isOptPerformance) 1255 dataSize[it->first] += it->second; 1256 else if (dataSize[it->first] < it->second) 1257 dataSize[it->first] = it->second; 1258 1259 if (maxEventSize[it->first] < it->second) 1260 maxEventSize[it->first] = it->second; 1261 } 1231 1262 } 1232 1263 } -
XIOS/dev/dev_olga/src/node/context.hpp
r1071 r1099 126 126 127 127 void solveAllEnabledFields(); 128 void processGridEnabledFields(); 128 void checkGridEnabledFields(); 129 void sendGridEnabledFields(); 129 130 130 131 std::map<int, StdSize> getAttributesBufferSize(std::map<int, StdSize>& maxEventSize); -
XIOS/dev/dev_olga/src/node/domain.cpp
r1054 r1099 33 33 , hasBounds(false), hasArea(false), isDistributed_(false), isCompressible_(false), isUnstructed_(false) 34 34 , isClientAfterTransformationChecked(false), hasLonLat(false) 35 , isRedistributed_(false), hasPole(false) 35 , isRedistributed_(false), hasPole(false), doZoomByIndex_(false) 36 , lonvalue(), latvalue(), bounds_lonvalue(), bounds_latvalue() 37 , globalLocalIndexMap_() 36 38 { 37 39 } … … 42 44 , hasBounds(false), hasArea(false), isDistributed_(false), isCompressible_(false), isUnstructed_(false) 43 45 , isClientAfterTransformationChecked(false), hasLonLat(false) 44 , isRedistributed_(false), hasPole(false) 46 , isRedistributed_(false), hasPole(false), doZoomByIndex_(false) 47 , lonvalue(), latvalue(), bounds_lonvalue(), bounds_latvalue() 48 , globalLocalIndexMap_() 45 49 { 46 50 } … … 756 760 if (zoom_i_index.isEmpty()) zoom_i_index.setValue(i_index.getValue()); 757 761 if (zoom_j_index.isEmpty()) zoom_j_index.setValue(j_index.getValue()); 762 if (zoom_ibegin.isEmpty()) zoom_ibegin.setValue(ibegin); 763 if (zoom_ni.isEmpty()) zoom_ni.setValue(ni); 764 if (zoom_jbegin.isEmpty()) zoom_jbegin.setValue(jbegin); 765 if (zoom_nj.isEmpty()) zoom_nj.setValue(nj); 758 766 } 759 767 … … 1076 1084 void CDomain::completeLonLatClient(void) 1077 1085 { 1078 if (!lonvalue_2d.isEmpty()) 1086 bool lonlatValueExisted = (0 != lonvalue.numElements()) || (0 != latvalue.numElements()); 1087 if (!lonvalue_2d.isEmpty() && !lonlatValueExisted) 1079 1088 { 1080 1089 lonvalue.resize(ni * nj); … … 1106 1115 } 1107 1116 } 1108 else if (!lonvalue_1d.isEmpty() )1117 else if (!lonvalue_1d.isEmpty() && !lonlatValueExisted) 1109 1118 { 1110 1119 if (type_attr::rectilinear == type) … … 1140 1149 } 1141 1150 } 1142 else if (i_index.numElements() == lonvalue_1d.numElements() && j_index.numElements() == latvalue_1d.numElements() )1151 else if (i_index.numElements() == lonvalue_1d.numElements() && j_index.numElements() == latvalue_1d.numElements() && !lonlatValueExisted) 1143 
1152 { 1144 1153 lonvalue.reference(lonvalue_1d); … … 1159 1168 << i_index.numElements() << " and " << j_index.numElements() << "."); 1160 1169 } 1161 else if (type == type_attr::curvilinear || type == type_attr::unstructured )1170 else if (type == type_attr::curvilinear || type == type_attr::unstructured && !lonlatValueExisted) 1162 1171 { 1163 1172 lonvalue.reference(lonvalue_1d); … … 1174 1183 void CDomain::checkBounds(void) 1175 1184 { 1176 if (!nvertex.isEmpty() && nvertex > 0) 1185 bool hasBoundValues = (0 != bounds_lonvalue.numElements()) || (0 != bounds_latvalue.numElements()); 1186 if (!nvertex.isEmpty() && nvertex > 0 && !hasBoundValues) 1177 1187 { 1178 1188 if (!bounds_lon_1d.isEmpty() && !bounds_lon_2d.isEmpty()) … … 1262 1272 void CDomain::checkArea(void) 1263 1273 { 1274 bool hasAreaValue = (0 != areavalue.numElements()); 1264 1275 hasArea = !area.isEmpty() || !areavalue.isEmpty(); 1265 1276 if (hasArea) … … 1292 1303 hasLonLat = (!latvalue_1d.isEmpty() && !lonvalue_1d.isEmpty()) || 1293 1304 (!latvalue_2d.isEmpty() && !lonvalue_2d.isEmpty()); 1294 if (hasLonLat) 1305 bool hasLonLatValue = (0 != lonvalue.numElements()) || (0 != latvalue.numElements()); 1306 if (hasLonLat && !hasLonLatValue) 1295 1307 { 1296 1308 if (!lonvalue_1d.isEmpty() && !lonvalue_2d.isEmpty()) … … 1443 1455 } 1444 1456 1445 /*!1446 Send distribution from client to other clients1447 Because a client in a level knows correctly the grid distribution of client on the next level1448 it calculates this distribution then sends it to the corresponding clients on the next level1449 */1450 void CDomain::sendDistributionAttributes(void)1451 {1452 CContext* context = CContext::getCurrent();1453 // Use correct context client to send message1454 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;1455 int nbSrvPools = (context->hasServer) ? (context->hasClient ? 
context->clientPrimServer.size() : 0) : 1;1456 for (int p = 0; p < nbSrvPools; ++p)1457 {1458 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[p]1459 : context->client;1460 1461 int nbServer = contextClientTmp->serverSize;1462 std::vector<int> nGlobDomain(2);1463 nGlobDomain[0] = this->ni_glo;1464 nGlobDomain[1] = this->nj_glo;1465 1466 CServerDistributionDescription serverDescription(nGlobDomain, nbServer);1467 if (isUnstructed_) serverDescription.computeServerDistribution(false, 0);1468 else serverDescription.computeServerDistribution(false, 1);1469 1470 std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin();1471 std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes();1472 1473 CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT);1474 if (contextClientTmp->isServerLeader())1475 {1476 std::list<CMessage> msgs;1477 1478 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();1479 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)1480 {1481 // Use const int to ensure CMessage holds a copy of the value instead of just a reference1482 const int ibegin_srv = serverIndexBegin[*itRank][0];1483 const int jbegin_srv = serverIndexBegin[*itRank][1];1484 const int ni_srv = serverDimensionSizes[*itRank][0];1485 const int nj_srv = serverDimensionSizes[*itRank][1];1486 1487 msgs.push_back(CMessage());1488 CMessage& msg = msgs.back();1489 msg << this->getId() ;1490 msg << ni_srv << ibegin_srv << nj_srv << jbegin_srv;1491 msg << isCompressible_;1492 1493 event.push(*itRank,1,msg);1494 }1495 contextClientTmp->sendEvent(event);1496 }1497 else contextClientTmp->sendEvent(event);1498 }1499 }1500 1457 1501 1458 // void CDomain::computeConnectedClients(const std::vector<int>& globalDim, int orderPositionInGrid, … … 1507 1464 { 1508 1465 CContext* context=CContext::getCurrent() ; 1466 1509 1467 // 
int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1510 1468 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; … … 1512 1470 { 1513 1471 CContextClient* client = (0 != context->clientPrimServer.size()) ? context->clientPrimServer[p] : context->client; 1514 int nbServer =client->serverSize;1515 int rank = client->clientRank;1472 int nbServer = client->serverSize; 1473 int rank = client->clientRank; 1516 1474 bool doComputeGlobalIndexServer = true; 1517 1475 1518 1476 int i,j,i_ind,j_ind, nbIndex, nbIndexZoom; 1519 int global_zoom_iend=global_zoom_ibegin+global_zoom_ni-1 1520 int global_zoom_jend=global_zoom_jbegin+global_zoom_nj-1 1477 int global_zoom_iend=global_zoom_ibegin+global_zoom_ni-1; 1478 int global_zoom_jend=global_zoom_jbegin+global_zoom_nj-1; 1521 1479 1522 1480 // Precompute number of index 1523 1481 int globalIndexCountZoom = 0; 1524 1482 nbIndex = i_index.numElements(); 1483 1484 if (doZoomByIndex_) 1485 { 1486 globalIndexCountZoom = zoom_i_index.numElements(); 1487 } 1488 else 1489 { 1490 for (i = 0; i < nbIndex; ++i) 1491 { 1492 i_ind=i_index(i); 1493 j_ind=j_index(i); 1494 1495 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1496 { 1497 ++globalIndexCountZoom; 1498 } 1499 } 1500 } 1501 1525 1502 // for (i = 0; i < nbIndex; ++i) 1526 1503 // { … … 1554 1531 // Fill in index 1555 1532 1533 CArray<size_t,1> globalIndexDomainZoom(globalIndexCountZoom); 1556 1534 CArray<size_t,1> localIndexDomainZoom(globalIndexCountZoom); 1557 1535 CArray<size_t,1> globalIndexDomain(nbIndex); … … 1559 1537 int globalIndexCount = 0; 1560 1538 1561 1562 1539 for (i = 0; i < nbIndex; ++i) 1563 1540 { … … 1565 1542 j_ind=j_index(i); 1566 1543 globalIndex = i_ind + j_ind * ni_glo; 1567 globalIndexDomain(globalIndexCount) = globalIndex; 1568 globalLocalIndexMap_[globalIndex] = i; 1569 ++globalIndexCount; 1570 } 1571 1572 
nbIndexZoom = zoom_i_index.numElements(); 1573 CArray<size_t,1> globalIndexDomainZoom(nbIndexZoom); 1544 globalIndexDomain(i) = globalIndex; 1545 } 1546 1547 if (globalLocalIndexMap_.empty()) 1548 { 1549 for (i = 0; i < nbIndex; ++i) 1550 globalLocalIndexMap_[globalIndexDomain(i)] = i; 1551 1552 } 1553 1574 1554 globalIndexCountZoom = 0; 1575 for (i = 0; i < nbIndexZoom; ++i) 1576 { 1577 i_ind=zoom_i_index(i); 1578 j_ind=zoom_j_index(i); 1579 globalIndex = i_ind + j_ind * ni_glo; 1580 globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1581 1582 ++globalIndexCountZoom; 1583 // if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1584 // { 1585 // globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1586 // localIndexDomainZoom(globalIndexCountZoom) = i; 1587 // ++globalIndexCountZoom; 1588 // } 1589 } 1555 if (doZoomByIndex_) 1556 { 1557 int nbIndexZoom = zoom_i_index.numElements(); 1558 1559 for (i = 0; i < nbIndexZoom; ++i) 1560 { 1561 i_ind=zoom_i_index(i); 1562 j_ind=zoom_j_index(i); 1563 globalIndex = i_ind + j_ind * ni_glo; 1564 globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1565 ++globalIndexCountZoom; 1566 // if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1567 // { 1568 // globalIndexDomainZoom(globalIndexCountZoom) = globalIndex; 1569 // localIndexDomainZoom(globalIndexCountZoom) = i; 1570 // ++globalIndexCountZoom; 1571 // } 1572 } 1573 } 1574 else 1575 { 1576 int global_zoom_iend=global_zoom_ibegin+global_zoom_ni-1; 1577 int global_zoom_jend=global_zoom_jbegin+global_zoom_nj-1; 1578 for (i = 0; i < nbIndex; ++i) 1579 { 1580 i_ind=i_index(i); 1581 j_ind=j_index(i); 1582 globalIndex = i_ind + j_ind * ni_glo; 1583 if (i_ind >= global_zoom_ibegin && i_ind <= global_zoom_iend && j_ind >= global_zoom_jbegin && j_ind <= global_zoom_jend) 1584 { 1585 globalIndexDomainZoom(globalIndexCountZoom) = 
globalIndex; 1586 ++globalIndexCountZoom; 1587 } 1588 } 1589 1590 int iend = ibegin + ni -1; 1591 int jend = jbegin + nj -1; 1592 zoom_ibegin = global_zoom_ibegin > ibegin ? global_zoom_ibegin : ibegin; 1593 int zoom_iend = global_zoom_iend < iend ? zoom_iend : iend ; 1594 zoom_ni = zoom_iend-zoom_ibegin+1 ; 1595 1596 zoom_jbegin = global_zoom_jbegin > jbegin ? global_zoom_jbegin : jbegin ; 1597 int zoom_jend = global_zoom_jend < jend ? zoom_jend : jend; 1598 zoom_nj = zoom_jend-zoom_jbegin+1; 1599 } 1600 1590 1601 1591 1602 // CArray<int,1> globalIndexWrittenDomain(globalIndexWrittenCount); … … 1679 1690 for (it = globalIndexDomainOnServer.begin(); it != ite; ++it) { 1680 1691 connectedServerRank_.push_back(it->first); 1681 std::vector<size_t> vec = it->second;1682 std::sort(vec.begin(), vec.end());1683 indSrv_[it->first] = vec;1684 } 1685 1686 //indSrv_.swap(globalIndexDomainOnServer);1692 // std::vector<size_t> vec = it->second; 1693 // std::sort(vec.begin(), vec.end()); 1694 // indSrv_[it->first] = vec; 1695 } 1696 1697 indSrv_.swap(globalIndexDomainOnServer); 1687 1698 nbConnectedClients_ = clientServerMap->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_); 1688 1699 … … 1690 1701 CClientServerMapping::GlobalIndexMap& globalIndexDomainZoomOnServer = clientServerMap->getGlobalIndexOnServer(); 1691 1702 // indZoomSrv_.swap(globalIndexDomainZoomOnServer); 1692 std::vector<int> connectedServerZoomRank (indZoomSrv_.size());1703 std::vector<int> connectedServerZoomRank; //(indZoomSrv_.size()); 1693 1704 // for (it = indZoomSrv_.begin(); it != indZoomSrv_.end(); ++it) 1694 1705 // connectedServerZoomRank.push_back(it->first); … … 1719 1730 sendDistributionAttributes(); 1720 1731 sendIndex(); 1732 sendIndex(); 1721 1733 sendMask(); 1722 1734 sendLonLat(); … … 1733 1745 int ns, n, i, j, ind, nv, idx; 1734 1746 CContext* context = CContext::getCurrent(); 1747 1735 1748 // int nbSrvPools = (context->hasServer) ? 
context->clientPrimServer.size() : 1; 1736 1749 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; … … 1776 1789 list_msgsIndex.back() << this->getId() << (int)type; // enum ne fonctionne pour les message => ToFix 1777 1790 list_msgsIndex.back() << isCurvilinear; 1778 list_msgsIndex.back() << list_indGlob.back() << list_indZoom.back(); //list_indi.back() << list_indj.back(); 1779 1791 list_msgsIndex.back() << list_indGlob.back() << list_indZoom.back() << doZoomByIndex_; //list_indi.back() << list_indj.back(); 1792 if (!doZoomByIndex_) 1793 { 1794 //list_msgsIndex.back() << zoom_ni.getValue() << zoom_ibegin.getValue() << zoom_nj.getValue() << zoom_jbegin.getValue(); 1795 } 1796 1780 1797 // if (isCompressible_) 1781 1798 // { … … 1798 1815 1799 1816 /*! 1817 Send distribution from client to other clients 1818 Because a client in a level knows correctly the grid distribution of client on the next level 1819 it calculates this distribution then sends it to the corresponding clients on the next level 1820 */ 1821 void CDomain::sendDistributionAttributes(void) 1822 { 1823 CContext* context = CContext::getCurrent(); 1824 // Use correct context client to send message 1825 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1826 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 1827 for (int i = 0; i < nbSrvPools; ++i) 1828 { 1829 CContextClient* contextClientTmp = (context->hasServer) ? 
context->clientPrimServer[i] 1830 : context->client; 1831 int nbServer = contextClientTmp->serverSize; 1832 std::vector<int> nGlobDomain(2); 1833 nGlobDomain[0] = this->ni_glo; 1834 nGlobDomain[1] = this->nj_glo; 1835 1836 CServerDistributionDescription serverDescription(nGlobDomain, nbServer); 1837 if (isUnstructed_) serverDescription.computeServerDistribution(false, 0); 1838 else serverDescription.computeServerDistribution(false, 1); 1839 1840 std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin(); 1841 std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes(); 1842 1843 CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT); 1844 if (contextClientTmp->isServerLeader()) 1845 { 1846 std::list<CMessage> msgs; 1847 1848 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 1849 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1850 { 1851 // Use const int to ensure CMessage holds a copy of the value instead of just a reference 1852 const int ibegin_srv = serverIndexBegin[*itRank][0]; 1853 const int jbegin_srv = serverIndexBegin[*itRank][1]; 1854 const int ni_srv = serverDimensionSizes[*itRank][0]; 1855 const int nj_srv = serverDimensionSizes[*itRank][1]; 1856 1857 msgs.push_back(CMessage()); 1858 CMessage& msg = msgs.back(); 1859 msg << this->getId() ; 1860 msg << ni_srv << ibegin_srv << nj_srv << jbegin_srv; 1861 msg << global_zoom_ni.getValue() << global_zoom_ibegin.getValue() << global_zoom_nj.getValue() << global_zoom_jbegin.getValue(); 1862 msg << isCompressible_; 1863 1864 event.push(*itRank,1,msg); 1865 } 1866 contextClientTmp->sendEvent(event); 1867 } 1868 else contextClientTmp->sendEvent(event); 1869 } 1870 } 1871 1872 /*! 
1800 1873 Send mask index from client to connected(s) 1801 1874 */ … … 1804 1877 int ns, n, i, j, ind, nv, idx; 1805 1878 CContext* context = CContext::getCurrent(); 1879 1806 1880 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1807 1881 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; … … 1851 1925 int ns, n, i, j, ind, nv, idx; 1852 1926 CContext* context = CContext::getCurrent(); 1927 1853 1928 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1854 1929 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; … … 1901 1976 int ns, n, i, j, ind, nv, idx; 1902 1977 CContext* context = CContext::getCurrent(); 1978 1903 1979 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1904 1980 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; … … 1986 2062 int ns, n, i, j, ind, nv, idx; 1987 2063 CContext* context = CContext::getCurrent(); 2064 1988 2065 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1989 2066 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; … … 2085 2162 2086 2163 /*! 
2087 Receive attributes event from clients(s)2088 \param[in] event event contain info about rank and associated attributes2089 */2090 void CDomain::recvDistributionAttributes(CEventServer& event)2091 {2092 CBufferIn* buffer=event.subEvents.begin()->buffer;2093 string domainId ;2094 *buffer>>domainId ;2095 get(domainId)->recvDistributionAttributes(*buffer) ;2096 }2097 2098 /*!2099 Receive attributes from client(s): zoom info and begin and n of each server2100 \param[in] rank rank of client source2101 \param[in] buffer message containing attributes info2102 */2103 void CDomain::recvDistributionAttributes(CBufferIn& buffer)2104 {2105 int ni_tmp, ibegin_tmp, nj_tmp, jbegin_tmp;2106 buffer >> ni_tmp >> ibegin_tmp >> nj_tmp >> jbegin_tmp2107 >> isCompressible_;2108 ni.setValue(ni_tmp);2109 ibegin.setValue(ibegin_tmp);2110 nj.setValue(nj_tmp);2111 jbegin.setValue(jbegin_tmp);2112 }2113 2114 /*!2115 2164 Receive index event from clients(s) 2116 2165 \param[in] event event contain info about rank and associated index … … 2151 2200 recvClientRanks_.resize(nbReceived); 2152 2201 vector<CArray<int,1> > recvZoomInd(nbReceived); 2202 int ni_zoom_tmp, ibegin_zoom_tmp, nj_zoom_tmp, jbegin_zoom_tmp; 2153 2203 2154 2204 std::map<int, CBufferIn*>::iterator it = rankBuffers.begin(), ite = rankBuffers.end(); … … 2158 2208 recvClientRanks_[ind] = it->first; 2159 2209 CBufferIn& buffer = *(it->second); 2160 buffer >> type_int >> isCurvilinear >> indGlob_[it->first] >> recvZoomInd[ind]; //recvIndGlob[ind]; 2210 buffer >> type_int >> isCurvilinear >> indGlob_[it->first] >> recvZoomInd[ind] >> doZoomByIndex_; 2211 if (!doZoomByIndex_) 2212 { 2213 //buffer >> ni_zoom_tmp >> ibegin_zoom_tmp >> nj_zoom_tmp >> jbegin_zoom_tmp; 2214 } 2161 2215 type.setValue((type_attr::t_enum)type_int); // probleme des type enum avec les buffers : ToFix 2162 2216 } … … 2189 2243 } 2190 2244 2191 zoom_i_index.resize(nbZoomInd); 2192 zoom_j_index.resize(nbZoomInd); 2245 if (doZoomByIndex_) 2246 { 2247 
zoom_i_index.resize(nbZoomInd); 2248 zoom_j_index.resize(nbZoomInd); 2249 2250 nbZoomInd = 0; 2251 for (i = 0; i < nbReceived; ++i) 2252 { 2253 CArray<int,1>& tmp = recvZoomInd[i]; 2254 for (ind = 0; ind < tmp.numElements(); ++ind) 2255 { 2256 index = tmp(ind); 2257 zoom_i_index(nbZoomInd) = index % ni_glo; 2258 zoom_j_index(nbZoomInd) = index / ni_glo; 2259 ++nbZoomInd; 2260 } 2261 } 2262 } 2263 else 2264 { 2265 // zoom_ni.setValue(ni_zoom_tmp); 2266 // zoom_ibegin.setValue(ibegin_zoom_tmp); 2267 // zoom_nj.setValue(nj_zoom_tmp); 2268 // zoom_jbegin.setValue(jbegin_zoom_tmp); 2269 // int nbZoom = ni_zoom_tmp * nj_zoom_tmp; 2270 // zoom_i_index.resize(nbZoom); 2271 // zoom_j_index.resize(nbZoom); 2272 // nbZoom = 0; 2273 // for (int j = 0; j < nj_zoom_tmp; ++j) 2274 // for (int i = 0; i < ni_zoom_tmp; ++i) 2275 // { 2276 // zoom_i_index(nbZoom) = ibegin_zoom_tmp + i; 2277 // zoom_j_index(nbZoom) = jbegin_zoom_tmp + j; 2278 // ++nbZoom; 2279 // } 2280 } 2281 2282 globalLocalIndexMap_.rehash(std::ceil(nbIndGlob/globalLocalIndexMap_.max_load_factor())); 2283 nbIndGlob = 0; 2284 for (int j = 0; j < nj; ++j) 2285 for (int i = 0; i < ni; ++i) 2286 { 2287 globalLocalIndexMap_[(i + ibegin) + (j + jbegin) * ni_glo] = nbIndGlob; 2288 ++nbIndGlob; 2289 } 2290 2291 2193 2292 2194 nbZoomInd = 0; 2195 for (i = 0; i < nbReceived; ++i) 2196 { 2197 CArray<int,1>& tmp = recvZoomInd[i]; 2198 for (ind = 0; ind < tmp.numElements(); ++ind) 2199 { 2200 index = tmp(ind); 2201 zoom_i_index(nbZoomInd) = index % ni_glo; 2202 zoom_j_index(nbZoomInd) = index / ni_glo; 2203 ++nbZoomInd; 2204 } 2205 } 2206 2207 { 2208 CContextServer* server = CContext::getCurrent()->server; 2209 count_write_index_.resize(2); 2210 start_write_index_.resize(2); 2211 local_write_size_.resize(2); 2212 global_write_size_.resize(2); 2213 if ((this->type) == CDomain::type_attr::unstructured) 2214 { 2215 count_write_index_[0] = zoom_i_index.numElements(); 2216 count_write_index_[1] = 0; 2217 } 2218 else 2219 { 2220 int 
ni_zoom = zoom_i_index.numElements(), idx, nbIZoom = 0, nbJZoom = 0; 2221 for (idx =0; idx < ni_zoom; ++idx) 2222 { 2223 if ((ibegin <= zoom_i_index(idx)) && (zoom_i_index(idx) < ibegin+ni) && (nbIZoom < ni)) 2224 ++nbIZoom; 2225 if ((jbegin <= zoom_j_index(idx)) && (zoom_j_index(idx) < jbegin+nj) && (nbJZoom < nj)) 2226 ++nbJZoom; 2227 } 2228 count_write_index_[0] = nbIZoom; 2229 count_write_index_[1] = nbJZoom; 2230 2231 // Reoder the zoom_index 2232 for (int j = 0; j < nbJZoom; ++j) 2233 for (int i = 0; i < nbIZoom; ++i) 2234 { 2235 idx = nbIZoom * j + i; 2236 if (idx < ni_zoom) 2237 { 2238 zoom_i_index(idx) = ibegin + i; 2239 zoom_j_index(idx) = jbegin + j; 2240 } 2241 } 2242 2243 // Reorder the global index 2244 for (int j = 0; j < nj; ++j) 2245 for (int i = 0; i < ni; ++i) 2246 { 2247 idx = ni * j + i; 2248 if (idx < nbIndGlob) 2249 { 2250 i_index(idx) = ibegin + i; 2251 j_index(idx) = jbegin + j; 2252 } 2253 } 2254 } 2293 // { 2294 // CContextServer* server = CContext::getCurrent()->server; 2295 // count_write_index_.resize(2); 2296 // start_write_index_.resize(2); 2297 // local_write_size_.resize(2); 2298 // global_write_size_.resize(2); 2299 // if ((this->type) == CDomain::type_attr::unstructured) 2300 // { 2301 // count_write_index_[0] = zoom_i_index.numElements(); 2302 // count_write_index_[1] = 0; 2303 // } 2304 // else 2305 // { 2306 // int ni_zoom = zoom_i_index.numElements(), idx, nbIZoom = 0, nbJZoom = 0; 2307 // for (idx =0; idx < ni_zoom; ++idx) 2308 // { 2309 // if ((ibegin <= zoom_i_index(idx)) && (zoom_i_index(idx) < ibegin+ni) && (nbIZoom < ni)) 2310 // ++nbIZoom; 2311 // if ((jbegin <= zoom_j_index(idx)) && (zoom_j_index(idx) < jbegin+nj) && (nbJZoom < nj)) 2312 // ++nbJZoom; 2313 // } 2314 // count_write_index_[0] = nbIZoom; 2315 // count_write_index_[1] = nbJZoom; 2316 2317 // // Reoder the zoom_index 2318 // for (int j = 0; j < nbJZoom; ++j) 2319 // for (int i = 0; i < nbIZoom; ++i) 2320 // { 2321 // idx = nbIZoom * j + i; 2322 // if (idx 
< ni_zoom) 2323 // { 2324 // zoom_i_index(idx) = ibegin + i; 2325 // zoom_j_index(idx) = jbegin + j; 2326 // } 2327 // } 2328 2329 // // Reorder the global index 2330 // for (int j = 0; j < nj; ++j) 2331 // for (int i = 0; i < ni; ++i) 2332 // { 2333 // idx = ni * j + i; 2334 // if (idx < nbIndGlob) 2335 // { 2336 // i_index(idx) = ibegin + i; 2337 // j_index(idx) = jbegin + j; 2338 // } 2339 // } 2340 // } 2341 2255 2342 2256 MPI_Scan(&count_write_index_[0], &start_write_index_[0], 2, MPI_INT, MPI_SUM, server->intraComm); 2257 if ((this->type) != CDomain::type_attr::unstructured) 2258 { 2259 start_write_index_[0] = 0; 2260 start_write_index_[1] -= count_write_index_[1]; 2261 } 2262 else 2263 { 2264 start_write_index_[0] -= count_write_index_[0]; 2265 } 2266 local_write_size_[0] = count_write_index_[0]; 2267 local_write_size_[1] = count_write_index_[1]; 2268 MPI_Allreduce(&count_write_index_[0], &global_write_size_[0], 2, MPI_INT, MPI_SUM, server->intraComm); 2269 if ((this->type) != CDomain::type_attr::unstructured) 2270 { 2271 global_write_size_[0] = count_write_index_[0]; 2272 global_write_size_[1] = (global_write_size_[1] > nj_glo) ? nj_glo : global_write_size_[1]; 2273 } 2274 } 2343 // MPI_Scan(&count_write_index_[0], &start_write_index_[0], 2, MPI_INT, MPI_SUM, server->intraComm); 2344 // start_write_index_[0] = 0; 2345 // start_write_index_[1] -= count_write_index_[1]; 2346 // local_write_size_[0] = count_write_index_[0]; 2347 // local_write_size_[1] = count_write_index_[1]; 2348 // MPI_Allreduce(&count_write_index_[0], &global_write_size_[0], 2, MPI_INT, MPI_SUM, server->intraComm); 2349 // global_write_size_[0] = count_write_index_[0]; 2350 // global_write_size_[1] = (global_write_size_[1] > nj_glo) ? nj_glo : global_write_size_[1]; 2351 2352 2353 // } 2275 2354 2276 2355 // int type_int; … … 2289 2368 2290 2369 /*! 
2370 Receive attributes event from clients(s) 2371 \param[in] event event contain info about rank and associated attributes 2372 */ 2373 void CDomain::recvDistributionAttributes(CEventServer& event) 2374 { 2375 CBufferIn* buffer=event.subEvents.begin()->buffer; 2376 string domainId ; 2377 *buffer>>domainId ; 2378 get(domainId)->recvDistributionAttributes(*buffer); 2379 } 2380 2381 /*! 2382 Receive attributes from client(s): zoom info and begin and n of each server 2383 \param[in] rank rank of client source 2384 \param[in] buffer message containing attributes info 2385 */ 2386 void CDomain::recvDistributionAttributes(CBufferIn& buffer) 2387 { 2388 int ni_tmp, ibegin_tmp, nj_tmp, jbegin_tmp; 2389 int global_zoom_ni_tmp, global_zoom_ibegin_tmp, global_zoom_nj_tmp, global_zoom_jbegin_tmp; 2390 buffer >> ni_tmp >> ibegin_tmp >> nj_tmp >> jbegin_tmp 2391 >> global_zoom_ni_tmp >> global_zoom_ibegin_tmp >> global_zoom_nj_tmp >> global_zoom_jbegin_tmp 2392 >> isCompressible_; 2393 ni.setValue(ni_tmp); 2394 ibegin.setValue(ibegin_tmp); 2395 nj.setValue(nj_tmp); 2396 jbegin.setValue(jbegin_tmp); 2397 2398 global_zoom_ni.setValue(global_zoom_ni_tmp); 2399 global_zoom_ibegin.setValue(global_zoom_ibegin_tmp); 2400 global_zoom_nj.setValue(global_zoom_nj_tmp); 2401 global_zoom_jbegin.setValue(global_zoom_jbegin_tmp); 2402 2403 int iend = ibegin + ni - 1; 2404 int jend = jbegin + nj - 1; 2405 int zoom_iend_glob = global_zoom_ibegin + global_zoom_ni - 1; 2406 int zoom_jend_glob = global_zoom_jbegin + global_zoom_nj - 1; 2407 2408 zoom_ibegin.setValue(global_zoom_ibegin > ibegin ? global_zoom_ibegin : ibegin); 2409 int zoom_iend = zoom_iend_glob < iend ? zoom_iend_glob : iend ; 2410 zoom_ni.setValue(zoom_iend-zoom_ibegin+1); 2411 2412 zoom_jbegin.setValue(global_zoom_jbegin > jbegin ? global_zoom_jbegin : jbegin); 2413 int zoom_jend = zoom_jend_glob < jend ? 
zoom_jend_glob : jend ; 2414 zoom_nj.setValue(zoom_jend-zoom_jbegin+1); 2415 2416 if (zoom_ni<=0 || zoom_nj<=0) 2417 { 2418 zoom_ibegin=0 ; zoom_iend=0 ; zoom_ni=0 ; 2419 zoom_jbegin=0 ; zoom_jend=0 ; zoom_nj=0 ; 2420 } 2421 2422 } 2423 2424 /*! 2291 2425 Receive area event from clients(s) 2292 2426 \param[in] event event contain info about rank and associated area … … 2372 2506 void CDomain::recvLon(std::map<int, CBufferIn*>& rankBuffers) 2373 2507 { 2374 int nbReceived = rankBuffers.size(), i, ind, index ;2508 int nbReceived = rankBuffers.size(), i, ind, index, iindex, jindex; 2375 2509 if (nbReceived != recvClientRanks_.size()) 2376 2510 ERROR("void CDomain::recvLon(std::map<int, CBufferIn*>& rankBuffers)", … … 2391 2525 } 2392 2526 2393 int nbLonInd = 0; 2527 lonvalue.resize(zoom_ni*zoom_nj); 2528 lonvalue = 0; 2529 2530 if (hasBounds) 2531 { 2532 bounds_lonvalue.resize(nvertex,zoom_ni*zoom_nj); 2533 bounds_lonvalue = 0.; 2534 } 2535 2394 2536 for (i = 0; i < nbReceived; ++i) 2395 2537 { 2396 nbLonInd += recvLonValue[i].numElements(); 2397 } 2398 2399 lonvalue.resize(nbLonInd); 2400 if (hasBounds) 2401 { 2402 bounds_lonvalue.resize(nvertex, nbLonInd); 2403 } 2404 2405 nbLonInd = 0; 2406 for (i = 0; i < nbReceived; ++i) 2407 { 2408 CArray<double,1>& tmp = recvLonValue[i]; 2409 for (ind = 0; ind < tmp.numElements(); ++ind) 2410 { 2411 lonvalue(nbLonInd) = tmp(ind); 2538 int rank = recvClientRanks_[i]; 2539 CArray<int,1> &indi = indGlob_[rank], &indj = indGlob_[rank]; 2540 for (ind = 0; ind < indi.numElements(); ++ind) 2541 { 2542 iindex = indi(ind) % ni_glo; jindex = indj(ind) / ni_glo; 2543 index = (iindex - zoom_ibegin) + (jindex - zoom_jbegin) * zoom_ni; 2544 lonvalue(index) = recvLonValue[i](ind); 2412 2545 if (hasBounds) 2413 { 2414 CArray<double,2>& tmpBnds = recvBoundsLonValue[i]; 2546 { 2415 2547 for (int nv = 0; nv < nvertex; ++nv) 2416 bounds_lonvalue(nv, nbLonInd) = tmpBnds(nv, ind); 2417 } 2418 ++nbLonInd; 2548 bounds_lonvalue(nv, index) = 
recvBoundsLonValue[i](nv, ind); 2549 } 2419 2550 } 2420 2551 } … … 2446 2577 void CDomain::recvLat(std::map<int, CBufferIn*>& rankBuffers) 2447 2578 { 2448 int nbReceived = rankBuffers.size(), i, ind, index ;2579 int nbReceived = rankBuffers.size(), i, ind, index, iindex, jindex; 2449 2580 if (nbReceived != recvClientRanks_.size()) 2450 2581 ERROR("void CDomain::recvLat(std::map<int, CBufferIn*>& rankBuffers)", … … 2465 2596 } 2466 2597 2467 int nbLatInd = 0; 2598 latvalue.resize(zoom_ni*zoom_nj); 2599 latvalue = 0; 2600 2601 if (hasBounds) 2602 { 2603 bounds_latvalue.resize(nvertex,zoom_ni*zoom_nj); 2604 bounds_latvalue = 0. ; 2605 } 2606 2468 2607 for (i = 0; i < nbReceived; ++i) 2469 2608 { 2470 nbLatInd += recvLatValue[i].numElements(); 2471 } 2472 2473 latvalue.resize(nbLatInd); 2474 if (hasBounds) 2475 { 2476 bounds_latvalue.resize(nvertex, nbLatInd); 2477 } 2478 2479 nbLatInd = 0; 2480 for (i = 0; i < nbReceived; ++i) 2481 { 2482 CArray<double,1>& tmp = recvLatValue[i]; 2483 for (ind = 0; ind < tmp.numElements(); ++ind) 2484 { 2485 latvalue(nbLatInd) = tmp(ind); 2609 int rank = recvClientRanks_[i]; 2610 CArray<int,1> &indi = indGlob_[rank], &indj = indGlob_[rank]; 2611 CArray<double,1>& lat = recvLatValue[i]; 2612 for (ind = 0; ind < indi.numElements(); ++ind) 2613 { 2614 iindex = indi(ind) % ni_glo; jindex = indj(ind) / ni_glo; 2615 index = (iindex - zoom_ibegin) + (jindex - zoom_jbegin) * zoom_ni; 2616 latvalue(index) = lat(ind); 2486 2617 if (hasBounds) 2487 2618 { 2488 CArray<double,2>& tmpBnds= recvBoundsLatValue[i];2619 CArray<double,2>& boundslat = recvBoundsLatValue[i]; 2489 2620 for (int nv = 0; nv < nvertex; ++nv) 2490 bounds_latvalue(nv, nbLatInd) = tmpBnds(nv, ind); 2491 } 2492 ++nbLatInd; 2621 bounds_latvalue(nv, index) = boundslat(nv, ind); 2622 } 2493 2623 } 2494 2624 } -
XIOS/dev/dev_olga/src/node/domain.hpp
r1025 r1099 201 201 std::vector<int> global_write_size_; 202 202 203 bool doZoomByIndex_; 203 204 bool isChecked; 204 205 std::set<StdString> relFiles, relFilesCompressed; -
XIOS/dev/dev_olga/src/node/field.cpp
r1071 r1099 125 125 // CContextClient* client = context->client; 126 126 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 127 // int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 128 // for (int i = 0; i < nbSrvPools; ++i) 129 // { 130 // CContextClient* client = (!context->hasServer) ? context->client : context->clientPrimServer[i]; 127 // int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 128 // for (int i = 0; i < nbSrvPools; ++i) 129 // { 131 130 CContextClient* client = (!context->hasServer) ? context->client : this->file->getContextClient(); 132 131 … … 173 172 174 173 list_msg.back() << getId() << data_tmp; 175 event.push(rank, grid->nbSenders[ rank], list_msg.back());174 event.push(rank, grid->nbSenders[0][rank], list_msg.back()); 176 175 } 177 176 client->sendEvent(event); 178 177 } 179 //}178 // } 180 179 181 180 CTimer::get("XIOS Send Data").suspend(); 182 181 } 183 182 184 /* 185 void CField::sendUpdateData(const CArray<double,1>& data, CContextClient* client) 186 { 187 CTimer::get("XIOS Send Data").resume(); 188 189 CEventClient event(getType(), EVENT_ID_UPDATE_DATA); 190 191 map<int, CArray<int,1> >::iterator it; 192 list<CMessage> list_msg; 193 list<CArray<double,1> > list_data; 194 195 if (!grid->doGridHaveDataDistributed()) 196 { 197 if (client->isServerLeader()) 198 { 199 for (it = grid->storeIndex_toSrv.begin(); it != grid->storeIndex_toSrv.end(); it++) 200 { 201 int rank = it->first; 202 CArray<int,1>& index = it->second; 203 204 list_msg.push_back(CMessage()); 205 list_data.push_back(CArray<double,1>(index.numElements())); 206 207 CArray<double,1>& data_tmp = list_data.back(); 208 for (int n = 0; n < data_tmp.numElements(); n++) data_tmp(n) = data(index(n)); 209 210 list_msg.back() << getId() << data_tmp; 211 event.push(rank, 1, list_msg.back()); 212 } 213 client->sendEvent(event); 214 } 215 else 
client->sendEvent(event); 216 } 217 else 218 { 219 for (it = grid->storeIndex_toSrv.begin(); it != grid->storeIndex_toSrv.end(); it++) 220 { 221 int rank = it->first; 222 CArray<int,1>& index = it->second; 223 224 list_msg.push_back(CMessage()); 225 list_data.push_back(CArray<double,1>(index.numElements())); 226 227 CArray<double,1>& data_tmp = list_data.back(); 228 for (int n = 0; n < data_tmp.numElements(); n++) data_tmp(n) = data(index(n)); 229 230 list_msg.back() << getId() << data_tmp; 231 event.push(rank, grid->nbSenders[rank], list_msg.back()); 232 } 233 client->sendEvent(event); 234 } 235 CTimer::get("XIOS Send Data").suspend(); 236 } 237 */ 183 // void CField::sendUpdateData(const CArray<double,1>& data, CContextClient* client) 184 // { 185 // CTimer::get("XIOS Send Data").resume(); 186 187 // CEventClient event(getType(), EVENT_ID_UPDATE_DATA); 188 189 // map<int, CArray<int,1> >::iterator it; 190 // list<CMessage> list_msg; 191 // list<CArray<double,1> > list_data; 192 193 // if (!grid->doGridHaveDataDistributed()) 194 // { 195 // if (client->isServerLeader()) 196 // { 197 // for (it = grid->storeIndex_toSrv.begin(); it != grid->storeIndex_toSrv.end(); it++) 198 // { 199 // int rank = it->first; 200 // CArray<int,1>& index = it->second; 201 202 // list_msg.push_back(CMessage()); 203 // list_data.push_back(CArray<double,1>(index.numElements())); 204 205 // CArray<double,1>& data_tmp = list_data.back(); 206 // for (int n = 0; n < data_tmp.numElements(); n++) data_tmp(n) = data(index(n)); 207 208 // list_msg.back() << getId() << data_tmp; 209 // event.push(rank, 1, list_msg.back()); 210 // } 211 // client->sendEvent(event); 212 // } 213 // else client->sendEvent(event); 214 // } 215 // else 216 // { 217 // for (it = grid->storeIndex_toSrv.begin(); it != grid->storeIndex_toSrv.end(); it++) 218 // { 219 // int rank = it->first; 220 // CArray<int,1>& index = it->second; 221 222 // list_msg.push_back(CMessage()); 223 // 
list_data.push_back(CArray<double,1>(index.numElements())); 224 225 // CArray<double,1>& data_tmp = list_data.back(); 226 // for (int n = 0; n < data_tmp.numElements(); n++) data_tmp(n) = data(index(n)); 227 228 // list_msg.back() << getId() << data_tmp; 229 // event.push(rank, grid->nbSenders[rank], list_msg.back()); 230 // } 231 // client->sendEvent(event); 232 // } 233 // CTimer::get("XIOS Send Data").suspend(); 234 // } 235 238 236 void CField::recvUpdateData(CEventServer& event) 239 237 { … … 263 261 { 264 262 sizeData += it->second.numElements(); 263 data_srv.insert(std::make_pair(it->first, CArray<double,1>(it->second.numElements()))); 265 264 } 266 265 … … 271 270 272 271 CArray<double,1> recv_data_tmp(recvDataSrv.numElements()); 273 sizeData = 0;272 // sizeData = 0; 274 273 const CDate& currDate = context->getCalendar()->getCurrentDate(); 275 const CDate opeDate 274 const CDate opeDate = last_operation_srv +freq_op + freq_operation_srv - freq_op; 276 275 277 276 if (opeDate <= currDate) … … 279 278 for (map<int, CArray<size_t, 1> >::iterator it = grid->outIndexFromClient.begin(); it != grid->outIndexFromClient.end(); ++it) 280 279 { 281 CArray<double,1> tmp; 280 CArray<double,1> tmp; 281 CArray<size_t,1>& indexTmp = it->second; 282 282 *(rankBuffers[it->first]) >> tmp; 283 recv_data_tmp(Range(sizeData,sizeData+it->second.numElements()-1)) = tmp; 284 sizeData += it->second.numElements(); 283 for (int idx = 0; idx < indexTmp.numElements(); ++idx) 284 { 285 recv_data_tmp(indexTmp(idx)) = tmp(idx); 286 } 287 // recv_data_tmp(Range(sizeData,sizeData+it->second.numElements()-1)) = tmp; 288 // sizeData += it->second.numElements(); 285 289 } 286 290 } … … 525 529 else 526 530 msg << int(-1); 527 event.push(it->first, grid->nbSenders[ it->first], msg);531 event.push(it->first, grid->nbSenders[0][it->first], msg); 528 532 } 529 533 client->sendEvent(event); … … 924 928 } 925 929 926 std:: map<int, StdSize> CField::getGridDataBufferSize()930 std::vector<std::map<int, 
StdSize> > CField::getGridDataBufferSize() 927 931 { 928 932 return grid->getDataBufferSize(getId()); … … 1424 1428 void CField::outputField(CArray<double,1>& fieldOut) 1425 1429 { 1426 // map<int, CArray<double,1> >::iterator it; 1427 1430 map<int, CArray<double,1> >::iterator it; 1431 1432 fieldOut = recvDataSrv; 1433 1428 1434 // for (it = data_srv.begin(); it != data_srv.end(); it++) 1429 1435 // { 1430 1436 // grid->outputField(it->first, it->second, fieldOut.dataFirst()); 1431 1437 // } 1432 grid->outputField(recvDataSrv, fieldOut);1438 // grid->outputField(recvDataSrv, fieldOut); 1433 1439 } 1434 1440 -
XIOS/dev/dev_olga/src/node/field.hpp
r1054 r1099 96 96 97 97 std::map<int, StdSize> getGridAttributesBufferSize(); 98 std:: map<int, StdSize> getGridDataBufferSize();98 std::vector<std::map<int, StdSize> > getGridDataBufferSize(); // Grid data buffer size for each connection of contextclient 99 99 100 100 public: … … 146 146 static bool dispatchEvent(CEventServer& event); 147 147 void sendUpdateData(const CArray<double,1>& data); 148 //void sendUpdateData(const CArray<double,1>& data, CContextClient* client);148 void sendUpdateData(const CArray<double,1>& data, CContextClient* client); 149 149 static void recvUpdateData(CEventServer& event); 150 150 void recvUpdateData(std::map<int,CBufferIn*>& rankBuffers); -
XIOS/dev/dev_olga/src/node/grid.cpp
r1077 r1099 96 96 * 97 97 * \return A map associating the server rank with its minimum buffer size. 98 * TODO: Refactor code 98 99 */ 99 100 std::map<int, StdSize> CGrid::getAttributesBufferSize() … … 102 103 103 104 // The grid indexes require a similar size as the actual data 104 std::map<int, StdSize> dataSizes = getDataBufferSize(); 105 std::map<int, StdSize>::iterator it, itE = dataSizes.end(); 106 for (it = dataSizes.begin(); it != itE; ++it) 105 std::vector<std::map<int, StdSize> > dataSizes = getDataBufferSize(); 106 for (size_t i = 0; i < dataSizes.size(); ++i) 107 107 { 108 it->second += 2 * sizeof(bool); 109 if (it->second > attributesSizes[it->first]) 110 attributesSizes[it->first] = it->second; 108 std::map<int, StdSize>::iterator it, itE = dataSizes[i].end(); 109 for (it = dataSizes[i].begin(); it != itE; ++it) 110 { 111 it->second += 2 * sizeof(bool); 112 if (it->second > attributesSizes[it->first]) 113 attributesSizes[it->first] = it->second; 114 } 111 115 } 112 116 117 std::map<int, StdSize>::iterator it, itE; 113 118 // Account for the axis attributes 114 119 std::vector<CAxis*> axisList = getAxis(); 115 120 for (size_t i = 0; i < axisList.size(); ++i) 116 121 { 117 std::map<int, StdSize> axisAttBuffSize = axisList[i]->getAttributesBufferSize(); 122 std::map<int, StdSize> axisAttBuffSize = axisList[i]->getAttributesBufferSize(); 118 123 for (it = axisAttBuffSize.begin(), itE = axisAttBuffSize.end(); it != itE; ++it) 119 124 { … … 144 149 * \return A map associating the server rank with its minimum buffer size. 
145 150 */ 146 std::map<int, StdSize> CGrid::getDataBufferSize(const std::string& id /*= ""*/) 147 { 148 std::map<int, StdSize> dataSizes; 151 std::vector<std::map<int, StdSize> > CGrid::getDataBufferSize(const std::string& id /*= ""*/) 152 { 149 153 // The record index is sometimes sent along with the data but we always 150 154 // include it in the size calculation for the sake of simplicity 151 155 const size_t extraSize = CEventClient::headerSize + (id.empty() ? getId() : id).size() + 2 * sizeof(size_t); 152 153 std::map<int, size_t>::const_iterator itEnd = connectedDataSize_.end(); 154 for (size_t k = 0; k < connectedServerRank_.size(); ++k) 156 CContext* context = CContext::getCurrent(); 157 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 158 std::vector<std::map<int, StdSize> > dataSizes(nbSrvPools); 159 for (int p = 0; p < nbSrvPools; ++p) 155 160 { 156 int rank = connectedServerRank_[k]; 157 std::map<int, size_t>::const_iterator it = connectedDataSize_.find(rank); 158 size_t count = (it != itEnd) ? it->second : 0; 159 160 dataSizes.insert(std::make_pair(rank, extraSize + CArray<double,1>::size(count))); 161 std::map<int, size_t>::const_iterator itEnd = connectedDataSize_[p].end(); 162 for (size_t k = 0; k < connectedServerRank_[p].size(); ++k) // TODO: Should change connectedServerRank_[0] to something more general 163 { 164 int rank = connectedServerRank_[p][k]; 165 std::map<int, size_t>::const_iterator it = connectedDataSize_[0].find(rank); 166 size_t count = (it != itEnd) ? it->second : 0; 167 168 dataSizes[p].insert(std::make_pair(rank, extraSize + CArray<double,1>::size(count))); 169 } 161 170 } 162 171 … … 276 285 int nbSrvPools = (context->hasServer) ? (context->hasClient ? 
context->clientPrimServer.size() : 0) : 1; 277 286 nbSrvPools = 1; 278 for (int i = 0; i < nbSrvPools; ++i)287 for (int p = 0; p < nbSrvPools; ++p) 279 288 { 280 289 if (isScalarGrid()) … … 304 313 if (this->isChecked) return; 305 314 this->checkAttributesAfterTransformation(); 306 this->checkMask(); 315 316 // TODO: Transfer grid attributes 317 if (!context->hasClient && context->hasServer) this->createMask(); 307 318 this->computeIndex(); 308 319 … … 506 517 // It works only for the same number of procs on secondary pools 507 518 int nbSrvPools = 1; 508 509 for (int i = 0; i < nbSrvPools; ++i) 519 for (int p = 0; p < nbSrvPools; ++p) 510 520 { 511 CContextClient* client = (context->hasServer) ? (context->hasClient ? context->clientPrimServer[ i] : context->client) : context->client;521 CContextClient* client = (context->hasServer) ? (context->hasClient ? context->clientPrimServer[p] : context->client) : context->client; 512 522 // CContextClient* client = (context->hasServer) ? context->clientPrimServer[i] : context->client; 513 523 // CContextServer* server = (context->hasServer) ? 
context->server : 0 ; … … 517 527 // First of all, compute distribution on client side 518 528 if (0 != serverDistribution_) 529 { 519 530 clientDistribution_ = new CDistributionClient(rank, this, serverDistribution_->getGlobalLocalIndex()); 531 storeIndex_client.resize(serverDistribution_->getGridSize()); 532 int nbStoreIndex = storeIndex_client.numElements(); 533 for (int idx = 0; idx < nbStoreIndex; ++idx) storeIndex_client(idx) = idx; 534 } 520 535 else 536 { 521 537 clientDistribution_ = new CDistributionClient(rank, this); 522 523 // Get local data index on client 524 storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().size()); 525 int nbStoreIndex = storeIndex_client.numElements(); 526 for (int idx = 0; idx < nbStoreIndex; ++idx) storeIndex_client(idx) = (clientDistribution_->getLocalDataIndexOnClient())[idx]; 538 // Get local data index on client 539 storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().size()); 540 int nbStoreIndex = storeIndex_client.numElements(); 541 for (int idx = 0; idx < nbStoreIndex; ++idx) storeIndex_client(idx) = (clientDistribution_->getLocalDataIndexOnClient())[idx]; 542 } 543 527 544 if (0 == serverDistribution_) 528 545 isDataDistributed_= clientDistribution_->isDataDistributed(); … … 535 552 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 536 553 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; 537 // int nbSrvPools = 1; 538 for (int i = 0; i < nbSrvPools; ++i) 554 connectedServerRank_.resize(nbSrvPools); 555 connectedDataSize_.resize(nbSrvPools); 556 nbSenders.resize(nbSrvPools); 557 558 for (int p = 0; p < nbSrvPools; ++p) 539 559 { 540 CContextClient* client = (context->hasServer) ? context->clientPrimServer[ i] : context->client;541 542 connectedServerRank_ .clear();560 CContextClient* client = (context->hasServer) ? 
context->clientPrimServer[p] : context->client; 561 562 connectedServerRank_[p].clear(); 543 563 544 564 if (!doGridHaveDataDistributed()) … … 550 570 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 551 571 { 552 connectedServerRank_ .push_back(*itRank);553 connectedDataSize_[ *itRank] = ssize;572 connectedServerRank_[p].push_back(*itRank); 573 connectedDataSize_[p][*itRank] = ssize; 554 574 } 555 575 } … … 583 603 if (iteGlobalLocalIndexMap != itGlobalLocalIndexMap) 584 604 { 585 if (connectedDataSize_ .end() == connectedDataSize_.find(serverRank))586 connectedDataSize_[ serverRank] = 1;605 if (connectedDataSize_[p].end() == connectedDataSize_[p].find(serverRank)) 606 connectedDataSize_[p][serverRank] = 1; 587 607 else 588 ++connectedDataSize_[ serverRank];608 ++connectedDataSize_[p][serverRank]; 589 609 } 590 610 } … … 592 612 593 613 for (itGlobalMap = itbGlobalMap; itGlobalMap != iteGlobalMap; ++itGlobalMap) { 594 connectedServerRank_ .push_back(itGlobalMap->first);614 connectedServerRank_[p].push_back(itGlobalMap->first); 595 615 } 596 616 597 nbSenders = clientServerMap_->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_);617 nbSenders[p] = clientServerMap_->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_[p]); 598 618 } 599 619 } … … 626 646 // isDataDistributed_= clientDistribution_->isDataDistributed(); 627 647 648 628 649 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 629 650 … … 633 654 computeConnectedClients(); 634 655 } 635 636 656 637 657 // connectedServerRank_.clear(); … … 708 728 int nbSrvPools = (context->hasServer) ? (context->hasClient ? 
context->clientPrimServer.size() : 1) : 1; 709 729 nbSrvPools = 1; 710 for (int i = 0; i < nbSrvPools; ++i)730 for (int p = 0; p < nbSrvPools; ++p) 711 731 { 712 CContextClient* client = context->hasServer ? context->clientPrimServer[ i] : context->client;732 CContextClient* client = context->hasServer ? context->clientPrimServer[p] : context->client; 713 733 int serverSize = client->serverSize; 714 734 std::vector<CDomain*> domList = getDomains(); … … 1139 1159 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1140 1160 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; 1141 for (int i = 0; i < nbSrvPools; ++i) 1142 { 1143 CContextClient* client = context->hasServer ? context->clientPrimServer[i] : context->client; 1161 connectedServerRank_.resize(nbSrvPools); 1162 connectedDataSize_.resize(nbSrvPools); 1163 nbSenders.resize(nbSrvPools); 1164 1165 for (int p = 0; p < nbSrvPools; ++p) 1166 { 1167 CContextClient* client = context->hasServer ? context->clientPrimServer[p] : context->client; 1144 1168 1145 1169 storeIndex_client.resize(1); 1146 1170 storeIndex_client(0) = 0; 1147 1171 1148 connectedServerRank_ .clear();1172 connectedServerRank_[p].clear(); 1149 1173 1150 1174 if (0 == client->clientRank) … … 1152 1176 for (int rank = 0; rank < client->serverSize; ++rank) 1153 1177 { 1154 connectedServerRank_ .push_back(rank);1155 connectedDataSize_[ rank] = 1;1156 nbSenders[ rank] = 1;1178 connectedServerRank_[p].push_back(rank); 1179 connectedDataSize_[p][rank] = 1; 1180 nbSenders[p][rank] = 1; 1157 1181 } 1158 1182 } … … 1200 1224 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1201 1225 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 0) : 1; 1202 for (int i = 0; i < nbSrvPools; ++i)1203 { 1204 CContextClient* client = context->hasServer ? 
context->clientPrimServer[ i] : context->client;1226 for (int p = 0; p < nbSrvPools; ++p) 1227 { 1228 CContextClient* client = context->hasServer ? context->clientPrimServer[p] : context->client; 1205 1229 1206 1230 CEventClient event(getType(), EVENT_ID_INDEX); … … 1257 1281 { 1258 1282 CContext* context = CContext::getCurrent(); 1259 // CContextClient* client = context->client;1260 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;1261 1283 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; 1262 1284 for (int p = 0; p < nbSrvPools; ++p) … … 1368 1390 } 1369 1391 1370 for (int ns = 0; ns < connectedServerRank_ .size(); ++ns)1392 for (int ns = 0; ns < connectedServerRank_[p].size(); ++ns) 1371 1393 { 1372 rank = connectedServerRank_[ ns];1394 rank = connectedServerRank_[p][ns]; 1373 1395 int nb = 0; 1374 1396 if (globalIndexTmp.end() != globalIndexTmp.find(rank)) … … 1391 1413 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1392 1414 1393 event.push(rank, nbSenders[ rank], listMsg.back());1415 event.push(rank, nbSenders[p][rank], listMsg.back()); 1394 1416 } 1395 1417 … … 1419 1441 { 1420 1442 CContext* context = CContext::getCurrent(); 1443 1421 1444 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; 1422 1445 nbSrvPools = 1; 1446 // connectedServerRank_.resize(nbSrvPools); 1447 // nbSenders.resize(nbSrvPools); 1423 1448 for (int p = 0; p < nbSrvPools; ++p) 1424 1449 { … … 1428 1453 // CContextClient* client = (context->hasServer) ? 
context->client : context->clientPrimServer[p]; 1429 1454 numberWrittenIndexes_ = totalNumberWrittenIndexes_ = offsetWrittenIndexes_ = 0; 1430 connectedServerRank_= ranks;1455 //connectedServerRank_[p] = ranks; 1431 1456 1432 1457 for (int n = 0; n < ranks.size(); n++) … … 1465 1490 if (2 == axis_domain_order(i)) //domain 1466 1491 { 1467 // nZoomBegin[indexMap[i]] = domainList[domainId]->zoom_ibegin_srv;1468 // nZoomSize[indexMap[i]] = domainList[domainId]->zoom_ni_srv;1469 //nZoomBeginGlobal[indexMap[i]] = domainList[domainId]->global_zoom_ibegin;1492 nZoomBegin[indexMap[i]] = domainList[domainId]->zoom_ibegin; 1493 nZoomSize[indexMap[i]] = domainList[domainId]->zoom_ni; 1494 nZoomBeginGlobal[indexMap[i]] = domainList[domainId]->global_zoom_ibegin; 1470 1495 // zoomIndex.push_back(domainList[domainId]->zoom_i_index); 1471 //nGlob[indexMap[i]] = domainList[domainId]->ni_glo;1472 1473 // nZoomBegin[indexMap[i] + 1] = domainList[domainId]->zoom_jbegin_srv;1474 // nZoomSize[indexMap[i] + 1] = domainList[domainId]->zoom_nj_srv;1475 //nZoomBeginGlobal[indexMap[i] + 1] = domainList[domainId]->global_zoom_jbegin;1496 nGlob[indexMap[i]] = domainList[domainId]->ni_glo; 1497 1498 nZoomBegin[indexMap[i] + 1] = domainList[domainId]->zoom_jbegin; 1499 nZoomSize[indexMap[i] + 1] = domainList[domainId]->zoom_nj; 1500 nZoomBeginGlobal[indexMap[i] + 1] = domainList[domainId]->global_zoom_jbegin; 1476 1501 // zoomIndex.push_back(domainList[domainId]->zoom_j_index); 1477 //nGlob[indexMap[i] + 1] = domainList[domainId]->nj_glo;1478 1479 int nbZoom = domainList[domainId]->zoom_i_index.numElements();1480 zoomIndex.push_back(CArray<int,1>(nbZoom));1481 CArray<int,1>& zoomDomain = zoomIndex.back();1482 for (int ind = 0; ind < nbZoom; ++ind)1483 {1484 zoomDomain(ind) = domainList[domainId]->zoom_i_index(ind) + domainList[domainId]->zoom_j_index(ind) * domainList[domainId]->ni_glo;1485 }1486 1487 globalSize *= domainList[domainId]->ni_glo * domainList[domainId]->nj_glo;1502 
nGlob[indexMap[i] + 1] = domainList[domainId]->nj_glo; 1503 1504 // int nbZoom = domainList[domainId]->zoom_i_index.numElements(); 1505 // zoomIndex.push_back(CArray<int,1>(nbZoom)); 1506 // CArray<int,1>& zoomDomain = zoomIndex.back(); 1507 // for (int ind = 0; ind < nbZoom; ++ind) 1508 // { 1509 // zoomDomain(ind) = domainList[domainId]->zoom_i_index(ind) + domainList[domainId]->zoom_j_index(ind) * domainList[domainId]->ni_glo; 1510 // } 1511 1512 // globalSize *= domainList[domainId]->ni_glo * domainList[domainId]->nj_glo; 1488 1513 ++domainId; 1489 1514 } 1490 1515 else if (1 == axis_domain_order(i)) // axis 1491 1516 { 1492 // nZoomBegin[indexMap[i]] = axisList[axisId]->zoom_begin_srv;1493 // nZoomSize[indexMap[i]] = axisList[axisId]->zoom_size_srv;1494 //nZoomBeginGlobal[indexMap[i]] = axisList[axisId]->global_zoom_begin;1495 zoomIndex.push_back(axisList[axisId]->zoom_index);1496 //nGlob[indexMap[i]] = axisList[axisId]->n_glo;1497 globalSize *= axisList[axisId]->n_glo;1517 nZoomBegin[indexMap[i]] = axisList[axisId]->zoom_begin; 1518 nZoomSize[indexMap[i]] = axisList[axisId]->zoom_n; 1519 nZoomBeginGlobal[indexMap[i]] = axisList[axisId]->global_zoom_begin; 1520 // zoomIndex.push_back(axisList[axisId]->zoom_index); 1521 nGlob[indexMap[i]] = axisList[axisId]->n_glo; 1522 // globalSize *= axisList[axisId]->n_glo; 1498 1523 ++axisId; 1499 1524 } 1500 1525 else // scalar 1501 1526 { 1502 CArray<int,1> zoomScalar(1);1503 zoomScalar(0) = 0;1504 //nZoomBegin[indexMap[i]] = 0;1505 //nZoomSize[indexMap[i]] = 1;1506 //nZoomBeginGlobal[indexMap[i]] = 0;1507 zoomIndex.push_back(zoomScalar);1508 //nGlob[indexMap[i]] = 1;1527 // CArray<int,1> zoomScalar(1); 1528 // zoomScalar(0) = 0; 1529 nZoomBegin[indexMap[i]] = 0; 1530 nZoomSize[indexMap[i]] = 1; 1531 nZoomBeginGlobal[indexMap[i]] = 0; 1532 // zoomIndex.push_back(zoomScalar); 1533 nGlob[indexMap[i]] = 1; 1509 1534 ++scalarId; 1510 1535 } … … 1512 1537 dataSize = 1; 1513 1538 1514 //for (int i = 0; i < nZoomSize.size(); 
++i)1515 //dataSize *= nZoomSize[i];1516 //serverDistribution_ = new CDistributionServer(server->intraCommRank, nZoomBegin, nZoomSize,1517 //nZoomBeginGlobal, nGlob);1518 for (int i = 0; i < zoomIndex.size(); ++i)1519 {1520 dataSize *= zoomIndex[i].numElements();1521 }1522 serverDistribution_ = new CDistributionServer(server->intraCommRank, zoomIndex, nGlobElement);1539 for (int i = 0; i < nZoomSize.size(); ++i) 1540 dataSize *= nZoomSize[i]; 1541 serverDistribution_ = new CDistributionServer(server->intraCommRank, nZoomBegin, nZoomSize, 1542 nZoomBeginGlobal, nGlob); 1543 // for (int i = 0; i < zoomIndex.size(); ++i) 1544 // { 1545 // dataSize *= zoomIndex[i].numElements(); 1546 // } 1547 // serverDistribution_ = new CDistributionServer(server->intraCommRank, zoomIndex, nGlobElement); 1523 1548 } 1524 1549 1525 1550 CArray<size_t,1> outIndex; 1526 1551 buffer >> outIndex; 1552 serverDistribution_->computeLocalIndex(outIndex); 1553 1527 1554 if (isDataDistributed_) 1528 serverDistribution_->computeLocalIndex(outIndex);1555 {} 1529 1556 else 1530 1557 { … … 1533 1560 // THE PROBLEM HERE IS THAT DATA CAN BE NONDISTRIBUTED ON CLIENT AND DISTRIBUTED ON SERVER 1534 1561 // BELOW IS THE TEMPORARY FIX only for a single type of element (domain, asix, scalar) 1535 dataSize = serverDistribution_->getGlobalIndexEachDimension()[0].numElements(); 1536 outIndex.resize(dataSize); 1537 outIndex = serverDistribution_->getGlobalIndexEachDimension()[0]; 1562 dataSize = serverDistribution_->getGridSize(); 1563 // dataSize = serverDistribution_->getGlobalIndexEachDimension()[0].numElements(); 1564 // outIndex.resize(dataSize); 1565 // outIndex = serverDistribution_->getGlobalIndexEachDimension()[0]; 1538 1566 1539 1567 } … … 1541 1569 1542 1570 outIndexFromClient.insert(std::make_pair(rank, outIndex)); 1543 connectedDataSize_[rank] = outIndex.numElements();1571 // connectedDataSize_[p][rank] = outIndex.numElements(); 1544 1572 numberWrittenIndexes_ += outIndex.numElements(); 1545 1573 
} 1546 1574 1547 int sizeData = 0;1548 for (map<int, CArray<size_t, 1> >::iterator it = outIndexFromClient.begin(); it != outIndexFromClient.end(); ++it)1549 {1550 sizeData += it->second.numElements();1551 }1552 indexFromClients.resize(sizeData);1553 sizeData = 0;1554 for (map<int, CArray<size_t, 1> >::iterator it = outIndexFromClient.begin(); it != outIndexFromClient.end(); ++it)1555 {1556 CArray<size_t, 1>& tmp0 = it->second;1557 CArray<size_t, 1> tmp1 = indexFromClients(Range(sizeData, sizeData + tmp0.numElements() - 1));1558 tmp1 = tmp0;1559 sizeData += tmp0.numElements();1560 }1575 // int sizeData = 0; 1576 // for (map<int, CArray<size_t, 1> >::iterator it = outIndexFromClient.begin(); it != outIndexFromClient.end(); ++it) 1577 // { 1578 // sizeData += it->second.numElements(); 1579 // } 1580 // indexFromClients.resize(sizeData); 1581 // sizeData = 0; 1582 // for (map<int, CArray<size_t, 1> >::iterator it = outIndexFromClient.begin(); it != outIndexFromClient.end(); ++it) 1583 // { 1584 // CArray<size_t, 1>& tmp0 = it->second; 1585 // CArray<size_t, 1> tmp1 = indexFromClients(Range(sizeData, sizeData + tmp0.numElements() - 1)); 1586 // tmp1 = tmp0; 1587 // sizeData += tmp0.numElements(); 1588 // } 1561 1589 1562 1590 // if (isScalarGrid()) return; … … 1571 1599 totalNumberWrittenIndexes_ = numberWrittenIndexes_; 1572 1600 1573 nbSenders= CClientServerMappingDistributed::computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, ranks);1601 // nbSenders[p] = CClientServerMappingDistributed::computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, ranks); 1574 1602 } 1575 1603 } -
XIOS/dev/dev_olga/src/node/grid.hpp
r1025 r1099 174 174 std::map<int, int> getDomConServerSide(); 175 175 std::map<int, StdSize> getAttributesBufferSize(); 176 std:: map<int, StdSize> getDataBufferSize(const std::string& id = "");176 std::vector<std::map<int, StdSize> > getDataBufferSize(const std::string& id = ""); 177 177 std::vector<StdString> getDomainList(); 178 178 std::vector<StdString> getAxisList(); … … 222 222 map<int, CArray<int, 1> > storeIndex_toSrv; 223 223 map<int, CArray<int, 1> > storeIndex_fromSrv; 224 map<int,int> nbSenders;224 std::vector<map<int,int> > nbSenders; 225 225 226 226 map<int, CArray<size_t, 1> > outIndexFromClient, compressedOutIndexFromClient; … … 277 277 size_t writtenDataSize_; 278 278 int numberWrittenIndexes_, totalNumberWrittenIndexes_, offsetWrittenIndexes_; 279 std:: map<int,size_t> connectedDataSize_;280 std::vector< int> connectedServerRank_;279 std::vector<std::map<int,size_t> > connectedDataSize_; 280 std::vector<std::vector<int> > connectedServerRank_; 281 281 bool isDataDistributed_; 282 282 int positionDimensionDistributed_; -
XIOS/dev/dev_olga/src/test/test_client.f90
r1030 r1099 17 17 CHARACTER(len=15) :: calendar_type 18 18 TYPE(xios_context) :: ctx_hdl 19 INTEGER,PARAMETER :: ni_glo= 420 INTEGER,PARAMETER :: nj_glo= 419 INTEGER,PARAMETER :: ni_glo=100 20 INTEGER,PARAMETER :: nj_glo=100 21 21 INTEGER,PARAMETER :: llm=2 22 22 DOUBLE PRECISION :: lval(llm)=1 … … 50 50 lat_glo(i,j)=1000+(i-1)+(j-1)*ni_glo 51 51 DO l=1,llm 52 field_A_glo(i,j,l)=(i-1)+(j-1)*ni_glo +10000*l52 field_A_glo(i,j,l)=(i-1)+(j-1)*ni_glo*100+10000*l 53 53 ENDDO 54 54 ENDDO … … 66 66 iend=ibegin+ni-1 ; jend=jbegin+nj-1 67 67 68 ALLOCATE(lon(ni,nj),lat(ni,nj),field_A(0:ni+1,-1:nj+2,llm),lonvalue(ni,nj), axisValue(nj ), field_domain(0:ni+1,-1:nj+2))68 ALLOCATE(lon(ni,nj),lat(ni,nj),field_A(0:ni+1,-1:nj+2,llm),lonvalue(ni,nj), axisValue(nj_glo), field_domain(0:ni+1,-1:nj+2)) 69 69 lon(:,:)=lon_glo(ibegin+1:iend+1,jbegin+1:jend+1) 70 70 lat(:,:)=lat_glo(ibegin+1:iend+1,jbegin+1:jend+1) 71 71 field_A(1:ni,1:nj,:)=field_A_glo(ibegin+1:iend+1,jbegin+1:jend+1,:) 72 72 field_domain(1:ni,1:nj) = field_A_glo(ibegin+1:iend+1,jbegin+1:jend+1,1) 73 axisValue(1:nj )=field_A(1,1:nj,1);73 axisValue(1:nj_glo)=field_A_glo(1,1:nj_glo,1); 74 74 75 75 CALL xios_context_initialize("test",comm) … … 131 131 132 132 PRINT*,"field field_A is active ? ",xios_field_is_active("field_A") 133 DO ts=1,4 0133 DO ts=1,4 134 134 CALL xios_update_calendar(ts) 135 135 CALL xios_send_field("field_A",field_A) 136 !CALL xios_send_field("field_Axis",axisValue)136 CALL xios_send_field("field_Axis",axisValue) 137 137 ! CALL xios_send_field("field_Axis",lval) 138 138 CALL xios_send_field("field_Domain",field_domain)
Note: See TracChangeset
for help on using the changeset viewer.