Changeset 569 for XIOS/trunk
- Timestamp: 03/10/15 10:49:13 (9 years ago)
- Location: XIOS/trunk
- Files: 15 edited
XIOS/trunk/inputs/Version2/iodef.xml
r567 r569 7 7 8 8 <field_definition level="1" enabled=".TRUE."> 9 <!-- <field id="field_A" operation="average" freq_op="3600s" domain_ref="domain_A" axis_ref="axis_A" />-->9 <field id="field_AA" operation="average" freq_op="3600s" domain_ref="domain_A" axis_ref="axis_A" /> 10 10 <field id="field_A" operation="average" freq_op="3600s" grid_ref="grid_A" /> 11 11 <field id="field_Axis" operation="average" freq_op="3600s" grid_ref="grid_Axis" /> … … 19 19 </file> 20 20 <file id="output_Axis" name="output_Axis"> 21 <field field_ref="field_Axis" />21 <!-- <field field_ref="field_Axis" />--> 22 22 <field field_ref="field_All_Axis" /> 23 23 </file> … … 28 28 <axis id="axis_A" /> 29 29 <axis id="axis_B" /> 30 <axis id="axis_C" zoom_size="2" zoom_end="2" 30 <axis id="axis_C" zoom_size="2" zoom_end="2"/> 31 31 <axis id="axis_D" zoom_size="2" zoom_end="3"/> 32 32 </axis_definition> -
XIOS/trunk/src/client_server_mapping.cpp
r568 r569 3 3 \author Ha NGUYEN 4 4 \since 04 Feb 2015 5 \date 0 6 Feb20155 \date 09 Mars 2015 6 6 7 7 \brief Mapping between index client and server. … … 20 20 } 21 21 22 void CClientServerMapping::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 23 const CArray<int,1>& localIndexOnClient) 24 { 25 // defaultComputeServerIndexMapping(globalIndexOnClient, globalIndexServer); 26 } 27 28 22 /*! 23 Compute mapping global index of server which client sends to. 24 \param [in] globalIndexOnClient global index on client 25 \param [in] globalIndexServer global index of servers 26 */ 29 27 void CClientServerMapping::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 30 28 const std::vector<CArray<size_t,1>* >& globalIndexServer) … … 33 31 } 34 32 35 //void CClientServerMapping::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient,36 // const CArray<int,1>& localIndexOnClient,37 // const std::vector<CArray<size_t,1>* >& globalIndexOfServer)38 //{39 // defaultComputeServerIndexMapping(globalIndexOnClient, globalIndexOfServer, &localIndexOnClient);40 //}41 42 33 /*! 43 34 Compute index of data which are sent to server and index global on server side 44 35 \param [in] globalIndexOnClient global index of data on client 45 36 \param [in] globalIndexServer global index of server(s) 37 \param [in] localIndexOnClient local index of data on client which are sent to server 46 38 */ 47 39 void CClientServerMapping::defaultComputeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, … … 129 121 for(int n=0;n<recvSize;n++) clientRes[recvBuff[n]]++ ; 130 122 131 // std::map<int,int> nbSenders;132 123 for(int n=0;n<nbConnectedServer;n++) 133 124 { -
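The Doxygen headers added to client_server_mapping.cpp above describe the simple (non-distributed) mapping: given the global indices a client holds and the global indices owned by each server, group the client's indices by destination server. A minimal sketch of that idea, with hypothetical names and simplified containers (not the committed defaultComputeServerIndexMapping):

#include <cstddef>
#include <map>
#include <vector>

// Group the client's global indices by the rank of the server that owns them.
std::map<int, std::vector<std::size_t> >
mapClientIndexToServers(const std::vector<std::size_t>& globalIndexOnClient,
                        const std::vector<std::vector<std::size_t> >& globalIndexServer)
{
  std::map<int, std::vector<std::size_t> > indexPerServer;
  for (std::size_t i = 0; i < globalIndexOnClient.size(); ++i)
  {
    for (int rank = 0; rank < (int)globalIndexServer.size(); ++rank)
    {
      const std::vector<std::size_t>& onServer = globalIndexServer[rank];
      // Linear search for clarity only; sorted ranges or hashing would scale better.
      for (std::size_t j = 0; j < onServer.size(); ++j)
      {
        if (onServer[j] == globalIndexOnClient[i])
        {
          indexPerServer[rank].push_back(globalIndexOnClient[i]);
          break;
        }
      }
    }
  }
  return indexPerServer;
}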
XIOS/trunk/src/client_server_mapping.hpp
r568 r569 3 3 \author Ha NGUYEN 4 4 \since 04 Feb 2015 5 \date 0 6 Feb20155 \date 09 Mars 2015 6 6 7 7 \brief Mapping between index client and server. … … 13 13 #include "array_new.hpp" 14 14 #include "mpi.hpp" 15 #include <boost/unordered_map.hpp>16 15 17 16 namespace xios { … … 31 30 virtual ~CClientServerMapping(); 32 31 32 // Only need global index on client to calculate mapping (supposed client has info of distribution) 33 virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient) = 0; 34 35 // In case of computing local index on client sent to server 36 virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 37 const CArray<int,1>& localIndexOnClient) = 0; 38 39 // Simple case, global index on client and index on servers 33 40 virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 34 41 const std::vector<CArray<size_t,1>* >& globalIndexOnServer); 35 36 virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient,37 const CArray<int,1>& localIndexOnClient);38 39 // virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient,40 // const CArray<int,1>& localIndexOnClient,41 // const std::vector<CArray<size_t,1>* >& globalIndexOnServer);42 42 43 43 std::map<int,int> computeConnectedClients(int nbServer, int nbClient, -
XIOS/trunk/src/client_server_mapping_distributed.cpp
r568 r569 1 /*! 2 \file client_server_mapping.hpp 3 \author Ha NGUYEN 4 \since 27 Feb 2015 5 \date 09 Mars 2015 6 7 \brief Mapping between index client and server. 8 Clients pre-calculate all information of server distribution. 9 */ 1 10 #include "client_server_mapping_distributed.hpp" 2 11 #include <limits> … … 7 16 8 17 CClientServerMappingDistributed::CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 9 const MPI_Comm& clientIntraComm) : CClientServerMapping(), indexClientHash_() 18 const MPI_Comm& clientIntraComm) 19 : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0), 20 indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_() 10 21 { 11 22 clientIntraComm_ = clientIntraComm; 12 23 MPI_Comm_size(clientIntraComm,&(nbClient_)); 13 MPI_Comm_rank(clientIntraComm,&clientRank_) ; 24 MPI_Comm_rank(clientIntraComm,&clientRank_); 25 computeHashIndex(); 14 26 computeDistributedServerIndex(globalIndexOfServer, clientIntraComm); 15 27 } … … 17 29 CClientServerMappingDistributed::~CClientServerMappingDistributed() 18 30 { 19 20 } 21 31 } 32 33 /*! 34 Compute mapping global index of server which client sends to. 35 \param [in] globalIndexOnClient global index client has 36 */ 37 void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient) 38 { 39 int ssize = globalIndexOnClient.numElements(); 40 CArray<int,1>* localIndexOnClient = new CArray<int,1>(ssize); 41 for (int i = 0; i < ssize; ++i) (*localIndexOnClient)(i) = i; 42 43 this->computeServerIndexMapping(globalIndexOnClient, *localIndexOnClient); 44 delete localIndexOnClient; 45 } 46 47 /*! 48 Compute mapping global index of server which client sends to. 49 \param [in] globalIndexOnClient global index client has 50 \param [in] localIndexOnClient local index on client 51 */ 22 52 void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 23 53 const CArray<int,1>& localIndexOnClient) … … 89 119 recvBuffIndexServer = new int[nbIndexReceivedFromOthers]; 90 120 91 resetRe questAndCount();121 resetReceivingRequestAndCount(); 92 122 std::map<int, MPI_Request>::iterator itRequest; 93 123 std::vector<int> demandAlreadyReceived, repondAlreadyReceived; … … 168 198 } 169 199 200 /*! 201 Compute the hash index distribution of whole size_t space then each client will have a range of this distribution 202 */ 170 203 void CClientServerMappingDistributed::computeHashIndex() 171 204 { … … 184 217 } 185 218 219 /*! 220 Compute distribution of global index for servers 221 Each client already holds a piece of information about global index and the corresponding server. 222 This information is redistributed into size_t sipace in which each client possesses a specific range of index. 223 Afterh the redistribution, each client as long as its range of index contains all necessary information about server. 224 \param [in] globalIndexOfServer global index and the corresponding server 225 \param [in] clientIntraComm client joining distribution process. 
226 */ 186 227 void CClientServerMappingDistributed::computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer, 187 228 const MPI_Comm& clientIntraComm) 188 229 { 189 computeHashIndex();190 int clientRank;191 MPI_Comm_rank(clientIntraComm,&clientRank);192 193 230 int* sendBuff = new int[nbClient_]; 194 231 int* sendNbIndexBuff = new int[nbClient_]; … … 214 251 { 215 252 int indexClient = std::distance(itbClientHash, itClientHash)-1; 216 if (clientRank == indexClient)253 if (clientRank_ == indexClient) 217 254 { 218 255 globalIndexToServerMapping_.insert(std::make_pair<size_t,int>(it->first, it->second)); … … 228 265 } 229 266 230 231 // for (boost::unordered_map<size_t,int>::const_iterator it = globalIndexToServerMapping_.begin(); 232 // it != globalIndexToServerMapping_.end(); ++it) 233 // std::cout << " " << it->first << ":" << it->second; 234 // std::cout << "First Number = " << globalIndexToServerMapping_.size() << std::endl; 235 236 267 // Calculate from how many clients each client receive message. 237 268 int* recvBuff = new int[nbClient_]; 238 269 MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm); 239 270 int recvNbClient = recvBuff[clientRank_]; 271 272 // Calculate size of buffer for receiving message 240 273 int* recvNbIndexBuff = new int[nbClient_]; 241 274 MPI_Allreduce(sendNbIndexBuff, recvNbIndexBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm); 242 243 MPI_Status statusIndexGlobal, statusIndexServer; 244 int flag, countIndexGlobal_ = 0, countIndexServer_ = 0; 245 246 247 std::map<int, MPI_Request>::iterator itRequestIndexGlobal, itRequestIndexServer; 248 std::map<int, int> countBuffIndexServer, countBuffIndexGlobal; 249 std::vector<int> processedList; 250 251 252 bool isFinished=false; 253 int recvNbIndexCount = recvNbIndexBuff[clientRank]; 254 int recvNbClient = recvBuff[clientRank]; 275 int recvNbIndexCount = recvNbIndexBuff[clientRank_]; 255 276 unsigned long* recvIndexGlobalBuff = new unsigned long[recvNbIndexCount]; 256 277 int* recvIndexServerBuff = new int[recvNbIndexCount]; 257 278 279 // If a client holds information about global index and servers which don't belong to it, 280 // it will send a message to the correct clients. 281 // Contents of the message are global index and its corresponding server index 258 282 std::list<MPI_Request> sendRequest; 259 283 std::map<int, std::vector<size_t> >::iterator itGlobal = client2ClientIndexGlobal.begin(), 260 284 iteGlobal = client2ClientIndexGlobal.end(); 261 for ( ; 285 for ( ;itGlobal != iteGlobal; ++itGlobal) 262 286 sendIndexGlobalToClients(itGlobal->first, itGlobal->second, clientIntraComm, sendRequest); 263 287 std::map<int, std::vector<int> >::iterator itServer = client2ClientIndexServer.begin(), … … 266 290 sendIndexServerToClients(itServer->first, itServer->second, clientIntraComm, sendRequest); 267 291 268 resetRequestAndCount(); 292 std::map<int, MPI_Request>::iterator itRequestIndexGlobal, itRequestIndexServer; 293 std::map<int, int> countBuffIndexServer, countBuffIndexGlobal; 294 std::vector<int> processedList; 295 296 bool isFinished = (0 == recvNbClient) ? true : false; 297 298 // Just to make sure before listening message, all counting index and receiving request have already beeen reset 299 resetReceivingRequestAndCount(); 300 301 // Now each client trys to listen to demand from others. 
302 // If they have message, it processes: pushing global index and corresponding server to its map 269 303 while (!isFinished || (!sendRequest.empty())) 270 304 { … … 304 338 --recvNbClient; 305 339 } 306 307 340 } 308 341 … … 324 357 delete [] recvIndexGlobalBuff; 325 358 delete [] recvIndexServerBuff; 326 327 // for (boost::unordered_map<size_t,int>::const_iterator it = globalIndexToServerMapping_.begin(); 328 // it != globalIndexToServerMapping_.end(); ++it) 329 // std::cout << " " << it->first << ":" << it->second; 330 // std::cout << "Number = " << globalIndexToServerMapping_.size() << std::endl; 331 332 } 333 359 } 360 361 /*! 362 Probe and receive message containg global index from other clients. 363 Each client can send a message of global index to other clients to fulfill their maps. 364 Each client probes message from its queue then if the message is ready, it will be put into the receiving buffer 365 \param [in] recvIndexGlobalBuff buffer dedicated for receiving global index 366 \param [in] recvNbIndexCount size of the buffer 367 */ 334 368 void CClientServerMappingDistributed::probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount) 335 369 { … … 350 384 } 351 385 386 /*! 387 Probe and receive message containg server index from other clients. 388 Each client can send a message of server index to other clients to fulfill their maps. 389 Each client probes message from its queue then if the message is ready, it will be put into the receiving buffer 390 \param [in] recvIndexServerBuff buffer dedicated for receiving server index 391 \param [in] recvNbIndexCount size of the buffer 392 */ 352 393 void CClientServerMappingDistributed::probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount) 353 394 { … … 369 410 } 370 411 371 412 /*! 413 Send message containing global index to clients 414 \param [in] clientDestRank rank of destination client 415 \param [in] indexGlobal global index to send 416 \param [in] clientIntraComm communication group of client 417 \param [in] requestSendIndexGlobal list of sending request 418 */ 372 419 void CClientServerMappingDistributed::sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal, 373 420 const MPI_Comm& clientIntraComm, … … 378 425 MPI_Isend(&(indexGlobal)[0], (indexGlobal).size(), MPI_UNSIGNED_LONG, 379 426 clientDestRank, 15, clientIntraComm, &(requestSendIndexGlobal.back())); 380 381 // int nbSendClient = indexGlobal.size(); 382 // std::map<int, std::vector<size_t> >::iterator 383 // itClient2ClientIndexGlobal = indexGlobal.begin(), 384 // iteClient2ClientIndexGlobal = indexGlobal.end(); 385 // 386 // for (; itClient2ClientIndexGlobal != iteClient2ClientIndexGlobal; 387 // ++itClient2ClientIndexGlobal) 388 // { 389 // MPI_Request request; 390 // requestSendIndexGlobal.push_back(request); 391 // MPI_Isend(&(itClient2ClientIndexGlobal->second)[0], 392 // (itClient2ClientIndexGlobal->second).size(), 393 // MPI_UNSIGNED_LONG, 394 // itClient2ClientIndexGlobal->first, 395 // 15, clientIntraComm, &(requestSendIndexGlobal.back())); 396 // } 397 398 } 399 427 } 428 429 /*! 
430 Send message containing server index to clients 431 \param [in] clientDestRank rank of destination client 432 \param [in] indexServer server index to send 433 \param [in] clientIntraComm communication group of client 434 \param [in] requestSendIndexServer list of sending request 435 */ 400 436 void CClientServerMappingDistributed::sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer, 401 437 const MPI_Comm& clientIntraComm, … … 406 442 MPI_Isend(&(indexServer)[0], (indexServer).size(), MPI_INT, 407 443 clientDestRank, 12, clientIntraComm, &(requestSendIndexServer.back())); 408 409 // int nbSendClient = indexServer.size(); 410 // std::map<int, std::vector<int> >::iterator 411 // itClient2ClientIndexServer = indexServer.begin(), 412 // iteClient2ClientIndexServer = indexServer.end(); 413 414 // for (; itClient2ClientIndexServer != iteClient2ClientIndexServer; 415 // ++itClient2ClientIndexServer) 416 // { 417 // MPI_Request request; 418 // requestSendIndexServer.push_back(request); 419 // MPI_Isend(&(itClient2ClientIndexServer->second)[0], 420 // (itClient2ClientIndexServer->second).size(), 421 // MPI_INT, 422 // itClient2ClientIndexServer->first, 423 // 12, clientIntraComm, &(requestSendIndexServer.back())); 424 // 425 // } 426 } 427 444 } 445 446 /*! 447 Verify status of sending request 448 \param [in] sendRequest sending request to verify 449 */ 428 450 void CClientServerMappingDistributed::testSendRequest(std::list<MPI_Request>& sendRequest) 429 451 { … … 441 463 if (true == flag) 442 464 { 443 --sizeListRequest;444 465 isErased = true; 445 466 break; … … 451 472 } 452 473 474 /*! 475 Process the received request. Pushing global index and server index into map 476 \param[in] buffIndexGlobal pointer to the begining of buffer containing global index 477 \param[in] buffIndexServer pointer to the begining of buffer containing server index 478 \param[in] count size of received message 479 */ 453 480 void CClientServerMappingDistributed::processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count) 454 481 { … … 457 484 } 458 485 486 /*! 487 Compute size of message containing global index 488 \param[in] requestRecv request of message 489 */ 459 490 int CClientServerMappingDistributed::computeBuffCountIndexGlobal(MPI_Request& requestRecv) 460 491 { … … 471 502 } 472 503 504 /*! 505 Compute size of message containing server index 506 \param[in] requestRecv request of message 507 */ 473 508 int CClientServerMappingDistributed::computeBuffCountIndexServer(MPI_Request& requestRecv) 474 509 { … … 485 520 } 486 521 487 void CClientServerMappingDistributed::resetRequestAndCount() 522 /*! 523 Reset all receiving request map and counter 524 */ 525 void CClientServerMappingDistributed::resetReceivingRequestAndCount() 488 526 { 489 527 countIndexGlobal_ = countIndexServer_ = 0; -
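The comments added above to computeHashIndex and computeDistributedServerIndex describe the core of the distributed algorithm: the whole size_t index space is cut into one contiguous hash range per client, and every (global index, server) pair is shipped to the client whose range contains that global index. A rough sketch of the range computation, under the assumption of an even split (hypothetical helper, not the committed computeHashIndex):

#include <cstddef>
#include <limits>
#include <vector>

// Split the size_t space into nbClient contiguous ranges; client k is then
// responsible for hashed global indices in [bounds[k], bounds[k+1]).
std::vector<std::size_t> computeHashBounds(int nbClient)
{
  std::vector<std::size_t> bounds(nbClient + 1);
  const std::size_t spaceSize = std::numeric_limits<std::size_t>::max();
  const std::size_t chunk = spaceSize / nbClient;
  bounds[0] = 0;
  for (int k = 1; k < nbClient; ++k) bounds[k] = bounds[k - 1] + chunk;
  bounds[nbClient] = spaceSize; // the last client keeps the remainder of the space
  return bounds;
}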
XIOS/trunk/src/client_server_mapping_distributed.hpp
r568 r569 1 /*! 2 \file client_server_mapping.hpp 3 \author Ha NGUYEN 4 \since 27 Feb 2015 5 \date 09 Mars 2015 6 7 \brief Mapping between index client and server. 8 Clients pre-calculate all information of server distribution. 9 */ 10 1 11 #ifndef __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__ 2 12 #define __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__ … … 10 20 namespace xios 11 21 { 12 22 /*! 23 \class CClientServerMappingDistributed 24 This class computes index of data which are sent to server as well as index of data 25 on server side with a distributed alogrithm. Each client has a piece of information about the distribution 26 of servers. To find out all these info, first of all, all client join a discovering process in which each client 27 announces the others about the info they have as well as demand others info they are lacked of. After this process, 28 each client has enough info to decide to which client it need to send a demand for corresponding server of a global index. 29 The alogrithm depends on hashed index. 30 */ 13 31 class CClientServerMappingDistributed : public CClientServerMapping 14 32 { … … 17 35 CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 18 36 const MPI_Comm& clientIntraComm); 37 38 virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient); 19 39 20 40 virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, … … 25 45 26 46 protected: 47 // Redistribute global index and server index among clients 27 48 void computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer, 28 49 const MPI_Comm& clientIntraComm); 29 50 30 void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count); 31 32 void testSendRequest(std::list<MPI_Request>& sendRequest); 33 34 int computeBuffCount(MPI_Request& requestRecvIndexGlobal, MPI_Request& requestRecvIndexServer); 35 36 void computeHashIndex(); 37 38 // void sendIndexServerToClients(std::map<int, std::vector<int> >& indexServer, 39 // const MPI_Comm& clientIntraComm, 40 // std::list<MPI_Request>& requestSendIndexServer); 41 42 // void sendIndexGlobalToClients(std::map<int, std::vector<size_t> >& indexGlobal, 43 // const MPI_Comm& clientIntraComm, 44 // std::list<MPI_Request>& requestSendIndexGlobal); 45 51 // Send server index to clients 46 52 void sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer, 47 53 const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer); 48 54 55 // Send global index to clients 49 56 void sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal, 50 57 const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal); 51 58 52 void resetRequestAndCount(); 59 // Verify sending request 60 void testSendRequest(std::list<MPI_Request>& sendRequest); 53 61 62 // Process request 63 void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count); 64 65 // Probe and receive message of global index 54 66 void probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount); 55 67 68 // Probe and receive message of server index 56 69 void probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount); 57 70 71 // Compute range of hashing 72 void computeHashIndex(); 73 74 // Compute size of receiving buffer for global index 58 75 int computeBuffCountIndexGlobal(MPI_Request& requestRecv); 59 76 77 // Compute 
size of receiving buffer for server index 60 78 int computeBuffCountIndexServer(MPI_Request& requestRecv); 79 80 // Reset request map 81 void resetReceivingRequestAndCount(); 82 61 83 private: 84 //! Mapping of global index to the corresponding server 62 85 boost::unordered_map<size_t,int> globalIndexToServerMapping_; 63 86 87 //! Bounds of hash index 88 std::vector<size_t> indexClientHash_; 89 90 //! Number of client 91 int nbClient_; 92 93 //! Rank of client 94 int clientRank_; 95 96 //! Counting of buffer for receiving global index 97 int countIndexGlobal_; 98 99 //! Counting of buffer for receiving server index 100 int countIndexServer_; 101 102 //! intracommuntion of clients 103 MPI_Comm clientIntraComm_; 104 105 //! Request returned by MPI_IRecv function about global index 64 106 std::map<int, MPI_Request> requestRecvIndexGlobal_; 65 107 108 //! Request returned by MPI_IRecv function about index of server 66 109 std::map<int, MPI_Request> requestRecvIndexServer_; 67 110 68 std::vector<size_t> indexClientHash_; 69 70 int nbClient_; 71 72 int clientRank_; 73 74 int countIndexGlobal_; 75 76 int countIndexServer_; 77 78 MPI_Comm clientIntraComm_; 79 111 //! Mapping client rank and the beginning position of receiving buffer for message of global index from this client 80 112 std::map<int, unsigned long*> indexGlobalBuffBegin_; 81 113 114 //! Mapping client rank and the begining position of receiving buffer for message of server index from this client 82 115 std::map<int, int*> indexServerBuffBegin_; 83 84 116 }; 85 117 -
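To make the new class comment concrete: once every client owns one hash range, deciding which client must be asked about a given global index is a simple range lookup over those bounds. A sketch with hypothetical names (the committed code keeps the bounds in indexClientHash_ and may use a binary search instead):

#include <cstddef>
#include <vector>

// bounds has nbClient+1 entries; client k owns [bounds[k], bounds[k+1]).
// Returns the rank of the client holding the mapping for globalIndex.
int clientInChargeOf(std::size_t globalIndex, const std::vector<std::size_t>& bounds)
{
  for (int k = 0; k + 1 < (int)bounds.size(); ++k)
    if (globalIndex >= bounds[k] && globalIndex < bounds[k + 1]) return k;
  return (int)bounds.size() - 2; // the last client also owns the upper bound itself
}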
XIOS/trunk/src/node/axis.cpp
r568 r569 126 126 this->zoom_end.setValue(zoom_end) ; 127 127 this->zoom_size.setValue(zoom_size) ; 128 129 // compute client zoom indices130 // zoom_begin_client = ibegin_client > zoom_begin ? begin_client : zoom_begin ;131 // zoom_end_client = iend_client < zoom_end ? iend_client : zoom_end ;132 // zoom_size_client = zoom_end_client-zoom_begin_client+1 ;133 // if (zoom_ni_client<0) zoom_ni_client=0 ;134 128 } 135 129 -
XIOS/trunk/src/node/context.cpp
r567 r569 330 330 331 331 // At last, we have all info of domain and axis, then send them 332 //sendRefDomainsAxis();332 sendRefDomainsAxis(); 333 333 334 334 // After that, send all grid (if any) … … 799 799 800 800 //! Client side: Send information of reference domain and axis of active fields 801 //void CContext::sendRefDomainsAxis()802 //{803 //std::set<StdString> domainIds;804 //std::set<StdString> axisIds;805 // 806 //// Find all reference domain and axis of all active fields807 //int numEnabledFiles = this->enabledFiles.size();808 //for (int i = 0; i < numEnabledFiles; ++i)809 //{810 //std::vector<CField*> enabledFields = this->enabledFiles[i]->getEnabledFields();811 //int numEnabledFields = enabledFields.size();812 //for (int j = 0; j < numEnabledFields; ++j)813 //{814 // const std::pair<StdString, StdString>& prDomAxisId = enabledFields[j]->getDomainAxisIds();815 //domainIds.insert(prDomAxisId.first);816 //axisIds.insert(prDomAxisId.second);817 //}818 //}819 // 820 //// Create all reference axis on server side821 //std::set<StdString>::iterator itDom, itAxis;822 //std::set<StdString>::const_iterator itE;823 // 824 //StdString axiDefRoot("axis_definition");825 //CAxisGroup* axisPtr = CAxisGroup::get(axiDefRoot);826 //itE = axisIds.end();827 //for (itAxis = axisIds.begin(); itAxis != itE; ++itAxis)828 //{829 //if (!itAxis->empty())830 //{831 //axisPtr->sendCreateChild(*itAxis);832 //CAxis::get(*itAxis)->sendAllAttributesToServer();833 //}834 //}835 // 836 //// Create all reference domains on server side837 //StdString domDefRoot("domain_definition");838 //CDomainGroup* domPtr = CDomainGroup::get(domDefRoot);839 //itE = domainIds.end();840 //for (itDom = domainIds.begin(); itDom != itE; ++itDom)841 //{842 //if (!itDom->empty()) {843 //domPtr->sendCreateChild(*itDom);844 //CDomain::get(*itDom)->sendAllAttributesToServer();845 //}846 //}847 //}801 void CContext::sendRefDomainsAxis() 802 { 803 std::set<StdString> domainIds; 804 std::set<StdString> axisIds; 805 806 // Find all reference domain and axis of all active fields 807 int numEnabledFiles = this->enabledFiles.size(); 808 for (int i = 0; i < numEnabledFiles; ++i) 809 { 810 std::vector<CField*> enabledFields = this->enabledFiles[i]->getEnabledFields(); 811 int numEnabledFields = enabledFields.size(); 812 for (int j = 0; j < numEnabledFields; ++j) 813 { 814 const std::pair<StdString, StdString>& prDomAxisId = enabledFields[j]->getRefDomainAxisIds(); 815 domainIds.insert(prDomAxisId.first); 816 axisIds.insert(prDomAxisId.second); 817 } 818 } 819 820 // Create all reference axis on server side 821 std::set<StdString>::iterator itDom, itAxis; 822 std::set<StdString>::const_iterator itE; 823 824 StdString axiDefRoot("axis_definition"); 825 CAxisGroup* axisPtr = CAxisGroup::get(axiDefRoot); 826 itE = axisIds.end(); 827 for (itAxis = axisIds.begin(); itAxis != itE; ++itAxis) 828 { 829 if (!itAxis->empty()) 830 { 831 axisPtr->sendCreateChild(*itAxis); 832 CAxis::get(*itAxis)->sendAllAttributesToServer(); 833 } 834 } 835 836 // Create all reference domains on server side 837 StdString domDefRoot("domain_definition"); 838 CDomainGroup* domPtr = CDomainGroup::get(domDefRoot); 839 itE = domainIds.end(); 840 for (itDom = domainIds.begin(); itDom != itE; ++itDom) 841 { 842 if (!itDom->empty()) { 843 domPtr->sendCreateChild(*itDom); 844 CDomain::get(*itDom)->sendAllAttributesToServer(); 845 } 846 } 847 } 848 848 849 849 //! Update calendar in each time step -
XIOS/trunk/src/node/distribution_client.cpp
r567 r569 3 3 \author Ha NGUYEN 4 4 \since 13 Jan 2015 5 \date 09 Feb20155 \date 09 Mars 2015 6 6 7 7 \brief Index distribution on client side. … … 56 56 itbDom = itDom = domList.begin(); iteDom = domList.end(); 57 57 itbAxis = itAxis = axisList.begin(); iteAxis = axisList.end(); 58 59 // First of all, every attribute of domain and axis should be checked60 // for (;itDom != iteDom; ++itDom) (*itDom)->checkAttributesOnClient();61 // for (;itAxis != iteAxis; ++itAxis) (*itAxis)->checkAttributesOnClient();62 58 63 59 readDistributionInfo(domList, axisList, axisDomainOrder); … … 574 570 } 575 571 572 /*! 573 Return local data index on client which are sent to servers 574 */ 576 575 const CArray<int,1>& CDistributionClient::getLocalDataIndexSendToServerOnClient() const 577 576 { -
XIOS/trunk/src/node/distribution_client.hpp
r567 r569 3 3 \author Ha NGUYEN 4 4 \since 13 Jan 2015 5 \date 09 Feb20155 \date 09 Mars 2015 6 6 7 7 \brief Index distribution on client side. … … 112 112 }; 113 113 114 /*! 115 A grid can have multiple dimension, so can its mask in the form of multi-dimension array. 116 It's not a good idea to store all multi-dimension arrays corresponding to each mask. 117 One of the ways is to convert this array into 1-dimension one and every process is taken place on it. 118 \param [in] multi-dimension array grid mask 119 */ 114 120 template<int N> 115 121 void CDistributionClient::readGridMaskInfo(const CArray<bool,N>& gridMask) -
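The readGridMaskInfo comment added above explains the choice of flattening the multi-dimensional grid mask into a single 1-D array before further processing. A minimal sketch of that flattening for the 2-D case, with hypothetical names (the committed template handles arbitrary rank through CArray<bool,N>):

#include <cstddef>
#include <vector>

// Flatten a 2-D mask into one 1-D vector by concatenating its rows,
// so that later loops can treat every grid point uniformly.
std::vector<bool> flattenMask2D(const std::vector<std::vector<bool> >& mask2d)
{
  std::vector<bool> flat;
  for (std::size_t j = 0; j < mask2d.size(); ++j)
    for (std::size_t i = 0; i < mask2d[j].size(); ++i)
      flat.push_back(mask2d[j][i]);
  return flat;
}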
XIOS/trunk/src/node/domain.cpp
r553 r569 15 15 #include "array_new.hpp" 16 16 #include "server_distribution_description.hpp" 17 #include "client_server_mapping_distributed.hpp" 17 18 18 19 namespace xios { … … 667 668 int zoom_jend=zoom_jbegin+zoom_nj-1 ; 668 669 669 std::vector<int> nGlobDomain(2);670 nGlobDomain[0] = ni_glo.getValue();671 nGlobDomain[1] = nj_glo.getValue();672 CServerDistributionDescription serverDescription(nGlobDomain);673 serverDescription.computeServerDistribution(nbServer, doComputeGlobalIndexServer);674 670 675 671 // Precompute number of index … … 706 702 } 707 703 708 CClientServerMapping clientServerMap; 709 clientServerMap.computeServerIndexMapping(globalIndexDomain, serverDescription.getGlobalIndex()); 710 const std::map<int, std::vector<size_t> >& globalIndexDomainOnServer = clientServerMap.getGlobalIndexOnServer(); 704 std::vector<int> nGlobDomain(2); 705 nGlobDomain[0] = ni_glo.getValue(); 706 nGlobDomain[1] = nj_glo.getValue(); 707 size_t globalSizeIndex = 1, indexBegin, indexEnd; 708 int range, clientSize = client->clientSize; 709 for (int i = 0; i < nGlobDomain.size(); ++i) globalSizeIndex *= nGlobDomain[i]; 710 indexBegin = 0; 711 for (int i = 0; i < clientSize; ++i) 712 { 713 range = globalSizeIndex / clientSize; 714 if (i < (globalSizeIndex%clientSize)) ++range; 715 if (i == client->clientRank) break; 716 indexBegin += range; 717 } 718 indexEnd = indexBegin + range - 1; 719 720 CServerDistributionDescription serverDescription(nGlobDomain); 721 serverDescription.computeServerGlobalIndexInRange(nbServer, std::make_pair<size_t,size_t>(indexBegin, indexEnd)); 722 CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), 723 client->intraComm); 724 clientServerMap->computeServerIndexMapping(globalIndexDomain); 725 const std::map<int, std::vector<size_t> >& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer(); 711 726 std::vector<int> connectedServerRank; 712 727 for (std::map<int, std::vector<size_t> >::const_iterator it = globalIndexDomainOnServer.begin(); it != globalIndexDomainOnServer.end(); ++it) { 713 728 connectedServerRank.push_back(it->first); 714 729 } 715 nbConnectedClients_ = clientServerMap .computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank);730 nbConnectedClients_ = clientServerMap->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank); 716 731 indSrv_ = globalIndexDomainOnServer; 732 733 delete clientServerMap; 717 734 } 718 735 -
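The block added to domain.cpp above first splits the global domain index space evenly across clients (spreading the remainder over the first clients) and only then asks the distributed mapping for the matching servers. The same arithmetic, isolated as a small helper with a hypothetical name:

#include <cstddef>
#include <utility>

// Return this client's inclusive [indexBegin, indexEnd] range, assuming
// clientRank is in [0, clientSize) and globalSizeIndex >= clientSize.
std::pair<std::size_t, std::size_t>
clientIndexRange(std::size_t globalSizeIndex, int clientSize, int clientRank)
{
  std::size_t indexBegin = 0, range = 0;
  for (int i = 0; i < clientSize; ++i)
  {
    range = globalSizeIndex / clientSize;
    if (i < (int)(globalSizeIndex % clientSize)) ++range; // first clients take one extra index
    if (i == clientRank) break;
    indexBegin += range;
  }
  return std::make_pair(indexBegin, indexBegin + range - 1);
}

For example, 100 global indices over 3 clients gives the ranges [0,33], [34,66] and [67,99].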
XIOS/trunk/src/node/field.cpp
r567 r569 154 154 if (!grid->doGridHaveDataDistributed()) 155 155 { 156 if (0 == client-> getClientRank())156 if (0 == client->clientRank) 157 157 { 158 158 for(it=grid->storeIndex_toSrv.begin();it!=grid->storeIndex_toSrv.end();it++) … … 799 799 \return pair of Domain and Axis id 800 800 */ 801 // const std::pair<StdString,StdString>& CField::getDomainAxisIds() 802 // { 803 // CGrid* cgPtr = getRelGrid(); 804 // if (NULL != cgPtr) 805 // { 806 // if (NULL != cgPtr->getRelDomain()) domAxisIds_.first = cgPtr->getRelDomain()->getId(); 807 // if (NULL != cgPtr->getRelAxis()) domAxisIds_.second = cgPtr->getRelAxis()->getId(); 808 // } 809 // 810 // return (domAxisIds_); 811 // } 801 const std::pair<StdString,StdString>& CField::getRefDomainAxisIds() 802 { 803 CGrid* cgPtr = getRelGrid(); 804 if (NULL != cgPtr) 805 { 806 std::vector<StdString>::iterator it; 807 if (!domain_ref.isEmpty()) 808 { 809 std::vector<StdString> domainList = cgPtr->getDomainList(); 810 it = std::find(domainList.begin(), domainList.end(), domain_ref.getValue()); 811 if (domainList.end() != it) domAxisIds_.first = *it; 812 } 813 814 if (!axis_ref.isEmpty()) 815 { 816 std::vector<StdString> axisList = cgPtr->getAxisList(); 817 it = std::find(axisList.begin(), axisList.end(), axis_ref.getValue()); 818 if (axisList.end() != it) domAxisIds_.second = *it; 819 } 820 } 821 return (domAxisIds_); 822 } 812 823 813 824 CVariable* CField::addVariable(const string& id) -
XIOS/trunk/src/node/field.hpp
r567 r569 157 157 158 158 159 // const std::pair<StdString, StdString>& getDomainAxisIds();159 const std::pair<StdString, StdString>& getRefDomainAxisIds(); 160 160 public : 161 161 /// Propriétés privées /// -
XIOS/trunk/src/node/grid.cpp
r568 r569 340 340 clientDistribution_->getLocalDataIndexSendToServerOnClient()); 341 341 342 // clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex(),343 // clientDistribution_->getLocalDataIndexSendToServerOnClient(),344 // serverDistributionDescription_->getGlobalIndex());345 342 const std::map<int, std::vector<size_t> >& globalIndexOnServer = clientServerMap_->getGlobalIndexOnServer(); 346 343 std::vector<int> connectedServerRank; … … 353 350 storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().numElements()); 354 351 storeIndex_client = (clientDistribution_->getLocalDataIndexOnClient()); 355 356 352 } 357 353 … … 457 453 if (!doGridHaveDataDistributed()) 458 454 { 459 if (0 == client-> getClientRank())455 if (0 == client->clientRank) 460 456 { 461 457 for (int ns = 0; itGlobal != iteMap; ++itGlobal, ++itLocal, ++ns) -
XIOS/trunk/src/server_distribution_description.cpp
r568 r569 3 3 \author Ha NGUYEN 4 4 \since 04 Jan 2015 5 \date 09 Feb20155 \date 09 Mars 2015 6 6 7 7 \brief Description of index distribution on server(s). … … 15 15 : nGlobal_(globalDimensionSize), indexBegin_(), dimensionSizes_(), globalIndex_(), vecGlobalIndex_() 16 16 { 17 18 17 } 19 18 20 19 CServerDistributionDescription::~CServerDistributionDescription() 21 20 { 22 // if (0 != globalIndex_) delete globalIndex_;23 21 if (!vecGlobalIndex_.empty()) 24 22 for (int i = 0; i < vecGlobalIndex_.size(); ++i) delete vecGlobalIndex_[i]; … … 92 90 } 93 91 92 /*! 93 Compute global index assigned to a server with a range.E.g: if a grid has 100 points and 94 there are 2 servers, the first one takes index from 0 to 49, the second has index from 50 to 99 95 \param [in] nServer number of server 96 \param [in] indexBeginEnd begining and ending index of range 97 \param [in] serType type of server distribution. For now, we can distribute server by band or plan 98 */ 94 99 void CServerDistributionDescription::computeServerGlobalIndexInRange(int nServer, 95 100 const std::pair<size_t, size_t>& indexBeginEnd, … … 117 122 size_t ssize = 1, idx = 0; 118 123 for (int j = 0; j < dim; ++j) ssize *= dimensionSizes_[idxServer][j]; 119 vecGlobalIndex_[idxServer] = new CArray<size_t,1>(ssize);120 124 121 125 std::vector<int> idxLoop(dim,0); 122 123 126 int innerLoopSize = dimensionSizes_[idxServer][0]; 124 127 … … 243 246 } 244 247 248 /*! 249 Get global index calculated by computeServerGlobalIndexInRange 250 */ 245 251 const boost::unordered_map<size_t,int>& CServerDistributionDescription::getGlobalIndexRange() const 246 252 { -
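The 100-points/2-servers example in the new computeServerGlobalIndexInRange comment corresponds to a plain banded split of the global index space. A minimal 1-D sketch of that arithmetic (hypothetical helper; the committed routine works on the multi-dimensional grid and supports several distribution types):

#include <cstddef>
#include <utility>
#include <vector>

// Give each server a contiguous, inclusive band of global indices, assuming
// nbGlobalPoints >= nServer. With 100 points and 2 servers: [0,49] and [50,99].
std::vector<std::pair<std::size_t, std::size_t> >
serverBands(std::size_t nbGlobalPoints, int nServer)
{
  std::vector<std::pair<std::size_t, std::size_t> > bands(nServer);
  std::size_t begin = 0;
  for (int s = 0; s < nServer; ++s)
  {
    std::size_t size = nbGlobalPoints / nServer;
    if (s < (int)(nbGlobalPoints % nServer)) ++size; // spread the remainder over the first servers
    bands[s] = std::make_pair(begin, begin + size - 1);
    begin += size;
  }
  return bands;
}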
XIOS/trunk/src/test/test_new_features.f90
r567 r569 17 17 INTEGER,PARAMETER :: ni_glo=100 18 18 INTEGER,PARAMETER :: nj_glo=100 19 INTEGER,PARAMETER :: llm= 519 INTEGER,PARAMETER :: llm=10 20 20 DOUBLE PRECISION :: lval(llm)=1 21 21 TYPE(xios_field) :: field_hdl … … 135 135 CALL xios_get_domain_attr("domain_A",ni=ni,lonvalue=lonvalue) 136 136 137 print *,"ni",ni138 print *,"lonvalue",lonvalue ;137 ! print *,"ni",ni 138 ! print *,"lonvalue",lonvalue ; 139 139 140 140 CALL xios_is_defined_field_attr("field_A",enabled=ok) … … 146 146 CALL xios_update_calendar(ts) 147 147 CALL xios_send_field("field_A",field_A) 148 CALL xios_send_field("field_Axis",field_Axis)148 ! CALL xios_send_field("field_Axis",field_Axis) 149 149 CALL xios_send_field("field_All_Axis",field_All_Axis) 150 150 CALL wait_us(5000) ;