Changeset 1070 for XIOS


Timestamp:
03/09/17 18:31:07
Author:
yushan
Message:

Preparation for merge from trunk

Location:
XIOS/dev/branch_yushan/src
Files:
14 edited

  • XIOS/dev/branch_yushan/src/buffer_client.cpp

    r1037 r1070  
    8181      traceOn(); 
    8282      if (flag == true) pending = false; 
    83  
    84       //printf("\ncheckbuffer, flag = %d, pending = %d; request = %p, count = %d\n", flag, pending, &request, count); 
    8583    } 
    8684 
  • XIOS/dev/branch_yushan/src/client.cpp

    r1068 r1070  
    5151          int myColor ; 
    5252          int i,c ; 
    53           //MPI_Comm newComm ; 
    5453 
    5554          MPI_Comm_size(CXios::globalComm,&size) ; 
     
    229228 
    230229      MPI_Comm_rank(intraComm,&rank) ; 
    231        
    232       //printf("CClient::finalize called isServer = %d\n", CXios::isServer); 
    233   
     230        
    234231      if (!CXios::isServer) 
    235232      { 
     
    238235        { 
    239236          MPI_Send(&msg,1,MPI_INT,0,0,interComm) ; 
    240           //printf(" CClient : send finalize sign to server 0\n"); 
    241237        } 
    242238      } 
     
    254250      { 
    255251        if (CXios::usingOasis) oasis_finalize(); 
    256         else {MPI_Finalize() ; printf("CClient::finalize called MPI_finalize\n");} 
     252        else  MPI_Finalize();  
    257253      } 
    258254       
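
    The client.cpp hunk above tidies the client-side finalize path: a client that is not also a server sends a single-integer finalize signal to server rank 0 over the inter-communicator, and a plain MPI run then calls MPI_Finalize without the extra logging. A minimal sketch of that handshake, assuming illustrative names (interComm, isServer, usingOasis) in place of the XIOS members CClient::interComm, CXios::isServer and CXios::usingOasis:

        #include <mpi.h>

        // Sketch only: the parameters stand in for the XIOS globals shown in the diff.
        void client_finalize(MPI_Comm interComm, bool isServer, bool usingOasis)
        {
          if (!isServer)
          {
            int msg = 0;                                  // finalize signal payload
            MPI_Send(&msg, 1, MPI_INT, 0, 0, interComm);  // notify server rank 0
          }

          if (usingOasis)
          {
            // oasis_finalize();                          // the coupler finalizes MPI
          }
          else
          {
            MPI_Finalize();                               // plain MPI run
          }
        }
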
  • XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp

    r1053 r1070  
    107107                                                                 int level) 
    108108{ 
    109   int clientRank; 
    110   MPI_Comm_rank(commLevel,&clientRank); 
    111109  int groupRankBegin = this->getGroupBegin()[level]; 
    112110  int nbClient = this->getNbInGroup()[level]; 
     
    200198 
    201199  std::vector<ep_lib::MPI_Status> status(request.size()); 
    202  
    203   //printf("1(%d): calling wait all for %lu requests\n", clientRank, request.size()); 
    204  
    205200  MPI_Waitall(request.size(), &request[0], &status[0]); 
    206201 
    207  
    208   //printf("               1(%d): calling wait all for %lu requests OK\n", clientRank, request.size()); 
    209202 
    210203  CArray<size_t,1>* tmpGlobalIndex; 
     
    311304 
    312305  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 
    313   //printf("2(%d): calling wait all for %lu requests\n", clientRank, requestOnReturn.size()); 
    314306  MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
    315307 
    316   //printf("            2(%d): calling wait all for %lu requests OK\n", clientRank, requestOnReturn.size()); 
    317308 
    318309  Index2VectorInfoTypeMap indexToInfoMapping; 
     
    383374                                                            int level) 
    384375{ 
    385   //printf("in computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, const MPI_Comm& commLevel, int level)\n"); 
    386376  int clientRank; 
    387377  MPI_Comm_rank(commLevel,&clientRank); 
     
    433423    { 
    434424      client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;; 
    435   //          ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]); 
    436425      ProcessDHTElement<InfoType>::packElement(infoTmp[idx], client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]); 
    437426      ++sendNbIndexBuff[indexClient]; 
    438427    } 
    439428  } 
    440  
    441   //printf("check 4 OK. clientRank = %d\n", clientRank); 
    442429 
    443430  // Calculate from how many clients each client receive message. 
     
    446433  sendRecvRank(level, sendBuff, sendNbIndexBuff, 
    447434               recvRankClient, recvNbIndexClientCount); 
    448   //printf("sendRecvRank OK\n"); 
    449435 
    450436  int recvNbIndexCount = 0; 
     
    459445    recvInfoBuff = new unsigned char[recvNbIndexCount*ProcessDHTElement<InfoType>::typeSize()]; 
    460446  } 
    461  
    462   //printf("check 5 OK. clientRank = %d\n", clientRank); 
    463447 
    464448  // If a client holds information about index and the corresponding which don't belong to it, 
     
    483467  } 
    484468 
    485   //printf("check 6 OK. clientRank = %d\n", clientRank); 
    486  
    487469  boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex, 
    488470                                                iteIndex = client2ClientIndex.end(); 
     
    493475  } 
    494476 
    495   //printf("check 7 OK. clientRank = %d\n", clientRank); 
    496  
    497477  boost::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo, 
    498478                                                      iteInfo = client2ClientInfo.end(); 
     
    503483  } 
    504484 
    505   //printf("check 8 OK. clientRank = %d\n", clientRank); 
    506485  std::vector<ep_lib::MPI_Status> status(request.size()); 
    507486 
     
    528507  } 
    529508 
    530   //printf("check 9 OK. clientRank = %d\n", clientRank); 
    531  
    532509  if (0 != recvNbIndexCount) 
    533510  { 
     
    543520      delete [] it->second; 
    544521 
    545   //printf("check 10 OK. clientRank = %d\n", clientRank); 
    546522  // Ok, now do something recursive 
    547523  if (0 < level) 
     
    608584  ep_lib::MPI_Request request; 
    609585  requestSendInfo.push_back(request); 
    610   //printf("MPI_IsendInfo(info, infoSize, MPI_CHAR,... char count = %d, dest = %d, buf_size = %d\n", infoSize, clientDestRank, sizeof(*info) ); 
    611586  MPI_Isend(info, infoSize, MPI_CHAR, 
    612587            clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back())); 
     
    717692  } 
    718693   
    719   int clientRank; 
    720   MPI_Comm_rank(this->internalComm_,&clientRank); 
    721   //printf("4(%d): calling wait all for %lu requests\n", clientRank, sendNbRank.size()+recvNbRank.size()); 
     694   
    722695  MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 
    723   //printf("        4(%d): calling wait all for %lu requests OK\n", clientRank, sendNbRank.size()+recvNbRank.size()); 
    724696} 
    725697 
     
    738710                                                 std::vector<int>& recvNbRank, std::vector<int>& recvNbElements) 
    739711{ 
    740   int myRank; 
    741   MPI_Comm_rank(MPI_COMM_WORLD, &myRank); 
    742   //printf("myRank = %d, in sendRecvRank(int level, const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements, std::vector<int>& recvNbRank, std::vector<int>& recvNbElements)\n", myRank); 
    743712  int groupBegin = this->getGroupBegin()[level]; 
    744713 
     
    757726  for (int idx = 0; idx < recvBuffSize; ++idx) 
    758727  { 
    759     //printf("myRank = %d starts irecv with src = %d, tag = %d, idx = %d\n", myRank, recvRank[idx], MPI_DHT_INDEX_0, idx); 
    760728    MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 
    761729              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 
    762     //printf("myRank = %d MPI_Irecv OK, idx = %d, nRequest = %d\n", myRank, idx, nRequest); 
    763730    ++nRequest; 
    764731  } 
    765732 
    766   //printf("myRank = %d, check 1 OK\n", myRank); 
    767733 
    768734  for (int idx = 0; idx < sendBuffSize; ++idx) 
     
    775741  for (int idx = 0; idx < sendBuffSize; ++idx) 
    776742  { 
    777     //printf("myRank = %d starts isend with dest = %d, tag = %d, idx = %d\n", myRank, sendRank[idx], MPI_DHT_INDEX_0, idx); 
    778743    MPI_Isend(&sendBuff[idx*2], 2, MPI_INT, 
    779744              sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 
    780     //printf("myRank = %d MPI_Isend OK, idx = %d, nRequest = %d\n", myRank, idx, nRequest); 
    781745    ++nRequest; 
    782746  } 
     
    784748  MPI_Barrier(this->internalComm_); 
    785749 
    786   //printf("myRank = %d, check 2 OK\n", myRank); 
    787  
    788   int clientRank; 
    789   MPI_Comm_rank(this->internalComm_,&clientRank); 
    790  
    791   //printf("5(%d): calling wait all for %lu requests\n", myRank, sendBuffSize+recvBuffSize); 
     750 
    792751  MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 
    793   //printf("            5(%d): calling wait all for %lu requests OK\n", myRank, sendBuffSize+recvBuffSize); 
    794   //printf("check 3 OK\n"); 
    795752 
    796753  int nbRecvRank = 0, nbRecvElements = 0; 
     
    805762    } 
    806763  } 
    807   //printf("check 4 OK\n"); 
    808 } 
    809  
    810 } 
     764} 
     765 
     766} 
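
    The client_client_dht_template_impl.hpp hunks strip per-rank printf tracing around a recurring non-blocking exchange: MPI_Irecv and MPI_Isend calls are accumulated into one request vector and completed with a single MPI_Waitall. A self-contained sketch of that pattern, assuming a plain MPI communicator and tag 0 instead of the ep_lib wrappers and the MPI_DHT_INDEX_0 tag used by XIOS:

        #include <mpi.h>
        #include <vector>

        // Each rank posts one receive per source and one send per destination
        // (two ints each), then waits for the whole batch at once.
        void exchange_counts(MPI_Comm comm,
                             const std::vector<int>& sendRank, std::vector<int>& sendBuff,
                             const std::vector<int>& recvRank, std::vector<int>& recvBuff)
        {
          const int tag = 0;
          std::vector<MPI_Request> request(sendRank.size() + recvRank.size());
          int nRequest = 0;

          recvBuff.resize(2 * recvRank.size());
          for (int idx = 0; idx < (int)recvRank.size(); ++idx)
            MPI_Irecv(&recvBuff[2 * idx], 2, MPI_INT, recvRank[idx], tag, comm, &request[nRequest++]);

          for (int idx = 0; idx < (int)sendRank.size(); ++idx)
            MPI_Isend(&sendBuff[2 * idx], 2, MPI_INT, sendRank[idx], tag, comm, &request[nRequest++]);

          std::vector<MPI_Status> status(request.size());
          MPI_Waitall((int)request.size(), &request[0], &status[0]);
        }
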
  • XIOS/dev/branch_yushan/src/context_client.cpp

    r1060 r1070  
    116116      while (parentServer->server->hasPendingEvent()) 
    117117      { 
    118        parentServer->server->eventLoop(); //printf("parentServer->server->eventLoop()\n"); 
     118       parentServer->server->eventLoop(); 
    119119      } 
    120120    } 
  • XIOS/dev/branch_yushan/src/context_server.cpp

    r1067 r1070  
    7676    for(rank=0;rank<commSize;rank++) 
    7777    { 
    78       //printf("in CContextServer::listen, rank = %d, commSize = %d, pendingRequest.find(rank) = %d\n", rank, commSize, pendingRequest.find(rank)); 
    7978      if (pendingRequest.find(rank)==pendingRequest.end()) 
    8079      { 
     
    9291            mapBufferSize_.insert(std::make_pair(rank, buffSize)); 
    9392            it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first; 
    94             //printf("find message, is buffer end, receiving, buffSize = %d, rank = %d, commSize = %d\n", buffSize, rank, commSize); 
    9593          } 
    9694          else 
     
    103101              ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 
    104102              bufferRequest[rank]=addr; 
    105               //printf("find message, i-receiving to buffer %p, rank = %d, commSize = %d\n", addr, rank, commSize); 
    106103            } 
    107104          } 
     
    148145 
    149146    CBufferIn buffer(buff,count); 
    150     //char* startBuffer,endBuffer; 
    151147    int size; 
    152     //int offset; 
    153148    size_t timeLine; 
    154149    map<size_t,CEventServer*>::iterator it; 
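
    The context_server.cpp hunk removes tracing from the listen loop, which keeps at most one pending MPI_Irecv per client rank and records it in a map until it completes. A simplified sketch of that bookkeeping, assuming raw std::vector<char> buffers and MPI_Iprobe in place of the CServerBuffer machinery shown in the diff:

        #include <mpi.h>
        #include <map>
        #include <vector>

        // Post at most one pending receive per client rank; remember it until it
        // completes. Buffer handling is reduced to a std::vector<char> per rank.
        void listen_once(MPI_Comm interComm, int commSize,
                         std::map<int, MPI_Request>& pendingRequest,
                         std::map<int, std::vector<char> >& recvBuffer)
        {
          const int tag = 20;  // tag used in the hunk above
          for (int rank = 0; rank < commSize; ++rank)
          {
            if (pendingRequest.find(rank) != pendingRequest.end()) continue;  // already listening

            int flag = 0;
            MPI_Status status;
            MPI_Iprobe(rank, tag, interComm, &flag, &status);
            if (!flag) continue;                        // nothing posted by this rank yet

            int count = 0;
            MPI_Get_count(&status, MPI_CHAR, &count);   // incoming message size in bytes
            std::vector<char>& buf = recvBuffer[rank];
            buf.resize(count);
            MPI_Irecv(buf.data(), count, MPI_CHAR, rank, tag, interComm, &pendingRequest[rank]);
          }
        }
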
  • XIOS/dev/branch_yushan/src/cxios.cpp

    r1069 r1070  
    8686       
    8787    globalComm = passage[omp_get_thread_num()]; 
    88      
    89     // int tmp_size; 
    90     // MPI_Comm_size(globalComm, &tmp_size); 
    91     // if(isClient) printf("Client : globalcomm size = %d\n", tmp_size); 
    92     // if(isServer) printf("Server : globalcomm size = %d\n", tmp_size); 
    93  
    9488     
    9589  } 
  • XIOS/dev/branch_yushan/src/interface/c/icdata.cpp

    r1069 r1070  
    9595     comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm)); 
    9696 
    97      
    9897     CClient::registerContext(str,comm); 
    99       
    100      //printf("icdata.cpp: client register context %s : %p\n", context_id, &comm); 
    101       
     98           
    10299     CTimer::get("XIOS init context").suspend(); 
    103100     CTimer::get("XIOS").suspend(); 
     
    289286      { 
    290287        CVariable::get(context->getId(), varIdStr)->setData<double>(data); 
    291         //CVariable::get(context->getId(), varIdStr)->sendValue(); 
    292288      } 
    293289 
     
    310306      { 
    311307        CVariable::get(context->getId(), varIdStr)->setData<float>(data); 
    312         //CVariable::get(context->getId(), varIdStr)->sendValue(); 
    313308      } 
    314309 
     
    331326      { 
    332327        CVariable::get(context->getId(), varIdStr)->setData<int>(data); 
    333         //CVariable::get(context->getId(), varIdStr)->sendValue(); 
    334328      } 
    335329 
     
    353347      { 
    354348        CVariable::get(context->getId(), varIdStr)->setData<bool>(data); 
    355         //CVariable::get(context->getId(), varIdStr)->sendValue(); 
    356349      } 
    357350 
     
    379372      { 
    380373        CVariable::get(context->getId(), varIdStr)->setData<string>(dataStr); 
    381         //CVariable::get(context->getId(), varIdStr)->sendValue(); 
    382374      } 
    383375 
  • XIOS/dev/branch_yushan/src/io/onetcdf4.cpp

    r1069 r1070  
    4747         wmpi = comm && !multifile; 
    4848          
    49          //ep_lib::MPI_Info info_null; 
    5049 
    5150         if (wmpi) 
     
    5756            if (wmpi) 
    5857            { 
    59                // printf("start creating file with createPar\n"); 
    60                //CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), static_cast<MPI_Info>(info_null.mpi_info), this->ncidp); 
    6158               CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), MPI_INFO_NULL_STD, this->ncidp); 
    62                // printf("creating file with createPar\n"); 
    6359            } 
    6460            else 
    6561            { 
    6662               CNetCdfInterface::create(filename, mode, this->ncidp); 
    67                // printf("creating file with create\n");   
    6863            }   
    6964                
     
    7671            if (wmpi) 
    7772            { 
    78 //               printf("start opening file with openPar\n"); 
    79                // CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), static_cast<MPI_Info>(info_null.mpi_info), this->ncidp); 
    8073               CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), MPI_INFO_NULL_STD, this->ncidp); 
    81 //               printf("opening file with openPar\n"); 
    8274            } 
    8375            else 
    8476            { 
    8577               CNetCdfInterface::open(filename, mode, this->ncidp); 
    86                // printf("opening file with open\n"); 
    8778            } 
    8879 
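
    The onetcdf4.cpp hunk drops a locally built MPI_Info and hands the standard null info object to the parallel create/open calls. A rough sketch of the corresponding NetCDF C calls, assuming a parallel-enabled NetCDF build; XIOS itself goes through its CNetCdfInterface wrapper rather than the C API directly, and the creation flags here are illustrative:

        #include <mpi.h>
        #include <netcdf.h>
        #include <netcdf_par.h>
        #include <cstdio>

        // Create a NetCDF-4 file collectively on comm, passing MPI_INFO_NULL
        // instead of constructing a dedicated MPI_Info object.
        int create_parallel_file(const char* filename, MPI_Comm comm, int* ncidp)
        {
          int status = nc_create_par(filename, NC_NETCDF4 | NC_CLOBBER, comm,
                                     MPI_INFO_NULL, ncidp);
          if (status != NC_NOERR)
            std::fprintf(stderr, "nc_create_par failed: %s\n", nc_strerror(status));
          return status;
        }
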
  • XIOS/dev/branch_yushan/src/node/context.cpp

    r1067 r1070  
    390390   void CContext::closeDefinition(void) 
    391391   { 
    392      int myRank; 
    393      MPI_Comm_rank(MPI_COMM_WORLD, &myRank); 
    394  
    395      //printf("myRank = %d, hasClient = %d, hasServer = %d\n", myRank, hasClient, hasServer); 
    396392 
    397393     // There is nothing client need to send to server 
     
    400396       // After xml is parsed, there are some more works with post processing 
    401397       postProcessing();  
    402        //printf("myRank = %d,                postProcessing OK\n", myRank); 
    403      } 
    404      setClientServerBuffer(); //printf("myRank = %d, setClientServerBuffer OK\n", myRank); 
    405  
    406      //printf("hasClient = %d, hasServer = %d\n", hasClient, hasServer); 
     398     } 
     399     setClientServerBuffer();  
    407400 
    408401     if (hasClient && !hasServer) 
    409402     { 
    410403      // Send all attributes of current context to server 
    411       this->sendAllAttributesToServer(); //printf("myRank = %d, this->sendAllAttributesToServer OK\n", myRank); 
     404      this->sendAllAttributesToServer(); 
    412405 
    413406      // Send all attributes of current calendar 
    414407      CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer(); 
    415       //printf("myRank = %d, CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer OK\n", myRank); 
    416408 
    417409      // We have enough information to send to server 
    418410      // First of all, send all enabled files 
    419        sendEnabledFiles();  //printf("myRank = %d, sendEnabledFiles OK\n", myRank); 
     411       sendEnabledFiles();  
    420412 
    421413      // Then, send all enabled fields 
    422        sendEnabledFields();  //printf("myRank = %d, sendEnabledFields OK\n", myRank); 
     414       sendEnabledFields();  
    423415 
    424416      // At last, we have all info of domain and axis, then send them 
    425        sendRefDomainsAxis();  //printf("myRank = %d, sendRefDomainsAxis OK\n", myRank); 
    426  
     417       sendRefDomainsAxis();  
    427418      // After that, send all grid (if any) 
    428        sendRefGrid(); //printf("myRank = %d, sendRefGrid OK\n", myRank); 
     419       sendRefGrid();  
    429420    } 
    430421 
     
    435426    if (hasClient) 
    436427    { 
    437       this->buildFilterGraphOfEnabledFields();  //printf("myRank = %d, buildFilterGraphOfEnabledFields OK\n", myRank); 
    438       buildFilterGraphOfFieldsWithReadAccess();  //printf("myRank = %d, buildFilterGraphOfFieldsWithReadAccess OK\n", myRank); 
    439       this->solveAllRefOfEnabledFields(true);  //printf("myRank = %d, solveAllRefOfEnabledFields OK\n", myRank); 
     428      this->buildFilterGraphOfEnabledFields();   
     429      buildFilterGraphOfFieldsWithReadAccess();   
     430      this->solveAllRefOfEnabledFields(true);  
    440431    } 
    441432 
     
    448439    if (hasClient) 
    449440    { 
    450       sendCreateFileHeader();  //printf("myRank = %d, sendCreateFileHeader OK\n", myRank); 
    451  
    452       startPrefetchingOfEnabledReadModeFiles();  //printf("myRank = %d, startPrefetchingOfEnabledReadModeFiles OK\n", myRank); 
     441      sendCreateFileHeader();  
     442 
     443      startPrefetchingOfEnabledReadModeFiles();   
    453444    } 
    454445   } 
  • XIOS/dev/branch_yushan/src/node/domain.cpp

    r1053 r1070  
    13031303   void CDomain::checkAttributesOnClientAfterTransformation() 
    13041304   { 
    1305      int myRank; 
    1306      MPI_Comm_rank(MPI_COMM_WORLD, &myRank); 
    1307  
    1308      CContext* context=CContext::getCurrent() ;  //printf("myRank = %d, CContext::getCurrent OK\n", myRank); 
    1309  
    1310      //printf("myRank = %d, this->isClientAfterTransformationChecked = %d\n", myRank, this->isClientAfterTransformationChecked); 
     1305      
     1306     CContext* context=CContext::getCurrent() ; 
     1307 
    13111308     if (this->isClientAfterTransformationChecked) return; 
    1312      //printf("myRank = %d, context->hasClient = %d\n", myRank, context->hasClient); 
    13131309     if (context->hasClient) 
    13141310     { 
    1315        this->checkMask();  //printf("myRank = %d, this->checkMask OK\n", myRank); 
    1316        //printf("myRank = %d, hasLonLat = %d, hasArea = %d, isCompressible_ = %d\n", myRank, hasLonLat, hasArea, isCompressible_); 
     1311       this->checkMask();  
    13171312       if (hasLonLat || hasArea || isCompressible_)  
    13181313        { 
    1319           //printf("myRank = %d, start this->computeConnectedServer\n", myRank); 
    13201314          this->computeConnectedServer(); 
    1321           //printf("myRank = %d, this->computeConnectedServer OK\n", myRank); 
    1322         } 
    1323        //printf("myRank = %d, hasLonLat = %d\n", myRank, hasLonLat); 
     1315        } 
    13241316       if (hasLonLat)  
    13251317        { 
    13261318          this->completeLonLatClient(); 
    1327           //printf("myRank = %d, this->completeLonLatClient OK\n", myRank); 
    13281319        } 
    13291320     } 
     
    14601451  void CDomain::computeConnectedServer(void) 
    14611452  { 
    1462     int myRank; 
    1463     MPI_Comm_rank(MPI_COMM_WORLD, &myRank); 
    14641453 
    14651454    CContext* context=CContext::getCurrent() ; 
     
    15761565    else serverDescription.computeServerGlobalIndexInRange(std::make_pair<size_t,size_t>(indexBegin, indexEnd), 1); 
    15771566 
    1578     //printf("myRank = %d, check 7 OK\n", myRank); 
    1579  
    15801567    CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), 
    15811568                                                                                client->intraComm); 
    1582     //printf("myRank = %d new OK\n", myRank); 
    1583  
    15841569    clientServerMap->computeServerIndexMapping(globalIndexDomain);   
    1585     //printf("myRank = %d, clientServerMap->computeServerIndexMapping(globalIndexDomain) OK\n", myRank); 
    15861570     
    15871571    const CClientServerMapping::GlobalIndexMap& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer(); 
    1588     //printf("myRank = %d, clientServerMap->getGlobalIndexOnServer OK\n", myRank); 
    15891572     
    1590     //printf("myRank = %d, check 8 OK\n", myRank); 
    1591  
    15921573    CClientServerMapping::GlobalIndexMap::const_iterator it  = globalIndexDomainOnServer.begin(), 
    15931574                                                         ite = globalIndexDomainOnServer.end(); 
     
    16221603    } 
    16231604 
    1624     //printf("myRank = %d, check 9 OK\n", myRank); 
    1625  
    16261605    connectedServerRank_.clear(); 
    16271606    for (it = globalIndexDomainOnServer.begin(); it != ite; ++it) { 
     
    16321611 
    16331612    delete clientServerMap; 
    1634     //printf("myRank = %d, check 10 OK\n", myRank); 
    16351613  } 
    16361614 
  • XIOS/dev/branch_yushan/src/node/field.cpp

    r1037 r1070  
    621621   void CField::solveAllReferenceEnabledField(bool doSending2Server) 
    622622   { 
    623      int myRank; 
    624      MPI_Comm_rank(MPI_COMM_WORLD, &myRank); 
    625  
    626      CContext* context = CContext::getCurrent();  //printf("my_Rank = %d, CContext* context = CContext::getCurrent OK\n", myRank); 
    627      solveOnlyReferenceEnabledField(doSending2Server);  //printf("my_Rank = %d, solveOnlyReferenceEnabledField(doSending2Server) OK\n", myRank); 
     623 
     624     CContext* context = CContext::getCurrent();  
     625     solveOnlyReferenceEnabledField(doSending2Server); 
    628626 
    629627     if (!areAllReferenceSolved) 
     
    633631        if (context->hasClient) 
    634632        { 
    635           solveRefInheritance(true);  //printf("my_Rank = %d, solveRefInheritance(true) OK\n", myRank); 
     633          solveRefInheritance(true);   
    636634          if (hasDirectFieldReference())  
    637635          { 
    638636            getDirectFieldReference()->solveAllReferenceEnabledField(false);  
    639             //printf("my_Rank = %d, getDirectFieldReference()->solveAllReferenceEnabledField(false) OK\n", myRank); 
     637             
    640638          } 
    641639        } 
     
    643641        { 
    644642          solveServerOperation(); 
    645           //printf("my_Rank = %d, solveServerOperation OK\n", myRank); 
     643           
    646644        } 
    647645 
    648         solveGridReference();  //printf("my_Rank = %d, solveGridReference OK\n", myRank); 
    649      } 
    650  
    651      solveGridDomainAxisRef(doSending2Server);  //printf("my_Rank = %d, solveGridDomainAxisRef(doSending2Server) OK\n", myRank); 
     646        solveGridReference();   
     647     } 
     648 
     649     solveGridDomainAxisRef(doSending2Server);  
    652650 
    653651     if (context->hasClient) 
    654652     { 
    655        solveTransformedGrid();  //printf("my_Rank = %d, solveTransformedGrid OK\n", myRank); 
    656      } 
    657  
    658      solveCheckMaskIndex(doSending2Server);  //printf("FIELD.CPP: my_Rank = %d, solveCheckMaskIndex(doSending2Server) OK\n", myRank); 
     653       solveTransformedGrid();  
     654     } 
     655 
     656     solveCheckMaskIndex(doSending2Server);  
    659657   } 
    660658 
  • XIOS/dev/branch_yushan/src/node/grid.cpp

    r1037 r1070  
    166166   void CGrid::checkAttributesAfterTransformation() 
    167167   { 
    168       int myRank; 
    169       MPI_Comm_rank(MPI_COMM_WORLD, &myRank); 
    170  
    171       setAxisList();  //printf("myRank = %d, setAxisList OK\n", myRank); 
    172       std::vector<CAxis*> axisListP = this->getAxis();  //printf("myRank = %d, this->getAxis OK\n", myRank); 
     168       
     169      setAxisList();   
     170      std::vector<CAxis*> axisListP = this->getAxis();  
    173171      if (!axisListP.empty()) 
    174172      { 
     
    183181            axisPositionInGrid_.push_back(idx); 
    184182            ++idx; 
    185             //printf("myRank = %d, axisPositionInGrid_.push_back OK\n", myRank); 
     183             
    186184          } 
    187185          else if (2 == elementDimension) idx += 2; 
     
    191189        { 
    192190          axisListP[i]->checkAttributesOnClientAfterTransformation(globalDim_,axisPositionInGrid_[i]); 
    193           //printf("myRank = %d, axisListP[%d/%d]->checkAttributesOnClientAfterTransformation OK\n", myRank, i, axisListP.size()); 
    194         } 
    195       } 
    196  
    197       setDomainList(); //printf("myRank = %d, setDomainList OK\n", myRank); 
    198       std::vector<CDomain*> domListP = this->getDomains(); //printf("myRank = %d, this->getDomains OK\n", myRank); 
     191        } 
     192      } 
     193 
     194      setDomainList();  
     195      std::vector<CDomain*> domListP = this->getDomains(); 
    199196      if (!domListP.empty()) 
    200197      { 
    201198        for (int i = 0; i < domListP.size(); ++i) 
    202199        { 
    203           //printf("myRank = %d, start domListP[%d]->checkAttributesOnClientAfterTransformation\n", myRank, i); 
    204200          domListP[i]->checkAttributesOnClientAfterTransformation(); 
    205           //printf("myRank = %d, domListP[%d]->checkAttributesOnClientAfterTransformation OK\n", myRank, i); 
    206201        } 
    207202      } 
  • XIOS/dev/branch_yushan/src/server.cpp

    r1069 r1070  
    165165      { 
    166166        if (CXios::usingOasis) oasis_finalize(); 
    167         //else  {MPI_Finalize() ; printf("CServer::finalize called MPI_finalize\n");} 
     167        //else  {MPI_Finalize() ;} 
    168168      } 
    169169 
  • XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp

    r1053 r1070  
    531531 
    532532  status.resize(requests.size()); 
    533   //printf("(%d) src/transformation/grid_transformation 1st waitall\n", clientRank); 
    534533  MPI_Waitall(requests.size(), &requests[0], &status[0]); 
    535   //printf("(%d) src/transformation/grid_transformation 1st waitall OK\n", clientRank); 
    536534 
    537535  // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 
     
    585583  } 
    586584  status.resize(requests.size()); 
    587   //printf("(%d) src/transformation/grid_transformation 2nd waitall\n", clientRank); 
    588585  MPI_Waitall(requests.size(), &requests[0], &status[0]); 
    589   //printf("(%d) src/transformation/grid_transformation 2nd waitall OK\n", clientRank); 
    590586 
    591587  // Cool, now we can fill in local index of grid destination (counted for masked index) 