Timestamp: 06/21/17 09:09:59 (7 years ago)
Author: yushan
Message: save modif
Location: XIOS/dev/branch_yushan_merged/src
Files: 8 edited
Legend:

  unmodified  (both old and new line numbers shown)
  + added     (new line number only)
  - removed   (old line number only)
  • XIOS/dev/branch_yushan_merged/src/buffer_client.cpp

r1134 → r1176

 88  89        {
 89  89          MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
     90   +      #pragma omp critical (_output)
 90  91          pending = true;
 91  92          if (current == 1) current = 0;
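The pragma added at new line 90 is a named OpenMP critical construct. With no braces it guards only the single statement that follows it (here pending = true;), serializing that update of shared send bookkeeping across the OpenMP threads that the EP branch runs inside each MPI process. Below is a minimal standalone sketch of the construct, not XIOS code; the flag names only mirror the bookkeeping visible in the diff.

  #include <omp.h>
  #include <cstdio>

  int main()
  {
    bool pending = false;          // shared flags, analogous to the buffer bookkeeping
    int  current = 0;

    #pragma omp parallel num_threads(4)
    {
      // ... each thread would post its own non-blocking send here ...

      #pragma omp critical (_output)
      pending = true;              // only this statement is inside the critical region

      #pragma omp critical (_output)
      {                            // braces extend the region to several statements
        if (current == 1) current = 0;
      }
    }

    std::printf("pending=%d current=%d\n", (int)pending, current);
    return 0;
  }

Build with the compiler's OpenMP flag (e.g. -fopenmp for GCC). Both regions share the name (_output), so they also exclude each other.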
  • XIOS/dev/branch_yushan_merged/src/client.cpp

r1164 → r1176

 107  107            MPI_Comm_rank(intraComm,&intraCommRank) ;
 108  108
 109         -       #pragma omp critical(_output)
      109    +       /*#pragma omp critical(_output)
 110  110            {
 111  111              info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
 112  112                 <<" intraCommRank :"<<intraCommRank<<"  serverLeader "<< serverLeader
 113  113                 <<" globalComm : "<< &(CXios::globalComm) << endl ;
 114         -       }
      114    +       }*/
 115  115
 116  116
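The block commented out here was an info(10) trace wrapped in the same named critical section, which is the usual way to keep stream output from several threads from interleaving. A standalone sketch of that pattern follows; it is not XIOS code, and plain std::cout stands in for the XIOS info() logger.

  #include <omp.h>
  #include <iostream>

  int main()
  {
    #pragma omp parallel num_threads(4)
    {
      int threadRank = omp_get_thread_num();

      // Without the critical section, pieces of these << chains could
      // interleave between threads; with it, each message comes out whole.
      #pragma omp critical (_output)
      {
        std::cout << "intercommCreate::client thread " << threadRank
                  << " reporting" << std::endl;
      }
    }
    return 0;
  }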
  • XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp

r1172 → r1176

 104  105     int clientRank;
 105  105     MPI_Comm_rank(commLevel,&clientRank);
      106  +  ep_lib::MPI_Barrier(commLevel);
 106  107     int groupRankBegin = this->getGroupBegin()[level];
 107  108     int nbClient = this->getNbInGroup()[level];
  …
 180  181     int currentIndex = 0;
 181  182     int nbRecvClient = recvRankClient.size();
 182       -  for (int idx = 0; idx < nbRecvClient; ++idx)
 183       -  {
 184       -    if (0 != recvNbIndexClientCount[idx])
 185       -    {
 186       -      recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
 187       -    }
 188       -    currentIndex += recvNbIndexClientCount[idx];
 189       -  }
 190       -
      183  +
 191  184     boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
 192  185                                                   iteIndex = client2ClientIndex.end();
  …
 194  187       sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
 195  188
      189  +
      190  +
      191  +  for (int idx = 0; idx < nbRecvClient; ++idx)
      192  +  {
      193  +    if (0 != recvNbIndexClientCount[idx])
      194  +    {
      195  +      recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
      196  +    }
      197  +    currentIndex += recvNbIndexClientCount[idx];
      198  +  }
      199  +
      200  +
 196  201     std::vector<ep_lib::MPI_Status> status(request.size());
 197  202     MPI_Waitall(request.size(), &request[0], &status[0]);
      203  +
 198  204
 199  205     CArray<size_t,1>* tmpGlobalIndex;
  …
 208  214       --level;
 209  215       computeIndexInfoMappingLevel(*tmpGlobalIndex, this->internalComm_, level);
      216  +
 210  217     }
 211  218     else // Now, we are in the last level where necessary mappings are.
  …
 372  379     MPI_Comm_rank(commLevel,&clientRank);
 373  380     computeSendRecvRank(level, clientRank);
      381  +  ep_lib::MPI_Barrier(commLevel);
 374  382
 375  383     int groupRankBegin = this->getGroupBegin()[level];
  …
 666  674
 667  675     int nRequest = 0;
      676  +
      677  +
      678  +  for (int idx = 0; idx < sendNbRank.size(); ++idx)
      679  +  {
      680  +    MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
      681  +              sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
      682  +    ++nRequest;
      683  +  }
      684  +
 668  685     for (int idx = 0; idx < recvNbRank.size(); ++idx)
 669  686     {
 670  687       MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
 671  688                 recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
 672        -    ++nRequest;
 673        -  }
 674        -
 675        -  for (int idx = 0; idx < sendNbRank.size(); ++idx)
 676        -  {
 677        -    MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
 678        -              sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
 679  689       ++nRequest;
 680  690     }
  …
 714  724
 715  725     int nRequest = 0;
 716        -  for (int idx = 0; idx < recvBuffSize; ++idx)
 717        -  {
 718        -    MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
 719        -              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
 720        -    ++nRequest;
 721        -  }
      726  +
 722  727
 723  728     for (int idx = 0; idx < sendBuffSize; ++idx)
  …
 734  739       MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
 735  740                 sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
      741  +    ++nRequest;
      742  +  }
      743  +
      744  +  for (int idx = 0; idx < recvBuffSize; ++idx)
      745  +  {
      746  +    MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
      747  +              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
 736  748       ++nRequest;
 737  749     }
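Two things change in this file: a ep_lib::MPI_Barrier(commLevel) is added after each rank query, and in three places the non-blocking exchange is reordered so that the MPI_Isend loops are posted before the matching MPI_Irecv loops, with the existing MPI_Waitall still completing everything. Below is a minimal sketch of that post-sends-then-receives pattern in plain MPI; it is not the XIOS code (the file goes through the ep_lib:: endpoint wrappers and internal tags such as MPI_DHT_INDEX_1), and the ring neighbours and tag 0 are illustration choices only.

  #include <mpi.h>
  #include <vector>
  #include <cstdio>

  int main(int argc, char** argv)
  {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Counterpart of the added ep_lib::MPI_Barrier(commLevel): every rank
    // reaches the exchange before any request is posted.
    MPI_Barrier(MPI_COMM_WORLD);

    int right = (rank + 1) % size;
    int left  = (rank - 1 + size) % size;
    int sendCount = rank;          // e.g. "how many indices I am about to send"
    int recvCount = -1;

    std::vector<MPI_Request> request(2);
    int nRequest = 0;

    // Post the send first, as the new code does ...
    MPI_Isend(&sendCount, 1, MPI_INT, right, 0, MPI_COMM_WORLD, &request[nRequest]);
    ++nRequest;

    // ... then the matching receive; both calls are non-blocking, so either
    // ordering is legal and the single MPI_Waitall below completes them all.
    MPI_Irecv(&recvCount, 1, MPI_INT, left, 0, MPI_COMM_WORLD, &request[nRequest]);
    ++nRequest;

    std::vector<MPI_Status> status(request.size());
    MPI_Waitall(nRequest, &request[0], &status[0]);

    std::printf("rank %d got count %d from rank %d\n", rank, recvCount, left);
    MPI_Finalize();
    return 0;
  }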
  • XIOS/dev/branch_yushan_merged/src/data_output.cpp

r1096 → r1176

  4    4   #include "group_template.hpp"
  5    5   #include "context.hpp"
  6        -
       6   +  //mpi.hpp
  7    7   namespace xios
  8    8   {
  9        -       /// ////////////////////// Définitions ////////////////////// ///
       9   +       /// ////////////////////// Dfinitions ////////////////////// ///
 10   10
 11   11       CDataOutput::~CDataOutput(void)
  • XIOS/dev/branch_yushan_merged/src/io/nc4_data_input.cpp

r1172 → r1176

 57   57      #ifdef _usingEP
 58   58      SuperClass::type = ONE_FILE;
 59        -    //printf("SuperClass::type = %d\n", SuperClass::type);
      59   +    printf("SuperClass::type = %d\n", SuperClass::type);
 60   60      #endif
 61   61      switch (SuperClass::type)
  • XIOS/dev/branch_yushan_merged/src/io/nc4_data_output.cpp

r1172 → r1176

 1102  1102
 1103  1103            SuperClassWriter::definition_end();
 1104         -            printf("SuperClass::type = %d\n", SuperClass::type);
       1104   +            printf("SuperClass::type = %d, typePrec = %d\n", SuperClass::type, typePrec);
 1105  1105            switch (SuperClass::type)
 1106  1106            {
  • XIOS/dev/branch_yushan_merged/src/test/test_complete_omp.f90

r1134 → r1176

 84   84     jbegin=jbegin+nj
 85   85   ENDDO
      86   +
      87   +  if((ni.LE.0) .OR. (nj.LE.0)) call MPI_Abort()
 86   88
 87   89   iend=ibegin+ni-1 ; jend=jbegin+nj-1
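The added line aborts the whole job when the computed per-process tile is empty (ni or nj not positive), rather than letting later allocations or XIOS calls fail more obscurely. A standalone C++ sketch of the same guard follows; the Fortran line calls MPI_Abort with no arguments, while standard MPI_Abort takes a communicator and an error code as shown here, and the naive block decomposition below is an illustration, not the test's own.

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv)
  {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int ni_glo = 100;                // hypothetical global size
    int ni = ni_glo / size;          // deliberately naive block split
    int ibegin = rank * ni;

    if (ni <= 0 || ibegin >= ni_glo) // degenerate decomposition: stop everyone
    {
      std::fprintf(stderr, "rank %d: empty local domain (ni=%d)\n", rank, ni);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Finalize();
    return 0;
  }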
  • XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90

r1153 → r1176

 52   52   if(rank < size-2) then
 53   53
 54        -  !$omp parallel default(firstprivate) firstprivate(dtime)
      54   +  !$omp parallel default(private) firstprivate(dtime)
 55   55
 56   56   !!! XIOS Initialization (get the local communicator)
  …
 69   69
 70   70   ierr=NF90_INQ_VARID(ncid,"bounds_lon",varid)
 71        -  ierr=NF90_INQUIRE_VARIABLE(ncid, varid,dimids=dimids)
      71   +  ierr=NF90_INQUIRE_VARIABLE(ncid, varid, dimids=dimids)
 72   72   ierr=NF90_INQUIRE_DIMENSION(ncid, dimids(1), len=src_nvertex)
 73   73   ierr=NF90_INQUIRE_DIMENSION(ncid, dimids(2), len=src_ni_glo)
  …
 82   82     src_ibegin= remain * (div+1) + (rank-remain) * div ;
 83   83   ENDIF
      84   +
      85   +  if(src_ni .LE. 0) CALL MPI_ABORT()
      86   +
 84   87
 85   88   ALLOCATE(src_lon(src_ni), src_lon_tmp(src_ni))
  …
 95   98   ALLOCATE(lval1(interpolatedLlm))
 96   99   ALLOCATE(lval2(llm2))
      100  +  lval2 = 0
      101  +  lval=0
      102  +  lval1=0
 97   103
 98   104  ierr=NF90_INQ_VARID(ncid,"lon",varid)
  …
 161  167  ENDIF
 162  168
      169  +  if(dst_ni .LE. 0) CALL MPI_ABORT()
      170  +
 163  171  ALLOCATE(dst_lon(dst_ni))
 164  172  ALLOCATE(dst_lat(dst_ni))
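The key change in this test is that the parallel region switches from default(firstprivate) to default(private) while dtime stays firstprivate, alongside abort guards on the domain sizes and explicit zeroing of the lval arrays. The practical difference is that a private copy is undefined on entry to the region, whereas a firstprivate copy is initialised from the value outside it, which is presumably why the newly private data is now set explicitly inside the region while dtime keeps its outside value. A standalone C++ sketch of that distinction follows; the default(private)/default(firstprivate) forms come from the Fortran side of OpenMP, so this sketch spells the clauses out per variable instead.

  #include <omp.h>
  #include <cstdio>

  int main()
  {
    double dtime = 3600.0;   // must keep its outside value in every thread
    int    lval  = 42;       // stands in for the arrays zeroed in the diff

    #pragma omp parallel private(lval) firstprivate(dtime) num_threads(2)
    {
      // A private variable must be written before it is read inside the
      // region; a firstprivate one already holds the outside value.
      lval = 0;
      std::printf("thread %d: dtime=%g lval=%d\n",
                  omp_get_thread_num(), dtime, lval);
    }
    return 0;
  }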