Timestamp: 07/10/17 15:18:16
Author: yushan
Message: prep to merge with trunk @1200
Location: XIOS/dev/branch_yushan_merged/src
Files: 6 edited
  • XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp (r1196 → r1203)

    @@ -670 +670 @@
                                                           std::vector<ep_lib::MPI_Request>& requestSendIndex)
     {
    +  printf("should not call this function sendIndexToClients");
       ep_lib::MPI_Request request;
       requestSendIndex.push_back(request);

    @@ -688 +689 @@
                                                             std::vector<ep_lib::MPI_Request>& requestRecvIndex)
     {
    +  printf("should not call this function recvIndexFromClients");
       ep_lib::MPI_Request request;
       requestRecvIndex.push_back(request);

    @@ -707 +709 @@
                                                          std::vector<ep_lib::MPI_Request>& requestSendInfo)
     {
    +  printf("should not call this function sendInfoToClients");
       ep_lib::MPI_Request request;
       requestSendInfo.push_back(request);

    @@ -726 +729 @@
                                                            std::vector<ep_lib::MPI_Request>& requestRecvInfo)
     {
    +  printf("should not call this function recvInfoFromClients\n");
       ep_lib::MPI_Request request;
       requestRecvInfo.push_back(request);
  • XIOS/dev/branch_yushan_merged/src/filter/spatial_transform_filter.cpp (r1134 → r1203)

    @@ -153 +153 @@
               sendBuff[idxSendBuff] = new double[itSend->second.numElements()];
           }
    +
    +      const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv;
    +      CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv,
    +                                                                       iteRecv = localIndexToReceive.end();

           idxSendBuff = 0;
    -      std::vector<ep_lib::MPI_Request> sendRecvRequest;
    +      std::vector<ep_lib::MPI_Request> sendRecvRequest(localIndexToSend.size()+localIndexToReceive.size());
    +      int position = 0;
           for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff)
           {

    @@ -165 +170 @@
               sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx));
             }
    -        sendRecvRequest.push_back(ep_lib::MPI_Request());
    -        MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());
    +        MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position]);
    +        position++;
           }

           // Receiving data on destination fields
    -      const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv;
    -      CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv,
    -                                                                       iteRecv = localIndexToReceive.end();
    +
           int recvBuffSize = 0;
           for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) recvBuffSize += itRecv->second.size(); //(recvBuffSize < itRecv->second.size())

    @@ -183 +186 @@
             int srcRank = itRecv->first;
             int countSize = itRecv->second.size();
    -        sendRecvRequest.push_back(ep_lib::MPI_Request());
    -        MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back());
    +
    +        MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position]);
    +        position++;
             currentBuff += countSize;
           }
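The change above replaces push_back()/back() on the request vector with a vector sized up front and filled through an explicit position index. For reference, a minimal standalone sketch of that pattern using plain MPI rather than the ep_lib wrappers; the communicator, buffers, and names below are illustrative only, not XIOS code.

    // Sketch: preallocate one request slot per nonblocking call, fill by index,
    // then complete everything with a single MPI_Waitall.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank = 0, size = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // Each rank exchanges one int with every rank (including itself).
      std::vector<int> sendBuf(size, rank), recvBuf(size, -1);

      // One receive and one send per peer, indexed by 'position' instead of
      // push_back()/back(), mirroring the changeset.
      std::vector<MPI_Request> requests(2 * size);
      int position = 0;

      for (int peer = 0; peer < size; ++peer)
      {
        MPI_Irecv(&recvBuf[peer], 1, MPI_INT, peer, 12, MPI_COMM_WORLD, &requests[position]);
        position++;
      }
      for (int peer = 0; peer < size; ++peer)
      {
        MPI_Isend(&sendBuf[peer], 1, MPI_INT, peer, 12, MPI_COMM_WORLD, &requests[position]);
        position++;
      }

      std::vector<MPI_Status> statuses(requests.size());
      MPI_Waitall(static_cast<int>(requests.size()), requests.data(), statuses.data());

      MPI_Finalize();
      return 0;
    }

Sizing the vector once keeps the request storage stable while nonblocking operations are outstanding, which is presumably why the branch avoids growing the ep_lib request vector with push_back().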
  • XIOS/dev/branch_yushan_merged/src/node/field.cpp (r1164 → r1203)

    @@ -715 +715 @@
          if (context->hasClient)
          {
    +       printf("proc %d begein transformation\n", myRank);
            solveTransformedGrid();
    +       printf("proc %d end transformation\n", myRank);
    +       MPI_Barrier(context->client->intraComm);
          }
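The instrumentation above brackets solveTransformedGrid() with rank-stamped prints and a barrier on the client intra-communicator. A minimal sketch of the same idea with plain MPI (the phase body and names are placeholders):

    #include <mpi.h>
    #include <cstdio>

    static void transformationPhase() { /* placeholder for the real work */ }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int myRank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

      std::printf("proc %d begin transformation\n", myRank);
      transformationPhase();
      std::printf("proc %d end transformation\n", myRank);
      MPI_Barrier(MPI_COMM_WORLD);   // make sure every rank has finished before moving on

      MPI_Finalize();
      return 0;
    }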
  • XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90 (r1179 → r1203)

    @@ -183 +183 @@
       ierr=NF90_GET_VAR(ncid,varid, dst_boundslat, start=(/1,dst_ibegin+1/),count=(/dst_nvertex,dst_ni/))

    -
    +  !$omp barrier
    +
    + !$omp master
    + CALL MPI_barrier(comm, ierr)
    + !$omp end master
    +
    + !$omp barrier
    +
    +
       CALL xios_context_initialize("test",comm)
       CALL xios_get_handle("test",ctx_hdl)

    @@ -214 +222 @@
       CALL xios_close_context_definition()

    -  CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj)
    -  ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj))
    -
    -  CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n)
    -  CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj)
    -  ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n))
    -
    -  CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj)
    -  ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj))
    -
    -  CALL xios_recv_field("src_field_regular", tmp_field_0)
    -  CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
    -  CALL xios_recv_field("src_field_unstructured", tmp_field_2)
    +!  CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj)
    +!  ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj))
    +
    +!  CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n)
    +!  CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj)
    +!  ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n))
    +
    +!  CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj)
    +!  ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj))
    +
    +!  CALL xios_recv_field("src_field_regular", tmp_field_0)
    +!  CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
    +!  CALL xios_recv_field("src_field_unstructured", tmp_field_2)

       DO ts=1,10

    @@ -232 +240 @@
        CALL xios_send_field("src_field_2D",src_field_2D)

    -     !DO i=1,src_ni
    -     !  src_field_2D_clone(i) = src_field_2D(i)
    -     !  IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN
    -     !    src_field_2D_clone(i) = missing_value
    -     !  ENDIF
    -     !ENDDO
    -
    -     !CALL xios_send_field("src_field_2D_clone",src_field_2D_clone)
    -     !CALL xios_send_field("src_field_3D",src_field_3D)
    -     !CALL xios_send_field("src_field_3D_clone",src_field_3D)
    -     !CALL xios_send_field("src_field_4D",src_field_4D)
    -     !CALL xios_send_field("src_field_3D_pression",src_field_pression)
    -     CALL xios_send_field("tmp_field_0",tmp_field_0)
    -     CALL xios_send_field("tmp_field_1",tmp_field_1)
    -     CALL xios_send_field("tmp_field_2",tmp_field_2)
    +    DO i=1,src_ni
    +      src_field_2D_clone(i) = src_field_2D(i)
    +      IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN
    +        src_field_2D_clone(i) = missing_value
    +      ENDIF
    +    ENDDO
    +
    +    CALL xios_send_field("src_field_2D_clone",src_field_2D_clone)
    +    CALL xios_send_field("src_field_3D",src_field_3D)
    +    CALL xios_send_field("src_field_3D_clone",src_field_3D)
    +    CALL xios_send_field("src_field_4D",src_field_4D)
    +    CALL xios_send_field("src_field_3D_pression",src_field_pression)
    +!   CALL xios_send_field("tmp_field_0",tmp_field_0)
    +!   CALL xios_send_field("tmp_field_1",tmp_field_1)
    +!   CALL xios_send_field("tmp_field_2",tmp_field_2)
        CALL wait_us(5000) ;
       ENDDO

    @@ -254 +262 @@
       DEALLOCATE(src_lon, src_lat, src_boundslon,src_boundslat, src_field_2D)
       DEALLOCATE(dst_lon, dst_lat, dst_boundslon,dst_boundslat)
    -  DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2)
    + !DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2)

       CALL xios_finalize()
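The barrier sequence added before xios_context_initialize makes every OpenMP thread wait, lets only the master thread call MPI_barrier, and then releases the team. A minimal C++ sketch of the same funnelled-barrier pattern, using plain MPI and OpenMP rather than the XIOS endpoint layer (all names are illustrative):

    #include <mpi.h>
    #include <omp.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      int provided = 0;
      // MPI_THREAD_FUNNELED is sufficient when only the master thread makes MPI calls.
      MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      #pragma omp parallel
      {
        // ... per-thread setup would happen here ...

        #pragma omp barrier            // every thread of this process reaches this point
        #pragma omp master
        MPI_Barrier(MPI_COMM_WORLD);   // exactly one thread per process synchronises across ranks
        #pragma omp barrier            // hold the other threads until the MPI barrier has completed

        std::printf("rank %d thread %d past the barrier\n", rank, omp_get_thread_num());
      }

      MPI_Finalize();
      return 0;
    }

The second thread barrier matters because an OpenMP master construct has no implicit barrier on exit; without it the remaining threads could run ahead before the inter-process barrier completes.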
  • XIOS/dev/branch_yushan_merged/src/test/test_unstruct_omp.f90 (r1177 → r1203)

    @@ -267 +267 @@
       CALL xios_finalize()

    -print *, "Client : xios_finalize "
    +print *, mpi_rank, "Client : xios_finalize "

       !$omp barrier
  • XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation.cpp (r1196 → r1203)

    @@ -473 +473 @@

      // Sending global index of grid source to corresponding process as well as the corresponding mask
    -  std::vector<ep_lib::MPI_Request> requests;
    -  requests.reserve(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());
    +  std::vector<ep_lib::MPI_Request> requests(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());
      std::vector<ep_lib::MPI_Status> status;
      boost::unordered_map<int, unsigned char* > recvMaskDst;
      boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
    +  int position = 0;
      for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
      {

    @@ -485 +485 @@
        recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
    +
    +    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[position]);
    +    position++;
    +
    +    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[position]);
    +    position++;
      }

    @@ -523 +525 @@

        // Send global index source and mask
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
    +
    +    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[position]);
    +    position++;
    +
    +    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[position]);
    +    position++;
      }

    @@ -533 +537 @@

      // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
    -  std::vector<ep_lib::MPI_Request>().swap(requests);
    -  std::vector<ep_lib::MPI_Status>().swap(status);
    -  requests.reserve(sendRankSizeMap.size()+recvRankSizeMap.size());
    +  //std::vector<ep_lib::MPI_Request>().swap(requests);
    +  //std::vector<ep_lib::MPI_Status>().swap(status);
    +  requests.resize(sendRankSizeMap.size()+recvRankSizeMap.size());
    +  position = 0;
      // Okie, on destination side, we will wait for information of masked index of source
      for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)

    @@ -542 +547 @@
        int recvSize = itSend->second;

    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
    +    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]);
    +    position++;
      }

    @@ -579 +584 @@

        // Okie, now inform the destination which source index are masked
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
    +    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]);
    +    position++;
      }
      status.resize(requests.size());
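Both this file and spatial_transform_filter.cpp switch from reserve()+push_back() to constructing or resizing the request vector to its full size and writing through &requests[position]. A small sketch of the container semantics behind that choice (FakeRequest is a stand-in type, not XIOS code): reserve() only sets capacity without creating elements, so indexing a reserved-but-empty vector is invalid, whereas a sized vector already contains elements whose addresses can be handed to MPI_Irecv/MPI_Isend.

    #include <vector>
    #include <cassert>

    struct FakeRequest { int handle = 0; };   // stand-in for ep_lib::MPI_Request

    int main()
    {
      std::vector<FakeRequest> reserved;
      reserved.reserve(4);                    // capacity 4, size still 0
      assert(reserved.size() == 0);           // &reserved[0] would be out of bounds here

      std::vector<FakeRequest> sized(4);      // four default-constructed requests exist
      int position = 0;
      for (int i = 0; i < 4; ++i)
      {
        FakeRequest* slot = &sized[position]; // valid: the element already exists
        slot->handle = i;                     // an MPI call would write the request here
        position++;
      }
      assert(position == static_cast<int>(sized.size()));
      return 0;
    }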