Timestamp: 02/17/17 17:55:37 (7 years ago)
Author: yushan
Message: ep_lib namespace specified when netcdf involved
Location: XIOS/dev/branch_yushan/src/transformation
Files: 5 edited

  • XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.cpp

r1037 → r1053

@@ -173 +173 @@
 
   // Sending global index of grid source to corresponding process as well as the corresponding mask
-  std::vector<MPI_Request> requests;
-  std::vector<MPI_Status> status;
+  std::vector<ep_lib::MPI_Request> requests;
+  std::vector<ep_lib::MPI_Status> status;
   boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
   boost::unordered_map<int, double* > sendValueToDest;

@@ -184 +184 @@
     sendValueToDest[recvRank] = new double [recvSize];
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
   }

@@ -206 +206 @@
 
     // Send global index source and mask
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
   }

@@ -215 +215 @@
   //printf("(%d) src/transformation/axis_algorithm_inverse 1st waitall OK\n", clientRank);
 
-  std::vector<MPI_Request>().swap(requests);
-  std::vector<MPI_Status>().swap(status);
+  std::vector<ep_lib::MPI_Request>().swap(requests);
+  std::vector<ep_lib::MPI_Status>().swap(status);
 
   // Okie, on destination side, we will wait for information of masked index of source

@@ -224 +224 @@
     int recvSize = itSend->second;
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
   }

@@ -242 +242 @@
     }
     // Okie, now inform the destination which source index are masked
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
   }
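
For context, a minimal sketch of the pattern these hunks apply in axis_algorithm_inverse.cpp: the request and status containers are declared with the ep_lib types so that &requests.back() has the type expected by the (EP-wrapped) MPI_Irecv/MPI_Isend/MPI_Waitall entry points. The helper name, the plain std::map (the file uses boost::unordered_map), and the status resize are illustrative only.

    // Sketch only: assumes ep_declaration.hpp (or equivalent) brings the ep_lib
    // types into scope and that MPI_Irecv/MPI_Waitall resolve to the EP-aware
    // overloads when _usingEP is defined.
    #include <map>
    #include <vector>

    void postGlobalIndexReceives(const std::map<int, int>& recvRankSizeMap,
                                 std::map<int, unsigned long*>& recvGlobalIndexSrc,
                                 ep_lib::MPI_Comm& intraComm)
    {
      std::vector<ep_lib::MPI_Request> requests;
      std::vector<ep_lib::MPI_Status>  status;

      for (std::map<int, int>::const_iterator it = recvRankSizeMap.begin();
           it != recvRankSizeMap.end(); ++it)
      {
        int recvRank = it->first;
        int recvSize = it->second;
        recvGlobalIndexSrc[recvRank] = new unsigned long[recvSize];

        // The element type must be ep_lib::MPI_Request so that &requests.back()
        // matches the request argument of the (possibly EP-wrapped) MPI_Irecv.
        requests.push_back(ep_lib::MPI_Request());
        MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG,
                  recvRank, 46, intraComm, &requests.back());
      }

      status.resize(requests.size());
      MPI_Waitall(requests.size(), &requests[0], &status[0]);

      // Clear both containers before the next communication phase, as the
      // changeset does with the swap idiom.
      std::vector<ep_lib::MPI_Request>().swap(requests);
      std::vector<ep_lib::MPI_Status>().swap(status);
    }
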
  • XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.hpp

r933 → r1053

@@ -12 +12 @@
 #include "axis_algorithm_transformation.hpp"
 #include "transformation.hpp"
-
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
+
 namespace xios {
  • XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.cpp

r1037 → r1053

@@ -371 +371 @@
   CContextClient* client=context->client;
 
-  MPI_Comm poleComme(MPI_COMM_NULL);
-  MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+  ep_lib::MPI_Comm poleComme(MPI_COMM_NULL);
+  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
   if (MPI_COMM_NULL != poleComme)
   {
     int nbClientPole;
-    MPI_Comm_size(poleComme, &nbClientPole);
+    ep_lib::MPI_Comm_size(poleComme, &nbClientPole);
 
     std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,

@@ -541 +541 @@
   double* sendWeightBuff = new double [sendBuffSize];
 
-  std::vector<MPI_Request> sendRequest;
+  std::vector<ep_lib::MPI_Request> sendRequest;
 
   int sendOffSet = 0, l = 0;

@@ -562 +562 @@
     }
 
-    sendRequest.push_back(MPI_Request());
+    sendRequest.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendIndexDestBuff + sendOffSet,
              k,

@@ -570 +570 @@
              client->intraComm,
              &sendRequest.back());
-    sendRequest.push_back(MPI_Request());
+    sendRequest.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendIndexSrcBuff + sendOffSet,
              k,

@@ -578 +578 @@
              client->intraComm,
              &sendRequest.back());
-    sendRequest.push_back(MPI_Request());
+    sendRequest.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendWeightBuff + sendOffSet,
              k,

@@ -597 +597 @@
   while (receivedSize < recvBuffSize)
   {
-    MPI_Status recvStatus;
+    ep_lib::MPI_Status recvStatus;
     MPI_Recv((recvIndexDestBuff + receivedSize),
              recvBuffSize,

@@ -637 +637 @@
   }
 
-  std::vector<MPI_Status> requestStatus(sendRequest.size());
-  MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
+  std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
+  ep_lib::MPI_Status stat_ignore;
+  MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
+  //MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
 
   delete [] sendIndexDestBuff;

@@ -724 +726 @@
 
   MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
-  MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
+  ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
 
   std::vector<StdSize> start(1, startIndex - localNbWeight);
   std::vector<StdSize> count(1, localNbWeight);
 
-  WriteNetCdf netCdfWriter(filename, client->intraComm);
+  WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm));
 
   // netCdfWriter = CONetCDF4(filename, false, false, true, client->intraComm, false);
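
Two changes in this file go beyond renaming types, and a hedged sketch of both follows. First, the EP layer is not guaranteed to accept the MPI_STATUS_IGNORE constant, so a concrete ep_lib::MPI_Status is passed to MPI_Waitall instead. Second, the netCDF writer only understands a raw MPI communicator, so the EP communicator is unwrapped through its mpi_comm member. WriteNetCdf and the mpi_comm member come from the hunks above; the function wrapper and variable names below are illustrative only.

    #include <string>
    #include <vector>

    // Sketch only: waits on the pending weight sends, then opens the netCDF
    // weight file on the same intra-communicator.
    void finishSendsAndOpenWriter(std::vector<ep_lib::MPI_Request>& sendRequest,
                                  ep_lib::MPI_Comm& intraComm,
                                  const std::string& filename)
    {
      // A real status object stands in for MPI_STATUS_IGNORE, which the EP
      // layer may not recognise (mirrors the changeset's stat_ignore).
      ep_lib::MPI_Status stat_ignore;
      MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);

      // netCDF expects the underlying MPI_Comm, hence the unwrap through the
      // mpi_comm member of the EP communicator.
      WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(intraComm.mpi_comm));
    }
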
  • XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.hpp

r1037 → r1053

@@ -13 +13 @@
 #include "transformation.hpp"
 #include "nc4_data_output.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 namespace xios {
  • XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp

r1037 → r1053

@@ -475 +475 @@
 
   // Sending global index of grid source to corresponding process as well as the corresponding mask
-  std::vector<MPI_Request> requests;
-  std::vector<MPI_Status> status;
+  std::vector<ep_lib::MPI_Request> requests;
+  std::vector<ep_lib::MPI_Status> status;
   boost::unordered_map<int, unsigned char* > recvMaskDst;
   boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;

@@ -486 +486 @@
     recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
   }

@@ -524 +524 @@
 
     // Send global index source and mask
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
   }

@@ -536 +536 @@
 
   // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-  std::vector<MPI_Request>().swap(requests);
-  std::vector<MPI_Status>().swap(status);
+  std::vector<ep_lib::MPI_Request>().swap(requests);
+  std::vector<ep_lib::MPI_Status>().swap(status);
   // Okie, on destination side, we will wait for information of masked index of source
   for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)

@@ -544 +544 @@
     int recvSize = itSend->second;
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
   }

@@ -581 +581 @@
 
     // Okie, now inform the destination which source index are masked
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
   }
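
The grid_transformation.cpp hunks follow the same pattern, with one detail worth illustrating: for each peer rank two receives are queued into the same ep_lib request vector, one for the source global indices (tag 46) and one for the destination mask (tag 47). A hedged sketch, with illustrative map names standing in for the members used above:

    #include <map>
    #include <vector>

    // Sketch only: post the paired index/mask receives used by the grid
    // transformation exchange; the caller later waits on `requests`.
    void postIndexAndMaskReceives(const std::map<int, int>& recvRankSizeMap,
                                  std::map<int, unsigned long*>& recvGlobalIndexSrc,
                                  std::map<int, unsigned char*>& recvMaskDst,
                                  ep_lib::MPI_Comm& intraComm,
                                  std::vector<ep_lib::MPI_Request>& requests)
    {
      for (std::map<int, int>::const_iterator it = recvRankSizeMap.begin();
           it != recvRankSizeMap.end(); ++it)
      {
        int recvRank = it->first;
        int recvSize = it->second;
        recvGlobalIndexSrc[recvRank] = new unsigned long[recvSize];
        recvMaskDst[recvRank]        = new unsigned char[recvSize];

        // Global indices of the source grid arrive with tag 46.
        requests.push_back(ep_lib::MPI_Request());
        MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG,
                  recvRank, 46, intraComm, &requests.back());

        // The corresponding destination mask arrives with tag 47.
        requests.push_back(ep_lib::MPI_Request());
        MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR,
                  recvRank, 47, intraComm, &requests.back());
      }
    }
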