Timestamp: 07/05/17 14:14:09
Author: yushan
Message: add request_check. test client and complete OK
Location: XIOS/dev/branch_yushan_merged/src
Files: 5 edited

  • XIOS/dev/branch_yushan_merged/src/client.cpp

    r1187 → r1196

    @@ -115,5 +115,5 @@


    -            test_sendrecv(CXios::globalComm);
    +            //test_sendrecv(CXios::globalComm);
                 MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;

  • XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp

    r1185 → r1196

    @@ -175,24 +175,47 @@
         recvIndexBuff = new unsigned long[recvNbIndexCount];

    -    std::vector<ep_lib::MPI_Request> request;
    +    int request_size = 0;
    +
    +    int currentIndex = 0;
    +    int nbRecvClient = recvRankClient.size();
    +
    +    int position = 0;
    +
    +    for (int idx = 0; idx < nbRecvClient; ++idx)
    +    {
    +      if (0 != recvNbIndexClientCount[idx])
    +      {
    +        request_size++;
    +      }
    +    }
    +
    +    request_size += client2ClientIndex.size();
    +
    +
    +    std::vector<ep_lib::MPI_Request> request(request_size);
    +
         std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex,
                                    iteRecvIndex = recvRankClient.end(),
                                  itbRecvNbIndex = recvNbIndexClientCount.begin(),
                                  itRecvNbIndex;
    -    int currentIndex = 0;
    -    int nbRecvClient = recvRankClient.size();
    +

         boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                       iteIndex = client2ClientIndex.end();
         for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
    -      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
    -
    -
    -
    +    {
    +      MPI_Isend(itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], MPI_UNSIGNED_LONG, itIndex->first, MPI_DHT_INDEX, commLevel, &request[position]);
    +      position++;
    +      //sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
    +    }
    +
         for (int idx = 0; idx < nbRecvClient; ++idx)
         {
           if (0 != recvNbIndexClientCount[idx])
           {
    -        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
    +        MPI_Irecv(recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], MPI_UNSIGNED_LONG,
    +              recvRankClient[idx], MPI_DHT_INDEX, commLevel, &request[position]);
    +        position++;
    +        //recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
           }
           currentIndex += recvNbIndexClientCount[idx];

    @@ -200,7 +223,7 @@


    -    std::vector<ep_lib::MPI_Status> status(request.size());
    +    std::vector<ep_lib::MPI_Status> status(request_size);
         MPI_Waitall(request.size(), &request[0], &status[0]);
    -
    +

         CArray<size_t,1>* tmpGlobalIndex;
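The hunks above replace the sendIndexToClients/recvIndexFromClients wrappers with direct MPI_Isend/MPI_Irecv calls into a request vector that is now sized before any communication is posted. A minimal sketch of that count-then-post-then-wait pattern, assuming plain MPI; the function and parameter names below are illustrative, not XIOS symbols:

    #include <mpi.h>
    #include <vector>

    // Exchange variable-length index lists with a set of peers using the
    // pattern from the changeset: count the requests, size the array once,
    // post all non-blocking calls, then wait on everything.
    void exchangeIndices(const std::vector<int>& recvRanks,
                         const std::vector<int>& recvCounts,
                         unsigned long* recvBuff,
                         const std::vector<int>& sendRanks,
                         const std::vector<unsigned long*>& sendBuffs,
                         const std::vector<int>& sendCounts,
                         MPI_Comm comm, int tag)
    {
      // Pass 1: one request per send, plus one per non-empty receive.
      int requestSize = static_cast<int>(sendRanks.size());
      for (std::size_t i = 0; i < recvRanks.size(); ++i)
        if (recvCounts[i] != 0) ++requestSize;

      // Sizing once keeps &request[position] valid for every posted call.
      std::vector<MPI_Request> request(requestSize);
      int position = 0;

      for (std::size_t i = 0; i < sendRanks.size(); ++i)
        MPI_Isend(sendBuffs[i], sendCounts[i], MPI_UNSIGNED_LONG,
                  sendRanks[i], tag, comm, &request[position++]);

      int offset = 0;
      for (std::size_t i = 0; i < recvRanks.size(); ++i)
      {
        if (recvCounts[i] != 0)
          MPI_Irecv(recvBuff + offset, recvCounts[i], MPI_UNSIGNED_LONG,
                    recvRanks[i], tag, comm, &request[position++]);
        offset += recvCounts[i];
      }

      std::vector<MPI_Status> status(request.size());
      MPI_Waitall(static_cast<int>(request.size()), &request[0], &status[0]);
    }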
     
    @@ -256,15 +279,40 @@
       }

    -    std::vector<ep_lib::MPI_Request> requestOnReturn;
    +    request_size = 0;
    +    for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
    +    {
    +      if (0 != recvNbIndexOnReturn[idx])
    +      {
    +        request_size += 2;
    +      }
    +    }
    +
    +    for (int idx = 0; idx < nbRecvClient; ++idx)
    +    {
    +      if (0 != sendNbIndexOnReturn[idx])
    +      {
    +        request_size += 2;
    +      }
    +    }
    +
    +    std::vector<ep_lib::MPI_Request> requestOnReturn(request_size);
         currentIndex = 0;
    +    position = 0;
         for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
         {
           if (0 != recvNbIndexOnReturn[idx])
           {
    -        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
    -        recvInfoFromClients(recvRankOnReturn[idx],
    -                            recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
    -                            recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
    -                            commLevel, requestOnReturn);
    +        //recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
    +        MPI_Irecv(recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], MPI_UNSIGNED_LONG,
    +              recvRankOnReturn[idx], MPI_DHT_INDEX, commLevel, &requestOnReturn[position]);
    +        position++;
    +        //recvInfoFromClients(recvRankOnReturn[idx],
    +        //                    recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
    +        //                    recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
    +        //                    commLevel, requestOnReturn);
    +        MPI_Irecv(recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
    +                  recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), MPI_CHAR,
    +                  recvRankOnReturn[idx], MPI_DHT_INFO, commLevel, &requestOnReturn[position]);
    +        position++;
           }
           currentIndex += recvNbIndexOnReturn[idx];
     
    @@ -299,8 +347,14 @@
           }

    -      sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
    -                         sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
    -      sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
    -                        sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
    +      //sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
    +      //                   sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
    +      MPI_Isend(client2ClientIndexOnReturn[rank], sendNbIndexOnReturn[idx], MPI_UNSIGNED_LONG,
    +            rank, MPI_DHT_INDEX, commLevel, &requestOnReturn[position]);
    +      position++;
    +      //sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
    +      //                  sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
    +      MPI_Isend(client2ClientInfoOnReturn[rank], sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), MPI_CHAR,
    +            rank, MPI_DHT_INFO, commLevel, &requestOnReturn[position]);
    +      position++;
         }
         currentIndex += recvNbIndexClientCount[idx];
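The return path sizes its request vector the same way, except that each active peer now contributes two messages, an index message (MPI_DHT_INDEX) and an info message (MPI_DHT_INFO), hence request_size += 2. A sketch of just that counting step, with illustrative names rather than the actual XIOS variables:

    #include <vector>

    // One index + one info message per peer that actually exchanges data,
    // on both the receive and the send side of the return path.
    int countReturnRequests(const std::vector<int>& recvNbOnReturn,
                            const std::vector<int>& sendNbOnReturn)
    {
      int requestSize = 0;
      for (std::size_t i = 0; i < recvNbOnReturn.size(); ++i)
        if (recvNbOnReturn[i] != 0) requestSize += 2;  // index + info recv
      for (std::size_t i = 0; i < sendNbOnReturn.size(); ++i)
        if (sendNbOnReturn[i] != 0) requestSize += 2;  // index + info send
      return requestSize;
    }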
     
    @@ -440,5 +494,7 @@
       int recvNbIndexCount = 0;
       for (int idx = 0; idx < recvNbIndexClientCount.size(); ++idx)
    +  {
         recvNbIndexCount += recvNbIndexClientCount[idx];
    +  }

       unsigned long* recvIndexBuff;
     
    @@ -453,19 +509,31 @@
       // it will send a message to the correct clients.
       // Contents of the message are index and its corresponding informatioin
    -  std::vector<ep_lib::MPI_Request> request;
    +  int request_size = 0;
       int currentIndex = 0;
       int nbRecvClient = recvRankClient.size();
    +  int current_pos = 0;
    +
       for (int idx = 0; idx < nbRecvClient; ++idx)
       {
         if (0 != recvNbIndexClientCount[idx])
         {
    -      recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
    -      recvInfoFromClients(recvRankClient[idx],
    -                          recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
    -                          recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
    -                          commLevel, request);
    -    }
    -    currentIndex += recvNbIndexClientCount[idx];
    -  }
    +      request_size += 2;
    +    }
    +    //currentIndex += recvNbIndexClientCount[idx];
    +  }
    +
    +  request_size += client2ClientIndex.size();
    +  request_size += client2ClientInfo.size();
    +
    +
    +
    +  std::vector<ep_lib::MPI_Request> request(request_size);
    +
    +  //unsigned long* tmp_send_buf_long[client2ClientIndex.size()];
    +  //unsigned char* tmp_send_buf_char[client2ClientInfo.size()];
    +
    +  int info_position = 0;
    +  int index_position = 0;
    +

       boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
     
    @@ -473,5 +541,17 @@
       for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
       {
    -    sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
    +    //sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
    +
    +    //tmp_send_buf_long[index_position] = new unsigned long[sendNbIndexBuff[itIndex->first-groupRankBegin]];
    +    //for(int i=0; i<sendNbIndexBuff[itIndex->first-groupRankBegin]; i++)
    +    //{
    +    //  tmp_send_buf_long[index_position][i] = (static_cast<unsigned long * >(itIndex->second))[i];
    +    //}
    +    //MPI_Isend(tmp_send_buf_long[current_pos], sendNbIndexBuff[itIndex->first-groupRankBegin], MPI_UNSIGNED_LONG,
    +    MPI_Isend(itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], MPI_UNSIGNED_LONG,
    +              itIndex->first, MPI_DHT_INDEX, commLevel, &request[current_pos]);
    +    current_pos++;
    +    index_position++;
    +
       }

     
    @@ -480,11 +560,58 @@
       for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
       {
    -    sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
    -
    +    //sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
    +
    +    //tmp_send_buf_char[info_position] = new unsigned char[sendNbInfo[itInfo->first-groupRankBegin]];
    +    //for(int i=0; i<sendNbInfo[itInfo->first-groupRankBegin]; i++)
    +    //{
    +    //  tmp_send_buf_char[info_position][i] = (static_cast<unsigned char * >(itInfo->second))[i];
    +    //}
    +
    +    MPI_Isend(itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], MPI_CHAR,
    +              itInfo->first, MPI_DHT_INFO, commLevel, &request[current_pos]);
    +
    +    current_pos++;
    +    info_position++;
    +  }
    +
    +  for (int idx = 0; idx < nbRecvClient; ++idx)
    +  {
    +    if (0 != recvNbIndexClientCount[idx])
    +    {
    +      //recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
    +      MPI_Irecv(recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], MPI_UNSIGNED_LONG,
    +                recvRankClient[idx], MPI_DHT_INDEX, commLevel, &request[current_pos]);
    +      current_pos++;
    +
    +
    +      MPI_Irecv(recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
    +                recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
    +                MPI_CHAR, recvRankClient[idx], MPI_DHT_INFO, commLevel, &request[current_pos]);
    +
    +      current_pos++;
    +
    +
    +
    +      // recvInfoFromClients(recvRankClient[idx],
    +      //                     recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
    +      //                     recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
    +      //                     commLevel, request);
    +    }
    +    currentIndex += recvNbIndexClientCount[idx];
       }

       std::vector<ep_lib::MPI_Status> status(request.size());
    -
    +
       MPI_Waitall(request.size(), &request[0], &status[0]);
    +
    +
    +  //for(int i=0; i<client2ClientInfo.size(); i++)
    +  //  delete[] tmp_send_buf_char[i];
    +
    +
    +
    +  //for(int i=0; i<client2ClientIndex.size(); i++)
    +  //  delete[] tmp_send_buf_long[i];
    +

       Index2VectorInfoTypeMap indexToInfoMapping;
     
    @@ -527,4 +654,5 @@
       else
         index2InfoMapping_.swap(indexToInfoMapping);
    +
     }

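The commented-out tmp_send_buf_long/tmp_send_buf_char blocks in this function sketch an alternative that was evidently tried: deep-copy each payload into a buffer owned by the exchange itself, so the data stays valid until the wait completes even if the caller reuses its storage. A minimal version of that idea, assuming C++98-era style as in XIOS; PendingSend, isendCopy, and waitAllCopies are hypothetical names, not XIOS API:

    #include <mpi.h>
    #include <list>
    #include <vector>

    struct PendingSend
    {
      std::vector<unsigned char> buf;  // owned copy of the payload
      MPI_Request req;
    };

    // std::list, so elements (and the buffers MPI is still reading) never
    // move when further sends are appended.
    void isendCopy(const void* data, int nbytes, int dest, int tag,
                   MPI_Comm comm, std::list<PendingSend>& pending)
    {
      pending.push_back(PendingSend());
      PendingSend& p = pending.back();
      const unsigned char* src = static_cast<const unsigned char*>(data);
      p.buf.assign(src, src + nbytes);
      MPI_Isend(&p.buf[0], nbytes, MPI_CHAR, dest, tag, comm, &p.req);
    }

    void waitAllCopies(std::list<PendingSend>& pending)
    {
      for (std::list<PendingSend>::iterator it = pending.begin();
           it != pending.end(); ++it)
        MPI_Wait(&it->req, MPI_STATUS_IGNORE);
      pending.clear();  // buffers are freed only after every send completed
    }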
     
    @@ -720,9 +848,17 @@
       std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
       std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
    -
    +  //ep_lib::MPI_Request request[sendBuffSize+recvBuffSize];
    +  //ep_lib::MPI_Status requestStatus[sendBuffSize+recvBuffSize];
    +
       int my_rank;
       MPI_Comm_rank(this->internalComm_, &my_rank);

       int nRequest = 0;
    +  for (int idx = 0; idx < recvBuffSize; ++idx)
    +  {
    +    MPI_Irecv(&recvBuff[2*idx], 2, MPI_INT,
    +              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
    +    ++nRequest;
    +  }


     
    @@ -743,15 +879,12 @@
       }

    -  for (int idx = 0; idx < recvBuffSize; ++idx)
    -  {
    -    MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
    -              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
    -    ++nRequest;
    -  }
    +

       //MPI_Barrier(this->internalComm_);

       MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
    -
    +  //MPI_Waitall(sendBuffSize+recvBuffSize, request, requestStatus);
    +
    +
       int nbRecvRank = 0, nbRecvElements = 0;
       recvNbRank.clear();
     
    @@ -765,5 +898,10 @@
         }
       }
    -}
    -
    -}
    +
    +
    +
    +
    +}
    +
    +}
    +
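In sendRecvRank the MPI_Irecv loop now runs before the sends instead of after them. A common motivation, which this change is consistent with: pre-posted receives let the MPI library deliver matching messages straight into the user buffer instead of staging them in its unexpected-message queue. The following is schematic only; names are illustrative, not the XIOS signatures:

    #include <mpi.h>
    #include <vector>

    // Each peer sends/receives a pair of ints (rank count, element count).
    void exchangePairs(const std::vector<int>& recvRank, std::vector<int>& recvBuff,
                       const std::vector<int>& sendRank, std::vector<int>& sendBuff,
                       MPI_Comm comm, int tag)
    {
      std::vector<MPI_Request> request(recvRank.size() + sendRank.size());
      std::vector<MPI_Status> status(request.size());
      int nRequest = 0;

      // Post every receive first...
      for (std::size_t idx = 0; idx < recvRank.size(); ++idx)
        MPI_Irecv(&recvBuff[2*idx], 2, MPI_INT, recvRank[idx], tag, comm,
                  &request[nRequest++]);

      // ...then the matching sends.
      for (std::size_t idx = 0; idx < sendRank.size(); ++idx)
        MPI_Isend(&sendBuff[2*idx], 2, MPI_INT, sendRank[idx], tag, comm,
                  &request[nRequest++]);

      MPI_Waitall(static_cast<int>(request.size()), &request[0], &status[0]);
    }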
  • XIOS/dev/branch_yushan_merged/src/server.cpp

    r1187 → r1196

    @@ -93,5 +93,5 @@
                          <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< clientLeader<<endl ;

    -            test_sendrecv(CXios::globalComm);
    +            // test_sendrecv(CXios::globalComm);
                 MPI_Intercomm_create(intraComm,0,CXios::globalComm,clientLeader,0,&newComm) ;
                 interComm.push_back(newComm) ;
  • XIOS/dev/branch_yushan_merged/src/test/test_omp.f90

    r1134 → r1196

    @@ -47,5 +47,5 @@
        if(rank < size-2) then

    -   !$omp parallel default(private)
    +   !$omp parallel default(firstprivate)

        CALL xios_initialize(id,return_comm=comm)
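default(private) gives every variable in the parallel region an uninitialized per-thread copy, whereas default(firstprivate) initializes each copy from the value the variable had on entry, which is presumably what the id and comm arguments of xios_initialize need here. C and C++ OpenMP (before 5.1) have no default(firstprivate), so this C++ analogue names the variable explicitly; it is an illustrative snippet, not XIOS code:

    #include <omp.h>
    #include <cstdio>

    int main()
    {
      int id = 42;  // initialized on the master thread before the region

      // firstprivate: each thread starts from id == 42.
      // With private(id) instead, reading id here would be undefined.
      #pragma omp parallel default(none) firstprivate(id)
      {
        std::printf("thread %d sees id=%d\n", omp_get_thread_num(), id);
      }
      return 0;
    }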
  • XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation.cpp

    r1134 → r1196

    @@ -474,4 +474,5 @@
       // Sending global index of grid source to corresponding process as well as the corresponding mask
       std::vector<ep_lib::MPI_Request> requests;
    +  requests.reserve(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());
       std::vector<ep_lib::MPI_Status> status;
       boost::unordered_map<int, unsigned char* > recvMaskDst;

    @@ -534,4 +535,5 @@
       std::vector<ep_lib::MPI_Request>().swap(requests);
       std::vector<ep_lib::MPI_Status>().swap(status);
    +  requests.reserve(sendRankSizeMap.size()+recvRankSizeMap.size());
       // Okie, on destination side, we will wait for information of masked index of source
       for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
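Both reserve() calls size requests for the worst case before the first push_back. With std::vector, growth reallocates and moves the stored elements, so any pointer already taken into the vector (for example &requests.back() handed to a non-blocking call) would dangle; reserving up front keeps the storage stable for the whole exchange, a precaution that plausibly matters under the ep_lib endpoint layer, where a request may still be updated after the posting call returns. A sketch under those assumptions; recvSizes and its parameters are illustrative, not XIOS API:

    #include <mpi.h>
    #include <map>
    #include <vector>

    void recvSizes(const std::map<int, int>& recvRankSizeMap,
                   std::map<int, std::vector<unsigned long> >& recvBuf,
                   MPI_Comm comm, int tag,
                   std::vector<MPI_Request>& requests)
    {
      // Reserve before the first push_back: later growth would move the
      // MPI_Request objects whose addresses were already handed to MPI.
      requests.reserve(requests.size() + recvRankSizeMap.size());

      for (std::map<int, int>::const_iterator it = recvRankSizeMap.begin();
           it != recvRankSizeMap.end(); ++it)
      {
        if (it->second == 0) continue;           // nothing to receive
        recvBuf[it->first].resize(it->second);   // map values have stable addresses
        requests.push_back(MPI_Request());
        MPI_Irecv(&recvBuf[it->first][0], it->second, MPI_UNSIGNED_LONG,
                  it->first, tag, comm, &requests.back());
      }
    }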