Timestamp:
03/09/17 18:31:07
Author:
yushan
Message:

Preparation for merge from trunk

File:
1 edited

  • XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp

--- XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp (r1053)
+++ XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp (r1070)
@@ -107,6 +107,4 @@
                                                                  int level)
 {
-  int clientRank;
-  MPI_Comm_rank(commLevel,&clientRank);
   int groupRankBegin = this->getGroupBegin()[level];
   int nbClient = this->getNbInGroup()[level];
@@ -200,11 +198,6 @@
 
   std::vector<ep_lib::MPI_Status> status(request.size());
-
-  //printf("1(%d): calling wait all for %lu requests\n", clientRank, request.size());
-
   MPI_Waitall(request.size(), &request[0], &status[0]);
 
-
-  //printf("               1(%d): calling wait all for %lu requests OK\n", clientRank, request.size());
 
   CArray<size_t,1>* tmpGlobalIndex;
@@ -311,8 +304,6 @@
 
   std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
-  //printf("2(%d): calling wait all for %lu requests\n", clientRank, requestOnReturn.size());
   MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
 
-  //printf("            2(%d): calling wait all for %lu requests OK\n", clientRank, requestOnReturn.size());
 
   Index2VectorInfoTypeMap indexToInfoMapping;
@@ -383,5 +374,4 @@
                                                             int level)
 {
-  //printf("in computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, const MPI_Comm& commLevel, int level)\n");
   int clientRank;
   MPI_Comm_rank(commLevel,&clientRank);
@@ -433,11 +423,8 @@
     {
       client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;;
-  //          ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
       ProcessDHTElement<InfoType>::packElement(infoTmp[idx], client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
       ++sendNbIndexBuff[indexClient];
     }
   }
-
-  //printf("check 4 OK. clientRank = %d\n", clientRank);
 
   // Calculate from how many clients each client receive message.
@@ -446,5 +433,4 @@
   sendRecvRank(level, sendBuff, sendNbIndexBuff,
                recvRankClient, recvNbIndexClientCount);
-  //printf("sendRecvRank OK\n");
 
   int recvNbIndexCount = 0;
@@ -459,6 +445,4 @@
     recvInfoBuff = new unsigned char[recvNbIndexCount*ProcessDHTElement<InfoType>::typeSize()];
   }
-
-  //printf("check 5 OK. clientRank = %d\n", clientRank);
 
   // If a client holds information about index and the corresponding which don't belong to it,
@@ -483,6 +467,4 @@
   }
 
-  //printf("check 6 OK. clientRank = %d\n", clientRank);
-
   boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                 iteIndex = client2ClientIndex.end();
@@ -493,6 +475,4 @@
   }
 
-  //printf("check 7 OK. clientRank = %d\n", clientRank);
-
   boost::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
                                                       iteInfo = client2ClientInfo.end();
@@ -503,5 +483,4 @@
   }
 
-  //printf("check 8 OK. clientRank = %d\n", clientRank);
   std::vector<ep_lib::MPI_Status> status(request.size());
 
@@ -528,6 +507,4 @@
   }
 
-  //printf("check 9 OK. clientRank = %d\n", clientRank);
-
   if (0 != recvNbIndexCount)
   {
@@ -543,5 +520,4 @@
       delete [] it->second;
 
-  //printf("check 10 OK. clientRank = %d\n", clientRank);
   // Ok, now do something recursive
   if (0 < level)
@@ -608,5 +584,4 @@
   ep_lib::MPI_Request request;
   requestSendInfo.push_back(request);
-  //printf("MPI_IsendInfo(info, infoSize, MPI_CHAR,... char count = %d, dest = %d, buf_size = %d\n", infoSize, clientDestRank, sizeof(*info) );
   MPI_Isend(info, infoSize, MPI_CHAR,
             clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
@@ -717,9 +692,6 @@
   }
 
-  int clientRank;
-  MPI_Comm_rank(this->internalComm_,&clientRank);
-  //printf("4(%d): calling wait all for %lu requests\n", clientRank, sendNbRank.size()+recvNbRank.size());
+
   MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
-  //printf("        4(%d): calling wait all for %lu requests OK\n", clientRank, sendNbRank.size()+recvNbRank.size());
 }
 
@@ -738,7 +710,4 @@
                                                  std::vector<int>& recvNbRank, std::vector<int>& recvNbElements)
 {
-  int myRank;
-  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
-  //printf("myRank = %d, in sendRecvRank(int level, const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements, std::vector<int>& recvNbRank, std::vector<int>& recvNbElements)\n", myRank);
   int groupBegin = this->getGroupBegin()[level];
 
@@ -757,12 +726,9 @@
   for (int idx = 0; idx < recvBuffSize; ++idx)
   {
-    //printf("myRank = %d starts irecv with src = %d, tag = %d, idx = %d\n", myRank, recvRank[idx], MPI_DHT_INDEX_0, idx);
     MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
               recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-    //printf("myRank = %d MPI_Irecv OK, idx = %d, nRequest = %d\n", myRank, idx, nRequest);
     ++nRequest;
   }
 
-  //printf("myRank = %d, check 1 OK\n", myRank);
 
   for (int idx = 0; idx < sendBuffSize; ++idx)
@@ -775,8 +741,6 @@
   for (int idx = 0; idx < sendBuffSize; ++idx)
   {
-    //printf("myRank = %d starts isend with dest = %d, tag = %d, idx = %d\n", myRank, sendRank[idx], MPI_DHT_INDEX_0, idx);
     MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
               sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-    //printf("myRank = %d MPI_Isend OK, idx = %d, nRequest = %d\n", myRank, idx, nRequest);
     ++nRequest;
   }
@@ -784,13 +748,6 @@
   MPI_Barrier(this->internalComm_);
 
-  //printf("myRank = %d, check 2 OK\n", myRank);
-
-  int clientRank;
-  MPI_Comm_rank(this->internalComm_,&clientRank);
-
-  //printf("5(%d): calling wait all for %lu requests\n", myRank, sendBuffSize+recvBuffSize);
+
   MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
-  //printf("            5(%d): calling wait all for %lu requests OK\n", myRank, sendBuffSize+recvBuffSize);
-  //printf("check 3 OK\n");
 
   int nbRecvRank = 0, nbRecvElements = 0;
@@ -805,6 +762,5 @@
     }
   }
-  //printf("check 4 OK\n");
-}
-
-}
+}
+
+}
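
For context, the pattern that the removed debug printf statements used to bracket is a standard nonblocking MPI exchange: post MPI_Irecv and MPI_Isend into a vector of requests, then complete them all with a single MPI_Waitall, as sendRecvRank() does above. The following is a minimal, self-contained sketch of that pattern, not code from XIOS: the ring neighbours, tag value 0, and two-int payload are illustrative assumptions, whereas the actual code uses this->internalComm_, the MPI_DHT_INDEX_0 tag, and the ep_lib wrappers around the MPI types.

    // Minimal sketch (assumed example, not XIOS source): nonblocking ring
    // exchange of a 2-int message, completed with one MPI_Waitall.
    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      const int TAG = 0;                       // stand-in for MPI_DHT_INDEX_0
      int right = (rank + 1) % size;           // destination rank
      int left  = (rank - 1 + size) % size;    // source rank

      int sendBuff[2] = { rank, 2 };           // e.g. (sender rank, element count)
      int recvBuff[2] = { -1, -1 };

      std::vector<MPI_Request> request(2);
      std::vector<MPI_Status>  status(2);

      // Post the receive and the send without blocking, storing each request,
      // as the DHT code does inside its loops over recvRank/sendRank.
      MPI_Irecv(recvBuff, 2, MPI_INT, left,  TAG, MPI_COMM_WORLD, &request[0]);
      MPI_Isend(sendBuff, 2, MPI_INT, right, TAG, MPI_COMM_WORLD, &request[1]);

      // Complete all outstanding requests at once; this is the MPI_Waitall
      // that the deleted printf calls surrounded in the changeset above.
      MPI_Waitall(static_cast<int>(request.size()), &request[0], &status[0]);

      std::printf("rank %d received (%d, %d) from rank %d\n",
                  rank, recvBuff[0], recvBuff[1], left);

      MPI_Finalize();
      return 0;
    }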