Changeset 1196 for XIOS/dev/branch_yushan_merged/src
- Timestamp: 07/05/17 14:14:09
- Location: XIOS/dev/branch_yushan_merged/src
- Files: 5 edited
XIOS/dev/branch_yushan_merged/src/client.cpp
r1187 → r1196

-    test_sendrecv(CXios::globalComm);
+    //test_sendrecv(CXios::globalComm);
     MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
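The change above drops a send/receive smoke test and keeps only the inter-communicator creation. For reference, a minimal sketch of the MPI_Intercomm_create call pattern used here (the wrapper function and argument names below are illustrative; the real call passes CXios::globalComm as the peer communicator and serverLeader as the remote leader):

    #include <mpi.h>

    // Minimal sketch: each group passes its own intra-communicator, the rank
    // of its local leader (0 here, as in the changeset), a peer communicator
    // that contains both groups, the remote group's leader rank in that peer
    // communicator, and a matching tag (0).
    void createInterComm(MPI_Comm intraComm, MPI_Comm peerComm,
                         int remoteLeader, MPI_Comm* interComm)
    {
      MPI_Intercomm_create(intraComm, /*local_leader=*/0, peerComm,
                           remoteLeader, /*tag=*/0, interComm);
    }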
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
r1185 → r1196

     recvIndexBuff = new unsigned long[recvNbIndexCount];

-    std::vector<ep_lib::MPI_Request> request;
+    int request_size = 0;
+
+    int currentIndex = 0;
+    int nbRecvClient = recvRankClient.size();
+
+    int position = 0;
+
+    for (int idx = 0; idx < nbRecvClient; ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+      {
+        request_size++;
+      }
+    }
+
+    request_size += client2ClientIndex.size();
+
+    std::vector<ep_lib::MPI_Request> request(request_size);
+
     std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex,
                                iteRecvIndex = recvRankClient.end(),
                                itbRecvNbIndex = recvNbIndexClientCount.begin(),
                                itRecvNbIndex;
-    int currentIndex = 0;
-    int nbRecvClient = recvRankClient.size();

     boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                   iteIndex = client2ClientIndex.end();
     for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+    {
+      MPI_Isend(itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], MPI_UNSIGNED_LONG,
+                itIndex->first, MPI_DHT_INDEX, commLevel, &request[position]);
+      position++;
+      //sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+    }

     for (int idx = 0; idx < nbRecvClient; ++idx)
     {
       if (0 != recvNbIndexClientCount[idx])
       {
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+        MPI_Irecv(recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], MPI_UNSIGNED_LONG,
+                  recvRankClient[idx], MPI_DHT_INDEX, commLevel, &request[position]);
+        position++;
+        //recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
       }
       currentIndex += recvNbIndexClientCount[idx];
…
-    std::vector<ep_lib::MPI_Status> status(request.size());
+    std::vector<ep_lib::MPI_Status> status(request_size);
     MPI_Waitall(request.size(), &request[0], &status[0]);

     CArray<size_t,1>* tmpGlobalIndex;
…
     }

-    std::vector<ep_lib::MPI_Request> requestOnReturn;
+    request_size = 0;
+    for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
+    {
+      if (0 != recvNbIndexOnReturn[idx])
+      {
+        request_size += 2;
+      }
+    }
+
+    for (int idx = 0; idx < nbRecvClient; ++idx)
+    {
+      if (0 != sendNbIndexOnReturn[idx])
+      {
+        request_size += 2;
+      }
+    }
+
+    std::vector<ep_lib::MPI_Request> requestOnReturn(request_size);
     currentIndex = 0;
+    position = 0;
     for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
     {
       if (0 != recvNbIndexOnReturn[idx])
       {
-        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
-        recvInfoFromClients(recvRankOnReturn[idx],
-                            recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
-                            recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, requestOnReturn);
+        //recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
+        MPI_Irecv(recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], MPI_UNSIGNED_LONG,
+                  recvRankOnReturn[idx], MPI_DHT_INDEX, commLevel, &requestOnReturn[position]);
+        position++;
+        //recvInfoFromClients(recvRankOnReturn[idx],
+        //                    recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+        //                    recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
+        //                    commLevel, requestOnReturn);
+        MPI_Irecv(recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+                  recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), MPI_CHAR,
+                  recvRankOnReturn[idx], MPI_DHT_INFO, commLevel, &requestOnReturn[position]);
+        position++;
       }
       currentIndex += recvNbIndexOnReturn[idx];
…
     }

-      sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
-                         sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
-      sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
-                        sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
+      //sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
+      //                   sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
+      MPI_Isend(client2ClientIndexOnReturn[rank], sendNbIndexOnReturn[idx], MPI_UNSIGNED_LONG,
+                rank, MPI_DHT_INDEX, commLevel, &requestOnReturn[position]);
+      position++;
+      //sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
+      //                  sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
+      MPI_Isend(client2ClientInfoOnReturn[rank], sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), MPI_CHAR,
+                rank, MPI_DHT_INFO, commLevel, &requestOnReturn[position]);
+      position++;
     }
     currentIndex += recvNbIndexClientCount[idx];
…
     int recvNbIndexCount = 0;
     for (int idx = 0; idx < recvNbIndexClientCount.size(); ++idx)
+    {
       recvNbIndexCount += recvNbIndexClientCount[idx];
+    }

     unsigned long* recvIndexBuff;
…
     // it will send a message to the correct clients.
     // Contents of the message are index and its corresponding information
-    std::vector<ep_lib::MPI_Request> request;
+    int request_size = 0;
     int currentIndex = 0;
     int nbRecvClient = recvRankClient.size();
+    int current_pos = 0;
+
     for (int idx = 0; idx < nbRecvClient; ++idx)
     {
       if (0 != recvNbIndexClientCount[idx])
       {
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
-        recvInfoFromClients(recvRankClient[idx],
-                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
-                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, request);
-      }
-      currentIndex += recvNbIndexClientCount[idx];
-    }
+        request_size += 2;
+      }
+      //currentIndex += recvNbIndexClientCount[idx];
+    }
+
+    request_size += client2ClientIndex.size();
+    request_size += client2ClientInfo.size();
+
+    std::vector<ep_lib::MPI_Request> request(request_size);
+
+    //unsigned long* tmp_send_buf_long[client2ClientIndex.size()];
+    //unsigned char* tmp_send_buf_char[client2ClientInfo.size()];
+
+    int info_position = 0;
+    int index_position = 0;
+

     boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
…
     for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
     {
-      sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+      //sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+
+      //tmp_send_buf_long[index_position] = new unsigned long[sendNbIndexBuff[itIndex->first-groupRankBegin]];
+      //for(int i=0; i<sendNbIndexBuff[itIndex->first-groupRankBegin]; i++)
+      //{
+      //  tmp_send_buf_long[index_position][i] = (static_cast<unsigned long * >(itIndex->second))[i];
+      //}
+      //MPI_Isend(tmp_send_buf_long[current_pos], sendNbIndexBuff[itIndex->first-groupRankBegin], MPI_UNSIGNED_LONG,
+      MPI_Isend(itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], MPI_UNSIGNED_LONG,
+                itIndex->first, MPI_DHT_INDEX, commLevel, &request[current_pos]);
+      current_pos++;
+      index_position++;
     }
…
     for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
     {
-      sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
+      //sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
+
+      //tmp_send_buf_char[info_position] = new unsigned char[sendNbInfo[itInfo->first-groupRankBegin]];
+      //for(int i=0; i<sendNbInfo[itInfo->first-groupRankBegin]; i++)
+      //{
+      //  tmp_send_buf_char[info_position][i] = (static_cast<unsigned char * >(itInfo->second))[i];
+      //}
+
+      MPI_Isend(itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], MPI_CHAR,
+                itInfo->first, MPI_DHT_INFO, commLevel, &request[current_pos]);
+      current_pos++;
+      info_position++;
+    }
+
+    for (int idx = 0; idx < nbRecvClient; ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+      {
+        //recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+        MPI_Irecv(recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], MPI_UNSIGNED_LONG,
+                  recvRankClient[idx], MPI_DHT_INDEX, commLevel, &request[current_pos]);
+        current_pos++;
+
+        MPI_Irecv(recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+                  recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
+                  MPI_CHAR, recvRankClient[idx], MPI_DHT_INFO, commLevel, &request[current_pos]);
+        current_pos++;
+
+        //recvInfoFromClients(recvRankClient[idx],
+        //                    recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+        //                    recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
+        //                    commLevel, request);
+      }
+      currentIndex += recvNbIndexClientCount[idx];
     }

     std::vector<ep_lib::MPI_Status> status(request.size());
     MPI_Waitall(request.size(), &request[0], &status[0]);
+
+    //for(int i=0; i<client2ClientInfo.size(); i++)
+    //  delete[] tmp_send_buf_char[i];
+
+    //for(int i=0; i<client2ClientIndex.size(); i++)
+    //  delete[] tmp_send_buf_long[i];

     Index2VectorInfoTypeMap indexToInfoMapping;
…
     std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
     std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
+    //ep_lib::MPI_Request request[sendBuffSize+recvBuffSize];
+    //ep_lib::MPI_Status requestStatus[sendBuffSize+recvBuffSize];

     int my_rank;
     MPI_Comm_rank(this->internalComm_, &my_rank);

     int nRequest = 0;
+    for (int idx = 0; idx < recvBuffSize; ++idx)
+    {
+      MPI_Irecv(&recvBuff[2*idx], 2, MPI_INT,
+                recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
+      ++nRequest;
+    }
…
     }

-    for (int idx = 0; idx < recvBuffSize; ++idx)
-    {
-      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
-                recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-      ++nRequest;
-    }

     //MPI_Barrier(this->internalComm_);

     MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
+    //MPI_Waitall(sendBuffSize+recvBuffSize, request, requestStatus);

     int nbRecvRank = 0, nbRecvElements = 0;
     recvNbRank.clear();
…
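The recurring edit in this file replaces helper calls (sendIndexToClients, recvIndexFromClients, …) that appended to a growing request vector with direct MPI_Isend/MPI_Irecv calls into a vector sized up front and indexed by a running position cursor. A minimal self-contained sketch of that count-post-wait pattern, using plain MPI types and hypothetical buffer and rank names:

    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    // Count-post-wait pattern: size the request vector once, post every
    // nonblocking call into its own slot via a position cursor, then wait
    // on everything together. One element is exchanged per peer to keep
    // the sketch short.
    void exchange(MPI_Comm comm,
                  const std::vector<int>& sendRanks, std::vector<long>& sendBuf,
                  const std::vector<int>& recvRanks, std::vector<long>& recvBuf)
    {
      const int tag = 0;  // hypothetical tag, standing in for MPI_DHT_INDEX
      int request_size = static_cast<int>(sendRanks.size() + recvRanks.size());
      std::vector<MPI_Request> request(request_size);

      int position = 0;
      // Post the receives first, so the exchange does not depend on MPI's
      // internal buffering of the matching sends.
      for (std::size_t i = 0; i < recvRanks.size(); ++i)
        MPI_Irecv(&recvBuf[i], 1, MPI_LONG, recvRanks[i], tag, comm,
                  &request[position++]);
      for (std::size_t i = 0; i < sendRanks.size(); ++i)
        MPI_Isend(&sendBuf[i], 1, MPI_LONG, sendRanks[i], tag, comm,
                  &request[position++]);

      std::vector<MPI_Status> status(request_size);
      MPI_Waitall(request_size, &request[0], &status[0]);
    }

The last hunk of the diff applies the same reordering: the MPI_Irecv loop is moved ahead of the sends before the single MPI_Waitall.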
XIOS/dev/branch_yushan_merged/src/server.cpp
r1187 → r1196

           <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

-    test_sendrecv(CXios::globalComm);
+    // test_sendrecv(CXios::globalComm);
     MPI_Intercomm_create(intraComm,0,CXios::globalComm,clientLeader,0,&newComm) ;
     interComm.push_back(newComm) ;
XIOS/dev/branch_yushan_merged/src/test/test_omp.f90
r1134 → r1196

     if(rank < size-2) then

-    !$omp parallel default(private)
+    !$omp parallel default(firstprivate)

     CALL xios_initialize(id,return_comm=comm)
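With default(private), each thread's copies of variables such as id and comm enter the parallel region uninitialized; default(firstprivate) initializes every copy from the value the master thread holds just before the region. A minimal C++ illustration of the same private/firstprivate distinction (the clause is applied to a named variable here, since the default(firstprivate) form is a Fortran feature that only reached C/C++ in later OpenMP versions):

    #include <omp.h>
    #include <cstdio>

    int main()
    {
      int rank = 42;  // initialized on the master thread before the region

      // firstprivate: every thread gets its own copy of 'rank', initialized
      // to 42. With private instead, each copy would be uninitialized; that
      // is the bug the default(private) -> default(firstprivate) change in
      // test_omp.f90 addresses.
      #pragma omp parallel firstprivate(rank)
      {
        std::printf("thread %d sees rank = %d\n", omp_get_thread_num(), rank);
      }
      return 0;
    }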
XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation.cpp
r1134 → r1196

     // Sending global index of grid source to corresponding process as well as the corresponding mask
     std::vector<ep_lib::MPI_Request> requests;
+    requests.reserve(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());
     std::vector<ep_lib::MPI_Status> status;
     boost::unordered_map<int, unsigned char* > recvMaskDst;
…
     std::vector<ep_lib::MPI_Request>().swap(requests);
     std::vector<ep_lib::MPI_Status>().swap(status);
+    requests.reserve(sendRankSizeMap.size()+recvRankSizeMap.size());
     // Okie, on destination side, we will wait for information of masked index of source
     for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
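The two reserve() calls ensure the requests vector never reallocates while later code appends one MPI_Request per pending nonblocking operation: if the storage moved, any pointer to an element taken when an operation was posted would dangle. A minimal sketch of the idiom (hypothetical names; one receive per peer rank):

    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    // Reserve capacity before a loop that push_backs one request per
    // nonblocking call, so the vector's storage is never reallocated (and
    // the request objects never move) while operations are still in flight.
    void postReceives(MPI_Comm comm, const std::vector<int>& peers,
                      std::vector<int>& buf, std::vector<MPI_Request>& requests)
    {
      requests.reserve(requests.size() + peers.size());
      for (std::size_t i = 0; i < peers.size(); ++i)
      {
        requests.push_back(MPI_Request());
        MPI_Irecv(&buf[i], 1, MPI_INT, peers[i], 0, comm, &requests.back());
      }
    }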