Changeset 1053 for XIOS/dev/branch_yushan/src/transformation
- Timestamp: 02/17/17 17:55:37
- Location: XIOS/dev/branch_yushan/src/transformation
- Files: 5 edited
XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.cpp
```diff
--- axis_algorithm_inverse.cpp (r1037)
+++ axis_algorithm_inverse.cpp (r1053)
@@ -173,6 +173,6 @@
 
   // Sending global index of grid source to corresponding process as well as the corresponding mask
-  std::vector<MPI_Request> requests;
-  std::vector<MPI_Status> status;
+  std::vector<ep_lib::MPI_Request> requests;
+  std::vector<ep_lib::MPI_Status> status;
   boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
   boost::unordered_map<int, double* > sendValueToDest;
@@ -184,5 +184,5 @@
     sendValueToDest[recvRank] = new double [recvSize];
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
   }
@@ -206,5 +206,5 @@
 
     // Send global index source and mask
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
   }
@@ -215,6 +215,6 @@
   //printf("(%d) src/transformation/axis_algorithm_inverse 1st waitall OK\n", clientRank);
 
-  std::vector<MPI_Request>().swap(requests);
-  std::vector<MPI_Status>().swap(status);
+  std::vector<ep_lib::MPI_Request>().swap(requests);
+  std::vector<ep_lib::MPI_Status>().swap(status);
 
   // Okie, on destination side, we will wait for information of masked index of source
@@ -224,5 +224,5 @@
     int recvSize = itSend->second;
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
   }
@@ -242,5 +242,5 @@
     }
     // Okie, now inform the destination which source index are masked
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
   }
```
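The pattern throughout this changeset is mechanical: every plain MPI handle type (MPI_Request, MPI_Status, and later MPI_Comm) becomes its ep_lib:: counterpart so the code compiles against XIOS's endpoint (EP) MPI wrapper, while the communication calls themselves keep their familiar names. Below is a minimal, self-contained sketch of the request-vector idiom this diff touches, written against standard MPI for illustration; the buffer and rank names are hypothetical, and under the EP build the handle types would be the ep_lib:: ones shown in the diff.

```cpp
#include <mpi.h>
#include <vector>

// Hedged sketch of the post-then-wait idiom from the diff (plain MPI types;
// the changeset substitutes ep_lib::MPI_Request / ep_lib::MPI_Status).
void exchangeGlobalIndex(MPI_Comm comm,
                         unsigned long* recvBuf, int recvSize, int recvRank,
                         unsigned long* sendBuf, int sendSize, int sendRank)
{
  std::vector<MPI_Request> requests;

  // Grow the vector first, then hand MPI the address of the new element;
  // MPI writes the handle immediately, so later reallocation only copies it.
  requests.push_back(MPI_Request());
  MPI_Irecv(recvBuf, recvSize, MPI_UNSIGNED_LONG, recvRank, 46, comm, &requests.back());

  requests.push_back(MPI_Request());
  MPI_Isend(sendBuf, sendSize, MPI_UNSIGNED_LONG, sendRank, 46, comm, &requests.back());

  // No buffer may be reused or freed before every pending operation completes.
  std::vector<MPI_Status> status(requests.size());
  MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
}
```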
XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.hpp
```diff
--- axis_algorithm_inverse.hpp (r933)
+++ axis_algorithm_inverse.hpp (r1053)
@@ -12,5 +12,8 @@
 #include "axis_algorithm_transformation.hpp"
 #include "transformation.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 namespace xios {
 
```
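The header-side change is just a guarded include: ep_declaration.hpp is pulled in only when the build defines _usingEP, so a plain-MPI build never sees the EP declarations. For comparison, here is a hypothetical sketch (not from the changeset) of how the EP/plain-MPI choice could instead be hidden behind a single alias; XIOS itself writes the ep_lib:: qualification directly at each use site.

```cpp
// Hypothetical alternative (for illustration only): hide the EP/plain-MPI
// choice behind one alias so call sites never mention ep_lib:: at all.
#ifdef _usingEP
  #include "ep_declaration.hpp"
  using request_t = ep_lib::MPI_Request;   // EP wrapper handle
  using status_t  = ep_lib::MPI_Status;
#else
  #include <mpi.h>
  using request_t = MPI_Request;           // native MPI handle
  using status_t  = MPI_Status;
#endif
```

The direct-qualification route taken in the changeset is more verbose but keeps every call site explicit about which layer it targets.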
XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.cpp
```diff
--- domain_algorithm_interpolate.cpp (r1037)
+++ domain_algorithm_interpolate.cpp (r1053)
@@ -371,10 +371,10 @@
   CContextClient* client=context->client;
 
-  MPI_Comm poleComme(MPI_COMM_NULL);
-  MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+  ep_lib::MPI_Comm poleComme(MPI_COMM_NULL);
+  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
   if (MPI_COMM_NULL != poleComme)
   {
     int nbClientPole;
-    MPI_Comm_size(poleComme, &nbClientPole);
+    ep_lib::MPI_Comm_size(poleComme, &nbClientPole);
 
     std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,
@@ -541,5 +541,5 @@
   double* sendWeightBuff = new double [sendBuffSize];
 
-  std::vector<MPI_Request> sendRequest;
+  std::vector<ep_lib::MPI_Request> sendRequest;
 
   int sendOffSet = 0, l = 0;
@@ -562,5 +562,5 @@
     }
 
-    sendRequest.push_back(MPI_Request());
+    sendRequest.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendIndexDestBuff + sendOffSet,
               k,
@@ -570,5 +570,5 @@
               client->intraComm,
               &sendRequest.back());
-    sendRequest.push_back(MPI_Request());
+    sendRequest.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendIndexSrcBuff + sendOffSet,
               k,
@@ -578,5 +578,5 @@
               client->intraComm,
               &sendRequest.back());
-    sendRequest.push_back(MPI_Request());
+    sendRequest.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendWeightBuff + sendOffSet,
               k,
@@ -597,5 +597,5 @@
   while (receivedSize < recvBuffSize)
   {
-    MPI_Status recvStatus;
+    ep_lib::MPI_Status recvStatus;
     MPI_Recv((recvIndexDestBuff + receivedSize),
              recvBuffSize,
@@ -637,6 +637,8 @@
   }
 
-  std::vector<MPI_Status> requestStatus(sendRequest.size());
-  MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
+  std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
+  ep_lib::MPI_Status stat_ignore;
+  MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
+  //MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
 
   delete [] sendIndexDestBuff;
@@ -724,10 +726,10 @@
 
   MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
-  MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
+  ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
 
   std::vector<StdSize> start(1, startIndex - localNbWeight);
   std::vector<StdSize> count(1, localNbWeight);
 
-  WriteNetCdf netCdfWriter(filename, client->intraComm);
+  WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm));
 
   // netCdfWriter = CONetCDF4(filename, false, false, true, client->intraComm, false);
```
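Two details in this file go beyond type renaming. First, the EP layer evidently has no MPI_STATUS_IGNORE sentinel, so the old MPI_Waitall call is kept only as a comment and a concrete ep_lib::MPI_Status is passed instead. Second, WriteNetCdf needs a raw communicator, so the underlying handle is extracted with static_cast<MPI_Comm>(client->intraComm.mpi_comm), which assumes ep_lib::MPI_Comm exposes the native communicator as an mpi_comm member. For reference, a sketch of the portable plain-MPI spellings of the completion step:

```cpp
#include <mpi.h>
#include <vector>

// Plain-MPI sketch of the completion step. Note: under standard MPI a single
// MPI_Status is NOT a valid stand-in for an array of n statuses; the array
// sentinel is MPI_STATUSES_IGNORE. The ep_lib overload presumably tolerates
// the single stat_ignore object, but the portable spellings are these two:
void completeAll(std::vector<MPI_Request>& sendRequest)
{
  // Either collect every status...
  std::vector<MPI_Status> requestStatus(sendRequest.size());
  MPI_Waitall(static_cast<int>(sendRequest.size()), &sendRequest[0], &requestStatus[0]);

  // ...or, when statuses are unused, ignore them all:
  // MPI_Waitall(static_cast<int>(sendRequest.size()), &sendRequest[0], MPI_STATUSES_IGNORE);
}
```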
XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.hpp
```diff
--- domain_algorithm_interpolate.hpp (r1037)
+++ domain_algorithm_interpolate.hpp (r1053)
@@ -13,4 +13,7 @@
 #include "transformation.hpp"
 #include "nc4_data_output.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 namespace xios {
```
XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp
```diff
--- grid_transformation.cpp (r1037)
+++ grid_transformation.cpp (r1053)
@@ -475,6 +475,6 @@
 
   // Sending global index of grid source to corresponding process as well as the corresponding mask
-  std::vector<MPI_Request> requests;
-  std::vector<MPI_Status> status;
+  std::vector<ep_lib::MPI_Request> requests;
+  std::vector<ep_lib::MPI_Status> status;
   boost::unordered_map<int, unsigned char* > recvMaskDst;
   boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
@@ -486,7 +486,7 @@
     recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
   }
@@ -524,7 +524,7 @@
 
     // Send global index source and mask
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
   }
@@ -536,6 +536,6 @@
 
   // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-  std::vector<MPI_Request>().swap(requests);
-  std::vector<MPI_Status>().swap(status);
+  std::vector<ep_lib::MPI_Request>().swap(requests);
+  std::vector<ep_lib::MPI_Status>().swap(status);
   // Okie, on destination side, we will wait for information of masked index of source
   for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
@@ -544,5 +544,5 @@
     int recvSize = itSend->second;
 
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
   }
@@ -581,5 +581,5 @@
 
     // Okie, now inform the destination which source index are masked
-    requests.push_back(MPI_Request());
+    requests.push_back(ep_lib::MPI_Request());
     MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
   }
```
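Between the two exchange phases in this file (and in axis_algorithm_inverse.cpp above), the request and status vectors are recycled with std::vector<T>().swap(v) rather than clear(). A short sketch of why: swapping with a default-constructed temporary both empties the vector and releases its heap allocation, which clear() alone does not guarantee. This was the standard capacity-release idiom before C++11 introduced shrink_to_fit.

```cpp
#include <vector>

// The clear-and-release idiom used between the two exchange rounds above:
// the default-constructed temporary takes ownership of v's storage and frees
// it when it dies, leaving v empty with zero capacity, ready for reuse.
template <typename T>
void resetVector(std::vector<T>& v)
{
  std::vector<T>().swap(v);   // pre-C++11 equivalent of v.clear() + v.shrink_to_fit()
}
```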