Changeset 1203 for XIOS/dev/branch_yushan_merged/src
- Timestamp:
- 07/10/17 15:18:16 (7 years ago)
- Location:
- XIOS/dev/branch_yushan_merged/src
- Files:
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
r1196 r1203 670 670 std::vector<ep_lib::MPI_Request>& requestSendIndex) 671 671 { 672 printf("should not call this function sendIndexToClients"); 672 673 ep_lib::MPI_Request request; 673 674 requestSendIndex.push_back(request); … … 688 689 std::vector<ep_lib::MPI_Request>& requestRecvIndex) 689 690 { 691 printf("should not call this function recvIndexFromClients"); 690 692 ep_lib::MPI_Request request; 691 693 requestRecvIndex.push_back(request); … … 707 709 std::vector<ep_lib::MPI_Request>& requestSendInfo) 708 710 { 711 printf("should not call this function sendInfoToClients"); 709 712 ep_lib::MPI_Request request; 710 713 requestSendInfo.push_back(request); … … 726 729 std::vector<ep_lib::MPI_Request>& requestRecvInfo) 727 730 { 731 printf("should not call this function recvInfoFromClients\n"); 728 732 ep_lib::MPI_Request request; 729 733 requestRecvInfo.push_back(request); -
XIOS/dev/branch_yushan_merged/src/filter/spatial_transform_filter.cpp
r1134 r1203 153 153 sendBuff[idxSendBuff] = new double[itSend->second.numElements()]; 154 154 } 155 156 const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv; 157 CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv, 158 iteRecv = localIndexToReceive.end(); 155 159 156 160 idxSendBuff = 0; 157 std::vector<ep_lib::MPI_Request> sendRecvRequest; 161 std::vector<ep_lib::MPI_Request> sendRecvRequest(localIndexToSend.size()+localIndexToReceive.size()); 162 int position = 0; 158 163 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 159 164 { … … 165 170 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 166 171 } 167 sendRecvRequest.push_back(ep_lib::MPI_Request());168 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());172 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position]); 173 position++; 169 174 } 170 175 171 176 // Receiving data on destination fields 172 const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv; 173 CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv, 174 iteRecv = localIndexToReceive.end(); 177 175 178 int recvBuffSize = 0; 176 179 for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) recvBuffSize += itRecv->second.size(); //(recvBuffSize < itRecv->second.size()) … … 183 186 int srcRank = itRecv->first; 184 187 int countSize = itRecv->second.size(); 185 sendRecvRequest.push_back(ep_lib::MPI_Request()); 186 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 188 189 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position]); 190 position++; 187 191 currentBuff += countSize; 188 192 } -
XIOS/dev/branch_yushan_merged/src/node/field.cpp
r1164 r1203 715 715 if (context->hasClient) 716 716 { 717 printf("proc %d begein transformation\n", myRank); 717 718 solveTransformedGrid(); 719 printf("proc %d end transformation\n", myRank); 720 MPI_Barrier(context->client->intraComm); 718 721 } 719 722 -
XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90
r1179 r1203 183 183 ierr=NF90_GET_VAR(ncid,varid, dst_boundslat, start=(/1,dst_ibegin+1/),count=(/dst_nvertex,dst_ni/)) 184 184 185 185 !$omp barrier 186 187 !$omp master 188 CALL MPI_barrier(comm, ierr) 189 !$omp end master 190 191 !$omp barrier 192 193 186 194 CALL xios_context_initialize("test",comm) 187 195 CALL xios_get_handle("test",ctx_hdl) … … 214 222 CALL xios_close_context_definition() 215 223 216 CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj)217 ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj))218 219 CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n)220 CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj)221 ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n))222 223 CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj)224 ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj))225 226 CALL xios_recv_field("src_field_regular", tmp_field_0)227 CALL xios_recv_field("src_field_curvilinear", tmp_field_1)228 CALL xios_recv_field("src_field_unstructured", tmp_field_2)224 ! CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj) 225 ! ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj)) 226 227 ! CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n) 228 ! CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj) 229 ! ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n)) 230 231 ! CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj) 232 ! ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj)) 233 234 ! CALL xios_recv_field("src_field_regular", tmp_field_0) 235 ! CALL xios_recv_field("src_field_curvilinear", tmp_field_1) 236 ! 
CALL xios_recv_field("src_field_unstructured", tmp_field_2) 229 237 230 238 DO ts=1,10 … … 232 240 CALL xios_send_field("src_field_2D",src_field_2D) 233 241 234 !DO i=1,src_ni235 !src_field_2D_clone(i) = src_field_2D(i)236 !IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN237 !src_field_2D_clone(i) = missing_value238 !ENDIF239 !ENDDO240 241 !CALL xios_send_field("src_field_2D_clone",src_field_2D_clone)242 !CALL xios_send_field("src_field_3D",src_field_3D)243 !CALL xios_send_field("src_field_3D_clone",src_field_3D)244 !CALL xios_send_field("src_field_4D",src_field_4D)245 !CALL xios_send_field("src_field_3D_pression",src_field_pression)246 CALL xios_send_field("tmp_field_0",tmp_field_0)247 CALL xios_send_field("tmp_field_1",tmp_field_1)248 CALL xios_send_field("tmp_field_2",tmp_field_2)242 DO i=1,src_ni 243 src_field_2D_clone(i) = src_field_2D(i) 244 IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN 245 src_field_2D_clone(i) = missing_value 246 ENDIF 247 ENDDO 248 249 CALL xios_send_field("src_field_2D_clone",src_field_2D_clone) 250 CALL xios_send_field("src_field_3D",src_field_3D) 251 CALL xios_send_field("src_field_3D_clone",src_field_3D) 252 CALL xios_send_field("src_field_4D",src_field_4D) 253 CALL xios_send_field("src_field_3D_pression",src_field_pression) 254 ! CALL xios_send_field("tmp_field_0",tmp_field_0) 255 ! CALL xios_send_field("tmp_field_1",tmp_field_1) 256 ! CALL xios_send_field("tmp_field_2",tmp_field_2) 249 257 CALL wait_us(5000) ; 250 258 ENDDO … … 254 262 DEALLOCATE(src_lon, src_lat, src_boundslon,src_boundslat, src_field_2D) 255 263 DEALLOCATE(dst_lon, dst_lat, dst_boundslon,dst_boundslat) 256 DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2)264 !DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2) 257 265 258 266 CALL xios_finalize() -
XIOS/dev/branch_yushan_merged/src/test/test_unstruct_omp.f90
r1177 r1203 267 267 CALL xios_finalize() 268 268 269 print *, "Client : xios_finalize "269 print *, mpi_rank, "Client : xios_finalize " 270 270 271 271 !$omp barrier -
XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation.cpp
r1196 r1203 473 473 474 474 // Sending global index of grid source to corresponding process as well as the corresponding mask 475 std::vector<ep_lib::MPI_Request> requests; 476 requests.reserve(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size()); 475 std::vector<ep_lib::MPI_Request> requests(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size()); 477 476 std::vector<ep_lib::MPI_Status> status; 478 477 boost::unordered_map<int, unsigned char* > recvMaskDst; 479 478 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 479 int position = 0; 480 480 for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv) 481 481 { … … 485 485 recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 486 486 487 requests.push_back(ep_lib::MPI_Request()); 488 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 489 requests.push_back(ep_lib::MPI_Request()); 490 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 487 488 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[position]); 489 position++; 490 491 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[position]); 492 position++; 491 493 } 492 494 … … 523 525 524 526 // Send global index source and mask 525 requests.push_back(ep_lib::MPI_Request()); 526 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 527 requests.push_back(ep_lib::MPI_Request()); 528 MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 527 528 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[position]); 529 position++; 530 531 
MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[position]); 532 position++; 529 533 } 530 534 … … 533 537 534 538 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 535 std::vector<ep_lib::MPI_Request>().swap(requests); 536 std::vector<ep_lib::MPI_Status>().swap(status); 537 requests.reserve(sendRankSizeMap.size()+recvRankSizeMap.size()); 539 //std::vector<ep_lib::MPI_Request>().swap(requests); 540 //std::vector<ep_lib::MPI_Status>().swap(status); 541 requests.resize(sendRankSizeMap.size()+recvRankSizeMap.size()); 542 position = 0; 538 543 // Okie, on destination side, we will wait for information of masked index of source 539 544 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) … … 542 547 int recvSize = itSend->second; 543 548 544 requests.push_back(ep_lib::MPI_Request());545 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());549 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]); 550 position++; 546 551 } 547 552 … … 579 584 580 585 // Okie, now inform the destination which source index are masked 581 requests.push_back(ep_lib::MPI_Request());582 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());586 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]); 587 position++; 583 588 } 584 589 status.resize(requests.size());
Note: See TracChangeset for help on using the changeset viewer.