Changeset 1203
- Timestamp: 07/10/17 15:18:16
- Location: XIOS/dev/branch_yushan_merged
- Files: 10 edited
XIOS/dev/branch_yushan_merged/bld.cfg
(r1196 → r1203)
     #bld::target generate_fortran_interface.exe
     #bld::target xios_server.exe
    -#bld::target test_remap.exe test_remap_omp.exe
    +bld::target test_remap.exe
     #bld::target test_regular.exe
     #bld::target test_expand_domain.exe
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_gatherv.cpp
(r1164 → r1203)
           recv_plus_displs[j*num_ep + num_ep -1] < displs[j*num_ep + num_ep -2])
         {
    -      Debug("Call special implementation of mpi_allgatherv.\n");
    +      //printf("proc %d/%d Call special implementation of mpi_allgatherv.\n", ep_rank, ep_size);
           return MPI_Allgatherv_special(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
         }
    …
           recv_plus_displs[j*num_ep+i] < displs[j*num_ep+i-1])
         {
    -      Debug("Call special implementation of mpi_allgatherv.\n");
    +      //printf("proc %d/%d Call special implementation of mpi_allgatherv.\n", ep_rank, ep_size);
           return MPI_Allgatherv_special(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
         }
XIOS/dev/branch_yushan_merged/inputs/REMAP/iodef.xml
(r1185 → r1203)
     <file_group id="read_then_write_files" enabled=".TRUE.">
    -  <file id="output_regular_pole" name="output_dst_regular" >
    -    <field field_ref="tmp_field_0" name="field_regular_0" enabled=".TRUE."/>
    -    <field field_ref="dst_field_regular" name="field_regular" enabled=".TRUE."/>
    -    <field field_ref="dst_field_regular_pole_0" name="field_regular_pole_0" enabled=".TRUE." />
    -    <field field_ref="dst_field_regular_pole_1" name="field_regular_pole_1" enabled=".TRUE." />
    +  <file id="output_regular_pole" name="output_dst_regular" enabled=".FALSE." >
    +    <field field_ref="tmp_field_0" name="field_regular_0" enabled=".FALSE."/>
    +    <field field_ref="dst_field_regular" name="field_regular" enabled=".FALSE."/>
    +    <field field_ref="dst_field_regular_pole_0" name="field_regular_pole_0" enabled=".FALSE." />
    +    <field field_ref="dst_field_regular_pole_1" name="field_regular_pole_1" enabled=".FALSE." />
       </file>
    -  <file id="output_dst_curvilinear" name="output_dst_curvilinear" enabled=".TRUE." >
    +  <file id="output_dst_curvilinear" name="output_dst_curvilinear" enabled=".FALSE." >
         <field field_ref="tmp_field_1" operation="instant"/>
       </file>
    -  <file id="output_dst_unstructured" name="output_dst_unstructured" enabled=".TRUE." >
    +  <file id="output_dst_unstructured" name="output_dst_unstructured" enabled=".FALSE." >
         <field field_ref="tmp_field_2" operation="instant"/>
       </file>
    …
     <file_group id="write_files" >
       <file id="output_2D" name="output_2D" enabled=".TRUE.">
    -    <field field_ref="src_field_2D" name="field_src" enabled=".TRUE."/>
    -    <field field_ref="src_field_2D_clone" name="field_src_clone" default_value="100000" enabled=".TRUE."/>
    -    <field field_ref="src_field_2D" name="field_dst_regular_0" domain_ref="dst_domain_regular_pole" enabled=".TRUE."/>
    +    <field field_ref="src_field_2D" name="field_src" enabled=".FALSE."/>
    +    <field field_ref="src_field_2D_clone" name="field_src_clone" default_value="100000" enabled=".FALSE."/>
    +    <field field_ref="src_field_2D" name="field_dst_regular_0" domain_ref="dst_domain_regular_pole" enabled=".FALSE."/>
         <field field_ref="dst_field_2D" name="field_dst_regular_1" enabled=".TRUE." />
    -    <field field_ref="dst_field_2D_regular_pole" name="field_dst_regular_2" enabled=".TRUE."/>
    -    <field field_ref="dst_field_2D_clone" name="field_dst_regular_3" detect_missing_value=".false." default_value="100000" enabled=".TRUE."/>
    -    <field field_ref="dst_field_2D_extract" name="field_dst_regular_4" enabled=".TRUE."/>
    +    <field field_ref="dst_field_2D_regular_pole" name="field_dst_regular_2" enabled=".FALSE."/>
    +    <field field_ref="dst_field_2D_clone" name="field_dst_regular_3" detect_missing_value=".false." default_value="100000" enabled=".FALSE."/>
    +    <field field_ref="dst_field_2D_extract" name="field_dst_regular_4" enabled=".FALSE."/>
       </file>
    -  <file id="output_3D" name="output_3D" enabled=".TRUE.">
    +  <file id="output_3D" name="output_3D" enabled=".FALSE.">
         <field field_ref="src_field_3D" name="field_src" />
         <field field_ref="src_field_3D_pression" name="field" />
         <field field_ref="dst_field_3D_interp" name="field_dst_interp_domain" />
    -    <field field_ref="dst_field_3D_interp" name="field_dst_interp_domain_axis" domain_ref="dst_domain_regular_pole" />
    +    <field field_ref="dst_field_3D_interp" name="field_dst_interp_domain_axis" domain_ref="dst_domain_regular_pole" enabled=".FALSE."/>
       </file>
    -  <file id="output_4D" name="output_4D" enabled=".TRUE.">
    +  <file id="output_4D" name="output_4D" enabled=".FALSE.">
         <field field_ref="src_field_4D" name="field_4D" />
    -    <field field_ref="dst_field_4D_extract" name="field_4D_extract" />
    +    <field field_ref="dst_field_4D_extract" name="field_4D_extract" enabled=".FALSE."/>
       </file>
     </file_group>
    -<file_group id="read_files" >
    +<file_group id="read_files" enabled=".FALSE." >
       <file id="output_src_regular" name="output_src_regular" mode="read" >
         <field id="src_field_regular" name="field" grid_ref="src_grid_regular_read" operation="instant"/>
XIOS/dev/branch_yushan_merged/inputs/Unstruct/iodef.xml
(r1179 → r1203)
     <!-- <field field_ref="field_A_expand" name="field"/> -->
     <field field_ref="field_A_srf" name="field_A"/>
    -<field field_ref="field_A_srf" name="field_rect" grid_ref="grid_rect"/>
    -<field field_ref="field_A_srf" name="field_rect2" grid_ref="grid_rect2"/>
    +<field field_ref="field_A_srf" name="field_rect" grid_ref="grid_rect" enabled=".FALSE." />
    +<field field_ref="field_A_srf" name="field_rect2" grid_ref="grid_rect2" enabled=".TRUE." />
    +<field field_ref="field_A_srf" name="field_rect3" grid_ref="grid_rect3" enabled=".TRUE."/>
     <!-- <field field_ref="field_A_srf" name="field_curv" grid_ref="grid_curv"/> -->
     </file>
    …
     </domain>

    -<domain id="dst_domain_regular_pole2" ni_glo="90" nj_glo="90" type="rectilinear">
    +<domain id="dst_domain_regular_pole2" ni_glo="80" nj_glo="80" type="rectilinear">
       <generate_rectilinear_domain id="domain_regular_pole2"/>
       <interpolate_domain write_weight="false" order="1" renormalize="true"/>
     </domain>

    +<domain id="dst_domain_regular_pole3" ni_glo="80" nj_glo="80" type="rectilinear">
    +  <generate_rectilinear_domain id="domain_regular_pole3"/>
    +  <interpolate_domain write_weight="false" order="1" renormalize="true"/>
    +  <zoom_domain ibegin="0" ni="65" jbegin="0" nj="65" />
    +</domain>

     <domain id="src_domain_curvilinear_read" type="curvilinear">
    …
       <axis axis_ref="axis_srf" />
     </grid>

    +<grid id="grid_rect3">
    +  <domain domain_ref="dst_domain_regular_pole3" />
    +  <axis axis_ref="axis_srf" />
    +</grid>

     <grid id="grid_curv">
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
(r1196 → r1203)
                               std::vector<ep_lib::MPI_Request>& requestSendIndex)
     {
    +  printf("should not call this function sendIndexToClients");
       ep_lib::MPI_Request request;
       requestSendIndex.push_back(request);
    …
                               std::vector<ep_lib::MPI_Request>& requestRecvIndex)
     {
    +  printf("should not call this function recvIndexFromClients");
       ep_lib::MPI_Request request;
       requestRecvIndex.push_back(request);
    …
                               std::vector<ep_lib::MPI_Request>& requestSendInfo)
     {
    +  printf("should not call this function sendInfoToClients");
       ep_lib::MPI_Request request;
       requestSendInfo.push_back(request);
    …
                               std::vector<ep_lib::MPI_Request>& requestRecvInfo)
     {
    +  printf("should not call this function recvInfoFromClients\n");
       ep_lib::MPI_Request request;
       requestRecvInfo.push_back(request);
XIOS/dev/branch_yushan_merged/src/filter/spatial_transform_filter.cpp
(r1134 → r1203)
           sendBuff[idxSendBuff] = new double[itSend->second.numElements()];
         }
    +
    +    const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv;
    +    CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv,
    +                                                                     iteRecv = localIndexToReceive.end();

         idxSendBuff = 0;
    -    std::vector<ep_lib::MPI_Request> sendRecvRequest;
    +    std::vector<ep_lib::MPI_Request> sendRecvRequest(localIndexToSend.size()+localIndexToReceive.size());
    +    int position = 0;
         for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff)
         {
    …
             sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx));
           }
    -      sendRecvRequest.push_back(ep_lib::MPI_Request());
    -      MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());
    +      MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position]);
    +      position++;
         }

         // Receiving data on destination fields
    -    const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv;
    -    CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv,
    -                                                                     iteRecv = localIndexToReceive.end();
         int recvBuffSize = 0;
         for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) recvBuffSize += itRecv->second.size(); //(recvBuffSize < itRecv->second.size())
    …
           int srcRank = itRecv->first;
           int countSize = itRecv->second.size();
    -      sendRecvRequest.push_back(ep_lib::MPI_Request());
    -      MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back());
    +      MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position]);
    +      position++;
           currentBuff += countSize;
         }
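The change above replaces push_back plus &sendRecvRequest.back() with a request vector that is sized once up front and filled by index. Below is a minimal sketch of that pattern, not XIOS code; it assumes (as the ep_lib endpoint wrapper appears to) that the address handed to MPI_Isend/MPI_Irecv must remain valid until the matching wait, so the vector must never reallocate while requests are pending. The exchange() helper and its arguments are illustrative only.

    // Minimal sketch of the pre-sized request vector pattern (not XIOS code).
    // Assumption: the request address given to MPI_Isend/MPI_Irecv must stay
    // valid until the wait completes, so the vector must not reallocate
    // between posting and completion.
    #include <mpi.h>
    #include <vector>

    // Illustrative helper: post one send and one receive, then wait for both.
    void exchange(std::vector<double>& sendBuf, std::vector<double>& recvBuf,
                  int destRank, int srcRank, MPI_Comm comm)
    {
      std::vector<MPI_Request> requests(2);   // sized once: element addresses stay stable
      int position = 0;

      MPI_Isend(sendBuf.data(), static_cast<int>(sendBuf.size()), MPI_DOUBLE,
                destRank, 12, comm, &requests[position]);
      position++;

      MPI_Irecv(recvBuf.data(), static_cast<int>(recvBuf.size()), MPI_DOUBLE,
                srcRank, 12, comm, &requests[position]);
      position++;

      std::vector<MPI_Status> status(requests.size());
      MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());
    }

With standard MPI the request is just a handle value, so the push_back form would also work; the fixed-size form mainly matters for wrappers that track requests by address, and it avoids reallocation inside the posting loop.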
XIOS/dev/branch_yushan_merged/src/node/field.cpp
(r1164 → r1203)
       if (context->hasClient)
       {
    +    printf("proc %d begein transformation\n", myRank);
         solveTransformedGrid();
    +    printf("proc %d end transformation\n", myRank);
    +    MPI_Barrier(context->client->intraComm);
       }
XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90
(r1179 → r1203)
       ierr=NF90_GET_VAR(ncid,varid, dst_boundslat, start=(/1,dst_ibegin+1/),count=(/dst_nvertex,dst_ni/))

    +  !$omp barrier
    +
    +  !$omp master
    +  CALL MPI_barrier(comm, ierr)
    +  !$omp end master
    +
    +  !$omp barrier
    +
       CALL xios_context_initialize("test",comm)
       CALL xios_get_handle("test",ctx_hdl)
    …
       CALL xios_close_context_definition()

    -  CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj)
    -  ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj))
    -
    -  CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n)
    -  CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj)
    -  ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n))
    -
    -  CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj)
    -  ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj))
    -
    -  CALL xios_recv_field("src_field_regular", tmp_field_0)
    -  CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
    -  CALL xios_recv_field("src_field_unstructured", tmp_field_2)
    +  ! CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj)
    +  ! ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj))
    +
    +  ! CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n)
    +  ! CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj)
    +  ! ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n))
    +
    +  ! CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj)
    +  ! ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj))
    +
    +  ! CALL xios_recv_field("src_field_regular", tmp_field_0)
    +  ! CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
    +  ! CALL xios_recv_field("src_field_unstructured", tmp_field_2)

       DO ts=1,10
    …
         CALL xios_send_field("src_field_2D",src_field_2D)

    -    !DO i=1,src_ni
    -    !src_field_2D_clone(i) = src_field_2D(i)
    -    !IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN
    -    !src_field_2D_clone(i) = missing_value
    -    !ENDIF
    -    !ENDDO
    -
    -    !CALL xios_send_field("src_field_2D_clone",src_field_2D_clone)
    -    !CALL xios_send_field("src_field_3D",src_field_3D)
    -    !CALL xios_send_field("src_field_3D_clone",src_field_3D)
    -    !CALL xios_send_field("src_field_4D",src_field_4D)
    -    !CALL xios_send_field("src_field_3D_pression",src_field_pression)
    -    CALL xios_send_field("tmp_field_0",tmp_field_0)
    -    CALL xios_send_field("tmp_field_1",tmp_field_1)
    -    CALL xios_send_field("tmp_field_2",tmp_field_2)
    +    DO i=1,src_ni
    +      src_field_2D_clone(i) = src_field_2D(i)
    +      IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN
    +        src_field_2D_clone(i) = missing_value
    +      ENDIF
    +    ENDDO
    +
    +    CALL xios_send_field("src_field_2D_clone",src_field_2D_clone)
    +    CALL xios_send_field("src_field_3D",src_field_3D)
    +    CALL xios_send_field("src_field_3D_clone",src_field_3D)
    +    CALL xios_send_field("src_field_4D",src_field_4D)
    +    CALL xios_send_field("src_field_3D_pression",src_field_pression)
    +    ! CALL xios_send_field("tmp_field_0",tmp_field_0)
    +    ! CALL xios_send_field("tmp_field_1",tmp_field_1)
    +    ! CALL xios_send_field("tmp_field_2",tmp_field_2)
         CALL wait_us(5000) ;
       ENDDO
    …
       DEALLOCATE(src_lon, src_lat, src_boundslon,src_boundslat, src_field_2D)
       DEALLOCATE(dst_lon, dst_lat, dst_boundslon,dst_boundslat)
    -  DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2)
    +  !DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2)

       CALL xios_finalize()
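The barriers added before xios_context_initialize implement a common hybrid MPI/OpenMP hand-off: all threads of a process synchronize, a single thread performs the process-wide MPI_Barrier, and all threads synchronize again before continuing. A rough C++/OpenMP equivalent is sketched below; the helper name and the assumption that MPI was initialized with a threading level permitting this call are mine, not part of the changeset.

    // Rough equivalent (C++/OpenMP, not the Fortran test) of the synchronization
    // added before xios_context_initialize. Assumes every thread of the team
    // calls this function from inside the parallel region.
    #include <mpi.h>

    void sync_before_context_init(MPI_Comm comm)   // illustrative helper name
    {
      #pragma omp barrier          // all threads of this process arrive here

      #pragma omp master           // one thread per process joins the global barrier
      MPI_Barrier(comm);

      #pragma omp barrier          // master has no implied barrier, so wait again
    }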
XIOS/dev/branch_yushan_merged/src/test/test_unstruct_omp.f90
(r1177 → r1203)
       CALL xios_finalize()

    -  print *, "Client : xios_finalize "
    +  print *, mpi_rank, "Client : xios_finalize "

       !$omp barrier
XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation.cpp
(r1196 → r1203)
       // Sending global index of grid source to corresponding process as well as the corresponding mask
    -  std::vector<ep_lib::MPI_Request> requests;
    -  requests.reserve(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());
    +  std::vector<ep_lib::MPI_Request> requests(2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());
       std::vector<ep_lib::MPI_Status> status;
       boost::unordered_map<int, unsigned char* > recvMaskDst;
       boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
    +  int position = 0;
       for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
       {
    …
         recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
    +    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[position]);
    +    position++;
    +
    +    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[position]);
    +    position++;
       }
    …
         // Send global index source and mask
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
    +    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[position]);
    +    position++;
    +
    +    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[position]);
    +    position++;
       }
    …
       // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
    -  std::vector<ep_lib::MPI_Request>().swap(requests);
    -  std::vector<ep_lib::MPI_Status>().swap(status);
    -  requests.reserve(sendRankSizeMap.size()+recvRankSizeMap.size());
    +  //std::vector<ep_lib::MPI_Request>().swap(requests);
    +  //std::vector<ep_lib::MPI_Status>().swap(status);
    +  requests.resize(sendRankSizeMap.size()+recvRankSizeMap.size());
    +  position = 0;
       // Okie, on destination side, we will wait for information of masked index of source
       for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
    …
         int recvRank = itSend->first;
         int recvSize = itSend->second;
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
    +    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]);
    +    position++;
       }
    …
         // Okie, now inform the destination which source index are masked
    -    requests.push_back(ep_lib::MPI_Request());
    -    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
    +    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]);
    +    position++;
       }
       status.resize(requests.size());
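Here the same requests vector is reused for the second round of communication: instead of swapping in a fresh vector and reserving, it is resized to the new count and position is reset. The short sketch below illustrates that two-phase reuse; it assumes, as the surrounding code appears to, that the first batch has already been completed with a wait before the vector is resized, since resizing while requests are still pending would move live request objects. The two_phase() helper, buffers, and tags are illustrative only.

    // Sketch of reusing one request vector across two communication phases
    // (not XIOS code). Assumption: phase 1 is fully completed before the
    // vector is resized for phase 2.
    #include <mpi.h>
    #include <vector>

    void two_phase(MPI_Comm comm, int peer)   // illustrative helper
    {
      int sendVal = 1, recvVal = 0;
      std::vector<MPI_Request> requests(2);
      std::vector<MPI_Status>  status(2);
      int position = 0;

      // Phase 1: post both operations by index, then wait on all of them.
      MPI_Irecv(&recvVal, 1, MPI_INT, peer, 46, comm, &requests[position]); position++;
      MPI_Isend(&sendVal, 1, MPI_INT, peer, 46, comm, &requests[position]); position++;
      MPI_Waitall(position, requests.data(), status.data());

      // Phase 2: reuse the same storage once phase 1 is done.
      requests.resize(2);
      status.resize(2);
      position = 0;
      MPI_Irecv(&recvVal, 1, MPI_INT, peer, 48, comm, &requests[position]); position++;
      MPI_Isend(&sendVal, 1, MPI_INT, peer, 48, comm, &requests[position]); position++;
      MPI_Waitall(position, requests.data(), status.data());
    }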