Changeset 1176
- Timestamp: 06/21/17 09:09:59 (7 years ago)
- Location: XIOS/dev/branch_yushan_merged
- Files: 16 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
XIOS/dev/branch_yushan_merged/bld.cfg
(r1141 → r1176)
  #bld::target test_expand_domain.exe
  #bld::target test_new_features.exe test_unstruct_complete.exe
- bld::target test_omp.exe test_complete_omp.exe test_remap_omp.exe
+ bld::target test_omp.exe test_complete_omp.exe test_remap_omp.exe test_unstruct_omp.exe
  #bld::target test_client.exe test_complete.exe #test_xios2_cmip6.exe
  #bld::target test_connectivity_expand.exe
XIOS/dev/branch_yushan_merged/extern/remap/src/libmapper.cpp
(r1155 → r1176)
  #include "gridRemap.hpp"

+ #include <stdio.h>
+
  using namespace sphereRemap ;

- extern CRemapGrid srcGrid;
- #pragma omp threadprivate(srcGrid)
+ //extern CRemapGrid srcGrid;
+ //#pragma omp threadprivate(srcGrid)

- extern CRemapGrid tgtGrid;
- #pragma omp threadprivate(tgtGrid)
+ //extern CRemapGrid tgtGrid;
+ //#pragma omp threadprivate(tgtGrid)
…
                    int order, int* n_weights)
  {
+   printf("libmapper callded : remap_get_num_weights\n");
    assert(src_bounds_lon);
    assert(src_bounds_lat);
    assert(n_vert_per_cell_src >= 3);
    assert(n_cell_src >= 4);
    assert(dst_bounds_lon);
    assert(dst_bounds_lat);
    assert(n_vert_per_cell_dst >= 3);
    assert(n_cell_dst >= 4);
    assert(1 <= order && order <= 2);

    mapper = new Mapper(MPI_COMM_WORLD);
…
    double tic = cputime();
    mapper = new Mapper(MPI_COMM_WORLD);
    mapper->setVerbosity(PROGRESS) ;
    mapper->buildSSTree(src_msh, dst_msh);
    double tac = cputime();
…
                    double* centre_lon, double* centre_lat, double* areas)
  {
+   printf("libmapper callded : remap_get_barycentres_and_areas\n");
    for (int i = 0; i < n_cell; i++)
    {
…
  extern "C" void remap_get_weights(double* weights, int* src_indices, int* dst_indices)
  {
+   printf("libmapper callded : remap_get_weights\n");
    memcpy(weights, mapper->remapMatrix, mapper->nWeights*sizeof(double));
    memcpy(src_indices, mapper->srcAddress, mapper->nWeights*sizeof(int));
XIOS/dev/branch_yushan_merged/extern/remap/src/polyg.cpp
(r950 → r1176)
  #include <cassert>
  #include <iostream>
+ #include <stdio.h>
  #include "elt.hpp"
  #include "errhandle.hpp"
…
  {
    if (N < 3)
-     return 0; /* polygons with less then three vertices have zero area */
+     return 0; /* polygons with less than three vertices have zero area */
    Coord t[3];
    t[0] = barycentre(x, N);
…
    t[2] = x[ii];
    double sc=scalarprod(crossprod(t[1] - t[0], t[2] - t[0]), t[0]) ;
-   assert(sc >= -1e-10); // Error: tri a l'env (wrong orientation)
+   //assert(sc >= -1e-10); // Error: tri a l'env (wrong orientation)
+   if(sc < -1e-10)
+   {
+     printf("N=%d, sc = %f, t[0]=(%f,%f,%f), t[1]=(%f,%f,%f), t[2]=(%f,%f,%f)\n", N, sc,
+            t[0].x, t[0].y, t[0].z,
+            t[1].x, t[1].y, t[1].z,
+            t[2].x, t[2].y, t[2].z);
+     assert(sc >= -1e-10);
+   }
    double area_gc = triarea(t[0], t[1], t[2]);
    double area_sc_gc_moon = 0;
XIOS/dev/branch_yushan_merged/extern/remap/src/triple.cpp
(r1016 → r1176)
  namespace sphereRemap {

- extern const Coord ORIGIN(0.0, 0.0, 0.0);
+ const Coord ORIGIN(0.0, 0.0, 0.0);

  std::ostream& operator<<(std::ostream& os, const Coord& c) {
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_message.cpp
(r1134 → r1176)
  }
  #elif _intelmpi
- ::MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &message, &status);
+ //#pragma omp critical (_mpi_call)
+ //::MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &message, &status);
+ #pragma omp critical (_mpi_call)
+ {
+   ::MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &status);
+   if(flag)
+   {
+     Debug("find message in mpi comm \n");
+     mpi_source = status.MPI_SOURCE;
+     int tag = status.MPI_TAG;
+     ::MPI_Mprobe(mpi_source, tag, mpi_comm, &message, &status);
+   }
+ }
  #endif

  if(flag)
  {
…
  }
  #elif _intelmpi
- ::MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &message, &status);
+ #pragma omp critical (_mpi_call)
+ {
+   ::MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &status);
+   if(flag)
+   {
+     Debug("find message in mpi comm \n");
+     mpi_source = status.MPI_SOURCE;
+     int tag = status.MPI_TAG;
+     ::MPI_Mprobe(mpi_source, tag, mpi_comm, &message, &status);
+   }
+ }
+ //::MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &message, &status);
  #endif
…
  }
  #elif _intelmpi
- ::MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &message, &status);
+ #pragma omp critical (_mpi_call)
+ {
+   ::MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &status);
+   if(flag)
+   {
+     Debug("find message in mpi comm \n");
+     mpi_source = status.MPI_SOURCE;
+     int tag = status.MPI_TAG;
+     ::MPI_Mprobe(mpi_source, tag, mpi_comm, &message, &status);
+   }
+ }
+ //::MPI_Improbe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &flag, &message, &status);
  #endif
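The hunks above replace a single MPI_Improbe call with an MPI_Iprobe/MPI_Mprobe pair guarded by an OpenMP critical section. A minimal standalone sketch of that probe-then-matched-probe pattern in plain MPI plus OpenMP follows; it is independent of the XIOS ep_lib wrappers, and the function name poll_one_message and the MPI_INT payload are illustrative assumptions, not part of the changeset:

    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    // Poll comm for one message: non-blocking probe, then a matched probe so the
    // message is bound to an MPI_Message handle, then a matched receive.
    bool poll_one_message(MPI_Comm comm)
    {
      int flag = 0;
      MPI_Status status;
      MPI_Message msg;

      #pragma omp critical (_mpi_call)   // serialize MPI probes across OpenMP threads
      {
        MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &flag, &status);
        if (flag)
          // Re-probe the same (source, tag): the message is now bound to msg
          // and can only be received through MPI_Mrecv on this handle.
          MPI_Mprobe(status.MPI_SOURCE, status.MPI_TAG, comm, &msg, &status);
      }
      if (!flag) return false;

      int count = 0;
      MPI_Get_count(&status, MPI_INT, &count);
      std::vector<int> buf(count);
      MPI_Mrecv(buf.data(), count, MPI_INT, &msg, MPI_STATUS_IGNORE);
      std::printf("received %d ints from rank %d, tag %d\n",
                  count, status.MPI_SOURCE, status.MPI_TAG);
      return true;
    }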
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_wait.cpp
(r1164 → r1176)
  if(request->type == 1)
  {
-   ::MPI_Request *mpi_request = static_cast< ::MPI_Request* >(&(request->mpi_request));
+   ::MPI_Request mpi_request = static_cast< ::MPI_Request >(request->mpi_request);
    ::MPI_Status mpi_status;
    ::MPI_Errhandler_set(MPI_COMM_WORLD_STD, MPI_ERRORS_RETURN);
-   int error_code = ::MPI_Wait(mpi_request, &mpi_status);
+   int error_code = ::MPI_Wait(&mpi_request, &mpi_status);
    if (error_code != MPI_SUCCESS) {
…
  if(request->type == 3)
  {
-   ::MPI_Request *mpi_request = static_cast< ::MPI_Request* >(&(request->mpi_request));
+   ::MPI_Request mpi_request = static_cast< ::MPI_Request >(request->mpi_request);
    ::MPI_Status mpi_status;
    ::MPI_Errhandler_set(MPI_COMM_WORLD_STD, MPI_ERRORS_RETURN);
-   int error_code = ::MPI_Wait(mpi_request, &mpi_status);
+   int error_code = ::MPI_Wait(&mpi_request, &mpi_status);
    if (error_code != MPI_SUCCESS) {
…
    if(array_of_requests[i].type != 2) // isend or imrecv
    {
-     MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
-     //int tested=false;
-     //while(!tested) MPI_Test(&array_of_requests[i], &tested, &array_of_statuses[i]);
+     //MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
+     int tested;
+     MPI_Test(&array_of_requests[i], &tested, &array_of_statuses[i]);
+     if(!tested) MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
      finished++;
      finished_index[i] = true;
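For context, the test-then-wait idiom introduced in the waitall loop above can be reduced to the following sketch in plain MPI; the helper name complete_all and the raw MPI_Request array are illustrative placeholders, since the file itself operates on ep_lib request wrappers:

    #include <mpi.h>

    // Complete a set of pending requests one by one: try a cheap MPI_Test first,
    // and only fall back to the blocking MPI_Wait if the request is not yet done.
    void complete_all(MPI_Request* requests, MPI_Status* statuses, int n)
    {
      for (int i = 0; i < n; ++i)
      {
        int done = 0;
        MPI_Test(&requests[i], &done, &statuses[i]);
        if (!done) MPI_Wait(&requests[i], &statuses[i]);
      }
    }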
XIOS/dev/branch_yushan_merged/inputs/REMAP/iodef.xml
(r1172 → r1176)
  <file_definition type="one_file" par_access="collective" output_freq="1ts" output_level="10" enabled=".TRUE.">

- <file_group id="read_then_write_files" enabled=".TRUE.">
+ <file_group id="read_then_write_files" enabled=".FALSE.">
    <file id="output_regular_pole" name="output_dst_regular" >
-     <field field_ref="tmp_field_0" name="field_regular_0" enabled=".TRUE."/>
-     <field field_ref="dst_field_regular" name="field_regular" enabled=".TRUE."/>
+     <field field_ref="tmp_field_0" name="field_regular_0" enabled=".FALSE."/>
+     <field field_ref="dst_field_regular" name="field_regular" enabled=".FALSE."/>
      <field field_ref="dst_field_regular_pole_0" name="field_regular_pole_0" enabled=".FALSE." />
      <field field_ref="dst_field_regular_pole_1" name="field_regular_pole_1" enabled=".FALSE." />
    </file>
-   <file id="output_dst_curvilinear" name="output_dst_curvilinear" enabled=".TRUE." >
+   <file id="output_dst_curvilinear" name="output_dst_curvilinear" enabled=".FALSE." >
      <field field_ref="tmp_field_1" operation="instant"/>
    </file>
-   <file id="output_dst_unstructured" name="output_dst_unstructured" enabled=".TRUE." >
+   <file id="output_dst_unstructured" name="output_dst_unstructured" enabled=".FALSE." >
      <field field_ref="tmp_field_2" operation="instant"/>
    </file>
…
  <file_group id="write_files" >
    <file id="output_2D" name="output_2D" enabled=".TRUE.">
-     <field field_ref="src_field_2D" name="field_src" />
-     <field field_ref="src_field_2D_clone" name="field_src_clone" default_value="100000" />
-     <field field_ref="src_field_2D" name="field_dst_regular_0" domain_ref="dst_domain_regular_pole" />
-     <field field_ref="dst_field_2D" name="field_dst_regular_1" />
-     <field field_ref="dst_field_2D_regular_pole" name="field_dst_regular_2" />
-     <field field_ref="dst_field_2D_clone" name="field_dst_regular_3" detect_missing_value=".false." default_value="100000" />
-     <field field_ref="dst_field_2D_extract" name="field_dst_regular_4" />
+     <field field_ref="src_field_2D" name="field_src" enabled=".FALSE."/>
+     <field field_ref="src_field_2D_clone" name="field_src_clone" default_value="100000" enabled=".FALSE."/>
+     <field field_ref="src_field_2D" name="field_dst_regular_0" domain_ref="dst_domain_regular_pole" enabled=".TRUE."/>
+     <field field_ref="dst_field_2D" name="field_dst_regular_1" enabled=".FALSE." />
+     <field field_ref="dst_field_2D_regular_pole" name="field_dst_regular_2" enabled=".FALSE."/>
+     <field field_ref="dst_field_2D_clone" name="field_dst_regular_3" detect_missing_value=".false." default_value="100000" enabled=".FALSE."/>
+     <field field_ref="dst_field_2D_extract" name="field_dst_regular_4" enabled=".FALSE."/>
    </file>
-   <file id="output_3D" name="output_3D" enabled=".TRUE.">
+   <file id="output_3D" name="output_3D" enabled=".FALSE.">
      <field field_ref="src_field_3D" name="field_src" />
      <field field_ref="src_field_3D_pression" name="field" />
…
      <field field_ref="dst_field_3D_interp" name="field_dst_interp_domain_axis" domain_ref="dst_domain_regular_pole"/>
    </file>
-   <file id="output_4D" name="output_4D" enabled=".TRUE.">
-     <field field_ref="src_field_4D" name="field_4D" />
+   <file id="output_4D" name="output_4D" enabled=".FALSE.">
+     <field field_ref="src_field_4D" name="field_4D" enabled=".FALSE."/>
      <field field_ref="dst_field_4D_extract" name="field_4D_extract" />
    </file>
XIOS/dev/branch_yushan_merged/inputs/Unstruct/iodef.xml
(r944 → r1176)
  <field_definition level="1" enabled=".TRUE." default_value="1000">
    <field id="field_A_srf" operation="average" freq_op="3600s" grid_ref="grid_A"/>
+   <field id="field_B_srf" operation="average" freq_op="3600s" grid_ref="grid_A"/>
    <field id="field_A_expand" operation="average" grid_ref="grid_dst" field_ref="field_A_srf"/>
  </field_definition>

  <file_definition type="one_file" par_access="collective" output_freq="1h" output_level="10" enabled=".TRUE." >
    <file id="output" name="output">
-     <field field_ref="field_A_expand" name="field"/>
+     <!-- <field field_ref="field_A_expand" name="field"/> -->
+     <field field_ref="field_A_srf" name="field_A"/>
+     <field field_ref="field_A_srf" name="field_rect" grid_ref="grid_rect"/>
+     <!-- <field field_ref="field_A_srf" name="field_curv" grid_ref="grid_curv"/> -->
    </file>
+
+   <!-- <file id="output_src_curvilinear" name="output_src_curvilinear" mode="read" >
+     <field id="src_field_curvilinear" name="field_A" grid_ref="src_grid_curvilinear_read" operation="instant"/>
+   </file> -->
+
  </file_definition>

  <axis_definition>
    <axis id="axis_srf" positive="up"/>
+   <axis id="src_axis_curvilinear_read" positive="down" />
  </axis_definition>

  <domain_definition>
    <domain id="domain_srf" />
+
    <domain id="domain_dst" domain_ref="domain_srf" >
      <expand_domain/>
      <compute_connectivity_domain id="compute" type="node"/>
    </domain>
+
+   <domain id="dst_domain_regular_pole" ni_glo="90" nj_glo="45" type="rectilinear">
+     <generate_rectilinear_domain id="domain_regular_pole"/>
+     <interpolate_domain write_weight="false" order="1" renormalize="true"/>
+   </domain>
+
+   <domain id="src_domain_curvilinear_read" type="curvilinear">
+     <generate_rectilinear_domain />
+   </domain>
+
  </domain_definition>
…
    <axis axis_ref="axis_srf" />
  </grid>
+
  <grid id="grid_dst">
    <domain domain_ref="domain_dst" />
    <axis axis_ref="axis_srf" />
  </grid>
+
+ <grid id="grid_rect">
+   <domain domain_ref="dst_domain_regular_pole" />
+   <axis axis_ref="axis_srf" />
+ </grid>
+
+ <grid id="grid_curv">
+   <domain id="src_domain_curvilinear_read" />
+   <axis axis_ref="axis_srf" />
+ </grid>
+
+ <grid id="src_grid_curvilinear_read">
+   <domain id="src_domain_curvilinear_read"/>
+   <axis axis_ref="src_axis_curvilinear_read" />
+ </grid>
+
  </grid_definition>
  </context>
XIOS/dev/branch_yushan_merged/src/buffer_client.cpp
(r1134 → r1176)
  {
    MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
+   #pragma omp critical (_output)
    pending = true;
    if (current == 1) current = 0;
XIOS/dev/branch_yushan_merged/src/client.cpp
(r1164 → r1176)
  MPI_Comm_rank(intraComm,&intraCommRank) ;

- #pragma omp critical(_output)
+ /*#pragma omp critical(_output)
  {
    info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
            <<" intraCommRank :"<<intraCommRank<<" serverLeader "<< serverLeader
            <<" globalComm : "<< &(CXios::globalComm) << endl ;
- }
+ }*/
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
(r1172 → r1176)
  int clientRank;
  MPI_Comm_rank(commLevel,&clientRank);
+ ep_lib::MPI_Barrier(commLevel);
  int groupRankBegin = this->getGroupBegin()[level];
  int nbClient = this->getNbInGroup()[level];
…
  int currentIndex = 0;
  int nbRecvClient = recvRankClient.size();
- for (int idx = 0; idx < nbRecvClient; ++idx)
- {
-   if (0 != recvNbIndexClientCount[idx])
-   {
-     recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
-   }
-   currentIndex += recvNbIndexClientCount[idx];
- }
-
  boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                iteIndex = client2ClientIndex.end();
…
    sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+
+ for (int idx = 0; idx < nbRecvClient; ++idx)
+ {
+   if (0 != recvNbIndexClientCount[idx])
+   {
+     recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+   }
+   currentIndex += recvNbIndexClientCount[idx];
+ }
+
  std::vector<ep_lib::MPI_Status> status(request.size());
  MPI_Waitall(request.size(), &request[0], &status[0]);

  CArray<size_t,1>* tmpGlobalIndex;
…
  --level;
  computeIndexInfoMappingLevel(*tmpGlobalIndex, this->internalComm_, level);
+

  else // Now, we are in the last level where necessary mappings are.
…
  MPI_Comm_rank(commLevel,&clientRank);
  computeSendRecvRank(level, clientRank);
+ ep_lib::MPI_Barrier(commLevel);

  int groupRankBegin = this->getGroupBegin()[level];
…
  int nRequest = 0;
+ for (int idx = 0; idx < sendNbRank.size(); ++idx)
+ {
+   MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
+             sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
+   ++nRequest;
+ }
+
  for (int idx = 0; idx < recvNbRank.size(); ++idx)
  {
    MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
              recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
    ++nRequest;
  }
- for (int idx = 0; idx < sendNbRank.size(); ++idx)
- {
-   MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
-             sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
-   ++nRequest;
- }
…
  int nRequest = 0;
- for (int idx = 0; idx < recvBuffSize; ++idx)
- {
-   MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
-             recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-   ++nRequest;
- }
  for (int idx = 0; idx < sendBuffSize; ++idx)
…
    MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
              sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
    ++nRequest;
  }
+ for (int idx = 0; idx < recvBuffSize; ++idx)
+ {
+   MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
+             recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
+   ++nRequest;
+ }
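The reordered hunks above post every non-blocking send before the matching receives and then complete all requests with a single MPI_Waitall. A reduced sketch of that ordering in plain MPI follows; the buffers, rank lists, tag value and function name are illustrative placeholders, not the XIOS DHT data structures:

    #include <mpi.h>
    #include <vector>

    // Exchange one integer with each peer: post all MPI_Isend calls first, then
    // the MPI_Irecv calls, and complete everything with one MPI_Waitall.
    // The caller sizes recvVal to recvRank.size().
    void exchangeCounts(MPI_Comm comm,
                        std::vector<int>& sendRank, std::vector<int>& sendVal,
                        std::vector<int>& recvRank, std::vector<int>& recvVal)
    {
      const int tag = 7;  // illustrative tag
      std::vector<MPI_Request> request(sendRank.size() + recvRank.size());
      std::vector<MPI_Status>  status(request.size());
      int nRequest = 0;

      for (std::size_t idx = 0; idx < sendRank.size(); ++idx)
        MPI_Isend(&sendVal[idx], 1, MPI_INT, sendRank[idx], tag, comm, &request[nRequest++]);

      for (std::size_t idx = 0; idx < recvRank.size(); ++idx)
        MPI_Irecv(&recvVal[idx], 1, MPI_INT, recvRank[idx], tag, comm, &request[nRequest++]);

      MPI_Waitall(nRequest, request.data(), status.data());
    }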
XIOS/dev/branch_yushan_merged/src/data_output.cpp
(r1096 → r1176)
  #include "group_template.hpp"
  #include "context.hpp"
-
+ //mpi.hpp
  namespace xios
  {
- /// ////////////////////// Définitions ////////////////////// ///
+ /// ////////////////////// Dfinitions ////////////////////// ///

  CDataOutput::~CDataOutput(void)
XIOS/dev/branch_yushan_merged/src/io/nc4_data_input.cpp
(r1172 → r1176)
  #ifdef _usingEP
    SuperClass::type = ONE_FILE;
-   //printf("SuperClass::type = %d\n", SuperClass::type);
+   printf("SuperClass::type = %d\n", SuperClass::type);
  #endif
  switch (SuperClass::type)
XIOS/dev/branch_yushan_merged/src/io/nc4_data_output.cpp
(r1172 → r1176)

  SuperClassWriter::definition_end();
- printf("SuperClass::type = %d \n", SuperClass::type);
+ printf("SuperClass::type = %d, typePrec = %d\n", SuperClass::type, typePrec);
  switch (SuperClass::type)
  {
XIOS/dev/branch_yushan_merged/src/test/test_complete_omp.f90
(r1134 → r1176)
    jbegin=jbegin+nj
  ENDDO
+
+ if((ni.LE.0) .OR. (nj.LE.0)) call MPI_Abort()

  iend=ibegin+ni-1 ; jend=jbegin+nj-1
XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90
(r1153 → r1176)
  if(rank < size-2) then

- !$omp parallel default(firstprivate) firstprivate(dtime)
+ !$omp parallel default(private) firstprivate(dtime)

  !!! XIOS Initialization (get the local communicator)
…
  ierr=NF90_INQ_VARID(ncid,"bounds_lon",varid)
  ierr=NF90_INQUIRE_VARIABLE(ncid, varid, dimids=dimids)
  ierr=NF90_INQUIRE_DIMENSION(ncid, dimids(1), len=src_nvertex)
  ierr=NF90_INQUIRE_DIMENSION(ncid, dimids(2), len=src_ni_glo)
…
    src_ibegin= remain * (div+1) + (rank-remain) * div ;
  ENDIF
+
+ if(src_ni .LE. 0) CALL MPI_ABORT()
+

  ALLOCATE(src_lon(src_ni), src_lon_tmp(src_ni))
…
  ALLOCATE(lval1(interpolatedLlm))
  ALLOCATE(lval2(llm2))
+ lval2 = 0
+ lval=0
+ lval1=0

  ierr=NF90_INQ_VARID(ncid,"lon",varid)
…
  ENDIF

+ if(dst_ni .LE. 0) CALL MPI_ABORT()
+
  ALLOCATE(dst_lon(dst_ni))
  ALLOCATE(dst_lat(dst_ni))