Timestamp: 06/02/17 17:53:25 (7 years ago)
Location: XIOS/dev/branch_yushan_merged
Files: 10 edited
Legend: unmodified lines have no prefix, added lines are prefixed with +, removed lines with -.
XIOS/dev/branch_yushan_merged/extern/remap/src/intersect.cpp
(r1149 → r1153)

   {
     // share a full edge ? be carefull at the orientation
-    /*
-    if(squaredist(a.vertex[i], b.vertex[j]) > 1e-10*1e-10 ||
-       squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10 )
-    {
-      printf("A : squaredist(a.vertex[%d], b.vertex[%d]) = %.10e %d\n",
-             i, j, squaredist(a.vertex[i], b.vertex[j]),
-             squaredist(a.vertex[i], b.vertex[j]) > 1e-10*1e-10 ? true : false);
-      printf("B : squaredist(a.vertex[%d], b.vertex[%d]) = %.10e %d\n",
-             (i+1)%a.n, (j+1)%b.n, squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]),
-             squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10 ? true : false);
+    //if(squaredist(a.vertex[i], b.vertex[j]) > 1e-10*1e-10 ||
+    //   squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10 )
+    //{
+    //printf("A : squaredist(a.vertex[%d], b.vertex[%d]) = %.10e %d\n",
+    //       i, j, squaredist(a.vertex[i], b.vertex[j]),
+    //       squaredist(a.vertex[i], b.vertex[j]) > 1e-10*1e-10 ? true : false);
+    //printf("B : squaredist(a.vertex[%d], b.vertex[%d]) = %.10e %d\n",
+    //       (i+1)%a.n, (j+1)%b.n, squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]),
+    //       squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10 ? true : false);

     assert(squaredist(a.vertex[ i ], b.vertex[ j ]) > 1e-10*1e-10 ||
            squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10);
-    exit(0);
-    }*/
+
+    //}

     if ( squaredist(a.vertex[ i ], b.vertex[ j ]) < 1e-10*1e-10 &&
          squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+b.n-1)%b.n]) < 1e-10*1e-10)
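The guard above compares squared distances against a squared tolerance (1e-10*1e-10), so no square root is ever taken. A minimal standalone sketch of that idiom; Coord and squaredist here are simplified Euclidean stand-ins for the remap library's own types, not the actual implementation:

    #include <cstdio>

    struct Coord { double x, y, z; };

    // Squared Euclidean distance: no sqrt needed when we only ever
    // compare against a squared tolerance.
    static double squaredist(const Coord& a, const Coord& b)
    {
      double dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
      return dx*dx + dy*dy + dz*dz;
    }

    int main()
    {
      const double eps = 1e-10;        // tolerance on the distance itself
      Coord p{0.0, 0.0, 1.0};
      Coord q{0.0, 0.0, 1.0 + 1e-12};  // closer to p than eps

      // Compare squared distance to eps*eps instead of distance to eps.
      bool sameVertex = squaredist(p, q) < eps*eps;
      printf("same vertex: %s\n", sameVertex ? "yes" : "no");
      return 0;
    }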
XIOS/dev/branch_yushan_merged/extern/remap/src/mapper.cpp
(r1149 → r1153)

       MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
-      MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+      MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 1, communicator, &sendRequest[nbSendRequest]);
       nbSendRequest++;
       if (order == 2)
       {
-        MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1),
-                   MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1),
+                   MPI_DOUBLE, rank, 2, communicator, &sendRequest[nbSendRequest]);
         nbSendRequest++;
-        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &sendRequest[nbSendRequest]);
         //ym --> attention taille GloId
         nbSendRequest++;
 ...
       else
       {
-        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+        MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 4, communicator, &sendRequest[nbSendRequest]);
         //ym --> attention taille GloId
         nbSendRequest++;
 ...
       MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
-      MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+      MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 1, communicator, &recvRequest[nbRecvRequest]);
       nbRecvRequest++;
       if (order == 2)
       {
-        MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
-                  MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
+                  MPI_DOUBLE, rank, 2, communicator, &recvRequest[nbRecvRequest]);
         nbRecvRequest++;
-        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &recvRequest[nbRecvRequest]);
         //ym --> attention taille GloId
         nbRecvRequest++;
 ...
       else
       {
-        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+        MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 4, communicator, &recvRequest[nbRecvRequest]);
         //ym --> attention taille GloId
         nbRecvRequest++;
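The change gives each logical message stream (values, areas, gradients, neighbour ids) its own MPI tag. With every pair using tag 0, two in-flight MPI_DOUBLE messages between the same ranks could match the wrong receive, since MPI matches on (source, tag, communicator) in posting order. A minimal sketch of the disambiguation, assuming two same-typed messages between ranks 0 and 1; the TAG_VALUE/TAG_AREA names are illustrative, not from the changeset:

    #include <mpi.h>
    #include <cstdio>

    // Distinct tags keep same-type messages between the same rank pair
    // from matching the wrong receive.
    enum { TAG_VALUE = 0, TAG_AREA = 1 };

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      double value = 1.0, area = 2.0;
      if (rank == 0)
      {
        MPI_Request req[2];
        MPI_Issend(&value, 1, MPI_DOUBLE, 1, TAG_VALUE, MPI_COMM_WORLD, &req[0]);
        MPI_Issend(&area,  1, MPI_DOUBLE, 1, TAG_AREA,  MPI_COMM_WORLD, &req[1]);
        MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
      }
      else if (rank == 1)
      {
        double recvValue, recvArea;
        MPI_Request req[2];
        // Posting order no longer matters: each Irecv can only match
        // the send that carries the same tag.
        MPI_Irecv(&recvArea,  1, MPI_DOUBLE, 0, TAG_AREA,  MPI_COMM_WORLD, &req[0]);
        MPI_Irecv(&recvValue, 1, MPI_DOUBLE, 0, TAG_VALUE, MPI_COMM_WORLD, &req[1]);
        MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
        printf("value=%g area=%g\n", recvValue, recvArea);
      }
      MPI_Finalize();
      return 0;
    }

Without the tags, the recvArea request posted first would have matched the value message, silently swapping the two payloads.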
XIOS/dev/branch_yushan_merged/extern/remap/src/node.hpp
(r923 → r1153 — these hunks re-indent existing declarations; the r1153 text is shown)

 struct Circle
 {
   Coord centre;
   double radius;
 };
 ...
 struct Node
 {
   int level;       /* FIXME leafs are 0 and root is max level? */
   int leafCount;   /* number of leafs that are descendants of this node (the elements in it's cycle) */
   Coord centre;
   double radius;
   NodePtr parent, ref;
   std::vector<NodePtr> child;
   std::list<NodePtr> intersectors;
   bool reinserted;
   int updateCount;  // double var;
   CBasicTree* tree;
   void *data;
   int route;
   bool toDelete ;

   Node() : level(0), leafCount(1), centre(ORIGIN), radius(0), reinserted(false), updateCount(0), toDelete(false) {}
   Node(const Coord& centre, double radius, void *data)
     : level(0), leafCount(1), centre(centre), radius(radius), reinserted(false), updateCount(0), data(data), toDelete(false) {}

 //#ifdef DEBUG
 ...
 //#endif

   void move(const NodePtr node);
   void remove(const NodePtr node);
   void inflate(const NodePtr node);
   void update();
   void output(std::ostream& flux, int level, int color) ;
   NodePtr closest(std::vector<NodePtr>& list, int n = CLOSEST);
   NodePtr farthest(std::vector<NodePtr>& list);
   void findClosest(int level, NodePtr src, double& minDist, NodePtr &closest);

   void search(NodePtr node);
   bool centreInside(Node &node);
   bool intersects(NodePtr node);
   bool isInside(Node &node);
   int incluCheck();
   void checkParent(void) ;
   void printChildren();
   void assignRoute(std::vector<int>::iterator& rank, int level);
   void assignCircleAndPropagateUp(Coord *centres, double *radia, int level);
   void printLevel(int level);
   void routeNode(NodePtr node, int level);
   void routingIntersecting(std::vector<Node>* routingList, NodePtr node);
   void routeIntersection(std::vector<int>& routes, NodePtr node);
   void getNodeLevel(int level,std::list<NodePtr>& NodeList) ;
   bool removeDeletedNodes(int assignLevel) ;
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_wait.cpp
(r1149 → r1153)

       if(request->type == 3)
       {
-        ::MPI_Request mpi_request = static_cast< ::MPI_Request >(request->mpi_request);
+        ::MPI_Request *mpi_request = static_cast< ::MPI_Request* >(&(request->mpi_request));
         ::MPI_Status mpi_status;
-        ::MPI_Wait(&mpi_request, &mpi_status);
+        ::MPI_Errhandler_set(MPI_COMM_WORLD_STD, MPI_ERRORS_RETURN);
+        int error_code = ::MPI_Wait(mpi_request, &mpi_status);
+        if (error_code != MPI_SUCCESS) {
+
+          char error_string[BUFSIZ];
+          int length_of_error_string, error_class;
+
+          ::MPI_Error_class(error_code, &error_class);
+          ::MPI_Error_string(error_class, error_string, &length_of_error_string);
+          printf("%s\n", error_string);
+        }
+

         status->mpi_status = new ::MPI_Status(mpi_status);
 ...
 int MPI_Waitall(int count, MPI_Request *array_of_requests, MPI_Status *array_of_statuses)
 {
-  int dest_rank;
-  MPI_Comm_rank(MPI_COMM_WORLD, &dest_rank);
+  //int dest_rank;
+  //MPI_Comm_rank(MPI_COMM_WORLD, &dest_rank);
+  //printf("proc %d enters waitall\n", dest_rank);

   int finished = 0;
 ...
     if(array_of_requests[i].type != 2) // isend or imrecv
     {
-      //MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
-      int tested=false;
-      while(!tested) MPI_Test(&array_of_requests[i], &tested, &array_of_statuses[i]);
+      MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
+      //int tested=false;
+      //while(!tested) MPI_Test(&array_of_requests[i], &tested, &array_of_statuses[i]);
       finished++;
       finished_index[i] = true;
 ...
     }
   }
+  //printf("proc %d exits waitall\n", dest_rank);
   return MPI_SUCCESS;
 } /* end of mpi_waitall*/
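The new code switches the communicator's error handler to MPI_ERRORS_RETURN so a failed MPI_Wait reports an error code instead of aborting the job, then decodes it with MPI_Error_class / MPI_Error_string. A self-contained sketch of that diagnostic pattern; note the changeset uses the older MPI_Errhandler_set spelling, while MPI_Comm_set_errhandler below is the modern equivalent:

    #include <mpi.h>
    #include <cstdio>

    // Decode an MPI error code into a readable message instead of
    // letting the default handler abort the job.
    static void report_mpi_error(int error_code)
    {
      char error_string[MPI_MAX_ERROR_STRING];
      int length, error_class;
      MPI_Error_class(error_code, &error_class);
      MPI_Error_string(error_class, error_string, &length);
      printf("MPI error: %s\n", error_string);
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      // Return error codes to the caller rather than aborting.
      MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

      // Deliberately invalid: a negative count fails argument checking,
      // so the call returns an error code without communicating.
      double x = 0.0;
      int rc = MPI_Send(&x, -1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
      if (rc != MPI_SUCCESS) report_mpi_error(rc);

      MPI_Finalize();
      return 0;
    }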
XIOS/dev/branch_yushan_merged/src/io/netCdfInterface.cpp
(r1149 → r1153)

 int CNetCdfInterface::open(const StdString& fileName, int oMode, int& ncId)
 {
-  int status = nc_open(fileName.c_str(), oMode, &ncId);
+  int status = NC_NOERR;
+  #pragma omp critical (_netcdf)
+  status = nc_open(fileName.c_str(), oMode, &ncId);
+
   if (NC_NOERR != status)
   {
 ...
   int status;
   #pragma omp critical (_netcdf)
-  status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId);
+  status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); // nc_open
   if (NC_NOERR != status)
   {
 ...
 {
   int status = NC_NOERR;
-  //#pragma omp critical (_netcdf)
-  #pragma omp master
+  #pragma omp critical (_netcdf)
+  //#pragma omp master
   {
     status = nc_close(ncId);
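nc_open and nc_close are serialized here because the netCDF-C library is not thread-safe, and a named critical section makes every call sharing the (_netcdf) name exclude every other one, not just other calls to the same function. A minimal sketch of the idiom, with not_threadsafe_open standing in as a placeholder for a generic non-thread-safe library entry point:

    #include <omp.h>
    #include <cstdio>

    // Placeholder for a library call that must never run concurrently
    // (nc_open / nc_close in the changeset).
    static int not_threadsafe_open(const char* name)
    {
      printf("opening %s on thread %d\n", name, omp_get_thread_num());
      return 0;
    }

    int main()
    {
      #pragma omp parallel num_threads(4)
      {
        int status;
        // All critical sections named (_lib) exclude one another, so
        // every serialized library call shares a single lock.
        #pragma omp critical (_lib)
        status = not_threadsafe_open("data.nc");
        (void)status;
      }
      return 0;
    }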
XIOS/dev/branch_yushan_merged/src/node/field.cpp
(r1149 → r1153)

   if (context->hasClient)
   {
-    MPI_Barrier(context->client->intraComm);
-    printf("Proc %d enters function\n", myRank);
+    //MPI_Barrier(context->client->intraComm);
+    //printf("Proc %d enters function\n", myRank);
     solveTransformedGrid();
-    MPI_Barrier(context->client->intraComm);
-    printf("Proc %d exits function\n", myRank);
+    //MPI_Barrier(context->client->intraComm);
+    //printf("Proc %d exits function\n", myRank);
   }
XIOS/dev/branch_yushan_merged/src/node/file.cpp
(r1149 → r1153)

       bool isCollective = par_access.isEmpty() || par_access == par_access_attr::collective;

+      #ifdef _usingEP
+      if(omp_get_num_threads() != 1 ) multifile = true;
+      #endif
+
       if (isOpen) data_out->closeFile();
       if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast< ::MPI_Comm >(fileComm.mpi_comm), multifile, isCollective));
 ...
       // It would probably be better to call initFile() somehow
+
       MPI_Comm_dup(client->intraComm, &fileComm);
       if (time_counter_name.isEmpty()) time_counter_name = "time_counter";

-      //#pragma omp critical (_checkFile)
-      checkFile();
+      //#pragma omp critical (_readAttributesOfEnabledFieldsInReadMode_)
+      //{
+      checkFile(); // calls nc_open

       for (int idx = 0; idx < enabledFields.size(); ++idx)
 ...
       // Read necessary value from file
-      this->data_in->readFieldAttributesValues(enabledFields[idx]);
-
+      #pragma omp critical (_func)
+      {
+        //checkFile();
+        this->data_in->readFieldAttributesValues(enabledFields[idx]);
+        //close();
+      }
+
       // Fill attributes for base reference
       enabledFields[idx]->solveGridDomainAxisBaseRef();
 ...
       // Now everything is ok, close it
-      //close();
+      close();
+      //}
+
+      //if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);
+
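The _usingEP hunk flips multifile on whenever the OpenMP team has more than one endpoint thread, so threads never write the same NetCDF file concurrently. A toy sketch of that decision point; the per-thread file naming below is invented for illustration and is not how XIOS names its outputs:

    #include <omp.h>
    #include <cstdio>

    int main()
    {
      #pragma omp parallel num_threads(3)
      {
        // Inside a parallel region each thread sees the team size; more
        // than one thread means shared-file writes would race, so fall
        // back to one file per thread.
        bool multifile = (omp_get_num_threads() != 1);
        if (multifile)
          printf("thread %d writes output_%d.nc\n",
                 omp_get_thread_num(), omp_get_thread_num());
        else
          printf("single thread writes output.nc\n");
      }
      return 0;
    }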
XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90
(r1146 → r1153)

   if(rank < size-2) then

-  !$omp parallel default(firstprivate)
+  !$omp parallel default(firstprivate) firstprivate(dtime)

   !!! XIOS Initialization (get the local communicator)
 ...
   CALL MPI_COMM_RANK(comm,rank,ierr)
   CALL MPI_COMM_SIZE(comm,size,ierr)
+
+  rank = rank*omp_get_num_threads() + omp_get_thread_num()
+  size = size*omp_get_num_threads()
 ...
   CALL xios_close_context_definition()
+
+  CALL xios_get_domain_attr("src_domain_regular_read", ni=src_tmp_ni, nj=src_tmp_nj)
+  ALLOCATE(tmp_field_0(src_tmp_ni*src_tmp_nj))
+
+  CALL xios_get_axis_attr("src_axis_curvilinear_read", n=src_tmp_n)
+  CALL xios_get_domain_attr("src_domain_curvilinear_read", ni=src_tmp_ni, nj=src_tmp_nj)
+  ALLOCATE(tmp_field_1(src_tmp_ni*src_tmp_nj*src_tmp_n))
+
+  CALL xios_get_domain_attr("src_domain_unstructured_read", ni=src_tmp_ni, nj=src_tmp_nj)
+  ALLOCATE(tmp_field_2(src_tmp_ni*src_tmp_nj))
+
+  CALL xios_recv_field("src_field_regular", tmp_field_0)
+  CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
+  CALL xios_recv_field("src_field_unstructured", tmp_field_2)
+
+  DO ts=1,10
+    CALL xios_update_calendar(ts)
+    CALL xios_send_field("src_field_2D",src_field_2D)
+
+    DO i=1,src_ni
+      src_field_2D_clone(i) = src_field_2D(i)
+      IF ((23.5 * ts < src_lat(i)) .AND. (src_lat(i) < 65.5 *ts) .AND. (0 < src_lon(i)) .AND. (src_lon(i) < 30*ts)) THEN
+        src_field_2D_clone(i) = missing_value
+      ENDIF
+    ENDDO
+
+    CALL xios_send_field("src_field_2D_clone",src_field_2D_clone)
+    CALL xios_send_field("src_field_3D",src_field_3D)
+    CALL xios_send_field("src_field_3D_clone",src_field_3D)
+    CALL xios_send_field("src_field_4D",src_field_4D)
+    CALL xios_send_field("src_field_3D_pression",src_field_pression)
+    CALL xios_send_field("tmp_field_0",tmp_field_0)
+    CALL xios_send_field("tmp_field_1",tmp_field_1)
+    CALL xios_send_field("tmp_field_2",tmp_field_2)
+    CALL wait_us(5000) ;
+  ENDDO

   CALL xios_context_finalize()
+
+  DEALLOCATE(src_lon, src_lat, src_boundslon,src_boundslat, src_field_2D)
+  DEALLOCATE(dst_lon, dst_lat, dst_boundslon,dst_boundslat)
+  DEALLOCATE(tmp_field_0, tmp_field_1, tmp_field_2)

   CALL xios_finalize()
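The test now derives a unique endpoint rank by interleaving the MPI rank with the OpenMP thread number (rank*num_threads + thread_num) and scales the size to match, so each thread behaves like an independent client. A small C++ check of the same arithmetic as the Fortran test, assuming every process runs the same number of threads; the mpi_rank/mpi_size values are hard-coded stand-ins for MPI_Comm_rank/MPI_Comm_size:

    #include <omp.h>
    #include <cstdio>

    int main()
    {
      int mpi_rank = 1, mpi_size = 2;   // stand-ins for MPI_Comm_rank/size

      #pragma omp parallel num_threads(4)
      {
        // Each MPI process contributes num_threads consecutive endpoint
        // ranks, one per thread.
        int ep_rank = mpi_rank * omp_get_num_threads() + omp_get_thread_num();
        int ep_size = mpi_size * omp_get_num_threads();
        printf("process %d thread %d -> endpoint %d of %d\n",
               mpi_rank, omp_get_thread_num(), ep_rank, ep_size);
      }
      return 0;
    }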
XIOS/dev/branch_yushan_merged/src/transformation/Functions/reduction.cpp
(r1134 → r1153)

       (*CReductionAlgorithm::ReductionOperations_ptr)["average"] = TRANS_REDUCE_AVERAGE;
       CAverageReductionAlgorithm::registerTrans();
+
+      printf("*CReductionAlgorithm::ReductionOperations_ptr = %p\t %p\n", *CReductionAlgorithm::ReductionOperations_ptr, &(*CReductionAlgorithm::ReductionOperations_ptr));
     }
XIOS/dev/branch_yushan_merged/src/transformation/scalar_algorithm_reduce_axis.cpp
(r1134 → r1153)

   }

-  if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op))
-    ERROR("CScalarAlgorithmReduceAxis::CScalarAlgorithmReduceAxis(CAxis* axisDestination, CAxis* axisSource, CReduceAxisToScalar* algo)",
-          << "Operation '" << op << "' not found. Please make sure to use a supported one"
-          << "Axis source " <<axisSource->getId() << std::endl
-          << "Scalar destination " << scalarDestination->getId());
+  //if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op))
+  //  if ((CReductionAlgorithm::ReductionOperations_ptr)->end() == (CReductionAlgorithm::ReductionOperations_ptr)->find(op))
+  //    ERROR("CScalarAlgorithmReduceAxis::CScalarAlgorithmReduceAxis(CAxis* axisDestination, CAxis* axisSource, CReduceAxisToScalar* algo)",
+  //          << "Operation '" << op << "' not found. Please make sure to use a supported one"
+  //          << "Axis source " <<axisSource->getId() << std::endl
+  //          << "Scalar destination " << scalarDestination->getId());

   reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]);
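With the existence check commented out, the lookup falls straight through to (*ReductionOperations_ptr)[op], and on an associative map operator[] silently inserts a value-initialized entry for an unknown key, so an unsupported operation name yields a default value instead of an error. A small sketch of the difference, assuming ReductionOperations_ptr behaves like a std::map; the names and enum values below are illustrative:

    #include <cstdio>
    #include <map>
    #include <string>

    enum ReduceOp { TRANS_REDUCE_NONE = 0, TRANS_REDUCE_SUM, TRANS_REDUCE_AVERAGE };

    int main()
    {
      std::map<std::string, ReduceOp> ops;
      ops["sum"] = TRANS_REDUCE_SUM;
      ops["average"] = TRANS_REDUCE_AVERAGE;

      std::string op = "mean";   // unsupported spelling

      // Safe: find() does not modify the map, so the typo is caught.
      if (ops.end() == ops.find(op))
        printf("operation '%s' not found\n", op.c_str());

      // Unsafe: operator[] inserts {"mean", TRANS_REDUCE_NONE} and
      // returns the default value, masking the error.
      ReduceOp r = ops[op];
      printf("ops[\"%s\"] = %d, map size now %zu\n", op.c_str(), r, ops.size());
      return 0;
    }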