Changeset 1149 for XIOS/dev/branch_yushan_merged
- Timestamp: 05/31/17 10:51:06
- Location: XIOS/dev/branch_yushan_merged
- Files: 8 edited
Legend:
- Unchanged lines are shown indented
- Added lines (new in r1149) are prefixed with +
- Removed lines are prefixed with -
XIOS/dev/branch_yushan_merged/extern/remap/src/intersect.cpp
r1016 → r1149:

    for (int j = 0; j < b.n; j++)
    {
-     // share a full edge ? be carefull at the orientation
-     assert(squaredist(a.vertex[ i ], b.vertex[ j ]) > 1e-10*1e-10 ||
-            squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10);
+     // share a full edge ? be carefull at the orientation
+     /*
+     if(squaredist(a.vertex[i], b.vertex[j]) > 1e-10*1e-10 ||
+        squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10 )
+     {
+       printf("A : squaredist(a.vertex[%d], b.vertex[%d]) = %.10e %d\n",
+              i, j, squaredist(a.vertex[i], b.vertex[j]),
+              squaredist(a.vertex[i], b.vertex[j]) > 1e-10*1e-10 ? true : false);
+       printf("B : squaredist(a.vertex[%d], b.vertex[%d]) = %.10e %d\n",
+              (i+1)%a.n, (j+1)%b.n, squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]),
+              squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10 ? true : false);
+
+       assert(squaredist(a.vertex[ i ], b.vertex[ j ]) > 1e-10*1e-10 ||
+              squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+1)%b.n]) > 1e-10*1e-10);
+       exit(0);
+     }*/
      if ( squaredist(a.vertex[ i ], b.vertex[ j ]) < 1e-10*1e-10 &&
           squaredist(a.vertex[(i+1)%a.n], b.vertex[(j+b.n-1)%b.n]) < 1e-10*1e-10)
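In r1149 the assertion that two cells never share a full, identically oriented edge is replaced by a commented-out debug block, so the check is effectively disabled rather than removed. For reference, here is a minimal sketch of the squared-distance edge test the code relies on; the Vertex type is a hypothetical stand-in for the remap library's own vertex class:

    // Hypothetical stand-in for the remap library's vertex type.
    struct Vertex { double x, y, z; };

    // Squared Euclidean distance; comparing it against eps*eps avoids a sqrt.
    inline double squaredist(const Vertex& u, const Vertex& v)
    {
        double dx = u.x - v.x, dy = u.y - v.y, dz = u.z - v.z;
        return dx*dx + dy*dy + dz*dz;
    }

    // The edges (a0,a1) and (b0,b1) coincide within tolerance 1e-10 when both
    // endpoint pairs match; this is the test guarded by the disabled assert.
    inline bool sameEdge(const Vertex& a0, const Vertex& a1,
                         const Vertex& b0, const Vertex& b1)
    {
        const double eps2 = 1e-10 * 1e-10;
        return squaredist(a0, b0) < eps2 && squaredist(a1, b1) < eps2;
    }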
XIOS/dev/branch_yushan_merged/extern/remap/src/mapper.cpp
r1147 → r1149:

    MPI_Status *status = new MPI_Status[4*mpiSize];

+   MPI_Waitall(nbRecvRequest, recvRequest, status);
    MPI_Waitall(nbSendRequest, sendRequest, status);
-   MPI_Waitall(nbRecvRequest, recvRequest, status);

    /* for all indices that have been received from requesting ranks: pack values and gradients, then send */
…
        sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id;
      }
-     MPI_Issend(sendValue[rank], …
+     MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
-     MPI_Issend(sendArea[rank], …
+     MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
      if (order == 2)
…
      MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
      //ym --> attention taille GloId
-     nbSendRequest++;
+     nbSendRequest++;
    }
  }
…
    }

-   MPI_Waitall(nbSendRequest, sendRequest, status);
    MPI_Waitall(nbRecvRequest, recvRequest, status);
+   MPI_Waitall(nbSendRequest, sendRequest, status);
…
    }

    /* free all memory allocated in this function */
-   for (int rank = 0; rank < mpiSize; rank++)
+   /*for (int rank = 0; rank < mpiSize; rank++)
    {
      if (nbSendElement[rank] > 0)
…
    delete[] sendNeighIds;
    delete[] recvNeighIds;
+   */
    return i;
  }
…
    }

+   MPI_Waitall(nbRecvRequest, recvRequest, status);
    MPI_Waitall(nbSendRequest, sendRequest, status);
-   MPI_Waitall(nbRecvRequest, recvRequest, status);

    for (int rank = 0; rank < mpiSize; rank++)
…
    }

+   MPI_Waitall(nbRecvRequest, recvRequest, status);
    MPI_Waitall(nbSendRequest, sendRequest, status);
-   MPI_Waitall(nbRecvRequest, recvRequest, status);

    int nbNeighbourNodes = 0;
…
    }

+   MPI_Waitall(nbRecvRequest, recvRequest, status);
    MPI_Waitall(nbSendRequest, sendRequest, status);
-   MPI_Waitall(nbRecvRequest, recvRequest, status);

    char **sendBuffer2 = new char*[mpiSize];
…
    {
      MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
-     printf("proc %d send %d elements to proc %d\n", mpiRank, sentMessageSize[rank], rank);
      nbSendRequest++;
    }
    {
      MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
-     printf("proc %d recv %d elements from proc %d\n", mpiRank, recvMessageSize[rank], rank);
      nbRecvRequest++;
    }
  }

+   MPI_Waitall(nbRecvRequest, recvRequest, status);
    MPI_Waitall(nbSendRequest, sendRequest, status);
-   MPI_Waitall(nbRecvRequest, recvRequest, status);
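The recurring edit in mapper.cpp swaps the completion order: each MPI_Waitall on the receive requests now runs before the one on the send requests, while the diagnostic printf calls and the end-of-function cleanup loop are commented out. A minimal, self-contained sketch of the Irecv/Issend exchange with receive-first completion (buffer names are hypothetical):

    // Post all nonblocking receives and sends, then complete the receive
    // requests before the send requests, as mapper.cpp does after r1149.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        std::vector<double> sendBuf(size, rank), recvBuf(size, 0.0);
        std::vector<MPI_Request> sendReq(size), recvReq(size);

        // Receives are posted first so the synchronous-mode sends can match.
        for (int peer = 0; peer < size; ++peer)
            MPI_Irecv(&recvBuf[peer], 1, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD, &recvReq[peer]);
        for (int peer = 0; peer < size; ++peer)
            MPI_Issend(&sendBuf[peer], 1, MPI_DOUBLE, peer, 0, MPI_COMM_WORLD, &sendReq[peer]);

        // r1149 order: wait on the receives first, then on the sends.
        MPI_Waitall(size, recvReq.data(), MPI_STATUSES_IGNORE);
        MPI_Waitall(size, sendReq.data(), MPI_STATUSES_IGNORE);

        MPI_Finalize();
        return 0;
    }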
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_test.cpp
r1134 → r1149:

  namespace ep_lib {

    int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status)
    {
      Debug("MPI_Test with EP");

      *flag = false;
…
      if(request->type == 3) // imrecv
      {
-       ::MPI_Request mpi_request = static_cast< ::MPI_Request >(request->mpi_request);
+       ::MPI_Request *mpi_request = static_cast< ::MPI_Request* >(&(request->mpi_request));
        ::MPI_Status mpi_status;
-       ::MPI_Test(&mpi_request, flag, &mpi_status);
+
+       ::MPI_Errhandler_set(MPI_COMM_WORLD_STD, MPI_ERRORS_RETURN);
+       int error_code = ::MPI_Test(mpi_request, flag, &mpi_status);
+       if (error_code != MPI_SUCCESS) {
+
+         char error_string[BUFSIZ];
+         int length_of_error_string, error_class;
+
+         ::MPI_Error_class(error_code, &error_class);
+         ::MPI_Error_string(error_class, error_string, &length_of_error_string);
+         printf("%s\n", error_string);
+       }
+
        if(*flag)
        {
…
    }

    int MPI_Testall(int count, MPI_Request *array_of_requests, int *flag, MPI_Status *array_of_statuses)
    {
      Debug("MPI_Testall with EP");
      *flag = true;
      int i=0;
…
        i++;
      }
    }

(The remaining hunks in this file, covering the MPI_Test and MPI_Testall function bodies, appear to change indentation only.)
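The substantive change switches the global communicator to MPI_ERRORS_RETURN and converts a failing return code from ::MPI_Test into a readable message instead of letting MPI abort. A sketch of that reporting pattern with plain MPI (MPI_Comm_set_errhandler is the non-deprecated spelling of the MPI_Errhandler_set call used above; MPI_COMM_WORLD_STD is ep_lib's alias for the underlying MPI_COMM_WORLD):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        // Have errors come back as return codes rather than aborting.
        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

        MPI_Request request = MPI_REQUEST_NULL;  // placeholder request
        MPI_Status status;
        int flag = 0;

        int error_code = MPI_Test(&request, &flag, &status);
        if (error_code != MPI_SUCCESS)
        {
            // Translate the code into its class and a printable string.
            char error_string[MPI_MAX_ERROR_STRING];
            int length, error_class;
            MPI_Error_class(error_code, &error_class);
            MPI_Error_string(error_class, error_string, &length);
            std::printf("MPI_Test failed: %s\n", error_string);
        }

        MPI_Finalize();
        return 0;
    }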
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_wait.cpp
r1147 → r1149:

    if(array_of_requests[i].type != 2) // isend or imrecv
    {
-     MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
+     //MPI_Wait(&array_of_requests[i], &array_of_statuses[i]);
+     int tested=false;
+     while(!tested) MPI_Test(&array_of_requests[i], &tested, &array_of_statuses[i]);
      finished++;
      finished_index[i] = true;
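Here a blocking MPI_Wait is replaced by a loop that polls MPI_Test until the request completes, keeping the calling thread active between tests. A minimal sketch of the substitution, assuming plain MPI types:

    #include <mpi.h>

    // Busy-wait equivalent of MPI_Wait: poll until the request completes.
    void wait_by_polling(MPI_Request* request, MPI_Status* status)
    {
        int completed = 0;
        while (!completed)
            MPI_Test(request, &completed, status);  // returns immediately
    }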
XIOS/dev/branch_yushan_merged/src/io/netCdfInterface.cpp
r1146 → r1149:

    int CNetCdfInterface::close(int ncId)
    {
-     int status = nc_close(ncId);
+     int status = NC_NOERR;
+     //#pragma omp critical (_netcdf)
+     #pragma omp master
+     {
+       status = nc_close(ncId);
        if (NC_NOERR != status)
        {
…
          throw CNetCdfException(e);
        }
+     }
      return status;
    }
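The nc_close call is now executed only by the OpenMP master thread (the commented-out alternative was an omp critical section), which serializes access to the non-thread-safe netCDF C library. A sketch of that guard, assuming the function is called from inside a parallel region:

    #include <netcdf.h>

    int close_master_only(int ncId)
    {
        int status = NC_NOERR;
        #pragma omp master
        {
            status = nc_close(ncId);  // only the master thread touches netCDF
        }
        return status;
    }

Note the consequence of this design: a master construct has no implied barrier, so the other threads skip the block immediately and report NC_NOERR without having closed anything, exactly as in the changeset.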
XIOS/dev/branch_yushan_merged/src/node/field.cpp
r1134 → r1149:

  namespace xios{

-   /// ////////////////////// Définitions ////////////////////// ///
+   /// ////////////////////// Dfinitions ////////////////////// ///

    CField::CField(void)
…
    CContext* context = CContext::getCurrent();
    solveOnlyReferenceEnabledField(doSending2Server);
+   int myRank;
+   MPI_Comm_rank(context->client->intraComm, &myRank);

    if (!areAllReferenceSolved)
…
    if (context->hasClient)
    {
+     MPI_Barrier(context->client->intraComm);
+     printf("Proc %d enters function\n", myRank);
      solveTransformedGrid();
+     MPI_Barrier(context->client->intraComm);
+     printf("Proc %d exits function\n", myRank);
    }
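The change adds a rank lookup plus barrier-and-print instrumentation around solveTransformedGrid(), making it visible whether every rank enters and leaves the call. A generic sketch of that tracing pattern (traced_call is a hypothetical helper; comm stands in for context->client->intraComm):

    #include <mpi.h>
    #include <cstdio>

    // Barriers line the ranks up, so the printed enter/exit messages reveal
    // whether any rank hangs inside the traced call.
    template <typename Fn>
    void traced_call(MPI_Comm comm, const char* name, Fn&& fn)
    {
        int myRank;
        MPI_Comm_rank(comm, &myRank);

        MPI_Barrier(comm);
        std::printf("Proc %d enters %s\n", myRank, name);
        fn();
        MPI_Barrier(comm);
        std::printf("Proc %d exits %s\n", myRank, name);
    }

With this helper the changeset's instrumentation would read traced_call(comm, "solveTransformedGrid", [&]{ solveTransformedGrid(); });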
XIOS/dev/branch_yushan_merged/src/node/file.cpp
r1146 → r1149:

    // Now everything is ok, close it
-   close();
+   //close();
XIOS/dev/branch_yushan_merged/src/transformation/domain_algorithm_interpolate.cpp
r1134 → r1149:

    CContext* context = CContext::getCurrent();
    CContextClient* client=context->client;
+   int split_key;
+   ep_lib::MPI_Comm_rank(client->intraComm, &split_key);

    ep_lib::MPI_Comm poleComme(MPI_COMM_NULL);
-   ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+   ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 9 : 1, split_key, &poleComme);
    if (MPI_COMM_NULL != poleComme)
    {
…
    std::vector<int> recvCount(nbClientPole,0);
    std::vector<int> displ(nbClientPole,0);
-   MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
+   ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;

    displ[0]=0;
    for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ;
    int recvSize=displ[nbClientPole-1]+recvCount[nbClientPole-1] ;
+

    std::vector<int> sendSourceIndexBuff(nbWeight);
…
    // Gather all index and weight for pole
-   MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
-   MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
+   ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
+   ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);

    std::map<int,double> recvTemp;
…
-   MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
+   ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);

    int* sendIndexDestBuff = new int [sendBuffSize];
…
    sendRequest.push_back(ep_lib::MPI_Request());
-   MPI_Isend(sendIndexDestBuff + sendOffSet,
+   ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,
              k,
              MPI_INT,
…
              &sendRequest.back());
    sendRequest.push_back(ep_lib::MPI_Request());
-   MPI_Isend(sendIndexSrcBuff + sendOffSet,
+   ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,
              k,
              MPI_INT,
…
              &sendRequest.back());
    sendRequest.push_back(ep_lib::MPI_Request());
-   MPI_Isend(sendWeightBuff + sendOffSet,
+   ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,
              k,
              MPI_DOUBLE,
…
    {
      ep_lib::MPI_Status recvStatus;
-     MPI_Recv((recvIndexDestBuff + receivedSize),
+     ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize),
               recvBuffSize,
               MPI_INT,
…
      int countBuff = 0;
-     MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
+     ep_lib::MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
  #ifdef _usingMPI
      clientSrcRank = recvStatus.MPI_SOURCE;
…
      clientSrcRank = recvStatus.ep_src;
  #endif
-     MPI_Recv((recvIndexSrcBuff + receivedSize),
+     ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize),
               recvBuffSize,
               MPI_INT,
…
               &recvStatus);

-     MPI_Recv((recvWeightBuff + receivedSize),
+     ep_lib::MPI_Recv((recvWeightBuff + receivedSize),
               recvBuffSize,
               MPI_DOUBLE,
…
    std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
    ep_lib::MPI_Status stat_ignore;
-   MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
+   ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);

    delete [] sendIndexDestBuff;
…
-   MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
+   ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
    ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
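Most of this file's changes qualify raw MPI calls with the ep_lib:: endpoint wrapper. The substantive one is the MPI_Comm_split: the color for ranks with no pole contribution changes from MPI_UNDEFINED (which yields MPI_COMM_NULL) to 9 (a second, disjoint communicator), and the key becomes the caller's own rank instead of 0, so the split preserves rank order. A minimal sketch of color/key semantics with plain MPI:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Stand-in for interMapValuePole.empty(): even ranks "have no pole data".
        bool empty = (rank % 2 == 0);

        // Ranks with the same color land in the same new communicator; the key
        // orders ranks within it, so passing the old rank preserves rank order.
        // With MPI_UNDEFINED instead of 9, the 'empty' ranks would get
        // MPI_COMM_NULL rather than a communicator of their own.
        MPI_Comm poleComm;
        MPI_Comm_split(MPI_COMM_WORLD, empty ? 9 : 1, /*key=*/rank, &poleComm);

        int newRank;
        MPI_Comm_rank(poleComm, &newRank);
        std::printf("world rank %d -> color %d, new rank %d\n",
                    rank, empty ? 9 : 1, newRank);

        MPI_Comm_free(&poleComm);
        MPI_Finalize();
        return 0;
    }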