Changeset 1149 for XIOS/dev/branch_yushan_merged/src
- Timestamp: 05/31/17 10:51:06
- Location: XIOS/dev/branch_yushan_merged/src
- Files: 4 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
XIOS/dev/branch_yushan_merged/src/io/netCdfInterface.cpp
r1146 → r1149:

```diff
 int CNetCdfInterface::close(int ncId)
 {
-  int status = nc_close(ncId);
+  int status = NC_NOERR;
+  //#pragma omp critical (_netcdf)
+  #pragma omp master
+  {
+    status = nc_close(ncId);
   if (NC_NOERR != status)
   {
…
     throw CNetCdfException(e);
   }
+  }
 
   return status;
 }
```
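The change above makes only the OpenMP master thread call nc_close(): status is pre-initialized to NC_NOERR so the other threads skip the block and report success, and the commented-out critical pragma suggests that serializing the call across all threads was tried first. Below is a minimal standalone sketch of this pattern, with hypothetical names, not XIOS code:

```cpp
// Only the OpenMP master thread performs the non-thread-safe library call;
// the other threads skip the block and keep the "no error" default.
#include <cstdio>
#include <omp.h>

static int close_resource_once()
{
  int status = 0;  // "no error" default returned by non-master threads
  #pragma omp master
  {
    // nc_close(ncId) in the changeset; a printf stands in here
    std::printf("thread %d performs the close\n", omp_get_thread_num());
  }
  return status;
}

int main()
{
  #pragma omp parallel
  {
    int status = close_resource_once();
    if (status != 0)
      std::printf("thread %d saw an error\n", omp_get_thread_num());
  }
  return 0;
}
```

Unlike `#pragma omp critical`, `#pragma omp master` carries no implicit barrier, so non-master threads can return before the close has actually happened; whether that is acceptable depends on what the callers do next.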
XIOS/dev/branch_yushan_merged/src/node/field.cpp
r1134 → r1149:

```diff
 namespace xios{
 
-/// ////////////////////// Définitions ////////////////////// ///
+/// ////////////////////// Dfinitions ////////////////////// ///
 
 CField::CField(void)
…
   CContext* context = CContext::getCurrent();
   solveOnlyReferenceEnabledField(doSending2Server);
+  int myRank;
+  MPI_Comm_rank(context->client->intraComm, &myRank);
 
   if (!areAllReferenceSolved)
…
   if (context->hasClient)
   {
+    MPI_Barrier(context->client->intraComm);
+    printf("Proc %d enters function\n", myRank);
     solveTransformedGrid();
+    MPI_Barrier(context->client->intraComm);
+    printf("Proc %d exits function\n", myRank);
+
   }
```
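The added MPI_Comm_rank / MPI_Barrier / printf lines are a common way to localize a hang in a collective routine: every rank synchronizes, announces itself, runs the suspect call, and synchronizes again, so a rank whose "exits" line never appears pinpoints the deadlock. A self-contained sketch of the same tracing pattern (suspect_call is a placeholder):

```cpp
#include <cstdio>
#include <mpi.h>

// Bracket a suspect collective with barriers and rank-stamped prints:
// if the "exits" lines never appear, the call itself is hanging.
void traced_call(MPI_Comm comm)
{
  int myRank;
  MPI_Comm_rank(comm, &myRank);

  MPI_Barrier(comm);  // line all ranks up before entering
  printf("Proc %d enters function\n", myRank);
  // suspect_call();  // the collective under suspicion
  MPI_Barrier(comm);  // reached only if every rank got through
  printf("Proc %d exits function\n", myRank);
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  traced_call(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}
```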
XIOS/dev/branch_yushan_merged/src/node/file.cpp
r1146 → r1149:

```diff
 
   // Now everything is ok, close it
-  close();
+  //close();
 
```
XIOS/dev/branch_yushan_merged/src/transformation/domain_algorithm_interpolate.cpp
r1134 → r1149:

```diff
   CContext* context = CContext::getCurrent();
   CContextClient* client=context->client;
+  int split_key;
+  ep_lib::MPI_Comm_rank(client->intraComm, &split_key);
 
   ep_lib::MPI_Comm poleComme(MPI_COMM_NULL);
-  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 9 : 1, split_key, &poleComme);
   if (MPI_COMM_NULL != poleComme)
   {
…
     std::vector<int> recvCount(nbClientPole,0);
     std::vector<int> displ(nbClientPole,0);
-    MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
+    ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
 
     displ[0]=0;
     for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ;
     int recvSize=displ[nbClientPole-1]+recvCount[nbClientPole-1] ;
+
 
     std::vector<int> sendSourceIndexBuff(nbWeight);
…
 
     // Gather all index and weight for pole
-    MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
-    MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
+    ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
+    ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
 
     std::map<int,double> recvTemp;
…
 
-  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
+  ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
 
   int* sendIndexDestBuff = new int [sendBuffSize];
…
     sendRequest.push_back(ep_lib::MPI_Request());
-    MPI_Isend(sendIndexDestBuff + sendOffSet,
+    ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,
               k,
               MPI_INT,
…
               &sendRequest.back());
     sendRequest.push_back(ep_lib::MPI_Request());
-    MPI_Isend(sendIndexSrcBuff + sendOffSet,
+    ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,
               k,
               MPI_INT,
…
               &sendRequest.back());
     sendRequest.push_back(ep_lib::MPI_Request());
-    MPI_Isend(sendWeightBuff + sendOffSet,
+    ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,
               k,
               MPI_DOUBLE,
…
   {
     ep_lib::MPI_Status recvStatus;
-    MPI_Recv((recvIndexDestBuff + receivedSize),
+    ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize),
              recvBuffSize,
              MPI_INT,
…
 
     int countBuff = 0;
-    MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
+    ep_lib::MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
 #ifdef _usingMPI
     clientSrcRank = recvStatus.MPI_SOURCE;
…
     clientSrcRank = recvStatus.ep_src;
 #endif
-    MPI_Recv((recvIndexSrcBuff + receivedSize),
+    ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize),
              recvBuffSize,
              MPI_INT,
…
              &recvStatus);
 
-    MPI_Recv((recvWeightBuff + receivedSize),
+    ep_lib::MPI_Recv((recvWeightBuff + receivedSize),
              recvBuffSize,
              MPI_DOUBLE,
…
   std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
   ep_lib::MPI_Status stat_ignore;
-  MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
+  ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
 
   delete [] sendIndexDestBuff;
…
   }
 
-  MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
+  ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
   ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
```
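Most of these edits only qualify existing calls with the ep_lib:: wrapper namespace so they resolve to the branch's ep_lib wrappers rather than the plain MPI C API, but the MPI_Comm_split line also changes behavior: the color for ranks with an empty interMapValuePole goes from MPI_UNDEFINED (those ranks opt out and get MPI_COMM_NULL) to 9 (they now form a second, separate communicator), and the key goes from a constant 0 to the caller's rank, which makes the ordering inside each new communicator explicit. A standalone sketch of those MPI_Comm_split semantics, with hasPoleData standing in for !interMapValuePole.empty():

```cpp
#include <cstdio>
#include <mpi.h>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  bool hasPoleData = (rank % 2 == 0);  // stand-in for the real condition

  // Old: int color = hasPoleData ? 1 : MPI_UNDEFINED;  // opt out entirely
  int color = hasPoleData ? 1 : 9;     // new: form a second communicator

  MPI_Comm poleComm;
  MPI_Comm_split(MPI_COMM_WORLD, color, /*key=*/rank, &poleComm);

  if (poleComm != MPI_COMM_NULL)  // NULL only possible with MPI_UNDEFINED
  {
    int poleRank, poleSize;
    MPI_Comm_rank(poleComm, &poleRank);
    MPI_Comm_size(poleComm, &poleSize);
    printf("world rank %d -> color %d, rank %d of %d\n",
           rank, color, poleRank, poleSize);
    MPI_Comm_free(&poleComm);
  }
  MPI_Finalize();
  return 0;
}
```

In standard MPI the key only orders ranks within each new communicator, with ties broken by the rank in the old one, so keys of 0 and of the rank yield the same ordering; passing the rank explicitly matters mainly if the ep_lib wrapper does not implement that tie-breaking.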