Changeset 1338 for XIOS/dev/branch_openmp/src
- Timestamp:
- 11/21/17 10:47:57 (6 years ago)
- Location:
- XIOS/dev/branch_openmp/src
- Files:
-
- 11 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/branch_openmp/src/buffer_client.cpp
r1328 r1338 30 30 buffer[1] = new char[bufferSize]; 31 31 retBuffer = new CBufferOut(buffer[current], bufferSize); 32 info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl;32 //info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl; 33 33 } 34 34 -
XIOS/dev/branch_openmp/src/client.cpp
r1334 r1338 255 255 } 256 256 257 info(20) << "Client side context is finalized"<<endl ;258 report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;259 report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;260 report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;261 report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;262 report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;257 //info(20) << "Client side context is finalized"<<endl ; 258 //report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 259 //report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 260 //report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 261 //report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 262 //report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 263 263 // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ; 264 report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;265 report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;266 report(100)<<CTimer::getAllCumulatedTime()<<endl ;264 //report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 265 //report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 266 //report(100)<<CTimer::getAllCumulatedTime()<<endl ; 267 267 } 268 268 -
XIOS/dev/branch_openmp/src/context_client.cpp
r1328 r1338 400 400 for (itMap = itbMap; itMap != iteMap; ++itMap) 401 401 { 402 report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl403 << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;402 //report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl 403 // << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 404 404 totalBuf += itMap->second; 405 405 } 406 report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;406 //report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl; 407 407 408 408 releaseBuffers(); -
XIOS/dev/branch_openmp/src/context_server.cpp
r1328 r1338 254 254 { 255 255 finished=true; 256 info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl;256 //info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl; 257 257 std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(), 258 258 iteMap = mapBufferSize_.end(), itMap; … … 260 260 for (itMap = itbMap; itMap != iteMap; ++itMap) 261 261 { 262 report(10)<< " Memory report : Context <"<<context->getId()<<"> : server side : memory used for buffer of each connection to client" << endl263 << " +) With client of rank " << itMap->first << " : " << itMap->second << " bytes " << endl;262 //report(10)<< " Memory report : Context <"<<context->getId()<<"> : server side : memory used for buffer of each connection to client" << endl 263 // << " +) With client of rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 264 264 totalBuf += itMap->second; 265 265 } 266 266 context->finalize(); 267 report(0)<< " Memory report : Context <"<<context->getId()<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;267 //report(0)<< " Memory report : Context <"<<context->getId()<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; 268 268 } 269 269 else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event); -
XIOS/dev/branch_openmp/src/io/nc4_data_input.cpp
r1328 r1338 53 53 CArray<double,1> fieldData(grid->getWrittenDataSize()); 54 54 if (!field->default_value.isEmpty()) fieldData = field->default_value; 55 55 #ifdef _usingEP 56 SuperClass::type = ONE_FILE; 57 printf("SuperClass::type = %d\n", SuperClass::type); 58 #endif 59 56 60 switch (SuperClass::type) 57 61 { -
XIOS/dev/branch_openmp/src/io/netCdfInterface.cpp
r1334 r1338 128 128 int CNetCdfInterface::close(int ncId) 129 129 { 130 int status = nc_close(ncId); 131 if (NC_NOERR != status) 132 { 133 StdString errormsg(nc_strerror(status)); 134 StdStringStream sstr; 135 sstr << "Error when calling function nc_close(ncId)" << std::endl 136 << errormsg << std::endl 137 << "Unable to close file, given its id: " << ncId << std::endl; 138 StdString e = sstr.str(); 139 throw CNetCdfException(e); 140 } 141 130 int status = NC_NOERR; 131 #pragma omp master 132 { 133 status = nc_close(ncId); 134 if (NC_NOERR != status) 135 { 136 StdString errormsg(nc_strerror(status)); 137 StdStringStream sstr; 138 sstr << "Error when calling function nc_close(ncId)" << std::endl 139 << errormsg << std::endl 140 << "Unable to close file, given its id: " << ncId << std::endl; 141 StdString e = sstr.str(); 142 throw CNetCdfException(e); 143 } 144 } 142 145 return status; 143 146 } -
XIOS/dev/branch_openmp/src/node/context.cpp
r1334 r1338 1205 1205 void CContext::updateCalendar(int step) 1206 1206 { 1207 info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl;1207 //info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 1208 1208 calendar->update(step); 1209 info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl;1209 //info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1210 1210 #ifdef XIOS_MEMTRACK_LIGHT 1211 1211 info(50) << " Current memory used by XIOS : "<< MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; -
XIOS/dev/branch_openmp/src/node/field.cpp
r1328 r1338 316 316 while (currentDate >= lastDataRequestedFromServer) 317 317 { 318 info(20) << "currentDate : " << currentDate << endl ;319 info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ;320 info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ;321 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ;318 //info(20) << "currentDate : " << currentDate << endl ; 319 //info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 320 //info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 321 //info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 322 322 323 323 dataRequested |= sendReadDataRequest(lastDataRequestedFromServer + file->output_freq); … … 518 518 while (isDataLate && timer.getCumulatedTime() < CXios::recvFieldTimeout); 519 519 520 if (isDataLate)521 ERROR("void CField::checkForLateDataFromServer(void)",522 << "Late data at timestep = " << currentDate);520 //if (isDataLate) 521 // ERROR("void CField::checkForLateDataFromServer(void)", 522 // << "Late data at timestep = " << currentDate); 523 523 } 524 524 } -
XIOS/dev/branch_openmp/src/node/file.cpp
r1334 r1338 596 596 597 597 bool isCollective = par_access.isEmpty() || par_access == par_access_attr::collective; 598 599 if (isOpen) data_out->closeFile(); 600 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective)); 601 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name)); 602 isOpen = true; 598 #ifdef _usingEP 599 //printf("multifile was %d\n", multifile); 600 //multifile = true; 601 if (isOpen) data_out->closeFile(); 602 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective)); 603 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name)); 604 isOpen = true; 605 #elif _usingMPI 606 if (isOpen) data_out->closeFile(); 607 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective)); 608 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name)); 609 isOpen = true; 610 #endif 603 611 } 604 612 } … … 655 663 656 664 // Now everything is ok, close it 657 close();665 //close(); 658 666 } 659 667 -
XIOS/dev/branch_openmp/src/test/test_remap_omp.f90
r1334 r1338 95 95 ALLOCATE(lval1(interpolatedLlm)) 96 96 ALLOCATE(lval2(llm2)) 97 lval2 = 0 98 lval=0 99 lval1=0 97 100 98 101 ierr=NF90_INQ_VARID(ncid,"lon",varid) -
XIOS/dev/branch_openmp/src/transformation/domain_algorithm_interpolate.cpp
r1334 r1338 405 405 CContext* context = CContext::getCurrent(); 406 406 CContextClient* client=context->client; 407 int mykey; 408 ep_lib::MPI_Comm_rank(client->intraComm, &mykey); 407 409 408 410 ep_lib::MPI_Comm poleComme; 409 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 411 //ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 412 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, mykey, &poleComme); 410 413 if (!poleComme.is_null()) 411 414 { … … 423 426 std::vector<int> displ(nbClientPole,0); 424 427 ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 425 426 428 displ[0]=0; 427 429 for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ;
Note: See TracChangeset for help on using the changeset viewer.