Changeset 2258
- Timestamp:
- 11/16/21 17:37:42 (2 years ago)
- Location:
- XIOS/dev/dev_ym/XIOS_COUPLING/src
- Files:
-
- 13 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_ym/XIOS_COUPLING/src/buffer_client.cpp
r2246 r2258 16 16 CClientBuffer::CClientBuffer(MPI_Comm interComm, vector<MPI_Win>& windows, int clientRank, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize) 17 17 : interComm(interComm) 18 , clientRank_( clientRank)18 , clientRank_(0) 19 19 , serverRank(serverRank) 20 20 , bufferSize(bufferSize) … … 378 378 bool CClientBuffer::isNotifiedFinalized(void) 379 379 { 380 381 bool ret ; 382 lockBuffer() ; 383 ret=*notify[current] == notifyFinalize_ ? true : false ; 384 unlockBuffer() ; 385 386 return ret; 380 if (!isFinalized_) 381 { 382 double time=MPI_Wtime() ; 383 // if (time - lastCheckedNotify_ > latency_) 384 { 385 int flag ; 386 MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE); 387 lockBuffer() ; 388 isFinalized_=*notify[current] == notifyFinalize_ ? true : false ; 389 unlockBuffer() ; 390 lastCheckedNotify_=time ; 391 } 392 } 393 return isFinalized_ ; 387 394 } 388 395 -
XIOS/dev/dev_ym/XIOS_COUPLING/src/buffer_client.hpp
r2246 r2258 57 57 StdSize bufferSize; 58 58 const StdSize estimatedMaxEventSize; 59 59 bool isFinalized_=false ; 60 60 61 61 const int serverRank; … … 72 72 double latency_=1e-2 ; 73 73 double lastCheckedWithNothing_=0 ; 74 double lastCheckedNotify_=0 ; 74 75 }; 75 76 } -
XIOS/dev/dev_ym/XIOS_COUPLING/src/buffer_server.hpp
r2246 r2258 20 20 void freeBuffer(size_t count) ; 21 21 void createWindows(MPI_Comm oneSidedComm) ; 22 bool freeWindows(void) ;23 22 bool getBufferFromClient(size_t timeLine, char* & buffer, size_t& count) ; 24 23 bool isBufferEmpty(void) ; -
XIOS/dev/dev_ym/XIOS_COUPLING/src/context_client.cpp
r2246 r2258 60 60 // Why ? Just because on openMPI, it reduce the creation time of windows otherwhise which increase quadratically 61 61 // We don't know the reason 62 62 double time ; 63 63 MPI_Comm commSelf ; 64 64 MPI_Comm_split(intraComm_,clientRank,clientRank, &commSelf) ; 65 vector<MPI_Comm> dummyComm(serverSize) ; 66 for(int rank=0; rank<serverSize; rank++) MPI_Intercomm_create(commSelf, 0, interCommMerged, clientSize+rank, 0, &dummyComm[rank]) ; 67 68 // create windows for one-sided 69 windows.resize(serverSize) ; 70 MPI_Comm winComm ; 71 for(int rank=0; rank<serverSize; rank++) 72 { 73 windows[rank].resize(2) ; 74 MPI_Comm_split(interCommMerged, rank, clientRank, &winComm); 75 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[rank][0]); 76 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[rank][1]); 77 // ym : Warning : intelMPI doesn't support that communicator of windows be deallocated before the windows deallocation, crash at MPI_Win_lock 78 // Bug or not ? 79 // MPI_Comm_free(&winComm) ; 80 } 81 82 // free dummy intercommunicator => take times ? 
83 for(int rank=0; rank<serverSize; rank++) MPI_Comm_free(&dummyComm[rank]) ; 65 MPI_Comm interComm ; 66 winComm_.resize(serverSize) ; 67 windows_.resize(serverSize) ; 68 for(int rank=0; rank<serverSize; rank++) 69 { 70 time=MPI_Wtime() ; 71 MPI_Intercomm_create(commSelf, 0, interCommMerged, clientSize+rank, 0, &interComm) ; 72 MPI_Intercomm_merge(interComm, false, &winComm_[rank]) ; 73 windows_[rank].resize(2) ; 74 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][0]); 75 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][1]); 76 time=MPI_Wtime()-time ; 77 info(100)<< "MPI_Win_create_dynamic : client to server rank "<<rank<<" => "<<time/1e-6<<" us"<<endl ; 78 } 84 79 MPI_Comm_free(&commSelf) ; 85 86 80 CTimer::get("create Windows").resume() ; 87 81 } … … 345 339 346 340 vector<MPI_Win> Wins(2,MPI_WIN_NULL) ; 347 if (!isAttachedModeEnabled()) Wins=windows [rank] ;341 if (!isAttachedModeEnabled()) Wins=windows_[rank] ; 348 342 349 343 CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, Wins, clientRank, rank, mapBufferSize_[rank], maxEventSizes[rank]); … … 392 386 for(int rank=0; rank<serverSize; rank++) 393 387 { 394 MPI_Win_free(&windows[rank][0]); 395 MPI_Win_free(&windows[rank][1]); 388 MPI_Win_free(&windows_[rank][0]); 389 MPI_Win_free(&windows_[rank][1]); 390 MPI_Comm_free(&winComm_[rank]) ; 396 391 } 397 392 } -
XIOS/dev/dev_ym/XIOS_COUPLING/src/context_client.hpp
r2246 r2258 120 120 std::list<int> ranksServerNotLeader; 121 121 122 std::vector<std::vector<MPI_Win> >windows ; //! one sided mpi windows to expose client buffers to servers == windows[nbServers][2] 122 std::vector<MPI_Comm> winComm_ ; //! Window communicators 123 std::vector<std::vector<MPI_Win> >windows_ ; //! one sided mpi windows to expose client buffers to servers == windows[nbServers][2] 123 124 bool isAttached_ ; 124 125 CContextServer* associatedServer_ ; //!< The server associated to the pair client/server -
XIOS/dev/dev_ym/XIOS_COUPLING/src/context_server.cpp
r2246 r2258 46 46 else attachedMode=true ; 47 47 48 int clientSize ; 49 if (flag) MPI_Comm_remote_size(interComm,&clientSize); 50 else MPI_Comm_size(interComm,&clientSize); 48 if (flag) MPI_Comm_remote_size(interComm,&clientSize_); 49 else MPI_Comm_size(interComm,&clientSize_); 51 50 52 51 … … 80 79 MPI_Intercomm_merge(interComm_,true,&interCommMerged) ; 81 80 82 // We create dummy pair of intercommunicator between clients and server 83 // Why ? Just because on openMPI, it reduce the creation time of windows otherwhise which increase quadratically 84 // We don't know the reason 81 double time ; 82 windows_.resize(clientSize_) ; 85 83 MPI_Comm commSelf ; 86 84 MPI_Comm_split(intraComm_, intraCommRank, intraCommRank, &commSelf) ; 87 vector<MPI_Comm> dummyComm(clientSize) ; 88 for(int rank=0; rank<clientSize ; rank++) MPI_Intercomm_create(commSelf, 0, interCommMerged, rank, 0 , &dummyComm[rank]) ; 89 90 // create windows for one sided comm 91 MPI_Comm winComm ; 92 windows.resize(2) ; 93 for(int rank=clientSize; rank<clientSize+intraCommSize; rank++) 94 { 95 if (rank==clientSize+intraCommRank) 96 { 97 MPI_Comm_split(interCommMerged, intraCommRank, rank, &winComm); 98 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[0]); 99 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[1]); 100 } 101 else MPI_Comm_split(interCommMerged, intraCommRank, rank, &winComm); 102 // ym : Warning : intelMPI doesn't support that communicator of windows be deallocated before the windows deallocation, crash at MPI_Win_lock 103 // Bug or not ? 
104 // MPI_Comm_free(&winComm) ; 105 } 106 107 // free dummy intercommunicator 108 for(int rank=0; rank<clientSize ; rank++) MPI_Comm_free(&dummyComm[rank]) ; 85 MPI_Comm interComm ; 86 winComm_.resize(clientSize_) ; 87 for(int rank=0; rank<clientSize_ ; rank++) 88 { 89 time=MPI_Wtime() ; 90 MPI_Intercomm_create(commSelf, 0, interCommMerged, rank, 0 , &interComm) ; 91 MPI_Intercomm_merge(interComm, true, &winComm_[rank]) ; 92 windows_[rank].resize(2) ; 93 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][0]); 94 MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][1]); 95 time=MPI_Wtime()-time ; 96 info(100)<< "MPI_Win_create_dynamic : server to client rank "<<rank<<" => "<<time/1e-6<<" us"<<endl ; 97 } 109 98 MPI_Comm_free(&commSelf) ; 110 99 CTimer::get("create Windows").suspend() ; … … 112 101 else 113 102 { 114 windows.resize(2) ; 115 windows[0]=MPI_WIN_NULL ; 116 windows[1]=MPI_WIN_NULL ; 103 winComm_.resize(clientSize_) ; 104 windows_.resize(clientSize_) ; 105 for(int rank=0; rank<clientSize_ ; rank++) 106 { 107 winComm_[rank] = MPI_COMM_NULL ; 108 windows_[rank].resize(2) ; 109 windows_[rank][0]=MPI_WIN_NULL ; 110 windows_[rank][1]=MPI_WIN_NULL ; 111 } 117 112 } 118 113 … … 160 155 return finished; 161 156 } 162 /*163 void CContextServer::listen(void)164 {165 int rank;166 int flag;167 int count;168 char * addr;169 MPI_Status status;170 map<int,CServerBuffer*>::iterator it;171 bool okLoop;172 173 traceOff();174 // WARNING : with intel MPI, probing crash on an intercommunicator with release library but not with release_mt175 // ==> source $I_MPI_ROOT/intel64/bin/mpivars.sh release_mt needed176 MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status);177 traceOn();178 179 if (flag==true)180 {181 rank=status.MPI_SOURCE ;182 okLoop = true;183 if (pendingRequest.find(rank)==pendingRequest.end())184 okLoop = !listenPendingRequest(status) ;185 if (okLoop)186 {187 for(rank=0;rank<commSize;rank++)188 {189 if 
(pendingRequest.find(rank)==pendingRequest.end())190 {191 192 traceOff();193 MPI_Iprobe(rank, 20,interComm,&flag,&status);194 traceOn();195 if (flag==true) listenPendingRequest(status) ;196 }197 }198 }199 }200 }201 202 bool CContextServer::listenPendingRequest(MPI_Status& status)203 {204 int count;205 char * addr;206 map<int,CServerBuffer*>::iterator it;207 int rank=status.MPI_SOURCE ;208 209 it=buffers.find(rank);210 if (it==buffers.end()) // Receive the buffer size and allocate the buffer211 {212 MPI_Aint recvBuff[4] ;213 MPI_Recv(recvBuff, 4, MPI_AINT, rank, 20, interComm, &status);214 remoteHashId_ = recvBuff[0] ;215 StdSize buffSize = recvBuff[1];216 vector<MPI_Aint> winAdress(2) ;217 winAdress[0]=recvBuff[2] ; winAdress[1]=recvBuff[3] ;218 mapBufferSize_.insert(std::make_pair(rank, buffSize));219 it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows, winAdress, rank, buffSize)))).first;220 221 lastTimeLine[rank]=0 ;222 itLastTimeLine=lastTimeLine.begin() ;223 224 return true;225 }226 else227 {228 MPI_Get_count(&status,MPI_CHAR,&count);229 if (it->second->isBufferFree(count))230 {231 addr=(char*)it->second->getBuffer(count);232 MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);233 bufferRequest[rank]=addr;234 return true;235 }236 else237 return false;238 }239 }240 */241 157 242 158 void CContextServer::listen(void) … … 274 190 winAdress[0]=recvBuff[2] ; winAdress[1]=recvBuff[3] ; 275 191 mapBufferSize_.insert(std::make_pair(rank, buffSize)); 276 it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows , winAdress, rank, buffSize)))).first;192 it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows_[rank], winAdress, 0, buffSize)))).first; 277 193 lastTimeLine[rank]=0 ; 278 194 itLastTimeLine=lastTimeLine.begin() ; … … 407 323 newBuffer>>newSize>>winAdress[0]>>winAdress[1] ; 408 324 buffers.erase(rank) ; 409 buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows , 
winAdress, rank, newSize)));325 buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows_[rank], winAdress, 0, newSize))); 410 326 } 411 327 else … … 500 416 void CContextServer::releaseBuffers() 501 417 { 502 map<int,CServerBuffer*>::iterator it; 503 bool out ; 504 do 505 { 506 out=true ; 507 for(it=buffers.begin();it!=buffers.end();++it) 508 { 509 // out = out && it->second->freeWindows() ; 510 511 } 512 } while (! out) ; 513 MPI_Win_free(&windows[0]) ; 514 MPI_Win_free(&windows[1]) ; 418 for(auto it=buffers.begin();it!=buffers.end();++it) delete it->second ; 419 buffers.clear() ; 420 freeWindows() ; 421 } 422 423 void CContextServer::freeWindows() 424 { 425 if (!isAttachedModeEnabled()) 426 { 427 for(int rank=0; rank<clientSize_; rank++) 428 { 429 MPI_Win_free(&windows_[rank][0]); 430 MPI_Win_free(&windows_[rank][1]); 431 MPI_Comm_free(&winComm_[rank]) ; 432 } 433 } 515 434 } 516 435 … … 538 457 finished=true; 539 458 info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 540 // releaseBuffers() ;541 459 notifyClientsFinalize() ; 542 460 CTimer::get("receiving requests").suspend(); 543 461 context->finalize(); 544 545 // don't know where release windows 546 MPI_Win_free(&windows[0]) ; 547 MPI_Win_free(&windows[1]) ; 462 freeWindows() ; 548 463 549 464 std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(), -
XIOS/dev/dev_ym/XIOS_COUPLING/src/context_server.hpp
r2246 r2258 34 34 void releaseBuffers(void) ; 35 35 void notifyClientsFinalize(void) ; 36 void freeWindows(void) ; // !<< free Windows for one sided communication 36 37 37 38 MPI_Comm intraComm ; … … 41 42 MPI_Comm interComm ; 42 43 int commSize ; 44 int clientSize_ ; 43 45 44 46 MPI_Comm interCommMerged; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. … … 71 73 72 74 std::map<int, StdSize> mapBufferSize_; 73 vector<MPI_Win> windows ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side. 75 std::vector<MPI_Comm>winComm_ ; //! Window communicators 76 std::vector<std::vector<MPI_Win> >windows_ ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side. 74 77 CEventScheduler* eventScheduler_ ; 75 78 bool isProcessingEvent_ ; -
XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/contexts_manager.cpp
r2246 r2258 109 109 void CContextsManager::sendNotification(int rank) 110 110 { 111 winNotify_->lockWindow (rank,0) ;112 winNotify_->pushTo Window(rank, this, &CContextsManager::notificationsDumpOut) ;113 winNotify_->unlockWindow(rank ,0) ;111 winNotify_->lockWindowExclusive(rank) ; 112 winNotify_->pushToLockedWindow(rank, this, &CContextsManager::notificationsDumpOut) ; 113 winNotify_->unlockWindow(rank) ; 114 114 } 115 115 … … 168 168 int commRank ; 169 169 MPI_Comm_rank(xiosComm_, &commRank) ; 170 winNotify_->lockWindow (commRank,0) ;171 winNotify_->popFrom Window(commRank, this, &CContextsManager::notificationsDumpIn) ;172 winNotify_->unlockWindow(commRank ,0) ;170 winNotify_->lockWindowExclusive(commRank) ; 171 winNotify_->popFromLockedWindow(commRank, this, &CContextsManager::notificationsDumpIn) ; 172 winNotify_->unlockWindow(commRank) ; 173 173 if (notifyType_==NOTIFY_CREATE_CONTEXT) createServerContext() ; 174 174 else if (notifyType_==NOTIFY_CREATE_INTERCOMM) createServerContextIntercomm() ; -
XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/ressources_manager.cpp
r2246 r2258 72 72 void CRessourcesManager::sendNotification(int rank) 73 73 { 74 winNotify_->lockWindow (rank,0) ;75 winNotify_->pushTo Window(rank, this, &CRessourcesManager::notificationsDumpOut) ;76 winNotify_->unlockWindow(rank ,0) ;74 winNotify_->lockWindowExclusive(rank) ; 75 winNotify_->pushToLockedWindow(rank, this, &CRessourcesManager::notificationsDumpOut) ; 76 winNotify_->unlockWindow(rank) ; 77 77 } 78 78 … … 128 128 MPI_Comm_rank(xiosComm_, &commRank) ; 129 129 CTimer::get("CRessourcesManager::checkNotifications lock").resume(); 130 winNotify_->lockWindow (commRank,0) ;130 winNotify_->lockWindowExclusive(commRank) ; 131 131 CTimer::get("CRessourcesManager::checkNotifications lock").suspend(); 132 132 CTimer::get("CRessourcesManager::checkNotifications pop").resume(); 133 winNotify_->popFrom Window(commRank, this, &CRessourcesManager::notificationsDumpIn) ;133 winNotify_->popFromLockedWindow(commRank, this, &CRessourcesManager::notificationsDumpIn) ; 134 134 CTimer::get("CRessourcesManager::checkNotifications pop").suspend(); 135 135 CTimer::get("CRessourcesManager::checkNotifications unlock").resume(); 136 winNotify_->unlockWindow(commRank ,0) ;136 winNotify_->unlockWindow(commRank) ; 137 137 CTimer::get("CRessourcesManager::checkNotifications unlock").suspend(); 138 138 if (notifyType_==NOTIFY_CREATE_POOL) createPool() ; -
XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/server_context.cpp
r2246 r2258 160 160 void CServerContext::sendNotification(int rank) 161 161 { 162 winNotify_->lockWindow (rank,0) ;163 winNotify_->pushTo Window(rank, this, &CServerContext::notificationsDumpOut) ;164 winNotify_->unlockWindow(rank ,0) ;162 winNotify_->lockWindowExclusive(rank) ; 163 winNotify_->pushToLockedWindow(rank, this, &CServerContext::notificationsDumpOut) ; 164 winNotify_->unlockWindow(rank) ; 165 165 } 166 166 … … 201 201 int commRank ; 202 202 MPI_Comm_rank(contextComm_, &commRank) ; 203 winNotify_->lockWindow (commRank,0) ;204 winNotify_->popFrom Window(commRank, this, &CServerContext::notificationsDumpIn) ;205 winNotify_->unlockWindow(commRank ,0) ;203 winNotify_->lockWindowExclusive(commRank) ; 204 winNotify_->popFromLockedWindow(commRank, this, &CServerContext::notificationsDumpIn) ; 205 winNotify_->unlockWindow(commRank) ; 206 206 207 207 if (notifyInType_!= NOTIFY_NOTHING) -
XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/services.cpp
r2246 r2258 131 131 void CService::sendNotification(int rank) 132 132 { 133 winNotify_->lockWindow (rank,0) ;134 winNotify_->pushTo Window(rank, this, &CService::notificationsDumpOut) ;135 winNotify_->unlockWindow(rank ,0) ;133 winNotify_->lockWindowExclusive(rank) ; 134 winNotify_->pushToLockedWindow(rank, this, &CService::notificationsDumpOut) ; 135 winNotify_->unlockWindow(rank) ; 136 136 } 137 137 … … 175 175 int commRank ; 176 176 MPI_Comm_rank(serviceComm_, &commRank) ; 177 winNotify_->lockWindow (commRank,0) ;178 winNotify_->popFrom Window(commRank, this, &CService::notificationsDumpIn) ;179 winNotify_->unlockWindow(commRank ,0) ;177 winNotify_->lockWindowExclusive(commRank) ; 178 winNotify_->popFromLockedWindow(commRank, this, &CService::notificationsDumpIn) ; 179 winNotify_->unlockWindow(commRank) ; 180 180 181 181 if (notifyInType_!= NOTIFY_NOTHING) -
XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/window_manager.hpp
r2246 r2258 189 189 } 190 190 191 template< class T > 192 void pushToLockedWindow(int rank, T* object, void (T::*dumpOut)(CBufferOut&) ) 193 { 194 size_t size ; 195 MPI_Get(&size, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 196 MPI_Win_flush(rank,window_) ; 197 CBufferOut buffer ; 198 (object->*dumpOut)(buffer) ; 199 size_t bufferSize=buffer.count() ; 200 size_t newSize = size + bufferSize; 201 MPI_Put(&newSize, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 202 MPI_Put(buffer.start(), bufferSize, MPI_CHAR, rank, OFFSET_BUFFER+size, bufferSize, MPI_CHAR, window_) ; 203 } 204 191 205 template< typename T > 192 206 void popFromWindow(int rank, T* object, void (T::*dumpIn)(CBufferIn&) ) … … 206 220 MPI_Win_unlock(rank, window_) ; 207 221 222 } 223 224 template< typename T > 225 void popFromLockedWindow(int rank, T* object, void (T::*dumpIn)(CBufferIn&) ) 226 { 227 size_t size ; 228 MPI_Get(&size, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 229 MPI_Win_flush(rank,window_) ; 230 CBufferIn buffer(size) ; 231 MPI_Get(buffer.start(), size, MPI_CHAR, rank,OFFSET_BUFFER, size, MPI_CHAR, window_) ; 232 MPI_Win_flush(rank,window_) ; 233 (object->*dumpIn)(buffer) ; 234 235 size=buffer.remain() ; 236 MPI_Put(&size, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 237 MPI_Put(buffer.ptr(),buffer.remain(), MPI_CHAR, rank, OFFSET_BUFFER, buffer.remain(), MPI_CHAR, window_) ; 208 238 } 209 239 -
XIOS/dev/dev_ym/XIOS_COUPLING/src/node/context.cpp
r2240 r2258 742 742 notifiedFinalized=client->isNotifiedFinalized() ; 743 743 } while (!notifiedFinalized) ; 744 744 745 server->releaseBuffers(); 745 746 client->releaseBuffers();
Note: See TracChangeset for help on using the changeset viewer.