Changeset 2258


Ignore:
Timestamp:
11/16/21 17:37:42 (2 years ago)
Author:
ymipsl
Message:

One-sided protocol improvement.
YM

Location:
XIOS/dev/dev_ym/XIOS_COUPLING/src
Files:
13 edited

Legend:

Unmodified
Added
Removed
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/buffer_client.cpp

    r2246 r2258  
    1616  CClientBuffer::CClientBuffer(MPI_Comm interComm, vector<MPI_Win>& windows, int clientRank, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize) 
    1717    : interComm(interComm) 
    18     , clientRank_(clientRank) 
     18    , clientRank_(0) 
    1919    , serverRank(serverRank) 
    2020    , bufferSize(bufferSize) 
     
    378378  bool CClientBuffer::isNotifiedFinalized(void) 
    379379  { 
    380     
    381     bool ret ; 
    382     lockBuffer() ; 
    383     ret=*notify[current] == notifyFinalize_ ? true : false ; 
    384     unlockBuffer() ; 
    385  
    386     return ret; 
     380    if (!isFinalized_) 
     381    { 
     382      double time=MPI_Wtime() ; 
     383//      if (time - lastCheckedNotify_ > latency_) 
     384      { 
     385        int flag ; 
     386        MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, MPI_STATUS_IGNORE); 
     387        lockBuffer() ; 
     388        isFinalized_=*notify[current] == notifyFinalize_ ? true : false ; 
     389        unlockBuffer() ; 
     390        lastCheckedNotify_=time ; 
     391      } 
     392    } 
     393    return isFinalized_ ; 
    387394  } 
    388395 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/buffer_client.hpp

    r2246 r2258  
    5757      StdSize bufferSize; 
    5858      const StdSize estimatedMaxEventSize; 
    59  
     59      bool isFinalized_=false ; 
    6060 
    6161      const int serverRank; 
     
    7272      double latency_=1e-2 ; 
    7373      double lastCheckedWithNothing_=0 ; 
     74      double lastCheckedNotify_=0 ; 
    7475  }; 
    7576} 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/buffer_server.hpp

    r2246 r2258  
    2020      void freeBuffer(size_t count) ; 
    2121      void createWindows(MPI_Comm oneSidedComm) ; 
    22       bool freeWindows(void) ; 
    2322      bool getBufferFromClient(size_t timeLine, char* & buffer, size_t& count) ; 
    2423      bool isBufferEmpty(void) ; 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/context_client.cpp

    r2246 r2258  
    6060        // Why ? Just because on openMPI, it reduce the creation time of windows otherwhise which increase quadratically 
    6161        // We don't know the reason 
    62        
     62        double time ; 
    6363        MPI_Comm commSelf ; 
    6464        MPI_Comm_split(intraComm_,clientRank,clientRank, &commSelf) ; 
    65         vector<MPI_Comm> dummyComm(serverSize) ; 
    66         for(int rank=0; rank<serverSize; rank++) MPI_Intercomm_create(commSelf, 0, interCommMerged, clientSize+rank, 0, &dummyComm[rank]) ; 
    67  
    68         // create windows for one-sided 
    69         windows.resize(serverSize) ; 
    70         MPI_Comm winComm ; 
    71         for(int rank=0; rank<serverSize; rank++) 
    72         { 
    73           windows[rank].resize(2) ; 
    74           MPI_Comm_split(interCommMerged, rank, clientRank, &winComm); 
    75           MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[rank][0]); 
    76           MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[rank][1]); 
    77 //       ym : Warning : intelMPI doesn't support that communicator of windows be deallocated before the windows deallocation, crash at MPI_Win_lock 
    78 //            Bug or not ?           
    79 //          MPI_Comm_free(&winComm) ; 
    80         } 
    81          
    82         // free dummy intercommunicator => take times ? 
    83         for(int rank=0; rank<serverSize; rank++)  MPI_Comm_free(&dummyComm[rank]) ; 
     65        MPI_Comm interComm ; 
     66        winComm_.resize(serverSize) ; 
     67        windows_.resize(serverSize) ; 
     68        for(int rank=0; rank<serverSize; rank++)  
     69        { 
     70          time=MPI_Wtime() ; 
     71          MPI_Intercomm_create(commSelf, 0, interCommMerged, clientSize+rank, 0, &interComm) ; 
     72          MPI_Intercomm_merge(interComm, false, &winComm_[rank]) ; 
     73          windows_[rank].resize(2) ; 
     74          MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][0]); 
     75          MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][1]);   
     76          time=MPI_Wtime()-time ; 
     77          info(100)<< "MPI_Win_create_dynamic : client to server rank "<<rank<<" => "<<time/1e-6<<" us"<<endl ; 
     78        } 
    8479        MPI_Comm_free(&commSelf) ; 
    85  
    8680        CTimer::get("create Windows").resume() ; 
    8781     } 
     
    345339       
    346340      vector<MPI_Win> Wins(2,MPI_WIN_NULL) ; 
    347       if (!isAttachedModeEnabled()) Wins=windows[rank] ; 
     341      if (!isAttachedModeEnabled()) Wins=windows_[rank] ; 
    348342   
    349343      CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, Wins, clientRank, rank, mapBufferSize_[rank], maxEventSizes[rank]); 
     
    392386        for(int rank=0; rank<serverSize; rank++) 
    393387        { 
    394           MPI_Win_free(&windows[rank][0]); 
    395           MPI_Win_free(&windows[rank][1]); 
     388          MPI_Win_free(&windows_[rank][0]); 
     389          MPI_Win_free(&windows_[rank][1]); 
     390          MPI_Comm_free(&winComm_[rank]) ; 
    396391        } 
    397392      }  
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/context_client.hpp

    r2246 r2258  
    120120      std::list<int> ranksServerNotLeader; 
    121121 
    122       std::vector<std::vector<MPI_Win> >windows ; //! one sided mpi windows to expose client buffers to servers == windows[nbServers][2] 
     122      std::vector<MPI_Comm> winComm_ ; //! Window communicators 
     123      std::vector<std::vector<MPI_Win> >windows_ ; //! one sided mpi windows to expose client buffers to servers == windows[nbServers][2] 
    123124      bool isAttached_ ; 
    124125      CContextServer* associatedServer_ ; //!< The server associated to the pair client/server 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/context_server.cpp

    r2246 r2258  
    4646    else  attachedMode=true ; 
    4747     
    48     int clientSize ; 
    49     if (flag) MPI_Comm_remote_size(interComm,&clientSize); 
    50     else  MPI_Comm_size(interComm,&clientSize); 
     48    if (flag) MPI_Comm_remote_size(interComm,&clientSize_); 
     49    else  MPI_Comm_size(interComm,&clientSize_); 
    5150 
    5251    
     
    8079      MPI_Intercomm_merge(interComm_,true,&interCommMerged) ; 
    8180 
    82       // We create dummy pair of intercommunicator between clients and server 
    83       // Why ? Just because on openMPI, it reduce the creation time of windows otherwhise which increase quadratically 
    84       // We don't know the reason 
     81      double time ; 
     82      windows_.resize(clientSize_) ; 
    8583      MPI_Comm commSelf ; 
    8684      MPI_Comm_split(intraComm_, intraCommRank, intraCommRank, &commSelf) ; 
    87       vector<MPI_Comm> dummyComm(clientSize) ; 
    88       for(int rank=0; rank<clientSize ; rank++) MPI_Intercomm_create(commSelf, 0, interCommMerged, rank, 0 , &dummyComm[rank]) ; 
    89  
    90       // create windows for one sided comm 
    91       MPI_Comm winComm ; 
    92       windows.resize(2) ; 
    93       for(int rank=clientSize; rank<clientSize+intraCommSize; rank++) 
    94       { 
    95         if (rank==clientSize+intraCommRank)  
    96         { 
    97           MPI_Comm_split(interCommMerged, intraCommRank, rank, &winComm); 
    98           MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[0]); 
    99           MPI_Win_create_dynamic(MPI_INFO_NULL, winComm, &windows[1]);    
    100         } 
    101         else MPI_Comm_split(interCommMerged, intraCommRank, rank, &winComm); 
    102         //       ym : Warning : intelMPI doesn't support that communicator of windows be deallocated before the windows deallocation, crash at MPI_Win_lock 
    103         //            Bug or not ?           
    104         //         MPI_Comm_free(&winComm) ; 
    105       } 
    106        
    107       // free dummy intercommunicator 
    108       for(int rank=0; rank<clientSize ; rank++)  MPI_Comm_free(&dummyComm[rank]) ; 
     85      MPI_Comm interComm ; 
     86      winComm_.resize(clientSize_) ; 
     87      for(int rank=0; rank<clientSize_ ; rank++)  
     88      { 
     89        time=MPI_Wtime() ; 
     90        MPI_Intercomm_create(commSelf, 0, interCommMerged, rank, 0 , &interComm) ; 
     91        MPI_Intercomm_merge(interComm, true, &winComm_[rank]) ; 
     92        windows_[rank].resize(2) ; 
     93        MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][0]); 
     94        MPI_Win_create_dynamic(MPI_INFO_NULL, winComm_[rank], &windows_[rank][1]);   
     95        time=MPI_Wtime()-time ; 
     96        info(100)<< "MPI_Win_create_dynamic : server to client rank "<<rank<<" => "<<time/1e-6<<" us"<<endl ; 
     97      } 
    10998      MPI_Comm_free(&commSelf) ; 
    11099      CTimer::get("create Windows").suspend() ; 
     
    112101    else  
    113102    { 
    114       windows.resize(2) ; 
    115       windows[0]=MPI_WIN_NULL ; 
    116       windows[1]=MPI_WIN_NULL ; 
     103      winComm_.resize(clientSize_) ; 
     104      windows_.resize(clientSize_) ; 
     105      for(int rank=0; rank<clientSize_ ; rank++)  
     106      { 
     107        winComm_[rank] = MPI_COMM_NULL ; 
     108        windows_[rank].resize(2) ; 
     109        windows_[rank][0]=MPI_WIN_NULL ; 
     110        windows_[rank][1]=MPI_WIN_NULL ; 
     111      } 
    117112    } 
    118113     
     
    160155    return finished; 
    161156  } 
    162 /* 
    163   void CContextServer::listen(void) 
    164   { 
    165     int rank; 
    166     int flag; 
    167     int count; 
    168     char * addr; 
    169     MPI_Status status; 
    170     map<int,CServerBuffer*>::iterator it; 
    171     bool okLoop; 
    172  
    173     traceOff(); 
    174     // WARNING : with intel MPI, probing crash on an intercommunicator with release library but not with release_mt 
    175     // ==>  source $I_MPI_ROOT/intel64/bin/mpivars.sh release_mt    needed 
    176     MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status); 
    177     traceOn(); 
    178  
    179     if (flag==true) 
    180     { 
    181       rank=status.MPI_SOURCE ; 
    182       okLoop = true; 
    183       if (pendingRequest.find(rank)==pendingRequest.end()) 
    184         okLoop = !listenPendingRequest(status) ; 
    185       if (okLoop) 
    186       { 
    187         for(rank=0;rank<commSize;rank++) 
    188         { 
    189           if (pendingRequest.find(rank)==pendingRequest.end()) 
    190           { 
    191  
    192             traceOff(); 
    193             MPI_Iprobe(rank, 20,interComm,&flag,&status); 
    194             traceOn(); 
    195             if (flag==true) listenPendingRequest(status) ; 
    196           } 
    197         } 
    198       } 
    199     } 
    200   } 
    201  
    202   bool CContextServer::listenPendingRequest(MPI_Status& status) 
    203   { 
    204     int count; 
    205     char * addr; 
    206     map<int,CServerBuffer*>::iterator it; 
    207     int rank=status.MPI_SOURCE ; 
    208  
    209     it=buffers.find(rank); 
    210     if (it==buffers.end()) // Receive the buffer size and allocate the buffer 
    211     { 
    212        MPI_Aint recvBuff[4] ; 
    213        MPI_Recv(recvBuff, 4, MPI_AINT, rank, 20, interComm, &status); 
    214        remoteHashId_ = recvBuff[0] ; 
    215        StdSize buffSize = recvBuff[1]; 
    216        vector<MPI_Aint> winAdress(2) ; 
    217        winAdress[0]=recvBuff[2] ; winAdress[1]=recvBuff[3] ; 
    218        mapBufferSize_.insert(std::make_pair(rank, buffSize)); 
    219        it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows, winAdress, rank, buffSize)))).first; 
    220       
    221        lastTimeLine[rank]=0 ; 
    222        itLastTimeLine=lastTimeLine.begin() ; 
    223  
    224        return true; 
    225     } 
    226     else 
    227     { 
    228       MPI_Get_count(&status,MPI_CHAR,&count); 
    229       if (it->second->isBufferFree(count)) 
    230       { 
    231          addr=(char*)it->second->getBuffer(count); 
    232          MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 
    233          bufferRequest[rank]=addr; 
    234          return true; 
    235        } 
    236       else 
    237         return false; 
    238     } 
    239   } 
    240 */ 
    241157 
    242158 void CContextServer::listen(void) 
     
    274190       winAdress[0]=recvBuff[2] ; winAdress[1]=recvBuff[3] ; 
    275191       mapBufferSize_.insert(std::make_pair(rank, buffSize)); 
    276        it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows, winAdress, rank, buffSize)))).first; 
     192       it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows_[rank], winAdress, 0, buffSize)))).first; 
    277193       lastTimeLine[rank]=0 ; 
    278194       itLastTimeLine=lastTimeLine.begin() ; 
     
    407323        newBuffer>>newSize>>winAdress[0]>>winAdress[1] ; 
    408324        buffers.erase(rank) ; 
    409         buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows, winAdress, rank, newSize))); 
     325        buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(windows_[rank], winAdress, 0, newSize))); 
    410326      } 
    411327      else 
     
    500416  void CContextServer::releaseBuffers() 
    501417  { 
    502     map<int,CServerBuffer*>::iterator it; 
    503     bool out ; 
    504     do 
    505     { 
    506       out=true ; 
    507       for(it=buffers.begin();it!=buffers.end();++it) 
    508       { 
    509 //        out = out && it->second->freeWindows() ; 
    510  
    511       } 
    512     } while (! out) ;  
    513       MPI_Win_free(&windows[0]) ; 
    514       MPI_Win_free(&windows[1]) ; 
     418    for(auto it=buffers.begin();it!=buffers.end();++it) delete it->second ; 
     419    buffers.clear() ;  
     420    freeWindows() ; 
     421  } 
     422 
     423  void CContextServer::freeWindows() 
     424  { 
     425    if (!isAttachedModeEnabled()) 
     426    { 
     427      for(int rank=0; rank<clientSize_; rank++) 
     428      { 
     429        MPI_Win_free(&windows_[rank][0]); 
     430        MPI_Win_free(&windows_[rank][1]); 
     431        MPI_Comm_free(&winComm_[rank]) ; 
     432      } 
     433    } 
    515434  } 
    516435 
     
    538457      finished=true; 
    539458      info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 
    540 //      releaseBuffers() ; 
    541459      notifyClientsFinalize() ; 
    542460      CTimer::get("receiving requests").suspend(); 
    543461      context->finalize(); 
    544  
    545 // don't know where release windows 
    546       MPI_Win_free(&windows[0]) ; 
    547       MPI_Win_free(&windows[1]) ; 
     462      freeWindows() ; 
    548463 
    549464      std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(), 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/context_server.hpp

    r2246 r2258  
    3434    void releaseBuffers(void) ; 
    3535    void notifyClientsFinalize(void) ; 
     36    void freeWindows(void) ; // !<< free Windows for one sided communication 
    3637     
    3738    MPI_Comm intraComm ; 
     
    4142    MPI_Comm interComm ; 
    4243    int commSize ; 
     44    int clientSize_ ; 
    4345 
    4446    MPI_Comm interCommMerged; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication. 
     
    7173 
    7274      std::map<int, StdSize> mapBufferSize_; 
    73       vector<MPI_Win> windows ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side. 
     75      std::vector<MPI_Comm>winComm_ ; //! Window communicators 
     76      std::vector<std::vector<MPI_Win> >windows_ ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side. 
    7477      CEventScheduler* eventScheduler_ ; 
    7578      bool isProcessingEvent_ ; 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/contexts_manager.cpp

    r2246 r2258  
    109109  void CContextsManager::sendNotification(int rank) 
    110110  { 
    111     winNotify_->lockWindow(rank,0) ; 
    112     winNotify_->pushToWindow(rank, this, &CContextsManager::notificationsDumpOut) ; 
    113     winNotify_->unlockWindow(rank,0) ; 
     111    winNotify_->lockWindowExclusive(rank) ; 
     112    winNotify_->pushToLockedWindow(rank, this, &CContextsManager::notificationsDumpOut) ; 
     113    winNotify_->unlockWindow(rank) ; 
    114114  } 
    115115 
     
    168168    int commRank ; 
    169169    MPI_Comm_rank(xiosComm_, &commRank) ; 
    170     winNotify_->lockWindow(commRank,0) ; 
    171     winNotify_->popFromWindow(commRank, this, &CContextsManager::notificationsDumpIn) ; 
    172     winNotify_->unlockWindow(commRank,0) ; 
     170    winNotify_->lockWindowExclusive(commRank) ; 
     171    winNotify_->popFromLockedWindow(commRank, this, &CContextsManager::notificationsDumpIn) ; 
     172    winNotify_->unlockWindow(commRank) ; 
    173173    if (notifyType_==NOTIFY_CREATE_CONTEXT) createServerContext() ; 
    174174    else if (notifyType_==NOTIFY_CREATE_INTERCOMM) createServerContextIntercomm() ; 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/ressources_manager.cpp

    r2246 r2258  
    7272  void CRessourcesManager::sendNotification(int rank) 
    7373  { 
    74     winNotify_->lockWindow(rank,0) ; 
    75     winNotify_->pushToWindow(rank, this, &CRessourcesManager::notificationsDumpOut) ; 
    76     winNotify_->unlockWindow(rank,0) ; 
     74    winNotify_->lockWindowExclusive(rank) ; 
     75    winNotify_->pushToLockedWindow(rank, this, &CRessourcesManager::notificationsDumpOut) ; 
     76    winNotify_->unlockWindow(rank) ; 
    7777  } 
    7878 
     
    128128    MPI_Comm_rank(xiosComm_, &commRank) ; 
    129129    CTimer::get("CRessourcesManager::checkNotifications lock").resume(); 
    130     winNotify_->lockWindow(commRank,0) ; 
     130    winNotify_->lockWindowExclusive(commRank) ; 
    131131    CTimer::get("CRessourcesManager::checkNotifications lock").suspend(); 
    132132    CTimer::get("CRessourcesManager::checkNotifications pop").resume(); 
    133     winNotify_->popFromWindow(commRank, this, &CRessourcesManager::notificationsDumpIn) ; 
     133    winNotify_->popFromLockedWindow(commRank, this, &CRessourcesManager::notificationsDumpIn) ; 
    134134    CTimer::get("CRessourcesManager::checkNotifications pop").suspend(); 
    135135    CTimer::get("CRessourcesManager::checkNotifications unlock").resume(); 
    136     winNotify_->unlockWindow(commRank,0) ; 
     136    winNotify_->unlockWindow(commRank) ; 
    137137    CTimer::get("CRessourcesManager::checkNotifications unlock").suspend(); 
    138138    if (notifyType_==NOTIFY_CREATE_POOL) createPool() ; 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/server_context.cpp

    r2246 r2258  
    160160  void CServerContext::sendNotification(int rank) 
    161161  { 
    162     winNotify_->lockWindow(rank,0) ; 
    163     winNotify_->pushToWindow(rank, this, &CServerContext::notificationsDumpOut) ; 
    164     winNotify_->unlockWindow(rank,0) ; 
     162    winNotify_->lockWindowExclusive(rank) ; 
     163    winNotify_->pushToLockedWindow(rank, this, &CServerContext::notificationsDumpOut) ; 
     164    winNotify_->unlockWindow(rank) ; 
    165165  } 
    166166 
     
    201201        int commRank ; 
    202202        MPI_Comm_rank(contextComm_, &commRank) ; 
    203         winNotify_->lockWindow(commRank,0) ; 
    204         winNotify_->popFromWindow(commRank, this, &CServerContext::notificationsDumpIn) ; 
    205         winNotify_->unlockWindow(commRank,0) ; 
     203        winNotify_->lockWindowExclusive(commRank) ; 
     204        winNotify_->popFromLockedWindow(commRank, this, &CServerContext::notificationsDumpIn) ; 
     205        winNotify_->unlockWindow(commRank) ; 
    206206       
    207207        if (notifyInType_!= NOTIFY_NOTHING) 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/services.cpp

    r2246 r2258  
    131131  void CService::sendNotification(int rank) 
    132132  { 
    133     winNotify_->lockWindow(rank,0) ; 
    134     winNotify_->pushToWindow(rank, this, &CService::notificationsDumpOut) ; 
    135     winNotify_->unlockWindow(rank,0) ; 
     133    winNotify_->lockWindowExclusive(rank) ; 
     134    winNotify_->pushToLockedWindow(rank, this, &CService::notificationsDumpOut) ; 
     135    winNotify_->unlockWindow(rank) ; 
    136136  } 
    137137 
     
    175175        int commRank ; 
    176176        MPI_Comm_rank(serviceComm_, &commRank) ; 
    177         winNotify_->lockWindow(commRank,0) ; 
    178         winNotify_->popFromWindow(commRank, this, &CService::notificationsDumpIn) ; 
    179         winNotify_->unlockWindow(commRank,0) ; 
     177        winNotify_->lockWindowExclusive(commRank) ; 
     178        winNotify_->popFromLockedWindow(commRank, this, &CService::notificationsDumpIn) ; 
     179        winNotify_->unlockWindow(commRank) ; 
    180180       
    181181        if (notifyInType_!= NOTIFY_NOTHING) 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/manager/window_manager.hpp

    r2246 r2258  
    189189    } 
    190190 
     191    template< class T > 
     192    void pushToLockedWindow(int rank, T* object, void (T::*dumpOut)(CBufferOut&) ) 
     193    { 
     194      size_t size ; 
     195      MPI_Get(&size, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 
     196      MPI_Win_flush(rank,window_) ; 
     197      CBufferOut buffer ; 
     198      (object->*dumpOut)(buffer) ; 
     199      size_t bufferSize=buffer.count() ; 
     200      size_t newSize = size + bufferSize; 
     201      MPI_Put(&newSize, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 
     202      MPI_Put(buffer.start(), bufferSize, MPI_CHAR, rank, OFFSET_BUFFER+size, bufferSize, MPI_CHAR, window_) ; 
     203    } 
     204 
    191205    template< typename T > 
    192206    void popFromWindow(int rank, T* object, void (T::*dumpIn)(CBufferIn&) )  
     
    206220      MPI_Win_unlock(rank, window_) ; 
    207221       
     222    } 
     223 
     224    template< typename T > 
     225    void popFromLockedWindow(int rank, T* object, void (T::*dumpIn)(CBufferIn&) )  
     226    { 
     227      size_t size ; 
     228      MPI_Get(&size, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 
     229      MPI_Win_flush(rank,window_) ; 
     230      CBufferIn buffer(size) ; 
     231      MPI_Get(buffer.start(), size, MPI_CHAR, rank,OFFSET_BUFFER, size, MPI_CHAR, window_) ; 
     232      MPI_Win_flush(rank,window_) ; 
     233      (object->*dumpIn)(buffer) ; 
     234       
     235      size=buffer.remain() ; 
     236      MPI_Put(&size, SIZE_BUFFER_SIZE, MPI_CHAR, rank, OFFSET_BUFFER_SIZE, SIZE_BUFFER_SIZE, MPI_CHAR, window_) ; 
     237      MPI_Put(buffer.ptr(),buffer.remain(), MPI_CHAR, rank, OFFSET_BUFFER, buffer.remain(), MPI_CHAR, window_) ; 
    208238    } 
    209239 
  • XIOS/dev/dev_ym/XIOS_COUPLING/src/node/context.cpp

    r2240 r2258  
    742742          notifiedFinalized=client->isNotifiedFinalized() ; 
    743743        } while (!notifiedFinalized) ; 
     744 
    744745        server->releaseBuffers(); 
    745746        client->releaseBuffers(); 
Note: See TracChangeset for help on using the changeset viewer.