#include "p2p_context_server.hpp"
#include "buffer_in.hpp"
#include "type.hpp"
#include "context.hpp"
#include "object_template.hpp"
#include "group_template.hpp"
#include "attribute_template.hpp"
#include "domain.hpp"
#include "field.hpp"
#include "file.hpp"
#include "grid.hpp"
#include "mpi.hpp"
#include "tracer.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "event_scheduler.hpp"
#include "server.hpp"
#include "servers_ressource.hpp"
#include "pool_ressource.hpp"
#include "services.hpp"
#include "contexts_manager.hpp"
#include "timeline_events.hpp"

#include <boost/functional/hash.hpp>
#include <random>
#include <chrono>

namespace xios
{
  using namespace std ;
  extern CLogType logTimers ;
  extern CLogType logProfile ;

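  /*!
    Construct the point-to-point context server.
    The intra-communicator is duplicated for the event-processing barrier, the
    client/server inter-communicator is merged into interCommMerged_, and a
    per-rank "self" communicator is split off for the one-sided windows.
  */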
  CP2pContextServer::CP2pContextServer(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_)
    : CContextServer(parent, intraComm_, interComm_),
      isProcessingEvent_(false)
  {
    xios::MPI_Comm_dup(intraComm, &processEventBarrier_) ;
    CXios::getMpiGarbageCollector().registerCommunicator(processEventBarrier_) ;

    currentTimeLine=1;
    scheduled=false;
    finished=false;

    xios::MPI_Intercomm_merge(interComm_,true,&interCommMerged_) ;
    CXios::getMpiGarbageCollector().registerCommunicator(interCommMerged_) ;
    xios::MPI_Comm_split(intraComm_, intraCommRank, intraCommRank, &commSelf_) ; // self communicator, used for the MPI windows
    CXios::getMpiGarbageCollector().registerCommunicator(commSelf_) ;

    itLastTimeLine=lastTimeLine.begin() ;

    pureOneSided=CXios::getin<bool>("pure_one_sided",false); // pure one-sided communication (for testing)
  }

  void CP2pContextServer::setPendingEvent(void)
  {
    pendingEvent=true;
  }

  bool CP2pContextServer::hasPendingEvent(void)
  {
    return (!pendingEvents_.empty()) || (!completedEvents_.empty());
  }

  bool CP2pContextServer::hasFinished(void)
  {
    return finished;
  }

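  /*!
    Run one iteration of the server event loop: probe for incoming client
    requests, retry requests that are still pending, let the per-client buffers
    make progress, then try to process the next complete event.
    \return true once the context has been finalized.
  */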
  bool CP2pContextServer::eventLoop(bool enableEventsProcessing /*= true*/)
  {
    if (info.isActive(logProfile)) CTimer::get("Recv event loop (p2p)").resume();

    if (info.isActive(logTimers)) CTimer::get("listen request").resume();
    listen();
    if (info.isActive(logTimers)) CTimer::get("listen request").suspend();

    if (info.isActive(logTimers)) CTimer::get("listen pending request").resume();
    listenPendingRequest() ;
    if (info.isActive(logTimers)) CTimer::get("listen pending request").suspend();

    if (info.isActive(logTimers)) CTimer::get("check server Buffers").resume();
    checkBuffers() ;
    if (info.isActive(logTimers)) CTimer::get("check server Buffers").suspend();

    if (info.isActive(logTimers)) CTimer::get("check event process").resume();
    processEvents(enableEventsProcessing);
    if (info.isActive(logTimers)) CTimer::get("check event process").suspend();

    if (info.isActive(logProfile)) CTimer::get("Recv event loop (p2p)").suspend();
    return finished;
  }

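  /*!
    Probe the merged inter-communicator for incoming client messages (tag 20).
    Each new message is appended to the request list of its sender; the head of
    that list is tested immediately, so requests from a given client are always
    treated in arrival order.
  */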
  void CP2pContextServer::listen(void)
  {
    int flag=true;
    MPI_Status status;

    while(flag)
    {
      traceOff();
      MPI_Iprobe(MPI_ANY_SOURCE, 20, interCommMerged_, &flag, &status);
      traceOn();
      if (flag)
      {
        int rank=status.MPI_SOURCE ;
        auto& rankRequests = requests_[rank];
        rankRequests.push_back(new CRequest(interCommMerged_, status)) ;
        // Test the first request of the list: request treatment must be ordered
        if (rankRequests.front()->test())
        {
          processRequest( *(rankRequests.front()) );
          delete rankRequests.front();
          rankRequests.pop_front() ;
        }
      }
    }
  }

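  /*!
    Drain, in order, each per-rank request list: completed requests at the head
    of a list are handed to processRequest() until an unfinished one is met.
  */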
  void CP2pContextServer::listenPendingRequest(void)
  {
    for(auto it_rank=requests_.begin() ; it_rank!=requests_.end() ; ++it_rank)
    {
      auto& rankRequests = it_rank->second;
      while ( (!rankRequests.empty()) && (rankRequests.front()->test()) )
      {
        processRequest( *(rankRequests.front()) );
        delete rankRequests.front();
        rankRequests.pop_front() ;
      }
    }
  }

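  /*!
    Route a completed request to the buffer of its sender: create a new
    CP2pServerBuffer on the first request from a rank, otherwise forward the
    payload to the existing buffer.
  */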
  void CP2pContextServer::processRequest(CRequest& request)
  {
    int rank = request.getRank() ;
    auto it=buffers_.find(rank);
    if (it==buffers_.end())
    {
      buffers_[rank] = new CP2pServerBuffer(rank, commSelf_, interCommMerged_, pendingEvents_, completedEvents_, request.getBuffer()) ;
    }
    else
    {
      it->second->receivedRequest(request.getBuffer()) ;
    }
  }

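  /*!
    Let every buffer attached to a pending event make progress, and discard
    events for which all senders have completed.
  */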
  void CP2pContextServer::checkBuffers(void)
  {
    if (!pendingEvents_.empty())
    {
      for(auto it=pendingEvents_.begin() ; it!=pendingEvents_.end() ;)
      {
        SPendingEvent& nextEvent = it->second ;
        for(auto& buffer : nextEvent.buffers) buffer->eventLoop() ;
        if (nextEvent.nbSenders==0) it=pendingEvents_.erase(it) ;
        else ++it ;
      }
    }
  }


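  /*!
    Try to process the event of the current timeline. The event is first
    registered with the event scheduler; once the scheduler grants it, a
    non-blocking barrier on the duplicated intra-communicator synchronises the
    server processes before the event is assembled from the contributing
    buffers and dispatched.
  */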
  void CP2pContextServer::processEvents(bool enableEventsProcessing)
  {
    if (isProcessingEvent_) return ;

    auto it=completedEvents_.find(currentTimeLine);

    if (it!=completedEvents_.end())
    {
      if (it->second.nbSenders == it->second.currentNbSenders)
      {
        if (!scheduled)
        {
          eventScheduler_->registerEvent(currentTimeLine,hashId);
          scheduled=true;
        }
        else if (eventScheduler_->queryEvent(currentTimeLine,hashId))
        {
          // Note: enableEventsProcessing is currently ignored; the check below is disabled.
          //if (!enableEventsProcessing && isCollectiveEvent(event)) return ;

          if (!eventScheduled_)
          {
            MPI_Ibarrier(processEventBarrier_,&processEventRequest_) ;
            eventScheduled_=true ;
            return ;
          }
          else
          {
            MPI_Status status ;
            int flag ;
            MPI_Test(&processEventRequest_, &flag, &status) ;
            if (!flag) return ;
            eventScheduled_=false ;
          }

          eventScheduler_->popEvent() ;

          isProcessingEvent_=true ;
          CEventServer event(this) ;
          for(auto& buffer : it->second.buffers) buffer->fillEventServer(currentTimeLine, event) ;
          CTimer::get("Process events").resume();
          info(100)<<"Context id "<<context->getId()<<" : Process Event "<<currentTimeLine<<" of class "<<event.classId<<" of type "<<event.type<<endl ;
          dispatchEvent(event);
          CTimer::get("Process events").suspend();
          isProcessingEvent_=false ;
          pendingEvent=false;
          completedEvents_.erase(it);
          currentTimeLine++;
          scheduled = false;
        }
      }
    }
  }

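  //! Delete all per-client buffers owned by the server.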
  CP2pContextServer::~CP2pContextServer()
  {
    for(auto& buffer : buffers_) delete buffer.second;
    buffers_.clear() ;
  }

  void CP2pContextServer::releaseBuffers()
  {
    freeWindows() ;
  }

  void CP2pContextServer::freeWindows()
  {
    // Window freeing is currently disabled:
    // for(auto& it : winComm_)
    // {
    //   int rank = it.first ;
    //   MPI_Win_free(&windows_[rank][0]);
    //   MPI_Win_free(&windows_[rank][1]);
    //   xios::MPI_Comm_free(&winComm_[rank]) ;
    // }
  }

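  //! Notify, through its buffer, each connected client that the server has taken the finalization into account.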
  void CP2pContextServer::notifyClientsFinalize(void)
  {
    for(auto it=buffers_.begin();it!=buffers_.end();++it)
    {
      it->second->notifyClientFinalize() ;
    }
  }

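  /*!
    Dispatch a complete event to the class it targets. The context-finalize
    event is handled here directly: it notifies the clients, finalizes the
    context and reports the memory used by the connection buffers.
  */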
  void CP2pContextServer::dispatchEvent(CEventServer& event)
  {
    StdString ctxId = context->getId();
    CContext::setCurrent(ctxId);
    StdSize totalBuf = 0;

    if (event.classId==CContext::GetType() && event.type==CContext::EVENT_ID_CONTEXT_FINALIZE)
    {
      CTimer::get("Context finalize").resume();
      finished=true;
      info(20)<<" CP2pContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl;
      notifyClientsFinalize() ;
      if (info.isActive(logTimers)) CTimer::get("receiving requests").suspend();
      context->finalize();

      std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                             iteMap = mapBufferSize_.end(), itMap;
      for (itMap = itbMap; itMap != iteMap; ++itMap)
      {
        int rank = itMap->first;
        report(10)<<" Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client"<<endl
                  <<"  +) With client of rank "<<rank<<" : "<<itMap->second<<" bytes "<<endl;
        totalBuf += itMap->second;
      }
      report(0)<<" Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;
      CTimer::get("Context finalize").suspend();
    }
    else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event);
    else if (event.classId==CContextGroup::GetType()) CContextGroup::dispatchEvent(event);
    else if (event.classId==CCalendarWrapper::GetType()) CCalendarWrapper::dispatchEvent(event);
    else if (event.classId==CDomain::GetType()) CDomain::dispatchEvent(event);
    else if (event.classId==CDomainGroup::GetType()) CDomainGroup::dispatchEvent(event);
    else if (event.classId==CAxis::GetType()) CAxis::dispatchEvent(event);
    else if (event.classId==CAxisGroup::GetType()) CAxisGroup::dispatchEvent(event);
    else if (event.classId==CScalar::GetType()) CScalar::dispatchEvent(event);
    else if (event.classId==CScalarGroup::GetType()) CScalarGroup::dispatchEvent(event);
    else if (event.classId==CGrid::GetType()) CGrid::dispatchEvent(event);
    else if (event.classId==CGridGroup::GetType()) CGridGroup::dispatchEvent(event);
    else if (event.classId==CField::GetType()) CField::dispatchEvent(event); // data updates and other field events take the same path
    else if (event.classId==CFieldGroup::GetType()) CFieldGroup::dispatchEvent(event);
    else if (event.classId==CFile::GetType()) CFile::dispatchEvent(event);
    else if (event.classId==CFileGroup::GetType()) CFileGroup::dispatchEvent(event);
    else if (event.classId==CVariable::GetType()) CVariable::dispatchEvent(event);
    else
    {
      ERROR("void CP2pContextServer::dispatchEvent(CEventServer& event)", <<" Bad event class Id"<<endl);
    }
  }

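  /*!
    Tell whether an event requires collective treatment by all servers.
    Point-to-point events are assigned type ids above 1000.
  */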
  bool CP2pContextServer::isCollectiveEvent(CEventServer& event)
  {
    return (event.type<=1000) ;
  }
}