Project
Loading...
Searching...
No Matches
DataProcessingDevice.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
13#include <atomic>
33#include "Framework/InputSpan.h"
34#if defined(__APPLE__) || defined(NDEBUG)
35#define O2_SIGNPOST_IMPLEMENTATION
36#endif
37#include "Framework/Signpost.h"
50
51#include "DecongestionService.h"
53#include "DataRelayerHelpers.h"
54#include "Headers/DataHeader.h"
56
57#include <Framework/Tracing.h>
58
59#include <fairmq/Parts.h>
60#include <fairmq/Socket.h>
61#include <fairmq/ProgOptions.h>
62#include <fairmq/shmem/Message.h>
63#include <Configuration/ConfigurationInterface.h>
64#include <Configuration/ConfigurationFactory.h>
65#include <Monitoring/Monitoring.h>
66#include <TMessage.h>
67#include <TClonesArray.h>
68
69#include <fmt/ostream.h>
70#include <algorithm>
71#include <vector>
72#include <numeric>
73#include <memory>
74#include <uv.h>
75#include <execinfo.h>
76#include <sstream>
77#include <boost/property_tree/json_parser.hpp>
78
79// Formatter to avoid having to rewrite the ostream operator for the enum
80namespace fmt
81{
82template <>
85} // namespace fmt
86
87// A log to use for general device logging
89// A log to use for general device logging
91// Special log to keep track of the lifetime of the parts
93// Stream which keeps track of the calibration lifetime logic
95// Special log to track the async queue behavior
97// Special log to track the forwarding requests
99// Special log to track CCDB related requests
101// Special log to track task scheduling
103
104using namespace o2::framework;
105using ConfigurationInterface = o2::configuration::ConfigurationInterface;
107
108constexpr int DEFAULT_MAX_CHANNEL_AHEAD = 128;
109
110namespace o2::framework
111{
112
113template <>
117
121{
122 auto* state = (DeviceState*)handle->data;
123 state->loopReason |= DeviceState::TIMER_EXPIRED;
124}
125
127{
128 auto* state = (DeviceState*)s->data;
130}
131
132DeviceSpec const& getRunningDevice(RunningDeviceRef const& running, ServiceRegistryRef const& services)
133{
134 auto& devices = services.get<o2::framework::RunningWorkflowInfo const>().devices;
135 return devices[running.index];
136}
137
143
145 : mRunningDevice{running},
146 mConfigRegistry{nullptr},
147 mServiceRegistry{registry}
148{
149 GetConfig()->Subscribe<std::string>("dpl", [&registry = mServiceRegistry](const std::string& key, std::string value) {
150 if (key == "cleanup") {
152 auto& deviceState = ref.get<DeviceState>();
153 int64_t cleanupCount = deviceState.cleanupCount.load();
154 int64_t newCleanupCount = std::stoll(value);
155 if (newCleanupCount <= cleanupCount) {
156 return;
157 }
158 deviceState.cleanupCount.store(newCleanupCount);
159 for (auto& info : deviceState.inputChannelInfos) {
160 fair::mq::Parts parts;
161 while (info.channel->Receive(parts, 0)) {
162 LOGP(debug, "Dropping {} parts", parts.Size());
163 if (parts.Size() == 0) {
164 break;
165 }
166 }
167 }
168 }
169 });
170
171 std::function<void(const fair::mq::State)> stateWatcher = [this, &registry = mServiceRegistry](const fair::mq::State state) -> void {
173 auto& deviceState = ref.get<DeviceState>();
174 auto& control = ref.get<ControlService>();
175 auto& callbacks = ref.get<CallbackService>();
176 control.notifyDeviceState(fair::mq::GetStateName(state));
178
179 if (deviceState.nextFairMQState.empty() == false) {
180 auto state = deviceState.nextFairMQState.back();
181 (void)this->ChangeState(state);
182 deviceState.nextFairMQState.pop_back();
183 }
184 };
185
186 // 99 is to execute DPL callbacks last
187 this->SubscribeToStateChange("99-dpl", stateWatcher);
188
189 // One task for now.
190 mStreams.resize(1);
191 mHandles.resize(1);
192
193 ServiceRegistryRef ref{mServiceRegistry};
194
195 mAwakeHandle = (uv_async_t*)malloc(sizeof(uv_async_t));
196 auto& state = ref.get<DeviceState>();
197 assert(state.loop);
198 int res = uv_async_init(state.loop, mAwakeHandle, on_communication_requested);
199 mAwakeHandle->data = &state;
200 if (res < 0) {
201 LOG(error) << "Unable to initialise subscription";
202 }
203
205 SubscribeToNewTransition("dpl", [wakeHandle = mAwakeHandle](fair::mq::Transition t) {
206 int res = uv_async_send(wakeHandle);
207 if (res < 0) {
208 LOG(error) << "Unable to notify subscription";
209 }
210 LOG(debug) << "State transition requested";
211 });
212}
213
// Callback to execute the processing. Notice how the data is
// a vector of DataProcessorContext so that we can index the correct
// one with the thread id. For the moment we simply use the first one.
// NOTE(review): the listing this file was recovered from elides the lines
// between the two signposts (original lines 225-226), which presumably invoke
// the actual data processing — confirm against the upstream source.
void run_callback(uv_work_t* handle)
{
  auto* task = (TaskStreamInfo*)handle->data;
  // Access the registry with a stream-specific salt (index + 1), so the
  // services resolved below are the per-stream instances.
  auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)};
  // We create a new signpost interval for this specific data processor. Same id, same data processor.
  auto& dataProcessorContext = ref.get<DataProcessorContext>();
  O2_SIGNPOST_ID_FROM_POINTER(sid, device, &dataProcessorContext);
  O2_SIGNPOST_START(device, sid, "run_callback", "Starting run callback on stream %d", task->id.index);
  O2_SIGNPOST_END(device, sid, "run_callback", "Done processing data for stream %d", task->id.index);
}
229
230// Once the processing in a thread is done, this is executed on the main thread.
/// Executed on the main (libuv) thread once run_callback() has finished on a
/// worker thread. Accounts for the computing-quota offers consumed by the
/// stream, retires expired offers and marks the task stream as idle again.
/// @param handle uv work handle; its data field carries the TaskStreamInfo.
/// @param status libuv completion status (unused here).
void run_completion(uv_work_t* handle, int status)
{
  auto* task = (TaskStreamInfo*)handle->data;
  // Notice that the completion, while running on the main thread, still
  // has a salt which is associated to the actual stream which was doing the computation
  auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)};
  auto& state = ref.get<DeviceState>();
  auto& quotaEvaluator = ref.get<ComputingQuotaEvaluator>();

  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;

  // NOTE(review): both reporter lambdas are `static`, so they capture the
  // `ref` of the first invocation only — presumably fine since the registry
  // is the same for the lifetime of the device, but worth confirming.
  static std::function<void(ComputingQuotaOffer const&, ComputingQuotaStats&)> reportConsumedOffer = [ref](ComputingQuotaOffer const& accumulatedConsumed, ComputingQuotaStats& stats) {
    auto& dpStats = ref.get<DataProcessingStats>();
    stats.totalConsumedBytes += accumulatedConsumed.sharedMemory;
    // For now we give back the offer if we did not use it completely.
    // In principle we should try to run until the offer is fully consumed.
    stats.totalConsumedTimeslices += std::min<int64_t>(accumulatedConsumed.timeslices, 1);

    dpStats.updateStats({static_cast<short>(ProcessingStatsId::SHM_OFFER_BYTES_CONSUMED), DataProcessingStats::Op::Set, stats.totalConsumedBytes});
    dpStats.updateStats({static_cast<short>(ProcessingStatsId::TIMESLICE_OFFER_NUMBER_CONSUMED), DataProcessingStats::Op::Set, stats.totalConsumedTimeslices});
    dpStats.processCommandQueue();
    // The published metrics must mirror the accumulated totals exactly.
    assert(stats.totalConsumedBytes == dpStats.metrics[(short)ProcessingStatsId::SHM_OFFER_BYTES_CONSUMED]);
    assert(stats.totalConsumedTimeslices == dpStats.metrics[(short)ProcessingStatsId::TIMESLICE_OFFER_NUMBER_CONSUMED]);
  };

  // Publishes the running totals of expired offers/bytes/timeslices.
  static std::function<void(ComputingQuotaOffer const&, ComputingQuotaStats const&)> reportExpiredOffer = [ref](ComputingQuotaOffer const& offer, ComputingQuotaStats const& stats) {
    auto& dpStats = ref.get<DataProcessingStats>();
    dpStats.updateStats({static_cast<short>(ProcessingStatsId::RESOURCE_OFFER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredOffers});
    dpStats.updateStats({static_cast<short>(ProcessingStatsId::ARROW_BYTES_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredBytes});
    dpStats.updateStats({static_cast<short>(ProcessingStatsId::TIMESLICE_NUMBER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredTimeslices});
    dpStats.processCommandQueue();
  };

  // Consume the offers claimed by this stream, then retire whatever expired.
  for (auto& consumer : state.offerConsumers) {
    quotaEvaluator.consume(task->id.index, consumer, reportConsumedOffer);
  }
  state.offerConsumers.clear();
  quotaEvaluator.handleExpired(reportExpiredOffer);
  quotaEvaluator.dispose(task->id.index);
  // Mark this task stream as no longer running.
  task->running = false;
}
275
276// Context for polling
278 enum struct PollerState : char { Stopped,
280 Connected,
281 Suspended };
282 char const* name = nullptr;
283 uv_loop_t* loop = nullptr;
285 DeviceState* state = nullptr;
286 fair::mq::Socket* socket = nullptr;
288 int fd = -1;
289 bool read = true;
291};
292
293void on_socket_polled(uv_poll_t* poller, int status, int events)
294{
295 auto* context = (PollerContext*)poller->data;
296 assert(context);
297 O2_SIGNPOST_ID_FROM_POINTER(sid, sockets, poller);
298 context->state->loopReason |= DeviceState::DATA_SOCKET_POLLED;
299 switch (events) {
300 case UV_READABLE: {
301 O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
302 context->state->loopReason |= DeviceState::DATA_INCOMING;
303 } break;
304 case UV_WRITABLE: {
305 O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket connected for channel %{public}s", context->name);
306 if (context->read) {
307 O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for read in context %{public}s", context->name);
308 uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
309 context->state->loopReason |= DeviceState::DATA_CONNECTED;
310 } else {
311 O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name);
312 context->state->loopReason |= DeviceState::DATA_OUTGOING;
313 // If the socket is writable, fairmq will handle the rest, so we can stop polling and
314 // just wait for the disconnect.
315 uv_poll_start(poller, UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
316 }
317 context->pollerState = PollerContext::PollerState::Connected;
318 } break;
319 case UV_DISCONNECT: {
320 O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket disconnected in context %{public}s", context->name);
321 } break;
322 case UV_PRIORITIZED: {
323 O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Socket prioritized for context %{public}s", context->name);
324 } break;
325 }
326 // We do nothing, all the logic for now stays in DataProcessingDevice::doRun()
327}
328
/// libuv callback for out-of-band channel sockets. Mirrors on_socket_polled()
/// but additionally flags readPolled on the channel info and re-arms the
/// poller for writability on disconnect so a reconnecting peer is noticed.
void on_out_of_band_polled(uv_poll_t* poller, int status, int events)
{
  O2_SIGNPOST_ID_FROM_POINTER(sid, sockets, poller);
  auto* context = (PollerContext*)poller->data;
  context->state->loopReason |= DeviceState::OOB_ACTIVITY;
  if (status < 0) {
    // NOTE(review): LOGP(fatal, ...) is presumably expected not to return;
    // if it ever does, execution falls through to the switch below with the
    // same `events` value — confirm this is intended.
    LOGP(fatal, "Error while polling {}: {}", context->name, status);
    uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
  }
  switch (events) {
    case UV_READABLE: {
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
      context->state->loopReason |= DeviceState::DATA_INCOMING;
      assert(context->channelInfo);
      // Remember that a read was polled so doRun() knows data may be waiting.
      context->channelInfo->readPolled = true;
    } break;
    case UV_WRITABLE: {
      O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name);
      if (context->read) {
        O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name);
        uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_out_of_band_polled);
      } else {
        O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name);
        context->state->loopReason |= DeviceState::DATA_OUTGOING;
      }
    } break;
    case UV_DISCONNECT: {
      O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name);
      // Go back to waiting for writability, i.e. for the peer to reconnect.
      uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
    } break;
    case UV_PRIORITIZED: {
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name);
    } break;
  }
  // We do nothing, all the logic for now stays in DataProcessingDevice::doRun()
}
365
374{
375 auto ref = ServiceRegistryRef{mServiceRegistry};
376 auto& context = ref.get<DataProcessorContext>();
377 auto& spec = getRunningDevice(mRunningDevice, ref);
378
379 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
380 O2_SIGNPOST_START(device, cid, "Init", "Entering Init callback.");
381 context.statelessProcess = spec.algorithm.onProcess;
382 context.statefulProcess = nullptr;
383 context.error = spec.algorithm.onError;
384 context.initError = spec.algorithm.onInitError;
385
386 auto configStore = DeviceConfigurationHelpers::getConfiguration(mServiceRegistry, spec.name.c_str(), spec.options);
387 if (configStore == nullptr) {
388 std::vector<std::unique_ptr<ParamRetriever>> retrievers;
389 retrievers.emplace_back(std::make_unique<FairOptionsRetriever>(GetConfig()));
390 configStore = std::make_unique<ConfigParamStore>(spec.options, std::move(retrievers));
391 configStore->preload();
392 configStore->activate();
393 }
394
395 using boost::property_tree::ptree;
396
398 for (auto& entry : configStore->store()) {
399 std::stringstream ss;
400 std::string str;
401 if (entry.second.empty() == false) {
402 boost::property_tree::json_parser::write_json(ss, entry.second, false);
403 str = ss.str();
404 str.pop_back(); // remove EoL
405 } else {
406 str = entry.second.get_value<std::string>();
407 }
408 std::string configString = fmt::format("[CONFIG] {}={} 1 {}", entry.first, str, configStore->provenance(entry.first.c_str())).c_str();
409 mServiceRegistry.get<DriverClient>(ServiceRegistry::globalDeviceSalt()).tell(configString.c_str());
410 }
411
412 mConfigRegistry = std::make_unique<ConfigParamRegistry>(std::move(configStore));
413
414 // Setup the error handlers for init
415 if (context.initError) {
416 context.initErrorHandling = [&errorCallback = context.initError,
417 &serviceRegistry = mServiceRegistry](RuntimeErrorRef e) {
421 auto& context = ref.get<DataProcessorContext>();
422 auto& err = error_from_ref(e);
423 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
424 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Init", "Exception caught while in Init: %{public}s. Invoking errorCallback.", err.what);
425 BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
426 auto& stats = ref.get<DataProcessingStats>();
428 InitErrorContext errorContext{ref, e};
429 errorCallback(errorContext);
430 };
431 } else {
432 context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) {
433 auto& err = error_from_ref(e);
437 auto& context = ref.get<DataProcessorContext>();
438 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
439 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Init", "Exception caught while in Init: %{public}s. Exiting with 1.", err.what);
440 BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
441 auto& stats = ref.get<DataProcessingStats>();
443 exit(1);
444 };
445 }
446
447 context.expirationHandlers.clear();
448 context.init = spec.algorithm.onInit;
449 if (context.init) {
450 static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
451 InitContext initContext{*mConfigRegistry, mServiceRegistry};
452
453 if (noCatch) {
454 try {
455 context.statefulProcess = context.init(initContext);
457 if (context.initErrorHandling) {
458 (context.initErrorHandling)(e);
459 }
460 }
461 } else {
462 try {
463 context.statefulProcess = context.init(initContext);
464 } catch (std::exception& ex) {
468 auto e = runtime_error(ex.what());
469 (context.initErrorHandling)(e);
471 (context.initErrorHandling)(e);
472 }
473 }
474 }
475 auto& state = ref.get<DeviceState>();
476 state.inputChannelInfos.resize(spec.inputChannels.size());
480 int validChannelId = 0;
481 for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
482 auto& name = spec.inputChannels[ci].name;
483 if (name.find(spec.channelPrefix + "from_internal-dpl-clock") == 0) {
484 state.inputChannelInfos[ci].state = InputChannelState::Pull;
485 state.inputChannelInfos[ci].id = {ChannelIndex::INVALID};
486 validChannelId++;
487 } else {
488 state.inputChannelInfos[ci].id = {validChannelId++};
489 }
490 }
491
492 // Invoke the callback policy for this device.
493 if (spec.callbacksPolicy.policy != nullptr) {
494 InitContext initContext{*mConfigRegistry, mServiceRegistry};
495 spec.callbacksPolicy.policy(mServiceRegistry.get<CallbackService>(ServiceRegistry::globalDeviceSalt()), initContext);
496 }
497
498 // Services which are stream should be initialised now
499 auto* options = GetConfig();
500 for (size_t si = 0; si < mStreams.size(); ++si) {
502 mServiceRegistry.lateBindStreamServices(state, *options, streamSalt);
503 }
504 O2_SIGNPOST_END(device, cid, "Init", "Exiting Init callback.");
505}
506
/// Signal handler run from the libuv loop. If no offer with shared memory is
/// currently outstanding, it places a 1 GB shared-memory offer into the first
/// free slot of the ComputingQuotaEvaluator.
/// NOTE(review): the listing this file was recovered from elides original
/// lines 521 and 547 (between `stats` and the `ri` loop, and just before the
/// final signpost) — confirm against the upstream source.
void on_signal_callback(uv_signal_t* handle, int signum)
{
  O2_SIGNPOST_ID_FROM_POINTER(sid, device, handle);
  O2_SIGNPOST_START(device, sid, "signal_state", "Signal %d received.", signum);

  auto* registry = (ServiceRegistry*)handle->data;
  if (!registry) {
    O2_SIGNPOST_END(device, sid, "signal_state", "No registry active. Ignoring signal.");
    return;
  }
  ServiceRegistryRef ref{*registry};
  auto& state = ref.get<DeviceState>();
  auto& quotaEvaluator = ref.get<ComputingQuotaEvaluator>();
  auto& stats = ref.get<DataProcessingStats>();
  size_t ri = 0;
  while (ri != quotaEvaluator.mOffers.size()) {
    auto& offer = quotaEvaluator.mOffers[ri];
    // We were already offered some sharedMemory, so we
    // do not consider the offer.
    // FIXME: in principle this should account for memory
    // available and being offered, however we
    // want to get out of the woods for now.
    if (offer.valid && offer.sharedMemory != 0) {
      O2_SIGNPOST_END(device, sid, "signal_state", "Memory already offered.");
      return;
    }
    ri++;
  }
  // Find the first empty offer and have 1GB of shared memory there
  for (auto& offer : quotaEvaluator.mOffers) {
    if (offer.valid == false) {
      offer.cpu = 0;
      offer.memory = 0;
      offer.sharedMemory = 1000000000;
      offer.valid = true;
      offer.user = -1; // -1 presumably means "not tied to any stream" — verify.
      break;
    }
  }
  O2_SIGNPOST_END(device, sid, "signal_state", "Done processing signals.");
}
550
551struct DecongestionContext {
554};
555
556auto decongestionCallbackLate = [](AsyncTask& task, size_t aid) -> void {
557 auto& oldestTimeslice = task.user<DecongestionContext>().oldestTimeslice;
558 auto& ref = task.user<DecongestionContext>().ref;
559
560 auto& decongestion = ref.get<DecongestionService>();
561 auto& proxy = ref.get<FairMQDeviceProxy>();
562 if (oldestTimeslice.timeslice.value <= decongestion.lastTimeslice) {
563 LOG(debug) << "Not sending already sent oldest possible timeslice " << oldestTimeslice.timeslice.value;
564 return;
565 }
566 for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
567 auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi});
568 auto& state = proxy.getForwardChannelState(ChannelIndex{fi});
569 O2_SIGNPOST_ID_GENERATE(aid, async_queue);
570 // TODO: this we could cache in the proxy at the bind moment.
571 if (info.channelType != ChannelAccountingType::DPL) {
572 O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Skipping channel %{public}s because it's not a DPL channel",
573 info.name.c_str());
574
575 continue;
576 }
577 if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestTimeslice.timeslice.value)) {
578 O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Forwarding to channel %{public}s oldest possible timeslice %zu, prio 20",
579 info.name.c_str(), oldestTimeslice.timeslice.value);
580 }
581 }
582};
583
// This is how we do the forwarding, i.e. we push
// the inputs which are shared between this device and others
// to the next one in the daisy chain.
// FIXME: do it in a smarter way than O(N^2)
static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector<MessageSet>& currentSetOfInputs,
                               TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) {
  auto& proxy = registry.get<FairMQDeviceProxy>();

  O2_SIGNPOST_ID_GENERATE(sid, forwarding);
  O2_SIGNPOST_START(forwarding, sid, "forwardInputs", "Starting forwarding for slot %zu with oldestTimeslice %zu %{public}s%{public}s%{public}s",
                    slot.index, oldestTimeslice.timeslice.value, copy ? "with copy" : "", copy && consume ? " and " : "", consume ? "with consume" : "");
  // Split the current inputs into one fair::mq::Parts per forward channel.
  auto forwardedParts = DataProcessingHelpers::routeForwardedMessageSet(proxy, currentSetOfInputs, copy, consume);

  for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
    if (forwardedParts[fi].Size() == 0) {
      continue;
    }
    ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi});
    auto& parts = forwardedParts[fi];
    // Each forward channel must have a policy telling us how to send.
    if (info.policy == nullptr) {
      O2_SIGNPOST_EVENT_EMIT_ERROR(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d has no policy.", info.name.c_str(), fi);
      continue;
    }
    O2_SIGNPOST_EVENT_EMIT(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d", info.name.c_str(), fi);
    info.policy->forward(parts, ChannelIndex{fi}, registry);
  }

  // The oldest-possible-timeslice announcement is done asynchronously via the
  // async queue (handled by decongestionCallbackLate).
  auto& asyncQueue = registry.get<AsyncQueue>();
  auto& decongestion = registry.get<DecongestionService>();
  O2_SIGNPOST_ID_GENERATE(aid, async_queue);
  O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputs", "Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);
  AsyncQueueHelpers::post(asyncQueue, AsyncTask{.timeslice = oldestTimeslice.timeslice, .id = decongestion.oldestPossibleTimesliceTask, .debounce = -1, .callback = decongestionCallbackLate}
                                       .user<DecongestionContext>({.ref = registry, .oldestTimeslice = oldestTimeslice}));
  O2_SIGNPOST_END(forwarding, sid, "forwardInputs", "Forwarding done");
};
619
// Clean-up counterpart of forwardInputs: nothing is sent, we only need the
// side effect of consuming the messages for the given slot.
static auto cleanEarlyForward = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector<MessageSet>& currentSetOfInputs,
                                   TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) {
  auto& proxy = registry.get<FairMQDeviceProxy>();

  O2_SIGNPOST_ID_GENERATE(sid, forwarding);
  O2_SIGNPOST_START(forwarding, sid, "forwardInputs", "Cleaning up slot %zu with oldestTimeslice %zu %{public}s%{public}s%{public}s",
                    slot.index, oldestTimeslice.timeslice.value, copy ? "with copy" : "", copy && consume ? " and " : "", consume ? "with consume" : "");
  // Always copy them, because we do not want to actually send them.
  // We merely need the side effect of the consume, if applicable.
  for (size_t ii = 0, ie = currentSetOfInputs.size(); ii < ie; ++ii) {
    auto span = std::span<fair::mq::MessagePtr>(currentSetOfInputs[ii].messages);
    // NOTE(review): the listing this file was recovered from elides the line
    // that actually uses `span` (original line 631) — check upstream.
  }

  O2_SIGNPOST_END(forwarding, sid, "forwardInputs", "Cleaning done");
};
636
637extern volatile int region_read_global_dummy_variable;
639
641void handleRegionCallbacks(ServiceRegistryRef registry, std::vector<fair::mq::RegionInfo>& infos)
642{
643 if (infos.empty() == false) {
644 std::vector<fair::mq::RegionInfo> toBeNotified;
645 toBeNotified.swap(infos); // avoid any MT issue.
646 static bool dummyRead = getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS") && atoi(getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS"));
647 for (auto const& info : toBeNotified) {
648 if (dummyRead) {
649 for (size_t i = 0; i < info.size / sizeof(region_read_global_dummy_variable); i += 4096 / sizeof(region_read_global_dummy_variable)) {
650 region_read_global_dummy_variable = ((int*)info.ptr)[i];
651 }
652 }
653 registry.get<CallbackService>().call<CallbackService::Id::RegionInfoCallback>(info);
654 }
655 }
656}
657
658namespace
659{
661{
662 auto* state = (DeviceState*)handle->data;
664}
665} // namespace
666
/// Create the libuv pollers for this device's sockets.
/// Real devices (with a stateful or stateless process) get one poller per
/// receiving DPL input channel; if no input poller, timer or signal watcher
/// ends up active, output sockets are polled instead so enumerations can
/// still make progress. Devices without any processing callback are "fake":
/// they request to quit immediately and only keep a 2s idle timer.
void DataProcessingDevice::initPollers()
{
  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& deviceContext = ref.get<DeviceContext>();
  auto& context = ref.get<DataProcessorContext>();
  auto& spec = ref.get<DeviceSpec const>();
  auto& state = ref.get<DeviceState>();
  // We add a timer only in case a channel poller is not there.
  if ((context.statefulProcess != nullptr) || (context.statelessProcess != nullptr)) {
    for (auto& [channelName, channel] : GetChannels()) {
      InputChannelInfo* channelInfo;
      // Find the matching input-channel info and attach the fairmq channel.
      // NOTE(review): if no channelSpec matches, channelInfo keeps the value
      // of the last iteration (or stays uninitialized when inputChannels is
      // empty) — the assert below only catches nullptr. Verify upstream.
      for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
        auto& channelSpec = spec.inputChannels[ci];
        channelInfo = &state.inputChannelInfos[ci];
        if (channelSpec.name != channelName) {
          continue;
        }
        channelInfo->channel = &this->GetChannel(channelName, 0);
        break;
      }
      // Internal DPL channels (except AOD, CCDB backend and injected ones)
      // never deliver input, so they need no poller.
      if ((channelName.rfind("from_internal-dpl", 0) == 0) &&
          (channelName.rfind("from_internal-dpl-aod", 0) != 0) &&
          (channelName.rfind("from_internal-dpl-ccdb-backend", 0) != 0) &&
          (channelName.rfind("from_internal-dpl-injected", 0)) != 0) {
        LOGP(detail, "{} is an internal channel. Skipping as no input will come from there.", channelName);
        continue;
      }
      // We only watch receiving sockets.
      if (channelName.rfind("from_" + spec.name + "_", 0) == 0) {
        LOGP(detail, "{} is to send data. Not polling.", channelName);
        continue;
      }

      if (channelName.rfind("from_", 0) != 0) {
        LOGP(detail, "{} is not a DPL socket. Not polling.", channelName);
        continue;
      }

      // We assume there is always a ZeroMQ socket behind.
      int zmq_fd = 0;
      size_t zmq_fd_len = sizeof(zmq_fd);
      // FIXME: I should probably save those somewhere... ;-)
      auto* poller = (uv_poll_t*)malloc(sizeof(uv_poll_t));
      channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
      if (zmq_fd == 0) {
        LOG(error) << "Cannot get file descriptor for channel." << channelName;
        continue;
      }
      LOGP(detail, "Polling socket for {}", channelName);
      // Poller context: freed never (see FIXME below in the output branch) —
      // it lives as long as the device.
      auto* pCtx = (PollerContext*)malloc(sizeof(PollerContext));
      pCtx->name = strdup(channelName.c_str());
      pCtx->loop = state.loop;
      pCtx->device = this;
      pCtx->state = &state;
      pCtx->fd = zmq_fd;
      assert(channelInfo != nullptr);
      pCtx->channelInfo = channelInfo;
      pCtx->socket = &channel[0].GetSocket();
      pCtx->read = true;
      poller->data = pCtx;
      uv_poll_init(state.loop, poller, zmq_fd);
      // NOTE(review): this branch is dead — channels not starting with
      // "from_" were already skipped above, so every poller created here
      // lands in activeInputPollers. Confirm against upstream.
      if (channelName.rfind("from_", 0) != 0) {
        LOGP(detail, "{} is an out of band channel.", channelName);
        state.activeOutOfBandPollers.push_back(poller);
      } else {
        channelInfo->pollerIndex = state.activeInputPollers.size();
        state.activeInputPollers.push_back(poller);
      }
    }
    // In case we do not have any input channel and we do not have
    // any timers or signal watchers we still wake up whenever we can send data to downstream
    // devices to allow for enumerations.
    if (state.activeInputPollers.empty() &&
        state.activeOutOfBandPollers.empty() &&
        state.activeTimers.empty() &&
        state.activeSignals.empty()) {
      // FIXME: this is to make sure we do not reset the output timer
      // for readout proxies or similar. In principle this should go once
      // we move to OutOfBand InputSpec.
      if (state.inputChannelInfos.empty()) {
        LOGP(detail, "No input channels. Setting exit transition timeout to 0.");
        deviceContext.exitTransitionTimeout = 0;
      }
      for (auto& [channelName, channel] : GetChannels()) {
        if (channelName.rfind(spec.channelPrefix + "from_internal-dpl", 0) == 0) {
          LOGP(detail, "{} is an internal channel. Not polling.", channelName);
          continue;
        }
        if (channelName.rfind(spec.channelPrefix + "from_" + spec.name + "_", 0) == 0) {
          LOGP(detail, "{} is an out of band channel. Not polling for output.", channelName);
          continue;
        }
        // We assume there is always a ZeroMQ socket behind.
        int zmq_fd = 0;
        size_t zmq_fd_len = sizeof(zmq_fd);
        // FIXME: I should probably save those somewhere... ;-)
        auto* poller = (uv_poll_t*)malloc(sizeof(uv_poll_t));
        channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
        if (zmq_fd == 0) {
          LOGP(error, "Cannot get file descriptor for channel {}", channelName);
          continue;
        }
        LOG(detail) << "Polling socket for " << channel[0].GetName();
        // FIXME: leak
        auto* pCtx = (PollerContext*)malloc(sizeof(PollerContext));
        pCtx->name = strdup(channelName.c_str());
        pCtx->loop = state.loop;
        pCtx->device = this;
        pCtx->state = &state;
        pCtx->fd = zmq_fd;
        pCtx->read = false;
        poller->data = pCtx;
        uv_poll_init(state.loop, poller, zmq_fd);
        state.activeOutputPollers.push_back(poller);
      }
    }
  } else {
    LOGP(detail, "This is a fake device so we exit after the first iteration.");
    deviceContext.exitTransitionTimeout = 0;
    // This is a fake device, so we can request to exit immediately
    ServiceRegistryRef ref{mServiceRegistry};
    ref.get<ControlService>().readyToQuit(QuitRequest::Me);
    // A two second timer to stop internal devices which do not want to
    auto* timer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
    uv_timer_init(state.loop, timer);
    timer->data = &state;
    uv_update_time(state.loop);
    uv_timer_start(timer, on_idle_timer, 2000, 2000);
    state.activeTimers.push_back(timer);
  }
}
798
799void DataProcessingDevice::startPollers()
800{
801 auto ref = ServiceRegistryRef{mServiceRegistry};
802 auto& deviceContext = ref.get<DeviceContext>();
803 auto& state = ref.get<DeviceState>();
804
805 for (auto* poller : state.activeInputPollers) {
806 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
807 O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection.");
808 uv_poll_start(poller, UV_WRITABLE, &on_socket_polled);
809 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
810 }
811 for (auto& poller : state.activeOutOfBandPollers) {
812 uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
813 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
814 }
815 for (auto* poller : state.activeOutputPollers) {
816 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
817 O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection.");
818 uv_poll_start(poller, UV_WRITABLE, &on_socket_polled);
819 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
820 }
821
822 deviceContext.gracePeriodTimer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
823 deviceContext.gracePeriodTimer->data = new ServiceRegistryRef(mServiceRegistry);
824 uv_timer_init(state.loop, deviceContext.gracePeriodTimer);
825
826 deviceContext.dataProcessingGracePeriodTimer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
827 deviceContext.dataProcessingGracePeriodTimer->data = new ServiceRegistryRef(mServiceRegistry);
828 uv_timer_init(state.loop, deviceContext.dataProcessingGracePeriodTimer);
829}
830
831void DataProcessingDevice::stopPollers()
832{
833 auto ref = ServiceRegistryRef{mServiceRegistry};
834 auto& deviceContext = ref.get<DeviceContext>();
835 auto& state = ref.get<DeviceState>();
836 LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size());
837 for (auto* poller : state.activeInputPollers) {
838 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
839 O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed.");
840 uv_poll_stop(poller);
841 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
842 }
843 LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size());
844 for (auto* poller : state.activeOutOfBandPollers) {
845 uv_poll_stop(poller);
846 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
847 }
848 LOGP(detail, "Stopping {} output pollers", state.activeOutOfBandPollers.size());
849 for (auto* poller : state.activeOutputPollers) {
850 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
851 O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed.");
852 uv_poll_stop(poller);
853 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
854 }
855
856 uv_timer_stop(deviceContext.gracePeriodTimer);
857 delete (ServiceRegistryRef*)deviceContext.gracePeriodTimer->data;
858 free(deviceContext.gracePeriodTimer);
859 deviceContext.gracePeriodTimer = nullptr;
860
861 uv_timer_stop(deviceContext.dataProcessingGracePeriodTimer);
862 delete (ServiceRegistryRef*)deviceContext.dataProcessingGracePeriodTimer->data;
863 free(deviceContext.dataProcessingGracePeriodTimer);
864 deviceContext.dataProcessingGracePeriodTimer = nullptr;
865}
866
{
  // InitTask callback body: build the expiration handlers for the distinct
  // input routes, set up the main-thread wakeup async handle, subscribe to
  // shared-memory region events on every channel, install the SIGUSR1
  // handler, and finally block until all expected region callbacks arrived.
  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& deviceContext = ref.get<DeviceContext>();
  auto& context = ref.get<DataProcessorContext>();

  O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
  O2_SIGNPOST_START(device, cid, "InitTask", "Entering InitTask callback.");
  auto& spec = getRunningDevice(mRunningDevice, mServiceRegistry);
  auto distinct = DataRelayerHelpers::createDistinctRouteIndex(spec.inputs);
  auto& state = ref.get<DeviceState>();
  int i = 0;
  for (auto& di : distinct) {
    auto& route = spec.inputs[di];
    // Routes without a configurator have no expiration logic; still bump
    // the route index so it stays aligned with spec.inputs.
    if (route.configurator.has_value() == false) {
      i++;
      continue;
    }
    ExpirationHandler handler{
      .name = route.configurator->name,
      .routeIndex = RouteIndex{i++},
      .lifetime = route.matcher.lifetime,
      .creator = route.configurator->creatorConfigurator(state, mServiceRegistry, *mConfigRegistry),
      .checker = route.configurator->danglingConfigurator(state, *mConfigRegistry),
      .handler = route.configurator->expirationConfigurator(state, *mConfigRegistry)};
    context.expirationHandlers.emplace_back(std::move(handler));
  }

  // Lazily create the async handle used to wake up the main event loop
  // from other threads (e.g. the region-event callback below).
  if (state.awakeMainThread == nullptr) {
    state.awakeMainThread = (uv_async_t*)malloc(sizeof(uv_async_t));
    state.awakeMainThread->data = &state;
    uv_async_init(state.loop, state.awakeMainThread, on_awake_main_thread);
  }

  deviceContext.expectedRegionCallbacks = std::stoi(fConfig->GetValue<std::string>("expected-region-callbacks"));
  deviceContext.exitTransitionTimeout = std::stoi(fConfig->GetValue<std::string>("exit-transition-timeout"));
  deviceContext.dataProcessingTimeout = std::stoi(fConfig->GetValue<std::string>("data-processing-timeout"));

  // Region events can arrive on a transport thread: they are queued under
  // the mutex and later drained on the main loop via handleRegionCallbacks.
  for (auto& channel : GetChannels()) {
    channel.second.at(0).Transport()->SubscribeToRegionEvents([&context = deviceContext,
                                                               &registry = mServiceRegistry,
                                                               &pendingRegionInfos = mPendingRegionInfos,
                                                               &regionInfoMutex = mRegionInfoMutex](fair::mq::RegionInfo info) {
      std::lock_guard<std::mutex> lock(regionInfoMutex);
      LOG(detail) << ">>> Region info event" << info.event;
      LOG(detail) << "id: " << info.id;
      LOG(detail) << "ptr: " << info.ptr;
      LOG(detail) << "size: " << info.size;
      LOG(detail) << "flags: " << info.flags;
      // Now we check for pending events with the mutex,
      // so the lines below are atomic.
      pendingRegionInfos.push_back(info);
      context.expectedRegionCallbacks -= 1;
      // We always want to handle these on the main loop,
      // so we awake it.
      ServiceRegistryRef ref{registry};
      uv_async_send(ref.get<DeviceState>().awakeMainThread);
    });
  }

  // Add a signal manager for SIGUSR1 so that we can force
  // an event from the outside, making sure that the event loop can
  // be unblocked (e.g. by a quitting DPL driver) even when there
  // is no data pending to be processed.
  if (deviceContext.sigusr1Handle == nullptr) {
    deviceContext.sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t));
    deviceContext.sigusr1Handle->data = &mServiceRegistry;
    uv_signal_init(state.loop, deviceContext.sigusr1Handle);
    uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1);
  }
  // If there is any signal, we want to make sure they are active
  for (auto& handle : state.activeSignals) {
    handle->data = &state;
  }
  // When we start, we must make sure that we do listen to the signal
  deviceContext.sigusr1Handle->data = &mServiceRegistry;

  DataProcessingDevice::initPollers();

  // Whenever we InitTask, we consider as if the previous iteration
  // was successful, so that even if there is no timer or receiving
  // channel, we can still start an enumeration.
  // (DataProcessorContext*)-1 is the sentinel for "something was active".
  DataProcessorContext* initialContext = nullptr;
  bool idle = state.lastActiveDataProcessor.compare_exchange_strong(initialContext, (DataProcessorContext*)-1);
  if (!idle) {
    LOG(error) << "DataProcessor " << state.lastActiveDataProcessor.load()->spec->name << " was unexpectedly active";
  }

  // We should be ready to run here. Therefore we copy all the
  // required parts in the DataProcessorContext. Eventually we should
  // do so on a per thread basis, with fine grained locks.
  // FIXME: this should not use ServiceRegistry::threadSalt, but
  // more a ServiceRegistry::globalDataProcessorSalt(N) where
  // N is the number of the multiplexed data processor.
  // We will get there.
  this->fillContext(mServiceRegistry.get<DataProcessorContext>(ServiceRegistry::globalDeviceSalt()), deviceContext);

  O2_SIGNPOST_END(device, cid, "InitTask", "Exiting InitTask callback waiting for the remaining region callbacks.");

  // True while either queued region infos still need draining or more
  // region callbacks are expected to arrive.
  auto hasPendingEvents = [&mutex = mRegionInfoMutex, &pendingRegionInfos = mPendingRegionInfos](DeviceContext& deviceContext) {
    std::lock_guard<std::mutex> lock(mutex);
    return (pendingRegionInfos.empty() == false) || deviceContext.expectedRegionCallbacks > 0;
  };
  O2_SIGNPOST_START(device, cid, "InitTask", "Waiting for registation events.");
  while (hasPendingEvents(deviceContext)) {
    // Wait for the callback to signal its done, so that we do not busy wait.
    uv_run(state.loop, UV_RUN_ONCE);
    // Handle callbacks if any
    {
      O2_SIGNPOST_EVENT_EMIT(device, cid, "InitTask", "Memory registration event received.");
      std::lock_guard<std::mutex> lock(mRegionInfoMutex);
      handleRegionCallbacks(mServiceRegistry, mPendingRegionInfos);
    }
  }
  O2_SIGNPOST_END(device, cid, "InitTask", "Done waiting for registration events.");
}
988
{
  // Fills the DataProcessorContext with the per-device configuration:
  // sink/rate-limit flags, the error handling callback and the early
  // forward policy derived from the DeviceSpec.
  context.isSink = false;
  // If nothing is a sink, the rate limiting simply does not trigger.
  bool enableRateLimiting = std::stoi(fConfig->GetValue<std::string>("timeframes-rate-limit"));

  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& spec = ref.get<DeviceSpec const>();

  // The policy is now allowed to state the default.
  context.balancingInputs = spec.completionPolicy.balanceChannels;
  // This is needed because the internal injected dummy sink should not
  // try to balance inputs unless the rate limiting is requested.
  if (enableRateLimiting == false && spec.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) {
    context.balancingInputs = false;
  }
  if (enableRateLimiting) {
    // A device producing the "dpl-summary" output is considered the sink
    // for rate limiting purposes.
    for (auto& spec : spec.outputs) {
      if (spec.matcher.binding.value == "dpl-summary") {
        context.isSink = true;
        break;
      }
    }
  }

  context.registry = &mServiceRegistry;
  // With a user provided error callback: log / backtrace, then invoke it.
  if (context.error != nullptr) {
    context.errorHandling = [&errorCallback = context.error,
                             &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) {
      auto& err = error_from_ref(e);
      auto& context = ref.get<DataProcessorContext>();
      O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
      O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Run", "Exception while running: %{public}s. Invoking callback.", err.what);
      BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
      auto& stats = ref.get<DataProcessingStats>();
      ErrorContext errorContext{record, ref, e};
      errorCallback(errorContext);
    };
  } else {
    // Without a user callback the processing policy decides whether to
    // rethrow or to skip to the next timeframe.
    context.errorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) {
      auto& err = error_from_ref(e);
      auto& context = ref.get<DataProcessorContext>();
      auto& deviceContext = ref.get<DeviceContext>();
      O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
      BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
      auto& stats = ref.get<DataProcessingStats>();
      // NOTE(review): a case label appears to be missing before the
      // rethrow branch in this rendering — confirm against upstream.
      switch (deviceContext.processingPolicies.error) {
          O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Run", "Exception while running: %{public}s. Rethrowing.", err.what);
          throw e;
        default:
          O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Run", "Exception while running: %{public}s. Skipping to next timeframe.", err.what);
          break;
      }
    };
  }

  // Decide when forwarded inputs may be sent downstream, starting from an
  // environment-selected default and tightening it for known corner cases.
  auto decideEarlyForward = [&context, &deviceContext, &spec, this]() -> ForwardPolicy {
    ForwardPolicy defaultEarlyForwardPolicy = getenv("DPL_OLD_EARLY_FORWARD") ? ForwardPolicy::AtCompletionPolicySatisified : ForwardPolicy::AtInjection;
    // FIXME: try again with the new policy by default.
    //
    // Make the new policy optional until we handle some of the corner cases
    // with custom policies which expect the early forward to happen only when
    // all the data is available, like in the TPC case.
    // ForwardPolicy defaultEarlyForwardPolicy = getenv("DPL_NEW_EARLY_FORWARD") ? ForwardPolicy::AtInjection : ForwardPolicy::AtCompletionPolicySatisified;
    for (auto& forward : spec.forwards) {
      if (DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{"TPC", "DIGITSMCTR"}) ||
          DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{"TPC", "CLNATIVEMCLBL"}) ||
          DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{o2::header::gDataOriginTPC, "DIGITS"}) ||
          DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{o2::header::gDataOriginTPC, "CLUSTERNATIVE"})) {
        defaultEarlyForwardPolicy = ForwardPolicy::AtCompletionPolicySatisified;
        break;
      }
    }
    // Output proxies should wait for the completion policy before forwarding.
    // Because they actually do not do anything, that's equivalent to
    // forwarding after the processing.
    for (auto& label : spec.labels) {
      if (label.value == "output-proxy") {
        defaultEarlyForwardPolicy = ForwardPolicy::AfterProcessing;
        break;
      }
    }

    ForwardPolicy forwardPolicy = defaultEarlyForwardPolicy;
    if (spec.forwards.empty() == false) {
      // NOTE(review): the case labels of this switch appear to be missing
      // in this rendering — confirm against upstream.
      switch (deviceContext.processingPolicies.earlyForward) {
          forwardPolicy = ForwardPolicy::AfterProcessing;
          break;
          forwardPolicy = defaultEarlyForwardPolicy;
          break;
          forwardPolicy = defaultEarlyForwardPolicy;
          break;
      }
    }
    bool onlyConditions = true;
    bool overriddenEarlyForward = false;
    for (auto& forwarded : spec.forwards) {
      if (forwarded.matcher.lifetime != Lifetime::Condition) {
        onlyConditions = false;
      }
        forwardPolicy = ForwardPolicy::AfterProcessing;
        overriddenEarlyForward = true;
        LOG(detail) << "Cannot forward early because of RAWDATA input: " << DataSpecUtils::describe(forwarded.matcher);
        break;
      }
      if (forwarded.matcher.lifetime == Lifetime::Optional) {
        forwardPolicy = ForwardPolicy::AfterProcessing;
        overriddenEarlyForward = true;
        LOG(detail) << "Cannot forward early because of Optional input: " << DataSpecUtils::describe(forwarded.matcher);
        break;
      }
    }
    // When everything forwarded is a condition object we can always
    // forward early, unless an explicit override kicked in above.
    if (!overriddenEarlyForward && onlyConditions) {
      forwardPolicy = defaultEarlyForwardPolicy;
      LOG(detail) << "Enabling early forwarding because only conditions to be forwarded";
    }
    return forwardPolicy;
  };
  context.forwardPolicy = decideEarlyForward();
}
1126
{
  // PreRun callback body: reset the device state for a new run, re-enable
  // the input channels, invoke the pre-start callbacks (per device and per
  // stream), start the pollers and raise the device_state metric to 1.
  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& state = ref.get<DeviceState>();

  O2_SIGNPOST_ID_FROM_POINTER(cid, device, state.loop);
  O2_SIGNPOST_START(device, cid, "PreRun", "Entering PreRun callback.");
  state.quitRequested = false;
  state.allowedProcessing = DeviceState::Any;
  // Channels in the fake Pull state are left alone; everything else is put
  // back into Running.
  for (auto& info : state.inputChannelInfos) {
    if (info.state != InputChannelState::Pull) {
      info.state = InputChannelState::Running;
    }
  }

  // Catch callbacks which fail before we start.
  // Notice that when running multiple dataprocessors
  // we should probably allow expendable ones to fail.
  try {
    auto& dpContext = ref.get<DataProcessorContext>();
    dpContext.preStartCallbacks(ref);
    // Stream salts are 1-based, hence the i + 1.
    for (size_t i = 0; i < mStreams.size(); ++i) {
      auto streamRef = ServiceRegistryRef{mServiceRegistry, ServiceRegistry::globalStreamSalt(i + 1)};
      auto& context = streamRef.get<StreamContext>();
      context.preStartStreamCallbacks(streamRef);
    }
  } catch (std::exception& e) {
    O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type std::exception caught in PreRun: %{public}s. Rethrowing.", e.what());
    O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
    throw;
  } catch (o2::framework::RuntimeErrorRef& e) {
    auto& err = error_from_ref(e);
    O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type o2::framework::RuntimeErrorRef caught in PreRun: %{public}s. Rethrowing.", err.what);
    O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
    throw;
  } catch (...) {
    O2_SIGNPOST_END(device, cid, "PreRun", "Unknown exception being thrown. Rethrowing.");
    throw;
  }

  ref.get<CallbackService>().call<CallbackService::Id::Start>();
  startPollers();

  // Raise to 1 when we are ready to start processing
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;

  auto& monitoring = ref.get<Monitoring>();
  monitoring.send(Metric{(uint64_t)1, "device_state"}.addTag(Key::Subsystem, Value::DPL));
  O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun callback.");
}
1181
1183{
1184 ServiceRegistryRef ref{mServiceRegistry};
1185 // Raise to 1 when we are ready to start processing
1186 using o2::monitoring::Metric;
1187 using o2::monitoring::Monitoring;
1188 using o2::monitoring::tags::Key;
1189 using o2::monitoring::tags::Value;
1190
1191 auto& monitoring = ref.get<Monitoring>();
1192 monitoring.send(Metric{(uint64_t)0, "device_state"}.addTag(Key::Subsystem, Value::DPL));
1193
1194 stopPollers();
1195 ref.get<CallbackService>().call<CallbackService::Id::Stop>();
1196 auto& dpContext = ref.get<DataProcessorContext>();
1197 dpContext.postStopCallbacks(ref);
1198}
1199
1201{
1202 ServiceRegistryRef ref{mServiceRegistry};
1203 ref.get<CallbackService>().call<CallbackService::Id::Reset>();
1204}
1205
{
  // Main device loop: repeatedly run the libuv event loop, drain region
  // callbacks, schedule computations on free task streams when the
  // ComputingQuotaEvaluator grants resources, and exit once the state
  // transition handling reaches Expired.
  ServiceRegistryRef ref{mServiceRegistry};
  auto& state = ref.get<DeviceState>();
  bool firstLoop = true;
  O2_SIGNPOST_ID_FROM_POINTER(lid, device, state.loop);
  O2_SIGNPOST_START(device, lid, "device_state", "First iteration of the device loop");

  // Multithreaded dispatch is opt-in via DPL_THREADPOOL_SIZE; in that case
  // the libuv internal pool is pinned to a single thread.
  bool dplEnableMultithreding = getenv("DPL_THREADPOOL_SIZE") != nullptr;
  if (dplEnableMultithreding) {
    setenv("UV_THREADPOOL_SIZE", "1", 1);
  }

  while (state.transitionHandling != TransitionHandlingState::Expired) {
    // Apply any queued FairMQ state change requested by other parts of DPL.
    if (state.nextFairMQState.empty() == false) {
      (void)this->ChangeState(state.nextFairMQState.back());
      state.nextFairMQState.pop_back();
    }
    // Notify on the main thread the new region callbacks, making sure
    // no callback is issued if there is something still processing.
    {
      std::lock_guard<std::mutex> lock(mRegionInfoMutex);
      handleRegionCallbacks(mServiceRegistry, mPendingRegionInfos);
    }
    // This will block for the correct delay (or until we get data
    // on a socket). We also do not block on the first iteration
    // so that devices which do not have a timer can still start an
    // enumeration.
    {
      ServiceRegistryRef ref{mServiceRegistry};
      ref.get<DriverClient>().flushPending(mServiceRegistry);
      DataProcessorContext* lastActive = state.lastActiveDataProcessor.load();
      // Reset to zero unless some other DataPorcessorContext completed in the meanwhile.
      // In such case we will take care of it at next iteration.
      state.lastActiveDataProcessor.compare_exchange_strong(lastActive, nullptr);

      auto shouldNotWait = (lastActive != nullptr &&
                            (state.streaming != StreamingState::Idle) && (state.activeSignals.empty())) ||
      if (firstLoop) {
        shouldNotWait = true;
        firstLoop = false;
      }
      if (lastActive != nullptr) {
      }
      if (NewStatePending()) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "New state pending. Waiting for it to be handled.");
        shouldNotWait = true;
      }
      // If we are Idle, we can then consider the transition to be expired.
      if (state.transitionHandling == TransitionHandlingState::Requested && state.streaming == StreamingState::Idle) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "State transition requested and we are now in Idle. We can consider it to be completed.");
        state.transitionHandling = TransitionHandlingState::Expired;
      }
      // Restore the previous console severity pushed by the tracing logic.
      if (state.severityStack.empty() == false) {
        fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
        state.severityStack.pop_back();
      }
      // for (auto &info : mDeviceContext.state->inputChannelInfos) {
      //   shouldNotWait |= info.readPolled;
      // }
      state.loopReason = DeviceState::NO_REASON;
      state.firedTimers.clear();
      if ((state.tracingFlags & DeviceState::LoopReason::TRACE_CALLBACKS) != 0) {
        state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
        fair::Logger::SetConsoleSeverity(fair::Severity::trace);
      }
      // Run the asynchronous queue just before sleeping again, so that:
      // - we can trigger further events from the queue
      // - we can guarantee this is the last thing we do in the loop (
      //   assuming no one else is adding to the queue before this point).
      auto onDrop = [&registry = mServiceRegistry, lid](TimesliceSlot slot, std::vector<MessageSet>& dropped, TimesliceIndex::OldestOutputInfo oldestOutputInfo) {
        O2_SIGNPOST_START(device, lid, "run_loop", "Dropping message from slot %" PRIu64 ". Forwarding as needed.", (uint64_t)slot.index);
        ServiceRegistryRef ref{registry};
        ref.get<AsyncQueue>();
        ref.get<DecongestionService>();
        ref.get<DataRelayer>();
        // Get the current timeslice for the slot.
        auto& variables = ref.get<TimesliceIndex>().getVariablesForSlot(slot);
        forwardInputs(registry, slot, dropped, oldestOutputInfo, false, true);
      };
      auto& relayer = ref.get<DataRelayer>();
      relayer.prunePending(onDrop);
      auto& queue = ref.get<AsyncQueue>();
      auto oldestPossibleTimeslice = relayer.getOldestPossibleOutput();
      AsyncQueueHelpers::run(queue, {oldestPossibleTimeslice.timeslice.value});
      if (shouldNotWait == false) {
        auto& dpContext = ref.get<DataProcessorContext>();
        dpContext.preLoopCallbacks(ref);
      }
      O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. %{}s", shouldNotWait ? "Will immediately schedule a new one" : "Waiting for next event.");
      uv_run(state.loop, shouldNotWait ? UV_RUN_NOWAIT : UV_RUN_ONCE);
      O2_SIGNPOST_START(device, lid, "run_loop", "Run loop started. Loop reason %d.", state.loopReason);
      // Temporarily raise the console severity to trace when the loop
      // reason matches the requested tracing flags.
      if ((state.loopReason & state.tracingFlags) != 0) {
        state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
        fair::Logger::SetConsoleSeverity(fair::Severity::trace);
      } else if (state.severityStack.empty() == false) {
        fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
        state.severityStack.pop_back();
      }
      O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Loop reason mask %x & %x = %x", state.loopReason, state.tracingFlags, state.loopReason & state.tracingFlags);

      if ((state.loopReason & DeviceState::LoopReason::OOB_ACTIVITY) != 0) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Out of band activity detected. Rescanning everything.");
        relayer.rescan();
      }

      if (!state.pendingOffers.empty()) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Pending %" PRIu64 " offers. updating the ComputingQuotaEvaluator.", (uint64_t)state.pendingOffers.size());
        ref.get<ComputingQuotaEvaluator>().updateOffers(state.pendingOffers, uv_now(state.loop));
      }
    }

    // Notify on the main thread the new region callbacks, making sure
    // no callback is issued if there is something still processing.
    // Notice that we still need to perform callbacks also after
    // the socket epolled, because otherwise we would end up serving
    // the callback after the first data arrives is the system is too
    // fast to transition from Init to Run.
    {
      std::lock_guard<std::mutex> lock(mRegionInfoMutex);
      handleRegionCallbacks(mServiceRegistry, mPendingRegionInfos);
    }

    assert(mStreams.size() == mHandles.size());
    // Find a stream which is not currently running; streamRef stays -1 if
    // all of them are busy.
    TaskStreamRef streamRef{-1};
    for (size_t ti = 0; ti < mStreams.size(); ti++) {
      auto& taskInfo = mStreams[ti];
      if (taskInfo.running) {
        continue;
      }
      // Stream 0 is for when we run in
      streamRef.index = ti;
    }
    using o2::monitoring::Metric;
    using o2::monitoring::Monitoring;
    using o2::monitoring::tags::Key;
    using o2::monitoring::tags::Value;
    // We have an empty stream, let's check if we have enough
    // resources for it to run something
    if (streamRef.index != -1) {
      // Synchronous execution of the callbacks. This will be moved in the
      // moved in the on_socket_polled once we have threading in place.
      uv_work_t& handle = mHandles[streamRef.index];
      TaskStreamInfo& stream = mStreams[streamRef.index];
      handle.data = &mStreams[streamRef.index];

      // Publishes the expired-offer statistics so that they show up in the
      // data processing metrics.
      static std::function<void(ComputingQuotaOffer const&, ComputingQuotaStats const& stats)> reportExpiredOffer = [&registry = mServiceRegistry](ComputingQuotaOffer const& offer, ComputingQuotaStats const& stats) {
        ServiceRegistryRef ref{registry};
        auto& dpStats = ref.get<DataProcessingStats>();
        dpStats.updateStats({static_cast<short>(ProcessingStatsId::RESOURCE_OFFER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredOffers});
        dpStats.updateStats({static_cast<short>(ProcessingStatsId::ARROW_BYTES_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredBytes});
        dpStats.updateStats({static_cast<short>(ProcessingStatsId::TIMESLICE_NUMBER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredTimeslices});
        dpStats.processCommandQueue();
      };
      auto ref = ServiceRegistryRef{mServiceRegistry};

      // Deciding wether to run or not can be done by passing a request to
      // the evaluator. In this case, the request is always satisfied and
      // we run on whatever resource is available.
      auto& spec = ref.get<DeviceSpec const>();
      bool enough = ref.get<ComputingQuotaEvaluator>().selectOffer(streamRef.index, spec.resourcePolicy.request, uv_now(state.loop));

      // Bookkeeping used only to throttle the "not enough resources" logs.
      struct SchedulingStats {
        std::atomic<size_t> lastScheduled = 0;
        std::atomic<size_t> numberOfUnscheduledSinceLastScheduled = 0;
        std::atomic<size_t> numberOfUnscheduled = 0;
        std::atomic<size_t> numberOfScheduled = 0;
      };
      static SchedulingStats schedulingStats;
      O2_SIGNPOST_ID_GENERATE(sid, scheduling);
      if (enough) {
        stream.id = streamRef;
        stream.running = true;
        stream.registry = &mServiceRegistry;
        schedulingStats.lastScheduled = uv_now(state.loop);
        schedulingStats.numberOfScheduled++;
        schedulingStats.numberOfUnscheduledSinceLastScheduled = 0;
        O2_SIGNPOST_EVENT_EMIT(scheduling, sid, "Run", "Enough resources to schedule computation on stream %d", streamRef.index);
        if (dplEnableMultithreding) [[unlikely]] {
          stream.task = &handle;
          uv_queue_work(state.loop, stream.task, run_callback, run_completion);
        } else {
          // Single threaded mode: run the callback and its completion
          // synchronously on this thread.
          run_callback(&handle);
          run_completion(&handle, 0);
        }
      } else {
        // Escalate to a warning only after many skips or a long time
        // without any scheduling.
        if (schedulingStats.numberOfUnscheduledSinceLastScheduled > 100 ||
            (uv_now(state.loop) - schedulingStats.lastScheduled) > 30000) {
          O2_SIGNPOST_EVENT_EMIT_WARN(scheduling, sid, "Run",
                                      "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu.",
                                      schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
                                      schedulingStats.lastScheduled.load());
        } else {
          O2_SIGNPOST_EVENT_EMIT(scheduling, sid, "Run",
                                 "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu.",
                                 schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
                                 schedulingStats.lastScheduled.load());
        }
        schedulingStats.numberOfUnscheduled++;
        schedulingStats.numberOfUnscheduledSinceLastScheduled++;
        auto ref = ServiceRegistryRef{mServiceRegistry};
        ref.get<ComputingQuotaEvaluator>().handleExpired(reportExpiredOffer);
      }
    }
  }

  O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. Transition handling state %d.", (int)state.transitionHandling);
  auto& spec = ref.get<DeviceSpec const>();
  // Drop any message still parked on the input channels before leaving.
  for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
    auto& info = state.inputChannelInfos[ci];
    info.parts.fParts.clear();
  }
  state.transitionHandling = TransitionHandlingState::NoTransition;
}
1428
1432{
1433 auto& context = ref.get<DataProcessorContext>();
1434 O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context);
1435 O2_SIGNPOST_START(device, dpid, "do_prepare", "Starting DataProcessorContext::doPrepare.");
1436
1437 {
1438 ref.get<CallbackService>().call<CallbackService::Id::ClockTick>();
1439 }
1440 // Whether or not we had something to do.
1441
1442 // Initialise the value for context.allDone. It will possibly be updated
1443 // below if any of the channels is not done.
1444 //
1445 // Notice that fake input channels (InputChannelState::Pull) cannot possibly
1446 // expect to receive an EndOfStream signal. Thus we do not wait for these
1447 // to be completed. In the case of data source devices, as they do not have
1448 // real data input channels, they have to signal EndOfStream themselves.
1449 auto& state = ref.get<DeviceState>();
1450 auto& spec = ref.get<DeviceSpec const>();
1451 O2_SIGNPOST_ID_FROM_POINTER(cid, device, state.inputChannelInfos.data());
1452 O2_SIGNPOST_START(device, cid, "do_prepare", "Reported channel states.");
1453 context.allDone = std::any_of(state.inputChannelInfos.begin(), state.inputChannelInfos.end(), [cid](const auto& info) {
1454 if (info.channel) {
1455 O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "Input channel %{public}s%{public}s has %zu parts left and is in state %d.",
1456 info.channel->GetName().c_str(), (info.id.value == ChannelIndex::INVALID ? " (non DPL)" : ""), info.parts.fParts.size(), (int)info.state);
1457 } else {
1458 O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "External channel %d is in state %d.", info.id.value, (int)info.state);
1459 }
1460 return (info.parts.fParts.empty() == true && info.state != InputChannelState::Pull);
1461 });
1462 O2_SIGNPOST_END(device, cid, "do_prepare", "End report.");
1463 O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Processing %zu input channels.", spec.inputChannels.size());
1466 static std::vector<int> pollOrder;
1467 pollOrder.resize(state.inputChannelInfos.size());
1468 std::iota(pollOrder.begin(), pollOrder.end(), 0);
1469 std::sort(pollOrder.begin(), pollOrder.end(), [&infos = state.inputChannelInfos](int a, int b) {
1470 return infos[a].oldestForChannel.value < infos[b].oldestForChannel.value;
1471 });
1472
1473 // Nothing to poll...
1474 if (pollOrder.empty()) {
1475 O2_SIGNPOST_END(device, dpid, "do_prepare", "Nothing to poll. Waiting for next iteration.");
1476 return;
1477 }
1478 auto currentOldest = state.inputChannelInfos[pollOrder.front()].oldestForChannel;
1479 auto currentNewest = state.inputChannelInfos[pollOrder.back()].oldestForChannel;
1480 auto delta = currentNewest.value - currentOldest.value;
1481 O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Oldest possible timeframe range %" PRIu64 " => %" PRIu64 " delta %" PRIu64,
1482 (int64_t)currentOldest.value, (int64_t)currentNewest.value, (int64_t)delta);
1483 auto& infos = state.inputChannelInfos;
1484
1485 if (context.balancingInputs) {
1486 static int pipelineLength = DefaultsHelpers::pipelineLength();
1487 static uint64_t ahead = getenv("DPL_MAX_CHANNEL_AHEAD") ? std::atoll(getenv("DPL_MAX_CHANNEL_AHEAD")) : std::max(8, std::min(pipelineLength - 48, pipelineLength / 2));
1488 auto newEnd = std::remove_if(pollOrder.begin(), pollOrder.end(), [&infos, limitNew = currentOldest.value + ahead](int a) -> bool {
1489 return infos[a].oldestForChannel.value > limitNew;
1490 });
1491 for (auto it = pollOrder.begin(); it < pollOrder.end(); it++) {
1492 const auto& channelInfo = state.inputChannelInfos[*it];
1493 if (channelInfo.pollerIndex != -1) {
1494 auto& poller = state.activeInputPollers[channelInfo.pollerIndex];
1495 auto& pollerContext = *(PollerContext*)(poller->data);
1496 if (pollerContext.pollerState == PollerContext::PollerState::Connected || pollerContext.pollerState == PollerContext::PollerState::Suspended) {
1497 bool running = pollerContext.pollerState == PollerContext::PollerState::Connected;
1498 bool shouldBeRunning = it < newEnd;
1499 if (running != shouldBeRunning) {
1500 uv_poll_start(poller, shouldBeRunning ? UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED : 0, &on_socket_polled);
1501 pollerContext.pollerState = shouldBeRunning ? PollerContext::PollerState::Connected : PollerContext::PollerState::Suspended;
1502 }
1503 }
1504 }
1505 }
1506 pollOrder.erase(newEnd, pollOrder.end());
1507 }
1508 O2_SIGNPOST_END(device, dpid, "do_prepare", "%zu channels pass the channel inbalance balance check.", pollOrder.size());
1509
1510 for (auto sci : pollOrder) {
1511 auto& info = state.inputChannelInfos[sci];
1512 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &info);
1513 O2_SIGNPOST_START(device, cid, "channels", "Processing channel %s", info.channel->GetName().c_str());
1514
1515 if (info.state != InputChannelState::Completed && info.state != InputChannelState::Pull) {
1516 context.allDone = false;
1517 }
1518 if (info.state != InputChannelState::Running) {
1519 // Remember to flush data if we are not running
1520 // and there is some message pending.
1521 if (info.parts.Size()) {
1523 }
1524 O2_SIGNPOST_END(device, cid, "channels", "Flushing channel %s which is in state %d and has %zu parts still pending.",
1525 info.channel->GetName().c_str(), (int)info.state, info.parts.Size());
1526 continue;
1527 }
1528 if (info.channel == nullptr) {
1529 O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is nullptr and has %zu parts still pending.",
1530 info.channel->GetName().c_str(), (int)info.state, info.parts.Size());
1531 continue;
1532 }
1533 // Only poll DPL channels for now.
1535 O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is not a DPL channel and has %zu parts still pending.",
1536 info.channel->GetName().c_str(), (int)info.state, info.parts.Size());
1537 continue;
1538 }
1539 auto& socket = info.channel->GetSocket();
1540 // If we have pending events from a previous iteration,
1541 // we do receive in any case.
1542 // Otherwise we check if there is any pending event and skip
1543 // this channel in case there is none.
1544 if (info.hasPendingEvents == 0) {
1545 socket.Events(&info.hasPendingEvents);
1546 // If we do not read, we can continue.
1547 if ((info.hasPendingEvents & 1) == 0 && (info.parts.Size() == 0)) {
1548 O2_SIGNPOST_END(device, cid, "channels", "No pending events and no remaining parts to process for channel %{public}s", info.channel->GetName().c_str());
1549 continue;
1550 }
1551 }
1552 // We can reset this, because it means we have seen at least 1
1553 // message after the UV_READABLE was raised.
1554 info.readPolled = false;
1555 // Notice that there seems to be a difference between the documentation
1556 // of zeromq and the observed behavior. The fact that ZMQ_POLLIN
1557 // is raised does not mean that a message is immediately available to
1558 // read, just that it will be available soon, so the receive can
1559 // still return -2. To avoid this we keep receiving on the socket until
1560 // we get a message. In order not to overflow the DPL queue we process
1561 // one message at the time and we keep track of wether there were more
1562 // to process.
1563 bool newMessages = false;
1564 while (true) {
1565 O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Receiving loop called for channel %{public}s (%d) with oldest possible timeslice %zu",
1566 info.channel->GetName().c_str(), info.id.value, info.oldestForChannel.value);
1567 if (info.parts.Size() < 64) {
1568 fair::mq::Parts parts;
1569 info.channel->Receive(parts, 0);
1570 if (parts.Size()) {
1571 O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Received %zu parts from channel %{public}s (%d).", parts.Size(), info.channel->GetName().c_str(), info.id.value);
1572 }
1573 for (auto&& part : parts) {
1574 info.parts.fParts.emplace_back(std::move(part));
1575 }
1576 newMessages |= true;
1577 }
1578
1579 if (info.parts.Size() >= 0) {
1581 // Receiving data counts as activity now, so that
1582 // We can make sure we process all the pending
1583 // messages without hanging on the uv_run.
1584 break;
1585 }
1586 }
1587 // We check once again for pending events, keeping track if this was the
1588 // case so that we can immediately repeat this loop and avoid remaining
1589 // stuck in uv_run. This is because we will not get notified on the socket
1590 // if more events are pending due to zeromq level triggered approach.
1591 socket.Events(&info.hasPendingEvents);
1592 if (info.hasPendingEvents) {
1593 info.readPolled = false;
1594 // In case there were messages, we consider it as activity
1595 if (newMessages) {
1596 state.lastActiveDataProcessor.store(&context);
1597 }
1598 }
1599 O2_SIGNPOST_END(device, cid, "channels", "Done processing channel %{public}s (%d).",
1600 info.channel->GetName().c_str(), info.id.value);
1601 }
1602}
1603
{
  // NOTE(review): the function signature is not visible in this excerpt; from
  // the body this is the per-loop step of DataProcessingDevice which tries to
  // dispatch completed computations, expires dangling inputs and handles the
  // end-of-stream transition — presumably doRun/doDanglingWork; confirm.
  auto& context = ref.get<DataProcessorContext>();
  O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context);
  auto& state = ref.get<DeviceState>();
  auto& spec = ref.get<DeviceSpec const>();

  // Once the device is fully drained and Idle there is nothing left to do.
  if (state.streaming == StreamingState::Idle) {
    return;
  }

  context.completed.clear();
  context.completed.reserve(16);
  // Dispatch whatever is already complete; a successful dispatch counts as
  // activity for the idle-detection logic below.
  if (DataProcessingDevice::tryDispatchComputation(ref, context.completed)) {
    state.lastActiveDataProcessor.store(&context);
  }
  DanglingContext danglingContext{*context.registry};

  context.preDanglingCallbacks(danglingContext);
  // If nothing was active in this iteration, give user code a chance to run
  // via the Idle callback.
  if (state.lastActiveDataProcessor.load() == nullptr) {
    ref.get<CallbackService>().call<CallbackService::Id::Idle>();
  }
  // Expire lifetime-based inputs (timers, conditions, ...); expired slots
  // count as activity.
  auto activity = ref.get<DataRelayer>().processDanglingInputs(context.expirationHandlers, *context.registry, true);
  if (activity.expiredSlots > 0) {
    state.lastActiveDataProcessor = &context;
  }

  // Expiration may have completed more records: try dispatching again.
  context.completed.clear();
  if (DataProcessingDevice::tryDispatchComputation(ref, context.completed)) {
    state.lastActiveDataProcessor = &context;
  }

  context.postDanglingCallbacks(danglingContext);

  // If we got notified that all the sources are done, we call the EndOfStream
  // callback and return false. Notice that what happens next is actually
  // dependent on the callback, not something which is controlled by the
  // framework itself.
  if (context.allDone == true && state.streaming == StreamingState::Streaming) {
    // NOTE(review): only the activity flag is visible here; the switch to
    // StreamingState::EndOfStreaming presumably happens in a statement not
    // shown in this excerpt — confirm against the full file.
    state.lastActiveDataProcessor = &context;
  }

  if (state.streaming == StreamingState::EndOfStreaming) {
    O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "We are in EndOfStreaming. Flushing queues.");
    // We keep processing data until we are Idle.
    // FIXME: not sure this is the correct way to drain the queues, but
    // I guess we will see.
    auto& relayer = ref.get<DataRelayer>();

    // Devices whose inputs are all internally generated do not process at EOS.
    bool shouldProcess = DataProcessingHelpers::hasOnlyGenerated(spec) == false;

    // Drain: keep dispatching until nothing is ready anymore.
    while (DataProcessingDevice::tryDispatchComputation(ref, context.completed) && shouldProcess) {
      relayer.processDanglingInputs(context.expirationHandlers, *context.registry, false);
    }

    auto& timingInfo = ref.get<TimingInfo>();
    // We should keep the data generated at end of stream only for those
    // which are not sources.
    timingInfo.keepAtEndOfStream = shouldProcess;
    // Fill timinginfo with some reasonable values for data sent with endOfStream
    timingInfo.timeslice = relayer.getOldestPossibleOutput().timeslice.value;
    timingInfo.tfCounter = -1;
    timingInfo.firstTForbit = -1;
    // timingInfo.runNumber = ; // Not sure where to get this if not already set
    timingInfo.creation = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::system_clock::now()).time_since_epoch().count();
    O2_SIGNPOST_EVENT_EMIT(calibration, dpid, "calibration", "TimingInfo.keepAtEndOfStream %d", timingInfo.keepAtEndOfStream);

    EndOfStreamContext eosContext{*context.registry, ref.get<DataAllocator>()};

    // Pre/post end-of-stream hooks bracket the user EndOfStream callback.
    context.preEOSCallbacks(eosContext);
    auto& streamContext = ref.get<StreamContext>();
    streamContext.preEOSCallbacks(eosContext);
    ref.get<CallbackService>().call<CallbackService::Id::EndOfStream>(eosContext);
    streamContext.postEOSCallbacks(eosContext);
    context.postEOSCallbacks(eosContext);

    for (auto& channel : spec.outputChannels) {
      O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Sending end of stream to %{public}s.", channel.name.c_str());
      // NOTE(review): the statement actually sending end-of-stream on the
      // channel is not visible in this excerpt.
    }
    // This is needed because the transport is deleted before the device.
    relayer.clear();
    // In case we should process, note the data processor responsible for it
    if (shouldProcess) {
      state.lastActiveDataProcessor = &context;
    }
    // On end of stream we shut down all output pollers.
    O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Shutting down output pollers.");
    for (auto& poller : state.activeOutputPollers) {
      uv_poll_stop(poller);
    }
    return;
  }

  if (state.streaming == StreamingState::Idle) {
    // On end of stream we shut down all output pollers.
    O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Shutting down output pollers.");
    for (auto& poller : state.activeOutputPollers) {
      uv_poll_stop(poller);
    }
  }

  return;
}
1712
{
  // NOTE(review): the signature is not visible in this excerpt. The body
  // reads the member mServiceRegistry, so this is a DataProcessingDevice
  // member — presumably a teardown path (destructor or reset); confirm.
  ServiceRegistryRef ref{mServiceRegistry};
  // Drop any cached messages before the transport goes away.
  ref.get<DataRelayer>().clear();
  auto& deviceContext = ref.get<DeviceContext>();
  // If the signal handler is there, we should
  // hide the registry from it, so that we do not
  // end up calling the signal handler on something
  // which might not be there anymore.
  if (deviceContext.sigusr1Handle) {
    deviceContext.sigusr1Handle->data = nullptr;
  }
  // Makes sure we do not have a working context on
  // shutdown.
  for (auto& handle : ref.get<DeviceState>().activeSignals) {
    handle->data = nullptr;
  }
}
1731
1734 {
1735 }
1736};
1737
1738auto forwardOnInsertion(ServiceRegistryRef& ref, std::span<fair::mq::MessagePtr>& messages) -> void
1739{
1740 O2_SIGNPOST_ID_GENERATE(sid, forwarding);
1741
1742 auto& spec = ref.get<DeviceSpec const>();
1743 auto& context = ref.get<DataProcessorContext>();
1744 if (context.forwardPolicy == ForwardPolicy::AfterProcessing || spec.forwards.empty()) {
1745 O2_SIGNPOST_EVENT_EMIT(device, sid, "device", "Early forwardinding not enabled / needed.");
1746 return;
1747 }
1748
1749 O2_SIGNPOST_EVENT_EMIT(device, sid, "device", "Early forwardinding before injecting data into relayer.");
1750 auto& timesliceIndex = ref.get<TimesliceIndex>();
1751 auto oldestTimeslice = timesliceIndex.getOldestPossibleOutput();
1752
1753 auto& proxy = ref.get<FairMQDeviceProxy>();
1754
1755 O2_SIGNPOST_START(forwarding, sid, "forwardInputs",
1756 "Starting forwarding for incoming messages with oldestTimeslice %zu with copy",
1757 oldestTimeslice.timeslice.value);
1758 std::vector<fair::mq::Parts> forwardedParts(proxy.getNumForwardChannels());
1759 DataProcessingHelpers::routeForwardedMessages(proxy, messages, forwardedParts, true, false);
1760
1761 for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
1762 if (forwardedParts[fi].Size() == 0) {
1763 continue;
1764 }
1765 ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi});
1766 auto& parts = forwardedParts[fi];
1767 if (info.policy == nullptr) {
1768 O2_SIGNPOST_EVENT_EMIT_ERROR(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d has no policy.", info.name.c_str(), fi);
1769 continue;
1770 }
1771 O2_SIGNPOST_EVENT_EMIT(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d", info.name.c_str(), fi);
1772 info.policy->forward(parts, ChannelIndex{fi}, ref);
1773 }
1774 auto& asyncQueue = ref.get<AsyncQueue>();
1775 auto& decongestion = ref.get<DecongestionService>();
1776 O2_SIGNPOST_ID_GENERATE(aid, async_queue);
1777 O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputs", "Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);
1778 AsyncQueueHelpers::post(asyncQueue, AsyncTask{.timeslice = oldestTimeslice.timeslice, .id = decongestion.oldestPossibleTimesliceTask, .debounce = -1, .callback = decongestionCallbackLate}
1779 .user<DecongestionContext>({.ref = ref, .oldestTimeslice = oldestTimeslice}));
1780 O2_SIGNPOST_END(forwarding, sid, "forwardInputs", "Forwarding done");
1781};
1782
{
  // NOTE(review): the enclosing signature is not part of this excerpt. The
  // body uses `ref` (a ServiceRegistryRef) and `info` (per-input-channel
  // state with .parts/.state/.id members) as free names — presumably the
  // function parameters; confirm against the full file.
  using InputInfo = DataRelayer::InputInfo;

  auto& context = ref.get<DataProcessorContext>();
  // This is the same id as the upper level function, so we get the events
  // associated with the same interval. We will simply use "handle_data" as
  // the category.
  O2_SIGNPOST_ID_FROM_POINTER(cid, device, &info);

  // This is how we validate inputs. I.e. we try to enforce the O2 Data model
  // and we do a few stats. We bind parts as a lambda captured variable, rather
  // than an input, because we do not want the outer loop actually be exposed
  // to the implementation details of the messaging layer.
  //
  // Classifies every (header, payload...) group in info.parts, returning one
  // InputInfo per group, or std::nullopt when the messages do not add up to
  // a consistent set of pairs.
  auto getInputTypes = [&info, &context]() -> std::optional<std::vector<InputInfo>> {
    O2_SIGNPOST_ID_FROM_POINTER(cid, device, &info);
    auto ref = ServiceRegistryRef{*context.registry};
    auto& stats = ref.get<DataProcessingStats>();
    auto& state = ref.get<DeviceState>();
    auto& parts = info.parts;
    stats.updateStats({(int)ProcessingStatsId::TOTAL_INPUTS, DataProcessingStats::Op::Set, (int64_t)parts.Size()});

    std::vector<InputInfo> results;
    // we can reserve the upper limit
    results.reserve(parts.Size() / 2);
    size_t nTotalPayloads = 0;

    // Record one entry and count the payloads beyond the first, so the
    // consistency check at the end can be compared against parts.Size().
    auto insertInputInfo = [&results, &nTotalPayloads](size_t position, size_t length, InputType type, ChannelIndex index) {
      results.emplace_back(position, length, type, index);
      if (type != InputType::Invalid && length > 1) {
        nTotalPayloads += length - 1;
      }
    };

    // Messages arrive as (header, payload...) groups; the stride of 2 walks
    // header positions, with pi adjusted below for split payloads.
    for (size_t pi = 0; pi < parts.Size(); pi += 2) {
      auto* headerData = parts.At(pi)->GetData();
      auto sih = o2::header::get<SourceInfoHeader*>(headerData);
      auto dh = o2::header::get<DataHeader*>(headerData);
      if (sih) {
        // State-change notification from the upstream source.
        O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got SourceInfoHeader with state %d", (int)sih->state);
        info.state = sih->state;
        insertInputInfo(pi, 2, InputType::SourceInfo, info.id);
        state.lastActiveDataProcessor = &context;
        if (dh) {
          LOGP(error, "Found data attached to a SourceInfoHeader");
        }
        continue;
      }
      auto dih = o2::header::get<DomainInfoHeader*>(headerData);
      if (dih) {
        // Oldest-possible-timeslice bookkeeping message.
        O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got DomainInfoHeader with oldestPossibleTimeslice %d", (int)dih->oldestPossibleTimeslice);
        insertInputInfo(pi, 2, InputType::DomainInfo, info.id);
        state.lastActiveDataProcessor = &context;
        if (dh) {
          LOGP(error, "Found data attached to a DomainInfoHeader");
        }
        continue;
      }
      if (!dh) {
        insertInputInfo(pi, 0, InputType::Invalid, info.id);
        O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "Header is not a DataHeader?");
        continue;
      }
      // The payload message must be at least as big as the size declared in
      // the header.
      if (dh->payloadSize > parts.At(pi + 1)->GetSize()) {
        insertInputInfo(pi, 0, InputType::Invalid, info.id);
        O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "DataHeader payloadSize mismatch");
        continue;
      }
      auto dph = o2::header::get<DataProcessingHeader*>(headerData);
      // We only deal with the tracking of parts if the log is enabled.
      // This is because in principle we should track the size of each of
      // the parts and sum it up. Not for now.
      O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerData);
      O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader %{public}-4s/%{public}-16s/%d with splitPayloadParts %d and splitPayloadIndex %d",
                        dh->dataOrigin.str, dh->dataDescription.str, dh->subSpecification, dh->splitPayloadParts, dh->splitPayloadIndex);
      if (!dph) {
        insertInputInfo(pi, 2, InputType::Invalid, info.id);
        O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "Header stack does not contain DataProcessingHeader");
        continue;
      }
      if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
        // this is indicating a sequence of payloads following the header
        // FIXME: we will probably also set the DataHeader version
        insertInputInfo(pi, dh->splitPayloadParts + 1, InputType::Data, info.id);
        // Together with the loop's += 2 this advances past one header plus
        // splitPayloadParts payload messages.
        pi += dh->splitPayloadParts - 1;
      } else {
        // We can set the type for the next splitPayloadParts
        // because we are guaranteed they are all the same.
        // If splitPayloadParts = 0, we assume that means there is only one (header, payload)
        // pair.
        size_t finalSplitPayloadIndex = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
        if (finalSplitPayloadIndex > parts.Size()) {
          O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "DataHeader::splitPayloadParts invalid");
          insertInputInfo(pi, 0, InputType::Invalid, info.id);
          continue;
        }
        insertInputInfo(pi, 2, InputType::Data, info.id);
        for (; pi + 2 < finalSplitPayloadIndex; pi += 2) {
          insertInputInfo(pi + 2, 2, InputType::Data, info.id);
        }
      }
    }
    // Cross-check: every message must be accounted for, either as a result
    // entry or as one of the extra payloads.
    if (results.size() + nTotalPayloads != parts.Size()) {
      O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "inconsistent number of inputs extracted. %zu vs parts (%zu)", results.size() + nTotalPayloads, parts.Size());
      return std::nullopt;
    }
    return results;
  };

  // Record a malformed-input error.
  // NOTE(review): the statement doing the actual reporting (presumably a
  // stats update using `message`) is not visible in this excerpt.
  auto reportError = [ref](const char* message) {
    auto& stats = ref.get<DataProcessingStats>();
  };

  // Relays every classified input into the DataRelayer, handling
  // backpressure notifications, SourceInfo / DomainInfo messages and
  // invalid parts.
  auto handleValidMessages = [&info, ref, &reportError, &context](std::vector<InputInfo> const& inputInfos) {
    auto& relayer = ref.get<DataRelayer>();
    auto& state = ref.get<DeviceState>();
    static WaitBackpressurePolicy policy;
    auto& parts = info.parts;
    // We relay execution to make sure we have a complete set of parts
    // available.
    bool hasBackpressure = false;
    size_t minBackpressureTimeslice = -1;
    bool hasData = false;
    size_t oldestPossibleTimeslice = -1;
    static std::vector<int> ordering;
    // Same as inputInfos but with iota.
    ordering.resize(inputInfos.size());
    std::iota(ordering.begin(), ordering.end(), 0);
    // stable sort orderings by type and position
    std::stable_sort(ordering.begin(), ordering.end(), [&inputInfos](int const& a, int const& b) {
      auto const& ai = inputInfos[a];
      auto const& bi = inputInfos[b];
      if (ai.type != bi.type) {
        return ai.type < bi.type;
      }
      return ai.position < bi.position;
    });
    for (size_t ii = 0; ii < inputInfos.size(); ++ii) {
      auto const& input = inputInfos[ordering[ii]];
      switch (input.type) {
        case InputType::Data: {
          hasData = true;
          auto headerIndex = input.position;
          auto nMessages = 0;
          auto nPayloadsPerHeader = 0;
          if (input.size > 2) {
            // header and multiple payload sequence
            nMessages = input.size;
            nPayloadsPerHeader = nMessages - 1;
          } else {
            // multiple header-payload pairs
            auto dh = o2::header::get<DataHeader*>(parts.At(headerIndex)->GetData());
            nMessages = dh->splitPayloadParts > 0 ? dh->splitPayloadParts * 2 : 2;
            nPayloadsPerHeader = 1;
            // Skip the sibling header/payload pairs in the outer loop.
            ii += (nMessages / 2) - 1;
          }
          // Invoked by the relayer when a slot gets dropped: forward the
          // dropped messages so downstream consumers do not stall.
          auto onDrop = [ref](TimesliceSlot slot, std::vector<MessageSet>& dropped, TimesliceIndex::OldestOutputInfo oldestOutputInfo) {
            O2_SIGNPOST_ID_GENERATE(cid, async_queue);
            O2_SIGNPOST_EVENT_EMIT(async_queue, cid, "onDrop", "Dropping message from slot %zu. Forwarding as needed. Timeslice %zu",
                                   slot.index, oldestOutputInfo.timeslice.value);
            ref.get<AsyncQueue>();
            ref.get<DecongestionService>();
            ref.get<DataRelayer>();
            // Get the current timeslice for the slot.
            auto& variables = ref.get<TimesliceIndex>().getVariablesForSlot(slot);
            forwardInputs(ref, slot, dropped, oldestOutputInfo, false, true);
          };

          auto relayed = relayer.relay(parts.At(headerIndex)->GetData(),
                                       &parts.At(headerIndex),
                                       input,
                                       nMessages,
                                       nPayloadsPerHeader,
                                       context.forwardPolicy == ForwardPolicy::AtInjection ? forwardOnInsertion : nullptr,
                                       onDrop);
          switch (relayed.type) {
              // NOTE(review): the case labels of this inner switch are not
              // visible in this excerpt. The first branch below notifies the
              // transition *into* backpressure, the second the transition
              // back to normal operations.
              if (info.normalOpsNotified == true && info.backpressureNotified == false) {
                LOGP(alarm, "Backpressure on channel {}. Waiting.", info.channel->GetName());
                auto& monitoring = ref.get<o2::monitoring::Monitoring>();
                monitoring.send(o2::monitoring::Metric{1, fmt::format("backpressure_{}", info.channel->GetName())});
                info.backpressureNotified = true;
                info.normalOpsNotified = false;
              }
              policy.backpressure(info);
              hasBackpressure = true;
              minBackpressureTimeslice = std::min<size_t>(minBackpressureTimeslice, relayed.timeslice.value);
              break;
              if (info.normalOpsNotified == false && info.backpressureNotified == true) {
                LOGP(info, "Back to normal on channel {}.", info.channel->GetName());
                auto& monitoring = ref.get<o2::monitoring::Monitoring>();
                monitoring.send(o2::monitoring::Metric{0, fmt::format("backpressure_{}", info.channel->GetName())});
                info.normalOpsNotified = true;
                info.backpressureNotified = false;
              }
              break;
          }
        } break;
        case InputType::SourceInfo: {
          LOGP(detail, "Received SourceInfo");
          auto& context = ref.get<DataProcessorContext>();
          state.lastActiveDataProcessor = &context;
          auto headerIndex = input.position;
          auto payloadIndex = input.position + 1;
          assert(payloadIndex < parts.Size());
          // FIXME: the message with the end of stream cannot contain
          // split parts.
          parts.At(headerIndex).reset(nullptr);
          parts.At(payloadIndex).reset(nullptr);
          // for (size_t i = 0; i < dh->splitPayloadParts > 0 ? dh->splitPayloadParts * 2 - 1 : 1; ++i) {
          //   parts.At(headerIndex + 1 + i).reset(nullptr);
          // }
          // pi += dh->splitPayloadParts > 0 ? dh->splitPayloadParts - 1 : 0;

        } break;
        case InputType::DomainInfo: {
          auto& context = ref.get<DataProcessorContext>();
          state.lastActiveDataProcessor = &context;
          auto headerIndex = input.position;
          auto payloadIndex = input.position + 1;
          assert(payloadIndex < parts.Size());
          // FIXME: the message with the end of stream cannot contain
          // split parts.

          auto dih = o2::header::get<DomainInfoHeader*>(parts.At(headerIndex)->GetData());
          // While backpressured, do not advance past the timeslice which
          // caused the backpressure.
          if (hasBackpressure && dih->oldestPossibleTimeslice >= minBackpressureTimeslice) {
            break;
          }
          oldestPossibleTimeslice = std::min(oldestPossibleTimeslice, dih->oldestPossibleTimeslice);
          LOGP(debug, "Got DomainInfoHeader, new oldestPossibleTimeslice {} on channel {}", oldestPossibleTimeslice, info.id.value);
          parts.At(headerIndex).reset(nullptr);
          parts.At(payloadIndex).reset(nullptr);
        } break;
        case InputType::Invalid: {
          reportError("Invalid part found.");
        } break;
      }
    }
    // Propagate the updated oldest-possible-timeslice, if any DomainInfo
    // message actually changed it.
    if (oldestPossibleTimeslice != (size_t)-1) {
      info.oldestForChannel = {oldestPossibleTimeslice};
      auto& context = ref.get<DataProcessorContext>();
      context.domainInfoUpdatedCallback(*context.registry, oldestPossibleTimeslice, info.id);
      ref.get<CallbackService>().call<CallbackService::Id::DomainInfoUpdated>((ServiceRegistryRef)*context.registry, (size_t)oldestPossibleTimeslice, (ChannelIndex)info.id);
      state.lastActiveDataProcessor = &context;
    }
    // Consumed messages were reset to nullptr above: compact them away.
    // Whatever remains was backpressured and will be retried later.
    auto it = std::remove_if(parts.fParts.begin(), parts.fParts.end(), [](auto& msg) -> bool { return msg.get() == nullptr; });
    parts.fParts.erase(it, parts.end());
    if (parts.fParts.size()) {
      LOG(debug) << parts.fParts.size() << " messages backpressured";
    }
  };

  // Second part. This is the actual outer loop we want to obtain, with
  // implementation details which can be read. Notice how most of the state
  // is actually hidden. For example we do not expose what "input" is. This
  // will allow us to keep the same toplevel logic even if the actual meaning
  // of input is changed (for example we might move away from multipart
  // messages). Notice also that we need to act diffently depending on the
  // actual CompletionOp we want to perform. In particular forwarding inputs
  // also gets rid of them from the cache.
  auto inputTypes = getInputTypes();
  if (bool(inputTypes) == false) {
    reportError("Parts should come in couples. Dropping it.");
    return;
  }
  handleValidMessages(*inputTypes);
  return;
}
2065
2066namespace
2067{
/// Smallest and largest per-part latency observed across an InputRecord,
/// in the same units as DataProcessingHeader::creation. Initialized so that
/// the first observed sample replaces both fields (max() for min, 0 for max).
struct InputLatency {
  uint64_t minLatency = std::numeric_limits<uint64_t>::max();
  uint64_t maxLatency = std::numeric_limits<uint64_t>::min(); // i.e. 0 for unsigned
};
2072
2073auto calculateInputRecordLatency(InputRecord const& record, uint64_t currentTime) -> InputLatency
2074{
2075 InputLatency result;
2076
2077 for (auto& item : record) {
2078 auto* header = o2::header::get<DataProcessingHeader*>(item.header);
2079 if (header == nullptr) {
2080 continue;
2081 }
2082 int64_t partLatency = (0x7fffffffffffffff & currentTime) - (0x7fffffffffffffff & header->creation);
2083 if (partLatency < 0) {
2084 partLatency = 0;
2085 }
2086 result.minLatency = std::min(result.minLatency, (uint64_t)partLatency);
2087 result.maxLatency = std::max(result.maxLatency, (uint64_t)partLatency);
2088 }
2089 return result;
2090};
2091
2092auto calculateTotalInputRecordSize(InputRecord const& record) -> int
2093{
2094 size_t totalInputSize = 0;
2095 for (auto& item : record) {
2096 auto* header = o2::header::get<DataHeader*>(item.header);
2097 if (header == nullptr) {
2098 continue;
2099 }
2100 totalInputSize += header->payloadSize;
2101 }
2102 return totalInputSize;
2103};
2104
/// Atomically raise @a maximum_value to @a value if @a value is larger.
///
/// Lock-free compare-exchange loop: on CAS failure the observed value is
/// refreshed with the currently stored one, so the loop exits as soon as the
/// stored maximum is already >= @a value. Safe under concurrent callers.
template <typename T>
void update_maximum(std::atomic<T>& maximum_value, T const& value) noexcept
{
  for (T observed = maximum_value; observed < value;) {
    // On failure, compare_exchange_weak reloads `observed` and we re-check.
    if (maximum_value.compare_exchange_weak(observed, value)) {
      break;
    }
  }
}
2113} // namespace
2114
2115bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::vector<DataRelayer::RecordAction>& completed)
2116{
2117 auto& context = ref.get<DataProcessorContext>();
2118 LOGP(debug, "DataProcessingDevice::tryDispatchComputation");
2119 // This is the actual hidden state for the outer loop. In case we decide we
2120 // want to support multithreaded dispatching of operations, I can simply
2121 // move these to some thread local store and the rest of the lambdas
2122 // should work just fine.
2123 std::vector<MessageSet> currentSetOfInputs;
2124
2125 //
2126 auto getInputSpan = [ref, &currentSetOfInputs](TimesliceSlot slot, bool consume = true) {
2127 auto& relayer = ref.get<DataRelayer>();
2128 if (consume) {
2129 currentSetOfInputs = relayer.consumeAllInputsForTimeslice(slot);
2130 } else {
2131 currentSetOfInputs = relayer.consumeExistingInputsForTimeslice(slot);
2132 }
2133 auto getter = [&currentSetOfInputs](size_t i, size_t partindex) -> DataRef {
2134 if (currentSetOfInputs[i].getNumberOfPairs() > partindex) {
2135 const char* headerptr = nullptr;
2136 const char* payloadptr = nullptr;
2137 size_t payloadSize = 0;
2138 // - each input can have multiple parts
2139 // - "part" denotes a sequence of messages belonging together, the first message of the
2140 // sequence is the header message
2141 // - each part has one or more payload messages
2142 // - InputRecord provides all payloads as header-payload pair
2143 auto const& headerMsg = currentSetOfInputs[i].associatedHeader(partindex);
2144 auto const& payloadMsg = currentSetOfInputs[i].associatedPayload(partindex);
2145 headerptr = static_cast<char const*>(headerMsg->GetData());
2146 payloadptr = payloadMsg ? static_cast<char const*>(payloadMsg->GetData()) : nullptr;
2147 payloadSize = payloadMsg ? payloadMsg->GetSize() : 0;
2148 return DataRef{nullptr, headerptr, payloadptr, payloadSize};
2149 }
2150 return DataRef{};
2151 };
2152 auto nofPartsGetter = [&currentSetOfInputs](size_t i) -> size_t {
2153 return currentSetOfInputs[i].getNumberOfPairs();
2154 };
2155 auto refCountGetter = [&currentSetOfInputs](size_t idx) -> int {
2156 auto& header = static_cast<const fair::mq::shmem::Message&>(*currentSetOfInputs[idx].header(0));
2157 return header.GetRefCount();
2158 };
2159 return InputSpan{getter, nofPartsGetter, refCountGetter, currentSetOfInputs.size()};
2160 };
2161
2162 auto markInputsAsDone = [ref](TimesliceSlot slot) -> void {
2163 auto& relayer = ref.get<DataRelayer>();
2165 };
2166
2167 // I need a preparation step which gets the current timeslice id and
2168 // propagates it to the various contextes (i.e. the actual entities which
2169 // create messages) because the messages need to have the timeslice id into
2170 // it.
2171 auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void {
2172 auto& relayer = ref.get<DataRelayer>();
2173 auto& timingInfo = ref.get<TimingInfo>();
2174 auto timeslice = relayer.getTimesliceForSlot(i);
2175
2176 timingInfo.timeslice = timeslice.value;
2177 timingInfo.tfCounter = relayer.getFirstTFCounterForSlot(i);
2178 timingInfo.firstTForbit = relayer.getFirstTFOrbitForSlot(i);
2179 timingInfo.runNumber = relayer.getRunNumberForSlot(i);
2180 timingInfo.creation = relayer.getCreationTimeForSlot(i);
2181 };
2182 auto updateRunInformation = [ref](TimesliceSlot i) -> void {
2183 auto& dataProcessorContext = ref.get<DataProcessorContext>();
2184 auto& relayer = ref.get<DataRelayer>();
2185 auto& timingInfo = ref.get<TimingInfo>();
2186 auto timeslice = relayer.getTimesliceForSlot(i);
2187 // We report wether or not this timing info refers to a new Run.
2188 timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber;
2189 // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice
2190 timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0);
2191 // FIXME: for now there is only one stream, however we
2192 // should calculate this correctly once we finally get the
2193 // the StreamContext in.
2194 timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged;
2195 };
2196
2197 // When processing them, timers will have to be cleaned up
2198 // to avoid double counting them.
2199 // This was actually the easiest solution we could find for
2200 // O2-646.
2201 auto cleanTimers = [&currentSetOfInputs](TimesliceSlot slot, InputRecord& record) {
2202 assert(record.size() == currentSetOfInputs.size());
2203 for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) {
2204 // assuming that for timer inputs we do have exactly one PartRef object
2205 // in the MessageSet, multiple PartRef Objects are only possible for either
2206 // split payload messages of wildcard matchers, both for data inputs
2207 DataRef input = record.getByPos(ii);
2208 if (input.spec->lifetime != Lifetime::Timer) {
2209 continue;
2210 }
2211 if (input.header == nullptr) {
2212 continue;
2213 }
2214 // This will hopefully delete the message.
2215 currentSetOfInputs[ii].clear();
2216 }
2217 };
2218
2219 // Function to cleanup record. For the moment we
2220 // simply use it to keep track of input messages
2221 // which are not needed, to display them in the GUI.
2222 auto cleanupRecord = [](InputRecord& record) {
2223 if (O2_LOG_ENABLED(parts) == false) {
2224 return;
2225 }
2226 for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) {
2227 DataRef input = record.getByPos(pi);
2228 if (input.header == nullptr) {
2229 continue;
2230 }
2231 auto sih = o2::header::get<SourceInfoHeader*>(input.header);
2232 if (sih) {
2233 continue;
2234 }
2235
2236 auto dh = o2::header::get<DataHeader*>(input.header);
2237 if (!dh) {
2238 continue;
2239 }
2240 // We use the address of the first header of a split payload
2241 // to identify the interval.
2242 O2_SIGNPOST_ID_FROM_POINTER(pid, parts, dh);
2243 O2_SIGNPOST_END(parts, pid, "parts", "Cleaning up parts associated to %p", dh);
2244
2245 // No split parts, we simply skip the payload
2246 if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
2247 // this is indicating a sequence of payloads following the header
2248 // FIXME: we will probably also set the DataHeader version
2249 pi += dh->splitPayloadParts - 1;
2250 } else {
2251 size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
2252 }
2253 }
2254 };
2255
2256 ref.get<DataRelayer>().getReadyToProcess(completed);
2257 if (completed.empty() == true) {
2258 LOGP(debug, "No computations available for dispatching.");
2259 return false;
2260 }
2261
// Invoked after the user processing callback ran for a given action.
// Publishes a human-readable per-slot relayer state string of the form
// "<pipelineLength> <one char per input>" where '3' marks a valid input and
// '0' an invalid one, then refreshes size and latency statistics.
// tStart is in nanoseconds (uv_hrtime), tStartMilli in milliseconds since epoch.
2262 auto postUpdateStats = [ref](DataRelayer::RecordAction const& action, InputRecord const& record, uint64_t tStart, uint64_t tStartMilli) {
2263 auto& stats = ref.get<DataProcessingStats>();
2264 auto& states = ref.get<DataProcessingStates>();
// Make writes done by the processing callback visible before publishing stats.
2265 std::atomic_thread_fence(std::memory_order_release);
// NOTE(review): fixed 1024-byte buffer with no bounds check — assumes
// record.size() + prefix always fits; confirm an upper bound on inputs.
2266 char relayerSlotState[1024];
2267 int written = snprintf(relayerSlotState, 1024, "%d ", DefaultsHelpers::pipelineLength());
// buffer points just past the "<pipelineLength> " prefix.
2268 char* buffer = relayerSlotState + written;
2269 for (size_t ai = 0; ai != record.size(); ai++) {
2270 buffer[ai] = record.isValid(ai) ? '3' : '0';
2271 }
2272 buffer[record.size()] = 0;
// One DATA_RELAYER_BASE state entry per slot; size covers prefix + flags.
2273 states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index),
2274 .size = (int)(record.size() + buffer - relayerSlotState),
2275 .data = relayerSlotState});
2276 uint64_t tEnd = uv_hrtime();
2277 // tEnd and tStart are in nanoseconds according to https://docs.libuv.org/en/v1.x/misc.html#c.uv_hrtime
2278 int64_t wallTimeMs = (tEnd - tStart) / 1000000;
2280 // Sum up the total wall time, in milliseconds.
2282 // The time interval is in seconds while tEnd - tStart is in nanoseconds, so we divide by 1000000 to get the fraction in ms/s.
// LAST_* are per-action snapshots (Set); TOTAL_* accumulates (Add).
2284 stats.updateStats({(int)ProcessingStatsId::LAST_PROCESSED_SIZE, DataProcessingStats::Op::Set, calculateTotalInputRecordSize(record)});
2285 stats.updateStats({(int)ProcessingStatsId::TOTAL_PROCESSED_SIZE, DataProcessingStats::Op::Add, calculateTotalInputRecordSize(record)});
2286 auto latency = calculateInputRecordLatency(record, tStartMilli);
2287 stats.updateStats({(int)ProcessingStatsId::LAST_MIN_LATENCY, DataProcessingStats::Op::Set, (int)latency.minLatency});
2288 stats.updateStats({(int)ProcessingStatsId::LAST_MAX_LATENCY, DataProcessingStats::Op::Set, (int)latency.maxLatency});
// Invocation counter shared across all calls of this lambda in the process.
2289 static int count = 0;
2291 count++;
2292 };
2293
// Invoked right before the user processing callback for a given action.
// Mirrors postUpdateStats: publishes the per-slot relayer state string,
// here using '2' to mark inputs about to be processed ('0' = invalid).
2294 auto preUpdateStats = [ref](DataRelayer::RecordAction const& action, InputRecord const& record, uint64_t) {
2295 auto& states = ref.get<DataProcessingStates>();
// Make prior writes visible before publishing the state snapshot.
2296 std::atomic_thread_fence(std::memory_order_release);
// NOTE(review): fixed 1024-byte buffer with no bounds check — assumes the
// "<pipelineLength> " prefix plus one char per input always fits; confirm.
2297 char relayerSlotState[1024];
2298 snprintf(relayerSlotState, 1024, "%d ", DefaultsHelpers::pipelineLength());
// Locate the char right after the space terminating the pipeline-length prefix.
2299 char* buffer = strchr(relayerSlotState, ' ') + 1;
2300 for (size_t ai = 0; ai != record.size(); ai++) {
2301 buffer[ai] = record.isValid(ai) ? '2' : '0';
2302 }
2303 buffer[record.size()] = 0;
2304 states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), .size = (int)(record.size() + buffer - relayerSlotState), .data = relayerSlotState});
2305 };
2306
2307 // This is the main dispatching loop
2308 auto& state = ref.get<DeviceState>();
2309 auto& spec = ref.get<DeviceSpec const>();
2310
2311 auto& dpContext = ref.get<DataProcessorContext>();
2312 auto& streamContext = ref.get<StreamContext>();
2313 O2_SIGNPOST_ID_GENERATE(sid, device);
2314 O2_SIGNPOST_START(device, sid, "device", "Start processing ready actions");
2315
2316 auto& stats = ref.get<DataProcessingStats>();
2317 auto& relayer = ref.get<DataRelayer>();
2318 using namespace o2::framework;
2319 stats.updateStats({(int)ProcessingStatsId::PENDING_INPUTS, DataProcessingStats::Op::Set, static_cast<int64_t>(relayer.getParallelTimeslices() - completed.size())});
2320 stats.updateStats({(int)ProcessingStatsId::INCOMPLETE_INPUTS, DataProcessingStats::Op::Set, completed.empty() ? 1 : 0});
2321 switch (spec.completionPolicy.order) {
2323 std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.timeslice.value < b.timeslice.value; });
2324 break;
2326 std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.slot.index < b.slot.index; });
2327 break;
2329 default:
2330 break;
2331 }
2332
2333 for (auto action : completed) {
2334 O2_SIGNPOST_ID_GENERATE(aid, device);
2335 O2_SIGNPOST_START(device, aid, "device", "Processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());
2337 O2_SIGNPOST_END(device, aid, "device", "Waiting for more data.");
2338 continue;
2339 }
2340
2341 bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume ||
2343 prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot});
2347 updateRunInformation(TimesliceSlot{action.slot});
2348 }
2349 InputSpan span = getInputSpan(action.slot, shouldConsume);
2350 auto& spec = ref.get<DeviceSpec const>();
2351 InputRecord record{spec.inputs,
2352 span,
2353 *context.registry};
2354 ProcessingContext processContext{record, ref, ref.get<DataAllocator>()};
2355 {
2356 // Notice this should be thread safe and reentrant
2357 // as it is called from many threads.
2358 streamContext.preProcessingCallbacks(processContext);
2359 dpContext.preProcessingCallbacks(processContext);
2360 }
2362 context.postDispatchingCallbacks(processContext);
2363 if (spec.forwards.empty() == false) {
2364 auto& timesliceIndex = ref.get<TimesliceIndex>();
2365 forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false);
2366 O2_SIGNPOST_END(device, aid, "device", "Forwarding inputs consume: %d.", false);
2367 continue;
2368 }
2369 }
2370 // If there is no optional inputs we canForwardEarly
2371 // the messages to that parallel processing can happen.
2372 // In this case we pass true to indicate that we want to
2373 // copy the messages to the subsequent data processor.
2374 bool hasForwards = spec.forwards.empty() == false;
2376
2377 if (context.forwardPolicy == ForwardPolicy::AtCompletionPolicySatisified && hasForwards && consumeSomething) {
2378 O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
2379 auto& timesliceIndex = ref.get<TimesliceIndex>();
2380 forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), true, action.op == CompletionPolicy::CompletionOp::Consume);
2381 } else if (context.forwardPolicy == ForwardPolicy::AtInjection && hasForwards && consumeSomething) {
2382 // We used to do fowarding here, however we now do it much earlier.
2383 // We still need to clean the inputs which were already consumed
2384 // via ConsumeExisting and which still have an header to hold the slot.
2385 // FIXME: do we? This should really happen when we do the forwarding on
2386 // insertion, because otherwise we lose the relevant information on how to
2387 // navigate the set of headers. We could actually rely on the messageset index,
2388 // is that the right thing to do though?
2389 O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "cleaning early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
2390 auto& timesliceIndex = ref.get<TimesliceIndex>();
2391 cleanEarlyForward(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), true, action.op == CompletionPolicy::CompletionOp::Consume);
2392 }
2393
2394 markInputsAsDone(action.slot);
2395
2396 uint64_t tStart = uv_hrtime();
2397 uint64_t tStartMilli = TimingHelpers::getRealtimeSinceEpochStandalone();
2398 preUpdateStats(action, record, tStart);
2399
2400 static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
2401
// Executes the user processing callback (stateful or stateless) for one
// action, bracketed by the service/user pre- and post-processing callbacks.
// Named runNoCatch because the caller decides whether to wrap it in a
// catch-all (see O2_NO_CATCHALL_EXCEPTIONS above).
2402 auto runNoCatch = [&context, ref, &processContext](DataRelayer::RecordAction& action) mutable {
2403 auto& state = ref.get<DeviceState>();
2404 auto& spec = ref.get<DeviceSpec const>();
2405 auto& streamContext = ref.get<StreamContext>();
2406 auto& dpContext = ref.get<DataProcessorContext>();
// Decide from the completion op whether the user callback should be invoked.
2407 auto shouldProcess = [](DataRelayer::RecordAction& action) -> bool {
2408 switch (action.op) {
2413 return true;
2414 break;
2415 default:
2416 return false;
2417 }
2418 };
// Skip everything once a quit has been requested for this device.
2419 if (state.quitRequested == false) {
2420 {
2421 // Callbacks from services
2422 dpContext.preProcessingCallbacks(processContext);
2423 streamContext.preProcessingCallbacks(processContext);
// NOTE(review): dpContext.preProcessingCallbacks is invoked a second time
// here (already called two lines above) — confirm this is intentional.
2424 dpContext.preProcessingCallbacks(processContext);
2425 // Callbacks from users
2426 ref.get<CallbackService>().call<CallbackService::Id::PreProcessing>(o2::framework::ServiceRegistryRef{ref}, (int)action.op);
2427 }
// Signpost id derived from the ProcessingContext address so user code can
// correlate its own signposts with this device iteration.
2428 O2_SIGNPOST_ID_FROM_POINTER(pcid, device, &processContext);
2429 if (context.statefulProcess && shouldProcess(action)) {
2430 // This way, usercode can use the same processing context to identify
2431 // its signposts and we can map user code to device iterations.
2432 O2_SIGNPOST_START(device, pcid, "device", "Stateful process");
2433 (context.statefulProcess)(processContext);
2434 O2_SIGNPOST_END(device, pcid, "device", "Stateful process");
2435 } else if (context.statelessProcess && shouldProcess(action)) {
// NOTE(review): this is the *stateless* branch but the signpost label says
// "Stateful process" — looks like a copy-paste slip; confirm and relabel.
2436 O2_SIGNPOST_START(device, pcid, "device", "Stateful process");
2437 (context.statelessProcess)(processContext);
2438 O2_SIGNPOST_END(device, pcid, "device", "Stateful process");
2439 } else if (context.statelessProcess || context.statefulProcess) {
// A callback exists but this action's op does not warrant processing.
2440 O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Skipping processing because we are discarding.");
2441 } else {
2442 O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "No processing callback provided. Switching to %{public}s.", "Idle");
2444 }
// Remember the run number we actually processed, so run transitions can be
// detected on the next iteration.
2445 if (shouldProcess(action)) {
2446 auto& timingInfo = ref.get<TimingInfo>();
2447 if (timingInfo.globalRunNumberChanged) {
2448 context.lastRunNumberProcessed = timingInfo.runNumber;
2449 }
2450 }
2451 
2452 // Notify the sink we just consumed some timeframe data
2453 if (context.isSink && action.op == CompletionPolicy::CompletionOp::Consume) {
2454 O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Sending dpl-summary");
2455 auto& allocator = ref.get<DataAllocator>();
2456 allocator.make<int>(OutputRef{"dpl-summary", runtime_hash(spec.name.c_str())}, 1);
2457 }
2458 
2459 // Extra callback which allows a service to add extra outputs.
2460 // This is needed e.g. to ensure that injected CCDB outputs are added
2461 // before an end of stream.
2462 {
2463 ref.get<CallbackService>().call<CallbackService::Id::FinaliseOutputs>(o2::framework::ServiceRegistryRef{ref}, (int)action.op);
2464 dpContext.finaliseOutputsCallbacks(processContext);
2465 streamContext.finaliseOutputsCallbacks(processContext);
2466 }
2467 
// Post-processing: user callback first, then dataprocessor- and
// stream-level service callbacks.
2468 {
2469 ref.get<CallbackService>().call<CallbackService::Id::PostProcessing>(o2::framework::ServiceRegistryRef{ref}, (int)action.op);
2470 dpContext.postProcessingCallbacks(processContext);
2471 streamContext.postProcessingCallbacks(processContext);
2472 }
2473 }
2474 };
2475
2476 if ((state.tracingFlags & DeviceState::LoopReason::TRACE_USERCODE) != 0) {
2477 state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
2478 fair::Logger::SetConsoleSeverity(fair::Severity::trace);
2479 }
2480 if (noCatch) {
2481 try {
2482 runNoCatch(action);
2483 } catch (o2::framework::RuntimeErrorRef e) {
2484 (context.errorHandling)(e, record);
2485 }
2486 } else {
2487 try {
2488 runNoCatch(action);
2489 } catch (std::exception& ex) {
2493 auto e = runtime_error(ex.what());
2494 (context.errorHandling)(e, record);
2495 } catch (o2::framework::RuntimeErrorRef e) {
2496 (context.errorHandling)(e, record);
2497 }
2498 }
2499 if (state.severityStack.empty() == false) {
2500 fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
2501 state.severityStack.pop_back();
2502 }
2503
2504 postUpdateStats(action, record, tStart, tStartMilli);
2505 // We forward inputs only when we consume them. If we simply Process them,
2506 // we keep them for next message arriving.
2508 cleanupRecord(record);
2509 context.postDispatchingCallbacks(processContext);
2510 ref.get<CallbackService>().call<CallbackService::Id::DataConsumed>(o2::framework::ServiceRegistryRef{ref});
2511 }
2512 if ((context.forwardPolicy == ForwardPolicy::AfterProcessing) && hasForwards && consumeSomething) {
2513 O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Late forwarding");
2514 auto& timesliceIndex = ref.get<TimesliceIndex>();
2515 forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false, action.op == CompletionPolicy::CompletionOp::Consume);
2516 }
2517 context.postForwardingCallbacks(processContext);
2519 cleanTimers(action.slot, record);
2520 }
2521 O2_SIGNPOST_END(device, aid, "device", "Done processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());
2522 }
2523 O2_SIGNPOST_END(device, sid, "device", "Start processing ready actions");
2524
2525 // We now broadcast the end of stream if it was requested
2526 if (state.streaming == StreamingState::EndOfStreaming) {
2527 LOGP(detail, "Broadcasting end of stream");
2528 for (auto& channel : spec.outputChannels) {
2530 }
2532 }
2533
2534 return true;
2535}
2536
2538{
2539 LOG(error) << msg;
2540 ServiceRegistryRef ref{mServiceRegistry};
2541 auto& stats = ref.get<DataProcessingStats>();
2543}
2544
2545std::unique_ptr<ConfigParamStore> DeviceConfigurationHelpers::getConfiguration(ServiceRegistryRef registry, const char* name, std::vector<ConfigParamSpec> const& options)
2546{
2547
2548 if (registry.active<ConfigurationInterface>()) {
2549 auto& cfg = registry.get<ConfigurationInterface>();
2550 try {
2551 cfg.getRecursive(name);
2552 std::vector<std::unique_ptr<ParamRetriever>> retrievers;
2553 retrievers.emplace_back(std::make_unique<ConfigurationOptionsRetriever>(&cfg, name));
2554 auto configStore = std::make_unique<ConfigParamStore>(options, std::move(retrievers));
2555 configStore->preload();
2556 configStore->activate();
2557 return configStore;
2558 } catch (...) {
2559 // No overrides...
2560 }
2561 }
2562 return {nullptr};
2563}
2564
2565} // namespace o2::framework
benchmark::State & state
struct uv_timer_s uv_timer_t
struct uv_signal_s uv_signal_t
struct uv_async_s uv_async_t
struct uv_poll_s uv_poll_t
struct uv_loop_s uv_loop_t
o2::monitoring::Metric Metric
uint64_t maxLatency
o2::configuration::ConfigurationInterface ConfigurationInterface
constexpr int DEFAULT_MAX_CHANNEL_AHEAD
uint64_t minLatency
std::ostringstream debug
int32_t i
std::enable_if_t< std::is_signed< T >::value, bool > hasData(const CalArray< T > &cal)
Definition Painter.cxx:600
uint16_t pid
Definition RawData.h:2
uint32_t res
Definition RawData.h:0
#define O2_SIGNPOST_EVENT_EMIT_ERROR(log, id, name, format,...)
Definition Signpost.h:553
#define O2_DECLARE_DYNAMIC_LOG(name)
Definition Signpost.h:489
#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer)
Definition Signpost.h:505
#define O2_SIGNPOST_END(log, id, name, format,...)
Definition Signpost.h:608
#define O2_LOG_ENABLED(log)
Definition Signpost.h:110
#define O2_SIGNPOST_ID_GENERATE(name, log)
Definition Signpost.h:506
#define O2_SIGNPOST_EVENT_EMIT_WARN(log, id, name, format,...)
Definition Signpost.h:563
#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format,...)
Definition Signpost.h:522
#define O2_SIGNPOST_START(log, id, name, format,...)
Definition Signpost.h:602
constexpr uint32_t runtime_hash(char const *str)
o2::monitoring::Monitoring Monitoring
StringRef key
@ DeviceStateChanged
Invoked the device undergoes a state change.
decltype(auto) make(const Output &spec, Args... args)
static void doRun(ServiceRegistryRef)
void fillContext(DataProcessorContext &context, DeviceContext &deviceContext)
DataProcessingDevice(RunningDeviceRef ref, ServiceRegistry &)
static void doPrepare(ServiceRegistryRef)
static bool tryDispatchComputation(ServiceRegistryRef ref, std::vector< DataRelayer::RecordAction > &completed)
static void handleData(ServiceRegistryRef, InputChannelInfo &)
uint32_t getFirstTFOrbitForSlot(TimesliceSlot slot)
Get the firstTForbit associate to a given slot.
void updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStatus, CacheEntryStatus newStatus)
uint32_t getRunNumberForSlot(TimesliceSlot slot)
Get the runNumber associated to a given slot.
void prunePending(OnDropCallback)
Prune all the pending entries in the cache.
std::vector< MessageSet > consumeAllInputsForTimeslice(TimesliceSlot id)
uint64_t getCreationTimeForSlot(TimesliceSlot slot)
Get the creation time associated to a given slot.
ActivityStats processDanglingInputs(std::vector< ExpirationHandler > const &, ServiceRegistryRef context, bool createNew)
uint32_t getFirstTFCounterForSlot(TimesliceSlot slot)
Get the firstTFCounter associate to a given slot.
A service API to communicate with the driver.
The input API of the Data Processing Layer This class holds the inputs which are valid for processing...
size_t size() const
Number of elements in the InputSpan.
Definition InputSpan.h:82
bool active() const
Check if service of type T is currently active.
OldestOutputInfo getOldestPossibleOutput() const
GLint GLsizei count
Definition glcorearb.h:399
GLuint64EXT * result
Definition glcorearb.h:5662
GLuint buffer
Definition glcorearb.h:655
GLuint entry
Definition glcorearb.h:5735
GLuint index
Definition glcorearb.h:781
GLuint const GLchar * name
Definition glcorearb.h:781
GLboolean GLboolean GLboolean b
Definition glcorearb.h:1233
GLsizei const GLfloat * value
Definition glcorearb.h:819
GLint GLint GLsizei GLint GLenum GLenum type
Definition glcorearb.h:275
GLboolean * data
Definition glcorearb.h:298
GLuint GLsizei GLsizei * length
Definition glcorearb.h:790
GLuint GLsizei const GLchar * label
Definition glcorearb.h:2519
typedef void(APIENTRYP PFNGLCULLFACEPROC)(GLenum mode)
GLuint GLsizei const GLchar * message
Definition glcorearb.h:2517
GLboolean GLboolean GLboolean GLboolean a
Definition glcorearb.h:1233
GLuint GLuint stream
Definition glcorearb.h:1806
GLint ref
Definition glcorearb.h:291
GLuint * states
Definition glcorearb.h:4932
Defining PrimaryVertex explicitly as messageable.
RuntimeErrorRef runtime_error(const char *)
ServiceKind
The kind of service we are asking for.
void on_idle_timer(uv_timer_t *handle)
@ DPL
The channel is a normal input channel.
void run_completion(uv_work_t *handle, int status)
void on_socket_polled(uv_poll_t *poller, int status, int events)
void run_callback(uv_work_t *handle)
auto forwardOnInsertion(ServiceRegistryRef &ref, std::span< fair::mq::MessagePtr > &messages) -> void
volatile int region_read_global_dummy_variable
void handleRegionCallbacks(ServiceRegistryRef registry, std::vector< fair::mq::RegionInfo > &infos)
Invoke the callbacks for the mPendingRegionInfos.
void on_out_of_band_polled(uv_poll_t *poller, int status, int events)
DeviceSpec const & getRunningDevice(RunningDeviceRef const &running, ServiceRegistryRef const &services)
@ EndOfStreaming
End of streaming requested, but not notified.
@ Streaming
Data is being processed.
@ Idle
End of streaming notified.
void on_communication_requested(uv_async_t *s)
@ Expired
A transition needs to be fullfilled ASAP.
@ NoTransition
No pending transitions.
@ Requested
A transition was notified to be requested.
RuntimeError & error_from_ref(RuntimeErrorRef)
void on_awake_main_thread(uv_async_t *handle)
@ Completed
The channel was signaled it will not receive any data.
@ Running
The channel is actively receiving data.
void on_signal_callback(uv_signal_t *handle, int signum)
@ Me
Only quit this data processor.
constexpr const char * channelName(int channel)
Definition Constants.h:318
a couple of static helper functions to create timestamp values for CCDB queries or override obsolete ...
Defining DataPointCompositeObject explicitly as copiable.
static void run(AsyncQueue &queue, TimesliceId oldestPossibleTimeslice)
static void post(AsyncQueue &queue, AsyncTask const &task)
An actuatual task to be executed.
Definition AsyncQueue.h:32
static void demangled_backtrace_symbols(void **backtrace, unsigned int total, int fd)
static constexpr int INVALID
CompletionOp
Action to take with the InputRecord:
@ Retry
Like Wait but mark the cacheline as dirty.
int64_t timeslices
How many timeslices it can process without giving back control.
int64_t sharedMemory
How much shared memory it can allocate.
Statistics on the offers consumed, expired.
static bool hasOnlyGenerated(DeviceSpec const &spec)
check if spec is a source devide
static TransitionHandlingState updateStateTransition(ServiceRegistryRef const &ref, ProcessingPolicies const &policies)
starts the EoS timers and returns the new TransitionHandlingState in case as new state is requested
static std::vector< fair::mq::Parts > routeForwardedMessageSet(FairMQDeviceProxy &proxy, std::vector< MessageSet > &currentSetOfInputs, bool copy, bool consume)
Helper to route messages for forwarding.
static void switchState(ServiceRegistryRef const &ref, StreamingState newState)
change the device StreamingState to newState
static void sendEndOfStream(ServiceRegistryRef const &ref, OutputChannelSpec const &channel)
static bool sendOldestPossibleTimeframe(ServiceRegistryRef const &ref, ForwardChannelInfo const &info, ForwardChannelState &state, size_t timeslice)
static void cleanForwardedMessages(std::span< fair::mq::MessagePtr > &currentSetOfInputs, bool consume)
static void routeForwardedMessages(FairMQDeviceProxy &proxy, std::span< fair::mq::MessagePtr > &currentSetOfInputs, std::vector< fair::mq::Parts > &forwardedParts, bool copy, bool consume)
Helper to route messages for forwarding.
Helper struct to hold statistics about the data processing happening.
@ CumulativeRate
Set the value to the specified value if it is positive.
@ Add
Update the rate of the metric given the amount since the last time.
std::function< void(o2::framework::RuntimeErrorRef e, InputRecord &record)> errorHandling
ForwardPolicy forwardPolicy
Wether or not the associated DataProcessor can forward things early.
AlgorithmSpec::InitErrorCallback initError
void preLoopCallbacks(ServiceRegistryRef)
Invoke callbacks before we enter the event loop.
void postStopCallbacks(ServiceRegistryRef)
Invoke callbacks on stop.
void preProcessingCallbacks(ProcessingContext &)
Invoke callbacks to be executed before every process method invokation.
void preStartCallbacks(ServiceRegistryRef)
Invoke callbacks to be executed in PreRun(), before the User Start callbacks.
AlgorithmSpec::ProcessCallback statefulProcess
const char * header
Definition DataRef.h:27
const InputSpec * spec
Definition DataRef.h:26
static std::vector< size_t > createDistinctRouteIndex(std::vector< InputRoute > const &)
CompletionPolicy::CompletionOp op
Definition DataRelayer.h:81
@ Invalid
Ownership of the data has been taken.
@ Backpressured
The incoming data was not valid and has been dropped.
@ Dropped
The incoming data was not relayed, because we are backpressured.
static bool partialMatch(InputSpec const &spec, o2::header::DataOrigin const &origin)
static std::string describe(InputSpec const &spec)
static bool match(InputSpec const &spec, ConcreteDataMatcher const &target)
TimesliceIndex::OldestOutputInfo oldestTimeslice
static unsigned int pipelineLength()
get max number of timeslices in the queue
static std::unique_ptr< ConfigParamStore > getConfiguration(ServiceRegistryRef registry, const char *name, std::vector< ConfigParamSpec > const &options)
ProcessingPolicies & processingPolicies
Running state information of a given device.
Definition DeviceState.h:34
std::atomic< int64_t > cleanupCount
Definition DeviceState.h:82
Forward channel information.
Definition ChannelInfo.h:88
ChannelAccountingType channelType
Wether or not it's a DPL internal channel.
Definition ChannelInfo.h:92
std::string name
The name of the channel.
Definition ChannelInfo.h:90
ForwardingPolicy const * policy
Definition ChannelInfo.h:94
fair::mq::Channel * channel
Definition ChannelInfo.h:51
TimesliceId oldestForChannel
Oldest possible timeslice for the given channel.
Definition ChannelInfo.h:65
enum Lifetime lifetime
Definition InputSpec.h:73
enum EarlyForwardPolicy earlyForward
Information about the running workflow.
static constexpr ServiceKind kind
static Salt streamSalt(short streamId, short dataProcessorId)
void lateBindStreamServices(DeviceState &state, fair::mq::ProgOptions &options, ServiceRegistry::Salt salt)
static Salt globalStreamSalt(short streamId)
void * get(ServiceTypeHash typeHash, Salt salt, ServiceKind kind, char const *name=nullptr) const
void finaliseOutputsCallbacks(ProcessingContext &)
Invoke callbacks to be executed after every process method invokation.
void preProcessingCallbacks(ProcessingContext &pcx)
Invoke callbacks to be executed before every process method invokation.
void preEOSCallbacks(EndOfStreamContext &eosContext)
Invoke callbacks to be executed before every EOS user callback invokation.
void postProcessingCallbacks(ProcessingContext &pcx)
Invoke callbacks to be executed after every process method invokation.
static int64_t getRealtimeSinceEpochStandalone()
bool keepAtEndOfStream
Wether this kind of data should be flushed during end of stream.
Definition TimingInfo.h:44
static bool timesliceIsTimer(size_t timeslice)
Definition TimingInfo.h:46
static TimesliceId getTimeslice(data_matcher::VariableContext const &variables)
void backpressure(InputChannelInfo const &)
locked_execution(ServiceRegistryRef &ref_)
the main header struct
Definition DataHeader.h:619
constexpr size_t min
constexpr size_t max
LOG(info)<< "Compressed in "<< sw.CpuTime()<< " s"
vec clear()
const std::string str
uint64_t const void const *restrict const msg
Definition x9.h:153