Project
Loading...
Searching...
No Matches
DataProcessingDevice.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
13#include <atomic>
33#include "Framework/InputSpan.h"
34#if defined(__APPLE__) || defined(NDEBUG)
35#define O2_SIGNPOST_IMPLEMENTATION
36#endif
37#include "Framework/Signpost.h"
50
51#include "DecongestionService.h"
54#include "DataRelayerHelpers.h"
55#include "Headers/DataHeader.h"
57
58#include <Framework/Tracing.h>
59
60#include <fairmq/Parts.h>
61#include <fairmq/Socket.h>
62#include <fairmq/ProgOptions.h>
63#include <fairmq/shmem/Message.h>
64#include <Configuration/ConfigurationInterface.h>
65#include <Configuration/ConfigurationFactory.h>
66#include <Monitoring/Monitoring.h>
67#include <TMessage.h>
68#include <TClonesArray.h>
69
70#include <fmt/ostream.h>
71#include <algorithm>
72#include <vector>
73#include <numeric>
74#include <memory>
75#include <uv.h>
76#include <execinfo.h>
77#include <sstream>
78#include <boost/property_tree/json_parser.hpp>
79
80// Formatter to avoid having to rewrite the ostream operator for the enum
81namespace fmt
82{
83template <>
86} // namespace fmt
87
88// A log to use for general device logging
90// A log to use for general device logging
92// Special log to keep track of the lifetime of the parts
94// Stream which keeps track of the calibration lifetime logic
96// Special log to track the async queue behavior
98// Special log to track the forwarding requests
100// Special log to track CCDB related requests
102// Special log to track task scheduling
104
105using namespace o2::framework;
106using ConfigurationInterface = o2::configuration::ConfigurationInterface;
108
109constexpr int DEFAULT_MAX_CHANNEL_AHEAD = 128;
110
111namespace o2::framework
112{
113
114template <>
118
122{
123 auto* state = (DeviceState*)handle->data;
124 state->loopReason |= DeviceState::TIMER_EXPIRED;
125}
126
128{
129 auto* state = (DeviceState*)s->data;
131}
132
133DeviceSpec const& getRunningDevice(RunningDeviceRef const& running, ServiceRegistryRef const& services)
134{
135 auto& devices = services.get<o2::framework::RunningWorkflowInfo const>().devices;
136 return devices[running.index];
137}
138
144
146 : mRunningDevice{running},
147 mConfigRegistry{nullptr},
148 mServiceRegistry{registry}
149{
150 GetConfig()->Subscribe<std::string>("dpl", [&registry = mServiceRegistry](const std::string& key, std::string value) {
151 if (key == "cleanup") {
153 auto& deviceState = ref.get<DeviceState>();
154 int64_t cleanupCount = deviceState.cleanupCount.load();
155 int64_t newCleanupCount = std::stoll(value);
156 if (newCleanupCount <= cleanupCount) {
157 return;
158 }
159 deviceState.cleanupCount.store(newCleanupCount);
160 for (auto& info : deviceState.inputChannelInfos) {
161 fair::mq::Parts parts;
162 while (info.channel->Receive(parts, 0)) {
163 LOGP(debug, "Dropping {} parts", parts.Size());
164 if (parts.Size() == 0) {
165 break;
166 }
167 }
168 }
169 }
170 });
171
172 std::function<void(const fair::mq::State)> stateWatcher = [this, &registry = mServiceRegistry](const fair::mq::State state) -> void {
174 auto& deviceState = ref.get<DeviceState>();
175 auto& control = ref.get<ControlService>();
176 auto& callbacks = ref.get<CallbackService>();
177 control.notifyDeviceState(fair::mq::GetStateName(state));
179
180 if (deviceState.nextFairMQState.empty() == false) {
181 auto state = deviceState.nextFairMQState.back();
182 (void)this->ChangeState(state);
183 deviceState.nextFairMQState.pop_back();
184 }
185 };
186
187 // 99 is to execute DPL callbacks last
188 this->SubscribeToStateChange("99-dpl", stateWatcher);
189
190 // One task for now.
191 mStreams.resize(1);
192 mHandles.resize(1);
193
194 ServiceRegistryRef ref{mServiceRegistry};
195
196 mAwakeHandle = (uv_async_t*)malloc(sizeof(uv_async_t));
197 auto& state = ref.get<DeviceState>();
198 assert(state.loop);
199 int res = uv_async_init(state.loop, mAwakeHandle, on_communication_requested);
200 mAwakeHandle->data = &state;
201 if (res < 0) {
202 LOG(error) << "Unable to initialise subscription";
203 }
204
206 SubscribeToNewTransition("dpl", [wakeHandle = mAwakeHandle](fair::mq::Transition t) {
207 int res = uv_async_send(wakeHandle);
208 if (res < 0) {
209 LOG(error) << "Unable to notify subscription";
210 }
211 LOG(debug) << "State transition requested";
212 });
213}
214
215// Callback executed on a libuv worker thread to run the processing for one
216// stream. handle->data is a TaskStreamInfo; its id indexes the stream (the
217// +1 below is because stream salts are offset by one). For the moment we simply use the first one.
218void run_callback(uv_work_t* handle)
219{
220  auto* task = (TaskStreamInfo*)handle->data;
221  auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)};
222  // We create a new signpost interval for this specific data processor. Same id, same data processor.
223  auto& dataProcessorContext = ref.get<DataProcessorContext>();
224  O2_SIGNPOST_ID_FROM_POINTER(sid, device, &dataProcessorContext);
225  O2_SIGNPOST_START(device, sid, "run_callback", "Starting run callback on stream %d", task->id.index);
  // NOTE(review): the statements which actually invoke the data processing
  // (original lines 226-227) are not visible in this extract.
228  O2_SIGNPOST_END(device, sid, "run_callback", "Done processing data for stream %d", task->id.index);
229}
230
231// Once the processing in a thread is done, this is executed on the main thread.
// It settles the computing-quota accounting for the stream (consume pending
// offers, report expired ones) and finally marks the task as not running.
// @param handle  uv work handle; handle->data is the TaskStreamInfo of the stream.
// @param status  libuv completion status (currently unused here).
232void run_completion(uv_work_t* handle, int status)
233{
234  auto* task = (TaskStreamInfo*)handle->data;
235  // Notice that the completion, while running on the main thread, still
236  // has a salt which is associated to the actual stream which was doing the computation
237  auto ref = ServiceRegistryRef{*task->registry, ServiceRegistry::globalStreamSalt(task->id.index + 1)};
238  auto& state = ref.get<DeviceState>();
239  auto& quotaEvaluator = ref.get<ComputingQuotaEvaluator>();
240
241  using o2::monitoring::Metric;
242  using o2::monitoring::Monitoring;
243  using o2::monitoring::tags::Key;
244  using o2::monitoring::tags::Value;
245
  // Reports, via DataProcessingStats, how much of a quota offer was consumed.
  // NOTE(review): both reporters are function-local statics capturing `ref` by
  // value, so they are bound to the registry of the FIRST invocation — confirm
  // every invocation uses the same registry.
246  static std::function<void(ComputingQuotaOffer const&, ComputingQuotaStats&)> reportConsumedOffer = [ref](ComputingQuotaOffer const& accumulatedConsumed, ComputingQuotaStats& stats) {
247    auto& dpStats = ref.get<DataProcessingStats>();
248    stats.totalConsumedBytes += accumulatedConsumed.sharedMemory;
249    // For now we give back the offer if we did not use it completely.
250    // In principle we should try to run until the offer is fully consumed.
    // Caps the reported consumed timeslices at one per invocation.
251    stats.totalConsumedTimeslices += std::min<int64_t>(accumulatedConsumed.timeslices, 1);
252
253    dpStats.updateStats({static_cast<short>(ProcessingStatsId::SHM_OFFER_BYTES_CONSUMED), DataProcessingStats::Op::Set, stats.totalConsumedBytes});
254    dpStats.updateStats({static_cast<short>(ProcessingStatsId::TIMESLICE_OFFER_NUMBER_CONSUMED), DataProcessingStats::Op::Set, stats.totalConsumedTimeslices});
255    dpStats.processCommandQueue();
256    assert(stats.totalConsumedBytes == dpStats.metrics[(short)ProcessingStatsId::SHM_OFFER_BYTES_CONSUMED]);
257    assert(stats.totalConsumedTimeslices == dpStats.metrics[(short)ProcessingStatsId::TIMESLICE_OFFER_NUMBER_CONSUMED]);
258  };
259
  // Publishes the totals for offers which expired without being used.
260  static std::function<void(ComputingQuotaOffer const&, ComputingQuotaStats const&)> reportExpiredOffer = [ref](ComputingQuotaOffer const& offer, ComputingQuotaStats const& stats) {
261    auto& dpStats = ref.get<DataProcessingStats>();
262    dpStats.updateStats({static_cast<short>(ProcessingStatsId::RESOURCE_OFFER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredOffers});
263    dpStats.updateStats({static_cast<short>(ProcessingStatsId::ARROW_BYTES_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredBytes});
264    dpStats.updateStats({static_cast<short>(ProcessingStatsId::TIMESLICE_NUMBER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredTimeslices});
265    dpStats.processCommandQueue();
266  };
267
  // Drain all the queued offer consumers for this stream, then let the
  // evaluator retire expired offers and release the per-stream resources.
268  for (auto& consumer : state.offerConsumers) {
269    quotaEvaluator.consume(task->id.index, consumer, reportConsumedOffer);
270  }
271  state.offerConsumers.clear();
272  quotaEvaluator.handleExpired(reportExpiredOffer);
273  quotaEvaluator.dispose(task->id.index);
274  task->running = false;
275}
276
277// Context for polling
279 enum struct PollerState : char { Stopped,
281 Connected,
282 Suspended };
283 char const* name = nullptr;
284 uv_loop_t* loop = nullptr;
286 DeviceState* state = nullptr;
287 fair::mq::Socket* socket = nullptr;
289 int fd = -1;
290 bool read = true;
292};
293
294void on_socket_polled(uv_poll_t* poller, int status, int events)
295{
296 auto* context = (PollerContext*)poller->data;
297 assert(context);
298 O2_SIGNPOST_ID_FROM_POINTER(sid, sockets, poller);
299 context->state->loopReason |= DeviceState::DATA_SOCKET_POLLED;
300 switch (events) {
301 case UV_READABLE: {
302 O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
303 context->state->loopReason |= DeviceState::DATA_INCOMING;
304 } break;
305 case UV_WRITABLE: {
306 O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket connected for channel %{public}s", context->name);
307 if (context->read) {
308 O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for read in context %{public}s", context->name);
309 uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
310 context->state->loopReason |= DeviceState::DATA_CONNECTED;
311 } else {
312 O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name);
313 context->state->loopReason |= DeviceState::DATA_OUTGOING;
314 // If the socket is writable, fairmq will handle the rest, so we can stop polling and
315 // just wait for the disconnect.
316 uv_poll_start(poller, UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
317 }
318 context->pollerState = PollerContext::PollerState::Connected;
319 } break;
320 case UV_DISCONNECT: {
321 O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket disconnected in context %{public}s", context->name);
322 } break;
323 case UV_PRIORITIZED: {
324 O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Socket prioritized for context %{public}s", context->name);
325 } break;
326 }
327 // We do nothing, all the logic for now stays in DataProcessingDevice::doRun()
328}
329
330void on_out_of_band_polled(uv_poll_t* poller, int status, int events)
331{
332 O2_SIGNPOST_ID_FROM_POINTER(sid, sockets, poller);
333 auto* context = (PollerContext*)poller->data;
334 context->state->loopReason |= DeviceState::OOB_ACTIVITY;
335 if (status < 0) {
336 LOGP(fatal, "Error while polling {}: {}", context->name, status);
337 uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
338 }
339 switch (events) {
340 case UV_READABLE: {
341 O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
342 context->state->loopReason |= DeviceState::DATA_INCOMING;
343 assert(context->channelInfo);
344 context->channelInfo->readPolled = true;
345 } break;
346 case UV_WRITABLE: {
347 O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name);
348 if (context->read) {
349 O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name);
350 uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_out_of_band_polled);
351 } else {
352 O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name);
353 context->state->loopReason |= DeviceState::DATA_OUTGOING;
354 }
355 } break;
356 case UV_DISCONNECT: {
357 O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name);
358 uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
359 } break;
360 case UV_PRIORITIZED: {
361 O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name);
362 } break;
363 }
364 // We do nothing, all the logic for now stays in DataProcessingDevice::doRun()
365}
366
375{
376 auto ref = ServiceRegistryRef{mServiceRegistry};
377 auto& context = ref.get<DataProcessorContext>();
378 auto& spec = getRunningDevice(mRunningDevice, ref);
379
380 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
381 O2_SIGNPOST_START(device, cid, "Init", "Entering Init callback.");
382 context.statelessProcess = spec.algorithm.onProcess;
383 context.statefulProcess = nullptr;
384 context.error = spec.algorithm.onError;
385 context.initError = spec.algorithm.onInitError;
386
387 auto configStore = DeviceConfigurationHelpers::getConfiguration(mServiceRegistry, spec.name.c_str(), spec.options);
388 if (configStore == nullptr) {
389 std::vector<std::unique_ptr<ParamRetriever>> retrievers;
390 retrievers.emplace_back(std::make_unique<FairOptionsRetriever>(GetConfig()));
391 configStore = std::make_unique<ConfigParamStore>(spec.options, std::move(retrievers));
392 configStore->preload();
393 configStore->activate();
394 }
395
396 using boost::property_tree::ptree;
397
399 for (auto& entry : configStore->store()) {
400 std::stringstream ss;
401 std::string str;
402 if (entry.second.empty() == false) {
403 boost::property_tree::json_parser::write_json(ss, entry.second, false);
404 str = ss.str();
405 str.pop_back(); // remove EoL
406 } else {
407 str = entry.second.get_value<std::string>();
408 }
409 std::string configString = fmt::format("[CONFIG] {}={} 1 {}", entry.first, str, configStore->provenance(entry.first.c_str())).c_str();
410 mServiceRegistry.get<DriverClient>(ServiceRegistry::globalDeviceSalt()).tell(configString.c_str());
411 }
412
413 mConfigRegistry = std::make_unique<ConfigParamRegistry>(std::move(configStore));
414
415 // Setup the error handlers for init
416 if (context.initError) {
417 context.initErrorHandling = [&errorCallback = context.initError,
418 &serviceRegistry = mServiceRegistry](RuntimeErrorRef e) {
422 auto& context = ref.get<DataProcessorContext>();
423 auto& err = error_from_ref(e);
424 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
425 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Init", "Exception caught while in Init: %{public}s. Invoking errorCallback.", err.what);
426 BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
427 auto& stats = ref.get<DataProcessingStats>();
429 InitErrorContext errorContext{ref, e};
430 errorCallback(errorContext);
431 };
432 } else {
433 context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) {
434 auto& err = error_from_ref(e);
438 auto& context = ref.get<DataProcessorContext>();
439 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
440 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Init", "Exception caught while in Init: %{public}s. Exiting with 1.", err.what);
441 BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
442 auto& stats = ref.get<DataProcessingStats>();
444 exit(1);
445 };
446 }
447
448 context.expirationHandlers.clear();
449 context.init = spec.algorithm.onInit;
450 if (context.init) {
451 static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
452 InitContext initContext{*mConfigRegistry, mServiceRegistry};
453
454 if (noCatch) {
455 try {
456 context.statefulProcess = context.init(initContext);
458 if (context.initErrorHandling) {
459 (context.initErrorHandling)(e);
460 }
461 }
462 } else {
463 try {
464 context.statefulProcess = context.init(initContext);
465 } catch (std::exception& ex) {
469 auto e = runtime_error(ex.what());
470 (context.initErrorHandling)(e);
472 (context.initErrorHandling)(e);
473 }
474 }
475 }
476 auto& state = ref.get<DeviceState>();
477 state.inputChannelInfos.resize(spec.inputChannels.size());
481 int validChannelId = 0;
482 for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
483 auto& name = spec.inputChannels[ci].name;
484 if (name.find(spec.channelPrefix + "from_internal-dpl-clock") == 0) {
485 state.inputChannelInfos[ci].state = InputChannelState::Pull;
486 state.inputChannelInfos[ci].id = {ChannelIndex::INVALID};
487 validChannelId++;
488 } else {
489 state.inputChannelInfos[ci].id = {validChannelId++};
490 }
491 }
492
493 // Invoke the callback policy for this device.
494 if (spec.callbacksPolicy.policy != nullptr) {
495 InitContext initContext{*mConfigRegistry, mServiceRegistry};
496 spec.callbacksPolicy.policy(mServiceRegistry.get<CallbackService>(ServiceRegistry::globalDeviceSalt()), initContext);
497 }
498
499 // Services which are stream should be initialised now
500 auto* options = GetConfig();
501 for (size_t si = 0; si < mStreams.size(); ++si) {
503 mServiceRegistry.lateBindStreamServices(state, *options, streamSalt);
504 }
505 O2_SIGNPOST_END(device, cid, "Init", "Exiting Init callback.");
506}
507
// libuv signal handler. On receipt of the watched signal (which one is not
// visible in this extract) it offers 1GB of shared memory to the computing
// quota evaluator, unless some valid offer with shared memory is pending.
508void on_signal_callback(uv_signal_t* handle, int signum)
509{
510  O2_SIGNPOST_ID_FROM_POINTER(sid, device, handle);
511  O2_SIGNPOST_START(device, sid, "signal_state", "Signal %d received.", signum);
512
513  auto* registry = (ServiceRegistry*)handle->data;
514  if (!registry) {
515    O2_SIGNPOST_END(device, sid, "signal_state", "No registry active. Ignoring signal.");
516    return;
517  }
518  ServiceRegistryRef ref{*registry};
519  auto& state = ref.get<DeviceState>();
520  auto& quotaEvaluator = ref.get<ComputingQuotaEvaluator>();
521  auto& stats = ref.get<DataProcessingStats>();
  // NOTE(review): original line 522 is missing from this extract; it
  // presumably used `state` and/or `stats` — verify against upstream.
523  size_t ri = 0;
  // If any valid offer already carries shared memory, do not add another one.
524  while (ri != quotaEvaluator.mOffers.size()) {
525    auto& offer = quotaEvaluator.mOffers[ri];
526    // We were already offered some sharedMemory, so we
527    // do not consider the offer.
528    // FIXME: in principle this should account for memory
529    // available and being offered, however we
530    // want to get out of the woods for now.
531    if (offer.valid && offer.sharedMemory != 0) {
532      O2_SIGNPOST_END(device, sid, "signal_state", "Memory already offered.");
533      return;
534    }
535    ri++;
536  }
537  // Find the first empty offer and have 1GB of shared memory there
538  for (auto& offer : quotaEvaluator.mOffers) {
539    if (offer.valid == false) {
540      offer.cpu = 0;
541      offer.memory = 0;
542      offer.sharedMemory = 1000000000;
543      offer.valid = true;
544      offer.user = -1;
545      break;
546    }
547  }
  // NOTE(review): original line 548 is missing from this extract.
549  O2_SIGNPOST_END(device, sid, "signal_state", "Done processing signals.");
550}
551
552struct DecongestionContext {
555};
556
557auto decongestionCallbackLate = [](AsyncTask& task, size_t aid) -> void {
558 auto& oldestTimeslice = task.user<DecongestionContext>().oldestTimeslice;
559 auto& ref = task.user<DecongestionContext>().ref;
560
561 auto& decongestion = ref.get<DecongestionService>();
562 auto& proxy = ref.get<FairMQDeviceProxy>();
563 if (oldestTimeslice.timeslice.value <= decongestion.lastTimeslice) {
564 LOG(debug) << "Not sending already sent oldest possible timeslice " << oldestTimeslice.timeslice.value;
565 return;
566 }
567 for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
568 auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi});
569 auto& state = proxy.getForwardChannelState(ChannelIndex{fi});
570 O2_SIGNPOST_ID_GENERATE(aid, async_queue);
571 // TODO: this we could cache in the proxy at the bind moment.
572 if (info.channelType != ChannelAccountingType::DPL) {
573 O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Skipping channel %{public}s because it's not a DPL channel",
574 info.name.c_str());
575
576 continue;
577 }
578 if (DataProcessingHelpers::sendOldestPossibleTimeframe(ref, info, state, oldestTimeslice.timeslice.value)) {
579 O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Forwarding to channel %{public}s oldest possible timeslice %zu, prio 20",
580 info.name.c_str(), oldestTimeslice.timeslice.value);
581 }
582 }
583};
584
585// This is how we do the forwarding, i.e. we push
586// the inputs which are shared between this device and others
587// to the next one in the daisy chain.
588// FIXME: do it in a smarter way than O(N^2)
//
// @param registry           services of this device
// @param slot               the timeslice slot whose inputs are forwarded
// @param currentSetOfInputs messages grouped per input, routed by
//                           routeForwardedMessageSet()
// @param oldestTimeslice    oldest possible output, propagated asynchronously
//                           to downstream via decongestionCallbackLate
// @param copy               forwarded via routeForwardedMessageSet — presumably
//                           forwards copies, keeping the originals (TODO confirm)
// @param consume            forwarded via routeForwardedMessageSet
589static auto forwardInputs = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector<std::vector<fair::mq::MessagePtr>>& currentSetOfInputs,
590                               TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) {
591  auto& proxy = registry.get<FairMQDeviceProxy>();
592
593  O2_SIGNPOST_ID_GENERATE(sid, forwarding);
594  O2_SIGNPOST_START(forwarding, sid, "forwardInputs", "Starting forwarding for slot %zu with oldestTimeslice %zu %{public}s%{public}s%{public}s",
595                    slot.index, oldestTimeslice.timeslice.value, copy ? "with copy" : "", copy && consume ? " and " : "", consume ? "with consume" : "");
596  auto forwardedParts = DataProcessingHelpers::routeForwardedMessageSet(proxy, currentSetOfInputs, copy, consume);
597
  // Send the per-channel message sets through the channel's forwarding policy.
598  for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
599    if (forwardedParts[fi].Size() == 0) {
600      continue;
601    }
602    ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi});
603    auto& parts = forwardedParts[fi];
604    if (info.policy == nullptr) {
605      O2_SIGNPOST_EVENT_EMIT_ERROR(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d has no policy.", info.name.c_str(), fi);
606      continue;
607    }
608    O2_SIGNPOST_EVENT_EMIT(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d", info.name.c_str(), fi);
609    info.policy->forward(parts, ChannelIndex{fi}, registry);
610  }
611
  // The oldest-possible-timeslice update is not sent inline: it is queued on
  // the async queue and performed later by decongestionCallbackLate.
612  auto& asyncQueue = registry.get<AsyncQueue>();
613  auto& decongestion = registry.get<DecongestionService>();
614  O2_SIGNPOST_ID_GENERATE(aid, async_queue);
615  O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputs", "Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);
616  AsyncQueueHelpers::post(asyncQueue, AsyncTask{.timeslice = oldestTimeslice.timeslice, .id = decongestion.oldestPossibleTimesliceTask, .debounce = -1, .callback = decongestionCallbackLate}
617                                       .user<DecongestionContext>({.ref = registry, .oldestTimeslice = oldestTimeslice}));
618  O2_SIGNPOST_END(forwarding, sid, "forwardInputs", "Forwarding done");
619};
620
// Cleanup counterpart of forwardInputs for inputs which were forwarded early:
// nothing is sent here, we only iterate the message sets for the side effects
// of the cleanup (see note below about the elided statement).
621static auto cleanEarlyForward = [](ServiceRegistryRef registry, TimesliceSlot slot, std::vector<std::vector<fair::mq::MessagePtr>>& currentSetOfInputs,
622                                   TimesliceIndex::OldestOutputInfo oldestTimeslice, bool copy, bool consume = true) {
623  auto& proxy = registry.get<FairMQDeviceProxy>();
624
625  O2_SIGNPOST_ID_GENERATE(sid, forwarding);
626  O2_SIGNPOST_START(forwarding, sid, "forwardInputs", "Cleaning up slot %zu with oldestTimeslice %zu %{public}s%{public}s%{public}s",
627                    slot.index, oldestTimeslice.timeslice.value, copy ? "with copy" : "", copy && consume ? " and " : "", consume ? "with consume" : "");
628  // Always copy them, because we do not want to actually send them.
629  // We merely need the side effect of the consume, if applicable.
630  for (size_t ii = 0, ie = currentSetOfInputs.size(); ii < ie; ++ii) {
631    auto span = std::span<fair::mq::MessagePtr>(currentSetOfInputs[ii]);
    // NOTE(review): the statement which actually consumed/cleaned `span`
    // (original line 632) is missing from this extract.
633  }
634
635  O2_SIGNPOST_END(forwarding, sid, "forwardInputs", "Cleaning done");
636};
637
638extern volatile int region_read_global_dummy_variable;
640
642void handleRegionCallbacks(ServiceRegistryRef registry, std::vector<fair::mq::RegionInfo>& infos)
643{
644 if (infos.empty() == false) {
645 std::vector<fair::mq::RegionInfo> toBeNotified;
646 toBeNotified.swap(infos); // avoid any MT issue.
647 static bool dummyRead = getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS") && atoi(getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS"));
648 for (auto const& info : toBeNotified) {
649 if (dummyRead) {
650 for (size_t i = 0; i < info.size / sizeof(region_read_global_dummy_variable); i += 4096 / sizeof(region_read_global_dummy_variable)) {
651 region_read_global_dummy_variable = ((int*)info.ptr)[i];
652 }
653 }
654 registry.get<CallbackService>().call<CallbackService::Id::RegionInfoCallback>(info);
655 }
656 }
657}
658
659namespace
660{
662{
663 auto* state = (DeviceState*)handle->data;
665}
666} // namespace
667
// Create the libuv pollers for the channels of this device. Devices with a
// processing callback poll their input channels; devices with nothing to poll
// fall back to output pollers, and "fake" devices (no processing callback at
// all) just get a periodic timer and request to quit immediately.
668void DataProcessingDevice::initPollers()
669{
670  auto ref = ServiceRegistryRef{mServiceRegistry};
671  auto& deviceContext = ref.get<DeviceContext>();
672  auto& context = ref.get<DataProcessorContext>();
673  auto& spec = ref.get<DeviceSpec const>();
674  auto& state = ref.get<DeviceState>();
675  // We add a timer only in case a channel poller is not there.
676  if ((context.statefulProcess != nullptr) || (context.statelessProcess != nullptr)) {
677    for (auto& [channelName, channel] : GetChannels()) {
      // Attach the fairmq channel to the matching InputChannelInfo.
      // NOTE(review): if no spec name matches, channelInfo is left pointing at
      // the last inspected entry (or stays uninitialized when the spec list is
      // empty) — confirm every polled channel always has a matching spec.
678      InputChannelInfo* channelInfo;
679      for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
680        auto& channelSpec = spec.inputChannels[ci];
681        channelInfo = &state.inputChannelInfos[ci];
682        if (channelSpec.name != channelName) {
683          continue;
684        }
685        channelInfo->channel = &this->GetChannel(channelName, 0);
686        break;
687      }
      // Internal DPL channels carry no data inputs, except the AOD / CCDB /
      // injected ones which are explicitly whitelisted below.
688      if ((channelName.rfind("from_internal-dpl", 0) == 0) &&
689          (channelName.rfind("from_internal-dpl-aod", 0) != 0) &&
690          (channelName.rfind("from_internal-dpl-ccdb-backend", 0) != 0) &&
691          (channelName.rfind("from_internal-dpl-injected", 0)) != 0) {
692        LOGP(detail, "{} is an internal channel. Skipping as no input will come from there.", channelName);
693        continue;
694      }
695      // We only watch receiving sockets.
696      if (channelName.rfind("from_" + spec.name + "_", 0) == 0) {
697        LOGP(detail, "{} is to send data. Not polling.", channelName);
698        continue;
699      }
700
701      if (channelName.rfind("from_", 0) != 0) {
702        LOGP(detail, "{} is not a DPL socket. Not polling.", channelName);
703        continue;
704      }
705
706      // We assume there is always a ZeroMQ socket behind.
707      int zmq_fd = 0;
708      size_t zmq_fd_len = sizeof(zmq_fd);
709      // FIXME: I should probably save those somewhere... ;-)
710      auto* poller = (uv_poll_t*)malloc(sizeof(uv_poll_t));
711      channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
712      if (zmq_fd == 0) {
713        LOG(error) << "Cannot get file descriptor for channel." << channelName;
714        continue;
715      }
716      LOGP(detail, "Polling socket for {}", channelName);
      // Poller context for on_socket_polled(); name is strdup'ed and never
      // freed (see the FIXME above — these allocations are intentional leaks).
717      auto* pCtx = (PollerContext*)malloc(sizeof(PollerContext));
718      pCtx->name = strdup(channelName.c_str());
719      pCtx->loop = state.loop;
720      pCtx->device = this;
721      pCtx->state = &state;
722      pCtx->fd = zmq_fd;
723      assert(channelInfo != nullptr);
724      pCtx->channelInfo = channelInfo;
725      pCtx->socket = &channel[0].GetSocket();
726      pCtx->read = true;
727      poller->data = pCtx;
728      uv_poll_init(state.loop, poller, zmq_fd);
      // NOTE(review): this condition looks unreachable — channels whose name
      // does not start with "from_" already hit the `continue` above (original
      // line 701), so this out-of-band branch appears to be dead code.
729      if (channelName.rfind("from_", 0) != 0) {
730        LOGP(detail, "{} is an out of band channel.", channelName);
731        state.activeOutOfBandPollers.push_back(poller);
732      } else {
733        channelInfo->pollerIndex = state.activeInputPollers.size();
734        state.activeInputPollers.push_back(poller);
735      }
736    }
737    // In case we do not have any input channel and we do not have
738    // any timers or signal watchers we still wake up whenever we can send data to downstream
739    // devices to allow for enumerations.
740    if (state.activeInputPollers.empty() &&
741        state.activeOutOfBandPollers.empty() &&
742        state.activeTimers.empty() &&
743        state.activeSignals.empty()) {
744      // FIXME: this is to make sure we do not reset the output timer
745      // for readout proxies or similar. In principle this should go once
746      // we move to OutOfBand InputSpec.
747      if (state.inputChannelInfos.empty()) {
748        LOGP(detail, "No input channels. Setting exit transition timeout to 0.");
749        deviceContext.exitTransitionTimeout = 0;
750      }
      // Poll the output channels instead, so the loop wakes up when we can send.
751      for (auto& [channelName, channel] : GetChannels()) {
752        if (channelName.rfind(spec.channelPrefix + "from_internal-dpl", 0) == 0) {
753          LOGP(detail, "{} is an internal channel. Not polling.", channelName);
754          continue;
755        }
756        if (channelName.rfind(spec.channelPrefix + "from_" + spec.name + "_", 0) == 0) {
757          LOGP(detail, "{} is an out of band channel. Not polling for output.", channelName);
758          continue;
759        }
760        // We assume there is always a ZeroMQ socket behind.
761        int zmq_fd = 0;
762        size_t zmq_fd_len = sizeof(zmq_fd);
763        // FIXME: I should probably save those somewhere... ;-)
764        auto* poller = (uv_poll_t*)malloc(sizeof(uv_poll_t));
765        channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
766        if (zmq_fd == 0) {
767          LOGP(error, "Cannot get file descriptor for channel {}", channelName);
768          continue;
769        }
770        LOG(detail) << "Polling socket for " << channel[0].GetName();
771        // FIXME: leak
772        auto* pCtx = (PollerContext*)malloc(sizeof(PollerContext));
773        pCtx->name = strdup(channelName.c_str());
774        pCtx->loop = state.loop;
775        pCtx->device = this;
776        pCtx->state = &state;
777        pCtx->fd = zmq_fd;
778        pCtx->read = false;
779        poller->data = pCtx;
780        uv_poll_init(state.loop, poller, zmq_fd);
781        state.activeOutputPollers.push_back(poller);
782      }
783    }
784  } else {
785    LOGP(detail, "This is a fake device so we exit after the first iteration.");
786    deviceContext.exitTransitionTimeout = 0;
787    // This is a fake device, so we can request to exit immediately
788    ServiceRegistryRef ref{mServiceRegistry};
789    ref.get<ControlService>().readyToQuit(QuitRequest::Me);
790    // A two second timer to stop internal devices which do not want to
791    auto* timer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
792    uv_timer_init(state.loop, timer);
793    timer->data = &state;
794    uv_update_time(state.loop);
795    uv_timer_start(timer, on_idle_timer, 2000, 2000);
796    state.activeTimers.push_back(timer);
797  }
798}
799
800void DataProcessingDevice::startPollers()
801{
802 auto ref = ServiceRegistryRef{mServiceRegistry};
803 auto& deviceContext = ref.get<DeviceContext>();
804 auto& state = ref.get<DeviceState>();
805
806 for (auto* poller : state.activeInputPollers) {
807 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
808 O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection.");
809 uv_poll_start(poller, UV_WRITABLE, &on_socket_polled);
810 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
811 }
812 for (auto& poller : state.activeOutOfBandPollers) {
813 uv_poll_start(poller, UV_WRITABLE, &on_out_of_band_polled);
814 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
815 }
816 for (auto* poller : state.activeOutputPollers) {
817 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
818 O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection.");
819 uv_poll_start(poller, UV_WRITABLE, &on_socket_polled);
820 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Disconnected;
821 }
822
823 deviceContext.gracePeriodTimer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
824 deviceContext.gracePeriodTimer->data = new ServiceRegistryRef(mServiceRegistry);
825 uv_timer_init(state.loop, deviceContext.gracePeriodTimer);
826
827 deviceContext.dataProcessingGracePeriodTimer = (uv_timer_t*)malloc(sizeof(uv_timer_t));
828 deviceContext.dataProcessingGracePeriodTimer->data = new ServiceRegistryRef(mServiceRegistry);
829 uv_timer_init(state.loop, deviceContext.dataProcessingGracePeriodTimer);
830}
831
832void DataProcessingDevice::stopPollers()
833{
834 auto ref = ServiceRegistryRef{mServiceRegistry};
835 auto& deviceContext = ref.get<DeviceContext>();
836 auto& state = ref.get<DeviceState>();
837 LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size());
838 for (auto* poller : state.activeInputPollers) {
839 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
840 O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed.");
841 uv_poll_stop(poller);
842 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
843 }
844 LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size());
845 for (auto* poller : state.activeOutOfBandPollers) {
846 uv_poll_stop(poller);
847 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
848 }
849 LOGP(detail, "Stopping {} output pollers", state.activeOutOfBandPollers.size());
850 for (auto* poller : state.activeOutputPollers) {
851 O2_SIGNPOST_ID_FROM_POINTER(sid, device, poller);
852 O2_SIGNPOST_END(device, sid, "socket_state", "Output socket closed.");
853 uv_poll_stop(poller);
854 ((PollerContext*)poller->data)->pollerState = PollerContext::PollerState::Stopped;
855 }
856
857 uv_timer_stop(deviceContext.gracePeriodTimer);
858 delete (ServiceRegistryRef*)deviceContext.gracePeriodTimer->data;
859 free(deviceContext.gracePeriodTimer);
860 deviceContext.gracePeriodTimer = nullptr;
861
862 uv_timer_stop(deviceContext.dataProcessingGracePeriodTimer);
863 delete (ServiceRegistryRef*)deviceContext.dataProcessingGracePeriodTimer->data;
864 free(deviceContext.dataProcessingGracePeriodTimer);
865 deviceContext.dataProcessingGracePeriodTimer = nullptr;
866}
867
// InitTask: builds the per-device runtime machinery before entering Running.
// It registers expiration handlers for configurable input routes, creates the
// main-thread wakeup async handle, subscribes to shared-memory region events
// on every channel, installs the SIGUSR1 handler, initialises the pollers and
// then blocks until all expected region-registration callbacks have arrived.
{
  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& deviceContext = ref.get<DeviceContext>();
  auto& context = ref.get<DataProcessorContext>();

  O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
  O2_SIGNPOST_START(device, cid, "InitTask", "Entering InitTask callback.");
  auto& spec = getRunningDevice(mRunningDevice, mServiceRegistry);
  auto distinct = DataRelayerHelpers::createDistinctRouteIndex(spec.inputs);
  auto& state = ref.get<DeviceState>();
  // For each distinct input route which provides a configurator, build an
  // ExpirationHandler (creator / dangling-checker / expiration handler).
  // Routes without a configurator still consume a route index (i++).
  int i = 0;
  for (auto& di : distinct) {
    auto& route = spec.inputs[di];
    if (route.configurator.has_value() == false) {
      i++;
      continue;
    }
    ExpirationHandler handler{
      .name = route.configurator->name,
      .routeIndex = RouteIndex{i++},
      .lifetime = route.matcher.lifetime,
      .creator = route.configurator->creatorConfigurator(state, mServiceRegistry, *mConfigRegistry),
      .checker = route.configurator->danglingConfigurator(state, *mConfigRegistry),
      .handler = route.configurator->expirationConfigurator(state, *mConfigRegistry)};
    context.expirationHandlers.emplace_back(std::move(handler));
  }

  // Lazily create the async handle used to wake up the main event loop
  // from other threads (e.g. from the region-event callback below).
  if (state.awakeMainThread == nullptr) {
    state.awakeMainThread = (uv_async_t*)malloc(sizeof(uv_async_t));
    state.awakeMainThread->data = &state;
    uv_async_init(state.loop, state.awakeMainThread, on_awake_main_thread);
  }

  // Timeouts / expected callback counts come in as strings from the config.
  deviceContext.expectedRegionCallbacks = std::stoi(fConfig->GetValue<std::string>("expected-region-callbacks"));
  deviceContext.exitTransitionTimeout = std::stoi(fConfig->GetValue<std::string>("exit-transition-timeout"));
  deviceContext.dataProcessingTimeout = std::stoi(fConfig->GetValue<std::string>("data-processing-timeout"));

  // Subscribe to shared-memory region events on each channel's transport.
  // The callback runs on a transport thread: it queues the info under the
  // mutex, decrements the expected count and wakes the main loop, which does
  // the actual handling (see handleRegionCallbacks below).
  for (auto& channel : GetChannels()) {
    channel.second.at(0).Transport()->SubscribeToRegionEvents([&context = deviceContext,
                                                               &registry = mServiceRegistry,
                                                               &pendingRegionInfos = mPendingRegionInfos,
                                                               &regionInfoMutex = mRegionInfoMutex](fair::mq::RegionInfo info) {
      std::lock_guard<std::mutex> lock(regionInfoMutex);
      LOG(detail) << ">>> Region info event" << info.event;
      LOG(detail) << "id: " << info.id;
      LOG(detail) << "ptr: " << info.ptr;
      LOG(detail) << "size: " << info.size;
      LOG(detail) << "flags: " << info.flags;
      // Now we check for pending events with the mutex,
      // so the lines below are atomic.
      pendingRegionInfos.push_back(info);
      context.expectedRegionCallbacks -= 1;
      // We always want to handle these on the main loop,
      // so we awake it.
      ServiceRegistryRef ref{registry};
      uv_async_send(ref.get<DeviceState>().awakeMainThread);
    });
  }

  // Add a signal manager for SIGUSR1 so that we can force
  // an event from the outside, making sure that the event loop can
  // be unblocked (e.g. by a quitting DPL driver) even when there
  // is no data pending to be processed.
  if (deviceContext.sigusr1Handle == nullptr) {
    deviceContext.sigusr1Handle = (uv_signal_t*)malloc(sizeof(uv_signal_t));
    deviceContext.sigusr1Handle->data = &mServiceRegistry;
    uv_signal_init(state.loop, deviceContext.sigusr1Handle);
    uv_signal_start(deviceContext.sigusr1Handle, on_signal_callback, SIGUSR1);
  }
  // If there is any signal, we want to make sure they are active
  for (auto& handle : state.activeSignals) {
    handle->data = &state;
  }
  // When we start, we must make sure that we do listen to the signal
  deviceContext.sigusr1Handle->data = &mServiceRegistry;

  DataProcessingDevice::initPollers();

  // Whenever we InitTask, we consider as if the previous iteration
  // was successful, so that even if there is no timer or receiving
  // channel, we can still start an enumeration.
  // (-1 is used as a sentinel "was active" marker for lastActiveDataProcessor.)
  DataProcessorContext* initialContext = nullptr;
  bool idle = state.lastActiveDataProcessor.compare_exchange_strong(initialContext, (DataProcessorContext*)-1);
  if (!idle) {
    LOG(error) << "DataProcessor " << state.lastActiveDataProcessor.load()->spec->name << " was unexpectedly active";
  }

  // We should be ready to run here. Therefore we copy all the
  // required parts in the DataProcessorContext. Eventually we should
  // do so on a per thread basis, with fine grained locks.
  // FIXME: this should not use ServiceRegistry::threadSalt, but
  // more a ServiceRegistry::globalDataProcessorSalt(N) where
  // N is the number of the multiplexed data processor.
  // We will get there.
  this->fillContext(mServiceRegistry.get<DataProcessorContext>(ServiceRegistry::globalDeviceSalt()), deviceContext);

  O2_SIGNPOST_END(device, cid, "InitTask", "Exiting InitTask callback waiting for the remaining region callbacks.");

  // True while either an info is queued or more callbacks are still expected.
  auto hasPendingEvents = [&mutex = mRegionInfoMutex, &pendingRegionInfos = mPendingRegionInfos](DeviceContext& deviceContext) {
    std::lock_guard<std::mutex> lock(mutex);
    return (pendingRegionInfos.empty() == false) || deviceContext.expectedRegionCallbacks > 0;
  };
  O2_SIGNPOST_START(device, cid, "InitTask", "Waiting for registation events.");
  while (hasPendingEvents(deviceContext)) {
    // Wait for the callback to signal its done, so that we do not busy wait.
    uv_run(state.loop, UV_RUN_ONCE);
    // Handle callbacks if any
    {
      O2_SIGNPOST_EVENT_EMIT(device, cid, "InitTask", "Memory registration event received.");
      std::lock_guard<std::mutex> lock(mRegionInfoMutex);
      handleRegionCallbacks(mServiceRegistry, mPendingRegionInfos);
    }
  }
  O2_SIGNPOST_END(device, cid, "InitTask", "Done waiting for registration events.");
}
989
// fillContext: configures the given DataProcessorContext — sink detection for
// rate limiting, input balancing, the error-handling callback and the early
// forwarding policy. NOTE(review): a few original lines (some case labels and
// one condition) are not visible in this excerpt; comments here only describe
// what the visible code does.
{
  context.isSink = false;
  // If nothing is a sink, the rate limiting simply does not trigger.
  bool enableRateLimiting = std::stoi(fConfig->GetValue<std::string>("timeframes-rate-limit"));

  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& spec = ref.get<DeviceSpec const>();

  // The policy is now allowed to state the default.
  context.balancingInputs = spec.completionPolicy.balanceChannels;
  // This is needed because the internal injected dummy sink should not
  // try to balance inputs unless the rate limiting is requested.
  if (enableRateLimiting == false && spec.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) {
    context.balancingInputs = false;
  }
  // A device producing the "dpl-summary" output is treated as the sink for
  // rate-limiting purposes.
  if (enableRateLimiting) {
    for (auto& spec : spec.outputs) {
      if (spec.matcher.binding.value == "dpl-summary") {
        context.isSink = true;
        break;
      }
    }
  }

  context.registry = &mServiceRegistry;
  // Install the error handler: if the user provided an error callback
  // (context.error), log + backtrace and invoke it; otherwise decide between
  // rethrowing and skipping to the next timeframe based on the processing
  // policies.
  if (context.error != nullptr) {
    context.errorHandling = [&errorCallback = context.error,
                             &serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) {
      auto& err = error_from_ref(e);
      auto& context = ref.get<DataProcessorContext>();
      O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
      O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Run", "Exception while running: %{public}s. Invoking callback.", err.what);
      BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
      auto& stats = ref.get<DataProcessingStats>();
      ErrorContext errorContext{record, ref, e};
      errorCallback(errorContext);
    };
  } else {
    context.errorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e, InputRecord& record) {
      auto& err = error_from_ref(e);
      auto& context = ref.get<DataProcessorContext>();
      auto& deviceContext = ref.get<DeviceContext>();
      O2_SIGNPOST_ID_FROM_POINTER(cid, device, &context);
      BacktraceHelpers::demangled_backtrace_symbols(err.backtrace, err.maxBacktrace, STDERR_FILENO);
      auto& stats = ref.get<DataProcessingStats>();
      switch (deviceContext.processingPolicies.error) {
          O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Run", "Exception while running: %{public}s. Rethrowing.", err.what);
          throw e;
        default:
          O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "Run", "Exception while running: %{public}s. Skipping to next timeframe.", err.what);
          break;
      }
    };
  }

  // Decide the early forwarding policy for this device, starting from an
  // env-var controlled default and tightening it for known special cases
  // (TPC payloads, output proxies, Optional inputs).
  auto decideEarlyForward = [&context, &deviceContext, &spec, this]() -> ForwardPolicy {
    ForwardPolicy defaultEarlyForwardPolicy = getenv("DPL_OLD_EARLY_FORWARD") ? ForwardPolicy::AtCompletionPolicySatisified : ForwardPolicy::AtInjection;
    // FIXME: try again with the new policy by default.
    //
    // Make the new policy optional until we handle some of the corner cases
    // with custom policies which expect the early forward to happen only when
    // all the data is available, like in the TPC case.
    // ForwardPolicy defaultEarlyForwardPolicy = getenv("DPL_NEW_EARLY_FORWARD") ? ForwardPolicy::AtInjection : ForwardPolicy::AtCompletionPolicySatisified;
    for (auto& forward : spec.forwards) {
      if (DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{"TPC", "DIGITSMCTR"}) ||
          DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{"TPC", "CLNATIVEMCLBL"}) ||
          DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{o2::header::gDataOriginTPC, "DIGITS"}) ||
          DataSpecUtils::match(forward.matcher, ConcreteDataTypeMatcher{o2::header::gDataOriginTPC, "CLUSTERNATIVE"})) {
        defaultEarlyForwardPolicy = ForwardPolicy::AtCompletionPolicySatisified;
        break;
      }
    }
    // Output proxies should wait for the completion policy before forwarding.
    // Because they actually do not do anything, that's equivalent to
    // forwarding after the processing.
    for (auto& label : spec.labels) {
      if (label.value == "output-proxy") {
        defaultEarlyForwardPolicy = ForwardPolicy::AfterProcessing;
        break;
      }
    }

    // The device-level processing policy can further restrict the default.
    ForwardPolicy forwardPolicy = defaultEarlyForwardPolicy;
    if (spec.forwards.empty() == false) {
      switch (deviceContext.processingPolicies.earlyForward) {
          forwardPolicy = ForwardPolicy::AfterProcessing;
          break;
          forwardPolicy = defaultEarlyForwardPolicy;
          break;
          forwardPolicy = defaultEarlyForwardPolicy;
          break;
      }
    }
    // Optional (and, per the log below, RAWDATA) forwards force forwarding
    // after processing — unless every forward is a Condition, in which case
    // early forwarding is re-enabled.
    bool onlyConditions = true;
    bool overriddenEarlyForward = false;
    for (auto& forwarded : spec.forwards) {
      if (forwarded.matcher.lifetime != Lifetime::Condition) {
        onlyConditions = false;
      }
      forwardPolicy = ForwardPolicy::AfterProcessing;
      overriddenEarlyForward = true;
      LOG(detail) << "Cannot forward early because of RAWDATA input: " << DataSpecUtils::describe(forwarded.matcher);
      break;
    }
      if (forwarded.matcher.lifetime == Lifetime::Optional) {
        forwardPolicy = ForwardPolicy::AfterProcessing;
        overriddenEarlyForward = true;
        LOG(detail) << "Cannot forward early because of Optional input: " << DataSpecUtils::describe(forwarded.matcher);
        break;
      }
    }
    if (!overriddenEarlyForward && onlyConditions) {
      forwardPolicy = defaultEarlyForwardPolicy;
      LOG(detail) << "Enabling early forwarding because only conditions to be forwarded";
    }
    return forwardPolicy;
  };
  context.forwardPolicy = decideEarlyForward();
}
1127
// PreRun: prepares the device to (re)enter the Running state — clears the
// quit flag, re-arms every non-Pull input channel, invokes the pre-start
// callbacks (device-level and per-stream), starts the pollers and raises the
// device_state metric to 1.
{
  auto ref = ServiceRegistryRef{mServiceRegistry};
  auto& state = ref.get<DeviceState>();

  O2_SIGNPOST_ID_FROM_POINTER(cid, device, state.loop);
  O2_SIGNPOST_START(device, cid, "PreRun", "Entering PreRun callback.");
  state.quitRequested = false;
  state.allowedProcessing = DeviceState::Any;
  // Pull channels never receive an EndOfStream, so they are left untouched;
  // everything else goes back to Running.
  for (auto& info : state.inputChannelInfos) {
    if (info.state != InputChannelState::Pull) {
      info.state = InputChannelState::Running;
    }
  }

  // Catch callbacks which fail before we start.
  // Notice that when running multiple dataprocessors
  // we should probably allow expendable ones to fail.
  try {
    auto& dpContext = ref.get<DataProcessorContext>();
    dpContext.preStartCallbacks(ref);
    // Per-stream pre-start callbacks; stream salts are 1-based (i + 1).
    for (size_t i = 0; i < mStreams.size(); ++i) {
      auto streamRef = ServiceRegistryRef{mServiceRegistry, ServiceRegistry::globalStreamSalt(i + 1)};
      auto& context = streamRef.get<StreamContext>();
      context.preStartStreamCallbacks(streamRef);
    }
  } catch (std::exception& e) {
    O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type std::exception caught in PreRun: %{public}s. Rethrowing.", e.what());
    O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
    throw;
  } catch (o2::framework::RuntimeErrorRef& e) {
    auto& err = error_from_ref(e);
    O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type o2::framework::RuntimeErrorRef caught in PreRun: %{public}s. Rethrowing.", err.what);
    O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
    throw;
  } catch (...) {
    O2_SIGNPOST_END(device, cid, "PreRun", "Unknown exception being thrown. Rethrowing.");
    throw;
  }

  ref.get<CallbackService>().call<CallbackService::Id::Start>();
  startPollers();

  // Raise to 1 when we are ready to start processing
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;

  auto& monitoring = ref.get<Monitoring>();
  monitoring.send(Metric{(uint64_t)1, "device_state"}.addTag(Key::Subsystem, Value::DPL));
  O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun callback.");
}
1182
// Teardown counterpart of PreRun: lowers the device_state metric to 0,
// stops all pollers and runs the Stop / post-stop callbacks.
{
  ServiceRegistryRef ref{mServiceRegistry};
  // Lower to 0: the device is no longer processing.
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;

  auto& monitoring = ref.get<Monitoring>();
  monitoring.send(Metric{(uint64_t)0, "device_state"}.addTag(Key::Subsystem, Value::DPL));

  stopPollers();
  ref.get<CallbackService>().call<CallbackService::Id::Stop>();
  auto& dpContext = ref.get<DataProcessorContext>();
  dpContext.postStopCallbacks(ref);
}
1200
// Forwards the FairMQ reset transition to the registered Reset callbacks.
{
  ServiceRegistryRef ref{mServiceRegistry};
  ref.get<CallbackService>().call<CallbackService::Id::Reset>();
}
1206
// Main device loop: iterates until the state transition is Expired. Each
// iteration drains pending FairMQ state changes, handles region callbacks,
// runs the uv event loop (blocking unless work is pending), and, if a task
// stream is free and the quota evaluator grants resources, schedules a
// computation on it. NOTE(review): a few original lines are not visible in
// this excerpt (e.g. part of the shouldNotWait expression); comments only
// describe the visible code.
{
  ServiceRegistryRef ref{mServiceRegistry};
  auto& state = ref.get<DeviceState>();
  bool firstLoop = true;
  O2_SIGNPOST_ID_FROM_POINTER(lid, device, state.loop);
  O2_SIGNPOST_START(device, lid, "device_state", "First iteration of the device loop");

  // Opt-in multithreading via DPL_THREADPOOL_SIZE; the uv worker pool is
  // forced to a single thread in that case.
  bool dplEnableMultithreding = getenv("DPL_THREADPOOL_SIZE") != nullptr;
  if (dplEnableMultithreding) {
    setenv("UV_THREADPOOL_SIZE", "1", 1);
  }

  while (state.transitionHandling != TransitionHandlingState::Expired) {
    // Drain one queued FairMQ state change per iteration, if any.
    if (state.nextFairMQState.empty() == false) {
      (void)this->ChangeState(state.nextFairMQState.back());
      state.nextFairMQState.pop_back();
    }
    // Notify on the main thread the new region callbacks, making sure
    // no callback is issued if there is something still processing.
    {
      std::lock_guard<std::mutex> lock(mRegionInfoMutex);
      handleRegionCallbacks(mServiceRegistry, mPendingRegionInfos);
    }
    // This will block for the correct delay (or until we get data
    // on a socket). We also do not block on the first iteration
    // so that devices which do not have a timer can still start an
    // enumeration.
    {
      ServiceRegistryRef ref{mServiceRegistry};
      ref.get<DriverClient>().flushPending(mServiceRegistry);
      DataProcessorContext* lastActive = state.lastActiveDataProcessor.load();
      // Reset to zero unless some other DataPorcessorContext completed in the meanwhile.
      // In such case we will take care of it at next iteration.
      state.lastActiveDataProcessor.compare_exchange_strong(lastActive, nullptr);

      auto shouldNotWait = (lastActive != nullptr &&
                            (state.streaming != StreamingState::Idle) && (state.activeSignals.empty())) ||
      if (firstLoop) {
        shouldNotWait = true;
        firstLoop = false;
      }
      if (lastActive != nullptr) {
      }
      if (NewStatePending()) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "New state pending. Waiting for it to be handled.");
        shouldNotWait = true;
      }
      // If we are Idle, we can then consider the transition to be expired.
      if (state.transitionHandling == TransitionHandlingState::Requested && state.streaming == StreamingState::Idle) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "State transition requested and we are now in Idle. We can consider it to be completed.");
        state.transitionHandling = TransitionHandlingState::Expired;
      }
      // Pop one severity override pushed by an earlier tracing request.
      if (state.severityStack.empty() == false) {
        fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
        state.severityStack.pop_back();
      }
      // for (auto &info : mDeviceContext.state->inputChannelInfos) {
      //   shouldNotWait |= info.readPolled;
      // }
      state.loopReason = DeviceState::NO_REASON;
      state.firedTimers.clear();
      if ((state.tracingFlags & DeviceState::LoopReason::TRACE_CALLBACKS) != 0) {
        state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
        fair::Logger::SetConsoleSeverity(fair::Severity::trace);
      }
      // Run the asynchronous queue just before sleeping again, so that:
      // - we can trigger further events from the queue
      // - we can guarantee this is the last thing we do in the loop (
      //   assuming no one else is adding to the queue before this point).
      auto onDrop = [&registry = mServiceRegistry, lid](TimesliceSlot slot, std::vector<std::vector<fair::mq::MessagePtr>>& dropped, TimesliceIndex::OldestOutputInfo oldestOutputInfo) {
        O2_SIGNPOST_START(device, lid, "run_loop", "Dropping message from slot %" PRIu64 ". Forwarding as needed.", (uint64_t)slot.index);
        ServiceRegistryRef ref{registry};
        ref.get<AsyncQueue>();
        ref.get<DecongestionService>();
        ref.get<DataRelayer>();
        // Get the current timeslice for the slot.
        auto& variables = ref.get<TimesliceIndex>().getVariablesForSlot(slot);
        forwardInputs(registry, slot, dropped, oldestOutputInfo, false, true);
      };
      auto& relayer = ref.get<DataRelayer>();
      relayer.prunePending(onDrop);
      auto& queue = ref.get<AsyncQueue>();
      auto oldestPossibleTimeslice = relayer.getOldestPossibleOutput();
      AsyncQueueHelpers::run(queue, {oldestPossibleTimeslice.timeslice.value});
      if (shouldNotWait == false) {
        auto& dpContext = ref.get<DataProcessorContext>();
        dpContext.preLoopCallbacks(ref);
      }
      O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. %{}s", shouldNotWait ? "Will immediately schedule a new one" : "Waiting for next event.");
      // Block in the event loop unless something is already pending.
      uv_run(state.loop, shouldNotWait ? UV_RUN_NOWAIT : UV_RUN_ONCE);
      O2_SIGNPOST_START(device, lid, "run_loop", "Run loop started. Loop reason %d.", state.loopReason);
      if ((state.loopReason & state.tracingFlags) != 0) {
        state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
        fair::Logger::SetConsoleSeverity(fair::Severity::trace);
      } else if (state.severityStack.empty() == false) {
        fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
        state.severityStack.pop_back();
      }
      O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Loop reason mask %x & %x = %x", state.loopReason, state.tracingFlags, state.loopReason & state.tracingFlags);

      if ((state.loopReason & DeviceState::LoopReason::OOB_ACTIVITY) != 0) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Out of band activity detected. Rescanning everything.");
        relayer.rescan();
      }

      if (!state.pendingOffers.empty()) {
        O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Pending %" PRIu64 " offers. updating the ComputingQuotaEvaluator.", (uint64_t)state.pendingOffers.size());
        ref.get<ComputingQuotaEvaluator>().updateOffers(state.pendingOffers, uv_now(state.loop));
      }
    }

    // Notify on the main thread the new region callbacks, making sure
    // no callback is issued if there is something still processing.
    // Notice that we still need to perform callbacks also after
    // the socket epolled, because otherwise we would end up serving
    // the callback after the first data arrives is the system is too
    // fast to transition from Init to Run.
    {
      std::lock_guard<std::mutex> lock(mRegionInfoMutex);
      handleRegionCallbacks(mServiceRegistry, mPendingRegionInfos);
    }

    assert(mStreams.size() == mHandles.size());
    // Select a non-running stream; note this loop has no break, so the last
    // free stream wins.
    TaskStreamRef streamRef{-1};
    for (size_t ti = 0; ti < mStreams.size(); ti++) {
      auto& taskInfo = mStreams[ti];
      if (taskInfo.running) {
        continue;
      }
      // Stream 0 is for when we run in
      streamRef.index = ti;
    }
    using o2::monitoring::Metric;
    using o2::monitoring::Monitoring;
    using o2::monitoring::tags::Key;
    using o2::monitoring::tags::Value;
    // We have an empty stream, let's check if we have enough
    // resources for it to run something
    if (streamRef.index != -1) {
      // Synchronous execution of the callbacks. This will be moved in the
      // moved in the on_socket_polled once we have threading in place.
      uv_work_t& handle = mHandles[streamRef.index];
      TaskStreamInfo& stream = mStreams[streamRef.index];
      handle.data = &mStreams[streamRef.index];

      // Reports expired computing-quota offers into the stats service.
      static std::function<void(ComputingQuotaOffer const&, ComputingQuotaStats const& stats)> reportExpiredOffer = [&registry = mServiceRegistry](ComputingQuotaOffer const& offer, ComputingQuotaStats const& stats) {
        ServiceRegistryRef ref{registry};
        auto& dpStats = ref.get<DataProcessingStats>();
        dpStats.updateStats({static_cast<short>(ProcessingStatsId::RESOURCE_OFFER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredOffers});
        dpStats.updateStats({static_cast<short>(ProcessingStatsId::ARROW_BYTES_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredBytes});
        dpStats.updateStats({static_cast<short>(ProcessingStatsId::TIMESLICE_NUMBER_EXPIRED), DataProcessingStats::Op::Set, stats.totalExpiredTimeslices});
        dpStats.processCommandQueue();
      };
      auto ref = ServiceRegistryRef{mServiceRegistry};

      // Deciding wether to run or not can be done by passing a request to
      // the evaluator. In this case, the request is always satisfied and
      // we run on whatever resource is available.
      auto& spec = ref.get<DeviceSpec const>();
      bool enough = ref.get<ComputingQuotaEvaluator>().selectOffer(streamRef.index, spec.resourcePolicy.request, uv_now(state.loop));

      // Function-static: scheduling statistics are shared across iterations,
      // used to rate-limit the "not enough resources" warning below.
      struct SchedulingStats {
        std::atomic<size_t> lastScheduled = 0;
        std::atomic<size_t> numberOfUnscheduledSinceLastScheduled = 0;
        std::atomic<size_t> numberOfUnscheduled = 0;
        std::atomic<size_t> numberOfScheduled = 0;
        std::atomic<size_t> nextWarnAt = 1;
      };
      static SchedulingStats schedulingStats;
      O2_SIGNPOST_ID_GENERATE(sid, scheduling);
      if (enough) {
        stream.id = streamRef;
        stream.running = true;
        stream.registry = &mServiceRegistry;
        schedulingStats.lastScheduled = uv_now(state.loop);
        schedulingStats.numberOfScheduled++;
        schedulingStats.numberOfUnscheduledSinceLastScheduled = 0;
        schedulingStats.nextWarnAt = 1;
        O2_SIGNPOST_EVENT_EMIT(scheduling, sid, "Run", "Enough resources to schedule computation on stream %d", streamRef.index);
        if (dplEnableMultithreding) [[unlikely]] {
          stream.task = &handle;
          uv_queue_work(state.loop, stream.task, run_callback, run_completion);
        } else {
          // Single-threaded path: run the work and its completion inline.
          run_callback(&handle);
          run_completion(&handle, 0);
        }
      } else {
        // Warn with exponential backoff (nextWarnAt doubles each time).
        if (schedulingStats.numberOfUnscheduledSinceLastScheduled >= schedulingStats.nextWarnAt) {
          O2_SIGNPOST_EVENT_EMIT_WARN(scheduling, sid, "Run",
                                      "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu. Data is not lost and it will be scheduled again.",
                                      schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
                                      schedulingStats.lastScheduled.load());
          schedulingStats.nextWarnAt = schedulingStats.nextWarnAt * 2;
        } else {
          O2_SIGNPOST_EVENT_EMIT(scheduling, sid, "Run",
                                 "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu. Data is not lost and it will be scheduled again.",
                                 schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
                                 schedulingStats.lastScheduled.load());
        }
        schedulingStats.numberOfUnscheduled++;
        schedulingStats.numberOfUnscheduledSinceLastScheduled++;
        auto ref = ServiceRegistryRef{mServiceRegistry};
        ref.get<ComputingQuotaEvaluator>().handleExpired(reportExpiredOffer);
      }
    }
  }

  O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. Transition handling state %d.", (int)state.transitionHandling);
  // Drop any leftover parts on the input channels before leaving Run.
  auto& spec = ref.get<DeviceSpec const>();
  for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
    auto& info = state.inputChannelInfos[ci];
    info.parts.fParts.clear();
  }
  state.transitionHandling = TransitionHandlingState::NoTransition;
}
1431
1435{
1436 auto& context = ref.get<DataProcessorContext>();
1437 O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context);
1438 O2_SIGNPOST_START(device, dpid, "do_prepare", "Starting DataProcessorContext::doPrepare.");
1439
1440 {
1441 ref.get<CallbackService>().call<CallbackService::Id::ClockTick>();
1442 }
1443 // Whether or not we had something to do.
1444
1445 // Initialise the value for context.allDone. It will possibly be updated
1446 // below if any of the channels is not done.
1447 //
1448 // Notice that fake input channels (InputChannelState::Pull) cannot possibly
1449 // expect to receive an EndOfStream signal. Thus we do not wait for these
1450 // to be completed. In the case of data source devices, as they do not have
1451 // real data input channels, they have to signal EndOfStream themselves.
1452 auto& state = ref.get<DeviceState>();
1453 auto& spec = ref.get<DeviceSpec const>();
1454 O2_SIGNPOST_ID_FROM_POINTER(cid, device, state.inputChannelInfos.data());
1455 O2_SIGNPOST_START(device, cid, "do_prepare", "Reported channel states.");
1456 context.allDone = std::any_of(state.inputChannelInfos.begin(), state.inputChannelInfos.end(), [cid](const auto& info) {
1457 if (info.channel) {
1458 O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "Input channel %{public}s%{public}s has %zu parts left and is in state %d.",
1459 info.channel->GetName().c_str(), (info.id.value == ChannelIndex::INVALID ? " (non DPL)" : ""), info.parts.fParts.size(), (int)info.state);
1460 } else {
1461 O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "External channel %d is in state %d.", info.id.value, (int)info.state);
1462 }
1463 return (info.parts.fParts.empty() == true && info.state != InputChannelState::Pull);
1464 });
1465 O2_SIGNPOST_END(device, cid, "do_prepare", "End report.");
1466 O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Processing %zu input channels.", spec.inputChannels.size());
1469 static std::vector<int> pollOrder;
1470 pollOrder.resize(state.inputChannelInfos.size());
1471 std::iota(pollOrder.begin(), pollOrder.end(), 0);
1472 std::sort(pollOrder.begin(), pollOrder.end(), [&infos = state.inputChannelInfos](int a, int b) {
1473 return infos[a].oldestForChannel.value < infos[b].oldestForChannel.value;
1474 });
1475
1476 // Nothing to poll...
1477 if (pollOrder.empty()) {
1478 O2_SIGNPOST_END(device, dpid, "do_prepare", "Nothing to poll. Waiting for next iteration.");
1479 return;
1480 }
1481 auto currentOldest = state.inputChannelInfos[pollOrder.front()].oldestForChannel;
1482 auto currentNewest = state.inputChannelInfos[pollOrder.back()].oldestForChannel;
1483 auto delta = currentNewest.value - currentOldest.value;
1484 O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Oldest possible timeframe range %" PRIu64 " => %" PRIu64 " delta %" PRIu64,
1485 (int64_t)currentOldest.value, (int64_t)currentNewest.value, (int64_t)delta);
1486 auto& infos = state.inputChannelInfos;
1487
1488 if (context.balancingInputs) {
1489 static int pipelineLength = DefaultsHelpers::pipelineLength(*ref.get<RawDeviceService>().device()->fConfig);
1490 static uint64_t ahead = getenv("DPL_MAX_CHANNEL_AHEAD") ? std::atoll(getenv("DPL_MAX_CHANNEL_AHEAD")) : std::max(8, std::min(pipelineLength - 48, pipelineLength / 2));
1491 auto newEnd = std::remove_if(pollOrder.begin(), pollOrder.end(), [&infos, limitNew = currentOldest.value + ahead](int a) -> bool {
1492 return infos[a].oldestForChannel.value > limitNew;
1493 });
1494 for (auto it = pollOrder.begin(); it < pollOrder.end(); it++) {
1495 const auto& channelInfo = state.inputChannelInfos[*it];
1496 if (channelInfo.pollerIndex != -1) {
1497 auto& poller = state.activeInputPollers[channelInfo.pollerIndex];
1498 auto& pollerContext = *(PollerContext*)(poller->data);
1499 if (pollerContext.pollerState == PollerContext::PollerState::Connected || pollerContext.pollerState == PollerContext::PollerState::Suspended) {
1500 bool running = pollerContext.pollerState == PollerContext::PollerState::Connected;
1501 bool shouldBeRunning = it < newEnd;
1502 if (running != shouldBeRunning) {
1503 uv_poll_start(poller, shouldBeRunning ? UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED : 0, &on_socket_polled);
1504 pollerContext.pollerState = shouldBeRunning ? PollerContext::PollerState::Connected : PollerContext::PollerState::Suspended;
1505 }
1506 }
1507 }
1508 }
1509 pollOrder.erase(newEnd, pollOrder.end());
1510 }
1511 O2_SIGNPOST_END(device, dpid, "do_prepare", "%zu channels pass the channel inbalance balance check.", pollOrder.size());
1512
1513 for (auto sci : pollOrder) {
1514 auto& info = state.inputChannelInfos[sci];
1515 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &info);
1516 O2_SIGNPOST_START(device, cid, "channels", "Processing channel %s", info.channel->GetName().c_str());
1517
1518 if (info.state != InputChannelState::Completed && info.state != InputChannelState::Pull) {
1519 context.allDone = false;
1520 }
1521 if (info.state != InputChannelState::Running) {
1522 // Remember to flush data if we are not running
1523 // and there is some message pending.
1524 if (info.parts.Size()) {
1526 }
1527 O2_SIGNPOST_END(device, cid, "channels", "Flushing channel %s which is in state %d and has %zu parts still pending.",
1528 info.channel->GetName().c_str(), (int)info.state, info.parts.Size());
1529 continue;
1530 }
1531 if (info.channel == nullptr) {
1532 O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is nullptr and has %zu parts still pending.",
1533 info.channel->GetName().c_str(), (int)info.state, info.parts.Size());
1534 continue;
1535 }
1536 // Only poll DPL channels for now.
1538 O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is not a DPL channel and has %zu parts still pending.",
1539 info.channel->GetName().c_str(), (int)info.state, info.parts.Size());
1540 continue;
1541 }
1542 auto& socket = info.channel->GetSocket();
1543 // If we have pending events from a previous iteration,
1544 // we do receive in any case.
1545 // Otherwise we check if there is any pending event and skip
1546 // this channel in case there is none.
1547 if (info.hasPendingEvents == 0) {
1548 socket.Events(&info.hasPendingEvents);
1549 // If we do not read, we can continue.
1550 if ((info.hasPendingEvents & 1) == 0 && (info.parts.Size() == 0)) {
1551 O2_SIGNPOST_END(device, cid, "channels", "No pending events and no remaining parts to process for channel %{public}s", info.channel->GetName().c_str());
1552 continue;
1553 }
1554 }
1555 // We can reset this, because it means we have seen at least 1
1556 // message after the UV_READABLE was raised.
1557 info.readPolled = false;
1558 // Notice that there seems to be a difference between the documentation
1559 // of zeromq and the observed behavior. The fact that ZMQ_POLLIN
1560 // is raised does not mean that a message is immediately available to
1561 // read, just that it will be available soon, so the receive can
1562 // still return -2. To avoid this we keep receiving on the socket until
1563 // we get a message. In order not to overflow the DPL queue we process
1564 // one message at the time and we keep track of wether there were more
1565 // to process.
1566 bool newMessages = false;
1567 while (true) {
1568 O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Receiving loop called for channel %{public}s (%d) with oldest possible timeslice %zu",
1569 info.channel->GetName().c_str(), info.id.value, info.oldestForChannel.value);
1570 if (info.parts.Size() < 64) {
1571 fair::mq::Parts parts;
1572 info.channel->Receive(parts, 0);
1573 if (parts.Size()) {
1574 O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Received %zu parts from channel %{public}s (%d).", parts.Size(), info.channel->GetName().c_str(), info.id.value);
1575 }
1576 for (auto&& part : parts) {
1577 info.parts.fParts.emplace_back(std::move(part));
1578 }
1579 newMessages |= true;
1580 }
1581
1582 if (info.parts.Size() >= 0) {
1584 // Receiving data counts as activity now, so that
1585 // We can make sure we process all the pending
1586 // messages without hanging on the uv_run.
1587 break;
1588 }
1589 }
1590 // We check once again for pending events, keeping track if this was the
1591 // case so that we can immediately repeat this loop and avoid remaining
1592 // stuck in uv_run. This is because we will not get notified on the socket
1593 // if more events are pending due to zeromq level triggered approach.
1594 socket.Events(&info.hasPendingEvents);
1595 if (info.hasPendingEvents) {
1596 info.readPolled = false;
1597 // In case there were messages, we consider it as activity
1598 if (newMessages) {
1599 state.lastActiveDataProcessor.store(&context);
1600 }
1601 }
1602 O2_SIGNPOST_END(device, cid, "channels", "Done processing channel %{public}s (%d).",
1603 info.channel->GetName().c_str(), info.id.value);
1604 }
1605}
1606
1608{
1609 auto& context = ref.get<DataProcessorContext>();
1610 O2_SIGNPOST_ID_FROM_POINTER(dpid, device, &context);
1611 auto& state = ref.get<DeviceState>();
1612 auto& spec = ref.get<DeviceSpec const>();
1613
1614 if (state.streaming == StreamingState::Idle) {
1615 return;
1616 }
1617
1618 context.completed.clear();
1619 context.completed.reserve(16);
1620 if (DataProcessingDevice::tryDispatchComputation(ref, context.completed)) {
1621 state.lastActiveDataProcessor.store(&context);
1622 }
1623 DanglingContext danglingContext{*context.registry};
1624
1625 context.preDanglingCallbacks(danglingContext);
1626 if (state.lastActiveDataProcessor.load() == nullptr) {
1627 ref.get<CallbackService>().call<CallbackService::Id::Idle>();
1628 }
1629 auto activity = ref.get<DataRelayer>().processDanglingInputs(context.expirationHandlers, *context.registry, true);
1630 if (activity.expiredSlots > 0) {
1631 state.lastActiveDataProcessor = &context;
1632 }
1633
1634 context.completed.clear();
1635 if (DataProcessingDevice::tryDispatchComputation(ref, context.completed)) {
1636 state.lastActiveDataProcessor = &context;
1637 }
1638
1639 context.postDanglingCallbacks(danglingContext);
1640
1641 // If we got notified that all the sources are done, we call the EndOfStream
1642 // callback and return false. Notice that what happens next is actually
1643 // dependent on the callback, not something which is controlled by the
1644 // framework itself.
1645 if (context.allDone == true && state.streaming == StreamingState::Streaming) {
1647 state.lastActiveDataProcessor = &context;
1648 }
1649
1650 if (state.streaming == StreamingState::EndOfStreaming) {
1651 O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "We are in EndOfStreaming. Flushing queues.");
1652 // We keep processing data until we are Idle.
1653 // FIXME: not sure this is the correct way to drain the queues, but
1654 // I guess we will see.
1657 auto& relayer = ref.get<DataRelayer>();
1658
1659 bool shouldProcess = DataProcessingHelpers::hasOnlyGenerated(spec) == false;
1660
1661 while (DataProcessingDevice::tryDispatchComputation(ref, context.completed) && shouldProcess) {
1662 relayer.processDanglingInputs(context.expirationHandlers, *context.registry, false);
1663 }
1664
1665 auto& timingInfo = ref.get<TimingInfo>();
1666 // We should keep the data generated at end of stream only for those
1667 // which are not sources.
1668 timingInfo.keepAtEndOfStream = shouldProcess;
1669 // Fill timinginfo with some reasonable values for data sent with endOfStream
1670 timingInfo.timeslice = relayer.getOldestPossibleOutput().timeslice.value;
1671 timingInfo.tfCounter = -1;
1672 timingInfo.firstTForbit = -1;
1673 // timingInfo.runNumber = ; // Not sure where to get this if not already set
1674 timingInfo.creation = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::system_clock::now()).time_since_epoch().count();
1675 O2_SIGNPOST_EVENT_EMIT(calibration, dpid, "calibration", "TimingInfo.keepAtEndOfStream %d", timingInfo.keepAtEndOfStream);
1676
1677 EndOfStreamContext eosContext{*context.registry, ref.get<DataAllocator>()};
1678
1679 context.preEOSCallbacks(eosContext);
1680 auto& streamContext = ref.get<StreamContext>();
1681 streamContext.preEOSCallbacks(eosContext);
1682 ref.get<CallbackService>().call<CallbackService::Id::EndOfStream>(eosContext);
1683 streamContext.postEOSCallbacks(eosContext);
1684 context.postEOSCallbacks(eosContext);
1685
1686 for (auto& channel : spec.outputChannels) {
1687 O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Sending end of stream to %{public}s.", channel.name.c_str());
1689 }
1690 // This is needed because the transport is deleted before the device.
1691 relayer.clear();
1693 // In case we should process, note the data processor responsible for it
1694 if (shouldProcess) {
1695 state.lastActiveDataProcessor = &context;
1696 }
1697 // On end of stream we shut down all output pollers.
1698 O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Shutting down output pollers.");
1699 for (auto& poller : state.activeOutputPollers) {
1700 uv_poll_stop(poller);
1701 }
1702 return;
1703 }
1704
1705 if (state.streaming == StreamingState::Idle) {
1706 // On end of stream we shut down all output pollers.
1707 O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Shutting down output pollers.");
1708 for (auto& poller : state.activeOutputPollers) {
1709 uv_poll_stop(poller);
1710 }
1711 }
1712
1713 return;
1714}
1715
1717{
1718 ServiceRegistryRef ref{mServiceRegistry};
1719 ref.get<DataRelayer>().clear();
1720 auto& deviceContext = ref.get<DeviceContext>();
1721 // If the signal handler is there, we should
1722 // hide the registry from it, so that we do not
1723 // end up calling the signal handler on something
1724 // which might not be there anymore.
1725 if (deviceContext.sigusr1Handle) {
1726 deviceContext.sigusr1Handle->data = nullptr;
1727 }
1728 // Makes sure we do not have a working context on
1729 // shutdown.
1730 for (auto& handle : ref.get<DeviceState>().activeSignals) {
1731 handle->data = nullptr;
1732 }
1733}
1734
1737 {
1738 }
1739};
1740
1741auto forwardOnInsertion(ServiceRegistryRef& ref, std::span<fair::mq::MessagePtr>& messages) -> void
1742{
1743 O2_SIGNPOST_ID_GENERATE(sid, forwarding);
1744
1745 auto& spec = ref.get<DeviceSpec const>();
1746 auto& context = ref.get<DataProcessorContext>();
1747 if (context.forwardPolicy == ForwardPolicy::AfterProcessing || spec.forwards.empty()) {
1748 O2_SIGNPOST_EVENT_EMIT(device, sid, "device", "Early forwardinding not enabled / needed.");
1749 return;
1750 }
1751
1752 O2_SIGNPOST_EVENT_EMIT(device, sid, "device", "Early forwardinding before injecting data into relayer.");
1753 auto& timesliceIndex = ref.get<TimesliceIndex>();
1754 auto oldestTimeslice = timesliceIndex.getOldestPossibleOutput();
1755
1756 auto& proxy = ref.get<FairMQDeviceProxy>();
1757
1758 O2_SIGNPOST_START(forwarding, sid, "forwardInputs",
1759 "Starting forwarding for incoming messages with oldestTimeslice %zu with copy",
1760 oldestTimeslice.timeslice.value);
1761 std::vector<fair::mq::Parts> forwardedParts(proxy.getNumForwardChannels());
1762 DataProcessingHelpers::routeForwardedMessages(proxy, messages, forwardedParts, true, false);
1763
1764 for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
1765 if (forwardedParts[fi].Size() == 0) {
1766 continue;
1767 }
1768 ForwardChannelInfo info = proxy.getForwardChannelInfo(ChannelIndex{fi});
1769 auto& parts = forwardedParts[fi];
1770 if (info.policy == nullptr) {
1771 O2_SIGNPOST_EVENT_EMIT_ERROR(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d has no policy.", info.name.c_str(), fi);
1772 continue;
1773 }
1774 O2_SIGNPOST_EVENT_EMIT(forwarding, sid, "forwardInputs", "Forwarding to %{public}s %d", info.name.c_str(), fi);
1775 info.policy->forward(parts, ChannelIndex{fi}, ref);
1776 }
1777 auto& asyncQueue = ref.get<AsyncQueue>();
1778 auto& decongestion = ref.get<DecongestionService>();
1779 O2_SIGNPOST_ID_GENERATE(aid, async_queue);
1780 O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputs", "Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);
1781 AsyncQueueHelpers::post(asyncQueue, AsyncTask{.timeslice = oldestTimeslice.timeslice, .id = decongestion.oldestPossibleTimesliceTask, .debounce = -1, .callback = decongestionCallbackLate}
1782 .user<DecongestionContext>({.ref = ref, .oldestTimeslice = oldestTimeslice}));
1783 O2_SIGNPOST_END(forwarding, sid, "forwardInputs", "Forwarding done");
1784};
1785
1791{
1792 using InputInfo = DataRelayer::InputInfo;
1794
1795 auto& context = ref.get<DataProcessorContext>();
1796 // This is the same id as the upper level function, so we get the events
1797 // associated with the same interval. We will simply use "handle_data" as
1798 // the category.
1799 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &info);
1800
1801 // This is how we validate inputs. I.e. we try to enforce the O2 Data model
1802 // and we do a few stats. We bind parts as a lambda captured variable, rather
1803 // than an input, because we do not want the outer loop actually be exposed
1804 // to the implementation details of the messaging layer.
1805 auto getInputTypes = [&info, &context]() -> std::optional<std::vector<InputInfo>> {
1806 O2_SIGNPOST_ID_FROM_POINTER(cid, device, &info);
1807 auto ref = ServiceRegistryRef{*context.registry};
1808 auto& stats = ref.get<DataProcessingStats>();
1809 auto& state = ref.get<DeviceState>();
1810 auto& parts = info.parts;
1811 stats.updateStats({(int)ProcessingStatsId::TOTAL_INPUTS, DataProcessingStats::Op::Set, (int64_t)parts.Size()});
1812
1813 std::vector<InputInfo> results;
1814 // we can reserve the upper limit
1815 results.reserve(parts.Size() / 2);
1816 size_t nTotalPayloads = 0;
1817
1818 auto insertInputInfo = [&results, &nTotalPayloads](size_t position, size_t length, InputType type, ChannelIndex index) {
1819 results.emplace_back(position, length, type, index);
1820 if (type != InputType::Invalid && length > 1) {
1821 nTotalPayloads += length - 1;
1822 }
1823 };
1824
1825 for (size_t pi = 0; pi < parts.Size(); pi += 2) {
1826 auto* headerData = parts.At(pi)->GetData();
1827 auto sih = o2::header::get<SourceInfoHeader*>(headerData);
1828 auto dh = o2::header::get<DataHeader*>(headerData);
1829 if (sih) {
1830 O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got SourceInfoHeader with state %d", (int)sih->state);
1831 info.state = sih->state;
1832 insertInputInfo(pi, 2, InputType::SourceInfo, info.id);
1833 state.lastActiveDataProcessor = &context;
1834 if (dh) {
1835 LOGP(error, "Found data attached to a SourceInfoHeader");
1836 }
1837 continue;
1838 }
1839 auto dih = o2::header::get<DomainInfoHeader*>(headerData);
1840 if (dih) {
1841 O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got DomainInfoHeader with oldestPossibleTimeslice %d", (int)dih->oldestPossibleTimeslice);
1842 insertInputInfo(pi, 2, InputType::DomainInfo, info.id);
1843 state.lastActiveDataProcessor = &context;
1844 if (dh) {
1845 LOGP(error, "Found data attached to a DomainInfoHeader");
1846 }
1847 continue;
1848 }
1849 if (!dh) {
1850 insertInputInfo(pi, 0, InputType::Invalid, info.id);
1851 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "Header is not a DataHeader?");
1852 continue;
1853 }
1854 if (dh->payloadSize > parts.At(pi + 1)->GetSize()) {
1855 insertInputInfo(pi, 0, InputType::Invalid, info.id);
1856 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "DataHeader payloadSize mismatch");
1857 continue;
1858 }
1859 auto dph = o2::header::get<DataProcessingHeader*>(headerData);
1860 // We only deal with the tracking of parts if the log is enabled.
1861 // This is because in principle we should track the size of each of
1862 // the parts and sum it up. Not for now.
1863 O2_SIGNPOST_ID_FROM_POINTER(pid, parts, headerData);
1864 O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader %{public}-4s/%{public}-16s/%d with splitPayloadParts %d and splitPayloadIndex %d",
1865 dh->dataOrigin.str, dh->dataDescription.str, dh->subSpecification, dh->splitPayloadParts, dh->splitPayloadIndex);
1866 if (!dph) {
1867 insertInputInfo(pi, 2, InputType::Invalid, info.id);
1868 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "Header stack does not contain DataProcessingHeader");
1869 continue;
1870 }
1871 if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
1872 // this is indicating a sequence of payloads following the header
1873 // FIXME: we will probably also set the DataHeader version
1874 insertInputInfo(pi, dh->splitPayloadParts + 1, InputType::Data, info.id);
1875 pi += dh->splitPayloadParts - 1;
1876 } else {
1877 // We can set the type for the next splitPayloadParts
1878 // because we are guaranteed they are all the same.
1879 // If splitPayloadParts = 0, we assume that means there is only one (header, payload)
1880 // pair.
1881 size_t finalSplitPayloadIndex = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
1882 if (finalSplitPayloadIndex > parts.Size()) {
1883 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "DataHeader::splitPayloadParts invalid");
1884 insertInputInfo(pi, 0, InputType::Invalid, info.id);
1885 continue;
1886 }
1887 insertInputInfo(pi, 2, InputType::Data, info.id);
1888 for (; pi + 2 < finalSplitPayloadIndex; pi += 2) {
1889 insertInputInfo(pi + 2, 2, InputType::Data, info.id);
1890 }
1891 }
1892 }
1893 if (results.size() + nTotalPayloads != parts.Size()) {
1894 O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "inconsistent number of inputs extracted. %zu vs parts (%zu)", results.size() + nTotalPayloads, parts.Size());
1895 return std::nullopt;
1896 }
1897 return results;
1898 };
1899
1900 auto reportError = [ref](const char* message) {
1901 auto& stats = ref.get<DataProcessingStats>();
1903 };
1904
1905 auto handleValidMessages = [&info, ref, &reportError, &context](std::vector<InputInfo> const& inputInfos) {
1906 auto& relayer = ref.get<DataRelayer>();
1907 auto& state = ref.get<DeviceState>();
1908 static WaitBackpressurePolicy policy;
1909 auto& parts = info.parts;
1910 // We relay execution to make sure we have a complete set of parts
1911 // available.
1912 bool hasBackpressure = false;
1913 size_t minBackpressureTimeslice = -1;
1914 bool hasData = false;
1915 size_t oldestPossibleTimeslice = -1;
1916 static std::vector<int> ordering;
1917 // Same as inputInfos but with iota.
1918 ordering.resize(inputInfos.size());
1919 std::iota(ordering.begin(), ordering.end(), 0);
1920 // stable sort orderings by type and position
1921 std::stable_sort(ordering.begin(), ordering.end(), [&inputInfos](int const& a, int const& b) {
1922 auto const& ai = inputInfos[a];
1923 auto const& bi = inputInfos[b];
1924 if (ai.type != bi.type) {
1925 return ai.type < bi.type;
1926 }
1927 return ai.position < bi.position;
1928 });
1929 for (size_t ii = 0; ii < inputInfos.size(); ++ii) {
1930 auto const& input = inputInfos[ordering[ii]];
1931 switch (input.type) {
1932 case InputType::Data: {
1933 hasData = true;
1934 auto headerIndex = input.position;
1935 auto nMessages = 0;
1936 auto nPayloadsPerHeader = 0;
1937 if (input.size > 2) {
1938 // header and multiple payload sequence
1939 nMessages = input.size;
1940 nPayloadsPerHeader = nMessages - 1;
1941 } else {
1942 // multiple header-payload pairs
1943 auto dh = o2::header::get<DataHeader*>(parts.At(headerIndex)->GetData());
1944 nMessages = dh->splitPayloadParts > 0 ? dh->splitPayloadParts * 2 : 2;
1945 nPayloadsPerHeader = 1;
1946 ii += (nMessages / 2) - 1;
1947 }
1948 auto onDrop = [ref](TimesliceSlot slot, std::vector<std::vector<fair::mq::MessagePtr>>& dropped, TimesliceIndex::OldestOutputInfo oldestOutputInfo) {
1949 O2_SIGNPOST_ID_GENERATE(cid, async_queue);
1950 O2_SIGNPOST_EVENT_EMIT(async_queue, cid, "onDrop", "Dropping message from slot %zu. Forwarding as needed. Timeslice %zu",
1951 slot.index, oldestOutputInfo.timeslice.value);
1952 ref.get<AsyncQueue>();
1953 ref.get<DecongestionService>();
1954 ref.get<DataRelayer>();
1955 // Get the current timeslice for the slot.
1956 auto& variables = ref.get<TimesliceIndex>().getVariablesForSlot(slot);
1958 forwardInputs(ref, slot, dropped, oldestOutputInfo, false, true);
1959 };
1960
1961 auto relayed = relayer.relay(parts.At(headerIndex)->GetData(),
1962 &parts.At(headerIndex),
1963 input,
1964 nMessages,
1965 nPayloadsPerHeader,
1966 context.forwardPolicy == ForwardPolicy::AtInjection ? forwardOnInsertion : nullptr,
1967 onDrop);
1968 switch (relayed.type) {
1970 if (info.normalOpsNotified == true && info.backpressureNotified == false) {
1971 LOGP(alarm, "Backpressure on channel {}. Waiting.", info.channel->GetName());
1972 auto& monitoring = ref.get<o2::monitoring::Monitoring>();
1973 monitoring.send(o2::monitoring::Metric{1, fmt::format("backpressure_{}", info.channel->GetName())});
1974 info.backpressureNotified = true;
1975 info.normalOpsNotified = false;
1976 }
1977 policy.backpressure(info);
1978 hasBackpressure = true;
1979 minBackpressureTimeslice = std::min<size_t>(minBackpressureTimeslice, relayed.timeslice.value);
1980 break;
1984 if (info.normalOpsNotified == false && info.backpressureNotified == true) {
1985 LOGP(info, "Back to normal on channel {}.", info.channel->GetName());
1986 auto& monitoring = ref.get<o2::monitoring::Monitoring>();
1987 monitoring.send(o2::monitoring::Metric{0, fmt::format("backpressure_{}", info.channel->GetName())});
1988 info.normalOpsNotified = true;
1989 info.backpressureNotified = false;
1990 }
1991 break;
1992 }
1993 } break;
1994 case InputType::SourceInfo: {
1995 LOGP(detail, "Received SourceInfo");
1996 auto& context = ref.get<DataProcessorContext>();
1997 state.lastActiveDataProcessor = &context;
1998 auto headerIndex = input.position;
1999 auto payloadIndex = input.position + 1;
2000 assert(payloadIndex < parts.Size());
2001 // FIXME: the message with the end of stream cannot contain
2002 // split parts.
2003 parts.At(headerIndex).reset(nullptr);
2004 parts.At(payloadIndex).reset(nullptr);
2005 // for (size_t i = 0; i < dh->splitPayloadParts > 0 ? dh->splitPayloadParts * 2 - 1 : 1; ++i) {
2006 // parts.At(headerIndex + 1 + i).reset(nullptr);
2007 // }
2008 // pi += dh->splitPayloadParts > 0 ? dh->splitPayloadParts - 1 : 0;
2009
2010 } break;
2011 case InputType::DomainInfo: {
2014 auto& context = ref.get<DataProcessorContext>();
2015 state.lastActiveDataProcessor = &context;
2016 auto headerIndex = input.position;
2017 auto payloadIndex = input.position + 1;
2018 assert(payloadIndex < parts.Size());
2019 // FIXME: the message with the end of stream cannot contain
2020 // split parts.
2021
2022 auto dih = o2::header::get<DomainInfoHeader*>(parts.At(headerIndex)->GetData());
2023 if (hasBackpressure && dih->oldestPossibleTimeslice >= minBackpressureTimeslice) {
2024 break;
2025 }
2026 oldestPossibleTimeslice = std::min(oldestPossibleTimeslice, dih->oldestPossibleTimeslice);
2027 LOGP(debug, "Got DomainInfoHeader, new oldestPossibleTimeslice {} on channel {}", oldestPossibleTimeslice, info.id.value);
2028 parts.At(headerIndex).reset(nullptr);
2029 parts.At(payloadIndex).reset(nullptr);
2030 } break;
2031 case InputType::Invalid: {
2032 reportError("Invalid part found.");
2033 } break;
2034 }
2035 }
2038 if (oldestPossibleTimeslice != (size_t)-1) {
2039 info.oldestForChannel = {oldestPossibleTimeslice};
2040 auto& context = ref.get<DataProcessorContext>();
2041 context.domainInfoUpdatedCallback(*context.registry, oldestPossibleTimeslice, info.id);
2042 ref.get<CallbackService>().call<CallbackService::Id::DomainInfoUpdated>((ServiceRegistryRef)*context.registry, (size_t)oldestPossibleTimeslice, (ChannelIndex)info.id);
2043 state.lastActiveDataProcessor = &context;
2044 }
2045 auto it = std::remove_if(parts.fParts.begin(), parts.fParts.end(), [](auto& msg) -> bool { return msg.get() == nullptr; });
2046 parts.fParts.erase(it, parts.end());
2047 if (parts.fParts.size()) {
2048 LOG(debug) << parts.fParts.size() << " messages backpressured";
2049 }
2050 };
2051
2052 // Second part. This is the actual outer loop we want to obtain, with
2053 // implementation details which can be read. Notice how most of the state
2054 // is actually hidden. For example we do not expose what "input" is. This
2055 // will allow us to keep the same toplevel logic even if the actual meaning
2056 // of input is changed (for example we might move away from multipart
2057 // messages). Notice also that we need to act diffently depending on the
2058 // actual CompletionOp we want to perform. In particular forwarding inputs
2059 // also gets rid of them from the cache.
2060 auto inputTypes = getInputTypes();
2061 if (bool(inputTypes) == false) {
2062 reportError("Parts should come in couples. Dropping it.");
2063 return;
2064 }
2065 handleValidMessages(*inputTypes);
2066 return;
2067}
2068
2069namespace
2070{
// Min/max latency observed over the parts of one InputRecord.
// Extremes are initialized so that the first observed value replaces them:
// min starts at the largest representable value, max at the smallest.
struct InputLatency {
  uint64_t minLatency = std::numeric_limits<uint64_t>::max();
  uint64_t maxLatency = std::numeric_limits<uint64_t>::min();
};
2075
2076auto calculateInputRecordLatency(InputRecord const& record, uint64_t currentTime) -> InputLatency
2077{
2078 InputLatency result;
2079
2080 for (auto& item : record) {
2081 auto* header = o2::header::get<DataProcessingHeader*>(item.header);
2082 if (header == nullptr) {
2083 continue;
2084 }
2085 int64_t partLatency = (0x7fffffffffffffff & currentTime) - (0x7fffffffffffffff & header->creation);
2086 if (partLatency < 0) {
2087 partLatency = 0;
2088 }
2089 result.minLatency = std::min(result.minLatency, (uint64_t)partLatency);
2090 result.maxLatency = std::max(result.maxLatency, (uint64_t)partLatency);
2091 }
2092 return result;
2093};
2094
2095auto calculateTotalInputRecordSize(InputRecord const& record) -> int
2096{
2097 size_t totalInputSize = 0;
2098 for (auto& item : record) {
2099 auto* header = o2::header::get<DataHeader*>(item.header);
2100 if (header == nullptr) {
2101 continue;
2102 }
2103 totalInputSize += header->payloadSize;
2104 }
2105 return totalInputSize;
2106};
2107
// Atomically raise `maximum_value` to `value` if `value` is larger.
// Lock-free retry loop: compare_exchange_weak reloads the observed value on
// failure, and we stop as soon as the stored value is already >= `value`.
template <typename T>
void update_maximum(std::atomic<T>& maximum_value, T const& value) noexcept
{
  T observed = maximum_value.load();
  while (observed < value) {
    if (maximum_value.compare_exchange_weak(observed, value)) {
      break;
    }
  }
}
2116} // namespace
2117
2118bool DataProcessingDevice::tryDispatchComputation(ServiceRegistryRef ref, std::vector<DataRelayer::RecordAction>& completed)
2119{
2120 auto& context = ref.get<DataProcessorContext>();
2121 LOGP(debug, "DataProcessingDevice::tryDispatchComputation");
2122 // This is the actual hidden state for the outer loop. In case we decide we
2123 // want to support multithreaded dispatching of operations, I can simply
2124 // move these to some thread local store and the rest of the lambdas
2125 // should work just fine.
2126 std::vector<std::vector<fair::mq::MessagePtr>> currentSetOfInputs;
2127
2128 //
2129 auto getInputSpan = [ref, &currentSetOfInputs](TimesliceSlot slot, bool consume = true) {
2130 auto& relayer = ref.get<DataRelayer>();
2131 if (consume) {
2132 currentSetOfInputs = relayer.consumeAllInputsForTimeslice(slot);
2133 } else {
2134 currentSetOfInputs = relayer.consumeExistingInputsForTimeslice(slot);
2135 }
2136 auto getter = [&currentSetOfInputs](size_t i, size_t partindex) -> DataRef {
2137 if ((currentSetOfInputs[i] | count_payloads{}) > partindex) {
2138 const char* headerptr = nullptr;
2139 const char* payloadptr = nullptr;
2140 size_t payloadSize = 0;
2141 // - each input can have multiple parts
2142 // - "part" denotes a sequence of messages belonging together, the first message of the
2143 // sequence is the header message
2144 // - each part has one or more payload messages
2145 // - InputRecord provides all payloads as header-payload pair
2146 auto const indices = currentSetOfInputs[i] | get_pair{partindex};
2147 auto const& headerMsg = currentSetOfInputs[i][indices.headerIdx];
2148 auto const& payloadMsg = currentSetOfInputs[i][indices.payloadIdx];
2149 headerptr = static_cast<char const*>(headerMsg->GetData());
2150 payloadptr = payloadMsg ? static_cast<char const*>(payloadMsg->GetData()) : nullptr;
2151 payloadSize = payloadMsg ? payloadMsg->GetSize() : 0;
2152 return DataRef{nullptr, headerptr, payloadptr, payloadSize};
2153 }
2154 return DataRef{};
2155 };
2156 auto nofPartsGetter = [&currentSetOfInputs](size_t i) -> size_t {
2157 return (currentSetOfInputs[i] | count_payloads{});
2158 };
2159 auto refCountGetter = [&currentSetOfInputs](size_t idx) -> int {
2160 auto& header = static_cast<const fair::mq::shmem::Message&>(*(currentSetOfInputs[idx] | get_header{0}));
2161 return header.GetRefCount();
2162 };
2163 return InputSpan{getter, nofPartsGetter, refCountGetter, currentSetOfInputs.size()};
2164 };
2165
2166 auto markInputsAsDone = [ref](TimesliceSlot slot) -> void {
2167 auto& relayer = ref.get<DataRelayer>();
2169 };
2170
2171 // I need a preparation step which gets the current timeslice id and
2172 // propagates it to the various contextes (i.e. the actual entities which
2173 // create messages) because the messages need to have the timeslice id into
2174 // it.
2175 auto prepareAllocatorForCurrentTimeSlice = [ref](TimesliceSlot i) -> void {
2176 auto& relayer = ref.get<DataRelayer>();
2177 auto& timingInfo = ref.get<TimingInfo>();
2178 auto timeslice = relayer.getTimesliceForSlot(i);
2179
2180 timingInfo.timeslice = timeslice.value;
2181 timingInfo.tfCounter = relayer.getFirstTFCounterForSlot(i);
2182 timingInfo.firstTForbit = relayer.getFirstTFOrbitForSlot(i);
2183 timingInfo.runNumber = relayer.getRunNumberForSlot(i);
2184 timingInfo.creation = relayer.getCreationTimeForSlot(i);
2185 };
2186 auto updateRunInformation = [ref](TimesliceSlot i) -> void {
2187 auto& dataProcessorContext = ref.get<DataProcessorContext>();
2188 auto& relayer = ref.get<DataRelayer>();
2189 auto& timingInfo = ref.get<TimingInfo>();
2190 auto timeslice = relayer.getTimesliceForSlot(i);
2191 // We report wether or not this timing info refers to a new Run.
2192 timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber;
2193 // A switch to runNumber=0 should not appear and thus does not set globalRunNumberChanged, unless it is seen in the first processed timeslice
2194 timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0);
2195 // FIXME: for now there is only one stream, however we
2196 // should calculate this correctly once we finally get the
2197 // the StreamContext in.
2198 timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged;
2199 };
2200
2201 // When processing them, timers will have to be cleaned up
2202 // to avoid double counting them.
2203 // This was actually the easiest solution we could find for
2204 // O2-646.
2205 auto cleanTimers = [&currentSetOfInputs](TimesliceSlot slot, InputRecord& record) {
2206 assert(record.size() == currentSetOfInputs.size());
2207 for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) {
2208 // assuming that for timer inputs we do have exactly one PartRef object
2209 // in the MessageSet, multiple PartRef Objects are only possible for either
2210 // split payload messages of wildcard matchers, both for data inputs
2211 DataRef input = record.getByPos(ii);
2212 if (input.spec->lifetime != Lifetime::Timer) {
2213 continue;
2214 }
2215 if (input.header == nullptr) {
2216 continue;
2217 }
2218 // This will hopefully delete the message.
2219 currentSetOfInputs[ii].clear();
2220 }
2221 };
2222
2223 // Function to cleanup record. For the moment we
2224 // simply use it to keep track of input messages
2225 // which are not needed, to display them in the GUI.
2226 auto cleanupRecord = [](InputRecord& record) {
2227 if (O2_LOG_ENABLED(parts) == false) {
2228 return;
2229 }
2230 for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) {
2231 DataRef input = record.getByPos(pi);
2232 if (input.header == nullptr) {
2233 continue;
2234 }
2235 auto sih = o2::header::get<SourceInfoHeader*>(input.header);
2236 if (sih) {
2237 continue;
2238 }
2239
2240 auto dh = o2::header::get<DataHeader*>(input.header);
2241 if (!dh) {
2242 continue;
2243 }
2244 // We use the address of the first header of a split payload
2245 // to identify the interval.
2246 O2_SIGNPOST_ID_FROM_POINTER(pid, parts, dh);
2247 O2_SIGNPOST_END(parts, pid, "parts", "Cleaning up parts associated to %p", dh);
2248
2249 // No split parts, we simply skip the payload
2250 if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
2251 // this is indicating a sequence of payloads following the header
2252 // FIXME: we will probably also set the DataHeader version
2253 pi += dh->splitPayloadParts - 1;
2254 } else {
2255 size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
2256 }
2257 }
2258 };
2259
2260 ref.get<DataRelayer>().getReadyToProcess(completed);
2261 if (completed.empty() == true) {
2262 LOGP(debug, "No computations available for dispatching.");
2263 return false;
2264 }
2265
2266 int pipelineLength = DefaultsHelpers::pipelineLength(*ref.get<RawDeviceService>().device()->fConfig);
2267
2268 auto postUpdateStats = [ref, pipelineLength](DataRelayer::RecordAction const& action, InputRecord const& record, uint64_t tStart, uint64_t tStartMilli) {
2269 auto& stats = ref.get<DataProcessingStats>();
2270 auto& states = ref.get<DataProcessingStates>();
2271 std::atomic_thread_fence(std::memory_order_release);
2272 char relayerSlotState[1024];
2273 int written = snprintf(relayerSlotState, 1024, "%d ", pipelineLength);
2274 char* buffer = relayerSlotState + written;
2275 for (size_t ai = 0; ai != record.size(); ai++) {
2276 buffer[ai] = record.isValid(ai) ? '3' : '0';
2277 }
2278 buffer[record.size()] = 0;
2279 states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index),
2280 .size = (int)(record.size() + buffer - relayerSlotState),
2281 .data = relayerSlotState});
2282 uint64_t tEnd = uv_hrtime();
2283 // tEnd and tStart are in nanoseconds according to https://docs.libuv.org/en/v1.x/misc.html#c.uv_hrtime
2284 int64_t wallTimeMs = (tEnd - tStart) / 1000000;
2286 // Sum up the total wall time, in milliseconds.
2288 // The time interval is in seconds while tEnd - tStart is in nanoseconds, so we divide by 1000000 to get the fraction in ms/s.
2290 stats.updateStats({(int)ProcessingStatsId::LAST_PROCESSED_SIZE, DataProcessingStats::Op::Set, calculateTotalInputRecordSize(record)});
2291 stats.updateStats({(int)ProcessingStatsId::TOTAL_PROCESSED_SIZE, DataProcessingStats::Op::Add, calculateTotalInputRecordSize(record)});
2292 auto latency = calculateInputRecordLatency(record, tStartMilli);
2293 stats.updateStats({(int)ProcessingStatsId::LAST_MIN_LATENCY, DataProcessingStats::Op::Set, (int)latency.minLatency});
2294 stats.updateStats({(int)ProcessingStatsId::LAST_MAX_LATENCY, DataProcessingStats::Op::Set, (int)latency.maxLatency});
2295 static int count = 0;
2297 count++;
2298 };
2299
2300 auto preUpdateStats = [ref, pipelineLength](DataRelayer::RecordAction const& action, InputRecord const& record, uint64_t) {
2301 auto& states = ref.get<DataProcessingStates>();
2302 std::atomic_thread_fence(std::memory_order_release);
2303 char relayerSlotState[1024];
2304 snprintf(relayerSlotState, 1024, "%d ", pipelineLength);
2305 char* buffer = strchr(relayerSlotState, ' ') + 1;
2306 for (size_t ai = 0; ai != record.size(); ai++) {
2307 buffer[ai] = record.isValid(ai) ? '2' : '0';
2308 }
2309 buffer[record.size()] = 0;
2310 states.updateState({.id = short((int)ProcessingStateId::DATA_RELAYER_BASE + action.slot.index), .size = (int)(record.size() + buffer - relayerSlotState), .data = relayerSlotState});
2311 };
2312
2313 // This is the main dispatching loop
2314 auto& state = ref.get<DeviceState>();
2315 auto& spec = ref.get<DeviceSpec const>();
2316
2317 auto& dpContext = ref.get<DataProcessorContext>();
2318 auto& streamContext = ref.get<StreamContext>();
2319 O2_SIGNPOST_ID_GENERATE(sid, device);
2320 O2_SIGNPOST_START(device, sid, "device", "Start processing ready actions");
2321
2322 auto& stats = ref.get<DataProcessingStats>();
2323 auto& relayer = ref.get<DataRelayer>();
2324 using namespace o2::framework;
2325 stats.updateStats({(int)ProcessingStatsId::PENDING_INPUTS, DataProcessingStats::Op::Set, static_cast<int64_t>(relayer.getParallelTimeslices() - completed.size())});
2326 stats.updateStats({(int)ProcessingStatsId::INCOMPLETE_INPUTS, DataProcessingStats::Op::Set, completed.empty() ? 1 : 0});
2327 switch (spec.completionPolicy.order) {
2329 std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.timeslice.value < b.timeslice.value; });
2330 break;
2332 std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.slot.index < b.slot.index; });
2333 break;
2335 default:
2336 break;
2337 }
2338
2339 for (auto action : completed) {
2340 O2_SIGNPOST_ID_GENERATE(aid, device);
2341 O2_SIGNPOST_START(device, aid, "device", "Processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());
2342 if (action.op == CompletionPolicy::CompletionOp::Wait) {
2343 O2_SIGNPOST_END(device, aid, "device", "Waiting for more data.");
2344 continue;
2345 }
2346
2347 bool shouldConsume = action.op == CompletionPolicy::CompletionOp::Consume ||
2349 prepareAllocatorForCurrentTimeSlice(TimesliceSlot{action.slot});
2350 if (action.op != CompletionPolicy::CompletionOp::Discard &&
2353 updateRunInformation(TimesliceSlot{action.slot});
2354 }
2355 InputSpan span = getInputSpan(action.slot, shouldConsume);
2356 auto& spec = ref.get<DeviceSpec const>();
2357 InputRecord record{spec.inputs,
2358 span,
2359 *context.registry};
2360 ProcessingContext processContext{record, ref, ref.get<DataAllocator>()};
2361 {
2362 // Notice this should be thread safe and reentrant
2363 // as it is called from many threads.
2364 streamContext.preProcessingCallbacks(processContext);
2365 dpContext.preProcessingCallbacks(processContext);
2366 }
2367 if (action.op == CompletionPolicy::CompletionOp::Discard) {
2368 context.postDispatchingCallbacks(processContext);
2369 if (spec.forwards.empty() == false) {
2370 auto& timesliceIndex = ref.get<TimesliceIndex>();
2371 forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false);
2372 O2_SIGNPOST_END(device, aid, "device", "Forwarding inputs consume: %d.", false);
2373 continue;
2374 }
2375 }
2376 // If there is no optional inputs we canForwardEarly
2377 // the messages to that parallel processing can happen.
2378 // In this case we pass true to indicate that we want to
2379 // copy the messages to the subsequent data processor.
2380 bool hasForwards = spec.forwards.empty() == false;
2381 bool consumeSomething = action.op == CompletionPolicy::CompletionOp::Consume || action.op == CompletionPolicy::CompletionOp::ConsumeExisting;
2382
2383 if (context.forwardPolicy == ForwardPolicy::AtCompletionPolicySatisified && hasForwards && consumeSomething) {
2384 O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
2385 auto& timesliceIndex = ref.get<TimesliceIndex>();
2386 forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), true, action.op == CompletionPolicy::CompletionOp::Consume);
2387 } else if (context.forwardPolicy == ForwardPolicy::AtInjection && hasForwards && consumeSomething) {
2388 // We used to do fowarding here, however we now do it much earlier.
2389 // We still need to clean the inputs which were already consumed
2390 // via ConsumeExisting and which still have an header to hold the slot.
2391 // FIXME: do we? This should really happen when we do the forwarding on
2392 // insertion, because otherwise we lose the relevant information on how to
2393 // navigate the set of headers. We could actually rely on the messageset index,
2394 // is that the right thing to do though?
2395 O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "cleaning early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
2396 auto& timesliceIndex = ref.get<TimesliceIndex>();
2397 cleanEarlyForward(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), true, action.op == CompletionPolicy::CompletionOp::Consume);
2398 }
2399
2400 markInputsAsDone(action.slot);
2401
2402 uint64_t tStart = uv_hrtime();
2403 uint64_t tStartMilli = TimingHelpers::getRealtimeSinceEpochStandalone();
2404 preUpdateStats(action, record, tStart);
2405
2406 static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
2407
2408 auto runNoCatch = [&context, ref, &processContext](DataRelayer::RecordAction& action) mutable {
2409 auto& state = ref.get<DeviceState>();
2410 auto& spec = ref.get<DeviceSpec const>();
2411 auto& streamContext = ref.get<StreamContext>();
2412 auto& dpContext = ref.get<DataProcessorContext>();
2413 auto shouldProcess = [](DataRelayer::RecordAction& action) -> bool {
2414 switch (action.op) {
2419 return true;
2420 break;
2421 default:
2422 return false;
2423 }
2424 };
2425 if (state.quitRequested == false) {
2426 {
2427 // Callbacks from services
2428 dpContext.preProcessingCallbacks(processContext);
2429 streamContext.preProcessingCallbacks(processContext);
2430 dpContext.preProcessingCallbacks(processContext);
2431 // Callbacks from users
2432 ref.get<CallbackService>().call<CallbackService::Id::PreProcessing>(o2::framework::ServiceRegistryRef{ref}, (int)action.op);
2433 }
2434 O2_SIGNPOST_ID_FROM_POINTER(pcid, device, &processContext);
2435 if (context.statefulProcess && shouldProcess(action)) {
2436 // This way, usercode can use the the same processing context to identify
2437 // its signposts and we can map user code to device iterations.
2438 O2_SIGNPOST_START(device, pcid, "device", "Stateful process");
2439 (context.statefulProcess)(processContext);
2440 O2_SIGNPOST_END(device, pcid, "device", "Stateful process");
2441 } else if (context.statelessProcess && shouldProcess(action)) {
2442 O2_SIGNPOST_START(device, pcid, "device", "Stateful process");
2443 (context.statelessProcess)(processContext);
2444 O2_SIGNPOST_END(device, pcid, "device", "Stateful process");
2445 } else if (context.statelessProcess || context.statefulProcess) {
2446 O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Skipping processing because we are discarding.");
2447 } else {
2448 O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "No processing callback provided. Switching to %{public}s.", "Idle");
2450 }
2451 if (shouldProcess(action)) {
2452 auto& timingInfo = ref.get<TimingInfo>();
2453 if (timingInfo.globalRunNumberChanged) {
2454 context.lastRunNumberProcessed = timingInfo.runNumber;
2455 }
2456 }
2457
2458 // Notify the sink we just consumed some timeframe data
2459 if (context.isSink && action.op == CompletionPolicy::CompletionOp::Consume) {
2460 O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "Sending dpl-summary");
2461 auto& allocator = ref.get<DataAllocator>();
2462 allocator.make<int>(OutputRef{"dpl-summary", runtime_hash(spec.name.c_str())}, 1);
2463 }
2464
2465 // Extra callback which allows a service to add extra outputs.
2466 // This is needed e.g. to ensure that injected CCDB outputs are added
2467 // before an end of stream.
2468 {
2469 ref.get<CallbackService>().call<CallbackService::Id::FinaliseOutputs>(o2::framework::ServiceRegistryRef{ref}, (int)action.op);
2470 dpContext.finaliseOutputsCallbacks(processContext);
2471 streamContext.finaliseOutputsCallbacks(processContext);
2472 }
2473
2474 {
2475 ref.get<CallbackService>().call<CallbackService::Id::PostProcessing>(o2::framework::ServiceRegistryRef{ref}, (int)action.op);
2476 dpContext.postProcessingCallbacks(processContext);
2477 streamContext.postProcessingCallbacks(processContext);
2478 }
2479 }
2480 };
2481
2482 if ((state.tracingFlags & DeviceState::LoopReason::TRACE_USERCODE) != 0) {
2483 state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
2484 fair::Logger::SetConsoleSeverity(fair::Severity::trace);
2485 }
2486 if (noCatch) {
2487 try {
2488 runNoCatch(action);
2489 } catch (o2::framework::RuntimeErrorRef e) {
2490 (context.errorHandling)(e, record);
2491 }
2492 } else {
2493 try {
2494 runNoCatch(action);
2495 } catch (std::exception& ex) {
2499 auto e = runtime_error(ex.what());
2500 (context.errorHandling)(e, record);
2501 } catch (o2::framework::RuntimeErrorRef e) {
2502 (context.errorHandling)(e, record);
2503 }
2504 }
2505 if (state.severityStack.empty() == false) {
2506 fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
2507 state.severityStack.pop_back();
2508 }
2509
2510 postUpdateStats(action, record, tStart, tStartMilli);
2511 // We forward inputs only when we consume them. If we simply Process them,
2512 // we keep them for next message arriving.
2513 if (action.op == CompletionPolicy::CompletionOp::Consume) {
2514 cleanupRecord(record);
2515 context.postDispatchingCallbacks(processContext);
2516 ref.get<CallbackService>().call<CallbackService::Id::DataConsumed>(o2::framework::ServiceRegistryRef{ref});
2517 }
2518 if ((context.forwardPolicy == ForwardPolicy::AfterProcessing) && hasForwards && consumeSomething) {
2519 O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Late forwarding");
2520 auto& timesliceIndex = ref.get<TimesliceIndex>();
2521 forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false, action.op == CompletionPolicy::CompletionOp::Consume);
2522 }
2523 context.postForwardingCallbacks(processContext);
2524 if (action.op == CompletionPolicy::CompletionOp::Process) {
2525 cleanTimers(action.slot, record);
2526 }
2527 O2_SIGNPOST_END(device, aid, "device", "Done processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());
2528 }
2529 O2_SIGNPOST_END(device, sid, "device", "Start processing ready actions");
2530
2531 // We now broadcast the end of stream if it was requested
2532 if (state.streaming == StreamingState::EndOfStreaming) {
2533 LOGP(detail, "Broadcasting end of stream");
2534 for (auto& channel : spec.outputChannels) {
2536 }
2538 }
2539
2540 return true;
2541}
2542
2544{
2545 LOG(error) << msg;
2546 ServiceRegistryRef ref{mServiceRegistry};
2547 auto& stats = ref.get<DataProcessingStats>();
2549}
2550
2551std::unique_ptr<ConfigParamStore> DeviceConfigurationHelpers::getConfiguration(ServiceRegistryRef registry, const char* name, std::vector<ConfigParamSpec> const& options)
2552{
2553
2554 if (registry.active<ConfigurationInterface>()) {
2555 auto& cfg = registry.get<ConfigurationInterface>();
2556 try {
2557 cfg.getRecursive(name);
2558 std::vector<std::unique_ptr<ParamRetriever>> retrievers;
2559 retrievers.emplace_back(std::make_unique<ConfigurationOptionsRetriever>(&cfg, name));
2560 auto configStore = std::make_unique<ConfigParamStore>(options, std::move(retrievers));
2561 configStore->preload();
2562 configStore->activate();
2563 return configStore;
2564 } catch (...) {
2565 // No overrides...
2566 }
2567 }
2568 return {nullptr};
2569}
2570
2571} // namespace o2::framework
benchmark::State & state
struct uv_timer_s uv_timer_t
struct uv_signal_s uv_signal_t
struct uv_async_s uv_async_t
struct uv_poll_s uv_poll_t
struct uv_loop_s uv_loop_t
o2::monitoring::Metric Metric
uint64_t maxLatency
o2::configuration::ConfigurationInterface ConfigurationInterface
constexpr int DEFAULT_MAX_CHANNEL_AHEAD
uint64_t minLatency
std::ostringstream debug
int32_t i
std::enable_if_t< std::is_signed< T >::value, bool > hasData(const CalArray< T > &cal)
Definition Painter.cxx:600
uint16_t pid
Definition RawData.h:2
uint32_t res
Definition RawData.h:0
#define O2_SIGNPOST_EVENT_EMIT_ERROR(log, id, name, format,...)
Definition Signpost.h:553
#define O2_DECLARE_DYNAMIC_LOG(name)
Definition Signpost.h:489
#define O2_SIGNPOST_ID_FROM_POINTER(name, log, pointer)
Definition Signpost.h:505
#define O2_SIGNPOST_END(log, id, name, format,...)
Definition Signpost.h:608
#define O2_LOG_ENABLED(log)
Definition Signpost.h:110
#define O2_SIGNPOST_ID_GENERATE(name, log)
Definition Signpost.h:506
#define O2_SIGNPOST_EVENT_EMIT_WARN(log, id, name, format,...)
Definition Signpost.h:563
#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format,...)
Definition Signpost.h:522
#define O2_SIGNPOST_START(log, id, name, format,...)
Definition Signpost.h:602
constexpr uint32_t runtime_hash(char const *str)
o2::monitoring::Monitoring Monitoring
StringRef key
@ DeviceStateChanged
Invoked the device undergoes a state change.
decltype(auto) make(const Output &spec, Args... args)
static void doRun(ServiceRegistryRef)
void fillContext(DataProcessorContext &context, DeviceContext &deviceContext)
DataProcessingDevice(RunningDeviceRef ref, ServiceRegistry &)
static void doPrepare(ServiceRegistryRef)
static bool tryDispatchComputation(ServiceRegistryRef ref, std::vector< DataRelayer::RecordAction > &completed)
static void handleData(ServiceRegistryRef, InputChannelInfo &)
uint32_t getFirstTFOrbitForSlot(TimesliceSlot slot)
Get the firstTForbit associate to a given slot.
void updateCacheStatus(TimesliceSlot slot, CacheEntryStatus oldStatus, CacheEntryStatus newStatus)
uint32_t getRunNumberForSlot(TimesliceSlot slot)
Get the runNumber associated to a given slot.
void prunePending(OnDropCallback)
Prune all the pending entries in the cache.
std::vector< std::vector< fair::mq::MessagePtr > > consumeAllInputsForTimeslice(TimesliceSlot id)
uint64_t getCreationTimeForSlot(TimesliceSlot slot)
Get the creation time associated to a given slot.
ActivityStats processDanglingInputs(std::vector< ExpirationHandler > const &, ServiceRegistryRef context, bool createNew)
uint32_t getFirstTFCounterForSlot(TimesliceSlot slot)
Get the firstTFCounter associate to a given slot.
A service API to communicate with the driver.
The input API of the Data Processing Layer This class holds the inputs which are valid for processing...
size_t size() const
Number of elements in the InputSpan.
Definition InputSpan.h:82
virtual fair::mq::Device * device()=0
bool active() const
Check if service of type T is currently active.
OldestOutputInfo getOldestPossibleOutput() const
GLint GLsizei count
Definition glcorearb.h:399
GLuint64EXT * result
Definition glcorearb.h:5662
GLuint buffer
Definition glcorearb.h:655
GLuint entry
Definition glcorearb.h:5735
GLuint index
Definition glcorearb.h:781
GLuint const GLchar * name
Definition glcorearb.h:781
GLboolean GLboolean GLboolean b
Definition glcorearb.h:1233
GLsizei const GLfloat * value
Definition glcorearb.h:819
GLint GLint GLsizei GLint GLenum GLenum type
Definition glcorearb.h:275
GLboolean * data
Definition glcorearb.h:298
GLuint GLsizei GLsizei * length
Definition glcorearb.h:790
GLuint GLsizei const GLchar * label
Definition glcorearb.h:2519
GLsizei GLenum const void * indices
Definition glcorearb.h:400
typedef void(APIENTRYP PFNGLCULLFACEPROC)(GLenum mode)
GLuint GLsizei const GLchar * message
Definition glcorearb.h:2517
GLboolean GLboolean GLboolean GLboolean a
Definition glcorearb.h:1233
GLuint GLuint stream
Definition glcorearb.h:1806
GLint ref
Definition glcorearb.h:291
GLuint * states
Definition glcorearb.h:4932
Defining PrimaryVertex explicitly as messageable.
Definition Cartesian.h:288
RuntimeErrorRef runtime_error(const char *)
ServiceKind
The kind of service we are asking for.
void on_idle_timer(uv_timer_t *handle)
@ DPL
The channel is a normal input channel.
void run_completion(uv_work_t *handle, int status)
void on_socket_polled(uv_poll_t *poller, int status, int events)
void run_callback(uv_work_t *handle)
auto forwardOnInsertion(ServiceRegistryRef &ref, std::span< fair::mq::MessagePtr > &messages) -> void
volatile int region_read_global_dummy_variable
void handleRegionCallbacks(ServiceRegistryRef registry, std::vector< fair::mq::RegionInfo > &infos)
Invoke the callbacks for the mPendingRegionInfos.
void on_out_of_band_polled(uv_poll_t *poller, int status, int events)
DeviceSpec const & getRunningDevice(RunningDeviceRef const &running, ServiceRegistryRef const &services)
@ EndOfStreaming
End of streaming requested, but not notified.
@ Streaming
Data is being processed.
@ Idle
End of streaming notified.
void on_communication_requested(uv_async_t *s)
@ Expired
A transition needs to be fullfilled ASAP.
@ NoTransition
No pending transitions.
@ Requested
A transition was notified to be requested.
RuntimeError & error_from_ref(RuntimeErrorRef)
void on_awake_main_thread(uv_async_t *handle)
@ Completed
The channel was signaled it will not receive any data.
@ Running
The channel is actively receiving data.
void on_signal_callback(uv_signal_t *handle, int signum)
@ Me
Only quit this data processor.
constexpr const char * channelName(int channel)
Definition Constants.h:318
a couple of static helper functions to create timestamp values for CCDB queries or override obsolete ...
static void run(AsyncQueue &queue, TimesliceId oldestPossibleTimeslice)
static void post(AsyncQueue &queue, AsyncTask const &task)
An actuatual task to be executed.
Definition AsyncQueue.h:32
static void demangled_backtrace_symbols(void **backtrace, unsigned int total, int fd)
static constexpr int INVALID
CompletionOp
Action to take with the InputRecord:
@ Retry
Like Wait but mark the cacheline as dirty.
int64_t timeslices
How many timeslices it can process without giving back control.
int64_t sharedMemory
How much shared memory it can allocate.
Statistics on the offers consumed, expired.
static bool hasOnlyGenerated(DeviceSpec const &spec)
check if spec is a source devide
static TransitionHandlingState updateStateTransition(ServiceRegistryRef const &ref, ProcessingPolicies const &policies)
starts the EoS timers and returns the new TransitionHandlingState in case as new state is requested
static void switchState(ServiceRegistryRef const &ref, StreamingState newState)
change the device StreamingState to newState
static void sendEndOfStream(ServiceRegistryRef const &ref, OutputChannelSpec const &channel)
static bool sendOldestPossibleTimeframe(ServiceRegistryRef const &ref, ForwardChannelInfo const &info, ForwardChannelState &state, size_t timeslice)
static void cleanForwardedMessages(std::span< fair::mq::MessagePtr > &currentSetOfInputs, bool consume)
static std::vector< fair::mq::Parts > routeForwardedMessageSet(FairMQDeviceProxy &proxy, std::vector< std::vector< fair::mq::MessagePtr > > &currentSetOfInputs, bool copy, bool consume)
Helper to route messages for forwarding.
static void routeForwardedMessages(FairMQDeviceProxy &proxy, std::span< fair::mq::MessagePtr > &currentSetOfInputs, std::vector< fair::mq::Parts > &forwardedParts, bool copy, bool consume)
Helper to route messages for forwarding.
Helper struct to hold statistics about the data processing happening.
@ CumulativeRate
Set the value to the specified value if it is positive.
@ Add
Update the rate of the metric given the amount since the last time.
std::function< void(o2::framework::RuntimeErrorRef e, InputRecord &record)> errorHandling
ForwardPolicy forwardPolicy
Wether or not the associated DataProcessor can forward things early.
AlgorithmSpec::InitErrorCallback initError
void preLoopCallbacks(ServiceRegistryRef)
Invoke callbacks before we enter the event loop.
void postStopCallbacks(ServiceRegistryRef)
Invoke callbacks on stop.
void preProcessingCallbacks(ProcessingContext &)
Invoke callbacks to be executed before every process method invokation.
void preStartCallbacks(ServiceRegistryRef)
Invoke callbacks to be executed in PreRun(), before the User Start callbacks.
AlgorithmSpec::ProcessCallback statefulProcess
const char * header
Definition DataRef.h:27
const InputSpec * spec
Definition DataRef.h:26
static std::vector< size_t > createDistinctRouteIndex(std::vector< InputRoute > const &)
@ Invalid
Ownership of the data has been taken.
@ Backpressured
The incoming data was not valid and has been dropped.
@ Dropped
The incoming data was not relayed, because we are backpressured.
static bool partialMatch(InputSpec const &spec, o2::header::DataOrigin const &origin)
static std::string describe(InputSpec const &spec)
static bool match(InputSpec const &spec, ConcreteDataMatcher const &target)
TimesliceIndex::OldestOutputInfo oldestTimeslice
static unsigned int pipelineLength(unsigned int minLength)
get max number of timeslices in the queue
static std::unique_ptr< ConfigParamStore > getConfiguration(ServiceRegistryRef registry, const char *name, std::vector< ConfigParamSpec > const &options)
ProcessingPolicies & processingPolicies
Running state information of a given device.
Definition DeviceState.h:34
std::atomic< int64_t > cleanupCount
Definition DeviceState.h:82
Forward channel information.
Definition ChannelInfo.h:88
ChannelAccountingType channelType
Wether or not it's a DPL internal channel.
Definition ChannelInfo.h:92
std::string name
The name of the channel.
Definition ChannelInfo.h:90
ForwardingPolicy const * policy
Definition ChannelInfo.h:94
fair::mq::Channel * channel
Definition ChannelInfo.h:51
TimesliceId oldestForChannel
Oldest possible timeslice for the given channel.
Definition ChannelInfo.h:65
enum Lifetime lifetime
Definition InputSpec.h:73
enum EarlyForwardPolicy earlyForward
Information about the running workflow.
static constexpr ServiceKind kind
static Salt streamSalt(short streamId, short dataProcessorId)
void lateBindStreamServices(DeviceState &state, fair::mq::ProgOptions &options, ServiceRegistry::Salt salt)
static Salt globalStreamSalt(short streamId)
void * get(ServiceTypeHash typeHash, Salt salt, ServiceKind kind, char const *name=nullptr) const
void finaliseOutputsCallbacks(ProcessingContext &)
Invoke callbacks to be executed after every process method invokation.
void preProcessingCallbacks(ProcessingContext &pcx)
Invoke callbacks to be executed before every process method invokation.
void preEOSCallbacks(EndOfStreamContext &eosContext)
Invoke callbacks to be executed before every EOS user callback invokation.
void postProcessingCallbacks(ProcessingContext &pcx)
Invoke callbacks to be executed after every process method invokation.
static int64_t getRealtimeSinceEpochStandalone()
bool keepAtEndOfStream
Wether this kind of data should be flushed during end of stream.
Definition TimingInfo.h:44
static bool timesliceIsTimer(size_t timeslice)
Definition TimingInfo.h:46
static TimesliceId getTimeslice(data_matcher::VariableContext const &variables)
void backpressure(InputChannelInfo const &)
locked_execution(ServiceRegistryRef &ref_)
the main header struct
Definition DataHeader.h:620
constexpr size_t min
constexpr size_t max
LOG(info)<< "Compressed in "<< sw.CpuTime()<< " s"
vec clear()
const std::string str
uint64_t const void const *restrict const msg
Definition x9.h:153