#if defined(__APPLE__) || defined(NDEBUG)
#define O2_SIGNPOST_IMPLEMENTATION
#include <fairmq/Parts.h>
#include <fairmq/Socket.h>
#include <fairmq/ProgOptions.h>
#include <fairmq/shmem/Message.h>
#include <Configuration/ConfigurationInterface.h>
#include <Configuration/ConfigurationFactory.h>
#include <Monitoring/Monitoring.h>
#include <TClonesArray.h>
#include <fmt/ostream.h>
#include <boost/property_tree/json_parser.hpp>
  return devices[running.index];
  : mRunningDevice{running},
    mConfigRegistry{nullptr},
    mServiceRegistry{registry}
  GetConfig()->Subscribe<std::string>("dpl", [&registry = mServiceRegistry](const std::string& key, std::string value) {
    if (key == "cleanup") {
      int64_t newCleanupCount = std::stoll(value);
      if (newCleanupCount <= cleanupCount) {
      deviceState.cleanupCount.store(newCleanupCount);
      for (auto& info : deviceState.inputChannelInfos) {
        fair::mq::Parts parts;
        while (info.channel->Receive(parts, 0)) {
          LOGP(debug, "Dropping {} parts", parts.Size());
          if (parts.Size() == 0) {
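  // The state watcher below mirrors every FairMQ state change into the DPL ControlService
  // and, when a follow-up state has been queued in DeviceState::nextFairMQState, pops it so
  // it can be applied next.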
  std::function<void(const fair::mq::State)> stateWatcher = [this, &registry = mServiceRegistry](const fair::mq::State state) -> void {
    control.notifyDeviceState(fair::mq::GetStateName(state));
    if (deviceState.nextFairMQState.empty() == false) {
      auto state = deviceState.nextFairMQState.back();
      deviceState.nextFairMQState.pop_back();
  this->SubscribeToStateChange("99-dpl", stateWatcher);
  mAwakeHandle->data = &state;
    LOG(error) << "Unable to initialise subscription";
  SubscribeToNewTransition("dpl", [wakeHandle = mAwakeHandle](fair::mq::Transition t) {
    int res = uv_async_send(wakeHandle);
      LOG(error) << "Unable to notify subscription";
    LOG(debug) << "State transition requested";
  O2_SIGNPOST_START(device, sid, "run_callback", "Starting run callback on stream %d", task->id.index);
  O2_SIGNPOST_END(device, sid, "run_callback", "Done processing data for stream %d", task->id.index);
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;
  stats.totalConsumedBytes += accumulatedConsumed.sharedMemory;
  stats.totalConsumedTimeslices += std::min<int64_t>(accumulatedConsumed.timeslices, 1);
  dpStats.processCommandQueue();
  dpStats.processCommandQueue();
  for (auto& consumer : state.offerConsumers) {
    quotaEvaluator.consume(task->id.index, consumer, reportConsumedOffer);
  state.offerConsumers.clear();
  quotaEvaluator.handleExpired(reportExpiredOffer);
  quotaEvaluator.dispose(task->id.index);
  task->running = false;
  O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
  O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket connected for channel %{public}s", context->name);
  O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for read in context %{public}s", context->name);
  uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
  O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name);
    case UV_DISCONNECT: {
      O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket disconnected in context %{public}s", context->name);
    case UV_PRIORITIZED: {
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Socket prioritized for context %{public}s", context->name);
  LOGP(fatal, "Error while polling {}: {}", context->name, status);
  O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
  assert(context->channelInfo);
  context->channelInfo->readPolled = true;
  O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name);
  O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name);
  O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name);
    case UV_DISCONNECT: {
      O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name);
    case UV_PRIORITIZED: {
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name);
  context.statelessProcess = spec.algorithm.onProcess;
  context.error = spec.algorithm.onError;
  context.initError = spec.algorithm.onInitError;
  if (configStore == nullptr) {
    std::vector<std::unique_ptr<ParamRetriever>> retrievers;
    retrievers.emplace_back(std::make_unique<FairOptionsRetriever>(GetConfig()));
    configStore = std::make_unique<ConfigParamStore>(spec.options, std::move(retrievers));
    configStore->preload();
    configStore->activate();
  using boost::property_tree::ptree;
  for (auto& entry : configStore->store()) {
    std::stringstream ss;
    if (entry.second.empty() == false) {
      boost::property_tree::json_parser::write_json(ss, entry.second, false);
      str = entry.second.get_value<std::string>();
    std::string configString = fmt::format("[CONFIG] {}={} 1 {}", entry.first, str, configStore->provenance(entry.first.c_str())).c_str();
  mConfigRegistry = std::make_unique<ConfigParamRegistry>(std::move(configStore));
  if (context.initError) {
    context.initErrorHandling = [&errorCallback = context.initError,
      errorCallback(errorContext);
    context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) {
  context.expirationHandlers.clear();
  context.init = spec.algorithm.onInit;
  static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
  InitContext initContext{*mConfigRegistry, mServiceRegistry};
    context.statefulProcess = context.init(initContext);
      if (context.initErrorHandling) {
        (context.initErrorHandling)(e);
      context.statefulProcess = context.init(initContext);
    } catch (std::exception& ex) {
      (context.initErrorHandling)(e);
      (context.initErrorHandling)(e);
  state.inputChannelInfos.resize(spec.inputChannels.size());
  int validChannelId = 0;
  for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
    auto& name = spec.inputChannels[ci].name;
    if (name.find(spec.channelPrefix + "from_internal-dpl-clock") == 0) {
    state.inputChannelInfos[ci].id = {validChannelId++};
  if (spec.callbacksPolicy.policy != nullptr) {
    InitContext initContext{*mConfigRegistry, mServiceRegistry};
  auto* options = GetConfig();
  for (size_t si = 0; si < mStreams.size(); ++si) {
  O2_SIGNPOST_END(device, sid, "signal_state", "No registry active. Ignoring signal.");
  while (ri != quotaEvaluator.mOffers.size()) {
    auto& offer = quotaEvaluator.mOffers[ri];
    if (offer.valid && offer.sharedMemory != 0) {
      O2_SIGNPOST_END(device, sid, "signal_state", "Memory already offered.");
  for (auto& offer : quotaEvaluator.mOffers) {
    if (offer.valid == false) {
      offer.sharedMemory = 1000000000;
  O2_SIGNPOST_END(device, sid, "signal_state", "Done processing signals.");
  if (oldestTimeslice.timeslice.value <= decongestion.lastTimeslice) {
    LOG(debug) << "Not sending already sent oldest possible timeslice " << oldestTimeslice.timeslice.value;
  for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
    auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi});
    O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Skipping channel %{public}s because it's not a DPL channel",
    O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Forwarding to channel %{public}s oldest possible timeslice %zu, prio 20",
                           info.name.c_str(), oldestTimeslice.timeslice.value);
  O2_SIGNPOST_EVENT_EMIT(forwarding, sid, "forwardInputs", "Forwarding %zu messages", forwardedParts.size());
  for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
    if (forwardedParts[fi].Size() == 0) {
    auto& parts = forwardedParts[fi];
    if (info.policy == nullptr) {
  O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputs", "Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);
  if (infos.empty() == false) {
    std::vector<fair::mq::RegionInfo> toBeNotified;
    toBeNotified.swap(infos);
    static bool dummyRead = getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS") && atoi(getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS"));
    for (auto const& info : toBeNotified) {
void DataProcessingDevice::initPollers()
  if ((context.statefulProcess != nullptr) || (context.statelessProcess != nullptr)) {
    for (auto& [channelName, channel] : GetChannels()) {
      for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
        auto& channelSpec = spec.inputChannels[ci];
        channelInfo = &state.inputChannelInfos[ci];
        if (channelSpec.name != channelName) {
        channelInfo->channel = &this->GetChannel(channelName, 0);
      if ((channelName.rfind("from_internal-dpl", 0) == 0) &&
          (channelName.rfind("from_internal-dpl-aod", 0) != 0) &&
          (channelName.rfind("from_internal-dpl-ccdb-backend", 0) != 0) &&
          (channelName.rfind("from_internal-dpl-injected", 0)) != 0) {
        LOGP(detail, "{} is an internal channel. Skipping as no input will come from there.", channelName);
      if (channelName.rfind("from_" + spec.name + "_", 0) == 0) {
        LOGP(detail, "{} is to send data. Not polling.", channelName);
        LOGP(detail, "{} is not a DPL socket. Not polling.", channelName);
      size_t zmq_fd_len = sizeof(zmq_fd);
      channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
      LOGP(detail, "Polling socket for {}", channelName);
      pCtx->loop = state.loop;
      pCtx->state = &state;
      assert(channelInfo != nullptr);
      pCtx->channelInfo = channelInfo;
      pCtx->socket = &channel[0].GetSocket();
      uv_poll_init(state.loop, poller, zmq_fd);
        LOGP(detail, "{} is an out of band channel.", channelName);
        state.activeOutOfBandPollers.push_back(poller);
        state.activeInputPollers.push_back(poller);
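      // The underlying ZeroMQ file descriptor is fetched from the FairMQ socket via the "fd"
      // option and registered with libuv, so channel activity wakes the device loop instead of
      // the channel being busy-polled.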
  if (state.activeInputPollers.empty() &&
      state.activeOutOfBandPollers.empty() &&
      state.activeTimers.empty() &&
      state.activeSignals.empty()) {
    if (state.inputChannelInfos.empty()) {
      LOGP(detail, "No input channels. Setting exit transition timeout to 0.");
      deviceContext.exitTransitionTimeout = 0;
    for (auto& [channelName, channel] : GetChannels()) {
      if (channelName.rfind(spec.channelPrefix + "from_internal-dpl", 0) == 0) {
        LOGP(detail, "{} is an internal channel. Not polling.", channelName);
      if (channelName.rfind(spec.channelPrefix + "from_" + spec.name + "_", 0) == 0) {
        LOGP(detail, "{} is an out of band channel. Not polling for output.", channelName);
      size_t zmq_fd_len = sizeof(zmq_fd);
      channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
        LOGP(error, "Cannot get file descriptor for channel {}", channelName);
      LOG(detail) << "Polling socket for " << channel[0].GetName();
      pCtx->loop = state.loop;
      pCtx->state = &state;
      uv_poll_init(state.loop, poller, zmq_fd);
      state.activeOutputPollers.push_back(poller);
    LOGP(detail, "This is a fake device so we exit after the first iteration.");
    deviceContext.exitTransitionTimeout = 0;
    uv_timer_init(state.loop, timer);
    timer->data = &state;
    uv_update_time(state.loop);
    state.activeTimers.push_back(timer);
void DataProcessingDevice::startPollers()
  for (auto* poller : state.activeInputPollers) {
    O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection.");
  for (auto& poller : state.activeOutOfBandPollers) {
  for (auto* poller : state.activeOutputPollers) {
    O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection.");
  uv_timer_init(state.loop, deviceContext.gracePeriodTimer);
  deviceContext.dataProcessingGracePeriodTimer->data = new ServiceRegistryRef(mServiceRegistry);
  uv_timer_init(state.loop, deviceContext.dataProcessingGracePeriodTimer);
void DataProcessingDevice::stopPollers()
  LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size());
  for (auto* poller : state.activeInputPollers) {
    uv_poll_stop(poller);
  LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size());
  for (auto* poller : state.activeOutOfBandPollers) {
    uv_poll_stop(poller);
  LOGP(detail, "Stopping {} output pollers", state.activeOutputPollers.size());
  for (auto* poller : state.activeOutputPollers) {
    uv_poll_stop(poller);
  uv_timer_stop(deviceContext.gracePeriodTimer);
  free(deviceContext.gracePeriodTimer);
  deviceContext.gracePeriodTimer = nullptr;
  uv_timer_stop(deviceContext.dataProcessingGracePeriodTimer);
  free(deviceContext.dataProcessingGracePeriodTimer);
  deviceContext.dataProcessingGracePeriodTimer = nullptr;
  for (auto& di : distinct) {
    auto& route = spec.inputs[di];
    if (route.configurator.has_value() == false) {
      .name = route.configurator->name,
      .lifetime = route.matcher.lifetime,
      .creator = route.configurator->creatorConfigurator(state, mServiceRegistry, *mConfigRegistry),
      .checker = route.configurator->danglingConfigurator(state, *mConfigRegistry),
      .handler = route.configurator->expirationConfigurator(state, *mConfigRegistry)};
    context.expirationHandlers.emplace_back(std::move(handler));
  if (state.awakeMainThread == nullptr) {
  deviceContext.expectedRegionCallbacks = std::stoi(fConfig->GetValue<std::string>("expected-region-callbacks"));
  deviceContext.exitTransitionTimeout = std::stoi(fConfig->GetValue<std::string>("exit-transition-timeout"));
  deviceContext.dataProcessingTimeout = std::stoi(fConfig->GetValue<std::string>("data-processing-timeout"));
  for (auto& channel : GetChannels()) {
    channel.second.at(0).Transport()->SubscribeToRegionEvents([&context = deviceContext,
                                                               &registry = mServiceRegistry,
                                                               &pendingRegionInfos = mPendingRegionInfos,
                                                               &regionInfoMutex = mRegionInfoMutex](fair::mq::RegionInfo info) {
      std::lock_guard<std::mutex> lock(regionInfoMutex);
      LOG(detail) << ">>> Region info event" << info.event;
      LOG(detail) << "id: " << info.id;
      LOG(detail) << "ptr: " << info.ptr;
      LOG(detail) << "size: " << info.size;
      LOG(detail) << "flags: " << info.flags;
      pendingRegionInfos.push_back(info);
  if (deviceContext.sigusr1Handle == nullptr) {
    deviceContext.sigusr1Handle->data = &mServiceRegistry;
    uv_signal_init(state.loop, deviceContext.sigusr1Handle);
  for (auto& handle : state.activeSignals) {
    handle->data = &state;
  deviceContext.sigusr1Handle->data = &mServiceRegistry;
  DataProcessingDevice::initPollers();
  LOG(error) << "DataProcessor " << state.lastActiveDataProcessor.load()->spec->name << " was unexpectedly active";
  O2_SIGNPOST_END(device, cid, "InitTask", "Exiting InitTask callback waiting for the remaining region callbacks.");
  auto hasPendingEvents = [&mutex = mRegionInfoMutex, &pendingRegionInfos = mPendingRegionInfos](DeviceContext& deviceContext) {
    std::lock_guard<std::mutex> lock(mutex);
    return (pendingRegionInfos.empty() == false) || deviceContext.expectedRegionCallbacks > 0;
  while (hasPendingEvents(deviceContext)) {
    uv_run(state.loop, UV_RUN_ONCE);
    std::lock_guard<std::mutex> lock(mRegionInfoMutex);
  O2_SIGNPOST_END(device, cid, "InitTask", "Done waiting for registration events.");
  bool enableRateLimiting = std::stoi(fConfig->GetValue<std::string>("timeframes-rate-limit"));
  if (enableRateLimiting == false && spec.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) {
  if (enableRateLimiting) {
    for (auto& spec : spec.outputs) {
      if (spec.matcher.binding.value == "dpl-summary") {
  context.registry = &mServiceRegistry;
  if (context.error != nullptr) {
    errorCallback(errorContext);
  switch (deviceContext.processingPolicies.error) {
  auto decideEarlyForward = [&context, &deviceContext, &spec, this]() -> bool {
    bool onlyConditions = true;
    bool overriddenEarlyForward = false;
    for (auto& forwarded : spec.forwards) {
      if (forwarded.matcher.lifetime != Lifetime::Condition) {
        onlyConditions = false;
      overriddenEarlyForward = true;
      if (forwarded.matcher.lifetime == Lifetime::Optional) {
        overriddenEarlyForward = true;
    if (!overriddenEarlyForward && onlyConditions) {
      LOG(detail) << "Enabling early forwarding because only conditions to be forwarded";
    return canForwardEarly;
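  // Early forwarding is only kept enabled when every forward route carries condition objects
  // and nothing (for instance an Optional lifetime route) has overridden the decision.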
  state.quitRequested = false;
  for (auto& info : state.inputChannelInfos) {
  for (size_t i = 0; i < mStreams.size(); ++i) {
    context.preStartStreamCallbacks(streamRef);
  } catch (std::exception& e) {
    O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type std::exception caught in PreRun: %{public}s. Rethrowing.", e.what());
    O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
    O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type o2::framework::RuntimeErrorRef caught in PreRun: %{public}s. Rethrowing.", err.what);
    O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
    O2_SIGNPOST_END(device, cid, "PreRun", "Unknown exception being thrown. Rethrowing.");
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;
  monitoring.send(Metric{(uint64_t)1, "device_state"}.addTag(Key::Subsystem, Value::DPL));
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;
  monitoring.send(Metric{(uint64_t)0, "device_state"}.addTag(Key::Subsystem, Value::DPL));
  bool firstLoop = true;
  O2_SIGNPOST_START(device, lid, "device_state", "First iteration of the device loop");
  bool dplEnableMultithreding = getenv("DPL_THREADPOOL_SIZE") != nullptr;
  if (dplEnableMultithreding) {
    setenv("UV_THREADPOOL_SIZE", "1", 1);
  if (state.nextFairMQState.empty() == false) {
    (void)this->ChangeState(state.nextFairMQState.back());
    state.nextFairMQState.pop_back();
  std::lock_guard<std::mutex> lock(mRegionInfoMutex);
  state.lastActiveDataProcessor.compare_exchange_strong(lastActive, nullptr);
  auto shouldNotWait = (lastActive != nullptr &&
    shouldNotWait = true;
  if (lastActive != nullptr) {
  if (NewStatePending()) {
    shouldNotWait = true;
  O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "State transition requested and we are now in Idle. We can consider it to be completed.");
  if (state.severityStack.empty() == false) {
    fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
    state.severityStack.pop_back();
  state.firedTimers.clear();
  state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
  fair::Logger::SetConsoleSeverity(fair::Severity::trace);
  O2_SIGNPOST_START(device, lid, "run_loop", "Dropping message from slot %" PRIu64 ". Forwarding as needed.", (uint64_t)slot.index);
  forwardInputs(registry, slot, dropped, oldestOutputInfo, false, true);
  auto oldestPossibleTimeslice = relayer.getOldestPossibleOutput();
  if (shouldNotWait == false) {
  O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. %{public}s", shouldNotWait ? "Will immediately schedule a new one" : "Waiting for next event.");
  uv_run(state.loop, shouldNotWait ? UV_RUN_NOWAIT : UV_RUN_ONCE);
  if ((state.loopReason & state.tracingFlags) != 0) {
    state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
    fair::Logger::SetConsoleSeverity(fair::Severity::trace);
  } else if (state.severityStack.empty() == false) {
    fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
    state.severityStack.pop_back();
  O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Out of band activity detected. Rescanning everything.");
  if (!state.pendingOffers.empty()) {
    O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Pending %" PRIu64 " offers. Updating the ComputingQuotaEvaluator.", (uint64_t)state.pendingOffers.size());
  std::lock_guard<std::mutex> lock(mRegionInfoMutex);
  assert(mStreams.size() == mHandles.size());
  for (size_t ti = 0; ti < mStreams.size(); ti++) {
    auto& taskInfo = mStreams[ti];
    if (taskInfo.running) {
    streamRef.index = ti;
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;
  if (streamRef.index != -1) {
    uv_work_t& handle = mHandles[streamRef.index];
    handle.data = &mStreams[streamRef.index];
    dpStats.processCommandQueue();
  struct SchedulingStats {
    std::atomic<size_t> lastScheduled = 0;
    std::atomic<size_t> numberOfUnscheduledSinceLastScheduled = 0;
    std::atomic<size_t> numberOfUnscheduled = 0;
    std::atomic<size_t> numberOfScheduled = 0;
  static SchedulingStats schedulingStats;
    stream.registry = &mServiceRegistry;
    schedulingStats.lastScheduled = uv_now(state.loop);
    schedulingStats.numberOfScheduled++;
    schedulingStats.numberOfUnscheduledSinceLastScheduled = 0;
    O2_SIGNPOST_EVENT_EMIT(scheduling, sid, "Run", "Enough resources to schedule computation on stream %d", streamRef.index);
    if (dplEnableMultithreding) [[unlikely]] {
    if (schedulingStats.numberOfUnscheduledSinceLastScheduled > 100 ||
        (uv_now(state.loop) - schedulingStats.lastScheduled) > 30000) {
                        "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu.",
                        schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
                        schedulingStats.lastScheduled.load());
                        "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu.",
                        schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
                        schedulingStats.lastScheduled.load());
    schedulingStats.numberOfUnscheduled++;
    schedulingStats.numberOfUnscheduledSinceLastScheduled++;
  O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. Transition handling state %d.", (int)state.transitionHandling);
  for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
    auto& info = state.inputChannelInfos[ci];
    info.parts.fParts.clear();
  O2_SIGNPOST_START(device, dpid, "do_prepare", "Starting DataProcessorContext::doPrepare.");
  context.allDone = std::any_of(state.inputChannelInfos.begin(), state.inputChannelInfos.end(), [cid](const auto& info) {
    O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "Input channel %{public}s%{public}s has %zu parts left and is in state %d.",
                           info.channel->GetName().c_str(), (info.id.value == ChannelIndex::INVALID ? " (non DPL)" : ""), info.parts.fParts.size(), (int)info.state);
    O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "External channel %d is in state %d.", info.id.value, (int)info.state);
  O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Processing %zu input channels.", spec.inputChannels.size());
  static std::vector<int> pollOrder;
  pollOrder.resize(state.inputChannelInfos.size());
  std::iota(pollOrder.begin(), pollOrder.end(), 0);
  std::sort(pollOrder.begin(), pollOrder.end(), [&infos = state.inputChannelInfos](int a, int b) {
    return infos[a].oldestForChannel.value < infos[b].oldestForChannel.value;
  if (pollOrder.empty()) {
    O2_SIGNPOST_END(device, dpid, "do_prepare", "Nothing to poll. Waiting for next iteration.");
  auto currentOldest = state.inputChannelInfos[pollOrder.front()].oldestForChannel;
  auto currentNewest = state.inputChannelInfos[pollOrder.back()].oldestForChannel;
  auto delta = currentNewest.value - currentOldest.value;
  O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Oldest possible timeframe range %" PRIu64 " => %" PRIu64 " delta %" PRIu64,
                         (int64_t)currentOldest.value, (int64_t)currentNewest.value, (int64_t)delta);
  auto& infos = state.inputChannelInfos;
  if (context.balancingInputs) {
    static uint64_t ahead = getenv("DPL_MAX_CHANNEL_AHEAD") ? std::atoll(getenv("DPL_MAX_CHANNEL_AHEAD")) : std::max(8, std::min(pipelineLength - 48, pipelineLength / 2));
    auto newEnd = std::remove_if(pollOrder.begin(), pollOrder.end(), [&infos, limitNew = currentOldest.value + ahead](int a) -> bool {
      return infos[a].oldestForChannel.value > limitNew;
    for (auto it = pollOrder.begin(); it < pollOrder.end(); it++) {
      const auto& channelInfo = state.inputChannelInfos[*it];
      bool shouldBeRunning = it < newEnd;
      if (running != shouldBeRunning) {
        uv_poll_start(poller, shouldBeRunning ? UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED : 0, &on_socket_polled);
    pollOrder.erase(newEnd, pollOrder.end());
  O2_SIGNPOST_END(device, dpid, "do_prepare", "%zu channels pass the channel imbalance check.", pollOrder.size());
  for (auto sci : pollOrder) {
    auto& info = state.inputChannelInfos[sci];
    auto& channelSpec = spec.inputChannels[sci];
    O2_SIGNPOST_START(device, cid, "channels", "Processing channel %s", channelSpec.name.c_str());
    context.allDone = false;
    if (info.parts.Size()) {
      O2_SIGNPOST_END(device, cid, "channels", "Flushing channel %s which is in state %d and has %zu parts still pending.",
                      channelSpec.name.c_str(), (int)info.state, info.parts.Size());
    if (info.channel == nullptr) {
      O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is nullptr and has %zu parts still pending.",
                      channelSpec.name.c_str(), (int)info.state, info.parts.Size());
      O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is not a DPL channel and has %zu parts still pending.",
                      channelSpec.name.c_str(), (int)info.state, info.parts.Size());
    auto& socket = info.channel->GetSocket();
    if (info.hasPendingEvents == 0) {
      socket.Events(&info.hasPendingEvents);
    if ((info.hasPendingEvents & 1) == 0 && (info.parts.Size() == 0)) {
      O2_SIGNPOST_END(device, cid, "channels", "No pending events and no remaining parts to process for channel %{public}s", channelSpec.name.c_str());
    info.readPolled = false;
    bool newMessages = false;
    O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Receiving loop called for channel %{public}s (%d) with oldest possible timeslice %zu",
                           channelSpec.name.c_str(), info.id.value, info.oldestForChannel.value);
    if (info.parts.Size() < 64) {
      fair::mq::Parts parts;
      info.channel->Receive(parts, 0);
      O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Received %zu parts from channel %{public}s (%d).", parts.Size(), channelSpec.name.c_str(), info.id.value);
      for (auto&& part : parts) {
        info.parts.fParts.emplace_back(std::move(part));
      newMessages |= true;
    if (info.parts.Size() >= 0) {
    socket.Events(&info.hasPendingEvents);
    if (info.hasPendingEvents) {
      info.readPolled = false;
    state.lastActiveDataProcessor.store(&context);
    O2_SIGNPOST_END(device, cid, "channels", "Done processing channel %{public}s (%d).",
                    channelSpec.name.c_str(), info.id.value);
  context.completed.clear();
  context.completed.reserve(16);
  state.lastActiveDataProcessor.store(&context);
  context.preDanglingCallbacks(danglingContext);
  if (state.lastActiveDataProcessor.load() == nullptr) {
  auto activity = ref.get<DataRelayer>().processDanglingInputs(context.expirationHandlers, *context.registry, true);
  if (activity.expiredSlots > 0) {
    state.lastActiveDataProcessor = &context;
  context.completed.clear();
  state.lastActiveDataProcessor = &context;
  context.postDanglingCallbacks(danglingContext);
  state.lastActiveDataProcessor = &context;
  timingInfo.timeslice = relayer.getOldestPossibleOutput().timeslice.value;
  timingInfo.tfCounter = -1;
  timingInfo.firstTForbit = -1;
  timingInfo.creation = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::system_clock::now()).time_since_epoch().count();
  O2_SIGNPOST_EVENT_EMIT(calibration, dpid, "calibration", "TimingInfo.keepAtEndOfStream %d", timingInfo.keepAtEndOfStream);
  context.preEOSCallbacks(eosContext);
  streamContext.postEOSCallbacks(eosContext);
  context.postEOSCallbacks(eosContext);
  for (auto& channel : spec.outputChannels) {
    O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Sending end of stream to %{public}s.", channel.name.c_str());
  if (shouldProcess) {
    state.lastActiveDataProcessor = &context;
  for (auto& poller : state.activeOutputPollers) {
    uv_poll_stop(poller);
  for (auto& poller : state.activeOutputPollers) {
    uv_poll_stop(poller);
  if (deviceContext.sigusr1Handle) {
    handle->data = nullptr;
  auto getInputTypes = [&info, &context]() -> std::optional<std::vector<InputInfo>> {
    auto& parts = info.parts;
    std::vector<InputInfo> results;
    results.reserve(parts.Size() / 2);
    size_t nTotalPayloads = 0;
      if (type != InputType::Invalid && length > 1) {
        nTotalPayloads += length - 1;
    for (size_t pi = 0; pi < parts.Size(); pi += 2) {
      auto* headerData = parts.At(pi)->GetData();
      auto sih = o2::header::get<SourceInfoHeader*>(headerData);
      auto dh = o2::header::get<DataHeader*>(headerData);
        O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got SourceInfoHeader with state %d", (int)sih->state);
        info.state = sih->state;
        insertInputInfo(pi, 2, InputType::SourceInfo, info.id);
        state.lastActiveDataProcessor = &context;
          LOGP(error, "Found data attached to a SourceInfoHeader");
      auto dih = o2::header::get<DomainInfoHeader*>(headerData);
        O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got DomainInfoHeader with oldestPossibleTimeslice %d", (int)dih->oldestPossibleTimeslice);
        insertInputInfo(pi, 2, InputType::DomainInfo, info.id);
        state.lastActiveDataProcessor = &context;
          LOGP(error, "Found data attached to a DomainInfoHeader");
        insertInputInfo(pi, 0, InputType::Invalid, info.id);
      if (dh->payloadSize > parts.At(pi + 1)->GetSize()) {
        insertInputInfo(pi, 0, InputType::Invalid, info.id);
      auto dph = o2::header::get<DataProcessingHeader*>(headerData);
      O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader %{public}-4s/%{public}-16s/%d with splitPayloadParts %d and splitPayloadIndex %d",
                        dh->dataOrigin.str, dh->dataDescription.str, dh->subSpecification, dh->splitPayloadParts, dh->splitPayloadIndex);
        insertInputInfo(pi, 2, InputType::Invalid, info.id);
      if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
        insertInputInfo(pi, dh->splitPayloadParts + 1, InputType::Data, info.id);
        pi += dh->splitPayloadParts - 1;
        size_t finalSplitPayloadIndex = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
        if (finalSplitPayloadIndex > parts.Size()) {
          insertInputInfo(pi, 0, InputType::Invalid, info.id);
        insertInputInfo(pi, 2, InputType::Data, info.id);
        for (; pi + 2 < finalSplitPayloadIndex; pi += 2) {
          insertInputInfo(pi + 2, 2, InputType::Data, info.id);
    if (results.size() + nTotalPayloads != parts.Size()) {
      O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "inconsistent number of inputs extracted. %zu vs parts (%zu)", results.size() + nTotalPayloads, parts.Size());
      return std::nullopt;
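    // Messages on a DPL channel arrive as (header, payload) pairs; split-payload sequences
    // share a single header, which is why the extracted InputInfo count plus the extra
    // payloads must add up to the total number of parts, otherwise the batch is rejected.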
  auto reportError = [ref](const char* message) {
  auto handleValidMessages = [&info, ref, &reportError](std::vector<InputInfo> const& inputInfos) {
    auto& parts = info.parts;
    bool hasBackpressure = false;
    size_t minBackpressureTimeslice = -1;
    size_t oldestPossibleTimeslice = -1;
    static std::vector<int> ordering;
    ordering.resize(inputInfos.size());
    std::iota(ordering.begin(), ordering.end(), 0);
    std::stable_sort(ordering.begin(), ordering.end(), [&inputInfos](int const& a, int const& b) {
      auto const& ai = inputInfos[a];
      auto const& bi = inputInfos[b];
      if (ai.type != bi.type) {
        return ai.type < bi.type;
      return ai.position < bi.position;
    for (size_t ii = 0; ii < inputInfos.size(); ++ii) {
      auto const& input = inputInfos[ordering[ii]];
      switch (input.type) {
        case InputType::Data: {
          auto headerIndex = input.position;
          auto nPayloadsPerHeader = 0;
          if (input.size > 2) {
            nMessages = input.size;
            nPayloadsPerHeader = nMessages - 1;
            auto dh = o2::header::get<DataHeader*>(parts.At(headerIndex)->GetData());
            nMessages = dh->splitPayloadParts > 0 ? dh->splitPayloadParts * 2 : 2;
            nPayloadsPerHeader = 1;
            ii += (nMessages / 2) - 1;
            O2_SIGNPOST_EVENT_EMIT(async_queue, cid, "onDrop", "Dropping message from slot %zu. Forwarding as needed. Timeslice %zu",
                                   slot.index, oldestOutputInfo.timeslice.value);
            forwardInputs(ref, slot, dropped, oldestOutputInfo, false, true);
          auto relayed = relayer.relay(parts.At(headerIndex)->GetData(),
                                       &parts.At(headerIndex),
          switch (relayed.type) {
              LOGP(alarm, "Backpressure on channel {}. Waiting.", info.channel->GetName());
              auto& monitoring = ref.get<o2::monitoring::Monitoring>();
              monitoring.send(o2::monitoring::Metric{1, fmt::format("backpressure_{}", info.channel->GetName())});
              policy.backpressure(info);
              hasBackpressure = true;
              minBackpressureTimeslice = std::min<size_t>(minBackpressureTimeslice, relayed.timeslice.value);
              LOGP(info, "Back to normal on channel {}.", info.channel->GetName());
              auto& monitoring = ref.get<o2::monitoring::Monitoring>();
              monitoring.send(o2::monitoring::Metric{0, fmt::format("backpressure_{}", info.channel->GetName())});
        case InputType::SourceInfo: {
          LOGP(detail, "Received SourceInfo");
          state.lastActiveDataProcessor = &context;
          auto headerIndex = input.position;
          auto payloadIndex = input.position + 1;
          assert(payloadIndex < parts.Size());
          parts.At(headerIndex).reset(nullptr);
          parts.At(payloadIndex).reset(nullptr);
        case InputType::DomainInfo: {
          state.lastActiveDataProcessor = &context;
          auto headerIndex = input.position;
          auto payloadIndex = input.position + 1;
          assert(payloadIndex < parts.Size());
          auto dih = o2::header::get<DomainInfoHeader*>(parts.At(headerIndex)->GetData());
          if (hasBackpressure && dih->oldestPossibleTimeslice >= minBackpressureTimeslice) {
          oldestPossibleTimeslice = std::min(oldestPossibleTimeslice, dih->oldestPossibleTimeslice);
          LOGP(debug, "Got DomainInfoHeader, new oldestPossibleTimeslice {} on channel {}", oldestPossibleTimeslice, info.id.value);
          parts.At(headerIndex).reset(nullptr);
          parts.At(payloadIndex).reset(nullptr);
        case InputType::Invalid: {
          reportError("Invalid part found.");
    if (oldestPossibleTimeslice != (size_t)-1) {
      context.domainInfoUpdatedCallback(*context.registry, oldestPossibleTimeslice, info.id);
      state.lastActiveDataProcessor = &context;
    auto it = std::remove_if(parts.fParts.begin(), parts.fParts.end(), [](auto& msg) -> bool { return msg.get() == nullptr; });
    parts.fParts.erase(it, parts.end());
    if (parts.fParts.size()) {
      LOG(debug) << parts.fParts.size() << " messages backpressured";
  auto inputTypes = getInputTypes();
  if (bool(inputTypes) == false) {
    reportError("Parts should come in couples. Dropping it.");
  handleValidMessages(*inputTypes);
struct InputLatency {
auto calculateInputRecordLatency(InputRecord const& record, uint64_t currentTime) -> InputLatency
  for (auto& item : record) {
    auto* header = o2::header::get<DataProcessingHeader*>(item.header);
    if (header == nullptr) {
    int64_t partLatency = (0x7fffffffffffffff & currentTime) - (0x7fffffffffffffff & header->creation);
    if (partLatency < 0) {
    result.minLatency = std::min(result.minLatency, (uint64_t)partLatency);
    result.maxLatency = std::max(result.maxLatency, (uint64_t)partLatency);
auto calculateTotalInputRecordSize(InputRecord const& record) -> int
  size_t totalInputSize = 0;
  for (auto& item : record) {
    auto* header = o2::header::get<DataHeader*>(item.header);
    if (header == nullptr) {
    totalInputSize += header->payloadSize;
  return totalInputSize;
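// Lock-free "store if larger" helper: compare_exchange_weak reloads prev_value on failure, so
// the loop below retries until the stored maximum is already >= value or the exchange succeeds.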
template <typename T>
void update_maximum(std::atomic<T>& maximum_value, T const& value) noexcept
  T prev_value = maximum_value;
  while (prev_value < value &&
         !maximum_value.compare_exchange_weak(prev_value, value)) {
  LOGP(debug, "DataProcessingDevice::tryDispatchComputation");
  std::vector<MessageSet> currentSetOfInputs;
  auto getInputSpan = [ref, &currentSetOfInputs](TimesliceSlot slot, bool consume = true) {
    currentSetOfInputs = relayer.consumeExistingInputsForTimeslice(slot);
    auto getter = [&currentSetOfInputs](size_t i, size_t partindex) -> DataRef {
      if (currentSetOfInputs[i].getNumberOfPairs() > partindex) {
        const char* headerptr = nullptr;
        const char* payloadptr = nullptr;
        size_t payloadSize = 0;
        auto const& headerMsg = currentSetOfInputs[i].associatedHeader(partindex);
        auto const& payloadMsg = currentSetOfInputs[i].associatedPayload(partindex);
        headerptr = static_cast<char const*>(headerMsg->GetData());
        payloadptr = payloadMsg ? static_cast<char const*>(payloadMsg->GetData()) : nullptr;
        payloadSize = payloadMsg ? payloadMsg->GetSize() : 0;
        return DataRef{nullptr, headerptr, payloadptr, payloadSize};
    auto nofPartsGetter = [&currentSetOfInputs](size_t i) -> size_t {
      return currentSetOfInputs[i].getNumberOfPairs();
    auto refCountGetter = [&currentSetOfInputs](size_t idx) -> int {
      auto& header = static_cast<const fair::mq::shmem::Message&>(*currentSetOfInputs[idx].header(0));
      return header.GetRefCount();
    return InputSpan{getter, nofPartsGetter, refCountGetter, currentSetOfInputs.size()};
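    // The InputSpan does not copy anything: the three lambdas lazily resolve header and
    // payload pointers and reference counts out of the MessageSets owned by currentSetOfInputs.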
  auto timeslice = relayer.getTimesliceForSlot(i);
  timingInfo.timeslice = timeslice.value;
  auto timeslice = relayer.getTimesliceForSlot(i);
  timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber;
  timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0);
  timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged;
  assert(record.size() == currentSetOfInputs.size());
  for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) {
    DataRef input = record.getByPos(ii);
    if (input.header == nullptr) {
    currentSetOfInputs[ii].clear();
  for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) {
    DataRef input = record.getByPos(pi);
    if (input.header == nullptr) {
    auto sih = o2::header::get<SourceInfoHeader*>(input.header);
    auto dh = o2::header::get<DataHeader*>(input.header);
    if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
      pi += dh->splitPayloadParts - 1;
    size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
  if (completed.empty() == true) {
    LOGP(debug, "No computations available for dispatching.");
  std::atomic_thread_fence(std::memory_order_release);
  char relayerSlotState[1024];
  char* buffer = relayerSlotState + written;
  for (size_t ai = 0; ai != record.size(); ai++) {
    buffer[ai] = record.isValid(ai) ? '3' : '0';
  buffer[record.size()] = 0;
  .size = (int)(record.size() + buffer - relayerSlotState),
  .data = relayerSlotState});
  uint64_t tEnd = uv_hrtime();
  int64_t wallTimeMs = (tEnd - tStart) / 1000000;
  auto latency = calculateInputRecordLatency(record, tStartMilli);
  static int count = 0;
  std::atomic_thread_fence(std::memory_order_release);
  char relayerSlotState[1024];
  char* buffer = strchr(relayerSlotState, ' ') + 1;
  for (size_t ai = 0; ai != record.size(); ai++) {
    buffer[ai] = record.isValid(ai) ? '2' : '0';
  buffer[record.size()] = 0;
  switch (spec.completionPolicy.order) {
      std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.timeslice.value < b.timeslice.value; });
      std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.slot.index < b.slot.index; });
  for (auto action : completed) {
    O2_SIGNPOST_START(device, aid, "device", "Processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());
    dpContext.preProcessingCallbacks(processContext);
    context.postDispatchingCallbacks(processContext);
    if (spec.forwards.empty() == false) {
      forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false);
      O2_SIGNPOST_END(device, aid, "device", "Forwarding inputs consume: %d.", false);
    bool hasForwards = spec.forwards.empty() == false;
    if (context.canForwardEarly && hasForwards && consumeSomething) {
      O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
    markInputsAsDone(action.slot);
    uint64_t tStart = uv_hrtime();
    preUpdateStats(action, record, tStart);
    static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
    switch (action.op) {
    if (state.quitRequested == false) {
    streamContext.preProcessingCallbacks(processContext);
    if (context.statefulProcess && shouldProcess(action)) {
      (context.statefulProcess)(processContext);
    } else if (context.statelessProcess && shouldProcess(action)) {
      (context.statelessProcess)(processContext);
    } else if (context.statelessProcess || context.statefulProcess) {
      O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "No processing callback provided. Switching to %{public}s.", "Idle");
    if (shouldProcess(action)) {
      if (timingInfo.globalRunNumberChanged) {
        context.lastRunNumberProcessed = timingInfo.runNumber;
    streamContext.finaliseOutputsCallbacks(processContext);
    streamContext.postProcessingCallbacks(processContext);
      state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
      fair::Logger::SetConsoleSeverity(fair::Severity::trace);
      (context.errorHandling)(e, record);
    } catch (std::exception& ex) {
      (context.errorHandling)(e, record);
      (context.errorHandling)(e, record);
    if (state.severityStack.empty() == false) {
      fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
      state.severityStack.pop_back();
    postUpdateStats(action, record, tStart, tStartMilli);
    cleanupRecord(record);
    context.postDispatchingCallbacks(processContext);
    if ((context.canForwardEarly == false) && hasForwards && consumeSomething) {
    context.postForwardingCallbacks(processContext);
    cleanTimers(action.slot, record);
    O2_SIGNPOST_END(device, aid, "device", "Done processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());
  O2_SIGNPOST_END(device, sid, "device", "Start processing ready actions");
  LOGP(detail, "Broadcasting end of stream");
  for (auto& channel : spec.outputChannels) {
  cfg.getRecursive(name);
  std::vector<std::unique_ptr<ParamRetriever>> retrievers;
  retrievers.emplace_back(std::make_unique<ConfigurationOptionsRetriever>(&cfg, name));
  auto configStore = std::make_unique<ConfigParamStore>(options, std::move(retrievers));
  configStore->preload();
  configStore->activate();