#if defined(__APPLE__) || defined(NDEBUG)
#define O2_SIGNPOST_IMPLEMENTATION
#endif
#include <fairmq/Parts.h>
#include <fairmq/Socket.h>
#include <fairmq/ProgOptions.h>
#include <fairmq/shmem/Message.h>
#include <Configuration/ConfigurationInterface.h>
#include <Configuration/ConfigurationFactory.h>
#include <Monitoring/Monitoring.h>
#include <TClonesArray.h>
#include <fmt/ostream.h>
#include <boost/property_tree/json_parser.hpp>
  return devices[running.index];
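// DataProcessingDevice constructor (excerpt). It subscribes to the "dpl" FairMQ
// property so that a "cleanup" request carrying a higher cleanup count drains any
// parts still queued on the input channels.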
  : mRunningDevice{running},
    mConfigRegistry{nullptr},
    mServiceRegistry{registry}

  GetConfig()->Subscribe<std::string>("dpl", [&registry = mServiceRegistry](const std::string& key, std::string value) {
    if (key == "cleanup") {
      int64_t newCleanupCount = std::stoll(value);
      if (newCleanupCount <= cleanupCount) {
        return;
      }
      deviceState.cleanupCount.store(newCleanupCount);
      for (auto& info : deviceState.inputChannelInfos) {
        fair::mq::Parts parts;
        while (info.channel->Receive(parts, 0)) {
          LOGP(debug, "Dropping {} parts", parts.Size());
          if (parts.Size() == 0) {
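// State handling (excerpt): a watcher forwards FairMQ state names to the driver
// and pops any queued nextFairMQState; it is registered under the "99-dpl" key.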
  std::function<void(const fair::mq::State)> stateWatcher = [this, &registry = mServiceRegistry](const fair::mq::State state) -> void {
    control.notifyDeviceState(fair::mq::GetStateName(state));
    if (deviceState.nextFairMQState.empty() == false) {
      auto state = deviceState.nextFairMQState.back();
      deviceState.nextFairMQState.pop_back();

  this->SubscribeToStateChange("99-dpl", stateWatcher);
  mAwakeHandle->data = &state;
  LOG(error) << "Unable to initialise subscription";

  SubscribeToNewTransition("dpl", [wakeHandle = mAwakeHandle](fair::mq::Transition t) {
    int res = uv_async_send(wakeHandle);
    LOG(error) << "Unable to notify subscription";
    LOG(debug) << "State transition requested";
  O2_SIGNPOST_START(device, sid, "run_callback", "Starting run callback on stream %d", task->id.index);
  O2_SIGNPOST_END(device, sid, "run_callback", "Done processing data for stream %d", task->id.index);
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;

  stats.totalConsumedBytes += accumulatedConsumed.sharedMemory;
  stats.totalConsumedTimeslices += std::min<int64_t>(accumulatedConsumed.timeslices, 1);
  dpStats.processCommandQueue();

  dpStats.processCommandQueue();

  for (auto& consumer : state.offerConsumers) {
    quotaEvaluator.consume(task->id.index, consumer, reportConsumedOffer);
  }
  state.offerConsumers.clear();
  quotaEvaluator.handleExpired(reportExpiredOffer);
  quotaEvaluator.dispose(task->id.index);
  task->running = false;
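// on_socket_polled / on_out_of_band_polled (excerpt): libuv poll events
// (readable, disconnect, prioritized) are translated into per-channel
// "socket_state" signposts, and readPolled is flagged when data is pending.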
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);

      O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket connected for channel %{public}s", context->name);
      O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for read in context %{public}s", context->name);
      uv_poll_start(poller, UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED, &on_socket_polled);
      O2_SIGNPOST_START(sockets, sid, "socket_state", "Socket connected for write for channel %{public}s", context->name);
    case UV_DISCONNECT: {
      O2_SIGNPOST_END(sockets, sid, "socket_state", "Socket disconnected in context %{public}s", context->name);
    case UV_PRIORITIZED: {
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Socket prioritized for context %{public}s", context->name);

    LOGP(fatal, "Error while polling {}: {}", context->name, status);

  O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "Data pending on socket for channel %{public}s", context->name);
  assert(context->channelInfo);
  context->channelInfo->readPolled = true;

      O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket connected for channel %{public}s", context->name);
      O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for read in context %{public}s", context->name);
      O2_SIGNPOST_START(sockets, sid, "socket_state", "OOB socket connected for write for channel %{public}s", context->name);
    case UV_DISCONNECT: {
      O2_SIGNPOST_END(sockets, sid, "socket_state", "OOB socket disconnected in context %{public}s", context->name);
    case UV_PRIORITIZED: {
      O2_SIGNPOST_EVENT_EMIT(sockets, sid, "socket_state", "OOB socket prioritized for context %{public}s", context->name);
  context.statelessProcess = spec.algorithm.onProcess;
  context.error = spec.algorithm.onError;
  context.initError = spec.algorithm.onInitError;

  if (configStore == nullptr) {
    std::vector<std::unique_ptr<ParamRetriever>> retrievers;
    retrievers.emplace_back(std::make_unique<FairOptionsRetriever>(GetConfig()));
    configStore = std::make_unique<ConfigParamStore>(spec.options, std::move(retrievers));
    configStore->preload();
    configStore->activate();

  using boost::property_tree::ptree;

  for (auto& entry : configStore->store()) {
    std::stringstream ss;
    if (entry.second.empty() == false) {
      boost::property_tree::json_parser::write_json(ss, entry.second, false);
    str = entry.second.get_value<std::string>();
    std::string configString = fmt::format("[CONFIG] {}={} 1 {}", entry.first, str, configStore->provenance(entry.first.c_str())).c_str();

  mConfigRegistry = std::make_unique<ConfigParamRegistry>(std::move(configStore));

  if (context.initError) {
    context.initErrorHandling = [&errorCallback = context.initError,
      errorCallback(errorContext);
    context.initErrorHandling = [&serviceRegistry = mServiceRegistry](RuntimeErrorRef e) {

  context.expirationHandlers.clear();
  context.init = spec.algorithm.onInit;
  static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");
  InitContext initContext{*mConfigRegistry, mServiceRegistry};
  context.statefulProcess = context.init(initContext);
  if (context.initErrorHandling) {
    (context.initErrorHandling)(e);
  context.statefulProcess = context.init(initContext);
} catch (std::exception& ex) {
  (context.initErrorHandling)(e);
  (context.initErrorHandling)(e);
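// Input channel bookkeeping (excerpt): every input channel gets an InputChannelInfo
// entry and a sequential id; channels matching the internal clock prefix are
// handled separately (the branch body is elided here).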
  state.inputChannelInfos.resize(spec.inputChannels.size());
  int validChannelId = 0;
  for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
    auto& name = spec.inputChannels[ci].name;
    if (name.find(spec.channelPrefix + "from_internal-dpl-clock") == 0) {
    state.inputChannelInfos[ci].id = {validChannelId++};

  if (spec.callbacksPolicy.policy != nullptr) {
    InitContext initContext{*mConfigRegistry, mServiceRegistry};

  auto* options = GetConfig();
  for (size_t si = 0; si < mStreams.size(); ++si) {

    O2_SIGNPOST_END(device, sid, "signal_state", "No registry active. Ignoring signal.");
  while (ri != quotaEvaluator.mOffers.size()) {
    auto& offer = quotaEvaluator.mOffers[ri];
    if (offer.valid && offer.sharedMemory != 0) {
      O2_SIGNPOST_END(device, sid, "signal_state", "Memory already offered.");

  for (auto& offer : quotaEvaluator.mOffers) {
    if (offer.valid == false) {
      offer.sharedMemory = 1000000000;

  O2_SIGNPOST_END(device, sid, "signal_state", "Done processing signals.");
  if (oldestTimeslice.timeslice.value <= decongestion.lastTimeslice) {
    LOG(debug) << "Not sending already sent oldest possible timeslice " << oldestTimeslice.timeslice.value;

  for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
    auto& info = proxy.getForwardChannelInfo(ChannelIndex{fi});
    O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Skipping channel %{public}s because it's not a DPL channel",
    O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputsCallback", "Forwarding to channel %{public}s oldest possible timeslice %zu, prio 20",
                           info.name.c_str(), oldestTimeslice.timeslice.value);
  O2_SIGNPOST_START(forwarding, sid, "forwardInputs", "Starting forwarding for slot %zu with oldestTimeslice %zu %{public}s%{public}s%{public}s",
                    slot.index, oldestTimeslice.timeslice.value, copy ? "with copy" : "", copy && consume ? " and " : "", consume ? "with consume" : "");

  for (int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
    if (forwardedParts[fi].Size() == 0) {
    auto& parts = forwardedParts[fi];
    if (info.policy == nullptr) {

  O2_SIGNPOST_EVENT_EMIT(async_queue, aid, "forwardInputs", "Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);

  O2_SIGNPOST_START(forwarding, sid, "forwardInputs", "Cleaning up slot %zu with oldestTimeslice %zu %{public}s%{public}s%{public}s",
                    slot.index, oldestTimeslice.timeslice.value, copy ? "with copy" : "", copy && consume ? " and " : "", consume ? "with consume" : "");
  for (size_t ii = 0, ie = currentSetOfInputs.size(); ii < ie; ++ii) {
    auto span = std::span<fair::mq::MessagePtr>(currentSetOfInputs[ii].messages);
  if (infos.empty() == false) {
    std::vector<fair::mq::RegionInfo> toBeNotified;
    toBeNotified.swap(infos);
    static bool dummyRead = getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS") && atoi(getenv("DPL_DEBUG_MAP_ALL_SHM_REGIONS"));
    for (auto const& info : toBeNotified) {
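// initPollers(): for every pollable channel the ZeroMQ file descriptor is obtained
// via GetOption("fd", ...) and registered with the libuv loop through uv_poll_init;
// pollers are sorted into input, out-of-band and output lists, and devices without
// any pollable source fall back to a timer.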
void DataProcessingDevice::initPollers()

  if ((context.statefulProcess != nullptr) || (context.statelessProcess != nullptr)) {
    for (auto& [channelName, channel] : GetChannels()) {
      for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
        auto& channelSpec = spec.inputChannels[ci];
        channelInfo = &state.inputChannelInfos[ci];
        if (channelSpec.name != channelName) {
        channelInfo->channel = &this->GetChannel(channelName, 0);

      if ((channelName.rfind("from_internal-dpl", 0) == 0) &&
          (channelName.rfind("from_internal-dpl-aod", 0) != 0) &&
          (channelName.rfind("from_internal-dpl-ccdb-backend", 0) != 0) &&
          (channelName.rfind("from_internal-dpl-injected", 0)) != 0) {
        LOGP(detail, "{} is an internal channel. Skipping as no input will come from there.", channelName);

      if (channelName.rfind("from_" + spec.name + "_", 0) == 0) {
        LOGP(detail, "{} is to send data. Not polling.", channelName);

      LOGP(detail, "{} is not a DPL socket. Not polling.", channelName);

      size_t zmq_fd_len = sizeof(zmq_fd);
      channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
      LOGP(detail, "Polling socket for {}", channelName);
      pCtx->loop = state.loop;
      pCtx->state = &state;
      assert(channelInfo != nullptr);
      pCtx->channelInfo = channelInfo;
      pCtx->socket = &channel[0].GetSocket();
      uv_poll_init(state.loop, poller, zmq_fd);
      LOGP(detail, "{} is an out of band channel.", channelName);
      state.activeOutOfBandPollers.push_back(poller);
      state.activeInputPollers.push_back(poller);
  if (state.activeInputPollers.empty() &&
      state.activeOutOfBandPollers.empty() &&
      state.activeTimers.empty() &&
      state.activeSignals.empty()) {

    if (state.inputChannelInfos.empty()) {
      LOGP(detail, "No input channels. Setting exit transition timeout to 0.");
      deviceContext.exitTransitionTimeout = 0;

    for (auto& [channelName, channel] : GetChannels()) {
      if (channelName.rfind(spec.channelPrefix + "from_internal-dpl", 0) == 0) {
        LOGP(detail, "{} is an internal channel. Not polling.", channelName);

      if (channelName.rfind(spec.channelPrefix + "from_" + spec.name + "_", 0) == 0) {
        LOGP(detail, "{} is an out of band channel. Not polling for output.", channelName);

      size_t zmq_fd_len = sizeof(zmq_fd);
      channel[0].GetSocket().GetOption("fd", &zmq_fd, &zmq_fd_len);
      LOGP(error, "Cannot get file descriptor for channel {}", channelName);
      LOG(detail) << "Polling socket for " << channel[0].GetName();
      pCtx->loop = state.loop;
      pCtx->state = &state;
      uv_poll_init(state.loop, poller, zmq_fd);
      state.activeOutputPollers.push_back(poller);

    LOGP(detail, "This is a fake device so we exit after the first iteration.");
    deviceContext.exitTransitionTimeout = 0;

    uv_timer_init(state.loop, timer);
    timer->data = &state;
    uv_update_time(state.loop);
    state.activeTimers.push_back(timer);
void DataProcessingDevice::startPollers()

  for (auto* poller : state.activeInputPollers) {
    O2_SIGNPOST_START(device, sid, "socket_state", "Input socket waiting for connection.");
  for (auto& poller : state.activeOutOfBandPollers) {
  for (auto* poller : state.activeOutputPollers) {
    O2_SIGNPOST_START(device, sid, "socket_state", "Output socket waiting for connection.");

  uv_timer_init(state.loop, deviceContext.gracePeriodTimer);

  deviceContext.dataProcessingGracePeriodTimer->data = new ServiceRegistryRef(mServiceRegistry);
  uv_timer_init(state.loop, deviceContext.dataProcessingGracePeriodTimer);
void DataProcessingDevice::stopPollers()

  LOGP(detail, "Stopping {} input pollers", state.activeInputPollers.size());
  for (auto* poller : state.activeInputPollers) {
    uv_poll_stop(poller);
  }
  LOGP(detail, "Stopping {} out of band pollers", state.activeOutOfBandPollers.size());
  for (auto* poller : state.activeOutOfBandPollers) {
    uv_poll_stop(poller);
  }
  LOGP(detail, "Stopping {} output pollers", state.activeOutputPollers.size());
  for (auto* poller : state.activeOutputPollers) {
    uv_poll_stop(poller);
  }

  uv_timer_stop(deviceContext.gracePeriodTimer);
  free(deviceContext.gracePeriodTimer);
  deviceContext.gracePeriodTimer = nullptr;

  uv_timer_stop(deviceContext.dataProcessingGracePeriodTimer);
  free(deviceContext.dataProcessingGracePeriodTimer);
  deviceContext.dataProcessingGracePeriodTimer = nullptr;
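// InitTask (excerpt): expiration handlers are built from the input route
// configurators, timeouts are read from the program options, and every channel's
// transport subscribes to region events, queueing each RegionInfo under
// mRegionInfoMutex for later processing on the main loop.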
  for (auto& di : distinct) {
    auto& route = spec.inputs[di];
    if (route.configurator.has_value() == false) {

      .name = route.configurator->name,
      .lifetime = route.matcher.lifetime,
      .creator = route.configurator->creatorConfigurator(state, mServiceRegistry, *mConfigRegistry),
      .checker = route.configurator->danglingConfigurator(state, *mConfigRegistry),
      .handler = route.configurator->expirationConfigurator(state, *mConfigRegistry)};
    context.expirationHandlers.emplace_back(std::move(handler));

  if (state.awakeMainThread == nullptr) {

  deviceContext.expectedRegionCallbacks = std::stoi(fConfig->GetValue<std::string>("expected-region-callbacks"));
  deviceContext.exitTransitionTimeout = std::stoi(fConfig->GetValue<std::string>("exit-transition-timeout"));
  deviceContext.dataProcessingTimeout = std::stoi(fConfig->GetValue<std::string>("data-processing-timeout"));

  for (auto& channel : GetChannels()) {
    channel.second.at(0).Transport()->SubscribeToRegionEvents([&context = deviceContext,
                                                               &registry = mServiceRegistry,
                                                               &pendingRegionInfos = mPendingRegionInfos,
                                                               &regionInfoMutex = mRegionInfoMutex](fair::mq::RegionInfo info) {
      std::lock_guard<std::mutex> lock(regionInfoMutex);
      LOG(detail) << ">>> Region info event" << info.event;
      LOG(detail) << "id: " << info.id;
      LOG(detail) << "ptr: " << info.ptr;
      LOG(detail) << "size: " << info.size;
      LOG(detail) << "flags: " << info.flags;
      pendingRegionInfos.push_back(info);

  if (deviceContext.sigusr1Handle == nullptr) {
    deviceContext.sigusr1Handle->data = &mServiceRegistry;
    uv_signal_init(state.loop, deviceContext.sigusr1Handle);

  for (auto& handle : state.activeSignals) {
    handle->data = &state;

  deviceContext.sigusr1Handle->data = &mServiceRegistry;
  DataProcessingDevice::initPollers();

  LOG(error) << "DataProcessor " << state.lastActiveDataProcessor.load()->spec->name << " was unexpectedly active";

  O2_SIGNPOST_END(device, cid, "InitTask", "Exiting InitTask callback waiting for the remaining region callbacks.");

  auto hasPendingEvents = [&mutex = mRegionInfoMutex, &pendingRegionInfos = mPendingRegionInfos](DeviceContext& deviceContext) {
    std::lock_guard<std::mutex> lock(mutex);
    return (pendingRegionInfos.empty() == false) || deviceContext.expectedRegionCallbacks > 0;

  while (hasPendingEvents(deviceContext)) {
    uv_run(state.loop, UV_RUN_ONCE);
    std::lock_guard<std::mutex> lock(mRegionInfoMutex);

  O2_SIGNPOST_END(device, cid, "InitTask", "Done waiting for registration events.");
  bool enableRateLimiting = std::stoi(fConfig->GetValue<std::string>("timeframes-rate-limit"));

  if (enableRateLimiting == false && spec.name.find("internal-dpl-injected-dummy-sink") != std::string::npos) {

  if (enableRateLimiting) {
    for (auto& spec : spec.outputs) {
      if (spec.matcher.binding.value == "dpl-summary") {

  context.registry = &mServiceRegistry;

  if (context.error != nullptr) {
      errorCallback(errorContext);

  switch (deviceContext.processingPolicies.error) {

  auto decideEarlyForward = [&context, &deviceContext, &spec, this]() -> ForwardPolicy {
    for (auto& forward : spec.forwards) {
    if (spec.forwards.empty() == false) {
      forwardPolicy = defaultEarlyForwardPolicy;
      forwardPolicy = defaultEarlyForwardPolicy;

    bool onlyConditions = true;
    bool overriddenEarlyForward = false;
    for (auto& forwarded : spec.forwards) {
      if (forwarded.matcher.lifetime != Lifetime::Condition) {
        onlyConditions = false;
        overriddenEarlyForward = true;
      if (forwarded.matcher.lifetime == Lifetime::Optional) {
        overriddenEarlyForward = true;

    if (!overriddenEarlyForward && onlyConditions) {
      forwardPolicy = defaultEarlyForwardPolicy;
      LOG(detail) << "Enabling early forwarding because only conditions to be forwarded";
    return forwardPolicy;
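// PreRun / PostRun (excerpt): stream start callbacks run inside a try/catch that
// re-emits exceptions as signposts before rethrowing; a "device_state" metric
// (1 and 0, presumably on start and stop) is sent to Monitoring with the DPL
// subsystem tag.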
  state.quitRequested = false;

  for (auto& info : state.inputChannelInfos) {

  for (size_t i = 0; i < mStreams.size(); ++i) {
    context.preStartStreamCallbacks(streamRef);
} catch (std::exception& e) {
  O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type std::exception caught in PreRun: %{public}s. Rethrowing.", e.what());
  O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
  O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "PreRun", "Exception of type o2::framework::RuntimeErrorRef caught in PreRun: %{public}s. Rethrowing.", err.what);
  O2_SIGNPOST_END(device, cid, "PreRun", "Exiting PreRun due to exception thrown.");
  O2_SIGNPOST_END(device, cid, "PreRun", "Unknown exception being thrown. Rethrowing.");
  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;
  monitoring.send(Metric{(uint64_t)1, "device_state"}.addTag(Key::Subsystem, Value::DPL));

  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;
  monitoring.send(Metric{(uint64_t)0, "device_state"}.addTag(Key::Subsystem, Value::DPL));
  bool firstLoop = true;
  O2_SIGNPOST_START(device, lid, "device_state", "First iteration of the device loop");

  bool dplEnableMultithreding = getenv("DPL_THREADPOOL_SIZE") != nullptr;
  if (dplEnableMultithreding) {
    setenv("UV_THREADPOOL_SIZE", "1", 1);

  if (state.nextFairMQState.empty() == false) {
    (void)this->ChangeState(state.nextFairMQState.back());
    state.nextFairMQState.pop_back();

  std::lock_guard<std::mutex> lock(mRegionInfoMutex);

  state.lastActiveDataProcessor.compare_exchange_strong(lastActive, nullptr);
  auto shouldNotWait = (lastActive != nullptr &&
  shouldNotWait = true;
  if (lastActive != nullptr) {
  if (NewStatePending()) {
    shouldNotWait = true;

  O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "State transition requested and we are now in Idle. We can consider it to be completed.");

  if (state.severityStack.empty() == false) {
    fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
    state.severityStack.pop_back();

  state.firedTimers.clear();

  state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
  fair::Logger::SetConsoleSeverity(fair::Severity::trace);

  O2_SIGNPOST_START(device, lid, "run_loop", "Dropping message from slot %" PRIu64 ". Forwarding as needed.", (uint64_t)slot.index);
  forwardInputs(registry, slot, dropped, oldestOutputInfo, false, true);
  auto oldestPossibleTimeslice = relayer.getOldestPossibleOutput();

  if (shouldNotWait == false) {
  O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. %{}s", shouldNotWait ? "Will immediately schedule a new one" : "Waiting for next event.");
  uv_run(state.loop, shouldNotWait ? UV_RUN_NOWAIT : UV_RUN_ONCE);

  if ((state.loopReason & state.tracingFlags) != 0) {
    state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
    fair::Logger::SetConsoleSeverity(fair::Severity::trace);
  } else if (state.severityStack.empty() == false) {
    fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
    state.severityStack.pop_back();

  O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Out of band activity detected. Rescanning everything.");

  if (!state.pendingOffers.empty()) {
    O2_SIGNPOST_EVENT_EMIT(device, lid, "run_loop", "Pending %" PRIu64 " offers. updating the ComputingQuotaEvaluator.", (uint64_t)state.pendingOffers.size());

  std::lock_guard<std::mutex> lock(mRegionInfoMutex);

  assert(mStreams.size() == mHandles.size());
  for (size_t ti = 0; ti < mStreams.size(); ti++) {
    auto& taskInfo = mStreams[ti];
    if (taskInfo.running) {
    streamRef.index = ti;

  using o2::monitoring::Metric;
  using o2::monitoring::Monitoring;
  using o2::monitoring::tags::Key;
  using o2::monitoring::tags::Value;

  if (streamRef.index != -1) {
    uv_work_t& handle = mHandles[streamRef.index];
    handle.data = &mStreams[streamRef.index];
    dpStats.processCommandQueue();
  struct SchedulingStats {
    std::atomic<size_t> lastScheduled = 0;
    std::atomic<size_t> numberOfUnscheduledSinceLastScheduled = 0;
    std::atomic<size_t> numberOfUnscheduled = 0;
    std::atomic<size_t> numberOfScheduled = 0;
  };
  static SchedulingStats schedulingStats;

  stream.registry = &mServiceRegistry;
  schedulingStats.lastScheduled = uv_now(state.loop);
  schedulingStats.numberOfScheduled++;
  schedulingStats.numberOfUnscheduledSinceLastScheduled = 0;
  O2_SIGNPOST_EVENT_EMIT(scheduling, sid, "Run", "Enough resources to schedule computation on stream %d", streamRef.index);
  if (dplEnableMultithreding) [[unlikely]] {

  if (schedulingStats.numberOfUnscheduledSinceLastScheduled > 100 ||
      (uv_now(state.loop) - schedulingStats.lastScheduled) > 30000) {
      "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu.",
      schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
      schedulingStats.lastScheduled.load());
      "Not enough resources to schedule computation. %zu skipped so far. Last scheduled at %zu.",
      schedulingStats.numberOfUnscheduledSinceLastScheduled.load(),
      schedulingStats.lastScheduled.load());
  schedulingStats.numberOfUnscheduled++;
  schedulingStats.numberOfUnscheduledSinceLastScheduled++;

  O2_SIGNPOST_END(device, lid, "run_loop", "Run loop completed. Transition handling state %d.", (int)state.transitionHandling);

  for (size_t ci = 0; ci < spec.inputChannels.size(); ++ci) {
    auto& info = state.inputChannelInfos[ci];
    info.parts.fParts.clear();
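// DataProcessorContext::doPrepare (excerpt): channels are polled in order of their
// oldest possible timeslice; with input balancing enabled, channels more than
// DPL_MAX_CHANNEL_AHEAD timeslices ahead are temporarily removed from the poll
// order and their pollers paused.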
  O2_SIGNPOST_START(device, dpid, "do_prepare", "Starting DataProcessorContext::doPrepare.");

  context.allDone = std::any_of(state.inputChannelInfos.begin(), state.inputChannelInfos.end(), [cid](const auto& info) {
    O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "Input channel %{public}s%{public}s has %zu parts left and is in state %d.",
                           info.channel->GetName().c_str(), (info.id.value == ChannelIndex::INVALID ? " (non DPL)" : ""), info.parts.fParts.size(), (int)info.state);
    O2_SIGNPOST_EVENT_EMIT(device, cid, "do_prepare", "External channel %d is in state %d.", info.id.value, (int)info.state);

  O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Processing %zu input channels.", spec.inputChannels.size());

  static std::vector<int> pollOrder;
  pollOrder.resize(state.inputChannelInfos.size());
  std::iota(pollOrder.begin(), pollOrder.end(), 0);
  std::sort(pollOrder.begin(), pollOrder.end(), [&infos = state.inputChannelInfos](int a, int b) {
    return infos[a].oldestForChannel.value < infos[b].oldestForChannel.value;
  });

  if (pollOrder.empty()) {
    O2_SIGNPOST_END(device, dpid, "do_prepare", "Nothing to poll. Waiting for next iteration.");

  auto currentOldest = state.inputChannelInfos[pollOrder.front()].oldestForChannel;
  auto currentNewest = state.inputChannelInfos[pollOrder.back()].oldestForChannel;
  auto delta = currentNewest.value - currentOldest.value;
  O2_SIGNPOST_EVENT_EMIT(device, dpid, "do_prepare", "Oldest possible timeframe range %" PRIu64 " => %" PRIu64 " delta %" PRIu64,
                         (int64_t)currentOldest.value, (int64_t)currentNewest.value, (int64_t)delta);
  auto& infos = state.inputChannelInfos;

  if (context.balancingInputs) {
    static uint64_t ahead = getenv("DPL_MAX_CHANNEL_AHEAD") ? std::atoll(getenv("DPL_MAX_CHANNEL_AHEAD")) : std::max(8, std::min(pipelineLength - 48, pipelineLength / 2));
    auto newEnd = std::remove_if(pollOrder.begin(), pollOrder.end(), [&infos, limitNew = currentOldest.value + ahead](int a) -> bool {
      return infos[a].oldestForChannel.value > limitNew;
    });
    for (auto it = pollOrder.begin(); it < pollOrder.end(); it++) {
      const auto& channelInfo = state.inputChannelInfos[*it];
      bool shouldBeRunning = it < newEnd;
      if (running != shouldBeRunning) {
        uv_poll_start(poller, shouldBeRunning ? UV_READABLE | UV_DISCONNECT | UV_PRIORITIZED : 0, &on_socket_polled);

    pollOrder.erase(newEnd, pollOrder.end());
  O2_SIGNPOST_END(device, dpid, "do_prepare", "%zu channels pass the channel imbalance check.", pollOrder.size());
  for (auto sci : pollOrder) {
    auto& info = state.inputChannelInfos[sci];
    auto& channelSpec = spec.inputChannels[sci];
    O2_SIGNPOST_START(device, cid, "channels", "Processing channel %s", channelSpec.name.c_str());

    context.allDone = false;

    if (info.parts.Size()) {
      O2_SIGNPOST_END(device, cid, "channels", "Flushing channel %s which is in state %d and has %zu parts still pending.",
                      channelSpec.name.c_str(), (int)info.state, info.parts.Size());

    if (info.channel == nullptr) {
      O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is nullptr and has %zu parts still pending.",
                      channelSpec.name.c_str(), (int)info.state, info.parts.Size());

      O2_SIGNPOST_END(device, cid, "channels", "Channel %s which is in state %d is not a DPL channel and has %zu parts still pending.",
                      channelSpec.name.c_str(), (int)info.state, info.parts.Size());

    auto& socket = info.channel->GetSocket();

    if (info.hasPendingEvents == 0) {
      socket.Events(&info.hasPendingEvents);

    if ((info.hasPendingEvents & 1) == 0 && (info.parts.Size() == 0)) {
      O2_SIGNPOST_END(device, cid, "channels", "No pending events and no remaining parts to process for channel %{public}s", channelSpec.name.c_str());

    info.readPolled = false;

    bool newMessages = false;
    O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Receiving loop called for channel %{public}s (%d) with oldest possible timeslice %zu",
                           channelSpec.name.c_str(), info.id.value, info.oldestForChannel.value);
    if (info.parts.Size() < 64) {
      fair::mq::Parts parts;
      info.channel->Receive(parts, 0);
      O2_SIGNPOST_EVENT_EMIT(device, cid, "channels", "Received %zu parts from channel %{public}s (%d).", parts.Size(), channelSpec.name.c_str(), info.id.value);
      for (auto&& part : parts) {
        info.parts.fParts.emplace_back(std::move(part));
      }
      newMessages |= true;

    if (info.parts.Size() >= 0) {

    socket.Events(&info.hasPendingEvents);
    if (info.hasPendingEvents) {
      info.readPolled = false;
      state.lastActiveDataProcessor.store(&context);

    O2_SIGNPOST_END(device, cid, "channels", "Done processing channel %{public}s (%d).",
                    channelSpec.name.c_str(), info.id.value);
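// Dangling inputs and end of stream (excerpt): expiration handlers may create or
// expire slots, the timing information for the end-of-stream record is synthesised
// from the oldest possible output, and end of stream is announced on every output
// channel before the output pollers are stopped.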
  context.completed.clear();
  context.completed.reserve(16);
  state.lastActiveDataProcessor.store(&context);

  context.preDanglingCallbacks(danglingContext);
  if (state.lastActiveDataProcessor.load() == nullptr) {
  auto activity = ref.get<DataRelayer>().processDanglingInputs(context.expirationHandlers, *context.registry, true);
  if (activity.expiredSlots > 0) {
    state.lastActiveDataProcessor = &context;

  context.completed.clear();
  state.lastActiveDataProcessor = &context;
  context.postDanglingCallbacks(danglingContext);
  state.lastActiveDataProcessor = &context;

  timingInfo.timeslice = relayer.getOldestPossibleOutput().timeslice.value;
  timingInfo.tfCounter = -1;
  timingInfo.firstTForbit = -1;
  timingInfo.creation = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::system_clock::now()).time_since_epoch().count();
  O2_SIGNPOST_EVENT_EMIT(calibration, dpid, "calibration", "TimingInfo.keepAtEndOfStream %d", timingInfo.keepAtEndOfStream);

  context.preEOSCallbacks(eosContext);
  streamContext.postEOSCallbacks(eosContext);
  context.postEOSCallbacks(eosContext);

  for (auto& channel : spec.outputChannels) {
    O2_SIGNPOST_EVENT_EMIT(device, dpid, "state", "Sending end of stream to %{public}s.", channel.name.c_str());

  if (shouldProcess) {
    state.lastActiveDataProcessor = &context;

  for (auto& poller : state.activeOutputPollers) {
    uv_poll_stop(poller);

  for (auto& poller : state.activeOutputPollers) {
    uv_poll_stop(poller);

  if (deviceContext.sigusr1Handle) {
  handle->data = nullptr;
  O2_SIGNPOST_EVENT_EMIT(device, sid, "device", "Early forwarding before injecting data into relayer.");
1748 "Starting forwarding for incoming messages with oldestTimeslice %zu with copy",
1749 oldestTimeslice.timeslice.value);
1750 std::vector<fair::mq::Parts> forwardedParts(proxy.getNumForwardChannels());
1753 for (
int fi = 0; fi < proxy.getNumForwardChannels(); fi++) {
1754 if (forwardedParts[fi].
Size() == 0) {
1758 auto& parts = forwardedParts[fi];
1759 if (info.
policy ==
nullptr) {
1769 O2_SIGNPOST_EVENT_EMIT(async_queue, aid,
"forwardInputs",
"Queuing forwarding oldestPossible %zu", oldestTimeslice.timeslice.value);
  auto getInputTypes = [&info, &context]() -> std::optional<std::vector<InputInfo>> {
    auto& parts = info.parts;
    std::vector<InputInfo> results;
    results.reserve(parts.Size() / 2);
    size_t nTotalPayloads = 0;

    if (type != InputType::Invalid && length > 1) {
      nTotalPayloads += length - 1;

    for (size_t pi = 0; pi < parts.Size(); pi += 2) {
      auto* headerData = parts.At(pi)->GetData();
      auto sih = o2::header::get<SourceInfoHeader*>(headerData);
      auto dh = o2::header::get<DataHeader*>(headerData);
        O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got SourceInfoHeader with state %d", (int)sih->state);
        info.state = sih->state;
        insertInputInfo(pi, 2, InputType::SourceInfo, info.id);
        state.lastActiveDataProcessor = &context;
        LOGP(error, "Found data attached to a SourceInfoHeader");

      auto dih = o2::header::get<DomainInfoHeader*>(headerData);
        O2_SIGNPOST_EVENT_EMIT(device, cid, "handle_data", "Got DomainInfoHeader with oldestPossibleTimeslice %d", (int)dih->oldestPossibleTimeslice);
        insertInputInfo(pi, 2, InputType::DomainInfo, info.id);
        state.lastActiveDataProcessor = &context;
        LOGP(error, "Found data attached to a DomainInfoHeader");

        insertInputInfo(pi, 0, InputType::Invalid, info.id);

      if (dh->payloadSize > parts.At(pi + 1)->GetSize()) {
        insertInputInfo(pi, 0, InputType::Invalid, info.id);

      auto dph = o2::header::get<DataProcessingHeader*>(headerData);
      O2_SIGNPOST_START(parts, pid, "parts", "Processing DataHeader %{public}-4s/%{public}-16s/%d with splitPayloadParts %d and splitPayloadIndex %d",
                        dh->dataOrigin.str, dh->dataDescription.str, dh->subSpecification, dh->splitPayloadParts, dh->splitPayloadIndex);
        insertInputInfo(pi, 2, InputType::Invalid, info.id);

      if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
        insertInputInfo(pi, dh->splitPayloadParts + 1, InputType::Data, info.id);
        pi += dh->splitPayloadParts - 1;

        size_t finalSplitPayloadIndex = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;
        if (finalSplitPayloadIndex > parts.Size()) {
          insertInputInfo(pi, 0, InputType::Invalid, info.id);

        insertInputInfo(pi, 2, InputType::Data, info.id);
        for (; pi + 2 < finalSplitPayloadIndex; pi += 2) {
          insertInputInfo(pi + 2, 2, InputType::Data, info.id);

    if (results.size() + nTotalPayloads != parts.Size()) {
      O2_SIGNPOST_EVENT_EMIT_ERROR(device, cid, "handle_data", "inconsistent number of inputs extracted. %zu vs parts (%zu)", results.size() + nTotalPayloads, parts.Size());
      return std::nullopt;
  auto reportError = [ref](const char* message) {

  auto handleValidMessages = [&info, ref, &reportError, &context](std::vector<InputInfo> const& inputInfos) {
    auto& parts = info.parts;
    bool hasBackpressure = false;
    size_t minBackpressureTimeslice = -1;
    size_t oldestPossibleTimeslice = -1;
    static std::vector<int> ordering;
    ordering.resize(inputInfos.size());
    std::iota(ordering.begin(), ordering.end(), 0);
    std::stable_sort(ordering.begin(), ordering.end(), [&inputInfos](int const& a, int const& b) {
      auto const& ai = inputInfos[a];
      auto const& bi = inputInfos[b];
      if (ai.type != bi.type) {
        return ai.type < bi.type;
      }
      return ai.position < bi.position;
    });
    for (size_t ii = 0; ii < inputInfos.size(); ++ii) {
      auto const& input = inputInfos[ordering[ii]];
      switch (input.type) {
        case InputType::Data: {
          auto headerIndex = input.position;
          auto nPayloadsPerHeader = 0;
          if (input.size > 2) {
            nMessages = input.size;
            nPayloadsPerHeader = nMessages - 1;
          } else {
            auto dh = o2::header::get<DataHeader*>(parts.At(headerIndex)->GetData());
            nMessages = dh->splitPayloadParts > 0 ? dh->splitPayloadParts * 2 : 2;
            nPayloadsPerHeader = 1;
            ii += (nMessages / 2) - 1;

          O2_SIGNPOST_EVENT_EMIT(async_queue, cid, "onDrop", "Dropping message from slot %zu. Forwarding as needed. Timeslice %zu",
                                 slot.index, oldestOutputInfo.timeslice.value);
          forwardInputs(ref, slot, dropped, oldestOutputInfo, false, true);

          auto relayed = relayer.relay(parts.At(headerIndex)->GetData(),
                                       &parts.At(headerIndex),

          switch (relayed.type) {
            LOGP(alarm, "Backpressure on channel {}. Waiting.", info.channel->GetName());
            auto& monitoring = ref.get<o2::monitoring::Monitoring>();
            monitoring.send(o2::monitoring::Metric{1, fmt::format("backpressure_{}", info.channel->GetName())});
            policy.backpressure(info);
            hasBackpressure = true;
            minBackpressureTimeslice = std::min<size_t>(minBackpressureTimeslice, relayed.timeslice.value);

            LOGP(info, "Back to normal on channel {}.", info.channel->GetName());
            auto& monitoring = ref.get<o2::monitoring::Monitoring>();
            monitoring.send(o2::monitoring::Metric{0, fmt::format("backpressure_{}", info.channel->GetName())});

        case InputType::SourceInfo: {
          LOGP(detail, "Received SourceInfo");
          state.lastActiveDataProcessor = &context;
          auto headerIndex = input.position;
          auto payloadIndex = input.position + 1;
          assert(payloadIndex < parts.Size());
          parts.At(headerIndex).reset(nullptr);
          parts.At(payloadIndex).reset(nullptr);

        case InputType::DomainInfo: {
          state.lastActiveDataProcessor = &context;
          auto headerIndex = input.position;
          auto payloadIndex = input.position + 1;
          assert(payloadIndex < parts.Size());
          auto dih = o2::header::get<DomainInfoHeader*>(parts.At(headerIndex)->GetData());
          if (hasBackpressure && dih->oldestPossibleTimeslice >= minBackpressureTimeslice) {
          oldestPossibleTimeslice = std::min(oldestPossibleTimeslice, dih->oldestPossibleTimeslice);
          LOGP(debug, "Got DomainInfoHeader, new oldestPossibleTimeslice {} on channel {}", oldestPossibleTimeslice, info.id.value);
          parts.At(headerIndex).reset(nullptr);
          parts.At(payloadIndex).reset(nullptr);

        case InputType::Invalid: {
          reportError("Invalid part found.");

    if (oldestPossibleTimeslice != (size_t)-1) {
      context.domainInfoUpdatedCallback(*context.registry, oldestPossibleTimeslice, info.id);
      state.lastActiveDataProcessor = &context;

    auto it = std::remove_if(parts.fParts.begin(), parts.fParts.end(), [](auto& msg) -> bool { return msg.get() == nullptr; });
    parts.fParts.erase(it, parts.end());
    if (parts.fParts.size()) {
      LOG(debug) << parts.fParts.size() << " messages backpressured";

  auto inputTypes = getInputTypes();
  if (bool(inputTypes) == false) {
    reportError("Parts should come in couples. Dropping it.");
  handleValidMessages(*inputTypes);
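// tryDispatchComputation helpers (excerpt): calculateInputRecordLatency() and
// calculateTotalInputRecordSize() aggregate per-part creation latency and payload
// size, and getInputSpan() wraps the relayed messages into an InputSpan backed by
// header/payload pointer getters.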
struct InputLatency {

auto calculateInputRecordLatency(InputRecord const& record, uint64_t currentTime) -> InputLatency

  for (auto& item : record) {
    auto* header = o2::header::get<DataProcessingHeader*>(item.header);
    if (header == nullptr) {
    int64_t partLatency = (0x7fffffffffffffff & currentTime) - (0x7fffffffffffffff & header->creation);
    if (partLatency < 0) {
    result.minLatency = std::min(result.minLatency, (uint64_t)partLatency);
    result.maxLatency = std::max(result.maxLatency, (uint64_t)partLatency);

auto calculateTotalInputRecordSize(InputRecord const& record) -> int

  size_t totalInputSize = 0;
  for (auto& item : record) {
    auto* header = o2::header::get<DataHeader*>(item.header);
    if (header == nullptr) {
    totalInputSize += header->payloadSize;
  return totalInputSize;

template <typename T>
void update_maximum(std::atomic<T>& maximum_value, T const& value) noexcept

  T prev_value = maximum_value;
  while (prev_value < value &&
         !maximum_value.compare_exchange_weak(prev_value, value)) {

  LOGP(debug, "DataProcessingDevice::tryDispatchComputation");
  std::vector<MessageSet> currentSetOfInputs;

  auto getInputSpan = [ref, &currentSetOfInputs](TimesliceSlot slot, bool consume = true) {
    currentSetOfInputs = relayer.consumeExistingInputsForTimeslice(slot);
    auto getter = [&currentSetOfInputs](size_t i, size_t partindex) -> DataRef {
      if (currentSetOfInputs[i].getNumberOfPairs() > partindex) {
        const char* headerptr = nullptr;
        const char* payloadptr = nullptr;
        size_t payloadSize = 0;
        auto const& headerMsg = currentSetOfInputs[i].associatedHeader(partindex);
        auto const& payloadMsg = currentSetOfInputs[i].associatedPayload(partindex);
        headerptr = static_cast<char const*>(headerMsg->GetData());
        payloadptr = payloadMsg ? static_cast<char const*>(payloadMsg->GetData()) : nullptr;
        payloadSize = payloadMsg ? payloadMsg->GetSize() : 0;
        return DataRef{nullptr, headerptr, payloadptr, payloadSize};

    auto nofPartsGetter = [&currentSetOfInputs](size_t i) -> size_t {
      return currentSetOfInputs[i].getNumberOfPairs();

    auto refCountGetter = [&currentSetOfInputs](size_t idx) -> int {
      auto& header = static_cast<const fair::mq::shmem::Message&>(*currentSetOfInputs[idx].header(0));
      return header.GetRefCount();

    return InputSpan{getter, nofPartsGetter, refCountGetter, currentSetOfInputs.size()};
  auto timeslice = relayer.getTimesliceForSlot(i);
  timingInfo.timeslice = timeslice.value;

  auto timeslice = relayer.getTimesliceForSlot(i);
  timingInfo.globalRunNumberChanged = !TimingInfo::timesliceIsTimer(timeslice.value) && dataProcessorContext.lastRunNumberProcessed != timingInfo.runNumber;
  timingInfo.globalRunNumberChanged &= (dataProcessorContext.lastRunNumberProcessed == -1 || timingInfo.runNumber != 0);
  timingInfo.streamRunNumberChanged = timingInfo.globalRunNumberChanged;

  assert(record.size() == currentSetOfInputs.size());
  for (size_t ii = 0, ie = record.size(); ii < ie; ++ii) {
    DataRef input = record.getByPos(ii);
    if (input.header == nullptr) {
    currentSetOfInputs[ii].clear();

  for (size_t pi = 0, pe = record.size(); pi < pe; ++pi) {
    DataRef input = record.getByPos(pi);
    if (input.header == nullptr) {
    auto sih = o2::header::get<SourceInfoHeader*>(input.header);
    auto dh = o2::header::get<DataHeader*>(input.header);
    if (dh->splitPayloadParts > 0 && dh->splitPayloadParts == dh->splitPayloadIndex) {
      pi += dh->splitPayloadParts - 1;
      size_t pi = pi + (dh->splitPayloadParts > 0 ? dh->splitPayloadParts : 1) * 2;

  if (completed.empty() == true) {
    LOGP(debug, "No computations available for dispatching.");

  std::atomic_thread_fence(std::memory_order_release);
  char relayerSlotState[1024];
  char* buffer = relayerSlotState + written;
  for (size_t ai = 0; ai != record.size(); ai++) {
    buffer[ai] = record.isValid(ai) ? '3' : '0';
  }
  buffer[record.size()] = 0;
    .size = (int)(record.size() + buffer - relayerSlotState),
    .data = relayerSlotState});
  uint64_t tEnd = uv_hrtime();
  int64_t wallTimeMs = (tEnd - tStart) / 1000000;

  auto latency = calculateInputRecordLatency(record, tStartMilli);
  static int count = 0;

  std::atomic_thread_fence(std::memory_order_release);
  char relayerSlotState[1024];
  char* buffer = strchr(relayerSlotState, ' ') + 1;
  for (size_t ai = 0; ai != record.size(); ai++) {
    buffer[ai] = record.isValid(ai) ? '2' : '0';
  }
  buffer[record.size()] = 0;
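// Completed actions (excerpt): completed slots are sorted according to the
// completion policy (by timeslice or by slot index), then each action runs the
// stateful/stateless process callback between pre/post-processing callbacks, with
// errors routed to context.errorHandling and inputs forwarded or cleaned up
// afterwards.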
  switch (spec.completionPolicy.order) {
      std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.timeslice.value < b.timeslice.value; });
      std::sort(completed.begin(), completed.end(), [](auto const& a, auto const& b) { return a.slot.index < b.slot.index; });

  for (auto action : completed) {
    O2_SIGNPOST_START(device, aid, "device", "Processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());

    dpContext.preProcessingCallbacks(processContext);

    context.postDispatchingCallbacks(processContext);
    if (spec.forwards.empty() == false) {
      forwardInputs(ref, action.slot, currentSetOfInputs, timesliceIndex.getOldestPossibleOutput(), false);
      O2_SIGNPOST_END(device, aid, "device", "Forwarding inputs consume: %d.", false);

    bool hasForwards = spec.forwards.empty() == false;
    O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "Early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
    O2_SIGNPOST_EVENT_EMIT(device, aid, "device", "cleaning early forwarding: %{public}s.", fmt::format("{}", action.op).c_str());
    markInputsAsDone(action.slot);

    uint64_t tStart = uv_hrtime();
    preUpdateStats(action, record, tStart);

    static bool noCatch = getenv("O2_NO_CATCHALL_EXCEPTIONS") && strcmp(getenv("O2_NO_CATCHALL_EXCEPTIONS"), "0");

    switch (action.op) {
      if (state.quitRequested == false) {

    streamContext.preProcessingCallbacks(processContext);

    if (context.statefulProcess && shouldProcess(action)) {
      (context.statefulProcess)(processContext);
    } else if (context.statelessProcess && shouldProcess(action)) {
      (context.statelessProcess)(processContext);
    } else if (context.statelessProcess || context.statefulProcess) {
      O2_SIGNPOST_EVENT_EMIT(device, pcid, "device", "No processing callback provided. Switching to %{public}s.", "Idle");

    if (shouldProcess(action)) {
      if (timingInfo.globalRunNumberChanged) {
        context.lastRunNumberProcessed = timingInfo.runNumber;

    streamContext.finaliseOutputsCallbacks(processContext);
    streamContext.postProcessingCallbacks(processContext);

    state.severityStack.push_back((int)fair::Logger::GetConsoleSeverity());
    fair::Logger::SetConsoleSeverity(fair::Severity::trace);
    (context.errorHandling)(e, record);
  } catch (std::exception& ex) {
    (context.errorHandling)(e, record);
    (context.errorHandling)(e, record);

    if (state.severityStack.empty() == false) {
      fair::Logger::SetConsoleSeverity((fair::Severity)state.severityStack.back());
      state.severityStack.pop_back();

    postUpdateStats(action, record, tStart, tStartMilli);

    cleanupRecord(record);
    context.postDispatchingCallbacks(processContext);
    context.postForwardingCallbacks(processContext);
    cleanTimers(action.slot, record);

    O2_SIGNPOST_END(device, aid, "device", "Done processing action on slot %lu for action %{public}s", action.slot.index, fmt::format("{}", action.op).c_str());

  O2_SIGNPOST_END(device, sid, "device", "Start processing ready actions");
  LOGP(detail, "Broadcasting end of stream");
  for (auto& channel : spec.outputChannels) {

  cfg.getRecursive(name);
  std::vector<std::unique_ptr<ParamRetriever>> retrievers;
  retrievers.emplace_back(std::make_unique<ConfigurationOptionsRetriever>(&cfg, name));
  auto configStore = std::make_unique<ConfigParamStore>(options, std::move(retrievers));
  configStore->preload();
  configStore->activate();