RecoWorkflow.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

#include "Framework/Logger.h"
#include "TPCWorkflow/ZSSpec.h"
#include <string>
#include <stdexcept>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <unordered_map>
#include <algorithm> // std::find
#include <tuple>     // make_tuple
#include <array>
#include <gsl/span>

using namespace o2::dataformats;

namespace o2
{
namespace tpc
{
namespace reco_workflow
{

static std::shared_ptr<o2::gpu::GPURecoWorkflowSpec> gTask;

using namespace framework;

template <typename T>
using BranchDefinition = MakeRootTreeWriterSpec::BranchDefinition<T>;

const std::unordered_map<std::string, InputType> InputMap{
  {"pass-through", InputType::PassThrough},
  {"digitizer", InputType::Digitizer},
  {"digits", InputType::Digits},
  {"clustershardware", InputType::ClustersHardware},
  {"clusters", InputType::Clusters},
  {"zsraw", InputType::ZSRaw},
  {"compressed-clusters-root", InputType::CompClustersRoot},
  {"compressed-clusters-flat", InputType::CompClustersFlat},
  {"compressed-clusters-flat-for-encode", InputType::CompClustersFlatForEncode}};

const std::unordered_map<std::string, OutputType> OutputMap{
  {"digits", OutputType::Digits},
  {"clustershardware", OutputType::ClustersHardware},
  {"clusters", OutputType::Clusters},
  {"tracks", OutputType::Tracks},
  {"compressed-clusters-root", OutputType::CompClustersRoot},
  {"compressed-clusters-flat", OutputType::CompClustersFlat},
  {"encoded-clusters", OutputType::EncodedClusters},
  {"disable-writer", OutputType::DisableWriter},
  {"send-clusters-per-sector", OutputType::SendClustersPerSector},
  {"zsraw", OutputType::ZSRaw},
  {"qa", OutputType::QA},
  {"no-shared-cluster-map", OutputType::NoSharedClusterMap},
  {"tpc-triggers", OutputType::TPCTriggers}};

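// The workflow input and output are selected by name: cfgInput must be one of the InputMap keys,
// and cfgOutput may be a comma-separated list of OutputMap keys, e.g. "tracks,disable-writer"
// runs the reconstruction up to tracks but skips the ROOT writer.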
framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vector<int> const& tpcSectors, unsigned long tpcSectorMask, std::vector<int> const& laneConfiguration,
                                    const o2::tpc::CorrectionMapsLoaderGloOpts& sclOpts, bool propagateMC, unsigned nLanes, std::string const& cfgInput, std::string const& cfgOutput, bool disableRootInput,
                                    int caClusterer, int zsOnTheFly, bool askDISTSTF, bool selIR, bool filteredInp, int deadMapSources, bool useMCTimeGain)
{
  InputType inputType;
  try {
    inputType = InputMap.at(cfgInput);
  } catch (std::out_of_range&) {
    throw std::invalid_argument(std::string("invalid input type: ") + cfgInput);
  }
  std::vector<OutputType> outputTypes;
  try {
    outputTypes = RangeTokenizer::tokenize<OutputType>(cfgOutput, [](std::string const& token) { return OutputMap.at(token); });
  } catch (std::out_of_range&) {
    throw std::invalid_argument(std::string("invalid output type: ") + cfgOutput);
  }
  auto isEnabled = [&outputTypes](OutputType type) {
    return std::find(outputTypes.begin(), outputTypes.end(), type) != outputTypes.end();
  };

  if (filteredInp && !(inputType == InputType::PassThrough)) {
    throw std::invalid_argument("the filtered-input option can only be used with pass-through input");
  }

  bool decompressTPC = inputType == InputType::CompClustersFlat || inputType == InputType::CompClustersRoot;
  // Disable settings that do not apply to the selected TPC input type, so they do not have to be disabled manually
  if (decompressTPC && (isEnabled(OutputType::Clusters) || isEnabled(OutputType::Tracks))) {
    caClusterer = false;
    zsOnTheFly = false;
    propagateMC = false;
  }
  if (inputType == InputType::CompClustersFlatForEncode) {
    caClusterer = false;
    zsOnTheFly = false;
    propagateMC = false;
  }
  if (inputType == InputType::ZSRaw) {
    caClusterer = true;
    zsOnTheFly = false;
    propagateMC = false;
  }
  if (inputType == InputType::PassThrough || inputType == InputType::ClustersHardware || inputType == InputType::Clusters) {
    caClusterer = false;
  }
  if (!caClusterer) {
    zsOnTheFly = false;
  }
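  // At this point caClusterer, zsOnTheFly and propagateMC are consistent with the selected input
  // type; the checks below reject the remaining invalid input/output combinations.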

  if (inputType == InputType::ClustersHardware && isEnabled(OutputType::Digits)) {
    throw std::invalid_argument("input/output type mismatch, cannot produce 'digits' from 'clustershardware'");
  }
  if (inputType == InputType::Clusters && (isEnabled(OutputType::Digits) || isEnabled(OutputType::ClustersHardware))) {
    throw std::invalid_argument("input/output type mismatch, cannot produce 'digits' or 'clustershardware' from 'clusters'");
  }
  if (inputType == InputType::ZSRaw && isEnabled(OutputType::ClustersHardware)) {
    throw std::invalid_argument("input/output type mismatch, cannot produce 'clustershardware' from 'zsraw'");
  }
  if (caClusterer && (inputType == InputType::Clusters || inputType == InputType::ClustersHardware)) {
    throw std::invalid_argument("ca-clusterer requires digits as input");
  }
  if (caClusterer && (isEnabled(OutputType::ClustersHardware))) {
    throw std::invalid_argument("ca-clusterer cannot produce clustershardware output");
  }

  WorkflowSpec specs;

  bool produceTracks = isEnabled(OutputType::Tracks);

  // We provide a special publishing method for labels which have been stored in a split format and need
  // to be transformed into a contiguous shareable container before publishing. For other branches/types this
  // returns false and the generic RootTreeWriter publishing proceeds.
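  // (IOMCTruthContainerView is the split, ROOT-IO-friendly representation; copyandflatten() below
  // rebuilds a contiguous read-only container that can be sent with snapshot().)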
  static Reader::SpecialPublishHook hook{[](std::string_view name, ProcessingContext& context, o2::framework::Output const& output, char* data) -> bool {
    if (TString(name.data()).Contains("TPCDigitMCTruth") || TString(name.data()).Contains("TPCClusterHwMCTruth") || TString(name.data()).Contains("TPCClusterNativeMCTruth")) {
      auto storedlabels = reinterpret_cast<o2::dataformats::IOMCTruthContainerView const*>(data);
      o2::dataformats::ConstMCTruthContainer<o2::MCCompLabel> flatlabels;
      storedlabels->copyandflatten(flatlabels);
      // LOG(info) << "PUBLISHING CONST LABELS " << flatlabels.getNElements();
      context.outputs().snapshot(output, flatlabels);
      return true;
    }
    return false;
  }};
  if (!disableRootInput || inputType == InputType::PassThrough) {
    // The OutputSpec of the PublisherSpec is configured depending on the input
    // type. Note that the configuration of the dispatch trigger in the main file
    // needs to be done accordingly. This means that if a new input option is added,
    // the dispatch trigger needs to be updated as well.
    if (inputType == InputType::Digits) {
      using Type = std::vector<o2::tpc::Digit>;

      specs.emplace_back(o2::tpc::getPublisherSpec<Type>(PublisherConf{
                                                           "tpc-digit-reader",
                                                           "tpcdigits.root",
                                                           "o2sim",
                                                           {"digitbranch", "TPCDigit", "Digit branch"},
                                                           {"mcbranch", "TPCDigitMCTruth", "MC label branch"},
                                                           OutputSpec{"TPC", "DIGITS"},
                                                           OutputSpec{"TPC", "DIGITSMCTR"},
                                                           tpcSectors,
                                                           laneConfiguration,
                                                           &hook},
                                                         propagateMC));
      if (sclOpts.needTPCScalersWorkflow()) { // for standalone tpc-reco workflow
        specs.emplace_back(o2::tpc::getTPCScalerSpec(sclOpts.lumiType == 2, sclOpts.enableMShapeCorrection));
      }
      if (produceTracks && sclOpts.requestCTPLumi) { // need CTP digits (lumi) reader
        specs.emplace_back(o2::ctp::getDigitsReaderSpec(false));
      }
    } else if (inputType == InputType::ClustersHardware) {
      specs.emplace_back(o2::tpc::getPublisherSpec(PublisherConf{
                                                     "tpc-clusterhardware-reader",
                                                     "tpc-clusterhardware.root",
                                                     "tpcclustershardware",
                                                     {"databranch", "TPCClusterHw", "Branch with TPC ClustersHardware"},
                                                     {"mcbranch", "TPCClusterHwMCTruth", "MC label branch"},
                                                     OutputSpec{"TPC", "CLUSTERHW"},
                                                     OutputSpec{"TPC", "CLUSTERHWMCLBL"},
                                                     tpcSectors,
                                                     laneConfiguration,
                                                     &hook},
                                                   propagateMC));
    } else if (inputType == InputType::Clusters) {
      specs.emplace_back(o2::tpc::getClusterReaderSpec(propagateMC, &tpcSectors, &laneConfiguration));
      if (!getenv("DPL_DISABLE_TPC_TRIGGER_READER") || atoi(getenv("DPL_DISABLE_TPC_TRIGGER_READER")) != 1) {
        specs.emplace_back(o2::tpc::getTPCTriggerReaderSpec());
      }
      if (sclOpts.needTPCScalersWorkflow()) { // for standalone tpc-reco workflow
        specs.emplace_back(o2::tpc::getTPCScalerSpec(sclOpts.lumiType == 2, sclOpts.enableMShapeCorrection));
      }
      if (sclOpts.requestCTPLumi) { // need CTP digits (lumi) reader
        specs.emplace_back(o2::ctp::getDigitsReaderSpec(false));
      }
    } else if (inputType == InputType::CompClustersRoot) {
      // TODO: need to check whether we want to store the MC labels alongside the compressed clusters;
      // for the moment reading of labels is disabled (last parameter is false)
      // TODO: make a different publisher spec for only one output spec, for now using the
      // PublisherSpec with only sector 0, '_0' is thus appended to the branch name
      specs.emplace_back(o2::tpc::getPublisherSpec(PublisherConf{
                                                     "tpc-compressed-cluster-reader",
                                                     "tpc-compclusters.root",
                                                     "tpcrec",
                                                     {"clusterbranch", "TPCCompClusters", "Branch with TPC compressed clusters"},
                                                     {"", "", ""}, // No MC labels
                                                     OutputSpec{"TPC", "COMPCLUSTERS"},
                                                     OutputSpec{"", ""}, // No MC labels
                                                     std::vector<int>(1, 0),
                                                     std::vector<int>(1, 0),
                                                     &hook},
                                                   false));
    }
  }

  // output matrix
  // Note: the ClusterHardware format, and with it the ClusterDecoderRawSpec, is probably a
  // deprecated legacy format
  bool produceCompClustersRoot = isEnabled(OutputType::CompClustersRoot);
  bool produceCompClustersFlat = isEnabled(OutputType::CompClustersFlat);
  bool runGPUReco = (produceTracks || produceCompClustersRoot || produceCompClustersFlat || (isEnabled(OutputType::Clusters) && caClusterer) || inputType == InputType::CompClustersFlat) && inputType != InputType::CompClustersFlatForEncode;
  bool runHWDecoder = !caClusterer && (runGPUReco || isEnabled(OutputType::Clusters));
  bool runClusterer = !caClusterer && (runHWDecoder || isEnabled(OutputType::ClustersHardware));
  bool zsDecoder = inputType == InputType::ZSRaw;
  bool runClusterEncoder = isEnabled(OutputType::EncodedClusters);

  // input matrix
  runClusterer &= inputType == InputType::Digitizer || inputType == InputType::Digits;
  runHWDecoder &= runClusterer || inputType == InputType::ClustersHardware;
  runGPUReco &= caClusterer || runHWDecoder || inputType == InputType::Clusters || decompressTPC;
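  // After combining the output and input requirements, each run* flag is set only if the stage is
  // both requested (directly or by a downstream output) and reachable from the selected input.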

  bool outRaw = inputType == InputType::Digits && isEnabled(OutputType::ZSRaw) && !isEnabled(OutputType::DisableWriter);
  // bool runZSDecode = inputType == InputType::ZSRaw;
  bool zsToDigit = inputType == InputType::ZSRaw && isEnabled(OutputType::Digits);

  if (inputType == InputType::PassThrough) {
    runGPUReco = runHWDecoder = runClusterer = runClusterEncoder = zsToDigit = false;
  }

  WorkflowSpec parallelProcessors;
  //
  // clusterer process(es)
  //
  //
  if (runClusterer) {
    parallelProcessors.push_back(o2::tpc::getClustererSpec(propagateMC));
  }

  //
  // cluster decoder process(es)
  //
  //
  if (runHWDecoder) {
    parallelProcessors.push_back(o2::tpc::getClusterDecoderRawSpec(propagateMC));
  }

  //
  // set up parallel TPC lanes
  //
  // the parallelPipeline helper distributes the subspec ids from the lane configuration
  // among the pipelines. All inputs and outputs of processors of one pipeline will be
  // cloned by the number of subspecs served by this pipeline and amended with the subspecs.
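  // (e.g. with nLanes = 2 and a lane configuration listing all 36 TPC sectors, the processors
  // above are split into two pipelines, each cloned for the 18 subspecs it serves)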
  parallelProcessors = parallelPipeline(
    parallelProcessors, nLanes,
    [&laneConfiguration]() { return laneConfiguration.size(); },
    [&laneConfiguration](size_t index) { return laneConfiguration[index]; });
  specs.insert(specs.end(), parallelProcessors.begin(), parallelProcessors.end());

  //
  // generation of processor specs for various types of outputs
  // based on generic RootTreeWriter and MakeRootTreeWriterSpec generator
  //
  // -------------------------------------------------------------------------------------------
  // the callbacks for the RootTreeWriter
  //
  // The generic writer needs a way to associate incoming data with the individual branches for
  // the TPC sectors. The sector number is transmitted as part of the sector header; the callback
  // finds the corresponding index in the vector of configured sectors.
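  // (e.g. for tpcSectors = {3, 4}, data tagged with sector 4 is written via branch index 1, and
  // getName() below appends the sector number, producing branch names like "TPCDigit_4")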
  auto getIndex = [tpcSectors](o2::framework::DataRef const& ref) {
    auto const* tpcSectorHeader = o2::framework::DataRefUtils::getHeader<o2::tpc::TPCSectorHeader*>(ref);
    if (!tpcSectorHeader) {
      throw std::runtime_error("TPC sector header missing in header stack");
    }
    if (tpcSectorHeader->sector() < 0) {
      // special data sets, don't write
      return ~(size_t)0;
    }
    size_t index = 0;
    for (auto const& sector : tpcSectors) {
      if (sector == tpcSectorHeader->sector()) {
        return index;
      }
      ++index;
    }
    throw std::runtime_error("sector " + std::to_string(tpcSectorHeader->sector()) + " not configured for writing");
  };
  auto getName = [tpcSectors](std::string base, size_t index) {
    return base + "_" + std::to_string(tpcSectors.at(index));
  };

  // -------------------------------------------------------------------------------------------
  // helper to create writer specs for different types of output
  auto fillLabels = [](TBranch& branch, std::vector<char> const& labelbuffer, DataRef const& /*ref*/) {
    o2::dataformats::IOMCTruthContainerView outputcontainer;
    auto ptr = &outputcontainer;
    auto br = framework::RootTreeWriter::remapBranch(branch, &ptr);
    outputcontainer.adopt(labelbuffer);
    br->Fill();
    br->ResetAddress();
  };

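  // makeWriterSpec builds a RootTreeWriter-based processor: one data branch and one MC branch per
  // configured TPC sector (named via getName) unless singleBranch is set, in which case a single
  // branch of each kind is written; the MC branch definition is disabled when MC is off.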
  auto makeWriterSpec = [tpcSectors, laneConfiguration, propagateMC, getIndex, getName](const char* processName,
                                                                                        const char* defaultFileName,
                                                                                        const char* defaultTreeName,
                                                                                        auto&& databranch,
                                                                                        auto&& mcbranch,
                                                                                        bool singleBranch = false) {
    if (tpcSectors.size() == 0) {
      throw std::invalid_argument(std::string("writer process configuration needs list of TPC sectors"));
    }

    auto amendInput = [tpcSectors, laneConfiguration](InputSpec& input, size_t index) {
      input.binding += std::to_string(laneConfiguration[index]);
      DataSpecUtils::updateMatchingSubspec(input, laneConfiguration[index]);
    };
    auto amendBranchDef = [laneConfiguration, amendInput, tpcSectors, getIndex, getName, singleBranch](auto&& def, bool enableMC = true) {
      if (!singleBranch) {
        def.keys = mergeInputs(def.keys, laneConfiguration.size(), amendInput);
        // the branch is disabled if set to 0
        def.nofBranches = enableMC ? tpcSectors.size() : 0;
        def.getIndex = getIndex;
        def.getName = getName;
      } else {
        // instead of the separate per-sector branches only a single one is going to be written
        def.nofBranches = enableMC ? 1 : 0;
      }
      return std::move(def);
    };

    return std::move(MakeRootTreeWriterSpec(processName, defaultFileName, defaultTreeName,
                                            std::move(amendBranchDef(databranch)),
                                            std::move(amendBranchDef(mcbranch, propagateMC)))());
  };

  //
  // a writer process for digits
  //
  // selected by output type 'digits'
  if (isEnabled(OutputType::Digits) && !isEnabled(OutputType::DisableWriter)) {
    using DigitOutputType = std::vector<o2::tpc::Digit>;
    specs.push_back(makeWriterSpec("tpc-digits-writer",
                                   inputType == InputType::ZSRaw ? "tpc-zs-digits.root" : inputType == InputType::Digits ? "tpc-filtered-digits.root"
                                                                                                                          : "tpcdigits.root",
                                   "o2sim",
                                   BranchDefinition<DigitOutputType>{InputSpec{"data", "TPC", "DIGITS", 0},
                                                                     "TPCDigit",
                                                                     "digit-branch-name"},
                                   BranchDefinition<MCLabelContainer>{InputSpec{"mc", "TPC", "DIGITSMCTR", 0},
                                                                      "TPCDigitMCTruth",
                                                                      "digitmc-branch-name"}));
  }

  //
  // a writer process for hardware clusters
  //
  // selected by output type 'clustershardware'
  if (isEnabled(OutputType::ClustersHardware) && !isEnabled(OutputType::DisableWriter)) {
    specs.push_back(makeWriterSpec("tpc-clusterhardware-writer",
                                   inputType == InputType::ClustersHardware ? "tpc-filtered-clustershardware.root" : "tpc-clustershardware.root",
                                   "tpcclustershardware",
                                   BranchDefinition<const char*>{InputSpec{"data", "TPC", "CLUSTERHW", 0},
                                                                 "TPCClusterHw",
                                                                 "databranch"},
                                   BranchDefinition<MCLabelContainer>{InputSpec{"mc", "TPC", "CLUSTERHWMCLBL", 0},
                                                                      "TPCClusterHwMCTruth",
                                                                      "mcbranch"}));
  }

  //
  // a writer process for TPC native clusters
  //
  // selected by output type 'clusters'
  if (isEnabled(OutputType::Clusters) && !isEnabled(OutputType::DisableWriter)) {
    // if the caClusterer is enabled, only one data set covering the full TPC is produced, and the
    // writer is configured to write a single branch
    specs.push_back(makeWriterSpec(filteredInp ? "tpc-native-cluster-writer_filtered" : "tpc-native-cluster-writer",
                                   (inputType == InputType::Clusters || filteredInp) ? "tpc-filtered-native-clusters.root" : "tpc-native-clusters.root",
                                   "tpcrec",
                                   BranchDefinition<const char*>{InputSpec{"data", ConcreteDataTypeMatcher{"TPC", filteredInp ? o2::header::DataDescription("CLUSTERNATIVEF") : o2::header::DataDescription("CLUSTERNATIVE")}},
                                                                 "TPCClusterNative",
                                                                 "databranch"},
                                   BranchDefinition<std::vector<char>>{InputSpec{"mc", ConcreteDataTypeMatcher{"TPC", filteredInp ? o2::header::DataDescription("CLNATIVEMCLBLF") : o2::header::DataDescription("CLNATIVEMCLBL")}},
                                                                       "TPCClusterNativeMCTruth",
                                                                       "mcbranch", fillLabels},
                                   (caClusterer || decompressTPC || inputType == InputType::PassThrough) && !isEnabled(OutputType::SendClustersPerSector)));
  }

  if ((isEnabled(OutputType::TPCTriggers) || (caClusterer && runGPUReco)) && !isEnabled(OutputType::DisableWriter)) {
    specs.push_back(o2::tpc::getTPCTriggerWriterSpec());
  }

  if (zsOnTheFly) {
    specs.emplace_back(o2::tpc::getZSEncoderSpec(tpcSectors, outRaw, tpcSectorMask));
  }

  if (zsToDigit) {
    specs.emplace_back(o2::tpc::getZStoDigitsSpec(tpcSectors));
  }

  //
  // tracker process
  //
  // selected by output type 'tracks'
  if (runGPUReco) {
    o2::gpu::GPURecoWorkflowSpec::Config cfg;
    cfg.runTPCTracking = true;
    cfg.lumiScaleType = sclOpts.lumiType;
    cfg.lumiScaleMode = sclOpts.lumiMode;
    cfg.enableCTPLumi = sclOpts.requestCTPLumi;
    cfg.decompressTPC = decompressTPC;
    cfg.decompressTPCFromROOT = decompressTPC && inputType == InputType::CompClustersRoot;
    cfg.caClusterer = caClusterer;
    cfg.zsDecoder = zsDecoder;
    cfg.zsOnTheFly = zsOnTheFly;
    cfg.outputTracks = produceTracks;
    cfg.outputCompClustersRoot = produceCompClustersRoot;
    cfg.outputCompClustersFlat = produceCompClustersFlat || runClusterEncoder;
    cfg.outputCAClusters = isEnabled(OutputType::Clusters) && (caClusterer || decompressTPC);
    cfg.outputQA = isEnabled(OutputType::QA);
    cfg.outputSharedClusterMap = (isEnabled(OutputType::Clusters) || inputType == InputType::Clusters) && isEnabled(OutputType::Tracks) && !isEnabled(OutputType::NoSharedClusterMap);
    cfg.processMC = propagateMC;
    cfg.askDISTSTF = askDISTSTF;
    cfg.tpcDeadMapSources = deadMapSources;
    cfg.tpcUseMCTimeGain = useMCTimeGain;

    Inputs ggInputs;
    auto ggRequest = std::make_shared<o2::base::GRPGeomRequest>(false, true, false, true, true, o2::base::GRPGeomRequest::Aligned, ggInputs, true);
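    // ggRequest describes the geometry/GRP CCDB objects required by the tracker; the corresponding
    // inputs collected in ggInputs are appended to the tracker inputs below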

    auto task = std::make_shared<o2::gpu::GPURecoWorkflowSpec>(policyData, cfg, tpcSectors, tpcSectorMask, ggRequest);
    gTask = task;
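    // keep the task in the file-static gTask so that it can be explicitly deinitialized later (see end of file)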
    Inputs taskInputs = task->inputs();
    Options taskOptions = task->options();
    std::move(ggInputs.begin(), ggInputs.end(), std::back_inserter(taskInputs));

    specs.emplace_back(DataProcessorSpec{
      "tpc-tracker",
      taskInputs,
      task->outputs(),
      AlgorithmSpec{adoptTask<o2::gpu::GPURecoWorkflowSpec>(task)},
      taskOptions});
  }

  //
  // entropy encoder process
  //
  // selected by output type 'encoded-clusters'
  if (runClusterEncoder) {
    specs.emplace_back(o2::tpc::getEntropyEncoderSpec(!runGPUReco && inputType != InputType::CompClustersFlatForEncode, selIR));
  }

  //
  // a writer process for tracks
  //
  // selected by output type 'tracks'
  if (produceTracks && !isEnabled(OutputType::DisableWriter)) {
    // defining the track writer process using the generic RootTreeWriter and generator tool
    //
    // defaults
    const char* processName = filteredInp ? "tpc-track-writer_filtered" : "tpc-track-writer";
    const char* defaultFileName = filteredInp ? "tpctracks_filtered.root" : "tpctracks.root";
    const char* defaultTreeName = "tpcrec";

    // branch definitions for RootTreeWriter spec
    using TrackOutputType = std::vector<o2::tpc::TrackTPC>;

    using ClusRefsOutputType = std::vector<o2::tpc::TPCClRefElem>;
    // a spectator callback which will be invoked by the tree writer with the extracted object
    // we are using it for printing a log message
    auto logger = BranchDefinition<TrackOutputType>::Spectator([](TrackOutputType const& tracks) {
      LOG(info) << "writing " << tracks.size() << " track(s)";
    });
    auto tracksdef = BranchDefinition<TrackOutputType>{InputSpec{"inputTracks", "TPC", filteredInp ? o2::header::DataDescription("TRACKSF") : o2::header::DataDescription("TRACKS"), 0}, //
                                                       "TPCTracks", "track-branch-name",                                                                                                //
                                                       1,                                                                                                                               //
                                                       logger};                                                                                                                         //
    auto clrefdef = BranchDefinition<ClusRefsOutputType>{InputSpec{"inputClusRef", "TPC", filteredInp ? o2::header::DataDescription("CLUSREFSF") : o2::header::DataDescription("CLUSREFS"), 0}, //
                                                         "ClusRefs", "trackclusref-branch-name"};                                                                                               //
    auto mcdef = BranchDefinition<std::vector<o2::MCCompLabel>>{InputSpec{"mcinput", "TPC", filteredInp ? o2::header::DataDescription("TRACKSMCLBLF") : o2::header::DataDescription("TRACKSMCLBL"), 0}, //
                                                                "TPCTracksMCTruth",                                                                                                                     //
                                                                (propagateMC ? 1 : 0),                                                                                                                  //
                                                                "trackmc-branch-name"};                                                                                                                 //

    // depending on the MC propagation flag, branch definition for MC labels is disabled
    specs.push_back(MakeRootTreeWriterSpec(processName, defaultFileName, defaultTreeName,
                                           std::move(tracksdef), std::move(clrefdef),
                                           std::move(mcdef))());
  }

  //
  // a writer process for the compressed clusters container
  //
  // selected by output type 'compressed-clusters-root'
  if (produceCompClustersRoot && !isEnabled(OutputType::DisableWriter)) {
    // defining the compressed cluster writer process using the generic RootTreeWriter and generator tool
    //
    // defaults
    const char* processName = "tpc-compcluster-writer";
    const char* defaultFileName = "tpc-compclusters.root";
    const char* defaultTreeName = "tpcrec";

    // branch definitions for RootTreeWriter spec
    using CCluSerializedType = ROOTSerialized<CompressedClustersROOT>;
    auto ccldef = BranchDefinition<CCluSerializedType>{InputSpec{"inputCompCl", "TPC", "COMPCLUSTERS"}, //
                                                       "TPCCompClusters_0", "compcluster-branch-name"}; //

    specs.push_back(MakeRootTreeWriterSpec(processName, defaultFileName, defaultTreeName, //
                                           std::move(ccldef))());                         //
  }

  return std::move(specs);
}

{
  if (gTask) {
    gTask->deinitialize();
  }
}

} // end namespace reco_workflow
} // end namespace tpc
} // end namespace o2