Project
Loading...
Searching...
No Matches
AODWriterHelpers.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
14#include "AODWriterHelpers.h"
24#include "Framework/Signpost.h"
25
26#include <Monitoring/Monitoring.h>
27#include <TDirectory.h>
28#include <TFile.h>
29#include <TFile.h>
30#include <TTree.h>
31#include <TMap.h>
32#include <TObjString.h>
33#include <arrow/table.h>
34
// Dynamic signpost log channel used throughout this file to trace the
// deserialization, merging and writing of output objects/histograms.
35O2_DECLARE_DYNAMIC_LOG(histogram_registry);
36
// NOTE(review): the namespace declaration (doc line 37) is not visible in
// this listing; the closing brace at the end of the file identifies it as
// `namespace o2::framework::writers` — confirm against the original source.
38{
39
48
// Record for one deserialized output object handled by the histogram/object
// writer below. NOTE(review): the struct declaration line (doc line 49) is
// not visible in this listing; these members are used as the fields of
// `InputObject` in the merger lambda further down — confirm the declaration.
// ROOT dictionary class of the payload, obtained via TBuffer::ReadClass();
// nullptr when the class info could not be read.
50 TClass* kind = nullptr;
// Type-erased pointer to the deserialized ROOT object (ReadObjectAny);
// deleted after the merged object has been written out.
51 void* obj = nullptr;
// Container name: the registry folder name for HistogramRegistry sources,
// otherwise the object's own name.
52 std::string container;
// Object name as reported by TNamed::GetName(); may contain '/' path parts.
53 std::string name;
// Number of pipeline parts still expected for this (name, task) pair;
// initialised from OutputObjHeader::mPipelineSize and decremented per part.
54 int count = -1;
55};
56
// Maps each OutputObj handling policy to the ROOT file that merged objects
// of that policy are written into (looked up at write time; a policy with
// no entry here is silently not written).
57const static std::unordered_map<OutputObjHandlingPolicy, std::string> ROOTfileNames = {{OutputObjHandlingPolicy::AnalysisObject, "AnalysisResults.root"},
58                                                                                       {OutputObjHandlingPolicy::QAObject, "QAResults.root"}};
59
// Builds the AlgorithmSpec that writes AOD arrow tables to ROOT TTrees.
// NOTE(review): the function signature (doc line 60) is not visible in this
// listing; per the symbol index it is
//   static AlgorithmSpec getOutputTTreeWriter(ConfigContext const& ctx)
// — confirm against the original source. `dod` (the DataOutputDirector) is
// captured below and is presumably obtained on the missing line(s) as well.
61{
// Compression default 505 — presumably a ROOT compression setting
// (algorithm*100 + level); overridable via --aod-writer-compression.
63 int compressionLevel = 505;
64 if (ctx.options().hasOption("aod-writer-compression")) {
65 compressionLevel = ctx.options().get<int>("aod-writer-compression");
66 }
// Init lambda: decides once whether anything must be written, registers the
// end-of-stream callback, then returns the per-timeframe processing functor.
67 return AlgorithmSpec{[dod, compressionLevel](InitContext& ic) -> std::function<void(ProcessingContext&)> {
68 auto outputInputs = ic.services().get<DanglingEdgesContext>().outputsInputsAOD;
69 LOGP(debug, "======== getGlobalAODSink::Init ==========");
70
71 // find out if any table needs to be saved
72 bool hasOutputsToWrite = false;
73 for (auto& outobj : outputInputs) {
74 auto ds = dod->getDataOutputDescriptors(outobj);
75 if (ds.size() > 0) {
76 hasOutputsToWrite = true;
77 break;
78 }
79 }
80
81 // if nothing needs to be saved then return a trivial functor
82 // this happens when nothing needs to be saved but there are dangling outputs
83 if (!hasOutputsToWrite) {
// The static flag makes the informational message appear only once.
84 return [](ProcessingContext&) mutable -> void {
85 static bool once = false;
86 if (!once) {
87 LOG(info) << "No AODs to be saved.";
88 once = true;
89 }
90 };
91 }
92
93 // end of data functor is called at the end of the data stream
94 auto endofdatacb = [dod](EndOfStreamContext& context) {
95 dod->closeDataFiles();
96 context.services().get<ControlService>().readyToQuit(QuitRequest::Me);
97 };
98
99 auto& callbacks = ic.services().get<CallbackService>();
100 callbacks.set<CallbackService::Id::EndOfStream>(endofdatacb);
101
102 // prepare map<uint64_t, uint64_t>(startTime, tfNumber)
103 std::map<uint64_t, uint64_t> tfNumbers;
104 std::map<uint64_t, std::string> tfFilenames;
105
106 std::vector<TString> aodMetaDataKeys;
107 std::vector<TString> aodMetaDataVals;
108
109 // this functor is called once per time frame
// The maps/vectors above are captured by value into this mutable lambda,
// so they persist across invocations. NOTE(review): entries are inserted
// per timeframe but never erased — presumably acceptable for a run's
// lifetime; verify memory growth for very long runs.
110 return [dod, tfNumbers, tfFilenames, aodMetaDataKeys, aodMetaDataVals, compressionLevel](ProcessingContext& pc) mutable -> void {
111 LOGP(debug, "======== getGlobalAODSink::processing ==========");
112 LOGP(debug, " processing data set with {} entries", pc.inputs().size());
113
114 // return immediately if pc.inputs() is empty. This should never happen!
115 if (pc.inputs().size() == 0) {
116 LOGP(info, "No inputs available!");
117 return;
118 }
119
120 // update tfNumbers
// "tfn" carries the timeframe number; it is keyed by the timeslot's
// startTime so the AOD refs below can look their TF number up.
121 uint64_t startTime = 0;
122 uint64_t tfNumber = 0;
123 auto ref = pc.inputs().get("tfn");
124 if (ref.spec && ref.payload) {
125 startTime = DataRefUtils::getHeader<DataProcessingHeader*>(ref)->startTime;
126 tfNumber = pc.inputs().get<uint64_t>("tfn");
127 tfNumbers.insert(std::pair<uint64_t, uint64_t>(startTime, tfNumber));
128 }
129 // update tfFilenames
// "tff" carries the name of the AOD input file for this timeframe.
130 std::string aodInputFile;
131 auto ref2 = pc.inputs().get("tff");
132 if (ref2.spec && ref2.payload) {
133 startTime = DataRefUtils::getHeader<DataProcessingHeader*>(ref2)->startTime;
134 aodInputFile = pc.inputs().get<std::string>("tff");
135 tfFilenames.insert(std::pair<uint64_t, std::string>(startTime, aodInputFile));
136 }
137
138 // close all output files if one has reached size limit
139 dod->checkFileSizes();
140
141 // loop over the DataRefs which are contained in pc.inputs()
// Note: this loop's `ref` shadows the "tfn" ref declared above.
142 for (const auto& ref : pc.inputs()) {
143 if (!ref.spec) {
144 LOGP(debug, "Invalid input will be skipped!");
145 continue;
146 }
147
148 // get metadata
149 if (DataSpecUtils::partialMatch(*ref.spec, header::DataDescription("AODMetadataKeys"))) {
150 aodMetaDataKeys = pc.inputs().get<std::vector<TString>>(ref.spec->binding);
151 }
152 if (DataSpecUtils::partialMatch(*ref.spec, header::DataDescription("AODMetadataVals"))) {
153 aodMetaDataVals = pc.inputs().get<std::vector<TString>>(ref.spec->binding);
154 }
155
156 // skip non-AOD refs
157 if (!DataSpecUtils::partialMatch(*ref.spec, AODOrigins)) {
158 continue;
159 }
160 startTime = DataRefUtils::getHeader<DataProcessingHeader*>(ref)->startTime;
161
162 // does this need to be saved?
163 auto dh = DataRefUtils::getHeader<header::DataHeader*>(ref);
164 auto tableName = dh->dataDescription.as<std::string>();
165 auto ds = dod->getDataOutputDescriptors(*dh);
166 if (ds.empty()) {
167 continue;
168 }
169
170 // get TF number from startTime
// Round the TF number down to a multiple of the merge window, so that
// all timeframes merged together share the same file/folder key.
171 auto it = tfNumbers.find(startTime);
172 if (it != tfNumbers.end()) {
173 tfNumber = (it->second / dod->getNumberTimeFramesToMerge()) * dod->getNumberTimeFramesToMerge();
174 } else {
175 LOGP(fatal, "No time frame number found for output with start time {}", startTime);
176 throw std::runtime_error("Processing is stopped!");
177 }
178 // get aod input file from startTime
179 auto it2 = tfFilenames.find(startTime);
180 if (it2 != tfFilenames.end()) {
181 aodInputFile = it2->second;
182 }
183
184 // get the TableConsumer and corresponding arrow table
185 if (ref.header == nullptr) {
186 LOGP(error, "No header for message {}:{}", ref.spec->binding, DataSpecUtils::describe(*ref.spec));
187 continue;
188 }
189
190 auto table = pc.inputs().get<TableConsumer>(std::get<ConcreteDataMatcher>(ref.spec->matcher))->asArrowTable();
191 if (!table->Validate().ok()) {
192 LOGP(warning, "The table \"{}\" is not valid and will not be saved!", tableName);
193 continue;
194 }
195 if (table->schema()->fields().empty()) {
196 LOGP(debug, "The table \"{}\" is empty but will be saved anyway!", tableName);
197 }
198
199 // loop over all DataOutputDescriptors
200 // a table can be saved in multiple ways
201 // e.g. different selections of columns to different files
202 for (auto d : ds) {
203 auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile, compressionLevel);
204 auto treename = fileAndFolder.folderName + "/" + d->treename;
205 TableToTree ta2tr(table,
206 fileAndFolder.file,
207 treename.c_str());
208
209 // update metadata
// Write the metadata TMap only once per file, and only if both key and
// value vectors were received.
210 if (fileAndFolder.file->FindObjectAny("metaData")) {
211 LOGF(debug, "Metadata: target file %s already has metadata, preserving it", fileAndFolder.file->GetName());
212 } else if (!aodMetaDataKeys.empty() && !aodMetaDataVals.empty()) {
213 TMap aodMetaDataMap;
214 for (uint32_t imd = 0; imd < aodMetaDataKeys.size(); imd++) {
215 aodMetaDataMap.Add(new TObjString(aodMetaDataKeys[imd]), new TObjString(aodMetaDataVals[imd]));
216 }
217 fileAndFolder.file->WriteObject(&aodMetaDataMap, "metaData", "Overwrite");
218 }
219
// Either save only the requested columns, or everything.
220 if (!d->colnames.empty()) {
221 for (auto& cn : d->colnames) {
222 auto idx = table->schema()->GetFieldIndex(cn);
// NOTE(review): table->column(idx) and schema()->field(idx) are
// evaluated BEFORE the idx != -1 guard below; for an unknown column
// name GetFieldIndex returns -1 and these calls index out of range.
// The two lookups should be moved inside the `if (idx != -1)` block.
223 auto col = table->column(idx);
224 auto field = table->schema()->field(idx);
225 if (idx != -1) {
226 ta2tr.addBranch(col, field);
227 }
228 }
229 } else {
230 ta2tr.addAllBranches();
231 }
// Actually fill and write the TTree for this descriptor.
232 ta2tr.process();
233 }
234 }
235 };
236 }
237
238 };
239}
240
// Builds the AlgorithmSpec that receives serialized output objects and
// histograms from analysis tasks, merges the parts arriving from parallel
// pipeline lanes, and writes the merged objects to the ROOT files selected
// by their handling policy (see ROOTfileNames).
// NOTE(review): the function signature (doc line 241) is not visible in this
// listing; per the symbol index it is
//   static AlgorithmSpec getOutputObjHistWriter(ConfigContext const& context)
// — confirm against the original source.
242{
243 return AlgorithmSpec{[](InitContext& ic) -> std::function<void(ProcessingContext&)> {
244 using namespace monitoring;
245 auto& dec = ic.services().get<DanglingEdgesContext>();
// Task-hash -> task-name map and task-hash -> object-bindings map, used to
// validate incoming objects against what the workflow declared.
246 auto tskmap = dec.outTskMap;
247 auto objmap = dec.outObjHistMap;
248 auto& callbacks = ic.services().get<CallbackService>();
// Accumulator of (route, object) pairs being merged across pipeline lanes.
249 auto inputObjects = std::make_shared<std::vector<std::pair<InputObjectRoute, InputObject>>>();
250
// NOTE(review): the declaration of `f` (doc line 251) is not visible in
// this listing. Given its use in the non-capturing endofdatacb lambda
// below, it must have static storage — presumably a static array of TFile*
// indexed by OutputObjHandlingPolicy. Confirm against the original source.
252 for (auto i = 0u; i < OutputObjHandlingPolicy::numPolicies; ++i) {
253 f[i] = nullptr;
254 }
255
// Function-local statics: persist across invocations of this init lambda
// and are shared by all instances — track the last directory/file used so
// directories are only (re)created when the target changes.
256 static std::string currentDirectory = "";
257 static std::string currentFile = "";
258
// At end of stream: close all opened output files and quit this processor.
259 auto endofdatacb = [inputObjects](EndOfStreamContext& context) {
260 LOG(debug) << "Writing merged objects and histograms to file";
261 if (inputObjects->empty()) {
262 LOG(error) << "Output object map is empty!";
263 context.services().get<ControlService>().readyToQuit(QuitRequest::Me);
264 return;
265 }
266 for (auto i = 0u; i < OutputObjHandlingPolicy::numPolicies; ++i) {
267 if (f[i] != nullptr) {
268 f[i]->Close();
269 }
270 }
271 LOG(debug) << "All outputs merged in their respective target files";
272 context.services().get<ControlService>().readyToQuit(QuitRequest::Me);
273 };
274
275 callbacks.set<CallbackService::Id::EndOfStream>(endofdatacb);
// Per-message functor: merges every part of the multipart input "x".
276 return [inputObjects, objmap, tskmap](ProcessingContext& pc) mutable -> void {
// mergePart: validate headers, deserialize one ROOT object, merge it with
// any previously received lanes, and write it once all lanes arrived.
277 auto mergePart = [&inputObjects, &objmap, &tskmap](DataRef const& ref) {
278 O2_SIGNPOST_ID_GENERATE(hid, histogram_registry);
279 O2_SIGNPOST_START(histogram_registry, hid, "mergePart", "Merging histogram");
280 if (!ref.header) {
281 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "Header not found.");
282 return;
283 }
284 auto datah = o2::header::get<o2::header::DataHeader*>(ref.header);
285 if (!datah) {
286 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No data header in stack");
287 return;
288 }
289
290 if (!ref.payload) {
291 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "Payload not found for %{public}s/%{public}s/%d",
292 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
293 datah->subSpecification);
294 return;
295 }
296
297 auto objh = o2::header::get<o2::framework::OutputObjHeader*>(ref.header);
298 if (!objh) {
299 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No output object header in stack of %{public}s/%{public}s/%d.",
300 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
301 datah->subSpecification);
302 return;
303 }
304
// First pass over the buffer reads only the class info; the offset/map is
// then reset so ReadObjectAny below can deserialize from the start.
305 InputObject obj;
306 FairInputTBuffer tm(const_cast<char*>(ref.payload), static_cast<int>(datah->payloadSize));
307 tm.InitMap();
308 obj.kind = tm.ReadClass();
309 tm.SetBufferOffset(0);
310 tm.ResetMap();
311 O2_SIGNPOST_ID_GENERATE(did, histogram_registry);
312 O2_SIGNPOST_START(histogram_registry, did, "initialising root", "Starting deserialization of %{public}s/%{public}s/%d",
313 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
314 datah->subSpecification);
315 if (obj.kind == nullptr) {
316 O2_SIGNPOST_END(histogram_registry, did, "initialising root", "Failed to deserialise");
317 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "Cannot read class info from buffer of %{public}s/%{public}s/%d.",
318 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
319 datah->subSpecification);
320 return;
321 }
322 O2_SIGNPOST_END(histogram_registry, did, "initialising root", "Done init.");
323
324 auto policy = objh->mPolicy;
325 auto sourceType = objh->mSourceType;
326 auto hash = objh->mTaskHash;
327 O2_SIGNPOST_START(histogram_registry, did, "deserialization", "Starting deserialization of %{public}s/%{public}s/%d",
328 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
329 datah->subSpecification);
330
// Deserialize the object itself; assumes the payload derives from TNamed
// so its name can be read. NOTE(review): obj.obj is not null-checked
// before the static_cast/GetName — verify ReadObjectAny cannot fail here.
331 obj.obj = tm.ReadObjectAny(obj.kind);
332 auto* named = static_cast<TNamed*>(obj.obj);
333 obj.name = named->GetName();
334 O2_SIGNPOST_END(histogram_registry, did, "deserialization", "Done deserialization.");
335 // If we have a folder, we assume the first element of the path
336 // to be the name of the registry.
337 bool folderForContainer = false;
338 if (sourceType == HistogramRegistrySource) {
339 folderForContainer = objh->createContainer != 0;
340 obj.container = objh->containerName;
341 } else {
342 obj.container = obj.name;
343 }
// Cross-check the object against the declared task and binding maps;
// unknown tasks or containers are rejected.
344 auto hpos = std::find_if(tskmap.begin(), tskmap.end(), [&](auto&& x) { return x.id == hash; });
345 if (hpos == tskmap.end()) {
346 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No task found for hash %d.", hash);
347 return;
348 }
349 auto taskname = hpos->name;
350 auto opos = std::find_if(objmap.begin(), objmap.end(), [&](auto&& x) { return x.id == hash; });
351 if (opos == objmap.end()) {
352 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No object list found for task %{public}s (hash=%d).",
353 taskname.c_str(), hash);
354 return;
355 }
356 auto objects = opos->bindings;
357 if (std::find(objects.begin(), objects.end(), obj.container) == objects.end()) {
358 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No container %{public}s in map for task %{public}s.",
359 obj.container.c_str(), taskname.c_str());
360 return;
361 }
// Objects are identified by (name hash, task hash) across pipeline lanes.
362 auto nameHash = runtime_hash(obj.name.c_str());
363 InputObjectRoute key{obj.name, nameHash, taskname, hash, policy, sourceType};
364 auto existing = std::find_if(inputObjects->begin(), inputObjects->end(), [&](auto&& x) { return (x.first.uniqueId == nameHash) && (x.first.taskHash == hash); });
365 // If it's the first one, we just add it to the list.
366 O2_SIGNPOST_START(histogram_registry, did, "merging", "Starting merging of %{public}s/%{public}s/%d",
367 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
368 datah->subSpecification);
369 if (existing == inputObjects->end()) {
// First lane: remember how many parts we still expect (pipeline size).
370 obj.count = objh->mPipelineSize;
371 inputObjects->emplace_back(key, obj);
372 existing = inputObjects->end() - 1;
373 } else {
374 obj.count = existing->second.count;
375 // Otherwise, we merge it with the existing one.
// Uses the ROOT-generated merge function of the class (TClass::GetMerge).
376 auto merger = existing->second.kind->GetMerge();
377 if (!merger) {
378 O2_SIGNPOST_END(histogram_registry, did, "merging", "Unabled to merge");
379 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "merging", "Already one unmergeable object found for %{public}s", obj.name.c_str());
380 return;
381 }
382 TList coll;
383 coll.Add(static_cast<TObject*>(obj.obj));
384 merger(existing->second.obj, &coll, nullptr);
385 }
386 // We expect as many objects as the pipeline size, for
387 // a given object name and task hash.
388 existing->second.count -= 1;
389
// Not all lanes arrived yet: keep the partial merge and wait.
390 if (existing->second.count != 0) {
391 O2_SIGNPOST_END(histogram_registry, did, "merging", "Done partial merging.");
392 O2_SIGNPOST_END(histogram_registry, hid, "mergePart", "Pipeline lanes still missing.");
393 return;
394 }
395 O2_SIGNPOST_END(histogram_registry, did, "merging", "Done merging.");
396 // Write the object here.
// NOTE(review): `entry` is a by-value copy of existing->second; the
// `delete (TObject*)entry.obj; entry.obj = nullptr;` below only nulls the
// copy — the pointer kept in `inputObjects` (existing->second.obj) still
// refers to the deleted object. Verify nothing dereferences it afterwards
// (the endofdatacb only checks emptiness, but this is fragile).
397 auto route = existing->first;
398 auto entry = existing->second;
// Objects whose policy has no target file are silently skipped.
399 auto file = ROOTfileNames.find(route.policy);
400 if (file == ROOTfileNames.end()) {
401 O2_SIGNPOST_END(histogram_registry, hid, "mergePart", "Not matching any file.");
402 return;
403 }
404 O2_SIGNPOST_START(histogram_registry, did, "writing", "Starting writing of %{public}s/%{public}s/%d",
405 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
406 datah->subSpecification);
// Lazily open one output file per policy (RECREATE truncates).
407 auto filename = file->second;
408 if (f[route.policy] == nullptr) {
409 f[route.policy] = TFile::Open(filename.c_str(), "RECREATE");
410 }
411 auto nextDirectory = route.directory;
412 if ((nextDirectory != currentDirectory) || (filename != currentFile)) {
413 if (!f[route.policy]->FindKey(nextDirectory.c_str())) {
414 f[route.policy]->mkdir(nextDirectory.c_str());
415 }
416 currentDirectory = nextDirectory;
417 currentFile = filename;
418 }
419
420 // FIXME: handle folders
421 f[route.policy]->cd("/");
422 auto* currentDir = f[route.policy]->GetDirectory(currentDirectory.c_str());
423
424 // In case we need a folder for the registry, let's create it.
425 if (folderForContainer) {
426 auto* histogramRegistryFolder = currentDir->GetDirectory(obj.container.data());
427 if (!histogramRegistryFolder) {
428 histogramRegistryFolder = currentDir->mkdir(obj.container.c_str(), "", kTRUE);
429 }
430 currentDir = histogramRegistryFolder;
431 }
432
433 // The name contains a path...
434 int objSize = 0;
435 if (sourceType == HistogramRegistrySource) {
// Registry objects may carry a '/'-separated path in their name: create
// the intermediate directory and write under the leaf name.
436 TDirectory* currentFolder = currentDir;
437 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Toplevel folder is %{public}s.",
438 currentDir->GetName());
439 std::string objName = entry.name;
440 auto lastSlash = entry.name.rfind('/');
441
442 if (lastSlash != std::string::npos) {
443 auto dirname = entry.name.substr(0, lastSlash);
444 objName = entry.name.substr(lastSlash + 1);
445 currentFolder = currentDir->GetDirectory(dirname.c_str());
446 if (!currentFolder) {
447 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Creating folder %{public}s",
448 dirname.c_str());
449 currentFolder = currentDir->mkdir(dirname.c_str(), "", kTRUE);
450 } else {
451 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Folder %{public}s already there.",
452 currentFolder->GetName());
453 }
454 }
455 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Writing %{public}s of kind %{public}s in %{public}s",
456 entry.name.c_str(), entry.kind->GetName(), currentDir->GetName());
457 objSize = currentFolder->WriteObjectAny(entry.obj, entry.kind, objName.c_str());
458 O2_SIGNPOST_END(histogram_registry, did, "writing", "End writing %{public}s", entry.name.c_str());
459 delete (TObject*)entry.obj;
460 entry.obj = nullptr;
461 } else {
// Non-registry objects are written directly under the current directory.
462 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Writing %{public}s of kind %{public}s in %{public}s",
463 entry.name.c_str(), entry.kind->GetName(), currentDir->GetName());
464 objSize = currentDir->WriteObjectAny(entry.obj, entry.kind, entry.name.c_str());
465 O2_SIGNPOST_END(histogram_registry, did, "writing", "End writing %{public}s", entry.name.c_str());
466 delete (TObject*)entry.obj;
467 entry.obj = nullptr;
468 }
469 O2_SIGNPOST_END(histogram_registry, hid, "mergePart", "Done merging object of %d bytes.", objSize);
470 };
// Drive mergePart over each part of the multipart message bound as "x".
471 O2_SIGNPOST_ID_GENERATE(rid, histogram_registry);
472 O2_SIGNPOST_START(histogram_registry, rid, "processParts", "Start merging %zu parts received together.", pc.inputs().getNofParts(0));
473 for (auto pi = 0U; pi < pc.inputs().getNofParts(0); ++pi) {
474 mergePart(pc.inputs().get("x", pi));
475 }
476 O2_SIGNPOST_END(histogram_registry, rid, "processParts", "Done histograms in multipart message.");
477 };
478 }};
479}
480} // namespace o2::framework::writers
std::vector< std::string > objects
uint32_t hash
std::ostringstream debug
int32_t i
uint32_t col
Definition RawData.h:4
#define O2_DECLARE_DYNAMIC_LOG(name)
Definition Signpost.h:489
#define O2_SIGNPOST_END(log, id, name, format,...)
Definition Signpost.h:608
#define O2_SIGNPOST_ID_GENERATE(name, log)
Definition Signpost.h:506
#define O2_SIGNPOST_EVENT_EMIT(log, id, name, format,...)
Definition Signpost.h:522
#define O2_SIGNPOST_END_WITH_ERROR(log, id, name, format,...)
Definition Signpost.h:616
#define O2_SIGNPOST_START(log, id, name, format,...)
Definition Signpost.h:602
constexpr uint32_t runtime_hash(char const *str)
StringRef key
ConfigParamRegistry & options() const
bool hasOption(const char *key) const
void addBranch(std::shared_ptr< arrow::ChunkedArray > const &column, std::shared_ptr< arrow::Field > const &field)
std::shared_ptr< TTree > process()
GLint GLenum GLint x
Definition glcorearb.h:403
GLint GLsizei count
Definition glcorearb.h:399
GLuint entry
Definition glcorearb.h:5735
GLdouble f
Definition glcorearb.h:310
GLint ref
Definition glcorearb.h:291
@ Me
Only quit this data processor.
OutputObjHandlingPolicy
Policy enum to determine OutputObj handling when writing.
std::string filename()
static std::shared_ptr< DataOutputDirector > getDataOutputDirector(ConfigContext const &ctx)
Get the data director.
std::vector< OutputTaskInfo > outTskMap
static bool partialMatch(InputSpec const &spec, o2::header::DataOrigin const &origin)
static std::string describe(InputSpec const &spec)
static AlgorithmSpec getOutputObjHistWriter(ConfigContext const &context)
static AlgorithmSpec getOutputTTreeWriter(ConfigContext const &context)
o2::mch::DsIndex ds
LOG(info)<< "Compressed in "<< sw.CpuTime()<< " s"