// AODWriterHelpers.cxx
// NOTE(review): this file was recovered from a Doxygen-generated source
// listing; the HTML navigation text ("Project", "Loading...", "Searching...",
// "No Matches", "Go to the documentation of this file.") is preserved here
// as a comment only so the file remains a valid translation unit.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
#include "AODWriterHelpers.h"

#include "Framework/Signpost.h"

#include <Monitoring/Monitoring.h>

#include <TDirectory.h>
#include <TFile.h>
#include <TList.h>
#include <TMap.h>
#include <TNamed.h>
#include <TObjString.h>
#include <TString.h>
#include <TTree.h>

#include <arrow/table.h>

#include <algorithm>
#include <chrono>
#include <cstdint>
#include <functional>
#include <ios>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
37
38O2_DECLARE_DYNAMIC_LOG(histogram_registry);
39
41{
42
51
53 TClass* kind = nullptr;
54 void* obj = nullptr;
55 std::string container;
56 std::string name;
57 int count = -1;
58};
59
60const static std::unordered_map<OutputObjHandlingPolicy, std::string> ROOTfileNames = {{OutputObjHandlingPolicy::AnalysisObject, "AnalysisResults.root"},
61 {OutputObjHandlingPolicy::QAObject, "QAResults.root"}};
62
64{
65 auto& ac = ctx.services().get<AnalysisContext>();
67 int compressionLevel = 505;
68 if (ctx.options().hasOption("aod-writer-compression")) {
69 compressionLevel = ctx.options().get<int>("aod-writer-compression");
70 }
71 return AlgorithmSpec{[dod, outputInputs = ac.outputsInputsAOD, compressionLevel](InitContext& ic) -> std::function<void(ProcessingContext&)> {
72 LOGP(debug, "======== getGlobalAODSink::Init ==========");
73
74 // find out if any table needs to be saved
75 bool hasOutputsToWrite = false;
76 for (auto& outobj : outputInputs) {
77 auto ds = dod->getDataOutputDescriptors(outobj);
78 if (ds.size() > 0) {
79 hasOutputsToWrite = true;
80 break;
81 }
82 }
83
84 // if nothing needs to be saved then return a trivial functor
85 // this happens when nothing needs to be saved but there are dangling outputs
86 if (!hasOutputsToWrite) {
87 return [](ProcessingContext&) mutable -> void {
88 static bool once = false;
89 if (!once) {
90 LOG(info) << "No AODs to be saved.";
91 once = true;
92 }
93 };
94 }
95
96 // end of data functor is called at the end of the data stream
97 auto endofdatacb = [dod](EndOfStreamContext& context) {
98 dod->closeDataFiles();
99 context.services().get<ControlService>().readyToQuit(QuitRequest::Me);
100 };
101
102 auto& callbacks = ic.services().get<CallbackService>();
103 callbacks.set<CallbackService::Id::EndOfStream>(endofdatacb);
104
105 // prepare map<uint64_t, uint64_t>(startTime, tfNumber)
106 std::map<uint64_t, uint64_t> tfNumbers;
107 std::map<uint64_t, std::string> tfFilenames;
108
109 std::vector<TString> aodMetaDataKeys;
110 std::vector<TString> aodMetaDataVals;
111
112 // this functor is called once per time frame
113 return [dod, tfNumbers, tfFilenames, aodMetaDataKeys, aodMetaDataVals, compressionLevel](ProcessingContext& pc) mutable -> void {
114 LOGP(debug, "======== getGlobalAODSink::processing ==========");
115 LOGP(debug, " processing data set with {} entries", pc.inputs().size());
116
117 // return immediately if pc.inputs() is empty. This should never happen!
118 if (pc.inputs().size() == 0) {
119 LOGP(info, "No inputs available!");
120 return;
121 }
122
123 // update tfNumbers
124 uint64_t startTime = 0;
125 uint64_t tfNumber = 0;
126 auto ref = pc.inputs().get("tfn");
127 if (ref.spec && ref.payload) {
128 startTime = DataRefUtils::getHeader<DataProcessingHeader*>(ref)->startTime;
129 tfNumber = pc.inputs().get<uint64_t>("tfn");
130 tfNumbers.insert(std::pair<uint64_t, uint64_t>(startTime, tfNumber));
131 }
132 // update tfFilenames
133 std::string aodInputFile;
134 auto ref2 = pc.inputs().get("tff");
135 if (ref2.spec && ref2.payload) {
136 startTime = DataRefUtils::getHeader<DataProcessingHeader*>(ref2)->startTime;
137 aodInputFile = pc.inputs().get<std::string>("tff");
138 tfFilenames.insert(std::pair<uint64_t, std::string>(startTime, aodInputFile));
139 }
140
141 // close all output files if one has reached size limit
142 dod->checkFileSizes();
143
144 // loop over the DataRefs which are contained in pc.inputs()
145 for (const auto& ref : pc.inputs()) {
146 if (!ref.spec) {
147 LOGP(debug, "Invalid input will be skipped!");
148 continue;
149 }
150
151 // get metadata
152 if (DataSpecUtils::partialMatch(*ref.spec, header::DataDescription("AODMetadataKeys"))) {
153 aodMetaDataKeys = pc.inputs().get<std::vector<TString>>(ref.spec->binding);
154 }
155 if (DataSpecUtils::partialMatch(*ref.spec, header::DataDescription("AODMetadataVals"))) {
156 aodMetaDataVals = pc.inputs().get<std::vector<TString>>(ref.spec->binding);
157 }
158
159 // skip non-AOD refs
160 if (!DataSpecUtils::partialMatch(*ref.spec, writableAODOrigins)) {
161 continue;
162 }
163 startTime = DataRefUtils::getHeader<DataProcessingHeader*>(ref)->startTime;
164
165 // does this need to be saved?
166 auto dh = DataRefUtils::getHeader<header::DataHeader*>(ref);
167 auto tableName = dh->dataDescription.as<std::string>();
168 auto ds = dod->getDataOutputDescriptors(*dh);
169 if (ds.empty()) {
170 continue;
171 }
172
173 // get TF number from startTime
174 auto it = tfNumbers.find(startTime);
175 if (it != tfNumbers.end()) {
176 tfNumber = (it->second / dod->getNumberTimeFramesToMerge()) * dod->getNumberTimeFramesToMerge();
177 } else {
178 LOGP(fatal, "No time frame number found for output with start time {}", startTime);
179 throw std::runtime_error("Processing is stopped!");
180 }
181 // get aod input file from startTime
182 auto it2 = tfFilenames.find(startTime);
183 if (it2 != tfFilenames.end()) {
184 aodInputFile = it2->second;
185 }
186
187 // get the TableConsumer and corresponding arrow table
188 auto msg = pc.inputs().get(ref.spec->binding);
189 if (msg.header == nullptr) {
190 LOGP(error, "No header for message {}:{}", ref.spec->binding, DataSpecUtils::describe(*ref.spec));
191 continue;
192 }
193 auto s = pc.inputs().get<TableConsumer>(ref.spec->binding);
194 auto table = s->asArrowTable();
195 if (!table->Validate().ok()) {
196 LOGP(warning, "The table \"{}\" is not valid and will not be saved!", tableName);
197 continue;
198 }
199 if (table->schema()->fields().empty()) {
200 LOGP(debug, "The table \"{}\" is empty but will be saved anyway!", tableName);
201 }
202
203 // loop over all DataOutputDescriptors
204 // a table can be saved in multiple ways
205 // e.g. different selections of columns to different files
206 for (auto d : ds) {
207 auto fileAndFolder = dod->getFileFolder(d, tfNumber, aodInputFile, compressionLevel);
208 auto treename = fileAndFolder.folderName + "/" + d->treename;
209 TableToTree ta2tr(table,
210 fileAndFolder.file,
211 treename.c_str());
212
213 // update metadata
214 if (fileAndFolder.file->FindObjectAny("metaData")) {
215 LOGF(debug, "Metadata: target file %s already has metadata, preserving it", fileAndFolder.file->GetName());
216 } else if (!aodMetaDataKeys.empty() && !aodMetaDataVals.empty()) {
217 TMap aodMetaDataMap;
218 for (uint32_t imd = 0; imd < aodMetaDataKeys.size(); imd++) {
219 aodMetaDataMap.Add(new TObjString(aodMetaDataKeys[imd]), new TObjString(aodMetaDataVals[imd]));
220 }
221 fileAndFolder.file->WriteObject(&aodMetaDataMap, "metaData", "Overwrite");
222 }
223
224 if (!d->colnames.empty()) {
225 for (auto& cn : d->colnames) {
226 auto idx = table->schema()->GetFieldIndex(cn);
227 auto col = table->column(idx);
228 auto field = table->schema()->field(idx);
229 if (idx != -1) {
230 ta2tr.addBranch(col, field);
231 }
232 }
233 } else {
234 ta2tr.addAllBranches();
235 }
236 ta2tr.process();
237 }
238 }
239 };
240 }
241
242 };
243}
244
246{
247 using namespace monitoring;
248 auto& ac = ctx.services().get<AnalysisContext>();
249 auto tskmap = ac.outTskMap;
250 auto objmap = ac.outObjHistMap;
251
252 return AlgorithmSpec{[objmap, tskmap](InitContext& ic) -> std::function<void(ProcessingContext&)> {
253 auto& callbacks = ic.services().get<CallbackService>();
254 auto inputObjects = std::make_shared<std::vector<std::pair<InputObjectRoute, InputObject>>>();
255
257 for (auto i = 0u; i < OutputObjHandlingPolicy::numPolicies; ++i) {
258 f[i] = nullptr;
259 }
260
261 static std::string currentDirectory = "";
262 static std::string currentFile = "";
263
264 auto endofdatacb = [inputObjects](EndOfStreamContext& context) {
265 LOG(debug) << "Writing merged objects and histograms to file";
266 if (inputObjects->empty()) {
267 LOG(error) << "Output object map is empty!";
268 context.services().get<ControlService>().readyToQuit(QuitRequest::Me);
269 return;
270 }
271 for (auto i = 0u; i < OutputObjHandlingPolicy::numPolicies; ++i) {
272 if (f[i] != nullptr) {
273 f[i]->Close();
274 }
275 }
276 LOG(debug) << "All outputs merged in their respective target files";
277 context.services().get<ControlService>().readyToQuit(QuitRequest::Me);
278 };
279
280 callbacks.set<CallbackService::Id::EndOfStream>(endofdatacb);
281 return [inputObjects, objmap, tskmap](ProcessingContext& pc) mutable -> void {
282 auto mergePart = [&inputObjects, &objmap, &tskmap, &pc](DataRef const& ref) {
283 O2_SIGNPOST_ID_GENERATE(hid, histogram_registry);
284 O2_SIGNPOST_START(histogram_registry, hid, "mergePart", "Merging histogram");
285 if (!ref.header) {
286 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "Header not found.");
287 return;
288 }
289 auto datah = o2::header::get<o2::header::DataHeader*>(ref.header);
290 if (!datah) {
291 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No data header in stack");
292 return;
293 }
294
295 if (!ref.payload) {
296 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "Payload not found for %{public}s/%{public}s/%d",
297 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
298 datah->subSpecification);
299 return;
300 }
301
302 auto objh = o2::header::get<o2::framework::OutputObjHeader*>(ref.header);
303 if (!objh) {
304 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No output object header in stack of %{public}s/%{public}s/%d.",
305 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
306 datah->subSpecification);
307 return;
308 }
309
310 InputObject obj;
311 FairInputTBuffer tm(const_cast<char*>(ref.payload), static_cast<int>(datah->payloadSize));
312 tm.InitMap();
313 obj.kind = tm.ReadClass();
314 tm.SetBufferOffset(0);
315 tm.ResetMap();
316 O2_SIGNPOST_ID_GENERATE(did, histogram_registry);
317 O2_SIGNPOST_START(histogram_registry, did, "initialising root", "Starting deserialization of %{public}s/%{public}s/%d",
318 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
319 datah->subSpecification);
320 if (obj.kind == nullptr) {
321 O2_SIGNPOST_END(histogram_registry, did, "initialising root", "Failed to deserialise");
322 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "Cannot read class info from buffer of %{public}s/%{public}s/%d.",
323 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
324 datah->subSpecification);
325 return;
326 }
327 O2_SIGNPOST_END(histogram_registry, did, "initialising root", "Done init.");
328
329 auto policy = objh->mPolicy;
330 auto sourceType = objh->mSourceType;
331 auto hash = objh->mTaskHash;
332 O2_SIGNPOST_START(histogram_registry, did, "deserialization", "Starting deserialization of %{public}s/%{public}s/%d",
333 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
334 datah->subSpecification);
335
336 obj.obj = tm.ReadObjectAny(obj.kind);
337 auto* named = static_cast<TNamed*>(obj.obj);
338 obj.name = named->GetName();
339 O2_SIGNPOST_END(histogram_registry, did, "deserialization", "Done deserialization.");
340 // If we have a folder, we assume the first element of the path
341 // to be the name of the registry.
342 if (sourceType == HistogramRegistrySource) {
343 obj.container = objh->containerName;
344 } else {
345 obj.container = obj.name;
346 }
347 auto hpos = std::find_if(tskmap.begin(), tskmap.end(), [&](auto&& x) { return x.id == hash; });
348 if (hpos == tskmap.end()) {
349 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No task found for hash %d.", hash);
350 return;
351 }
352 auto taskname = hpos->name;
353 auto opos = std::find_if(objmap.begin(), objmap.end(), [&](auto&& x) { return x.id == hash; });
354 if (opos == objmap.end()) {
355 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No object list found for task %{public}s (hash=%d).",
356 taskname.c_str(), hash);
357 return;
358 }
359 auto objects = opos->bindings;
360 if (std::find(objects.begin(), objects.end(), obj.container) == objects.end()) {
361 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "mergePart", "No container %{public}s in map for task %{public}s.",
362 obj.container.c_str(), taskname.c_str());
363 return;
364 }
365 auto nameHash = runtime_hash(obj.name.c_str());
366 InputObjectRoute key{obj.name, nameHash, taskname, hash, policy, sourceType};
367 auto existing = std::find_if(inputObjects->begin(), inputObjects->end(), [&](auto&& x) { return (x.first.uniqueId == nameHash) && (x.first.taskHash == hash); });
368 // If it's the first one, we just add it to the list.
369 O2_SIGNPOST_START(histogram_registry, did, "merging", "Starting merging of %{public}s/%{public}s/%d",
370 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
371 datah->subSpecification);
372 if (existing == inputObjects->end()) {
373 obj.count = objh->mPipelineSize;
374 inputObjects->emplace_back(key, obj);
375 existing = inputObjects->end() - 1;
376 } else {
377 obj.count = existing->second.count;
378 // Otherwise, we merge it with the existing one.
379 auto merger = existing->second.kind->GetMerge();
380 if (!merger) {
381 O2_SIGNPOST_END(histogram_registry, did, "merging", "Unabled to merge");
382 O2_SIGNPOST_END_WITH_ERROR(histogram_registry, hid, "merging", "Already one unmergeable object found for %{public}s", obj.name.c_str());
383 return;
384 }
385 TList coll;
386 coll.Add(static_cast<TObject*>(obj.obj));
387 merger(existing->second.obj, &coll, nullptr);
388 }
389 // We expect as many objects as the pipeline size, for
390 // a given object name and task hash.
391 existing->second.count -= 1;
392
393 if (existing->second.count != 0) {
394 O2_SIGNPOST_END(histogram_registry, did, "merging", "Done partial merging.");
395 O2_SIGNPOST_END(histogram_registry, hid, "mergePart", "Pipeline lanes still missing.");
396 return;
397 }
398 O2_SIGNPOST_END(histogram_registry, did, "merging", "Done merging.");
399 // Write the object here.
400 auto route = existing->first;
401 auto entry = existing->second;
402 auto file = ROOTfileNames.find(route.policy);
403 if (file == ROOTfileNames.end()) {
404 O2_SIGNPOST_END(histogram_registry, hid, "mergePart", "Not matching any file.");
405 return;
406 }
407 O2_SIGNPOST_START(histogram_registry, did, "writing", "Starting writing of %{public}s/%{public}s/%d",
408 datah->dataOrigin.as<std::string>().c_str(), datah->dataDescription.as<std::string>().c_str(),
409 datah->subSpecification);
410 auto filename = file->second;
411 if (f[route.policy] == nullptr) {
412 f[route.policy] = TFile::Open(filename.c_str(), "RECREATE");
413 }
414 auto nextDirectory = route.directory;
415 if ((nextDirectory != currentDirectory) || (filename != currentFile)) {
416 if (!f[route.policy]->FindKey(nextDirectory.c_str())) {
417 f[route.policy]->mkdir(nextDirectory.c_str());
418 }
419 currentDirectory = nextDirectory;
420 currentFile = filename;
421 }
422
423 // FIXME: handle folders
424 f[route.policy]->cd("/");
425 auto* currentDir = f[route.policy]->GetDirectory(currentDirectory.c_str());
426 // The name contains a path...
427 int objSize = 0;
428 if (sourceType == HistogramRegistrySource) {
429 TDirectory* currentFolder = currentDir;
430 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Toplevel folder is %{public}s.",
431 currentDir->GetName());
432 std::string objName = entry.name;
433 auto lastSlash = entry.name.rfind('/');
434
435 if (lastSlash != std::string::npos) {
436 auto dirname = entry.name.substr(0, lastSlash);
437 objName = entry.name.substr(lastSlash + 1);
438 currentFolder = currentDir->GetDirectory(dirname.c_str());
439 if (!currentFolder) {
440 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Creating folder %{public}s",
441 dirname.c_str());
442 currentFolder = currentDir->mkdir(dirname.c_str(), "", kTRUE);
443 } else {
444 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Folder %{public}s already there.",
445 currentFolder->GetName());
446 }
447 }
448 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Writing %{public}s of kind %{public}s in %{public}s",
449 entry.name.c_str(), entry.kind->GetName(), currentDir->GetName());
450 objSize = currentFolder->WriteObjectAny(entry.obj, entry.kind, objName.c_str());
451 O2_SIGNPOST_END(histogram_registry, did, "writing", "End writing %{public}s", entry.name.c_str());
452 delete (TObject*)entry.obj;
453 entry.obj = nullptr;
454 } else {
455 O2_SIGNPOST_EVENT_EMIT(histogram_registry, hid, "mergePart", "Writing %{public}s of kind %{public}s in %{public}s",
456 entry.name.c_str(), entry.kind->GetName(), currentDir->GetName());
457 objSize = currentDir->WriteObjectAny(entry.obj, entry.kind, entry.name.c_str());
458 O2_SIGNPOST_END(histogram_registry, did, "writing", "End writing %{public}s", entry.name.c_str());
459 delete (TObject*)entry.obj;
460 entry.obj = nullptr;
461 }
462 O2_SIGNPOST_END(histogram_registry, hid, "mergePart", "Done merging object of %d bytes.", objSize);
463 };
464 O2_SIGNPOST_ID_GENERATE(rid, histogram_registry);
465 O2_SIGNPOST_START(histogram_registry, rid, "processParts", "Start merging %zu parts received together.", pc.inputs().getNofParts(0));
466 for (int pi = 0; pi < pc.inputs().getNofParts(0); ++pi) {
467 mergePart(pc.inputs().get("x", pi));
468 }
469 O2_SIGNPOST_END(histogram_registry, rid, "processParts", "Done histograms in multipart message.");
470 };
471 }};
472}
473} // namespace o2::framework::writers
/* Doxygen cross-reference index (auto-generated residue from the source
   listing; not part of the translation unit — preserved as a comment):
   std::vector< std::string > objects
   int32_t i
   uint32_t col
   Definition RawData.h:4
   #define O2_DECLARE_DYNAMIC_LOG(name)      Definition Signpost.h:489
   #define O2_SIGNPOST_END(log, id, name, format,...)            Definition Signpost.h:608
   #define O2_SIGNPOST_ID_GENERATE(name, log)                    Definition Signpost.h:506
   #define O2_SIGNPOST_EVENT_EMIT(log, id, name, format,...)     Definition Signpost.h:522
   #define O2_SIGNPOST_END_WITH_ERROR(log, id, name, format,...) Definition Signpost.h:616
   #define O2_SIGNPOST_START(log, id, name, format,...)          Definition Signpost.h:602
   constexpr uint32_t runtime_hash(char const *str)
   std::ostringstream debug
   StringRef key
   ServiceRegistryRef services() const
   ConfigParamRegistry & options() const
   bool hasOption(const char *key) const
   void addBranch(std::shared_ptr< arrow::ChunkedArray > const &column, std::shared_ptr< arrow::Field > const &field)
   std::shared_ptr< TTree > process()
   GLint GLenum GLint x        Definition glcorearb.h:403
   GLint GLsizei count         Definition glcorearb.h:399
   GLuint entry                Definition glcorearb.h:5735
   GLdouble f                  Definition glcorearb.h:310
   GLint ref                   Definition glcorearb.h:291
   @ Me — Only quit this data processor.
   OutputObjHandlingPolicy — Policy enum to determine OutputObj handling when writing.
   std::string filename()
   std::vector< OutputTaskInfo > outTskMap
   std::vector< InputSpec > outputsInputsAOD
   static std::shared_ptr< DataOutputDirector > getDataOutputDirector(ConfigContext const &ctx) — Get the data director.
   static bool partialMatch(InputSpec const &spec, o2::header::DataOrigin const &origin)
   static std::string describe(InputSpec const &spec)
   static AlgorithmSpec getOutputObjHistWriter(ConfigContext const &context)
   static AlgorithmSpec getOutputTTreeWriter(ConfigContext const &context)
   o2::mch::DsIndex ds
   LOG(info)<< "Compressed in "<< sw.CpuTime()<< " s"
   uint64_t const void const *restrict const msg
   Definition x9.h:153
*/