Project
Loading...
Searching...
No Matches
GPUQA.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
15#define QA_DEBUG 0
16#define QA_TIMING 0
17
18#include "Rtypes.h" // Include ROOT header first, to use ROOT and disable replacements
19
20#include "TH1F.h"
21#include "TH2F.h"
22#include "TH1D.h"
23#include "TGraphAsymmErrors.h"
24#include "TCanvas.h"
25#include "TPad.h"
26#include "TLegend.h"
27#include "TColor.h"
28#include "TPaveText.h"
29#include "TF1.h"
30#include "TFile.h"
31#include "TTree.h"
32#include "TStyle.h"
33#include "TLatex.h"
34#include "TObjArray.h"
35#include <sys/stat.h>
36
37#include "GPUQA.h"
38#include "GPUTPCDef.h"
39#include "GPUTPCTrackingData.h"
40#include "GPUChainTracking.h"
41#include "GPUChainTrackingGetters.inc"
42#include "GPUTPCTrack.h"
43#include "GPUTPCTracker.h"
44#include "GPUTPCGMMergedTrack.h"
45#include "GPUTPCGMPropagator.h"
47#include "GPUTPCMCInfo.h"
48#include "GPUTPCClusterData.h"
49#include "GPUO2DataTypes.h"
50#include "GPUParam.inc"
52#include "GPUTPCConvertImpl.h"
53#include "TPCFastTransform.h"
55#include "GPUROOTDump.h"
58#include "GPUSettings.h"
59#ifdef GPUCA_O2_LIB
69#include "TPDGCode.h"
70#include "TParticlePDG.h"
71#include "TDatabasePDG.h"
72#endif
73#include "GPUQAHelper.h"
74#include <algorithm>
75#include <cstdio>
76#include <cinttypes>
77
78#include "utils/timer.h"
79
80#include <oneapi/tbb.h>
81
82using namespace o2::gpu;
83
84#ifdef GPUCA_MERGER_BY_MC_LABEL
85#define CHECK_CLUSTER_STATE_INIT_LEG_BY_MC() \
86 if (!unattached && mTrackMCLabels[id].isValid()) { \
87 int32_t mcLabel = mTrackMCLabels[id].getTrackID(); \
88 int32_t mcEvent = mTrackMCLabels[id].getEventID(); \
89 int32_t mcSource = mTrackMCLabels[id].getSourceID(); \
90 if (mTrackMCLabelsReverse[mMCEventOffset[mcSource] + mcEvent][mcLabel] != id) { \
91 attach &= (~gputpcgmmergertypes::attachGoodLeg); \
92 } \
93 }
94#else
95#define CHECK_CLUSTER_STATE_INIT_LEG_BY_MC()
96#endif
97
98#define CHECK_CLUSTER_STATE_INIT() \
99 bool unattached = attach == 0; \
100 float qpt = 0; \
101 bool lowPt = false; \
102 bool mev200 = false; \
103 bool mergedLooper = false; \
104 int32_t id = attach & gputpcgmmergertypes::attachTrackMask; \
105 if (!unattached) { \
106 qpt = fabsf(mTracking->mIOPtrs.mergedTracks[id].GetParam().GetQPt()); \
107 lowPt = qpt * mTracking->GetParam().qptB5Scaler > mTracking->GetParam().rec.tpc.rejectQPtB5; \
108 mev200 = qpt > 5; \
109 mergedLooper = mTracking->mIOPtrs.mergedTracks[id].MergedLooper(); \
110 } \
111 bool physics = false, protect = false; \
112 CHECK_CLUSTER_STATE_INIT_LEG_BY_MC();
113
114#define CHECK_CLUSTER_STATE() \
115 CHECK_CLUSTER_STATE_INIT() \
116 if (mev200) { \
117 mClusterCounts.n200MeV++; \
118 } \
119 if (lowPt) { \
120 mClusterCounts.nLowPt++; \
121 } else if (mergedLooper) { \
122 mClusterCounts.nMergedLooper++; \
123 } else { \
124 GPUTPCClusterRejection::GetProtectionStatus<true>(attach, physics, protect, &mClusterCounts, &mev200); \
125 }
126
127#define CHECK_CLUSTER_STATE_NOCOUNT() \
128 CHECK_CLUSTER_STATE_INIT() \
129 (void)mev200; /* silence unused variable warning*/ \
130 if (!lowPt && !mergedLooper) { \
131 GPUTPCClusterRejection::GetProtectionStatus<false>(attach, physics, protect); \
132 }
133
134static const GPUSettingsQA& GPUQA_GetConfig(GPUChainTracking* chain)
135{
136 static GPUSettingsQA defaultConfig;
137 if (chain && chain->mConfigQA) {
138 return *chain->mConfigQA;
139 } else {
140 return defaultConfig;
141 }
142}
143
144// static const constexpr bool PLOT_ROOT = 0;
145// static const constexpr bool FIX_SCALES = 0;
146static const constexpr bool PERF_FIGURE = 0;
147// static const constexpr float FIXED_SCALES_MIN[5] = {-0.05, -0.05, -0.2, -0.2, -0.5};
148// static const constexpr float FIXED_SCALES_MAX[5] = {0.4, 0.7, 5, 3, 6.5};
149static const constexpr float LOG_PT_MIN = -1.;
150
151static constexpr float Y_MAX = 40;
152static constexpr float Z_MAX = 100;
153static constexpr float PT_MIN = GPUCA_MIN_TRACK_PTB5_DEFAULT;
154// static constexpr float PT_MIN2 = 0.1;
155static constexpr float PT_MIN_PRIM = 0.1;
156static constexpr float PT_MIN_CLUST = GPUCA_MIN_TRACK_PTB5_DEFAULT;
157static constexpr float PT_MAX = 20;
158static constexpr float ETA_MAX = 1.5;
159static constexpr float ETA_MAX2 = 0.9;
160
161static constexpr float MIN_WEIGHT_CLS = 40;
162static constexpr float FINDABLE_WEIGHT_CLS = 70;
163
164static constexpr bool CLUST_HIST_INT_SUM = false;
165
166static constexpr const int32_t COLORCOUNT = 12;
167
168static const constexpr char* EFF_TYPES[5] = {"Rec", "Clone", "Fake", "All", "RecAndClone"};
169static const constexpr char* FINDABLE_NAMES[2] = {"", "Findable"};
170static const constexpr char* PRIM_NAMES[2] = {"Prim", "Sec"};
171static const constexpr char* PARAMETER_NAMES[5] = {"Y", "Z", "#Phi", "#lambda", "Relative #it{p}_{T}"};
172static const constexpr char* PARAMETER_NAMES_NATIVE[5] = {"Y", "Z", "sin(#Phi)", "tan(#lambda)", "q/#it{p}_{T} (curvature)"};
173static const constexpr char* VSPARAMETER_NAMES[6] = {"Y", "Z", "Phi", "Eta", "Pt", "Pt_log"};
174static const constexpr char* EFF_NAMES[3] = {"Efficiency", "Clone Rate", "Fake Rate"};
175static const constexpr char* EFFICIENCY_TITLES[4] = {"Efficiency (Primary Tracks, Findable)", "Efficiency (Secondary Tracks, Findable)", "Efficiency (Primary Tracks)", "Efficiency (Secondary Tracks)"};
176static const constexpr double SCALE[5] = {10., 10., 1000., 1000., 100.};
177static const constexpr double SCALE_NATIVE[5] = {10., 10., 1000., 1000., 1.};
178static const constexpr char* XAXIS_TITLES[5] = {"#it{y}_{mc} (cm)", "#it{z}_{mc} (cm)", "#Phi_{mc} (rad)", "#eta_{mc}", "#it{p}_{Tmc} (GeV/#it{c})"};
179static const constexpr char* AXIS_TITLES[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "#phi-#phi_{mc} (mrad) (Resolution)", "#lambda-#lambda_{mc} (mrad) (Resolution)", "(#it{p}_{T} - #it{p}_{Tmc}) / #it{p}_{Tmc} (%) (Resolution)"};
180static const constexpr char* AXIS_TITLES_NATIVE[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "sin(#phi)-sin(#phi_{mc}) (Resolution)", "tan(#lambda)-tan(#lambda_{mc}) (Resolution)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc}) (Resolution)"};
181static const constexpr char* AXIS_TITLES_PULL[5] = {"#it{y}-#it{y}_{mc}/#sigma_{y} (Pull)", "#it{z}-#it{z}_{mc}/#sigma_{z} (Pull)", "sin(#phi)-sin(#phi_{mc})/#sigma_{sin(#phi)} (Pull)", "tan(#lambda)-tan(#lambda_{mc})/#sigma_{tan(#lambda)} (Pull)",
182 "q*(q/#it{p}_{T} - q/#it{p}_{Tmc})/#sigma_{q/#it{p}_{T}} (Pull)"};
183static const constexpr char* CLUSTER_NAMES[GPUQA::N_CLS_HIST] = {"Correctly attached clusters", "Fake attached clusters", "Attached + adjacent clusters", "Fake adjacent clusters", "Clusters of reconstructed tracks", "Used in Physics", "Protected", "All clusters"};
184static const constexpr char* CLUSTER_TITLES[GPUQA::N_CLS_TYPE] = {"Clusters Pt Distribution / Attachment", "Clusters Pt Distribution / Attachment (relative to all clusters)", "Clusters Pt Distribution / Attachment (integrated)"};
185static const constexpr char* CLUSTER_NAMES_SHORT[GPUQA::N_CLS_HIST] = {"Attached", "Fake", "AttachAdjacent", "FakeAdjacent", "FoundTracks", "Physics", "Protected", "All"};
186static const constexpr char* CLUSTER_TYPES[GPUQA::N_CLS_TYPE] = {"", "Ratio", "Integral"};
187static const constexpr int32_t COLORS_HEX[COLORCOUNT] = {0xB03030, 0x00A000, 0x0000C0, 0x9400D3, 0x19BBBF, 0xF25900, 0x7F7F7F, 0xFFD700, 0x07F707, 0x07F7F7, 0xF08080, 0x000000};
188
189static const constexpr int32_t CONFIG_DASHED_MARKERS = 0;
190
191static const constexpr float AXES_MIN[5] = {-Y_MAX, -Z_MAX, 0.f, -ETA_MAX, PT_MIN};
192static const constexpr float AXES_MAX[5] = {Y_MAX, Z_MAX, 2.f * M_PI, ETA_MAX, PT_MAX};
193static const constexpr int32_t AXIS_BINS[5] = {51, 51, 144, 31, 50};
194static const constexpr int32_t RES_AXIS_BINS[] = {1017, 113}; // Consecutive bin sizes, histograms are binned down until the maximum entry is 50, each bin size should evenly divide its predecessor.
195static const constexpr float RES_AXES[5] = {1., 1., 0.03, 0.03, 1.0};
196static const constexpr float RES_AXES_NATIVE[5] = {1., 1., 0.1, 0.1, 5.0};
197static const constexpr float PULL_AXIS = 10.f;
198
199std::vector<TColor*> GPUQA::mColors;
200int32_t GPUQA::initColors()
201{
202 mColors.reserve(COLORCOUNT);
203 for (int32_t i = 0; i < COLORCOUNT; i++) {
204 float f1 = (float)((COLORS_HEX[i] >> 16) & 0xFF) / (float)0xFF;
205 float f2 = (float)((COLORS_HEX[i] >> 8) & 0xFF) / (float)0xFF;
206 float f3 = (float)((COLORS_HEX[i] >> 0) & 0xFF) / (float)0xFF;
207 mColors.emplace_back(new TColor(10000 + i, f1, f2, f3));
208 }
209 return 0;
210}
211static constexpr Color_t defaultColorNums[COLORCOUNT] = {kRed, kBlue, kGreen, kMagenta, kOrange, kAzure, kBlack, kYellow, kGray, kTeal, kSpring, kPink};
212
213#define TRACK_EXPECTED_REFERENCE_X_DEFAULT 81
214#ifdef GPUCA_TPC_GEOMETRY_O2
// Map O2's reserved "fake/noise" MC track ids (0x7FFFFFFE and 0x7FFFFFFF)
// to -1; every other id passes through unchanged.
static inline int32_t GPUQA_O2_ConvertFakeLabel(int32_t label)
{
  if (label >= 0x7FFFFFFE) {
    return -1;
  }
  return label;
}
// --- O2 (GPUCA_TPC_GEOMETRY_O2) accessor layer ---
// Thin adapters mapping the QA's generic MC queries onto the O2 data model:
// collisions are addressed by a flat index mMCEventOffset[source] + event,
// and cluster labels come from the ClusterNativeAccess MC truth container.
inline uint32_t GPUQA::GetNMCCollissions() const { return mMCInfosCol.size(); }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mMCInfosCol[iCol].num; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].num; }
inline uint32_t GPUQA::GetNMCLabels() const { return mClNative->clustersMCTruth ? mClNative->clustersMCTruth->getIndexedSize() : 0; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mMCInfos[mMCInfosCol[iCol].first + iTrk]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return mMCInfos[mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].first + label.getTrackID()]; }
inline GPUQA::mcLabels_t GPUQA::GetMCLabel(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i); }
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return label.size(); }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i).size(); }
inline GPUQA::mcLabel_t GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mClNative->clustersMCTruth->getLabels(i)[j]; }
// Track ids are normalized through GPUQA_O2_ConvertFakeLabel so fake labels read as -1.
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(mClNative->clustersMCTruth->getLabels(i)[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(label[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return GPUQA_O2_ConvertFakeLabel(label.getTrackID()); }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return mMCEventOffset[mClNative->clustersMCTruth->getLabels(i)[j].getSourceID()] + mClNative->clustersMCTruth->getLabels(i)[j].getEventID(); }
inline const auto& GPUQA::GetClusterLabels() { return mClNative->clustersMCTruth; }
// O2 MC truth carries no per-cluster weights; weight is uniformly 1.
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return 1; }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && mClNative && mClNative->clustersMCTruth && mMCInfos.size(); }
// Invalid labels fold into collision slot 0.
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return !label.isValid() ? 0 : (mMCEventOffset[label.getSourceID()] + label.getEventID()); }
GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? MCCompLabel() : mTrackMCLabels[trackId]; }
#define TRACK_EXPECTED_REFERENCE_X 78
238#else
// --- Legacy (non-O2) accessor layer ---
// Single-collision data model: MC info lives in mIOPtrs.mcInfosTPC, cluster
// labels in mIOPtrs.mcLabelsTPC with a fixed 3 label slots per cluster.
inline GPUQA::mcLabelI_t::mcLabelI_t(const GPUQA::mcLabel_t& l) : track(l.fMCID) {}
inline bool GPUQA::mcLabelI_t::operator==(const GPUQA::mcLabel_t& l) { return AbsLabelID(track) == l.fMCID; }
inline uint32_t GPUQA::GetNMCCollissions() const { return 1; }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCLabels() const { return mTracking->mIOPtrs.nMCLabelsTPC; }
// Fake ids are encoded as negative values; AbsLabelID undoes the encoding.
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mTracking->mIOPtrs.mcInfosTPC[AbsLabelID(iTrk)]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return GetMCTrack(label.fMCID, 0); }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabelI_t& label) { return GetMCTrack(label.track, 0); }
inline const GPUQA::mcLabels_t& GPUQA::GetMCLabel(uint32_t i) { return mTracking->mIOPtrs.mcLabelsTPC[i]; }
inline const GPUQA::mcLabel_t& GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j]; }
// Legacy format always provides exactly 3 label slots per cluster.
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return 3; }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return 3; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return label.fMCID; }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return 0; }

inline const auto& GPUQA::GetClusterLabels() { return mTracking->mIOPtrs.mcLabelsTPC; }
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return label.fWeight; }
// Fake-label encoding: id <-> (-2 - id); negative ids denote fakes.
inline int32_t GPUQA::FakeLabelID(int32_t id) { return id < 0 ? id : (-2 - id); }
inline int32_t GPUQA::AbsLabelID(int32_t id) { return id >= 0 ? id : (-id - 2); }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && GetNMCLabels() && GetNMCTracks(0); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return 0; }
GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? mcLabelI_t() : mTrackMCLabels[trackId]; }
#define TRACK_EXPECTED_REFERENCE_X TRACK_EXPECTED_REFERENCE_X_DEFAULT
267#endif
268template <class T>
269inline auto& GPUQA::GetMCTrackObj(T& obj, const GPUQA::mcLabelI_t& l)
270{
271 return obj[mMCEventOffset[l.getSourceID()] + l.getEventID()][l.getTrackID()];
272}
273
274template <>
275auto GPUQA::getHistArray<TH1F>()
276{
277 return std::make_pair(mHist1D, &mHist1D_pos);
278}
279template <>
280auto GPUQA::getHistArray<TH2F>()
281{
282 return std::make_pair(mHist2D, &mHist2D_pos);
283}
284template <>
285auto GPUQA::getHistArray<TH1D>()
286{
287 return std::make_pair(mHist1Dd, &mHist1Dd_pos);
288}
289template <>
290auto GPUQA::getHistArray<TGraphAsymmErrors>()
291{
292 return std::make_pair(mHistGraph, &mHistGraph_pos);
293}
// Register a histogram of type T under the given name.
// In external-histogram mode (QC framework supplies the vectors) the
// histogram must already exist at the next free slot and match by name;
// otherwise it is constructed in place from the forwarded arguments.
// On return h points at the stored histogram, and the address of h is
// recorded so the pointer can be refreshed after vector reallocations.
// Throws std::runtime_error on an incomplete or mismatched external array.
template <class T, typename... Args>
void GPUQA::createHist(T*& h, const char* name, Args... args)
{
  const auto& p = getHistArray<T>();
  if (mHaveExternalHists) {
    // p.second->size() is the index of the next histogram to be claimed.
    if (p.first->size() <= p.second->size()) {
      GPUError("Array sizes mismatch: Histograms %lu <= Positions %lu", p.first->size(), p.second->size());
      throw std::runtime_error("Incoming histogram array incomplete");
    }
    if (strcmp((*p.first)[p.second->size()].GetName(), name)) {
      GPUError("Histogram name mismatch: in array %s, trying to create %s", (*p.first)[p.second->size()].GetName(), name);
      throw std::runtime_error("Incoming histogram has incorrect name");
    }
  } else {
    if constexpr (std::is_same_v<T, TGraphAsymmErrors>) {
      // TGraphAsymmErrors lacks the (name, ...) constructor shared by the
      // histogram types, so construct empty and set the name afterwards.
      p.first->emplace_back();
      p.first->back().SetName(name);
    } else {
      p.first->emplace_back(name, args...);
    }
  }
  h = &((*p.first)[p.second->size()]);
  p.second->emplace_back(&h);
}
318
319namespace o2::gpu::internal
320{
322 std::tuple<std::vector<std::unique_ptr<TCanvas>>, std::vector<std::unique_ptr<TLegend>>, std::vector<std::unique_ptr<TPad>>, std::vector<std::unique_ptr<TLatex>>, std::vector<std::unique_ptr<TH1D>>> v;
323};
324} // namespace o2::gpu::internal
325
326template <class T, typename... Args>
327T* GPUQA::createGarbageCollected(Args... args)
328{
329 auto& v = std::get<std::vector<std::unique_ptr<T>>>(mGarbageCollector->v);
330 v.emplace_back(std::make_unique<T>(args...));
331 return v.back().get();
332}
333void GPUQA::clearGarbagageCollector()
334{
335 std::get<std::vector<std::unique_ptr<TPad>>>(mGarbageCollector->v).clear(); // Make sure to delete TPad first due to ROOT ownership (std::tuple has no defined order in its destructor)
336 std::apply([](auto&&... args) { ((args.clear()), ...); }, mGarbageCollector->v);
337}
338
// Construct the QA module for a tracking chain. Without an explicit config it
// falls back to the chain's QA config (or a default); without an explicit
// param it uses the chain's GPUParam.
// NOTE(review): with param == nullptr this dereferences chain — a null chain
// together with a null param would crash; confirm callers never do that.
GPUQA::GPUQA(GPUChainTracking* chain, const GPUSettingsQA* config, const GPUParam* param) : mTracking(chain), mConfig(config ? *config : GPUQA_GetConfig(chain)), mParam(param ? param : &chain->GetParam()), mGarbageCollector(std::make_unique<internal::GPUQAGarbageCollection>())
{
  mMCEventOffset.resize(1, 0); // single collision at offset 0 until MC data is loaded
}
343
{
  // The histogram vectors were allocated by InitQA(); externally provided
  // vectors (loadHistograms) are not owned and must not be deleted.
  if (mQAInitialized && !mHaveExternalHists) {
    delete mHist1D;
    delete mHist2D;
    delete mHist1Dd;
    delete mHistGraph;
  }
  clearGarbagageCollector(); // Needed to guarantee correct order for ROOT ownership
}
354
// Decide whether a cluster with the given attachment flags may be removed.
// With prot == true the question is inverted: "is the cluster protected or
// physics-relevant?"; with prot == false: "is it attached but neither
// physics-relevant nor protected?".
bool GPUQA::clusterRemovable(int32_t attach, bool prot) const
{
  // NOTE(review): unattached/physics/protect are declared by the
  // CHECK_CLUSTER_STATE macro family invoked at the start of this function
  // (that line is elided in this listing) — confirm against the full source.
  if (prot) {
    return protect || physics;
  }
  return (!unattached && !physics && !protect);
}
363
364template <class T>
365void GPUQA::SetAxisSize(T* e)
366{
367 e->GetYaxis()->SetTitleOffset(1.0);
368 e->GetYaxis()->SetTitleSize(0.045);
369 e->GetYaxis()->SetLabelSize(0.045);
370 e->GetXaxis()->SetTitleOffset(1.03);
371 e->GetXaxis()->SetTitleSize(0.045);
372 e->GetXaxis()->SetLabelOffset(-0.005);
373 e->GetXaxis()->SetLabelSize(0.045);
374}
375
376void GPUQA::SetLegend(TLegend* l)
377{
378 l->SetTextFont(72);
379 l->SetTextSize(0.016);
380 l->SetFillColor(0);
381}
382
383double* GPUQA::CreateLogAxis(int32_t nbins, float xmin, float xmax)
384{
385 float logxmin = std::log10(xmin);
386 float logxmax = std::log10(xmax);
387 float binwidth = (logxmax - logxmin) / nbins;
388
389 double* xbins = new double[nbins + 1];
390
391 xbins[0] = xmin;
392 for (int32_t i = 1; i <= nbins; i++) {
393 xbins[i] = std::pow(10, logxmin + i * binwidth);
394 }
395 return xbins;
396}
397
398void GPUQA::ChangePadTitleSize(TPad* p, float size)
399{
400 p->Update();
401 TPaveText* pt = (TPaveText*)(p->GetPrimitive("title"));
402 if (pt == nullptr) {
403 GPUError("Error changing title");
404 } else {
405 pt->SetTextSize(size);
406 p->Modified();
407 }
408}
409
// Render a histogram on a temporary canvas and print it to the given file
// (format chosen by ROOT from the filename extension).
void GPUQA::DrawHisto(TH1* histo, char* filename, char* options)
{
  TCanvas tmp;
  tmp.cd();
  histo->Draw(options);
  tmp.Print(filename);
}
417
418void GPUQA::doPerfFigure(float x, float y, float size)
419{
420 if (!PERF_FIGURE) {
421 return;
422 }
423 TLatex* t = createGarbageCollected<TLatex>();
424 t->SetNDC(kTRUE);
425 t->SetTextColor(1);
426 t->SetTextSize(size);
427 t->DrawLatex(x, y, str_perf_figure_1);
428 t->DrawLatex(x, y - 0.01 - size, str_perf_figure_2);
429}
430
// Restrict QA processing to the MC track index range [min, max].
void GPUQA::SetMCTrackRange(int32_t min, int32_t max)
{
  mMCTrackMin = min;
  mMCTrackMax = max;
}
436
// Create (or, in external-histogram mode, claim) every histogram required by
// the enabled QA tasks, then publish the final histogram addresses to all
// registered positions. Returns 0 on success.
int32_t GPUQA::InitQACreateHistograms()
{
  char name[2048], fname[1024];
  if (mQATasks & taskTrackingEff) {
    // Create Efficiency Histograms
    // Indices: i = EFF_TYPES, j = FINDABLE_NAMES, k = PRIM_NAMES,
    // l = VSPARAMETER_NAMES (l == 4 is pt and gets a logarithmic axis).
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 2; j++) {
        for (int32_t k = 0; k < 2; k++) {
          for (int32_t l = 0; l < 5; l++) {
            snprintf(name, 2048, "%s%s%s%sVs%s", "tracks", EFF_TYPES[i], FINDABLE_NAMES[j], PRIM_NAMES[k], VSPARAMETER_NAMES[l]);
            if (l == 4) {
              // Primaries start at PT_MIN_PRIM on the log-pt axis.
              std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], k == 0 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
              createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], binsPt.get());
            } else {
              createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], AXES_MIN[l], AXES_MAX[l]);
            }
            if (!mHaveExternalHists) {
              mEff[i][j][k][l]->Sumw2();
            }
            strcat(name, "_eff");
            // The last EFF_TYPES entry ("RecAndClone") has no derived result graph.
            if (i < 4) {
              createHist(mEffResult[i][j][k][l], name);
            }
          }
        }
      }
    }
  }

  // Create Resolution Histograms
  if (mQATasks & taskTrackingRes) {
    // i = measured parameter, j = parameter it is plotted against (j == 4: log pt).
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 5; j++) {
        snprintf(name, 2048, "rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        snprintf(fname, 1024, "mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        if (j == 4) {
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
          createHist(mRes[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
          createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
        } else {
          createHist(mRes[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
          createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        }
        snprintf(name, 2048, "res_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        // Native-fit mode uses wider axes and, for q/pt, 10x finer binning.
        const float* axis = mConfig.nativeFitResolutions ? RES_AXES_NATIVE : RES_AXES;
        const int32_t nbins = i == 4 && mConfig.nativeFitResolutions ? (10 * RES_AXIS_BINS[0]) : RES_AXIS_BINS[0];
        if (j == 4) {
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
          createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], binsPt.get());
        } else {
          createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        }
      }
    }
  }

  // Create Pull Histograms
  if (mQATasks & taskTrackingResPull) {
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 5; j++) {
        snprintf(name, 2048, "pull_rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        snprintf(fname, 1024, "pull_mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        if (j == 4) {
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
          createHist(mPull[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
          createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
        } else {
          createHist(mPull[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
          createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        }
        snprintf(name, 2048, "pull_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        if (j == 4) {
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
          createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], binsPt.get());
        } else {
          createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        }
      }
    }
  }

  // Create Cluster Histograms
  if (mQATasks & taskClusterAttach) {
    // One histogram per (cluster class, type) pair; the last class ("All")
    // exists only for the plain type, hence N_CLS_TYPE * N_CLS_HIST - 1.
    for (int32_t i = 0; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
      int32_t ioffset = i >= (2 * N_CLS_HIST - 1) ? (2 * N_CLS_HIST - 1) : i >= N_CLS_HIST ? N_CLS_HIST : 0;
      int32_t itype = i >= (2 * N_CLS_HIST - 1) ? 2 : i >= N_CLS_HIST ? 1 : 0;
      snprintf(name, 2048, "clusters%s%s", CLUSTER_NAMES_SHORT[i - ioffset], CLUSTER_TYPES[itype]);
      std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
      createHist(mClusters[i], name, name, AXIS_BINS[4], binsPt.get());
    }
  }

  if (mQATasks & taskTrackStatistics) {
    // Create Tracks Histograms
    for (int32_t i = 0; i < 2; i++) {
      snprintf(name, 2048, i ? "nrows_with_cluster" : "nclusters");
      createHist(mNCl[i], name, name, 160, 0, 159);
    }
    snprintf(name, 2048, "tracks");
    std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
    createHist(mTracks, name, name, AXIS_BINS[4], binsPt.get());
    createHist(mClXY, "clXY", "clXY", 1000, -250, 250, 1000, -250, 250);
  }

  if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
    // First pass (mode 2) only counts the needed histograms, second pass
    // (mode 1) creates them into the sized vector.
    int32_t num = DoClusterCounts(nullptr, 2);
    mHistClusterCount.resize(num);
    DoClusterCounts(nullptr, 1);
  }

  // Publish final histogram addresses to all registered positions (the
  // storage vectors may have reallocated while histograms were added).
  for (uint32_t i = 0; i < mHist1D->size(); i++) {
    *mHist1D_pos[i] = &(*mHist1D)[i];
  }
  for (uint32_t i = 0; i < mHist2D->size(); i++) {
    *mHist2D_pos[i] = &(*mHist2D)[i];
  }
  for (uint32_t i = 0; i < mHist1Dd->size(); i++) {
    *mHist1Dd_pos[i] = &(*mHist1Dd)[i];
  }
  for (uint32_t i = 0; i < mHistGraph->size(); i++) {
    *mHistGraph_pos[i] = &(*mHistGraph)[i];
  }

  return 0;
}
562
// Attach externally owned histogram vectors (e.g. provided by the QC
// framework) and register the QA histograms against them.
// tasks == -1 selects taskDefaultPostprocess. Returns 0 on success, 1 if
// histogram registration fails; throws if the QA was already initialized
// internally or with a different task set.
int32_t GPUQA::loadHistograms(std::vector<TH1F>& i1, std::vector<TH2F>& i2, std::vector<TH1D>& i3, std::vector<TGraphAsymmErrors>& i4, int32_t tasks)
{
  if (tasks == -1) {
    tasks = taskDefaultPostprocess;
  }
  if (mQAInitialized && (!mHaveExternalHists || tasks != mQATasks)) {
    throw std::runtime_error("QA not initialized or initialized with different task array");
  }
  mHist1D = &i1;
  mHist2D = &i2;
  mHist1Dd = &i3;
  mHistGraph = &i4;
  // Positions are rebuilt by InitQACreateHistograms below.
  mHist1D_pos.clear();
  mHist2D_pos.clear();
  mHist1Dd_pos.clear();
  mHistGraph_pos.clear();
  mHaveExternalHists = true;
  if (mConfig.noMC) {
    // Without MC information only the MC-independent tasks remain.
    tasks &= tasksNoQC;
  }
  mQATasks = tasks;
  if (InitQACreateHistograms()) {
    return 1;
  }
  mQAInitialized = true;
  return 0;
}
590
591void GPUQA::DumpO2MCData(const char* filename) const
592{
593 FILE* fp = fopen(filename, "w+b");
594 if (fp == nullptr) {
595 return;
596 }
597 uint32_t n = mMCInfos.size();
598 fwrite(&n, sizeof(n), 1, fp);
599 fwrite(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp);
600 n = mMCInfosCol.size();
601 fwrite(&n, sizeof(n), 1, fp);
602 fwrite(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp);
603 n = mMCEventOffset.size();
604 fwrite(&n, sizeof(n), 1, fp);
605 fwrite(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp);
606 fclose(fp);
607}
608
609int32_t GPUQA::ReadO2MCData(const char* filename)
610{
611 FILE* fp = fopen(filename, "rb");
612 if (fp == nullptr) {
613 return 1;
614 }
615 uint32_t n;
616 uint32_t x;
617 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
618 fclose(fp);
619 return 1;
620 }
621 mMCInfos.resize(n);
622 if (fread(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp) != n) {
623 fclose(fp);
624 return 1;
625 }
626 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
627 fclose(fp);
628 return 1;
629 }
630 mMCInfosCol.resize(n);
631 if (fread(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp) != n) {
632 fclose(fp);
633 return 1;
634 }
635 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
636 fclose(fp);
637 return 1;
638 }
639 mMCEventOffset.resize(n);
640 if (fread(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp) != n) {
641 fclose(fp);
642 return 1;
643 }
644 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
645 printf("Read %ld bytes MC Infos\n", ftell(fp));
646 }
647 fclose(fp);
648 if (mTracking) {
649 CopyO2MCtoIOPtr(&mTracking->mIOPtrs);
650 }
651 return 0;
652}
653
654void GPUQA::CopyO2MCtoIOPtr(GPUTrackingInOutPointers* ptr)
655{
656 ptr->mcInfosTPC = mMCInfos.data();
657 ptr->nMCInfosTPC = mMCInfos.size();
658 ptr->mcInfosTPCCol = mMCInfosCol.data();
659 ptr->nMCInfosTPCCol = mMCInfosCol.size();
660}
661
// Read O2 MC kinematics (tracks and TPC track references) from
// collisioncontext.root into the local mMCInfos / mMCInfosCol /
// mMCEventOffset caches. Only available with GPUCA_O2_LIB; optionally
// publishes the result to the given IO pointer structure.
void GPUQA::InitO2MCData(GPUTrackingInOutPointers* updateIOPtr)
{
#ifdef GPUCA_O2_LIB
  if (!mO2MCDataLoaded) {
    HighResTimer timer;
    if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
      GPUInfo("Start reading O2 Track MC information");
      timer.Start();
    }
    // Tracks created later than this (in simulation time) count as secondaries.
    static constexpr float PRIM_MAX_T = 0.01f;

    o2::steer::MCKinematicsReader mcReader("collisioncontext.root");
    std::vector<int32_t> refId;

    auto dc = o2::steer::DigitizationContext::loadFromFile("collisioncontext.root");
    auto evrec = dc->getEventRecords();

    // Build the flat collision index: mMCEventOffset[source] + event.
    uint32_t nSimSources = mcReader.getNSources();
    mMCEventOffset.resize(nSimSources);
    uint32_t nSimTotalEvents = 0;
    uint32_t nSimTotalTracks = 0; // NOTE(review): not used in the visible code — confirm against full source
    for (uint32_t i = 0; i < nSimSources; i++) {
      mMCEventOffset[i] = nSimTotalEvents;
      nSimTotalEvents += mcReader.getNEvents(i);
    }

    mMCInfosCol.resize(nSimTotalEvents);
    for (int32_t iSim = 0; iSim < mcReader.getNSources(); iSim++) {
      for (int32_t i = 0; i < mcReader.getNEvents(iSim); i++) {
        // NOTE(review): evrec is indexed by the per-source event number i only,
        // not by a source-global index — confirm intended for multiple sources.
        auto ir = evrec[i];

        const std::vector<o2::MCTrack>& tracks = mcReader.getTracks(iSim, i);
        const std::vector<o2::TrackReference>& trackRefs = mcReader.getTrackRefsByEvent(iSim, i);

        // For each MC track remember its first TPC track reference (if any).
        refId.resize(tracks.size());
        std::fill(refId.begin(), refId.end(), -1);
        for (uint32_t j = 0; j < trackRefs.size(); j++) {
          if (trackRefs[j].getDetectorId() == o2::detectors::DetID::TPC) {
            int32_t trkId = trackRefs[j].getTrackID();
            if (refId[trkId] == -1) {
              refId[trkId] = j;
            }
          }
        }
        mMCInfosCol[mMCEventOffset[iSim] + i].first = mMCInfos.size();
        mMCInfosCol[mMCEventOffset[iSim] + i].num = tracks.size();
        mMCInfos.resize(mMCInfos.size() + tracks.size());
        for (uint32_t j = 0; j < tracks.size(); j++) {
          auto& info = mMCInfos[mMCInfosCol[mMCEventOffset[iSim] + i].first + j];
          const auto& trk = tracks[j];
          TParticlePDG* particle = TDatabasePDG::Instance()->GetParticle(trk.GetPdgCode());
          // Map the PDG code onto the QA's internal particle ids
          // (0=e, 1=mu, 2=pi, 3=K, 4=p, -1=other).
          Int_t pid = -1;
          if (abs(trk.GetPdgCode()) == kElectron) {
            pid = 0;
          }
          if (abs(trk.GetPdgCode()) == kMuonMinus) {
            pid = 1;
          }
          if (abs(trk.GetPdgCode()) == kPiPlus) {
            pid = 2;
          }
          if (abs(trk.GetPdgCode()) == kKPlus) {
            pid = 3;
          }
          if (abs(trk.GetPdgCode()) == kProton) {
            pid = 4;
          }

          info.charge = particle ? particle->Charge() : 0;
          info.prim = trk.T() < PRIM_MAX_T;
          // Flag tracks with at least one primary-like daughter.
          info.primDaughters = 0;
          if (trk.getFirstDaughterTrackId() != -1) {
            for (int32_t k = trk.getFirstDaughterTrackId(); k <= trk.getLastDaughterTrackId(); k++) {
              if (tracks[k].T() < PRIM_MAX_T) {
                info.primDaughters = 1;
                break;
              }
            }
          }
          info.pid = pid;
          // NOTE(review): timebin is computed from the event record in lines
          // elided from this listing — confirm against the full source.
          info.t0 = timebin;
          if (refId[j] >= 0) {
            // Kinematics at the first TPC track reference.
            const auto& trkRef = trackRefs[refId[j]];
            info.x = trkRef.X();
            info.y = trkRef.Y();
            info.z = trkRef.Z();
            info.pX = trkRef.Px();
            info.pY = trkRef.Py();
            info.pZ = trkRef.Pz();
            info.genRadius = std::sqrt(trk.GetStartVertexCoordinatesX() * trk.GetStartVertexCoordinatesX() + trk.GetStartVertexCoordinatesY() * trk.GetStartVertexCoordinatesY() + trk.GetStartVertexCoordinatesZ() * trk.GetStartVertexCoordinatesZ());
          } else {
            // No TPC reference: zero out the kinematics.
            info.x = info.y = info.z = info.pX = info.pY = info.pZ = 0;
            info.genRadius = 0;
          }
        }
      }
    }
    if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
      GPUInfo("Finished reading O2 Track MC information (%f seconds)", timer.GetCurrentElapsedTime());
    }
    mO2MCDataLoaded = true;
  }
  if (updateIOPtr) {
    CopyO2MCtoIOPtr(updateIOPtr);
  }
#endif
}
771
772int32_t GPUQA::InitQA(int32_t tasks)
773{
774 if (mQAInitialized) {
775 throw std::runtime_error("QA already initialized");
776 }
777 if (tasks == -1) {
778 tasks = taskDefault;
779 }
780
781 mHist1D = new std::vector<TH1F>;
782 mHist2D = new std::vector<TH2F>;
783 mHist1Dd = new std::vector<TH1D>;
784 mHistGraph = new std::vector<TGraphAsymmErrors>;
785 if (mConfig.noMC) {
786 tasks &= tasksNoQC;
787 }
788 mQATasks = tasks;
789
790 if (mTracking->GetProcessingSettings().qcRunFraction != 100.f && mQATasks != taskClusterCounts) {
791 throw std::runtime_error("QA with qcRunFraction only supported for taskClusterCounts");
792 }
793
794 if (mTracking) {
795 mClNative = mTracking->mIOPtrs.clustersNative;
796 }
797
798 if (InitQACreateHistograms()) {
799 return 1;
800 }
801
802 if (mConfig.enableLocalOutput) {
803 mkdir("plots", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
804 }
805
806#ifdef GPUCA_O2_LIB
807 if (!mConfig.noMC) {
808 InitO2MCData(mTracking ? &mTracking->mIOPtrs : nullptr);
809 }
810#endif
811
812 if (mConfig.matchMCLabels.size()) {
813 uint32_t nFiles = mConfig.matchMCLabels.size();
814 std::vector<std::unique_ptr<TFile>> files;
815 std::vector<std::vector<std::vector<int32_t>>*> labelsBuffer(nFiles);
816 std::vector<std::vector<std::vector<int32_t>>*> effBuffer(nFiles);
817 for (uint32_t i = 0; i < nFiles; i++) {
818 files.emplace_back(std::make_unique<TFile>(mConfig.matchMCLabels[i].c_str()));
819 labelsBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcLabelBuffer");
820 effBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcEffBuffer");
821 if (labelsBuffer[i] == nullptr || effBuffer[i] == nullptr) {
822 GPUError("Error opening / reading from labels file %u/%s: %p %p", i, mConfig.matchMCLabels[i].c_str(), (void*)labelsBuffer[i], (void*)effBuffer[i]);
823 exit(1);
824 }
825 }
826
827 mGoodTracks.resize(labelsBuffer[0]->size());
828 mGoodHits.resize(labelsBuffer[0]->size());
829 for (uint32_t iEvent = 0; iEvent < labelsBuffer[0]->size(); iEvent++) {
830 std::vector<bool> labelsOK((*effBuffer[0])[iEvent].size());
831 for (uint32_t k = 0; k < (*effBuffer[0])[iEvent].size(); k++) {
832 labelsOK[k] = false;
833 for (uint32_t l = 0; l < nFiles; l++) {
834 if ((*effBuffer[0])[iEvent][k] != (*effBuffer[l])[iEvent][k]) {
835 labelsOK[k] = true;
836 break;
837 }
838 }
839 }
840 mGoodTracks[iEvent].resize((*labelsBuffer[0])[iEvent].size());
841 for (uint32_t k = 0; k < (*labelsBuffer[0])[iEvent].size(); k++) {
842 if ((*labelsBuffer[0])[iEvent][k] == MC_LABEL_INVALID) {
843 continue;
844 }
845 mGoodTracks[iEvent][k] = labelsOK[abs((*labelsBuffer[0])[iEvent][k])];
846 }
847 }
848 }
849 mQAInitialized = true;
850 return 0;
851}
852
853void GPUQA::RunQA(bool matchOnly, const std::vector<o2::tpc::TrackTPC>* tracksExternal, const std::vector<o2::MCCompLabel>* tracksExtMC, const o2::tpc::ClusterNativeAccess* clNative)
854{
855 if (!mQAInitialized) {
856 throw std::runtime_error("QA not initialized");
857 }
858 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
859 GPUInfo("Running QA - Mask %d, Efficiency %d, Resolution %d, Pulls %d, Cluster Attachment %d, Track Statistics %d, Cluster Counts %d", mQATasks, (int32_t)(mQATasks & taskTrackingEff), (int32_t)(mQATasks & taskTrackingRes), (int32_t)(mQATasks & taskTrackingResPull), (int32_t)(mQATasks & taskClusterAttach), (int32_t)(mQATasks & taskTrackStatistics), (int32_t)(mQATasks & taskClusterCounts));
860 }
861 if (!clNative && mTracking) {
862 clNative = mTracking->mIOPtrs.clustersNative;
863 }
864 mClNative = clNative;
865
866#ifdef GPUCA_TPC_GEOMETRY_O2
867 uint32_t nSimEvents = GetNMCCollissions();
868 if (mTrackMCLabelsReverse.size() < nSimEvents) {
869 mTrackMCLabelsReverse.resize(nSimEvents);
870 }
871 if (mRecTracks.size() < nSimEvents) {
872 mRecTracks.resize(nSimEvents);
873 }
874 if (mFakeTracks.size() < nSimEvents) {
875 mFakeTracks.resize(nSimEvents);
876 }
877 if (mMCParam.size() < nSimEvents) {
878 mMCParam.resize(nSimEvents);
879 }
880#endif
881
882 // Initialize Arrays
883 uint32_t nReconstructedTracks = 0;
884 if (tracksExternal) {
885#ifdef GPUCA_O2_LIB
886 nReconstructedTracks = tracksExternal->size();
887#endif
888 } else {
889 nReconstructedTracks = mTracking->mIOPtrs.nMergedTracks;
890 }
891 mTrackMCLabels.resize(nReconstructedTracks);
892 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
893 mTrackMCLabelsReverse[iCol].resize(GetNMCTracks(iCol));
894 mRecTracks[iCol].resize(GetNMCTracks(iCol));
895 mFakeTracks[iCol].resize(GetNMCTracks(iCol));
896 mMCParam[iCol].resize(GetNMCTracks(iCol));
897 memset(mRecTracks[iCol].data(), 0, mRecTracks[iCol].size() * sizeof(mRecTracks[iCol][0]));
898 memset(mFakeTracks[iCol].data(), 0, mFakeTracks[iCol].size() * sizeof(mFakeTracks[iCol][0]));
899 for (size_t i = 0; i < mTrackMCLabelsReverse[iCol].size(); i++) {
900 mTrackMCLabelsReverse[iCol][i] = -1;
901 }
902 }
903 if (mQATasks & taskClusterAttach && GetNMCLabels()) {
904 mClusterParam.resize(GetNMCLabels());
905 memset(mClusterParam.data(), 0, mClusterParam.size() * sizeof(mClusterParam[0]));
906 }
907 HighResTimer timer;
908
909 mNEvents++;
910 if (mConfig.writeMCLabels) {
911 mcEffBuffer.resize(mNEvents);
912 mcLabelBuffer.resize(mNEvents);
913 mcEffBuffer[mNEvents - 1].resize(GetNMCTracks(0));
914 mcLabelBuffer[mNEvents - 1].resize(nReconstructedTracks);
915 }
916
917 bool mcAvail = mcPresent() || tracksExtMC;
918
919 if (mcAvail) {
920 // Assign Track MC Labels
921 timer.Start();
922 if (tracksExternal) {
923#ifdef GPUCA_O2_LIB
924 for (uint32_t i = 0; i < tracksExternal->size(); i++) {
925 mTrackMCLabels[i] = (*tracksExtMC)[i];
926 }
927#endif
928 } else {
929 tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nReconstructedTracks, (QA_DEBUG == 0) ? 32 : nReconstructedTracks), [&](const tbb::blocked_range<uint32_t>& range) {
930 auto acc = GPUTPCTrkLbl<true, mcLabelI_t>(GetClusterLabels(), 1.f - mConfig.recThreshold);
931 for (auto i = range.begin(); i < range.end(); i++) {
932 acc.reset();
933 int32_t nClusters = 0;
934 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
935 std::vector<mcLabel_t> labels;
936 for (uint32_t k = 0; k < track.NClusters(); k++) {
937 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
938 continue;
939 }
940 nClusters++;
941 uint32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
942 if (hitId >= GetNMCLabels()) {
943 GPUError("Invalid hit id %u > %d (nClusters %d)", hitId, GetNMCLabels(), mTracking->mIOPtrs.clustersNative ? mTracking->mIOPtrs.clustersNative->nClustersTotal : 0);
944 throw std::runtime_error("qa error");
945 }
946 acc.addLabel(hitId);
947 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
948 if (GetMCLabelID(hitId, j) >= (int32_t)GetNMCTracks(GetMCLabelCol(hitId, j))) {
949 GPUError("Invalid label %d > %d (hit %d, label %d, col %d)", GetMCLabelID(hitId, j), GetNMCTracks(GetMCLabelCol(hitId, j)), hitId, j, (int32_t)GetMCLabelCol(hitId, j));
950 throw std::runtime_error("qa error");
951 }
952 if (GetMCLabelID(hitId, j) >= 0) {
953 if (QA_DEBUG >= 3 && track.OK()) {
954 GPUInfo("Track %d Cluster %u Label %d: %d (%f)", i, k, j, GetMCLabelID(hitId, j), GetMCLabelWeight(hitId, j));
955 }
956 }
957 }
958 }
959
960 float maxweight, sumweight;
961 int32_t maxcount;
962 auto maxLabel = acc.computeLabel(&maxweight, &sumweight, &maxcount);
963 mTrackMCLabels[i] = maxLabel;
964 if (QA_DEBUG && track.OK() && GetNMCTracks(maxLabel) > (uint32_t)maxLabel.getTrackID()) {
965 const mcInfo_t& mc = GetMCTrack(maxLabel);
966 GPUInfo("Track %d label %d (fake %d) weight %f clusters %d (fitted %d) (%f%% %f%%) Pt %f", i, maxLabel.getTrackID(), (int32_t)(maxLabel.isFake()), maxweight, nClusters, track.NClustersFitted(), 100.f * maxweight / sumweight, 100.f * (float)maxcount / (float)nClusters,
967 std::sqrt(mc.pX * mc.pX + mc.pY * mc.pY));
968 }
969 }
970 });
971 }
972 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
973 GPUInfo("QA Time: Assign Track Labels:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
974 }
975
976 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
977 const GPUTPCGMMergedTrack* track = mTracking ? &mTracking->mIOPtrs.mergedTracks[i] : nullptr;
978 mcLabelI_t label = mTrackMCLabels[i];
979 if (mQATasks & taskClusterAttach) {
980 // fill cluster attachment status
981 if (!track->OK()) {
982 continue;
983 }
984 if (!mTrackMCLabels[i].isValid()) {
985 for (uint32_t k = 0; k < track->NClusters(); k++) {
986 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
987 continue;
988 }
989 mClusterParam[mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num].fakeAttached++;
990 }
991 continue;
992 }
993 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
994 for (uint32_t k = 0; k < track->NClusters(); k++) {
995 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
996 continue;
997 }
998 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num;
999 bool correct = false;
1000 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1001 if (label == GetMCLabel(hitId, j)) {
1002 correct = true;
1003 break;
1004 }
1005 }
1006 if (correct) {
1007 mClusterParam[hitId].attached++;
1008 } else {
1009 mClusterParam[hitId].fakeAttached++;
1010 }
1011 }
1012 }
1013 }
1014
1015 if (mTrackMCLabels[i].isFake()) {
1016 (GetMCTrackObj(mFakeTracks, label))++;
1017 } else if (tracksExternal || !track->MergedLooper()) {
1018 GetMCTrackObj(mRecTracks, label)++;
1019 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1020 int32_t& revLabel = GetMCTrackObj(mTrackMCLabelsReverse, label);
1021 if (tracksExternal) {
1022#ifdef GPUCA_O2_LIB
1023 if (revLabel == -1 || fabsf((*tracksExternal)[i].getZ()) < fabsf((*tracksExternal)[revLabel].getZ())) {
1024 revLabel = i;
1025 }
1026#endif
1027 } else {
1028 const auto* trks = mTracking->mIOPtrs.mergedTracks;
1029 bool comp;
1030 if (revLabel == -1) {
1031 comp = true;
1032 } else if (mTracking->GetParam().par.earlyTpcTransform) {
1033 comp = fabsf(trks[i].GetParam().GetZ() + trks[i].GetParam().GetTZOffset()) < fabsf(trks[revLabel].GetParam().GetZ() + trks[revLabel].GetParam().GetTZOffset());
1034 } else {
1035 float shift1 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[i].CSide() * GPUChainTracking::NSECTORS / 2, trks[i].GetParam().GetTZOffset());
1036 float shift2 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[revLabel].CSide() * GPUChainTracking::NSECTORS / 2, trks[revLabel].GetParam().GetTZOffset());
1037 comp = fabsf(trks[i].GetParam().GetZ() + shift1) < fabsf(trks[revLabel].GetParam().GetZ() + shift2);
1038 }
1039 if (revLabel == -1 || !trks[revLabel].OK() || (trks[i].OK() && comp)) {
1040 revLabel = i;
1041 }
1042 }
1043 }
1044 }
1045 }
1046 if ((mQATasks & taskClusterAttach) && mTracking->mIOPtrs.mergedTrackHitAttachment) {
1047 // fill cluster adjacent status
1048 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1049 if (mClusterParam[i].attached == 0 && mClusterParam[i].fakeAttached == 0) {
1050 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1052 int32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1053 mcLabelI_t trackL = mTrackMCLabels[track];
1054 bool fake = true;
1055 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1056 // GPUInfo("Attach %x Track %d / %d:%d", attach, track, j, GetMCLabelID(i, j));
1057 if (trackL == GetMCLabel(i, j)) {
1058 fake = false;
1059 break;
1060 }
1061 }
1062 if (fake) {
1063 mClusterParam[i].fakeAdjacent++;
1064 } else {
1065 mClusterParam[i].adjacent++;
1066 }
1067 }
1068 }
1069 }
1070 }
1071
1072 if (mConfig.matchMCLabels.size()) {
1073 mGoodHits[mNEvents - 1].resize(GetNMCLabels());
1074 std::vector<bool> allowMCLabels(GetNMCTracks(0));
1075 for (uint32_t k = 0; k < GetNMCTracks(0); k++) {
1076 allowMCLabels[k] = false;
1077 }
1078 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1079 if (!mGoodTracks[mNEvents - 1][i]) {
1080 continue;
1081 }
1082 if (mConfig.matchDisplayMinPt > 0) {
1083 if (!mTrackMCLabels[i].isValid()) {
1084 continue;
1085 }
1086 const mcInfo_t& info = GetMCTrack(mTrackMCLabels[i]);
1087 if (info.pX * info.pX + info.pY * info.pY < mConfig.matchDisplayMinPt * mConfig.matchDisplayMinPt) {
1088 continue;
1089 }
1090 }
1091
1092 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1093 for (uint32_t j = 0; j < track.NClusters(); j++) {
1094 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + j].num;
1095 if (GetMCLabelNID(hitId)) {
1096 int32_t mcID = GetMCLabelID(hitId, 0);
1097 if (mcID >= 0) {
1098 allowMCLabels[mcID] = true;
1099 }
1100 }
1101 }
1102 }
1103 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1104 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1105 int32_t mcID = GetMCLabelID(i, j);
1106 if (mcID >= 0 && allowMCLabels[mcID]) {
1107 mGoodHits[mNEvents - 1][i] = true;
1108 }
1109 }
1110 }
1111 }
1112 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1113 GPUInfo("QA Time: Cluster attach status:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1114 }
1115
1116 if (matchOnly) {
1117 return;
1118 }
1119
1120 // Recompute fNWeightCls (might have changed after merging events into timeframes)
1121 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1122 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1123 mMCParam[iCol][i].nWeightCls = 0.;
1124 }
1125 }
1126 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1127 float weightTotal = 0.f;
1128 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1129 if (GetMCLabelID(i, j) >= 0) {
1130 weightTotal += GetMCLabelWeight(i, j);
1131 }
1132 }
1133 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1134 if (GetMCLabelID(i, j) >= 0) {
1135 GetMCTrackObj(mMCParam, GetMCLabel(i, j)).nWeightCls += GetMCLabelWeight(i, j) / weightTotal;
1136 }
1137 }
1138 }
1139 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1140 GPUInfo("QA Time: Compute cluster label weights:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1141 }
1142
1143 // Compute MC Track Parameters for MC Tracks
1144 tbb::parallel_for<uint32_t>(0, GetNMCCollissions(), [&](auto iCol) {
1145 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1146 const mcInfo_t& info = GetMCTrack(i, iCol);
1147 additionalMCParameters& mc2 = mMCParam[iCol][i];
1148 mc2.pt = std::sqrt(info.pX * info.pX + info.pY * info.pY);
1149 mc2.phi = M_PI + std::atan2(-info.pY, -info.pX);
1150 float p = info.pX * info.pX + info.pY * info.pY + info.pZ * info.pZ;
1151 if (p < 1e-18) {
1152 mc2.theta = mc2.eta = 0.f;
1153 } else {
1154 mc2.theta = info.pZ == 0 ? (M_PI / 2) : (std::acos(info.pZ / std::sqrt(p)));
1155 mc2.eta = -std::log(std::tan(0.5 * mc2.theta));
1156 }
1157 if (mConfig.writeMCLabels) {
1158 std::vector<int32_t>& effBuffer = mcEffBuffer[mNEvents - 1];
1159 effBuffer[i] = mRecTracks[iCol][i] * 1000 + mFakeTracks[iCol][i];
1160 }
1161 } // clang-format off
1162 }, tbb::simple_partitioner()); // clang-format on
1163 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1164 GPUInfo("QA Time: Compute track mc parameters:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1165 }
1166
1167 // Fill Efficiency Histograms
1168 if (mQATasks & taskTrackingEff) {
1169 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1170 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1171 if ((mMCTrackMin != -1 && (int32_t)i < mMCTrackMin) || (mMCTrackMax != -1 && (int32_t)i >= mMCTrackMax)) {
1172 continue;
1173 }
1174 const mcInfo_t& info = GetMCTrack(i, iCol);
1175 const additionalMCParameters& mc2 = mMCParam[iCol][i];
1176 if (mc2.nWeightCls == 0.f) {
1177 continue;
1178 }
1179 const float& mcpt = mc2.pt;
1180 const float& mcphi = mc2.phi;
1181 const float& mceta = mc2.eta;
1182
1183 if (info.primDaughters) {
1184 continue;
1185 }
1186 if (mc2.nWeightCls < MIN_WEIGHT_CLS) {
1187 continue;
1188 }
1189 int32_t findable = mc2.nWeightCls >= FINDABLE_WEIGHT_CLS;
1190 if (info.pid < 0) {
1191 continue;
1192 }
1193 if (info.charge == 0.f) {
1194 continue;
1195 }
1196 if (mConfig.filterCharge && info.charge * mConfig.filterCharge < 0) {
1197 continue;
1198 }
1199 if (mConfig.filterPID >= 0 && info.pid != mConfig.filterPID) {
1200 continue;
1201 }
1202
1203 if (fabsf(mceta) > ETA_MAX || mcpt < PT_MIN || mcpt > PT_MAX) {
1204 continue;
1205 }
1206
1207 float alpha = std::atan2(info.y, info.x);
1208 alpha /= M_PI / 9.f;
1209 alpha = std::floor(alpha);
1210 alpha *= M_PI / 9.f;
1211 alpha += M_PI / 18.f;
1212
1213 float c = std::cos(alpha);
1214 float s = std::sin(alpha);
1215 float localY = -info.x * s + info.y * c;
1216
1217 if (mConfig.dumpToROOT) {
1218 static auto effdump = GPUROOTDump<TNtuple>::getNew("eff", "alpha:x:y:z:mcphi:mceta:mcpt:rec:fake:findable:prim");
1219 float localX = info.x * c + info.y * s;
1220 effdump.Fill(alpha, localX, localY, info.z, mcphi, mceta, mcpt, mRecTracks[iCol][i], mFakeTracks[iCol][i], findable, info.prim);
1221 }
1222
1223 for (int32_t j = 0; j < 4; j++) {
1224 for (int32_t k = 0; k < 2; k++) {
1225 if (k == 0 && findable == 0) {
1226 continue;
1227 }
1228
1229 int32_t val = (j == 0) ? (mRecTracks[iCol][i] ? 1 : 0) : (j == 1) ? (mRecTracks[iCol][i] ? mRecTracks[iCol][i] - 1 : 0) : (j == 2) ? mFakeTracks[iCol][i] : 1;
1230 if (val == 0) {
1231 continue;
1232 }
1233
1234 for (int32_t l = 0; l < 5; l++) {
1235 if (info.prim && mcpt < PT_MIN_PRIM) {
1236 continue;
1237 }
1238 if (l != 3 && fabsf(mceta) > ETA_MAX2) {
1239 continue;
1240 }
1241 if (l < 4 && mcpt < 1.f / mConfig.qpt) {
1242 continue;
1243 }
1244
1245 float pos = l == 0 ? localY : l == 1 ? info.z : l == 2 ? mcphi : l == 3 ? mceta : mcpt;
1246
1247 mEff[j][k][!info.prim][l]->Fill(pos, val);
1248 }
1249 }
1250 }
1251 }
1252 }
1253 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1254 GPUInfo("QA Time: Fill efficiency histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1255 }
1256 }
1257
1258 // Fill Resolution Histograms
1259 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
1260 GPUTPCGMPropagator prop;
1261 prop.SetMaxSinPhi(.999);
1262 prop.SetMaterialTPC();
1263 prop.SetPolynomialField(&mParam->polynomialField);
1264
1265 for (uint32_t i = 0; i < mTrackMCLabels.size(); i++) {
1266 if (mConfig.writeMCLabels) {
1267 std::vector<int32_t>& labelBuffer = mcLabelBuffer[mNEvents - 1];
1268 labelBuffer[i] = mTrackMCLabels[i].getTrackID();
1269 }
1270 if (mTrackMCLabels[i].isFake()) {
1271 continue;
1272 }
1273 const mcInfo_t& mc1 = GetMCTrack(mTrackMCLabels[i]);
1274 const additionalMCParameters& mc2 = GetMCTrackObj(mMCParam, mTrackMCLabels[i]);
1275
1276 if (mc1.primDaughters) {
1277 continue;
1278 }
1279 if (!tracksExternal) {
1280 if (!mTracking->mIOPtrs.mergedTracks[i].OK()) {
1281 continue;
1282 }
1283 if (mTracking->mIOPtrs.mergedTracks[i].MergedLooper()) {
1284 continue;
1285 }
1286 }
1287 if ((mMCTrackMin != -1 && mTrackMCLabels[i].getTrackID() < mMCTrackMin) || (mMCTrackMax != -1 && mTrackMCLabels[i].getTrackID() >= mMCTrackMax)) {
1288 continue;
1289 }
1290 if (fabsf(mc2.eta) > ETA_MAX || mc2.pt < PT_MIN || mc2.pt > PT_MAX) {
1291 continue;
1292 }
1293 if (mc1.charge == 0.f) {
1294 continue;
1295 }
1296 if (mc1.pid < 0) {
1297 continue;
1298 }
1299 if (mConfig.filterCharge && mc1.charge * mConfig.filterCharge < 0) {
1300 continue;
1301 }
1302 if (mConfig.filterPID >= 0 && mc1.pid != mConfig.filterPID) {
1303 continue;
1304 }
1305 if (mc2.nWeightCls < MIN_WEIGHT_CLS) {
1306 continue;
1307 }
1308 if (mConfig.resPrimaries == 1 && !mc1.prim) {
1309 continue;
1310 } else if (mConfig.resPrimaries == 2 && mc1.prim) {
1311 continue;
1312 }
1313 if (GetMCTrackObj(mTrackMCLabelsReverse, mTrackMCLabels[i]) != (int32_t)i) {
1314 continue;
1315 }
1316
1318 float alpha = 0.f;
1319 int32_t side;
1320 if (tracksExternal) {
1321#ifdef GPUCA_O2_LIB
1322 for (int32_t k = 0; k < 5; k++) {
1323 param.Par()[k] = (*tracksExternal)[i].getParams()[k];
1324 }
1325 for (int32_t k = 0; k < 15; k++) {
1326 param.Cov()[k] = (*tracksExternal)[i].getCov()[k];
1327 }
1328 param.X() = (*tracksExternal)[i].getX();
1329 param.TZOffset() = (*tracksExternal)[i].getTime0();
1330 alpha = (*tracksExternal)[i].getAlpha();
1331 side = (*tracksExternal)[i].hasBothSidesClusters() ? 2 : ((*tracksExternal)[i].hasCSideClusters() ? 1 : 0);
1332#endif
1333 } else {
1334 param = mTracking->mIOPtrs.mergedTracks[i].GetParam();
1335 alpha = mTracking->mIOPtrs.mergedTracks[i].GetAlpha();
1336 side = mTracking->mIOPtrs.mergedTracks[i].CCE() ? 2 : (mTracking->mIOPtrs.mergedTracks[i].CSide() ? 1 : 0);
1337 }
1338
1339 float mclocal[4]; // Rotated x,y,Px,Py mc-coordinates - the MC data should be rotated since the track is propagated best along x
1340 float c = std::cos(alpha);
1341 float s = std::sin(alpha);
1342 float x = mc1.x;
1343 float y = mc1.y;
1344 mclocal[0] = x * c + y * s;
1345 mclocal[1] = -x * s + y * c;
1346 float px = mc1.pX;
1347 float py = mc1.pY;
1348 mclocal[2] = px * c + py * s;
1349 mclocal[3] = -px * s + py * c;
1350
1351 if (mclocal[0] < TRACK_EXPECTED_REFERENCE_X - 3) {
1352 continue;
1353 }
1354 if (mclocal[0] > param.GetX() + 20) {
1355 continue;
1356 }
1357 if (param.GetX() > mConfig.maxResX) {
1358 continue;
1359 }
1360
1361 auto getdz = [this, &param, &mc1, &side, tracksExternal]() {
1362 if (tracksExternal) {
1363 return param.GetZ();
1364 }
1365 if (!mParam->continuousMaxTimeBin) {
1366 return param.GetZ() - mc1.z;
1367 }
1368#ifdef GPUCA_TPC_GEOMETRY_O2
1369 if (!mParam->par.earlyTpcTransform) {
1370 float shift = side == 2 ? 0 : mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(side * GPUChainTracking::NSECTORS / 2, param.GetTZOffset() - mc1.t0);
1371 return param.GetZ() + shift - mc1.z;
1372 }
1373#endif
1374 return param.Z() + param.TZOffset() - mc1.z;
1375 };
1376
1377 prop.SetTrack(&param, alpha);
1378 bool inFlyDirection = 0;
1379 if (mConfig.strict) {
1380 const float dx = param.X() - std::max<float>(mclocal[0], TRACK_EXPECTED_REFERENCE_X_DEFAULT); // Limit distance check
1381 const float dy = param.Y() - mclocal[1];
1382 const float dz = getdz();
1383 if (dx * dx + dy * dy + dz * dz > 5.f * 5.f) {
1384 continue;
1385 }
1386 }
1387
1388 if (prop.PropagateToXAlpha(mclocal[0], alpha, inFlyDirection)) {
1389 continue;
1390 }
1391 if (fabsf(param.Y() - mclocal[1]) > (mConfig.strict ? 1.f : 4.f) || fabsf(getdz()) > (mConfig.strict ? 1.f : 4.f)) {
1392 continue;
1393 }
1394 float charge = mc1.charge > 0 ? 1.f : -1.f;
1395
1396 float deltaY = param.GetY() - mclocal[1];
1397 float deltaZ = getdz();
1398 float deltaPhiNative = param.GetSinPhi() - mclocal[3] / mc2.pt;
1399 float deltaPhi = std::asin(param.GetSinPhi()) - std::atan2(mclocal[3], mclocal[2]);
1400 float deltaLambdaNative = param.GetDzDs() - mc1.pZ / mc2.pt;
1401 float deltaLambda = std::atan(param.GetDzDs()) - std::atan2(mc1.pZ, mc2.pt);
1402 float deltaPtNative = (param.GetQPt() - charge / mc2.pt) * charge;
1403 float deltaPt = (fabsf(1.f / param.GetQPt()) - mc2.pt) / mc2.pt;
1404
1405 float paramval[5] = {mclocal[1], mc1.z, mc2.phi, mc2.eta, mc2.pt};
1406 float resval[5] = {deltaY, deltaZ, mConfig.nativeFitResolutions ? deltaPhiNative : deltaPhi, mConfig.nativeFitResolutions ? deltaLambdaNative : deltaLambda, mConfig.nativeFitResolutions ? deltaPtNative : deltaPt};
1407 float pullval[5] = {deltaY / std::sqrt(param.GetErr2Y()), deltaZ / std::sqrt(param.GetErr2Z()), deltaPhiNative / std::sqrt(param.GetErr2SinPhi()), deltaLambdaNative / std::sqrt(param.GetErr2DzDs()), deltaPtNative / std::sqrt(param.GetErr2QPt())};
1408
1409 for (int32_t j = 0; j < 5; j++) {
1410 for (int32_t k = 0; k < 5; k++) {
1411 if (k != 3 && fabsf(mc2.eta) > ETA_MAX2) {
1412 continue;
1413 }
1414 if (k < 4 && mc2.pt < 1.f / mConfig.qpt) {
1415 continue;
1416 }
1417 if (mQATasks & taskTrackingRes) {
1418 mRes2[j][k]->Fill(resval[j], paramval[k]);
1419 }
1420 if (mQATasks & taskTrackingResPull) {
1421 mPull2[j][k]->Fill(pullval[j], paramval[k]);
1422 }
1423 }
1424 }
1425 }
1426 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1427 GPUInfo("QA Time: Fill resolution histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1428 }
1429 }
1430
1431 if (mQATasks & taskClusterAttach) {
1432 // Fill cluster histograms
1433 for (uint32_t iTrk = 0; iTrk < nReconstructedTracks; iTrk++) {
1434 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[iTrk];
1435 if (!track.OK()) {
1436 continue;
1437 }
1438 if (!mTrackMCLabels[iTrk].isValid()) {
1439 for (uint32_t k = 0; k < track.NClusters(); k++) {
1440 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1441 continue;
1442 }
1443 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1444 float totalWeight = 0.;
1445 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1446 if (GetMCLabelID(hitId, j) >= 0 && GetMCTrackObj(mMCParam, GetMCLabel(hitId, j)).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1447 totalWeight += GetMCLabelWeight(hitId, j);
1448 }
1449 }
1450 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1452 if (totalWeight > 0) {
1453 float weight = 1.f / (totalWeight * (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached));
1454 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1455 mcLabelI_t label = GetMCLabel(hitId, j);
1456 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1457 float pt = GetMCTrackObj(mMCParam, label).pt;
1458 if (pt < PT_MIN_CLUST) {
1459 pt = PT_MIN_CLUST;
1460 }
1461 mClusters[CL_fake]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1462 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1463 if (GetMCTrackObj(mRecTracks, label)) {
1464 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1465 }
1466 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1467 if (protect || physics) {
1468 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1469 }
1470 if (physics) {
1471 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1472 }
1473 }
1474 }
1475 } else {
1476 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1477 mClusters[CL_fake]->Fill(0.f, weight);
1478 mClusters[CL_att_adj]->Fill(0.f, weight);
1479 mClusters[CL_all]->Fill(0.f, weight);
1480 mClusterCounts.nUnaccessible += weight;
1481 if (protect || physics) {
1482 mClusters[CL_prot]->Fill(0.f, weight);
1483 }
1484 if (physics) {
1485 mClusters[CL_physics]->Fill(0.f, weight);
1486 }
1487 }
1488 }
1489 continue;
1490 }
1491 mcLabelI_t label = mTrackMCLabels[iTrk];
1492 if (mMCTrackMin != -1 && (label.getTrackID() < mMCTrackMin || label.getTrackID() >= mMCTrackMax)) {
1493 continue;
1494 }
1495 for (uint32_t k = 0; k < track.NClusters(); k++) {
1496 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1497 continue;
1498 }
1499 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1500 float pt = GetMCTrackObj(mMCParam, label).pt;
1501 if (pt < PT_MIN_CLUST) {
1502 pt = PT_MIN_CLUST;
1503 }
1504 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1505 bool correct = false;
1506 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1507 if (label == GetMCLabel(hitId, j)) {
1508 correct = true;
1509 break;
1510 }
1511 }
1512 if (correct) {
1513 mClusters[CL_attached]->Fill(pt, weight);
1514 mClusters[CL_tracks]->Fill(pt, weight);
1515 } else {
1516 mClusters[CL_fake]->Fill(pt, weight);
1517 }
1518 mClusters[CL_att_adj]->Fill(pt, weight);
1519 mClusters[CL_all]->Fill(pt, weight);
1520 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1522 if (protect || physics) {
1523 mClusters[CL_prot]->Fill(pt, weight);
1524 }
1525 if (physics) {
1526 mClusters[CL_physics]->Fill(pt, weight);
1527 }
1528 }
1529 }
1530 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1531 if ((mMCTrackMin != -1 && GetMCLabelID(i, 0) < mMCTrackMin) || (mMCTrackMax != -1 && GetMCLabelID(i, 0) >= mMCTrackMax)) {
1532 continue;
1533 }
1534 if (mClusterParam[i].attached || mClusterParam[i].fakeAttached) {
1535 continue;
1536 }
1537 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1539 if (mClusterParam[i].adjacent) {
1540 int32_t label = mTracking->mIOPtrs.mergedTrackHitAttachment[i] & gputpcgmmergertypes::attachTrackMask;
1541 if (!mTrackMCLabels[label].isValid()) {
1542 float totalWeight = 0.;
1543 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1544 mcLabelI_t labelT = GetMCLabel(i, j);
1545 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1546 totalWeight += GetMCLabelWeight(i, j);
1547 }
1548 }
1549 float weight = 1.f / totalWeight;
1550 if (totalWeight > 0) {
1551 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1552 mcLabelI_t labelT = GetMCLabel(i, j);
1553 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1554 float pt = GetMCTrackObj(mMCParam, labelT).pt;
1555 if (pt < PT_MIN_CLUST) {
1556 pt = PT_MIN_CLUST;
1557 }
1558 if (GetMCTrackObj(mRecTracks, labelT)) {
1559 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1560 }
1561 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1562 mClusters[CL_fakeAdj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1563 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1564 if (protect || physics) {
1565 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1566 }
1567 if (physics) {
1568 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1569 }
1570 }
1571 }
1572 } else {
1573 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1574 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1575 mClusters[CL_all]->Fill(0.f, 1.f);
1576 mClusterCounts.nUnaccessible++;
1577 if (protect || physics) {
1578 mClusters[CL_prot]->Fill(0.f, 1.f);
1579 }
1580 if (physics) {
1581 mClusters[CL_physics]->Fill(0.f, 1.f);
1582 }
1583 }
1584 } else {
1585 float pt = GetMCTrackObj(mMCParam, mTrackMCLabels[label]).pt;
1586 if (pt < PT_MIN_CLUST) {
1587 pt = PT_MIN_CLUST;
1588 }
1589 mClusters[CL_att_adj]->Fill(pt, 1.f);
1590 mClusters[CL_tracks]->Fill(pt, 1.f);
1591 mClusters[CL_all]->Fill(pt, 1.f);
1592 if (protect || physics) {
1593 mClusters[CL_prot]->Fill(pt, 1.f);
1594 }
1595 if (physics) {
1596 mClusters[CL_physics]->Fill(pt, 1.f);
1597 }
1598 }
1599 } else {
1600 float totalWeight = 0.;
1601 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1602 mcLabelI_t labelT = GetMCLabel(i, j);
1603 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1604 totalWeight += GetMCLabelWeight(i, j);
1605 }
1606 }
1607 if (totalWeight > 0) {
1608 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1609 mcLabelI_t label = GetMCLabel(i, j);
1610 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1611 float pt = GetMCTrackObj(mMCParam, label).pt;
1612 if (pt < PT_MIN_CLUST) {
1613 pt = PT_MIN_CLUST;
1614 }
1615 float weight = GetMCLabelWeight(i, j) / totalWeight;
1616 if (mClusterParam[i].fakeAdjacent) {
1617 mClusters[CL_fakeAdj]->Fill(pt, weight);
1618 }
1619 if (mClusterParam[i].fakeAdjacent) {
1620 mClusters[CL_att_adj]->Fill(pt, weight);
1621 }
1622 if (GetMCTrackObj(mRecTracks, label)) {
1623 mClusters[CL_tracks]->Fill(pt, weight);
1624 }
1625 mClusters[CL_all]->Fill(pt, weight);
1626 if (protect || physics) {
1627 mClusters[CL_prot]->Fill(pt, weight);
1628 }
1629 if (physics) {
1630 mClusters[CL_physics]->Fill(pt, weight);
1631 }
1632 }
1633 }
1634 } else {
1635 if (mClusterParam[i].fakeAdjacent) {
1636 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1637 }
1638 if (mClusterParam[i].fakeAdjacent) {
1639 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1640 }
1641 mClusters[CL_all]->Fill(0.f, 1.f);
1642 mClusterCounts.nUnaccessible++;
1643 if (protect || physics) {
1644 mClusters[CL_prot]->Fill(0.f, 1.f);
1645 }
1646 if (physics) {
1647 mClusters[CL_physics]->Fill(0.f, 1.f);
1648 }
1649 }
1650 }
1651 }
1652
1653 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1654 GPUInfo("QA Time: Fill cluster histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1655 }
1656 }
1657 } else if (!mConfig.inputHistogramsOnly && !mConfig.noMC && (mQATasks & (taskTrackingEff | taskTrackingRes | taskTrackingResPull | taskClusterAttach))) {
1658 GPUWarning("No MC information available, only running partial TPC QA!");
1659 }
1660
1661 if (mQATasks & taskTrackStatistics) {
1662 // Fill track statistic histograms
1663 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1664 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1665 if (!track.OK()) {
1666 continue;
1667 }
1668 mTracks->Fill(1.f / fabsf(track.GetParam().GetQPt()));
1669 mNCl[0]->Fill(track.NClustersFitted());
1670 uint32_t nClCorrected = 0;
1671 int32_t lastSector = -1, lastRow = -1;
1672 const auto& trackClusters = mTracking->mIOPtrs.mergedTrackHits;
1673 for (uint32_t j = 0; j < track.NClusters(); j++) {
1674 if (trackClusters[track.FirstClusterRef() + j].state & GPUTPCGMMergedTrackHit::flagReject) {
1675 continue;
1676 }
1677 if (trackClusters[track.FirstClusterRef() + j].sector == lastSector && trackClusters[track.FirstClusterRef() + j].row == lastRow) {
1678 continue;
1679 }
1680 if (trackClusters[track.FirstClusterRef() + j].leg != trackClusters[track.FirstClusterRef() + track.NClusters() - 1].leg) {
1681 continue;
1682 }
1683 nClCorrected++;
1684 lastSector = trackClusters[track.FirstClusterRef() + j].sector;
1685 lastRow = trackClusters[track.FirstClusterRef() + j].sector;
1686 }
1687 mNCl[1]->Fill(nClCorrected);
1688 }
1689 if (mClNative && mTracking && mTracking->GetTPCTransformHelper()) {
1690 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1691 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
1692 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1693 const auto& cl = mClNative->clusters[i][j][k];
1694 float x, y, z;
1695 GPUTPCConvertImpl::convert(*mTracking->GetTPCTransformHelper()->getCorrMap(), mTracking->GetParam(), i, j, cl.getPad(), cl.getTime(), x, y, z);
1696 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1697 mClXY->Fill(x, y);
1698 }
1699 }
1700 }
1701 }
1702
1703 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1704 GPUInfo("QA Time: Fill track statistics:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1705 }
1706 }
1707
1708 uint32_t nCl = clNative ? clNative->nClustersTotal : mTracking->GetProcessors()->tpcMerger.NMaxClusters();
1709 mClusterCounts.nTotal += nCl;
1710 if (mQATasks & taskClusterCounts) {
1711 for (uint32_t i = 0; i < nCl; i++) {
1712 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1714
1715 if (mcAvail) {
1716 float totalWeight = 0, weight400 = 0, weight40 = 0;
1717 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1718 const auto& label = GetMCLabel(i, j);
1719 if (GetMCLabelID(label) >= 0) {
1720 totalWeight += GetMCLabelWeight(label);
1721 if (GetMCTrackObj(mMCParam, label).pt >= 0.4) {
1722 weight400 += GetMCLabelWeight(label);
1723 }
1724 if (GetMCTrackObj(mMCParam, label).pt <= 0.04) {
1725 weight40 += GetMCLabelWeight(label);
1726 }
1727 }
1728 }
1729 if (totalWeight > 0 && 10.f * weight400 >= totalWeight) {
1730 if (!unattached && !protect && !physics) {
1731 mClusterCounts.nFakeRemove400++;
1732 int32_t totalFake = weight400 < 0.9f * totalWeight;
1733 if (totalFake) {
1734 mClusterCounts.nFullFakeRemove400++;
1735 }
1736 /*printf("Fake removal (%d): Hit %7d, attached %d lowPt %d looper %d tube200 %d highIncl %d tube %d bad %d recPt %7.2f recLabel %6d", totalFake, i, (int32_t) (mClusterParam[i].attached || mClusterParam[i].fakeAttached),
1737 (int32_t) lowPt, (int32_t) ((attach & gputpcgmmergertypes::attachGoodLeg) == 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) && mev200),
1738 (int32_t) ((attach & gputpcgmmergertypes::attachHighIncl) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachGood) == 0),
1739 fabsf(qpt) > 0 ? 1.f / qpt : 0.f, id);
1740 for (int32_t j = 0;j < GetMCLabelNID(i);j++)
1741 {
1742 //if (GetMCLabelID(i, j) < 0) break;
1743 printf(" - label%d %6d weight %5d", j, GetMCLabelID(i, j), (int32_t) GetMCLabelWeight(i, j));
1744 if (GetMCLabelID(i, j) >= 0) printf(" - pt %7.2f", mMCParam[GetMCLabelID(i, j)].pt);
1745 else printf(" ");
1746 }
1747 printf("\n");*/
1748 }
1749 mClusterCounts.nAbove400++;
1750 }
1751 if (totalWeight > 0 && weight40 >= 0.9 * totalWeight) {
1752 mClusterCounts.nBelow40++;
1753 if (protect || physics) {
1754 mClusterCounts.nFakeProtect40++;
1755 }
1756 }
1757 }
1758 if (physics) {
1759 mClusterCounts.nPhysics++;
1760 }
1761 if (physics || protect) {
1762 mClusterCounts.nProt++;
1763 }
1764 if (unattached) {
1765 mClusterCounts.nUnattached++;
1766 }
1767 }
1768 }
1769
1770 // Process cluster count statistics
1771 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
1772 DoClusterCounts(nullptr);
1773 mClusterCounts = counts_t();
1774 }
1775
1776 if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
1777 GPUInfo("QA Time: Cluster Counts:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1778 }
1779
1780 if (mConfig.dumpToROOT) {
1781 if (!clNative || !mTracking || !mTracking->mIOPtrs.mergedTrackHitAttachment || !mTracking->mIOPtrs.mergedTracks) {
1782 throw std::runtime_error("Cannot dump non o2::tpc::clusterNative clusters, need also hit attachmend and GPU tracks");
1783 }
1784 uint32_t clid = 0;
1785 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1786 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
1787 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1788 const auto& cl = mClNative->clusters[i][j][k];
1789 uint32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[clid];
1790 float x = 0, y = 0, z = 0;
1792 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1793 const auto& trk = mTracking->mIOPtrs.mergedTracks[track];
1794 mTracking->GetTPCTransformHelper()->Transform(i, j, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTZOffset());
1795 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1796 }
1797 uint32_t extState = mTracking->mIOPtrs.mergedTrackHitStates ? mTracking->mIOPtrs.mergedTrackHitStates[clid] : 0;
1798
1799 if (mConfig.dumpToROOT >= 2) {
1802 memset((void*)&trk, 0, sizeof(trk));
1803 memset((void*)&trkHit, 0, sizeof(trkHit));
1805 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1806 trk = mTracking->mIOPtrs.mergedTracks[track];
1807 for (uint32_t l = 0; l < trk.NClusters(); l++) {
1808 const auto& tmp = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + l];
1809 if (tmp.num == clid) {
1810 trkHit = tmp;
1811 break;
1812 }
1813 }
1814 }
1815 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, GPUTPCGMMergedTrack, GPUTPCGMMergedTrackHit, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "track", "trackHit", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1816 cldump.Fill(cl, trk, trkHit, attach, extState, x, y, z, i, j, mNEvents - 1);
1817 } else {
1818 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1819 cldump.Fill(cl, attach, extState, x, y, z, i, j, mNEvents - 1);
1820 }
1821 clid++;
1822 }
1823 }
1824 }
1825
1826 static auto trkdump = GPUROOTDump<uint32_t, GPUTPCGMMergedTrack>::getNew("nEv", "track", "tracksTree");
1827 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1828 if (mTracking->mIOPtrs.mergedTracks[i].OK()) {
1829 trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.mergedTracks[i]);
1830 }
1831 }
1832
1833 if (mTracking && mTracking->GetProcessingSettings().createO2Output) {
1834 static auto o2trkdump = GPUROOTDump<uint32_t, o2::tpc::TrackTPC>::getNew("nEv", "track", "tracksO2Tree");
1835 for (uint32_t i = 0; i < mTracking->mIOPtrs.nOutputTracksTPCO2; i++) {
1836 o2trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.outputTracksTPCO2[i]);
1837 }
1838 }
1839 }
1840 mTrackingScratchBuffer.clear();
1841 mTrackingScratchBuffer.shrink_to_fit();
1842}
1843
1844void GPUQA::GetName(char* fname, int32_t k)
1845{
1846 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
1847 if (k || mConfig.inputHistogramsOnly || mConfig.name.size()) {
1848 if (!(mConfig.inputHistogramsOnly || k)) {
1849 snprintf(fname, 1024, "%s - ", mConfig.name.c_str());
1850 } else if (mConfig.compareInputNames.size() > (unsigned)(k - nNewInput)) {
1851 snprintf(fname, 1024, "%s - ", mConfig.compareInputNames[k - nNewInput].c_str());
1852 } else {
1853 strcpy(fname, mConfig.compareInputs[k - nNewInput].c_str());
1854 if (strlen(fname) > 5 && strcmp(fname + strlen(fname) - 5, ".root") == 0) {
1855 fname[strlen(fname) - 5] = 0;
1856 }
1857 strcat(fname, " - ");
1858 }
1859 } else {
1860 fname[0] = 0;
1861 }
1862}
1863
1864template <class T>
1865T* GPUQA::GetHist(T*& ee, std::vector<std::unique_ptr<TFile>>& tin, int32_t k, int32_t nNewInput)
1866{
1867 T* e = ee;
1868 if ((mConfig.inputHistogramsOnly || k) && (e = dynamic_cast<T*>(tin[k - nNewInput]->Get(e->GetName()))) == nullptr) {
1869 GPUWarning("Missing histogram in input %s: %s", mConfig.compareInputs[k - nNewInput].c_str(), ee->GetName());
1870 return (nullptr);
1871 }
1872 ee = e;
1873 return (e);
1874}
1875
void GPUQA::DrawQAHistogramsCleanup()
{
  // Free all ROOT objects registered in the internal garbage collector
  // (those created via createGarbageCollected<> during DrawQAHistograms).
  clearGarbagageCollector();
}
1880
1881void GPUQA::resetHists()
1882{
1883 if (!mQAInitialized) {
1884 throw std::runtime_error("QA not initialized");
1885 }
1886 if (mHaveExternalHists) {
1887 throw std::runtime_error("Cannot reset external hists");
1888 }
1889 for (auto& h : *mHist1D) {
1890 h.Reset();
1891 }
1892 for (auto& h : *mHist2D) {
1893 h.Reset();
1894 }
1895 for (auto& h : *mHist1Dd) {
1896 h.Reset();
1897 }
1898 for (auto& h : *mHistGraph) {
1899 h = TGraphAsymmErrors();
1900 }
1901 mClusterCounts = counts_t();
1902}
1903
1904int32_t GPUQA::DrawQAHistograms(TObjArray* qcout)
1905{
1906 const auto oldRootIgnoreLevel = gErrorIgnoreLevel;
1907 gErrorIgnoreLevel = kWarning;
1908 if (!mQAInitialized) {
1909 throw std::runtime_error("QA not initialized");
1910 }
1911
1912 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
1913 printf("Creating QA Histograms\n");
1914 }
1915
1916 std::vector<Color_t> colorNums(COLORCOUNT);
1917 if (!qcout) {
1918 static int32_t initColorsInitialized = initColors();
1919 (void)initColorsInitialized;
1920 }
1921 for (int32_t i = 0; i < COLORCOUNT; i++) {
1922 colorNums[i] = qcout ? defaultColorNums[i] : mColors[i]->GetNumber();
1923 }
1924
1925 bool mcAvail = mcPresent();
1926 char name[2048], fname[1024];
1927
1928 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
1929 const int32_t ConfigNumInputs = nNewInput + mConfig.compareInputs.size();
1930
1931 std::vector<std::unique_ptr<TFile>> tin;
1932 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
1933 tin.emplace_back(std::make_unique<TFile>(mConfig.compareInputs[i].c_str()));
1934 }
1935 std::unique_ptr<TFile> tout = nullptr;
1936 if (mConfig.output.size()) {
1937 tout = std::make_unique<TFile>(mConfig.output.c_str(), "RECREATE");
1938 }
1939
1940 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
1941 float legendSpacingString = 0.025;
1942 for (int32_t i = 0; i < ConfigNumInputs; i++) {
1943 GetName(fname, i);
1944 if (strlen(fname) * 0.006 > legendSpacingString) {
1945 legendSpacingString = strlen(fname) * 0.006;
1946 }
1947 }
1948
1949 // Create Canvas / Pads for Efficiency Histograms
1950 if (mQATasks & taskTrackingEff) {
1951 for (int32_t ii = 0; ii < 6; ii++) {
1952 int32_t i = ii == 5 ? 4 : ii;
1953 snprintf(fname, 1024, "eff_vs_%s_layout", VSPARAMETER_NAMES[ii]);
1954 snprintf(name, 2048, "Efficiency versus %s", VSPARAMETER_NAMES[i]);
1955 mCEff[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
1956 mCEff[ii]->cd();
1957 float dy = 1. / 2.;
1958 mPEff[ii][0] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
1959 mPEff[ii][0]->Draw();
1960 mPEff[ii][0]->SetRightMargin(0.04);
1961 mPEff[ii][1] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
1962 mPEff[ii][1]->Draw();
1963 mPEff[ii][1]->SetRightMargin(0.04);
1964 mPEff[ii][2] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 0.5, dy * 2 - .001);
1965 mPEff[ii][2]->Draw();
1966 mPEff[ii][2]->SetRightMargin(0.04);
1967 mPEff[ii][3] = createGarbageCollected<TPad>("p3", "", 0.5, dy * 1, 1.0, dy * 2 - .001);
1968 mPEff[ii][3]->Draw();
1969 mPEff[ii][3]->SetRightMargin(0.04);
1970 mLEff[ii] = createGarbageCollected<TLegend>(0.92 - legendSpacingString * 1.45, 0.83 - (0.93 - 0.82) / 2. * (float)ConfigNumInputs, 0.98, 0.849);
1971 SetLegend(mLEff[ii]);
1972 }
1973 }
1974
1975 // Create Canvas / Pads for Resolution Histograms
1976 if (mQATasks & taskTrackingRes) {
1977 for (int32_t ii = 0; ii < 7; ii++) {
1978 int32_t i = ii == 5 ? 4 : ii;
1979 if (ii == 6) {
1980 snprintf(fname, 1024, "res_integral_layout");
1981 snprintf(name, 2048, "Integral Resolution");
1982 } else {
1983 snprintf(fname, 1024, "res_vs_%s_layout", VSPARAMETER_NAMES[ii]);
1984 snprintf(name, 2048, "Resolution versus %s", VSPARAMETER_NAMES[i]);
1985 }
1986 mCRes[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
1987 mCRes[ii]->cd();
1988 gStyle->SetOptFit(1);
1989
1990 float dy = 1. / 2.;
1991 mPRes[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
1992 mPRes[ii][3]->Draw();
1993 mPRes[ii][3]->SetRightMargin(0.04);
1994 mPRes[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
1995 mPRes[ii][4]->Draw();
1996 mPRes[ii][4]->SetRightMargin(0.04);
1997 mPRes[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
1998 mPRes[ii][0]->Draw();
1999 mPRes[ii][0]->SetRightMargin(0.04);
2000 mPRes[ii][0]->SetLeftMargin(0.15);
2001 mPRes[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2002 mPRes[ii][1]->Draw();
2003 mPRes[ii][1]->SetRightMargin(0.04);
2004 mPRes[ii][1]->SetLeftMargin(0.135);
2005 mPRes[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2006 mPRes[ii][2]->Draw();
2007 mPRes[ii][2]->SetRightMargin(0.06);
2008 mPRes[ii][2]->SetLeftMargin(0.135);
2009 if (ii < 6) {
2010 mLRes[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2011 SetLegend(mLRes[ii]);
2012 }
2013 }
2014 }
2015
2016 // Create Canvas / Pads for Pull Histograms
2017 if (mQATasks & taskTrackingResPull) {
2018 for (int32_t ii = 0; ii < 7; ii++) {
2019 int32_t i = ii == 5 ? 4 : ii;
2020
2021 if (ii == 6) {
2022 snprintf(fname, 1024, "pull_integral_layout");
2023 snprintf(name, 2048, "Integral Pull");
2024 } else {
2025 snprintf(fname, 1024, "pull_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2026 snprintf(name, 2048, "Pull versus %s", VSPARAMETER_NAMES[i]);
2027 }
2028 mCPull[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
2029 mCPull[ii]->cd();
2030 gStyle->SetOptFit(1);
2031
2032 float dy = 1. / 2.;
2033 mPPull[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2034 mPPull[ii][3]->Draw();
2035 mPPull[ii][3]->SetRightMargin(0.04);
2036 mPPull[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2037 mPPull[ii][4]->Draw();
2038 mPPull[ii][4]->SetRightMargin(0.04);
2039 mPPull[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2040 mPPull[ii][0]->Draw();
2041 mPPull[ii][0]->SetRightMargin(0.04);
2042 mPPull[ii][0]->SetLeftMargin(0.15);
2043 mPPull[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2044 mPPull[ii][1]->Draw();
2045 mPPull[ii][1]->SetRightMargin(0.04);
2046 mPPull[ii][1]->SetLeftMargin(0.135);
2047 mPPull[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2048 mPPull[ii][2]->Draw();
2049 mPPull[ii][2]->SetRightMargin(0.06);
2050 mPPull[ii][2]->SetLeftMargin(0.135);
2051 if (ii < 6) {
2052 mLPull[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2053 SetLegend(mLPull[ii]);
2054 }
2055 }
2056 }
2057
2058 // Create Canvas for Cluster Histos
2059 if (mQATasks & taskClusterAttach) {
2060 for (int32_t i = 0; i < 3; i++) {
2061 snprintf(fname, 1024, "clusters_%s_layout", CLUSTER_TYPES[i]);
2062 mCClust[i] = createGarbageCollected<TCanvas>(fname, CLUSTER_TITLES[i], 0, 0, 700, 700. * 2. / 3.);
2063 mCClust[i]->cd();
2064 mPClust[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2065 mPClust[i]->Draw();
2066 float y1 = i != 1 ? 0.77 : 0.27, y2 = i != 1 ? 0.9 : 0.42;
2067 mLClust[i] = createGarbageCollected<TLegend>(i == 2 ? 0.1 : (0.65 - legendSpacingString * 1.45), y2 - (y2 - y1) * (ConfigNumInputs + (i != 1) / 2.) + 0.005, i == 2 ? (0.3 + legendSpacingString * 1.45) : 0.9, y2);
2068 SetLegend(mLClust[i]);
2069 }
2070 }
2071
2072 // Create Canvas for track statistic histos
2073 if (mQATasks & taskTrackStatistics) {
2074 mCTracks = createGarbageCollected<TCanvas>("ctracks", "Track Pt", 0, 0, 700, 700. * 2. / 3.);
2075 mCTracks->cd();
2076 mPTracks = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2077 mPTracks->Draw();
2078 mLTracks = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2079 SetLegend(mLTracks);
2080
2081 for (int32_t i = 0; i < 2; i++) {
2082 snprintf(name, 2048, "cncl%d Pull", i);
2083 mCNCl[i] = createGarbageCollected<TCanvas>(name, i ? "Number of clusters (corrected for multiple per row)" : "Number of clusters per track", 0, 0, 700, 700. * 2. / 3.);
2084 mCNCl[i]->cd();
2085 mPNCl[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2086 mPNCl[i]->Draw();
2087 mLNCl[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2088 SetLegend(mLNCl[i]);
2089 }
2090
2091 mCClXY = createGarbageCollected<TCanvas>("clxy", "Number of clusters per X / Y", 0, 0, 700, 700. * 2. / 3.);
2092 mCClXY->cd();
2093 mPClXY = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2094 mPClXY->Draw();
2095 }
2096 }
2097
2098 if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && (mQATasks & taskTrackingEff) && mcPresent()) {
2099 GPUInfo("QA Stats: Eff: Tracks Prim %d (Eta %d, Pt %d) %f%% (%f%%) Sec %d (Eta %d, Pt %d) %f%% (%f%%) - Res: Tracks %d (Eta %d, Pt %d)", (int32_t)mEff[3][1][0][0]->GetEntries(), (int32_t)mEff[3][1][0][3]->GetEntries(), (int32_t)mEff[3][1][0][4]->GetEntries(),
2100 mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights()), mEff[0][1][0][0]->GetSumOfWeights() / std::max(1., mEff[3][1][0][0]->GetSumOfWeights()), (int32_t)mEff[3][1][1][0]->GetEntries(), (int32_t)mEff[3][1][1][3]->GetEntries(),
2101 (int32_t)mEff[3][1][1][4]->GetEntries(), mEff[0][0][1][0]->GetSumOfWeights() / std::max(1., mEff[3][0][1][0]->GetSumOfWeights()), mEff[0][1][1][0]->GetSumOfWeights() / std::max(1., mEff[3][1][1][0]->GetSumOfWeights()), (int32_t)mRes2[0][0]->GetEntries(),
2102 (int32_t)mRes2[0][3]->GetEntries(), (int32_t)mRes2[0][4]->GetEntries());
2103 }
2104
2105 int32_t flagShowVsPtLog = (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) ? 1 : 0;
2106
2107 if (mQATasks & taskTrackingEff) {
2108 // Process / Draw Efficiency Histograms
2109 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2110 int32_t i = ii == 5 ? 4 : ii;
2111 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2112 for (int32_t j = 0; j < 4; j++) {
2113 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2114 mPEff[ii][j]->cd();
2115 if (ii == 5) {
2116 mPEff[ii][j]->SetLogx();
2117 }
2118 }
2119 for (int32_t l = 0; l < 3; l++) {
2120 if (k == 0 && mConfig.inputHistogramsOnly == 0 && ii != 5) {
2121 if (l == 0) {
2122 // Divide eff, compute all for fake/clone
2123 auto oldLevel = gErrorIgnoreLevel;
2124 gErrorIgnoreLevel = kError;
2125 mEffResult[0][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2126 gErrorIgnoreLevel = oldLevel;
2127 mEff[3][j / 2][j % 2][i]->Reset(); // Sum up rec + clone + fake for fake rate
2128 mEff[3][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2129 mEff[3][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2130 mEff[3][j / 2][j % 2][i]->Add(mEff[2][j / 2][j % 2][i]);
2131 mEff[4][j / 2][j % 2][i]->Reset(); // Sum up rec + clone for clone rate
2132 mEff[4][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2133 mEff[4][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2134 } else {
2135 // Divide fake/clone
2136 auto oldLevel = gErrorIgnoreLevel;
2137 gErrorIgnoreLevel = kError;
2138 mEffResult[l][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[l == 1 ? 4 : 3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2139 gErrorIgnoreLevel = oldLevel;
2140 }
2141 }
2142
2143 TGraphAsymmErrors* e = mEffResult[l][j / 2][j % 2][i];
2144
2145 if (!mConfig.inputHistogramsOnly && k == 0) {
2146 if (tout) {
2147 mEff[l][j / 2][j % 2][i]->Write();
2148 e->Write();
2149 if (l == 2) {
2150 mEff[3][j / 2][j % 2][i]->Write(); // Store also all histogram!
2151 mEff[4][j / 2][j % 2][i]->Write(); // Store also all histogram!
2152 }
2153 }
2154 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2155 continue;
2156 }
2157 e->SetTitle(EFFICIENCY_TITLES[j]);
2158 e->GetYaxis()->SetTitle("(Efficiency)");
2159 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2160
2161 e->SetLineWidth(1);
2162 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2163 SetAxisSize(e);
2164 if (qcout && !mConfig.shipToQCAsCanvas) {
2165 qcout->Add(e);
2166 }
2167 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2168 continue;
2169 }
2170 e->SetMarkerColor(kBlack);
2171 e->SetLineColor(colorNums[(l == 2 ? (ConfigNumInputs * 2 + k) : (k * 2 + l)) % COLORCOUNT]);
2172 e->GetHistogram()->GetYaxis()->SetRangeUser(-0.02, 1.02);
2173 e->Draw(k || l ? "same P" : "AP");
2174 if (j == 0) {
2175 GetName(fname, k);
2176 snprintf(name, 2048, "%s%s", fname, EFF_NAMES[l]);
2177 mLEff[ii]->AddEntry(e, name, "l");
2178 }
2179 }
2180 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2181 continue;
2182 }
2183 mCEff[ii]->cd();
2184 ChangePadTitleSize(mPEff[ii][j], 0.056);
2185 }
2186 }
2187 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2188 continue;
2189 }
2190
2191 mLEff[ii]->Draw();
2192
2193 if (qcout) {
2194 qcout->Add(mCEff[ii]);
2195 }
2196 if (!mConfig.enableLocalOutput) {
2197 continue;
2198 }
2199 doPerfFigure(0.2, 0.295, 0.025);
2200 mCEff[ii]->Print(Form("plots/eff_vs_%s.pdf", VSPARAMETER_NAMES[ii]));
2201 if (mConfig.writeRootFiles) {
2202 mCEff[ii]->Print(Form("plots/eff_vs_%s.root", VSPARAMETER_NAMES[ii]));
2203 }
2204 }
2205 }
2206
2207 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
2208 // Process / Draw Resolution Histograms
2209 TH1D *resIntegral[5] = {}, *pullIntegral[5] = {};
2210 TCanvas* cfit = nullptr;
2211 std::unique_ptr<TF1> customGaus = std::make_unique<TF1>("G", "[0]*exp(-(x-[1])*(x-[1])/(2.*[2]*[2]))");
2212 for (int32_t p = 0; p < 2; p++) {
2213 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2214 continue;
2215 }
2216 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2217 TCanvas* can = p ? mCPull[ii] : mCRes[ii];
2218 TLegend* leg = p ? mLPull[ii] : mLRes[ii];
2219 int32_t i = ii == 5 ? 4 : ii;
2220 for (int32_t j = 0; j < 5; j++) {
2221 TH2F* src = p ? mPull2[j][i] : mRes2[j][i];
2222 TH1F** dst = p ? mPull[j][i] : mRes[j][i];
2223 TH1D*& dstIntegral = p ? pullIntegral[j] : resIntegral[j];
2224 TPad* pad = p ? mPPull[ii][j] : mPRes[ii][j];
2225
2226 if (!mConfig.inputHistogramsOnly && ii != 5) {
2227 if (cfit == nullptr) {
2228 cfit = createGarbageCollected<TCanvas>();
2229 }
2230 cfit->cd();
2231
2232 TAxis* axis = src->GetYaxis();
2233 int32_t nBins = axis->GetNbins();
2234 int32_t integ = 1;
2235 for (int32_t bin = 1; bin <= nBins; bin++) {
2236 int32_t bin0 = std::max(bin - integ, 0);
2237 int32_t bin1 = std::min(bin + integ, nBins);
2238 std::unique_ptr<TH1D> proj{src->ProjectionX("proj", bin0, bin1)};
2239 proj->ClearUnderflowAndOverflow();
2240 if (proj->GetEntries()) {
2241 uint32_t rebin = 1;
2242 while (proj->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2243 proj->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2244 rebin++;
2245 }
2246
2247 if (proj->GetEntries() < 20 || proj->GetRMS() < 0.00001) {
2248 dst[0]->SetBinContent(bin, proj->GetRMS());
2249 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2250 dst[1]->SetBinContent(bin, proj->GetMean());
2251 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2252 } else {
2253 proj->GetXaxis()->SetRange(0, 0);
2254 proj->GetXaxis()->SetRangeUser(std::max(proj->GetXaxis()->GetXmin(), proj->GetMean() - 3. * proj->GetRMS()), std::min(proj->GetXaxis()->GetXmax(), proj->GetMean() + 3. * proj->GetRMS()));
2255 bool forceLogLike = proj->GetMaximum() < 20;
2256 for (int32_t k = forceLogLike ? 2 : 0; k < 3; k++) {
2257 proj->Fit("gaus", forceLogLike || k == 2 ? "sQl" : k ? "sQww" : "sQ");
2258 TF1* fitFunc = proj->GetFunction("gaus");
2259
2260 if (k && !forceLogLike) {
2261 customGaus->SetParameters(fitFunc->GetParameter(0), fitFunc->GetParameter(1), fitFunc->GetParameter(2));
2262 proj->Fit(customGaus.get(), "sQ");
2263 fitFunc = customGaus.get();
2264 }
2265
2266 const float sigma = fabs(fitFunc->GetParameter(2));
2267 dst[0]->SetBinContent(bin, sigma);
2268 dst[1]->SetBinContent(bin, fitFunc->GetParameter(1));
2269 dst[0]->SetBinError(bin, fitFunc->GetParError(2));
2270 dst[1]->SetBinError(bin, fitFunc->GetParError(1));
2271
2272 const bool fail1 = sigma <= 0.f;
2273 const bool fail2 = fabs(proj->GetMean() - dst[1]->GetBinContent(bin)) > std::min<float>(p ? PULL_AXIS : mConfig.nativeFitResolutions ? RES_AXES_NATIVE[j] : RES_AXES[j], 3.f * proj->GetRMS());
2274 const bool fail3 = dst[0]->GetBinContent(bin) > 3.f * proj->GetRMS() || dst[0]->GetBinError(bin) > 1 || dst[1]->GetBinError(bin) > 1;
2275 const bool fail4 = fitFunc->GetParameter(0) < proj->GetMaximum() / 5.;
2276 const bool fail = fail1 || fail2 || fail3 || fail4;
2277 // if (p == 0 && ii == 4 && j == 2) DrawHisto(proj, Form("Hist_bin_%d-%d_vs_%d____%d_%d___%f-%f___%f-%f___%d.pdf", p, j, ii, bin, k, dst[0]->GetBinContent(bin), proj->GetRMS(), dst[1]->GetBinContent(bin), proj->GetMean(), (int32_t) fail), "");
2278
2279 if (!fail) {
2280 break;
2281 } else if (k >= 2) {
2282 dst[0]->SetBinContent(bin, proj->GetRMS());
2283 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2284 dst[1]->SetBinContent(bin, proj->GetMean());
2285 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2286 }
2287 }
2288 }
2289 } else {
2290 dst[0]->SetBinContent(bin, 0.f);
2291 dst[0]->SetBinError(bin, 0.f);
2292 dst[1]->SetBinContent(bin, 0.f);
2293 dst[1]->SetBinError(bin, 0.f);
2294 }
2295 }
2296 if (ii == 0) {
2297 dstIntegral = src->ProjectionX(mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j], 0, nBins + 1);
2298 uint32_t rebin = 1;
2299 while (dstIntegral->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2300 dstIntegral->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2301 rebin++;
2302 }
2303 }
2304 }
2305 if (ii == 0) {
2306 if (mConfig.inputHistogramsOnly) {
2307 dstIntegral = createGarbageCollected<TH1D>();
2308 }
2309 snprintf(fname, 1024, p ? "IntPull%s" : "IntRes%s", VSPARAMETER_NAMES[j]);
2310 snprintf(name, 2048, p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]);
2311 dstIntegral->SetName(fname);
2312 dstIntegral->SetTitle(name);
2313 }
2314 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2315 pad->cd();
2316 }
2317 int32_t numColor = 0;
2318 float tmpMax = -1000.;
2319 float tmpMin = 1000.;
2320
2321 for (int32_t l = 0; l < 2; l++) {
2322 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2323 TH1F* e = dst[l];
2324 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2325 continue;
2326 }
2327 if (nNewInput && k == 0 && ii != 5) {
2328 if (p == 0) {
2329 e->Scale(mConfig.nativeFitResolutions ? SCALE_NATIVE[j] : SCALE[j]);
2330 }
2331 }
2332 if (ii == 4) {
2333 e->GetXaxis()->SetRangeUser(0.2, PT_MAX);
2334 } else if (LOG_PT_MIN > 0 && ii == 5) {
2335 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2336 } else if (ii == 5) {
2337 e->GetXaxis()->SetRange(1, 0);
2338 }
2339 e->SetMinimum(-1111);
2340 e->SetMaximum(-1111);
2341
2342 if (e->GetMaximum() > tmpMax) {
2343 tmpMax = e->GetMaximum();
2344 }
2345 if (e->GetMinimum() < tmpMin) {
2346 tmpMin = e->GetMinimum();
2347 }
2348 }
2349 }
2350
2351 float tmpSpan;
2352 tmpSpan = tmpMax - tmpMin;
2353 tmpMax += tmpSpan * .02;
2354 tmpMin -= tmpSpan * .02;
2355 if (j == 2 && i < 3) {
2356 tmpMax += tmpSpan * 0.13 * ConfigNumInputs;
2357 }
2358
2359 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2360 for (int32_t l = 0; l < 2; l++) {
2361 TH1F* e = dst[l];
2362 if (!mConfig.inputHistogramsOnly && k == 0) {
2363 snprintf(name, 2048, p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]);
2364 e->SetTitle(name);
2365 e->SetStats(kFALSE);
2366 if (tout) {
2367 if (l == 0) {
2368 mRes2[j][i]->SetOption("colz");
2369 mRes2[j][i]->Write();
2370 }
2371 e->Write();
2372 }
2373 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2374 continue;
2375 }
2376 e->SetMaximum(tmpMax);
2377 e->SetMinimum(tmpMin);
2378 e->SetLineWidth(1);
2379 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2380 SetAxisSize(e);
2381 e->GetYaxis()->SetTitle(p ? AXIS_TITLES_PULL[j] : mConfig.nativeFitResolutions ? AXIS_TITLES_NATIVE[j] : AXIS_TITLES[j]);
2382 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2383 if (LOG_PT_MIN > 0 && ii == 5) {
2384 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2385 }
2386
2387 if (j == 0) {
2388 e->GetYaxis()->SetTitleOffset(1.5);
2389 } else if (j < 3) {
2390 e->GetYaxis()->SetTitleOffset(1.4);
2391 }
2392 if (qcout && !mConfig.shipToQCAsCanvas) {
2393 qcout->Add(e);
2394 }
2395 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2396 continue;
2397 }
2398
2399 e->SetMarkerColor(kBlack);
2400 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2401 e->Draw(k || l ? "same" : "");
2402 if (j == 0) {
2403 GetName(fname, k);
2404 if (p) {
2405 snprintf(name, 2048, "%s%s", fname, l ? "Mean" : "Pull");
2406 } else {
2407 snprintf(name, 2048, "%s%s", fname, l ? "Mean" : "Resolution");
2408 }
2409 leg->AddEntry(e, name, "l");
2410 }
2411 }
2412 }
2413 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2414 continue;
2415 }
2416
2417 if (ii == 5) {
2418 pad->SetLogx();
2419 }
2420 can->cd();
2421 if (j == 4) {
2422 ChangePadTitleSize(pad, 0.056);
2423 }
2424 }
2425 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2426 continue;
2427 }
2428
2429 leg->Draw();
2430
2431 if (qcout) {
2432 qcout->Add(can);
2433 }
2434 if (!mConfig.enableLocalOutput) {
2435 continue;
2436 }
2437 doPerfFigure(0.2, 0.295, 0.025);
2438 can->Print(Form(p ? "plots/pull_vs_%s.pdf" : "plots/res_vs_%s.pdf", VSPARAMETER_NAMES[ii]));
2439 if (mConfig.writeRootFiles) {
2440 can->Print(Form(p ? "plots/pull_vs_%s.root" : "plots/res_vs_%s.root", VSPARAMETER_NAMES[ii]));
2441 }
2442 }
2443 }
2444
 2445  // Process Integral Resolution Histograms
2446 for (int32_t p = 0; p < 2; p++) {
2447 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2448 continue;
2449 }
2450 TCanvas* can = p ? mCPull[6] : mCRes[6];
2451 for (int32_t i = 0; i < 5; i++) {
2452 TPad* pad = p ? mPPull[6][i] : mPRes[6][i];
2453 TH1D* hist = p ? pullIntegral[i] : resIntegral[i];
2454 int32_t numColor = 0;
2455 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2456 pad->cd();
2457 }
2458 if (!mConfig.inputHistogramsOnly && mcAvail) {
2459 TH1D* e = hist;
2460 if (e && e->GetEntries()) {
2461 e->Fit("gaus", "sQ");
2462 }
2463 }
2464
2465 float tmpMax = 0;
2466 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2467 TH1D* e = hist;
2468 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2469 continue;
2470 }
2471 e->SetMaximum(-1111);
2472 if (e->GetMaximum() > tmpMax) {
2473 tmpMax = e->GetMaximum();
2474 }
2475 }
2476
2477 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2478 TH1D* e = hist;
2479 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2480 continue;
2481 }
2482 e->SetMaximum(tmpMax * 1.02);
2483 e->SetMinimum(tmpMax * -0.02);
2484 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2485 e->Write();
2486 }
2487 if (qcout && !mConfig.shipToQCAsCanvas) {
2488 qcout->Add(e);
2489 }
2490 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2491 continue;
2492 }
2493
2494 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2495 e->Draw(k == 0 ? "" : "same");
2496 }
2497 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2498 continue;
2499 }
2500 can->cd();
2501 }
2502 if (qcout) {
2503 qcout->Add(can);
2504 }
2505 if (!mConfig.enableLocalOutput) {
2506 continue;
2507 }
2508
2509 can->Print(p ? "plots/pull_integral.pdf" : "plots/res_integral.pdf");
2510 if (mConfig.writeRootFiles) {
2511 can->Print(p ? "plots/pull_integral.root" : "plots/res_integral.root");
2512 }
2513 }
2514 }
2515
2516 uint64_t attachClusterCounts[N_CLS_HIST];
2517 if (mQATasks & taskClusterAttach) {
2518 // Process Cluster Attachment Histograms
2519 if (mConfig.inputHistogramsOnly == 0) {
2520 for (int32_t i = N_CLS_HIST; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
2521 mClusters[i]->Sumw2(true);
2522 }
2523 double totalVal = 0;
2524 if (!CLUST_HIST_INT_SUM) {
2525 for (int32_t j = 0; j < mClusters[N_CLS_HIST - 1]->GetXaxis()->GetNbins() + 2; j++) {
2526 totalVal += mClusters[N_CLS_HIST - 1]->GetBinContent(j);
2527 }
2528 }
2529 if (totalVal == 0.) {
2530 totalVal = 1.;
2531 }
2532 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2533 double val = 0;
2534 for (int32_t j = 0; j < mClusters[i]->GetXaxis()->GetNbins() + 2; j++) {
2535 val += mClusters[i]->GetBinContent(j);
2536 mClusters[2 * N_CLS_HIST - 1 + i]->SetBinContent(j, val / totalVal);
2537 }
2538 attachClusterCounts[i] = val;
2539 }
2540
2541 if (!CLUST_HIST_INT_SUM) {
2542 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2543 mClusters[2 * N_CLS_HIST - 1 + i]->SetMaximum(1.02);
2544 mClusters[2 * N_CLS_HIST - 1 + i]->SetMinimum(-0.02);
2545 }
2546 }
2547
2548 for (int32_t i = 0; i < N_CLS_HIST - 1; i++) {
2549 auto oldLevel = gErrorIgnoreLevel;
2550 gErrorIgnoreLevel = kError;
2551 mClusters[N_CLS_HIST + i]->Divide(mClusters[i], mClusters[N_CLS_HIST - 1], 1, 1, "B");
2552 gErrorIgnoreLevel = oldLevel;
2553 mClusters[N_CLS_HIST + i]->SetMinimum(-0.02);
2554 mClusters[N_CLS_HIST + i]->SetMaximum(1.02);
2555 }
2556 }
2557
2558 float tmpMax[2] = {0, 0}, tmpMin[2] = {0, 0};
2559 for (int32_t l = 0; l <= CLUST_HIST_INT_SUM; l++) {
2560 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2561 TH1* e = mClusters[l ? (N_CLS_TYPE * N_CLS_HIST - 2) : (N_CLS_HIST - 1)];
2562 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2563 continue;
2564 }
2565 e->SetMinimum(-1111);
2566 e->SetMaximum(-1111);
2567 if (l == 0) {
2568 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2569 }
2570 if (e->GetMaximum() > tmpMax[l]) {
2571 tmpMax[l] = e->GetMaximum();
2572 }
2573 if (e->GetMinimum() < tmpMin[l]) {
2574 tmpMin[l] = e->GetMinimum();
2575 }
2576 }
2577 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2578 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2579 TH1* e = mClusters[l ? (2 * N_CLS_HIST - 1 + i) : i];
2580 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2581 continue;
2582 }
2583 e->SetMaximum(tmpMax[l] * 1.02);
2584 e->SetMinimum(tmpMax[l] * -0.02);
2585 }
2586 }
2587 }
2588
2589 for (int32_t i = 0; i < N_CLS_TYPE; i++) {
2590 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2591 mPClust[i]->cd();
2592 mPClust[i]->SetLogx();
2593 }
2594 int32_t begin = i == 2 ? (2 * N_CLS_HIST - 1) : i == 1 ? N_CLS_HIST : 0;
2595 int32_t end = i == 2 ? (3 * N_CLS_HIST - 1) : i == 1 ? (2 * N_CLS_HIST - 1) : N_CLS_HIST;
2596 int32_t numColor = 0;
2597 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2598 for (int32_t j = end - 1; j >= begin; j--) {
2599 TH1* e = mClusters[j];
2600 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2601 continue;
2602 }
2603
2604 e->SetTitle(CLUSTER_TITLES[i]);
2605 e->GetYaxis()->SetTitle(i == 0 ? "Number of TPC clusters" : i == 1 ? "Fraction of TPC clusters" : CLUST_HIST_INT_SUM ? "Total TPC clusters (integrated)" : "Fraction of TPC clusters (integrated)");
2606 e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
2607 e->GetXaxis()->SetTitleOffset(1.1);
2608 e->GetXaxis()->SetLabelOffset(-0.005);
2609 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2610 e->Write();
2611 }
2612 e->SetStats(kFALSE);
2613 e->SetLineWidth(1);
2614 e->SetLineStyle(CONFIG_DASHED_MARKERS ? j + 1 : 1);
2615 if (i == 0) {
2616 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2617 }
2618 if (qcout && !mConfig.shipToQCAsCanvas) {
2619 qcout->Add(e);
2620 }
2621 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2622 continue;
2623 }
2624
2625 e->SetMarkerColor(kBlack);
2626 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2627 e->Draw(j == end - 1 && k == 0 ? "" : "same");
2628 GetName(fname, k);
2629 snprintf(name, 2048, "%s%s", fname, CLUSTER_NAMES[j - begin]);
2630 mLClust[i]->AddEntry(e, name, "l");
2631 }
2632 }
2633 if (ConfigNumInputs == 1) {
2634 TH1* e = reinterpret_cast<TH1F*>(mClusters[begin + CL_att_adj]->Clone());
2635 e->Add(mClusters[begin + CL_prot], -1);
2636 if (qcout && !mConfig.shipToQCAsCanvas) {
2637 qcout->Add(e);
2638 }
2639 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2640 continue;
2641 }
2642
2643 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2644 e->Draw("same");
2645 mLClust[i]->AddEntry(e, "Removed (Strategy A)", "l");
2646 }
2647 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2648 continue;
2649 }
2650
2651 mLClust[i]->Draw();
2652
2653 if (qcout) {
2654 qcout->Add(mCClust[i]);
2655 }
2656 if (!mConfig.enableLocalOutput) {
2657 continue;
2658 }
2659 doPerfFigure(i != 2 ? 0.37 : 0.6, 0.295, 0.030);
2660 mCClust[i]->cd();
2661 mCClust[i]->Print(i == 2 ? "plots/clusters_integral.pdf" : i == 1 ? "plots/clusters_relative.pdf" : "plots/clusters.pdf");
2662 if (mConfig.writeRootFiles) {
2663 mCClust[i]->Print(i == 2 ? "plots/clusters_integral.root" : i == 1 ? "plots/clusters_relative.root" : "plots/clusters.root");
2664 }
2665 }
2666 }
2667
2668 // Process cluster count statistics
2669 if ((mQATasks & taskClusterCounts) && !mHaveExternalHists && !mConfig.clusterRejectionHistograms && !mConfig.inputHistogramsOnly) {
2670 DoClusterCounts(attachClusterCounts);
2671 }
2672 if ((qcout || tout) && (mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
2673 for (uint32_t i = 0; i < mHistClusterCount.size(); i++) {
2674 if (tout) {
2675 mHistClusterCount[i]->Write();
2676 }
2677 if (qcout) {
2678 qcout->Add(mHistClusterCount[i]);
2679 }
2680 }
2681 }
2682
2683 if (mQATasks & taskTrackStatistics) {
2684 // Process track statistic histograms
2685 float tmpMax = 0.;
2686 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2687 TH1F* e = mTracks;
2688 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2689 continue;
2690 }
2691 e->SetMaximum(-1111);
2692 if (e->GetMaximum() > tmpMax) {
2693 tmpMax = e->GetMaximum();
2694 }
2695 }
2696 mPTracks->cd();
2697 mPTracks->SetLogx();
2698 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2699 TH1F* e = mTracks;
2700 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2701 continue;
2702 }
2703 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2704 e->Write();
2705 }
2706 e->SetMaximum(tmpMax * 1.02);
2707 e->SetMinimum(tmpMax * -0.02);
2708 e->SetStats(kFALSE);
2709 e->SetLineWidth(1);
2710 e->GetYaxis()->SetTitle("a.u.");
2711 e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
2712 if (qcout) {
2713 qcout->Add(e);
2714 }
2715 e->SetMarkerColor(kBlack);
2716 e->SetLineColor(colorNums[k % COLORCOUNT]);
2717 e->Draw(k == 0 ? "" : "same");
2718 GetName(fname, k);
2719 snprintf(name, 2048, "%sTrack Pt", fname);
2720 mLTracks->AddEntry(e, name, "l");
2721 }
2722 mLTracks->Draw();
2723 mCTracks->cd();
2724 mCTracks->Print("plots/tracks.pdf");
2725 if (mConfig.writeRootFiles) {
2726 mCTracks->Print("plots/tracks.root");
2727 }
2728
2729 for (int32_t i = 0; i < 2; i++) {
2730 tmpMax = 0.;
2731 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2732 TH1F* e = mNCl[i];
2733 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2734 continue;
2735 }
2736 e->SetMaximum(-1111);
2737 if (e->GetMaximum() > tmpMax) {
2738 tmpMax = e->GetMaximum();
2739 }
2740 }
2741 mPNCl[i]->cd();
2742 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2743 TH1F* e = mNCl[i];
2744 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2745 continue;
2746 }
2747 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2748 e->Write();
2749 }
2750 e->SetMaximum(tmpMax * 1.02);
2751 e->SetMinimum(tmpMax * -0.02);
2752 e->SetStats(kFALSE);
2753 e->SetLineWidth(1);
2754 e->GetYaxis()->SetTitle("a.u.");
2755 e->GetXaxis()->SetTitle("NClusters");
2756 if (qcout) {
2757 qcout->Add(e);
2758 }
2759 e->SetMarkerColor(kBlack);
2760 e->SetLineColor(colorNums[k % COLORCOUNT]);
2761 e->Draw(k == 0 ? "" : "same");
2762 GetName(fname, k);
2763 snprintf(name, 2048, "%sNClusters%d", fname, i);
2764 mLNCl[i]->AddEntry(e, name, "l");
2765 }
2766 mLNCl[i]->Draw();
2767 mCNCl[i]->cd();
2768 snprintf(name, 2048, "plots/nClusters%s.pdf", i ? "_corrected" : "");
2769 mCNCl[i]->Print(name);
2770 if (mConfig.writeRootFiles) {
2771 snprintf(name, 2048, "plots/nClusters%s.root", i ? "_corrected" : "");
2772 mCNCl[i]->Print(name);
2773 }
2774 }
2775
2776 mPClXY->cd();
2777 mClXY->SetOption("colz");
2778 mClXY->Draw();
2779 mCClXY->cd();
2780 mCClXY->Print("plots/clustersXY.pdf");
2781 if (mConfig.writeRootFiles) {
2782 mCClXY->Print("plots/clustersXY.root");
2783 }
2784 }
2785
2786 if (tout && !mConfig.inputHistogramsOnly && mConfig.writeMCLabels) {
2787 gInterpreter->GenerateDictionary("vector<vector<int32_t>>", "");
2788 tout->WriteObject(&mcEffBuffer, "mcEffBuffer");
2789 tout->WriteObject(&mcLabelBuffer, "mcLabelBuffer");
2790 remove("AutoDict_vector_vector_int__.cxx");
2791 remove("AutoDict_vector_vector_int___cxx_ACLiC_dict_rdict.pcm");
2792 remove("AutoDict_vector_vector_int___cxx.d");
2793 remove("AutoDict_vector_vector_int___cxx.so");
2794 }
2795
2796 if (tout) {
2797 tout->Close();
2798 }
2799 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
2800 tin[i]->Close();
2801 }
2802 if (!qcout) {
2803 clearGarbagageCollector();
2804 }
2805 GPUInfo("GPU TPC QA histograms have been written to %s files", mConfig.writeRootFiles ? ".pdf and .root" : ".pdf");
2806 gErrorIgnoreLevel = oldRootIgnoreLevel;
2807 return (0);
2808}
2809
2810void GPUQA::PrintClusterCount(int32_t mode, int32_t& num, const char* name, uint64_t n, uint64_t normalization)
2811{
2812 if (mode == 2) {
2813 // do nothing, just count num
2814 } else if (mode == 1) {
2815 char name2[128];
2816 snprintf(name2, 128, "clusterCount%d_", num);
2817 char* ptr = name2 + strlen(name2);
2818 for (uint32_t i = 0; i < strlen(name); i++) {
2819 if ((name[i] >= 'a' && name[i] <= 'z') || (name[i] >= 'A' && name[i] <= 'Z') || (name[i] >= '0' && name[i] <= '9')) {
2820 *(ptr++) = name[i];
2821 }
2822 }
2823 *ptr = 0;
2824 createHist(mHistClusterCount[num], name2, name, 1000, 0, mConfig.histMaxNClusters, 1000, 0, 100);
2825 } else if (mode == 0) {
2826 if (normalization && mConfig.enableLocalOutput) {
2827 printf("\t%35s: %'12" PRIu64 " (%6.2f%%)\n", name, n, 100.f * n / normalization);
2828 }
2829 if (mConfig.clusterRejectionHistograms) {
2830 float ratio = 100.f * n / std::max<uint64_t>(normalization, 1);
2831 mHistClusterCount[num]->Fill(normalization, ratio, 1);
2832 }
2833 }
2834 num++;
2835}
2836
2837int32_t GPUQA::DoClusterCounts(uint64_t* attachClusterCounts, int32_t mode)
2838{
2839 int32_t num = 0;
2840 if (mcPresent() && (mQATasks & taskClusterAttach) && attachClusterCounts) {
2841 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2842 PrintClusterCount(mode, num, CLUSTER_NAMES[i], attachClusterCounts[i], mClusterCounts.nTotal);
2843 }
2844 PrintClusterCount(mode, num, "Unattached", attachClusterCounts[N_CLS_HIST - 1] - attachClusterCounts[CL_att_adj], mClusterCounts.nTotal);
2845 PrintClusterCount(mode, num, "Removed (Strategy A)", attachClusterCounts[CL_att_adj] - attachClusterCounts[CL_prot], mClusterCounts.nTotal); // Attached + Adjacent (also fake) - protected
2846 PrintClusterCount(mode, num, "Unaccessible", mClusterCounts.nUnaccessible, mClusterCounts.nTotal); // No contribution from track >= 10 MeV, unattached or fake-attached/adjacent
2847 } else {
2848 PrintClusterCount(mode, num, "All Clusters", mClusterCounts.nTotal, mClusterCounts.nTotal);
2849 PrintClusterCount(mode, num, "Used in Physics", mClusterCounts.nPhysics, mClusterCounts.nTotal);
2850 PrintClusterCount(mode, num, "Protected", mClusterCounts.nProt, mClusterCounts.nTotal);
2851 PrintClusterCount(mode, num, "Unattached", mClusterCounts.nUnattached, mClusterCounts.nTotal);
2852 PrintClusterCount(mode, num, "Removed (Strategy A)", mClusterCounts.nTotal - mClusterCounts.nUnattached - mClusterCounts.nProt, mClusterCounts.nTotal);
2853 PrintClusterCount(mode, num, "Removed (Strategy B)", mClusterCounts.nTotal - mClusterCounts.nProt, mClusterCounts.nTotal);
2854 }
2855
2856 PrintClusterCount(mode, num, "Merged Loopers (Afterburner)", mClusterCounts.nMergedLooper, mClusterCounts.nTotal);
2857 PrintClusterCount(mode, num, "High Inclination Angle", mClusterCounts.nHighIncl, mClusterCounts.nTotal);
2858 PrintClusterCount(mode, num, "Rejected", mClusterCounts.nRejected, mClusterCounts.nTotal);
2859 PrintClusterCount(mode, num, "Tube (> 200 MeV)", mClusterCounts.nTube, mClusterCounts.nTotal);
2860 PrintClusterCount(mode, num, "Tube (< 200 MeV)", mClusterCounts.nTube200, mClusterCounts.nTotal);
2861 PrintClusterCount(mode, num, "Looping Legs", mClusterCounts.nLoopers, mClusterCounts.nTotal);
2862 PrintClusterCount(mode, num, "Low Pt < 50 MeV", mClusterCounts.nLowPt, mClusterCounts.nTotal);
2863 PrintClusterCount(mode, num, "Low Pt < 200 MeV", mClusterCounts.n200MeV, mClusterCounts.nTotal);
2864
2865 if (mcPresent() && (mQATasks & taskClusterAttach)) {
2866 PrintClusterCount(mode, num, "Tracks > 400 MeV", mClusterCounts.nAbove400, mClusterCounts.nTotal);
2867 PrintClusterCount(mode, num, "Fake Removed (> 400 MeV)", mClusterCounts.nFakeRemove400, mClusterCounts.nAbove400);
2868 PrintClusterCount(mode, num, "Full Fake Removed (> 400 MeV)", mClusterCounts.nFullFakeRemove400, mClusterCounts.nAbove400);
2869 PrintClusterCount(mode, num, "Tracks < 40 MeV", mClusterCounts.nBelow40, mClusterCounts.nTotal);
2870 PrintClusterCount(mode, num, "Fake Protect (< 40 MeV)", mClusterCounts.nFakeProtect40, mClusterCounts.nBelow40);
2871 }
2872 return num;
2873}
2874
2875void* GPUQA::AllocateScratchBuffer(size_t nBytes)
2876{
2877 mTrackingScratchBuffer.resize((nBytes + sizeof(mTrackingScratchBuffer[0]) - 1) / sizeof(mTrackingScratchBuffer[0]));
2878 return mTrackingScratchBuffer.data();
2879}
A const (ready only) version of MCTruthContainer.
Helper class to access correction maps.
int16_t charge
Definition RawEventData.h:5
int32_t i
#define GPUCA_MIN_TRACK_PTB5_DEFAULT
#define TRACK_EXPECTED_REFERENCE_X_DEFAULT
Definition GPUQA.cxx:213
#define TRACK_EXPECTED_REFERENCE_X
Definition GPUQA.cxx:266
#define QA_DEBUG
Definition GPUQA.cxx:15
#define QA_TIMING
Definition GPUQA.cxx:16
#define CHECK_CLUSTER_STATE_NOCOUNT()
Definition GPUQA.cxx:127
#define CHECK_CLUSTER_STATE()
Definition GPUQA.cxx:114
int16_t Color_t
Definition GPUQA.h:32
GPUChain * chain
uint8_t leg
#define GPUCA_ROW_COUNT
Definition of the MCTrack class.
Definition of the Names Generator class.
uint16_t pos
Definition RawData.h:3
uint32_t j
Definition RawData.h:0
uint32_t side
Definition RawData.h:0
uint16_t pid
Definition RawData.h:2
uint32_t c
Definition RawData.h:2
Definition of TPCFastTransform class.
TBranch * ptr
int nClusters
double num
void Start()
Definition timer.cxx:57
double GetCurrentElapsedTime(bool reset=false)
Definition timer.cxx:110
Class for time synchronization of RawReader instances.
static constexpr ID TPC
Definition DetID.h:64
static constexpr int32_t NSECTORS
Definition GPUChain.h:58
int32_t ReadO2MCData(const char *filename)
Definition GPUQA.h:54
bool clusterRemovable(int32_t attach, bool prot) const
Definition GPUQA.h:52
~GPUQA()=default
Definition GPUQA.cxx:344
void * AllocateScratchBuffer(size_t nBytes)
Definition GPUQA.h:55
void SetMCTrackRange(int32_t min, int32_t max)
Definition GPUQA.h:47
mcLabelI_t GetMCTrackLabel(uint32_t trackId) const
Definition GPUQA.h:51
int32_t DrawQAHistograms()
Definition GPUQA.h:46
int32_t InitQA(int32_t tasks=0)
Definition GPUQA.h:44
void DumpO2MCData(const char *filename) const
Definition GPUQA.h:53
int32_t mcLabelI_t
Definition GPUQA.h:43
void RunQA(bool matchOnly=false)
Definition GPUQA.h:45
GPUQA(void *chain)
Definition GPUQA.h:41
static GPUROOTDump< T, Args... > getNew(const char *name1, Names... names)
Definition GPUROOTDump.h:64
static DigitizationContext * loadFromFile(std::string_view filename="")
GLdouble n
Definition glcorearb.h:1982
GLfloat GLfloat GLfloat alpha
Definition glcorearb.h:279
GLint GLenum GLint x
Definition glcorearb.h:403
GLenum mode
Definition glcorearb.h:266
GLenum src
Definition glcorearb.h:1767
GLuint GLfloat GLfloat GLfloat GLfloat y1
Definition glcorearb.h:5034
GLsizeiptr size
Definition glcorearb.h:659
GLuint GLuint end
Definition glcorearb.h:469
const GLdouble * v
Definition glcorearb.h:832
GLuint const GLchar * name
Definition glcorearb.h:781
GLuint GLuint GLfloat weight
Definition glcorearb.h:5477
GLenum GLint * range
Definition glcorearb.h:1899
GLint y
Definition glcorearb.h:270
GLenum GLenum dst
Definition glcorearb.h:1767
GLboolean * data
Definition glcorearb.h:298
GLuint GLsizei const GLchar * label
Definition glcorearb.h:2519
typedef void(APIENTRYP PFNGLCULLFACEPROC)(GLenum mode)
GLuint GLfloat * val
Definition glcorearb.h:1582
GLenum GLfloat param
Definition glcorearb.h:271
GLuint id
Definition glcorearb.h:650
GLdouble GLdouble GLdouble z
Definition glcorearb.h:843
float float float y2
Definition MathUtils.h:44
constexpr int LHCBCPERTIMEBIN
Definition Constants.h:38
Enum< T >::Iterator begin(Enum< T >)
Definition Defs.h:173
value_T f3
Definition TrackUtils.h:93
value_T f1
Definition TrackUtils.h:91
value_T f2
Definition TrackUtils.h:92
struct o2::upgrades_utils::@454 tracks
structure to keep trigger-related info
Defining DataPointCompositeObject explicitly as copiable.
std::string filename()
bool isValid(std::string alias)
int64_t differenceInBC(const InteractionRecord &other) const
std::tuple< std::vector< std::unique_ptr< TCanvas > >, std::vector< std::unique_ptr< TLegend > >, std::vector< std::unique_ptr< TPad > >, std::vector< std::unique_ptr< TLatex > >, std::vector< std::unique_ptr< TH1D > > > v
Definition GPUQA.cxx:322
IR getFirstIRofTF(const IR &rec) const
get 1st IR of TF corresponding to the 1st sampled orbit (in MC)
Definition HBFUtils.h:71
constexpr size_t min
constexpr size_t max
o2::InteractionRecord ir(0, 0)
vec clear()
o2::InteractionRecord ir0(3, 5)