GPUQA.cxx
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
15#define QA_DEBUG 0
16#define QA_TIMING 0
17
18#include "Rtypes.h" // Include ROOT header first, to use ROOT and disable replacements
19
20#include "TH1F.h"
21#include "TH2F.h"
22#include "TH1D.h"
23#include "TGraphAsymmErrors.h"
24#include "TCanvas.h"
25#include "TPad.h"
26#include "TLegend.h"
27#include "TColor.h"
28#include "TPaveText.h"
29#include "TF1.h"
30#include "TFile.h"
31#include "TTree.h"
32#include "TStyle.h"
33#include "TLatex.h"
34#include "TObjArray.h"
35#include <sys/stat.h>
36
37#include "GPUQA.h"
38#include "GPUTPCDef.h"
39#include "GPUTPCTrackingData.h"
40#include "GPUChainTracking.h"
41#include "GPUChainTrackingGetters.inc"
42#include "GPUTPCTrack.h"
43#include "GPUTPCTracker.h"
44#include "GPUTPCGMMergedTrack.h"
45#include "GPUTPCGMPropagator.h"
47#include "GPUTPCMCInfo.h"
48#include "GPUO2DataTypes.h"
49#include "GPUParam.inc"
51#include "GPUTPCConvertImpl.h"
52#include "TPCFastTransform.h"
54#include "GPUROOTDump.h"
57#include "GPUSettings.h"
58#ifdef GPUCA_O2_LIB
68#include "TPDGCode.h"
69#include "TParticlePDG.h"
70#include "TDatabasePDG.h"
71#endif
72#include "GPUQAHelper.h"
73#include <algorithm>
74#include <cstdio>
75#include <cinttypes>
76#include <fstream>
77
78#include "utils/timer.h"
79
80#include <oneapi/tbb.h>
81
82using namespace o2::gpu;
83
84#ifdef GPUCA_MERGER_BY_MC_LABEL
85#define CHECK_CLUSTER_STATE_INIT_LEG_BY_MC() \
86 if (!unattached && mTrackMCLabels[id].isValid()) { \
87 int32_t mcLabel = mTrackMCLabels[id].getTrackID(); \
88 int32_t mcEvent = mTrackMCLabels[id].getEventID(); \
89 int32_t mcSource = mTrackMCLabels[id].getSourceID(); \
90 if (mTrackMCLabelsReverse[mMCEventOffset[mcSource] + mcEvent][mcLabel] != id) { \
91 attach &= (~gputpcgmmergertypes::attachGoodLeg); \
92 } \
93 }
94#else
95#define CHECK_CLUSTER_STATE_INIT_LEG_BY_MC()
96#endif
97
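// The CHECK_CLUSTER_STATE* macros below decode a cluster attachment word: they extract the id of the
// attached merged track and flag low-pt (q/pt above the rejection threshold), pt < 200 MeV ("mev200")
// and merged-looper cases; GPUTPCClusterRejection::GetProtectionStatus then classifies the remaining
// clusters as physics / protected, with the counting variant additionally updating mClusterCounts.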
98#define CHECK_CLUSTER_STATE_INIT() \
99 bool unattached = attach == 0; \
100 float qpt = 0; \
101 bool lowPt = false; \
102 [[maybe_unused]] bool mev200 = false; \
103 bool mergedLooperUnconnected = false; \
104 bool mergedLooperConnected = false; \
105 int32_t id = attach & gputpcgmmergertypes::attachTrackMask; \
106 if (!unattached) { \
107 qpt = fabsf(mTracking->mIOPtrs.mergedTracks[id].GetParam().GetQPt()); \
108 lowPt = qpt * mTracking->GetParam().qptB5Scaler > mTracking->GetParam().rec.tpc.rejectQPtB5; \
109 mev200 = qpt > 5; \
110 mergedLooperUnconnected = mTracking->mIOPtrs.mergedTracks[id].MergedLooperUnconnected(); \
111 mergedLooperConnected = mTracking->mIOPtrs.mergedTracks[id].MergedLooperConnected(); \
112 } \
113 bool physics = false, protect = false; \
114 CHECK_CLUSTER_STATE_INIT_LEG_BY_MC();
115
116#define CHECK_CLUSTER_STATE() \
117 CHECK_CLUSTER_STATE_INIT() \
118 if (mev200) { \
119 mClusterCounts.n200MeV++; \
120 } \
121 if (lowPt) { \
122 mClusterCounts.nLowPt++; \
123 } else if (mergedLooperUnconnected) { \
124 mClusterCounts.nMergedLooperUnconnected++; \
125 } else if (mergedLooperConnected) { \
126 mClusterCounts.nMergedLooperConnected++; \
127 } else { \
128 GPUTPCClusterRejection::GetProtectionStatus<true>(attach, physics, protect, &mClusterCounts, &mev200); \
129 }
130
131#define CHECK_CLUSTER_STATE_NOCOUNT() \
132 CHECK_CLUSTER_STATE_INIT() \
133 if (!lowPt && !mergedLooperUnconnected && !mergedLooperConnected) { \
134 GPUTPCClusterRejection::GetProtectionStatus<false>(attach, physics, protect); \
135 }
136
137static const GPUSettingsQA& GPUQA_GetConfig(GPUChainTracking* chain)
138{
139 static GPUSettingsQA defaultConfig;
140 if (chain && chain->mConfigQA) {
141 return *chain->mConfigQA;
142 } else {
143 return defaultConfig;
144 }
145}
146
147// static const constexpr bool PLOT_ROOT = 0;
148// static const constexpr bool FIX_SCALES = 0;
149static const constexpr bool PERF_FIGURE = 0;
150// static const constexpr float FIXED_SCALES_MIN[5] = {-0.05, -0.05, -0.2, -0.2, -0.5};
151// static const constexpr float FIXED_SCALES_MAX[5] = {0.4, 0.7, 5, 3, 6.5};
152static const constexpr float LOG_PT_MIN = -1.;
153
154static constexpr float Y_MAX = 40;
155static constexpr float Z_MAX = 100;
156static constexpr float PT_MIN = GPUCA_MIN_TRACK_PTB5_DEFAULT;
157// static constexpr float PT_MIN2 = 0.1;
158static constexpr float PT_MIN_PRIM = 0.1;
159static constexpr float PT_MIN_CLUST = GPUCA_MIN_TRACK_PTB5_DEFAULT;
160static constexpr float PT_MAX = 20;
161static constexpr float ETA_MAX = 1.5;
162static constexpr float ETA_MAX2 = 0.9;
163
164static constexpr bool CLUST_HIST_INT_SUM = false;
165
166static constexpr const int32_t COLORCOUNT = 12;
167
168static const constexpr char* EFF_TYPES[5] = {"Rec", "Clone", "Fake", "All", "RecAndClone"};
169static const constexpr char* FINDABLE_NAMES[2] = {"All", "Findable"};
170static const constexpr char* PRIM_NAMES[2] = {"Prim", "Sec"};
171static const constexpr char* PARAMETER_NAMES[5] = {"Y", "Z", "#Phi", "#lambda", "Relative #it{p}_{T}"};
172static const constexpr char* PARAMETER_NAMES_NATIVE[5] = {"Y", "Z", "sin(#Phi)", "tan(#lambda)", "q/#it{p}_{T} (curvature)"};
173static const constexpr char* VSPARAMETER_NAMES[6] = {"Y", "Z", "Phi", "Eta", "Pt", "Pt_log"};
174static const constexpr char* EFF_NAMES[3] = {"Efficiency", "Clone Rate", "Fake Rate"};
175static const constexpr char* EFFICIENCY_TITLES[4] = {"Efficiency (Primary Tracks, Findable)", "Efficiency (Secondary Tracks, Findable)", "Efficiency (Primary Tracks)", "Efficiency (Secondary Tracks)"};
176static const constexpr double SCALE[5] = {10., 10., 1000., 1000., 100.};
177static const constexpr double SCALE_NATIVE[5] = {10., 10., 1000., 1000., 1.};
178static const constexpr char* XAXIS_TITLES[5] = {"#it{y}_{mc} (cm)", "#it{z}_{mc} (cm)", "#Phi_{mc} (rad)", "#eta_{mc}", "#it{p}_{Tmc} (GeV/#it{c})"};
179static const constexpr char* AXIS_TITLES[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "#phi-#phi_{mc} (mrad) (Resolution)", "#lambda-#lambda_{mc} (mrad) (Resolution)", "(#it{p}_{T} - #it{p}_{Tmc}) / #it{p}_{Tmc} (%) (Resolution)"};
180static const constexpr char* AXIS_TITLES_NATIVE[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "sin(#phi)-sin(#phi_{mc}) (Resolution)", "tan(#lambda)-tan(#lambda_{mc}) (Resolution)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc}) (Resolution)"};
181static const constexpr char* AXIS_TITLES_PULL[5] = {"#it{y}-#it{y}_{mc}/#sigma_{y} (Pull)", "#it{z}-#it{z}_{mc}/#sigma_{z} (Pull)", "sin(#phi)-sin(#phi_{mc})/#sigma_{sin(#phi)} (Pull)", "tan(#lambda)-tan(#lambda_{mc})/#sigma_{tan(#lambda)} (Pull)",
182 "q*(q/#it{p}_{T} - q/#it{p}_{Tmc})/#sigma_{q/#it{p}_{T}} (Pull)"};
183static const constexpr char* CLUSTER_NAMES[GPUQA::N_CLS_HIST] = {"Correctly attached clusters", "Fake attached clusters", "Attached + adjacent clusters", "Fake adjacent clusters", "Clusters of reconstructed tracks", "Used in Physics", "Protected", "All clusters"};
184static const constexpr char* CLUSTER_TITLES[GPUQA::N_CLS_TYPE] = {"Clusters Pt Distribution / Attachment", "Clusters Pt Distribution / Attachment (relative to all clusters)", "Clusters Pt Distribution / Attachment (integrated)"};
185static const constexpr char* CLUSTER_NAMES_SHORT[GPUQA::N_CLS_HIST] = {"Attached", "Fake", "AttachAdjacent", "FakeAdjacent", "FoundTracks", "Physics", "Protected", "All"};
186static const constexpr char* CLUSTER_TYPES[GPUQA::N_CLS_TYPE] = {"", "Ratio", "Integral"};
187static const constexpr int32_t COLORS_HEX[COLORCOUNT] = {0xB03030, 0x00A000, 0x0000C0, 0x9400D3, 0x19BBBF, 0xF25900, 0x7F7F7F, 0xFFD700, 0x07F707, 0x07F7F7, 0xF08080, 0x000000};
188
189static const constexpr int32_t CONFIG_DASHED_MARKERS = 0;
190
191static const constexpr float AXES_MIN[5] = {-Y_MAX, -Z_MAX, 0.f, -ETA_MAX, PT_MIN};
192static const constexpr float AXES_MAX[5] = {Y_MAX, Z_MAX, 2.f * M_PI, ETA_MAX, PT_MAX};
193static const constexpr int32_t AXIS_BINS[5] = {51, 51, 144, 31, 50};
194static const constexpr int32_t RES_AXIS_BINS[] = {1017, 113}; // Consecutive bin counts; histograms are rebinned down until the maximum entry is 50; each bin count should evenly divide its predecessor.
195static const constexpr float RES_AXES[5] = {1., 1., 0.03, 0.03, 1.0};
196static const constexpr float RES_AXES_NATIVE[5] = {1., 1., 0.1, 0.1, 5.0};
197static const constexpr float PULL_AXIS = 10.f;
198
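// initColors() converts the COLORS_HEX RGB values into ROOT TColor objects (ids 10000 + i) used to color the QA plots.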
199std::vector<TColor*> GPUQA::mColors;
200int32_t GPUQA::initColors()
201{
202 mColors.reserve(COLORCOUNT);
203 for (int32_t i = 0; i < COLORCOUNT; i++) {
204 float f1 = (float)((COLORS_HEX[i] >> 16) & 0xFF) / (float)0xFF;
205 float f2 = (float)((COLORS_HEX[i] >> 8) & 0xFF) / (float)0xFF;
206 float f3 = (float)((COLORS_HEX[i] >> 0) & 0xFF) / (float)0xFF;
207 mColors.emplace_back(new TColor(10000 + i, f1, f2, f3));
208 }
209 return 0;
210}
211static constexpr Color_t defaultColorNums[COLORCOUNT] = {kRed, kBlue, kGreen, kMagenta, kOrange, kAzure, kBlack, kYellow, kGray, kTeal, kSpring, kPink};
212
213#define TRACK_EXPECTED_REFERENCE_X_DEFAULT 81
214#ifdef GPUCA_TPC_GEOMETRY_O2
215static inline int32_t GPUQA_O2_ConvertFakeLabel(int32_t label) { return label >= 0x7FFFFFFE ? -1 : label; }
216inline uint32_t GPUQA::GetNMCCollissions() const { return mMCInfosCol.size(); }
217inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mMCInfosCol[iCol].num; }
218inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].num; }
219inline uint32_t GPUQA::GetNMCLabels() const { return mClNative->clustersMCTruth ? mClNative->clustersMCTruth->getIndexedSize() : 0; }
220inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mMCInfos[mMCInfosCol[iCol].first + iTrk]; }
221inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return mMCInfos[mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].first + label.getTrackID()]; }
222inline GPUQA::mcLabels_t GPUQA::GetMCLabel(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i); }
223inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return label.size(); }
224inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i).size(); }
225inline GPUQA::mcLabel_t GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mClNative->clustersMCTruth->getLabels(i)[j]; }
226inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(mClNative->clustersMCTruth->getLabels(i)[j].getTrackID()); }
227inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(label[j].getTrackID()); }
228inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return GPUQA_O2_ConvertFakeLabel(label.getTrackID()); }
229inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return mMCEventOffset[mClNative->clustersMCTruth->getLabels(i)[j].getSourceID()] + mClNative->clustersMCTruth->getLabels(i)[j].getEventID(); }
230inline const auto& GPUQA::GetClusterLabels() { return mClNative->clustersMCTruth; }
231inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return 1; }
232inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return 1; }
233inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return 1; }
234inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && mClNative && mClNative->clustersMCTruth && mMCInfos.size(); }
235uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return !label.isValid() ? 0 : (mMCEventOffset[label.getSourceID()] + label.getEventID()); }
236GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? MCCompLabel() : mTrackMCLabels[trackId]; }
237bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return l1.compare(l2) >= 0; }
238#define TRACK_EXPECTED_REFERENCE_X 78
239#else
240inline GPUQA::mcLabelI_t::mcLabelI_t(const GPUQA::mcLabel_t& l) : track(l.fMCID) {}
241inline bool GPUQA::mcLabelI_t::operator==(const GPUQA::mcLabel_t& l) { return AbsLabelID(track) == l.fMCID; }
242inline uint32_t GPUQA::GetNMCCollissions() const { return 1; }
243inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mTracking->mIOPtrs.nMCInfosTPC; }
244inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mTracking->mIOPtrs.nMCInfosTPC; }
245inline uint32_t GPUQA::GetNMCLabels() const { return mTracking->mIOPtrs.nMCLabelsTPC; }
246inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mTracking->mIOPtrs.mcInfosTPC[AbsLabelID(iTrk)]; }
247inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return GetMCTrack(label.fMCID, 0); }
248inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabelI_t& label) { return GetMCTrack(label.track, 0); }
249inline const GPUQA::mcLabels_t& GPUQA::GetMCLabel(uint32_t i) { return mTracking->mIOPtrs.mcLabelsTPC[i]; }
250inline const GPUQA::mcLabel_t& GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j]; }
251inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return 3; }
252inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return 3; }
253inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fMCID; }
254inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fMCID; }
255inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return label.fMCID; }
256inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return 0; }
257
258inline const auto& GPUQA::GetClusterLabels() { return mTracking->mIOPtrs.mcLabelsTPC; }
259inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fWeight; }
260inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fWeight; }
261inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return label.fWeight; }
262inline int32_t GPUQA::FakeLabelID(int32_t id) { return id < 0 ? id : (-2 - id); }
263inline int32_t GPUQA::AbsLabelID(int32_t id) { return id >= 0 ? id : (-id - 2); }
264inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && GetNMCLabels() && GetNMCTracks(0); }
265uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return 0; }
266GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? mcLabelI_t() : mTrackMCLabels[trackId]; }
267bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return AbsLabelID(l1) == AbsLabelID(l2); }
268#define TRACK_EXPECTED_REFERENCE_X TRACK_EXPECTED_REFERENCE_X_DEFAULT
269#endif
270template <class T>
271inline auto& GPUQA::GetMCTrackObj(T& obj, const GPUQA::mcLabelI_t& l)
272{
273 return obj[mMCEventOffset[l.getSourceID()] + l.getEventID()][l.getTrackID()];
274}
275
276template <>
277auto GPUQA::getHistArray<TH1F>()
278{
279 return std::make_pair(mHist1D, &mHist1D_pos);
280}
281template <>
282auto GPUQA::getHistArray<TH2F>()
283{
284 return std::make_pair(mHist2D, &mHist2D_pos);
285}
286template <>
287auto GPUQA::getHistArray<TH1D>()
288{
289 return std::make_pair(mHist1Dd, &mHist1Dd_pos);
290}
291template <>
292auto GPUQA::getHistArray<TGraphAsymmErrors>()
293{
294 return std::make_pair(mHistGraph, &mHistGraph_pos);
295}
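// createHist() either appends a newly created histogram to the owned array, or, when histograms were
// provided externally via loadHistograms(), verifies that the next pre-existing entry has the expected
// name; in both cases the caller's pointer is registered in the corresponding *_pos list.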
296template <class T, typename... Args>
297void GPUQA::createHist(T*& h, const char* name, Args... args)
298{
299 const auto& p = getHistArray<T>();
300 if (mHaveExternalHists) {
301 if (p.first->size() <= p.second->size()) {
302 GPUError("Array sizes mismatch: Histograms %lu <= Positions %lu", p.first->size(), p.second->size());
303 throw std::runtime_error("Incoming histogram array incomplete");
304 }
305 if (strcmp((*p.first)[p.second->size()].GetName(), name)) {
306 GPUError("Histogram name mismatch: in array %s, trying to create %s", (*p.first)[p.second->size()].GetName(), name);
307 throw std::runtime_error("Incoming histogram has incorrect name");
308 }
309 } else {
310 if constexpr (std::is_same_v<T, TGraphAsymmErrors>) {
311 p.first->emplace_back();
312 p.first->back().SetName(name);
313 } else {
314 p.first->emplace_back(name, args...);
315 }
316 }
317 h = &((*p.first)[p.second->size()]);
318 p.second->emplace_back(&h);
319}
320
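// Owns the ROOT objects created while drawing (canvases, legends, pads, labels, helper histograms),
// so they can be destroyed in a controlled order on teardown (TPads first, see clearGarbagageCollector).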
321namespace o2::gpu::internal
322{
323struct GPUQAGarbageCollection {
324 std::tuple<std::vector<std::unique_ptr<TCanvas>>, std::vector<std::unique_ptr<TLegend>>, std::vector<std::unique_ptr<TPad>>, std::vector<std::unique_ptr<TLatex>>, std::vector<std::unique_ptr<TH1D>>> v;
325};
326} // namespace o2::gpu::internal
327
328template <class T, typename... Args>
329T* GPUQA::createGarbageCollected(Args... args)
330{
331 auto& v = std::get<std::vector<std::unique_ptr<T>>>(mGarbageCollector->v);
332 v.emplace_back(std::make_unique<T>(args...));
333 return v.back().get();
334}
335void GPUQA::clearGarbagageCollector()
336{
337 std::get<std::vector<std::unique_ptr<TPad>>>(mGarbageCollector->v).clear(); // Make sure to delete TPad first due to ROOT ownership (std::tuple has no defined order in its destructor)
338 std::apply([](auto&&... args) { ((args.clear()), ...); }, mGarbageCollector->v);
339}
340
341GPUQA::GPUQA(GPUChainTracking* chain, const GPUSettingsQA* config, const GPUParam* param) : mTracking(chain), mConfig(config ? *config : GPUQA_GetConfig(chain)), mParam(param ? param : &chain->GetParam()), mGarbageCollector(std::make_unique<internal::GPUQAGarbageCollection>())
342{
343 mMCEventOffset.resize(1, 0);
344}
345
346GPUQA::~GPUQA()
347{
348 if (mQAInitialized && !mHaveExternalHists) {
349 delete mHist1D;
350 delete mHist2D;
351 delete mHist1Dd;
352 delete mHistGraph;
353 }
354 clearGarbagageCollector(); // Needed to guarantee correct order for ROOT ownership
355}
356
357bool GPUQA::clusterRemovable(int32_t attach, bool prot) const
358{
359 CHECK_CLUSTER_STATE_NOCOUNT();
360 if (prot) {
361 return protect || physics;
362 }
363 return (!unattached && !physics && !protect);
364}
365
366template <class T>
367void GPUQA::SetAxisSize(T* e)
368{
369 e->GetYaxis()->SetTitleOffset(1.0);
370 e->GetYaxis()->SetTitleSize(0.045);
371 e->GetYaxis()->SetLabelSize(0.045);
372 e->GetXaxis()->SetTitleOffset(1.03);
373 e->GetXaxis()->SetTitleSize(0.045);
374 e->GetXaxis()->SetLabelOffset(-0.005);
375 e->GetXaxis()->SetLabelSize(0.045);
376}
377
378void GPUQA::SetLegend(TLegend* l)
379{
380 l->SetTextFont(72);
381 l->SetTextSize(0.016);
382 l->SetFillColor(0);
383}
384
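// CreateLogAxis() returns nbins + 1 logarithmically spaced bin edges between xmin and xmax; the caller
// takes ownership of the array (it is wrapped in std::unique_ptr<double[]> at the call sites below).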
385double* GPUQA::CreateLogAxis(int32_t nbins, float xmin, float xmax)
386{
387 float logxmin = std::log10(xmin);
388 float logxmax = std::log10(xmax);
389 float binwidth = (logxmax - logxmin) / nbins;
390
391 double* xbins = new double[nbins + 1];
392
393 xbins[0] = xmin;
394 for (int32_t i = 1; i <= nbins; i++) {
395 xbins[i] = std::pow(10, logxmin + i * binwidth);
396 }
397 return xbins;
398}
399
400void GPUQA::ChangePadTitleSize(TPad* p, float size)
401{
402 p->Update();
403 TPaveText* pt = (TPaveText*)(p->GetPrimitive("title"));
404 if (pt == nullptr) {
405 GPUError("Error changing title");
406 } else {
407 pt->SetTextSize(size);
408 p->Modified();
409 }
410}
411
412void GPUQA::DrawHisto(TH1* histo, char* filename, char* options)
413{
414 TCanvas tmp;
415 tmp.cd();
416 histo->Draw(options);
417 tmp.Print(filename);
418}
419
420void GPUQA::doPerfFigure(float x, float y, float size)
421{
422 if (!PERF_FIGURE) {
423 return;
424 }
425 TLatex* t = createGarbageCollected<TLatex>();
426 t->SetNDC(kTRUE);
427 t->SetTextColor(1);
428 t->SetTextSize(size);
429 t->DrawLatex(x, y, str_perf_figure_1);
430 t->DrawLatex(x, y - 0.01 - size, str_perf_figure_2);
431}
432
433void GPUQA::SetMCTrackRange(int32_t min, int32_t max)
434{
435 mMCTrackMin = min;
436 mMCTrackMax = max;
437}
438
439int32_t GPUQA::InitQACreateHistograms()
440{
441 char name[2048], fname[1024];
442 if (mQATasks & taskTrackingEff) {
443 // Create Efficiency Histograms
444 for (int32_t i = 0; i < 5; i++) {
445 for (int32_t j = 0; j < 2; j++) {
446 for (int32_t k = 0; k < 2; k++) {
447 for (int32_t l = 0; l < 5; l++) {
448 snprintf(name, 2048, "%s%s%s%sVs%s", "tracks", EFF_TYPES[i], FINDABLE_NAMES[j], PRIM_NAMES[k], VSPARAMETER_NAMES[l]);
449 if (l == 4) {
450 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], k == 0 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
451 createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], binsPt.get());
452 } else {
453 createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], AXES_MIN[l], AXES_MAX[l]);
454 }
455 if (!mHaveExternalHists) {
456 mEff[i][j][k][l]->Sumw2();
457 }
458 strcat(name, "_eff");
459 if (i < 4) {
460 createHist(mEffResult[i][j][k][l], name);
461 }
462 }
463 }
464 }
465 }
466 }
467
468 // Create Resolution Histograms
469 if (mQATasks & taskTrackingRes) {
470 for (int32_t i = 0; i < 5; i++) {
471 for (int32_t j = 0; j < 5; j++) {
472 snprintf(name, 2048, "rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
473 snprintf(fname, 1024, "mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
474 if (j == 4) {
475 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
476 createHist(mRes[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
477 createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
478 } else {
479 createHist(mRes[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
480 createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
481 }
482 snprintf(name, 2048, "res_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
483 const float* axis = mConfig.nativeFitResolutions ? RES_AXES_NATIVE : RES_AXES;
484 const int32_t nbins = i == 4 && mConfig.nativeFitResolutions ? (10 * RES_AXIS_BINS[0]) : RES_AXIS_BINS[0];
485 if (j == 4) {
486 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
487 createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], binsPt.get());
488 } else {
489 createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
490 }
491 }
492 }
493 }
494
495 // Create Pull Histograms
496 if (mQATasks & taskTrackingResPull) {
497 for (int32_t i = 0; i < 5; i++) {
498 for (int32_t j = 0; j < 5; j++) {
499 snprintf(name, 2048, "pull_rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
500 snprintf(fname, 1024, "pull_mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
501 if (j == 4) {
502 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
503 createHist(mPull[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
504 createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
505 } else {
506 createHist(mPull[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
507 createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
508 }
509 snprintf(name, 2048, "pull_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
510 if (j == 4) {
511 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
512 createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], binsPt.get());
513 } else {
514 createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
515 }
516 }
517 }
518 }
519
520 // Create Cluster Histograms
521 if (mQATasks & taskClusterAttach) {
522 for (int32_t i = 0; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
523 int32_t ioffset = i >= (2 * N_CLS_HIST - 1) ? (2 * N_CLS_HIST - 1) : i >= N_CLS_HIST ? N_CLS_HIST : 0;
524 int32_t itype = i >= (2 * N_CLS_HIST - 1) ? 2 : i >= N_CLS_HIST ? 1 : 0;
525 snprintf(name, 2048, "clusters%s%s", CLUSTER_NAMES_SHORT[i - ioffset], CLUSTER_TYPES[itype]);
526 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
527 createHist(mClusters[i], name, name, AXIS_BINS[4], binsPt.get());
528 }
529 }
530
531 if (mQATasks & taskTrackStatistics) {
532 // Create Tracks Histograms
533 for (int32_t i = 0; i < 2; i++) {
534 snprintf(name, 2048, i ? "nrows_with_cluster" : "nclusters");
535 createHist(mNCl[i], name, name, 160, 0, 159);
536 }
537 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
538 createHist(mTracks, "tracks_pt", "tracks_pt", AXIS_BINS[4], binsPt.get());
539 const uint32_t maxTime = (mTracking && mTracking->GetParam().continuousMaxTimeBin > 0) ? mTracking->GetParam().continuousMaxTimeBin : TPC_MAX_TIME_BIN_TRIGGERED;
540 createHist(mT0[0], "tracks_t0", "tracks_t0", (maxTime + 1) / 10, 0, maxTime);
541 createHist(mT0[1], "tracks_t0_res", "tracks_t0_res", 1000, -100, 100);
542 createHist(mClXY, "clXY", "clXY", 1000, -250, 250, 1000, -250, 250); // TODO: Pass name only once
543 }
544
545 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
546 int32_t num = DoClusterCounts(nullptr, 2);
547 mHistClusterCount.resize(num);
548 DoClusterCounts(nullptr, 1);
549 }
550
551 for (uint32_t i = 0; i < mHist1D->size(); i++) {
552 *mHist1D_pos[i] = &(*mHist1D)[i];
553 }
554 for (uint32_t i = 0; i < mHist2D->size(); i++) {
555 *mHist2D_pos[i] = &(*mHist2D)[i];
556 }
557 for (uint32_t i = 0; i < mHist1Dd->size(); i++) {
558 *mHist1Dd_pos[i] = &(*mHist1Dd)[i];
559 }
560 for (uint32_t i = 0; i < mHistGraph->size(); i++) {
561 *mHistGraph_pos[i] = &(*mHistGraph)[i];
562 }
563
564 return 0;
565}
566
567int32_t GPUQA::loadHistograms(std::vector<TH1F>& i1, std::vector<TH2F>& i2, std::vector<TH1D>& i3, std::vector<TGraphAsymmErrors>& i4, int32_t tasks)
568{
569 if (tasks == -1) {
570 tasks = taskDefaultPostprocess;
571 }
572 if (mQAInitialized && (!mHaveExternalHists || tasks != mQATasks)) {
573 throw std::runtime_error("QA not initialized or initialized with different task array");
574 }
575 mHist1D = &i1;
576 mHist2D = &i2;
577 mHist1Dd = &i3;
578 mHistGraph = &i4;
579 mHist1D_pos.clear();
580 mHist2D_pos.clear();
581 mHist1Dd_pos.clear();
582 mHistGraph_pos.clear();
583 mHaveExternalHists = true;
584 if (mConfig.noMC) {
585 tasks &= tasksNoQC;
586 }
587 mQATasks = tasks;
588 if (InitQACreateHistograms()) {
589 return 1;
590 }
591 mQAInitialized = true;
592 return 0;
593}
594
595void GPUQA::DumpO2MCData(const char* filename) const
596{
597 FILE* fp = fopen(filename, "w+b");
598 if (fp == nullptr) {
599 return;
600 }
601 uint32_t n = mMCInfos.size();
602 fwrite(&n, sizeof(n), 1, fp);
603 fwrite(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp);
604 n = mMCInfosCol.size();
605 fwrite(&n, sizeof(n), 1, fp);
606 fwrite(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp);
607 n = mMCEventOffset.size();
608 fwrite(&n, sizeof(n), 1, fp);
609 fwrite(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp);
610 fclose(fp);
611}
612
613int32_t GPUQA::ReadO2MCData(const char* filename)
614{
615 FILE* fp = fopen(filename, "rb");
616 if (fp == nullptr) {
617 return 1;
618 }
619 uint32_t n;
620 uint32_t x;
621 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
622 fclose(fp);
623 return 1;
624 }
625 mMCInfos.resize(n);
626 if (fread(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp) != n) {
627 fclose(fp);
628 return 1;
629 }
630 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
631 fclose(fp);
632 return 1;
633 }
634 mMCInfosCol.resize(n);
635 if (fread(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp) != n) {
636 fclose(fp);
637 return 1;
638 }
639 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
640 fclose(fp);
641 return 1;
642 }
643 mMCEventOffset.resize(n);
644 if (fread(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp) != n) {
645 fclose(fp);
646 return 1;
647 }
648 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
649 printf("Read %ld bytes MC Infos\n", ftell(fp));
650 }
651 fclose(fp);
652 if (mTracking) {
653 CopyO2MCtoIOPtr(&mTracking->mIOPtrs);
654 }
655 return 0;
656}
657
658void GPUQA::CopyO2MCtoIOPtr(GPUTrackingInOutPointers* ptr)
659{
660 ptr->mcInfosTPC = mMCInfos.data();
661 ptr->nMCInfosTPC = mMCInfos.size();
662 ptr->mcInfosTPCCol = mMCInfosCol.data();
663 ptr->nMCInfosTPCCol = mMCInfosCol.size();
664}
665
666void GPUQA::InitO2MCData(GPUTrackingInOutPointers* updateIOPtr)
667{
668#ifdef GPUCA_O2_LIB
669 if (!mO2MCDataLoaded) {
670 HighResTimer timer(mTracking && mTracking->GetProcessingSettings().debugLevel);
671 if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
672 GPUInfo("Start reading O2 Track MC information");
673 }
674 static constexpr float PRIM_MAX_T = 0.01f;
675
676 o2::steer::MCKinematicsReader mcReader("collisioncontext.root");
677 std::vector<int32_t> refId;
678
679 auto dc = o2::steer::DigitizationContext::loadFromFile("collisioncontext.root");
680 const auto& evrec = dc->getEventRecords();
681 const auto& evparts = dc->getEventParts();
682 std::vector<std::vector<float>> evTimeBins(mcReader.getNSources());
683 for (uint32_t i = 0; i < evTimeBins.size(); i++) {
684 evTimeBins[i].resize(mcReader.getNEvents(i), -100.f);
685 }
686 for (uint32_t i = 0; i < evrec.size(); i++) {
687 const auto& ir = evrec[i];
688 for (uint32_t j = 0; j < evparts[i].size(); j++) {
689 const int iSim = evparts[i][j].sourceID;
690 const int iEv = evparts[i][j].entryID;
691 if (iSim == o2::steer::QEDSOURCEID || ir.differenceInBC(o2::raw::HBFUtils::Instance().getFirstIR()) >= 0) {
694 if (evTimeBins[iSim][iEv] >= 0) {
695 throw std::runtime_error("Multiple time bins for same MC collision found");
696 }
697 evTimeBins[iSim][iEv] = timebin;
698 }
699 }
700 }
701
702 uint32_t nSimSources = mcReader.getNSources();
703 mMCEventOffset.resize(nSimSources);
704 uint32_t nSimTotalEvents = 0;
705 uint32_t nSimTotalTracks = 0;
706 for (uint32_t i = 0; i < nSimSources; i++) {
707 mMCEventOffset[i] = nSimTotalEvents;
708 nSimTotalEvents += mcReader.getNEvents(i);
709 }
710
711 mMCInfosCol.resize(nSimTotalEvents);
712 for (int32_t iSim = 0; iSim < mcReader.getNSources(); iSim++) {
713 for (int32_t i = 0; i < mcReader.getNEvents(iSim); i++) {
714 const float timebin = evTimeBins[iSim][i];
715
716 const std::vector<o2::MCTrack>& tracks = mcReader.getTracks(iSim, i);
717 const std::vector<o2::TrackReference>& trackRefs = mcReader.getTrackRefsByEvent(iSim, i);
718
719 refId.resize(tracks.size());
720 std::fill(refId.begin(), refId.end(), -1);
721 for (uint32_t j = 0; j < trackRefs.size(); j++) {
722 if (trackRefs[j].getDetectorId() == o2::detectors::DetID::TPC) {
723 int32_t trkId = trackRefs[j].getTrackID();
724 if (refId[trkId] == -1) {
725 refId[trkId] = j;
726 }
727 }
728 }
729 mMCInfosCol[mMCEventOffset[iSim] + i].first = mMCInfos.size();
730 mMCInfosCol[mMCEventOffset[iSim] + i].num = tracks.size();
731 mMCInfos.resize(mMCInfos.size() + tracks.size());
732 for (uint32_t j = 0; j < tracks.size(); j++) {
733 auto& info = mMCInfos[mMCInfosCol[mMCEventOffset[iSim] + i].first + j];
734 const auto& trk = tracks[j];
735 TParticlePDG* particle = TDatabasePDG::Instance()->GetParticle(trk.GetPdgCode());
736 Int_t pid = -1;
737 if (abs(trk.GetPdgCode()) == kElectron) {
738 pid = 0;
739 }
740 if (abs(trk.GetPdgCode()) == kMuonMinus) {
741 pid = 1;
742 }
743 if (abs(trk.GetPdgCode()) == kPiPlus) {
744 pid = 2;
745 }
746 if (abs(trk.GetPdgCode()) == kKPlus) {
747 pid = 3;
748 }
749 if (abs(trk.GetPdgCode()) == kProton) {
750 pid = 4;
751 }
752
753 info.charge = particle ? particle->Charge() : 0;
754 info.prim = trk.T() < PRIM_MAX_T;
755 info.primDaughters = 0;
756 if (trk.getFirstDaughterTrackId() != -1) {
757 for (int32_t k = trk.getFirstDaughterTrackId(); k <= trk.getLastDaughterTrackId(); k++) {
758 if (tracks[k].T() < PRIM_MAX_T) {
759 info.primDaughters = 1;
760 break;
761 }
762 }
763 }
764 info.pid = pid;
765 info.t0 = timebin;
766 if (refId[j] >= 0) {
767 const auto& trkRef = trackRefs[refId[j]];
768 info.x = trkRef.X();
769 info.y = trkRef.Y();
770 info.z = trkRef.Z();
771 info.pX = trkRef.Px();
772 info.pY = trkRef.Py();
773 info.pZ = trkRef.Pz();
774 info.genRadius = std::sqrt(trk.GetStartVertexCoordinatesX() * trk.GetStartVertexCoordinatesX() + trk.GetStartVertexCoordinatesY() * trk.GetStartVertexCoordinatesY() + trk.GetStartVertexCoordinatesZ() * trk.GetStartVertexCoordinatesZ());
775 } else {
776 info.x = info.y = info.z = info.pX = info.pY = info.pZ = 0;
777 info.genRadius = 0;
778 }
779 }
780 }
781 }
782 if (timer.IsRunning()) {
783 GPUInfo("Finished reading O2 Track MC information (%f seconds)", timer.GetCurrentElapsedTime());
784 }
785 mO2MCDataLoaded = true;
786 }
787 if (updateIOPtr) {
788 CopyO2MCtoIOPtr(updateIOPtr);
789 }
790#endif
791}
792
793int32_t GPUQA::InitQA(int32_t tasks)
794{
795 if (mQAInitialized) {
796 throw std::runtime_error("QA already initialized");
797 }
798 if (tasks == -1) {
799 tasks = taskDefault;
800 }
801
802 mHist1D = new std::vector<TH1F>;
803 mHist2D = new std::vector<TH2F>;
804 mHist1Dd = new std::vector<TH1D>;
805 mHistGraph = new std::vector<TGraphAsymmErrors>;
806 if (mConfig.noMC) {
807 tasks &= tasksNoQC;
808 }
809 mQATasks = tasks;
810
811 if (mTracking->GetProcessingSettings().qcRunFraction != 100.f && mQATasks != taskClusterCounts) {
812 throw std::runtime_error("QA with qcRunFraction only supported for taskClusterCounts");
813 }
814
815 if (mTracking) {
816 mClNative = mTracking->mIOPtrs.clustersNative;
817 }
818
819 if (InitQACreateHistograms()) {
820 return 1;
821 }
822
823 if (mConfig.enableLocalOutput) {
824 mkdir("plots", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
825 }
826
827#ifdef GPUCA_O2_LIB
828 if (!mConfig.noMC) {
829 InitO2MCData(mTracking ? &mTracking->mIOPtrs : nullptr);
830 }
831#endif
832
833 if (mConfig.matchMCLabels.size()) {
834 uint32_t nFiles = mConfig.matchMCLabels.size();
835 std::vector<std::unique_ptr<TFile>> files;
836 std::vector<std::vector<std::vector<int32_t>>*> labelsBuffer(nFiles);
837 std::vector<std::vector<std::vector<int32_t>>*> effBuffer(nFiles);
838 for (uint32_t i = 0; i < nFiles; i++) {
839 files.emplace_back(std::make_unique<TFile>(mConfig.matchMCLabels[i].c_str()));
840 labelsBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcLabelBuffer");
841 effBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcEffBuffer");
842 if (labelsBuffer[i] == nullptr || effBuffer[i] == nullptr) {
843 GPUError("Error opening / reading from labels file %u/%s: %p %p", i, mConfig.matchMCLabels[i].c_str(), (void*)labelsBuffer[i], (void*)effBuffer[i]);
844 exit(1);
845 }
846 }
847
848 mGoodTracks.resize(labelsBuffer[0]->size());
849 mGoodHits.resize(labelsBuffer[0]->size());
850 for (uint32_t iEvent = 0; iEvent < labelsBuffer[0]->size(); iEvent++) {
851 std::vector<bool> labelsOK((*effBuffer[0])[iEvent].size());
852 for (uint32_t k = 0; k < (*effBuffer[0])[iEvent].size(); k++) {
853 labelsOK[k] = false;
854 for (uint32_t l = 0; l < nFiles; l++) {
855 if ((*effBuffer[0])[iEvent][k] != (*effBuffer[l])[iEvent][k]) {
856 labelsOK[k] = true;
857 break;
858 }
859 }
860 }
861 mGoodTracks[iEvent].resize((*labelsBuffer[0])[iEvent].size());
862 for (uint32_t k = 0; k < (*labelsBuffer[0])[iEvent].size(); k++) {
863 if ((*labelsBuffer[0])[iEvent][k] == MC_LABEL_INVALID) {
864 continue;
865 }
866 mGoodTracks[iEvent][k] = labelsOK[abs((*labelsBuffer[0])[iEvent][k])];
867 }
868 }
869 }
870 mQAInitialized = true;
871 return 0;
872}
873
874void GPUQA::RunQA(bool matchOnly, const std::vector<o2::tpc::TrackTPC>* tracksExternal, const std::vector<o2::MCCompLabel>* tracksExtMC, const o2::tpc::ClusterNativeAccess* clNative)
875{
876 if (!mQAInitialized) {
877 throw std::runtime_error("QA not initialized");
878 }
879 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
880 GPUInfo("Running QA - Mask %d, Efficiency %d, Resolution %d, Pulls %d, Cluster Attachment %d, Track Statistics %d, Cluster Counts %d", mQATasks, (int32_t)(mQATasks & taskTrackingEff), (int32_t)(mQATasks & taskTrackingRes), (int32_t)(mQATasks & taskTrackingResPull), (int32_t)(mQATasks & taskClusterAttach), (int32_t)(mQATasks & taskTrackStatistics), (int32_t)(mQATasks & taskClusterCounts));
881 }
882 if (!clNative && mTracking) {
883 clNative = mTracking->mIOPtrs.clustersNative;
884 }
885 mClNative = clNative;
886
887#ifdef GPUCA_TPC_GEOMETRY_O2
888 uint32_t nSimEvents = GetNMCCollissions();
889 if (mTrackMCLabelsReverse.size() < nSimEvents) {
890 mTrackMCLabelsReverse.resize(nSimEvents);
891 }
892 if (mRecTracks.size() < nSimEvents) {
893 mRecTracks.resize(nSimEvents);
894 }
895 if (mFakeTracks.size() < nSimEvents) {
896 mFakeTracks.resize(nSimEvents);
897 }
898 if (mMCParam.size() < nSimEvents) {
899 mMCParam.resize(nSimEvents);
900 }
901#endif
902
903 // Initialize Arrays
904 uint32_t nReconstructedTracks = 0;
905 if (tracksExternal) {
906#ifdef GPUCA_O2_LIB
907 nReconstructedTracks = tracksExternal->size();
908#endif
909 } else {
910 nReconstructedTracks = mTracking->mIOPtrs.nMergedTracks;
911 }
912 mTrackMCLabels.resize(nReconstructedTracks);
913 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
914 mTrackMCLabelsReverse[iCol].resize(GetNMCTracks(iCol));
915 mRecTracks[iCol].resize(GetNMCTracks(iCol));
916 mFakeTracks[iCol].resize(GetNMCTracks(iCol));
917 mMCParam[iCol].resize(GetNMCTracks(iCol));
918 memset(mRecTracks[iCol].data(), 0, mRecTracks[iCol].size() * sizeof(mRecTracks[iCol][0]));
919 memset(mFakeTracks[iCol].data(), 0, mFakeTracks[iCol].size() * sizeof(mFakeTracks[iCol][0]));
920 for (size_t i = 0; i < mTrackMCLabelsReverse[iCol].size(); i++) {
921 mTrackMCLabelsReverse[iCol][i] = -1;
922 }
923 }
924 if (mQATasks & taskClusterAttach && GetNMCLabels()) {
925 mClusterParam.resize(GetNMCLabels());
926 memset(mClusterParam.data(), 0, mClusterParam.size() * sizeof(mClusterParam[0]));
927 }
928 HighResTimer timer(QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2));
929
930 mNEvents++;
931 if (mConfig.writeMCLabels) {
932 mcEffBuffer.resize(mNEvents);
933 mcLabelBuffer.resize(mNEvents);
934 mcEffBuffer[mNEvents - 1].resize(GetNMCTracks(0));
935 mcLabelBuffer[mNEvents - 1].resize(nReconstructedTracks);
936 }
937
938 bool mcAvail = mcPresent() || tracksExtMC;
939
940 if (mcAvail) { // Assign Track MC Labels
941 if (tracksExternal) {
942#ifdef GPUCA_O2_LIB
943 for (uint32_t i = 0; i < tracksExternal->size(); i++) {
944 mTrackMCLabels[i] = (*tracksExtMC)[i];
945 }
946#endif
947 } else {
948 tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nReconstructedTracks, (QA_DEBUG == 0) ? 32 : nReconstructedTracks), [&](const tbb::blocked_range<uint32_t>& range) {
949 auto acc = GPUTPCTrkLbl<true, mcLabelI_t>(GetClusterLabels(), 1.f - mConfig.recThreshold);
950 for (auto i = range.begin(); i < range.end(); i++) {
951 acc.reset();
952 int32_t nClusters = 0;
953 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
954 std::vector<mcLabel_t> labels;
955 for (uint32_t k = 0; k < track.NClusters(); k++) {
956 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
957 continue;
958 }
959 nClusters++;
960 uint32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
961 if (hitId >= GetNMCLabels()) {
962 GPUError("Invalid hit id %u > %d (nClusters %d)", hitId, GetNMCLabels(), mTracking->mIOPtrs.clustersNative ? mTracking->mIOPtrs.clustersNative->nClustersTotal : 0);
963 throw std::runtime_error("qa error");
964 }
965 acc.addLabel(hitId);
966 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
967 if (GetMCLabelID(hitId, j) >= (int32_t)GetNMCTracks(GetMCLabelCol(hitId, j))) {
968 GPUError("Invalid label %d > %d (hit %d, label %d, col %d)", GetMCLabelID(hitId, j), GetNMCTracks(GetMCLabelCol(hitId, j)), hitId, j, (int32_t)GetMCLabelCol(hitId, j));
969 throw std::runtime_error("qa error");
970 }
971 if (GetMCLabelID(hitId, j) >= 0) {
972 if (QA_DEBUG >= 3 && track.OK()) {
973 GPUInfo("Track %d Cluster %u Label %d: %d (%f)", i, k, j, GetMCLabelID(hitId, j), GetMCLabelWeight(hitId, j));
974 }
975 }
976 }
977 }
978
979 float maxweight, sumweight;
980 int32_t maxcount;
981 auto maxLabel = acc.computeLabel(&maxweight, &sumweight, &maxcount);
982 mTrackMCLabels[i] = maxLabel;
983 if (QA_DEBUG && track.OK() && GetNMCTracks(maxLabel) > (uint32_t)maxLabel.getTrackID()) {
984 const mcInfo_t& mc = GetMCTrack(maxLabel);
985 GPUInfo("Track %d label %d (fake %d) weight %f clusters %d (fitted %d) (%f%% %f%%) Pt %f", i, maxLabel.getTrackID(), (int32_t)(maxLabel.isFake()), maxweight, nClusters, track.NClustersFitted(), 100.f * maxweight / sumweight, 100.f * (float)maxcount / (float)nClusters,
986 std::sqrt(mc.pX * mc.pX + mc.pY * mc.pY));
987 }
988 }
989 });
990 }
991 if (timer.IsRunning()) {
992 GPUInfo("QA Time: Assign Track Labels:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
993 }
994
995 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
996 const GPUTPCGMMergedTrack* track = mTracking ? &mTracking->mIOPtrs.mergedTracks[i] : nullptr;
997 mcLabelI_t label = mTrackMCLabels[i];
998 if (mQATasks & taskClusterAttach) {
999 // fill cluster attachment status
1000 if (!track->OK()) {
1001 continue;
1002 }
1003 if (!mTrackMCLabels[i].isValid()) {
1004 for (uint32_t k = 0; k < track->NClusters(); k++) {
1005 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1006 continue;
1007 }
1008 mClusterParam[mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num].fakeAttached++;
1009 }
1010 continue;
1011 }
1012 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1013 for (uint32_t k = 0; k < track->NClusters(); k++) {
1014 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1015 continue;
1016 }
1017 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num;
1018 bool correct = false;
1019 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1020 if (label == GetMCLabel(hitId, j)) {
1021 correct = true;
1022 break;
1023 }
1024 }
1025 if (correct) {
1026 mClusterParam[hitId].attached++;
1027 } else {
1028 mClusterParam[hitId].fakeAttached++;
1029 }
1030 }
1031 }
1032 }
1033
1034 if (mTrackMCLabels[i].isFake()) {
1035 (GetMCTrackObj(mFakeTracks, label))++;
1036 } else if (tracksExternal || !track->MergedLooper()) {
1037 GetMCTrackObj(mRecTracks, label)++;
1038 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1039 int32_t& revLabel = GetMCTrackObj(mTrackMCLabelsReverse, label);
1040 if (tracksExternal) {
1041#ifdef GPUCA_O2_LIB
1042 if (revLabel == -1 || fabsf((*tracksExternal)[i].getZ()) < fabsf((*tracksExternal)[revLabel].getZ())) {
1043 revLabel = i;
1044 }
1045#endif
1046 } else {
1047 const auto* trks = mTracking->mIOPtrs.mergedTracks;
1048 bool comp;
1049 if (revLabel == -1) {
1050 comp = true;
1051 } else {
1052 float shift1 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[i].CSide() * GPUChainTracking::NSECTORS / 2, trks[i].GetParam().GetTOffset());
1053 float shift2 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[revLabel].CSide() * GPUChainTracking::NSECTORS / 2, trks[revLabel].GetParam().GetTOffset());
1054 comp = fabsf(trks[i].GetParam().GetZ() + shift1) < fabsf(trks[revLabel].GetParam().GetZ() + shift2);
1055 }
1056 if (revLabel == -1 || !trks[revLabel].OK() || (trks[i].OK() && comp)) {
1057 revLabel = i;
1058 }
1059 }
1060 }
1061 }
1062 }
1063 if ((mQATasks & taskClusterAttach) && mTracking->mIOPtrs.mergedTrackHitAttachment) {
1064 // fill cluster adjacent status
1065 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1066 if (mClusterParam[i].attached == 0 && mClusterParam[i].fakeAttached == 0) {
1067 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1069 int32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1070 mcLabelI_t trackL = mTrackMCLabels[track];
1071 bool fake = true;
1072 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1073 // GPUInfo("Attach %x Track %d / %d:%d", attach, track, j, GetMCLabelID(i, j));
1074 if (trackL == GetMCLabel(i, j)) {
1075 fake = false;
1076 break;
1077 }
1078 }
1079 if (fake) {
1080 mClusterParam[i].fakeAdjacent++;
1081 } else {
1082 mClusterParam[i].adjacent++;
1083 }
1084 }
1085 }
1086 }
1087 }
1088
1089 if (mConfig.matchMCLabels.size()) {
1090 mGoodHits[mNEvents - 1].resize(GetNMCLabels());
1091 std::vector<bool> allowMCLabels(GetNMCTracks(0));
1092 for (uint32_t k = 0; k < GetNMCTracks(0); k++) {
1093 allowMCLabels[k] = false;
1094 }
1095 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1096 if (!mGoodTracks[mNEvents - 1][i]) {
1097 continue;
1098 }
1099 if (mConfig.matchDisplayMinPt > 0) {
1100 if (!mTrackMCLabels[i].isValid()) {
1101 continue;
1102 }
1103 const mcInfo_t& info = GetMCTrack(mTrackMCLabels[i]);
1104 if (info.pX * info.pX + info.pY * info.pY < mConfig.matchDisplayMinPt * mConfig.matchDisplayMinPt) {
1105 continue;
1106 }
1107 }
1108
1109 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1110 for (uint32_t j = 0; j < track.NClusters(); j++) {
1111 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + j].num;
1112 if (GetMCLabelNID(hitId)) {
1113 int32_t mcID = GetMCLabelID(hitId, 0);
1114 if (mcID >= 0) {
1115 allowMCLabels[mcID] = true;
1116 }
1117 }
1118 }
1119 }
1120 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1121 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1122 int32_t mcID = GetMCLabelID(i, j);
1123 if (mcID >= 0 && allowMCLabels[mcID]) {
1124 mGoodHits[mNEvents - 1][i] = true;
1125 }
1126 }
1127 }
1128 }
1129 if (timer.IsRunning()) {
1130 GPUInfo("QA Time: Cluster attach status:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1131 }
1132
1133 if (matchOnly) {
1134 return;
1135 }
1136
1137 // Recompute fNWeightCls (might have changed after merging events into timeframes)
1138 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1139 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1140 mMCParam[iCol][i].nWeightCls = 0.;
1141 }
1142 }
1143 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1144 float weightTotal = 0.f;
1145 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1146 if (GetMCLabelID(i, j) >= 0) {
1147 weightTotal += GetMCLabelWeight(i, j);
1148 }
1149 }
1150 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1151 if (GetMCLabelID(i, j) >= 0) {
1152 GetMCTrackObj(mMCParam, GetMCLabel(i, j)).nWeightCls += GetMCLabelWeight(i, j) / weightTotal;
1153 }
1154 }
1155 }
1156 if (timer.IsRunning()) {
1157 GPUInfo("QA Time: Compute cluster label weights:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1158 }
1159
1160 // Compute MC Track Parameters for MC Tracks
1161 tbb::parallel_for<uint32_t>(0, GetNMCCollissions(), [&](auto iCol) {
1162 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1163 const mcInfo_t& info = GetMCTrack(i, iCol);
1164 additionalMCParameters& mc2 = mMCParam[iCol][i];
1165 mc2.pt = std::sqrt(info.pX * info.pX + info.pY * info.pY);
1166 mc2.phi = M_PI + std::atan2(-info.pY, -info.pX);
1167 float p = info.pX * info.pX + info.pY * info.pY + info.pZ * info.pZ;
1168 if (p < 1e-18) {
1169 mc2.theta = mc2.eta = 0.f;
1170 } else {
1171 mc2.theta = info.pZ == 0 ? (M_PI / 2) : (std::acos(info.pZ / std::sqrt(p)));
1172 mc2.eta = -std::log(std::tan(0.5 * mc2.theta));
1173 }
1174 if (mConfig.writeMCLabels) {
1175 std::vector<int32_t>& effBuffer = mcEffBuffer[mNEvents - 1];
1176 effBuffer[i] = mRecTracks[iCol][i] * 1000 + mFakeTracks[iCol][i];
1177 }
1178 } // clang-format off
1179 }, tbb::simple_partitioner()); // clang-format on
1180 if (timer.IsRunning()) {
1181 GPUInfo("QA Time: Compute track mc parameters:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1182 }
1183
1184 // Fill Efficiency Histograms
1185 if (mQATasks & taskTrackingEff) {
1186 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1187 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1188 if ((mMCTrackMin != -1 && (int32_t)i < mMCTrackMin) || (mMCTrackMax != -1 && (int32_t)i >= mMCTrackMax)) {
1189 continue;
1190 }
1191 const mcInfo_t& info = GetMCTrack(i, iCol);
1192 const additionalMCParameters& mc2 = mMCParam[iCol][i];
1193 if (mc2.nWeightCls == 0.f) {
1194 continue;
1195 }
1196 const float& mcpt = mc2.pt;
1197 const float& mcphi = mc2.phi;
1198 const float& mceta = mc2.eta;
1199
1200 if (info.primDaughters) {
1201 continue;
1202 }
1203 if (mc2.nWeightCls < mConfig.minNClEff) {
1204 continue;
1205 }
1206 int32_t findable = mc2.nWeightCls >= mConfig.minNClFindable;
1207 if (info.pid < 0) {
1208 continue;
1209 }
1210 if (info.charge == 0.f) {
1211 continue;
1212 }
1213 if (mConfig.filterCharge && info.charge * mConfig.filterCharge < 0) {
1214 continue;
1215 }
1216 if (mConfig.filterPID >= 0 && info.pid != mConfig.filterPID) {
1217 continue;
1218 }
1219
1220 if (fabsf(mceta) > ETA_MAX || mcpt < PT_MIN || mcpt > PT_MAX) {
1221 continue;
1222 }
1223
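 // Rotate the MC position into its 20 degree TPC sector frame (alpha snapped to the sector center) to obtain the local Y used for the efficiency-vs-Y histograms.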
1224 float alpha = std::atan2(info.y, info.x);
1225 alpha /= M_PI / 9.f;
1226 alpha = std::floor(alpha);
1227 alpha *= M_PI / 9.f;
1228 alpha += M_PI / 18.f;
1229
1230 float c = std::cos(alpha);
1231 float s = std::sin(alpha);
1232 float localY = -info.x * s + info.y * c;
1233
1234 if (mConfig.dumpToROOT) {
1235 static auto effdump = GPUROOTDump<TNtuple>::getNew("eff", "alpha:x:y:z:mcphi:mceta:mcpt:rec:fake:findable:prim:ncls");
1236 float localX = info.x * c + info.y * s;
1237 effdump.Fill(alpha, localX, localY, info.z, mcphi, mceta, mcpt, mRecTracks[iCol][i], mFakeTracks[iCol][i], findable, info.prim, mc2.nWeightCls);
1238 }
1239
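 // Per MC track the fill value is: reconstructed at least once (j == 0), number of clones (j == 1), number of fake reconstructions attributed to this track (j == 2), or 1 for the "All" denominator (j == 3).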
1240 for (int32_t j = 0; j < 4; j++) {
1241 for (int32_t k = 0; k < 2; k++) {
1242 if (k == 0 && findable == 0) {
1243 continue;
1244 }
1245
1246 int32_t val = (j == 0) ? (mRecTracks[iCol][i] ? 1 : 0) : (j == 1) ? (mRecTracks[iCol][i] ? mRecTracks[iCol][i] - 1 : 0) : (j == 2) ? mFakeTracks[iCol][i] : 1;
1247 if (val == 0) {
1248 continue;
1249 }
1250
1251 for (int32_t l = 0; l < 5; l++) {
1252 if (info.prim && mcpt < PT_MIN_PRIM) {
1253 continue;
1254 }
1255 if (l != 3 && fabsf(mceta) > ETA_MAX2) {
1256 continue;
1257 }
1258 if (l < 4 && mcpt < 1.f / mConfig.qpt) {
1259 continue;
1260 }
1261
1262 float pos = l == 0 ? localY : l == 1 ? info.z : l == 2 ? mcphi : l == 3 ? mceta : mcpt;
1263
1264 mEff[j][k][!info.prim][l]->Fill(pos, val);
1265 }
1266 }
1267 }
1268 }
1269 }
1270 if (timer.IsRunning()) {
1271 GPUInfo("QA Time: Fill efficiency histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1272 }
1273 }
1274
1275 // Fill Resolution Histograms
1276 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
1277 GPUTPCGMPropagator prop;
1278 prop.SetMaxSinPhi(.999);
1279 prop.SetMaterialTPC();
1280 prop.SetPolynomialField(&mParam->polynomialField);
1281
1282 for (uint32_t i = 0; i < mTrackMCLabels.size(); i++) {
1283 if (mConfig.writeMCLabels) {
1284 std::vector<int32_t>& labelBuffer = mcLabelBuffer[mNEvents - 1];
1285 labelBuffer[i] = mTrackMCLabels[i].getTrackID();
1286 }
1287 if (mTrackMCLabels[i].isFake()) {
1288 continue;
1289 }
1290 const mcInfo_t& mc1 = GetMCTrack(mTrackMCLabels[i]);
1291 const additionalMCParameters& mc2 = GetMCTrackObj(mMCParam, mTrackMCLabels[i]);
1292
1293 if (mc1.primDaughters) {
1294 continue;
1295 }
1296 if (!tracksExternal) {
1297 if (!mTracking->mIOPtrs.mergedTracks[i].OK()) {
1298 continue;
1299 }
1300 if (mTracking->mIOPtrs.mergedTracks[i].MergedLooper()) {
1301 continue;
1302 }
1303 }
1304 if ((mMCTrackMin != -1 && mTrackMCLabels[i].getTrackID() < mMCTrackMin) || (mMCTrackMax != -1 && mTrackMCLabels[i].getTrackID() >= mMCTrackMax)) {
1305 continue;
1306 }
1307 if (fabsf(mc2.eta) > ETA_MAX || mc2.pt < PT_MIN || mc2.pt > PT_MAX) {
1308 continue;
1309 }
1310 if (mc1.charge == 0.f) {
1311 continue;
1312 }
1313 if (mc1.pid < 0) {
1314 continue;
1315 }
1316 if (mc1.t0 == -100.f) {
1317 continue;
1318 }
1319 if (mConfig.filterCharge && mc1.charge * mConfig.filterCharge < 0) {
1320 continue;
1321 }
1322 if (mConfig.filterPID >= 0 && mc1.pid != mConfig.filterPID) {
1323 continue;
1324 }
1325 if (mc2.nWeightCls < mConfig.minNClRes) {
1326 continue;
1327 }
1328 if (mConfig.resPrimaries == 1 && !mc1.prim) {
1329 continue;
1330 } else if (mConfig.resPrimaries == 2 && mc1.prim) {
1331 continue;
1332 }
1333 if (GetMCTrackObj(mTrackMCLabelsReverse, mTrackMCLabels[i]) != (int32_t)i) {
1334 continue;
1335 }
1336
1337 GPUTPCGMTrackParam param;
1338 float alpha = 0.f;
1339 int32_t side;
1340 if (tracksExternal) {
1341#ifdef GPUCA_O2_LIB
1342 for (int32_t k = 0; k < 5; k++) {
1343 param.Par()[k] = (*tracksExternal)[i].getParams()[k];
1344 }
1345 for (int32_t k = 0; k < 15; k++) {
1346 param.Cov()[k] = (*tracksExternal)[i].getCov()[k];
1347 }
1348 param.X() = (*tracksExternal)[i].getX();
1349 param.TOffset() = (*tracksExternal)[i].getTime0();
1350 alpha = (*tracksExternal)[i].getAlpha();
1351 side = (*tracksExternal)[i].hasBothSidesClusters() ? 2 : ((*tracksExternal)[i].hasCSideClusters() ? 1 : 0);
1352#endif
1353 } else {
1354 param = mTracking->mIOPtrs.mergedTracks[i].GetParam();
1355 alpha = mTracking->mIOPtrs.mergedTracks[i].GetAlpha();
1356 side = mTracking->mIOPtrs.mergedTracks[i].CCE() ? 2 : (mTracking->mIOPtrs.mergedTracks[i].CSide() ? 1 : 0);
1357 }
1358
1359 float mclocal[4]; // MC x, y, pX, pY rotated into the track's sector frame (alpha); the MC point is rotated because the track is propagated best along x
1360 float c = std::cos(alpha);
1361 float s = std::sin(alpha);
1362 float x = mc1.x;
1363 float y = mc1.y;
1364 mclocal[0] = x * c + y * s;
1365 mclocal[1] = -x * s + y * c;
1366 float px = mc1.pX;
1367 float py = mc1.pY;
1368 mclocal[2] = px * c + py * s;
1369 mclocal[3] = -px * s + py * c;
1370
1371 if (mclocal[0] < TRACK_EXPECTED_REFERENCE_X - 3) {
1372 continue;
1373 }
1374 if (mclocal[0] > param.GetX() + 20) {
1375 continue;
1376 }
1377 if (param.GetX() > mConfig.maxResX) {
1378 continue;
1379 }
1380
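 // getdz(): residual of the reconstructed Z w.r.t. the MC z. External tracks return the parameter Z as-is; otherwise, in continuous (time-frame) mode the reconstructed Z is first shifted by the drift-time offset between the track's TOffset and the MC t0 (no shift for tracks with clusters on both sides, side == 2).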
1381 auto getdz = [this, &param, &mc1, &side, tracksExternal]() {
1382 if (tracksExternal) {
1383 return param.GetZ();
1384 }
1385 if (!mParam->continuousMaxTimeBin) {
1386 return param.GetZ() - mc1.z;
1387 }
1388 float shift = side == 2 ? 0 : mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(side * GPUChainTracking::NSECTORS / 2, param.GetTOffset() - mc1.t0);
1389 return param.GetZ() + shift - mc1.z;
1390 };
1391
1392 prop.SetTrack(&param, alpha);
1393 bool inFlyDirection = 0;
1394 if (mConfig.strict) {
1395 const float dx = param.X() - std::max<float>(mclocal[0], TRACK_EXPECTED_REFERENCE_X_DEFAULT); // Limit distance check
1396 const float dy = param.Y() - mclocal[1];
1397 const float dz = getdz();
1398 if (dx * dx + dy * dy + dz * dz > 5.f * 5.f) {
1399 continue;
1400 }
1401 }
1402
1403 if (prop.PropagateToXAlpha(mclocal[0], alpha, inFlyDirection)) {
1404 continue;
1405 }
1406 if (fabsf(param.Y() - mclocal[1]) > (mConfig.strict ? 1.f : 4.f) || fabsf(getdz()) > (mConfig.strict ? 1.f : 4.f)) {
1407 continue;
1408 }
1409 float charge = mc1.charge > 0 ? 1.f : -1.f;
1410
1411 float deltaY = param.GetY() - mclocal[1];
1412 float deltaZ = getdz();
1413 float deltaPhiNative = param.GetSinPhi() - mclocal[3] / mc2.pt;
1414 float deltaPhi = std::asin(param.GetSinPhi()) - std::atan2(mclocal[3], mclocal[2]);
1415 float deltaLambdaNative = param.GetDzDs() - mc1.pZ / mc2.pt;
1416 float deltaLambda = std::atan(param.GetDzDs()) - std::atan2(mc1.pZ, mc2.pt);
1417 float deltaPtNative = (param.GetQPt() - charge / mc2.pt) * charge;
1418 float deltaPt = (fabsf(1.f / param.GetQPt()) - mc2.pt) / mc2.pt;
1419
1420 float paramval[5] = {mclocal[1], mc1.z, mc2.phi, mc2.eta, mc2.pt};
1421 float resval[5] = {deltaY, deltaZ, mConfig.nativeFitResolutions ? deltaPhiNative : deltaPhi, mConfig.nativeFitResolutions ? deltaLambdaNative : deltaLambda, mConfig.nativeFitResolutions ? deltaPtNative : deltaPt};
1422 float pullval[5] = {deltaY / std::sqrt(param.GetErr2Y()), deltaZ / std::sqrt(param.GetErr2Z()), deltaPhiNative / std::sqrt(param.GetErr2SinPhi()), deltaLambdaNative / std::sqrt(param.GetErr2DzDs()), deltaPtNative / std::sqrt(param.GetErr2QPt())};
1423
1424 for (int32_t j = 0; j < 5; j++) {
1425 for (int32_t k = 0; k < 5; k++) {
1426 if (k != 3 && fabsf(mc2.eta) > ETA_MAX2) {
1427 continue;
1428 }
1429 if (k < 4 && mc2.pt < 1.f / mConfig.qpt) {
1430 continue;
1431 }
1432 if (mQATasks & taskTrackingRes) {
1433 mRes2[j][k]->Fill(resval[j], paramval[k]);
1434 }
1435 if (mQATasks & taskTrackingResPull) {
1436 mPull2[j][k]->Fill(pullval[j], paramval[k]);
1437 }
1438 }
1439 }
1440 }
1441 if (timer.IsRunning()) {
1442 GPUInfo("QA Time: Fill resolution histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1443 }
1444 }
1445
1446 if (mQATasks & taskClusterAttach) {
1447 // Fill cluster histograms
1448 for (uint32_t iTrk = 0; iTrk < nReconstructedTracks; iTrk++) {
1449 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[iTrk];
1450 if (!track.OK()) {
1451 continue;
1452 }
1453 if (!mTrackMCLabels[iTrk].isValid()) {
1454 for (uint32_t k = 0; k < track.NClusters(); k++) {
1455 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1456 continue;
1457 }
1458 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1459 float totalWeight = 0.;
1460 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1461 if (GetMCLabelID(hitId, j) >= 0 && GetMCTrackObj(mMCParam, GetMCLabel(hitId, j)).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1462 totalWeight += GetMCLabelWeight(hitId, j);
1463 }
1464 }
1465 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1467 if (totalWeight > 0) {
1468 float weight = 1.f / (totalWeight * (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached));
1469 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1470 mcLabelI_t label = GetMCLabel(hitId, j);
1471 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1472 float pt = GetMCTrackObj(mMCParam, label).pt;
1473 if (pt < PT_MIN_CLUST) {
1474 pt = PT_MIN_CLUST;
1475 }
1476 mClusters[CL_fake]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1477 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1478 if (GetMCTrackObj(mRecTracks, label)) {
1479 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1480 }
1481 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1482 if (protect || physics) {
1483 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1484 }
1485 if (physics) {
1486 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1487 }
1488 }
1489 }
1490 } else {
1491 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1492 mClusters[CL_fake]->Fill(0.f, weight);
1493 mClusters[CL_att_adj]->Fill(0.f, weight);
1494 mClusters[CL_all]->Fill(0.f, weight);
1495 mClusterCounts.nUnaccessible += weight;
1496 if (protect || physics) {
1497 mClusters[CL_prot]->Fill(0.f, weight);
1498 }
1499 if (physics) {
1500 mClusters[CL_physics]->Fill(0.f, weight);
1501 }
1502 }
1503 }
1504 continue;
1505 }
1506 mcLabelI_t label = mTrackMCLabels[iTrk];
1507 if (mMCTrackMin != -1 && (label.getTrackID() < mMCTrackMin || label.getTrackID() >= mMCTrackMax)) {
1508 continue;
1509 }
1510 for (uint32_t k = 0; k < track.NClusters(); k++) {
1511 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1512 continue;
1513 }
1514 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1515 float pt = GetMCTrackObj(mMCParam, label).pt;
1516 if (pt < PT_MIN_CLUST) {
1517 pt = PT_MIN_CLUST;
1518 }
1519 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1520 bool correct = false;
1521 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1522 if (label == GetMCLabel(hitId, j)) {
1523 correct = true;
1524 break;
1525 }
1526 }
1527 if (correct) {
1528 mClusters[CL_attached]->Fill(pt, weight);
1529 mClusters[CL_tracks]->Fill(pt, weight);
1530 } else {
1531 mClusters[CL_fake]->Fill(pt, weight);
1532 }
1533 mClusters[CL_att_adj]->Fill(pt, weight);
1534 mClusters[CL_all]->Fill(pt, weight);
1535 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1537 if (protect || physics) {
1538 mClusters[CL_prot]->Fill(pt, weight);
1539 }
1540 if (physics) {
1541 mClusters[CL_physics]->Fill(pt, weight);
1542 }
1543 }
1544 }
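// Handle clusters that are not attached to any reconstructed track: adjacent clusters are filled using the MC label of the track they are adjacent to (falling back to their own MC contributors), all others using their own MC contributors; clusters without any MC track above the pt threshold are counted as unaccessible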
1545 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1546 if ((mMCTrackMin != -1 && GetMCLabelID(i, 0) < mMCTrackMin) || (mMCTrackMax != -1 && GetMCLabelID(i, 0) >= mMCTrackMax)) {
1547 continue;
1548 }
1549 if (mClusterParam[i].attached || mClusterParam[i].fakeAttached) {
1550 continue;
1551 }
1552 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1554 if (mClusterParam[i].adjacent) {
1555 int32_t label = mTracking->mIOPtrs.mergedTrackHitAttachment[i] & gputpcgmmergertypes::attachTrackMask;
1556 if (!mTrackMCLabels[label].isValid()) {
1557 float totalWeight = 0.;
1558 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1559 mcLabelI_t labelT = GetMCLabel(i, j);
1560 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1561 totalWeight += GetMCLabelWeight(i, j);
1562 }
1563 }
1564 float weight = 1.f / totalWeight;
1565 if (totalWeight > 0) {
1566 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1567 mcLabelI_t labelT = GetMCLabel(i, j);
1568 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1569 float pt = GetMCTrackObj(mMCParam, labelT).pt;
1570 if (pt < PT_MIN_CLUST) {
1571 pt = PT_MIN_CLUST;
1572 }
1573 if (GetMCTrackObj(mRecTracks, labelT)) {
1574 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1575 }
1576 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1577 mClusters[CL_fakeAdj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1578 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1579 if (protect || physics) {
1580 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1581 }
1582 if (physics) {
1583 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1584 }
1585 }
1586 }
1587 } else {
1588 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1589 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1590 mClusters[CL_all]->Fill(0.f, 1.f);
1591 mClusterCounts.nUnaccessible++;
1592 if (protect || physics) {
1593 mClusters[CL_prot]->Fill(0.f, 1.f);
1594 }
1595 if (physics) {
1596 mClusters[CL_physics]->Fill(0.f, 1.f);
1597 }
1598 }
1599 } else {
1600 float pt = GetMCTrackObj(mMCParam, mTrackMCLabels[label]).pt;
1601 if (pt < PT_MIN_CLUST) {
1602 pt = PT_MIN_CLUST;
1603 }
1604 mClusters[CL_att_adj]->Fill(pt, 1.f);
1605 mClusters[CL_tracks]->Fill(pt, 1.f);
1606 mClusters[CL_all]->Fill(pt, 1.f);
1607 if (protect || physics) {
1608 mClusters[CL_prot]->Fill(pt, 1.f);
1609 }
1610 if (physics) {
1611 mClusters[CL_physics]->Fill(pt, 1.f);
1612 }
1613 }
1614 } else {
1615 float totalWeight = 0.;
1616 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1617 mcLabelI_t labelT = GetMCLabel(i, j);
1618 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1619 totalWeight += GetMCLabelWeight(i, j);
1620 }
1621 }
1622 if (totalWeight > 0) {
1623 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1624 mcLabelI_t label = GetMCLabel(i, j);
1625 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > GPUCA_MIN_TRACK_PTB5_DEFAULT) {
1626 float pt = GetMCTrackObj(mMCParam, label).pt;
1627 if (pt < PT_MIN_CLUST) {
1628 pt = PT_MIN_CLUST;
1629 }
1630 float weight = GetMCLabelWeight(i, j) / totalWeight;
1631 if (mClusterParam[i].fakeAdjacent) {
1632 mClusters[CL_fakeAdj]->Fill(pt, weight);
1633 }
1634 if (mClusterParam[i].fakeAdjacent) {
1635 mClusters[CL_att_adj]->Fill(pt, weight);
1636 }
1637 if (GetMCTrackObj(mRecTracks, label)) {
1638 mClusters[CL_tracks]->Fill(pt, weight);
1639 }
1640 mClusters[CL_all]->Fill(pt, weight);
1641 if (protect || physics) {
1642 mClusters[CL_prot]->Fill(pt, weight);
1643 }
1644 if (physics) {
1645 mClusters[CL_physics]->Fill(pt, weight);
1646 }
1647 }
1648 }
1649 } else {
1650 if (mClusterParam[i].fakeAdjacent) {
1651 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1652 }
1653 if (mClusterParam[i].fakeAdjacent) {
1654 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1655 }
1656 mClusters[CL_all]->Fill(0.f, 1.f);
1657 mClusterCounts.nUnaccessible++;
1658 if (protect || physics) {
1659 mClusters[CL_prot]->Fill(0.f, 1.f);
1660 }
1661 if (physics) {
1662 mClusters[CL_physics]->Fill(0.f, 1.f);
1663 }
1664 }
1665 }
1666 }
1667
1668 if (timer.IsRunning()) {
1669 GPUInfo("QA Time: Fill cluster histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1670 }
1671 }
1672 } else if (!mConfig.inputHistogramsOnly && !mConfig.noMC && (mQATasks & (taskTrackingEff | taskTrackingRes | taskTrackingResPull | taskClusterAttach))) {
1673 GPUWarning("No MC information available, only running partial TPC QA!");
1674 }
1675
1676 if (mQATasks & taskTrackStatistics) {
1677 // Fill track statistic histograms
1678 std::vector<std::array<float, 3>> clusterAttachCounts;
1679 if (mcAvail) {
 1680 clusterAttachCounts.resize(GetNMCLabels(), {0.f, 0.f, 0.f});
1681 }
1682 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1683 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1684 if (!track.OK()) {
1685 continue;
1686 }
1687 mTracks->Fill(1.f / fabsf(track.GetParam().GetQPt()));
1688 mNCl[0]->Fill(track.NClustersFitted());
1689 uint32_t nClCorrected = 0;
1690 const auto& trackClusters = mTracking->mIOPtrs.mergedTrackHits;
1691 uint32_t jNext = 0;
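// Group the track clusters by pad row: jNext is advanced to the first cluster of the next sector/row and rowClCount counts the non-rejected clusters in the current row, entering the corrected cluster count and the per-cluster attachment statistics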
1692 for (uint32_t j = 0; j < track.NClusters(); j = jNext) {
1693 uint32_t rowClCount = !(trackClusters[track.FirstClusterRef() + j].state & GPUTPCGMMergedTrackHit::flagReject);
 1694 for (jNext = j + 1; jNext < track.NClusters(); jNext++) {
1695 if (trackClusters[track.FirstClusterRef() + j].sector != trackClusters[track.FirstClusterRef() + jNext].sector || trackClusters[track.FirstClusterRef() + j].row != trackClusters[track.FirstClusterRef() + jNext].row) {
1696 break;
1697 }
1698 rowClCount += !(trackClusters[track.FirstClusterRef() + jNext].state & GPUTPCGMMergedTrackHit::flagReject);
1699 }
1700 if (!track.MergedLooper() && rowClCount) {
1701 nClCorrected++;
1702 }
1703 if (mcAvail && rowClCount) {
1704 for (uint32_t k = j; k < jNext; k++) {
1705 const auto& cl = trackClusters[track.FirstClusterRef() + k];
1706 if (cl.state & GPUTPCGMMergedTrackHit::flagReject) {
1707 continue;
1708 }
1709 bool labelOk = false, labelOkNonFake = false;
1710 const mcLabelI_t& trkLabel = mTrackMCLabels[i];
1711 if (trkLabel.isValid() && !trkLabel.isNoise()) {
1712 for (int32_t l = 0; l < GetMCLabelNID(cl.num); l++) {
1713 const mcLabelI_t& clLabel = GetMCLabel(cl.num, l);
1714 if (clLabel.isValid() && !clLabel.isNoise() && CompareIgnoreFake(trkLabel, clLabel)) {
1715 labelOk = true;
1716 if (!trkLabel.isFake()) {
1717 labelOkNonFake = true;
1718 }
1719 break;
1720 }
1721 }
1722 }
1723 clusterAttachCounts[cl.num][0] += 1.0f;
1724 clusterAttachCounts[cl.num][1] += (float)labelOk / rowClCount;
1725 clusterAttachCounts[cl.num][2] += (float)labelOkNonFake / rowClCount;
1726 }
1727 }
1728 }
1729 if (nClCorrected) {
1730 mNCl[1]->Fill(nClCorrected);
1731 }
1732 mT0[0]->Fill(track.GetParam().GetTOffset());
1733 if (mTrackMCLabels.size() && !mTrackMCLabels[i].isFake() && !track.MergedLooper() && !track.CCE()) {
1734 const auto& info = GetMCTrack(mTrackMCLabels[i]);
1735 if (info.t0 != -100.f) {
1736 mT0[1]->Fill(track.GetParam().GetTOffset() - info.t0);
1737 }
1738 }
1739 }
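// Fill the cluster X/Y occupancy histogram from the native clusters, transformed to global coordinates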
1740 if (mClNative && mTracking && mTracking->GetTPCTransformHelper()) {
1741 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1742 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
1743 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1744 const auto& cl = mClNative->clusters[i][j][k];
1745 float x, y, z;
1746 GPUTPCConvertImpl::convert(*mTracking->GetTPCTransformHelper()->getCorrMap(), mTracking->GetParam(), i, j, cl.getPad(), cl.getTime(), x, y, z);
1747 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1748 mClXY->Fill(x, y);
1749 }
1750 }
1751 }
1752 }
1753 if (mcAvail) {
1754 double clusterAttachNormalizedCount = 0, clusterAttachNormalizedCountNonFake = 0;
1755 for (uint32_t i = 0; i < clusterAttachCounts.size(); i++) {
1756 if (clusterAttachCounts[i][0]) {
1757 clusterAttachNormalizedCount += clusterAttachCounts[i][1] / clusterAttachCounts[i][0];
1758 clusterAttachNormalizedCountNonFake += clusterAttachCounts[i][2] / clusterAttachCounts[i][0];
1759 }
1760 }
1761 mClusterCounts.nCorrectlyAttachedNormalized = clusterAttachNormalizedCount;
1762 mClusterCounts.nCorrectlyAttachedNormalizedNonFake = clusterAttachNormalizedCountNonFake;
1763 clusterAttachCounts.clear();
1764 }
1765
1766 if (timer.IsRunning()) {
1767 GPUInfo("QA Time: Fill track statistics:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1768 }
1769 }
1770
1771 uint32_t nCl = clNative ? clNative->nClustersTotal : mTracking->GetProcessors()->tpcMerger.NMaxClusters();
1772 mClusterCounts.nTotal += nCl;
1773 if (mQATasks & taskClusterCounts) {
1774 for (uint32_t i = 0; i < nCl; i++) {
 1775 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
 1776 CHECK_CLUSTER_STATE();
 1777
1778 if (mcAvail) {
1779 float totalWeight = 0, weight400 = 0, weight40 = 0;
1780 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1781 const auto& label = GetMCLabel(i, j);
1782 if (GetMCLabelID(label) >= 0) {
1783 totalWeight += GetMCLabelWeight(label);
1784 if (GetMCTrackObj(mMCParam, label).pt >= 0.4) {
1785 weight400 += GetMCLabelWeight(label);
1786 }
1787 if (GetMCTrackObj(mMCParam, label).pt <= 0.04) {
1788 weight40 += GetMCLabelWeight(label);
1789 }
1790 }
1791 }
1792 if (totalWeight > 0 && 10.f * weight400 >= totalWeight) {
1793 if (!unattached && !protect && !physics) {
1794 mClusterCounts.nFakeRemove400++;
1795 int32_t totalFake = weight400 < 0.9f * totalWeight;
1796 if (totalFake) {
1797 mClusterCounts.nFullFakeRemove400++;
1798 }
1799 /*printf("Fake removal (%d): Hit %7d, attached %d lowPt %d looper %d tube200 %d highIncl %d tube %d bad %d recPt %7.2f recLabel %6d", totalFake, i, (int32_t) (mClusterParam[i].attached || mClusterParam[i].fakeAttached),
1800 (int32_t) lowPt, (int32_t) ((attach & gputpcgmmergertypes::attachGoodLeg) == 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) && mev200),
1801 (int32_t) ((attach & gputpcgmmergertypes::attachHighIncl) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachGood) == 0),
1802 fabsf(qpt) > 0 ? 1.f / qpt : 0.f, id);
1803 for (int32_t j = 0;j < GetMCLabelNID(i);j++)
1804 {
1805 //if (GetMCLabelID(i, j) < 0) break;
1806 printf(" - label%d %6d weight %5d", j, GetMCLabelID(i, j), (int32_t) GetMCLabelWeight(i, j));
1807 if (GetMCLabelID(i, j) >= 0) printf(" - pt %7.2f", mMCParam[GetMCLabelID(i, j)].pt);
1808 else printf(" ");
1809 }
1810 printf("\n");*/
1811 }
1812 mClusterCounts.nAbove400++;
1813 }
1814 if (totalWeight > 0 && weight40 >= 0.9 * totalWeight) {
1815 mClusterCounts.nBelow40++;
1816 if (protect || physics) {
1817 mClusterCounts.nFakeProtect40++;
1818 }
1819 }
1820 }
1821 if (physics) {
1822 mClusterCounts.nPhysics++;
1823 }
1824 if (physics || protect) {
1825 mClusterCounts.nProt++;
1826 }
1827 if (unattached) {
1828 mClusterCounts.nUnattached++;
1829 }
1830 }
1831 }
1832
1833 // Process cluster count statistics
1834 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
1835 DoClusterCounts(nullptr);
1836 mClusterCounts = counts_t();
1837 }
1838
1839 if (timer.IsRunning()) {
1840 GPUInfo("QA Time: Cluster Counts:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1841 }
1842
1843 if (mConfig.dumpToROOT) {
1844 if (!clNative || !mTracking || !mTracking->mIOPtrs.mergedTrackHitAttachment || !mTracking->mIOPtrs.mergedTracks) {
1845 throw std::runtime_error("Cannot dump non o2::tpc::clusterNative clusters, need also hit attachmend and GPU tracks");
1846 }
1847 uint32_t clid = 0;
1848 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1849 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
1850 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1851 const auto& cl = mClNative->clusters[i][j][k];
1852 uint32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[clid];
 1853 float x = 0, y = 0, z = 0;
 1854 if (attach & gputpcgmmergertypes::attachFlagMask) {
 1855 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1856 const auto& trk = mTracking->mIOPtrs.mergedTracks[track];
1857 mTracking->GetTPCTransformHelper()->Transform(i, j, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
1858 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1859 }
1860 uint32_t extState = mTracking->mIOPtrs.mergedTrackHitStates ? mTracking->mIOPtrs.mergedTrackHitStates[clid] : 0;
1861
 1862 if (mConfig.dumpToROOT >= 2) {
 1863 GPUTPCGMMergedTrack trk;
 1864 GPUTPCGMMergedTrackHit trkHit;
 1865 memset((void*)&trk, 0, sizeof(trk));
 1866 memset((void*)&trkHit, 0, sizeof(trkHit));
 1867 if (attach & gputpcgmmergertypes::attachFlagMask) {
 1868 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1869 trk = mTracking->mIOPtrs.mergedTracks[track];
1870 for (uint32_t l = 0; l < trk.NClusters(); l++) {
1871 const auto& tmp = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + l];
1872 if (tmp.num == clid) {
1873 trkHit = tmp;
1874 break;
1875 }
1876 }
1877 }
1878 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, GPUTPCGMMergedTrack, GPUTPCGMMergedTrackHit, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "track", "trackHit", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1879 cldump.Fill(cl, trk, trkHit, attach, extState, x, y, z, i, j, mNEvents - 1);
1880 } else {
1881 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1882 cldump.Fill(cl, attach, extState, x, y, z, i, j, mNEvents - 1);
1883 }
1884 clid++;
1885 }
1886 }
1887 }
1888
1889 static auto trkdump = GPUROOTDump<uint32_t, GPUTPCGMMergedTrack>::getNew("nEv", "track", "tracksTree");
1890 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1891 if (mTracking->mIOPtrs.mergedTracks[i].OK()) {
1892 trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.mergedTracks[i]);
1893 }
1894 }
1895
1896 if (mTracking && mTracking->GetProcessingSettings().createO2Output) {
1897 static auto o2trkdump = GPUROOTDump<uint32_t, o2::tpc::TrackTPC>::getNew("nEv", "track", "tracksO2Tree");
1898 for (uint32_t i = 0; i < mTracking->mIOPtrs.nOutputTracksTPCO2; i++) {
1899 o2trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.outputTracksTPCO2[i]);
1900 }
1901 }
1902 }
1903
1904 if (mConfig.compareTrackStatus) {
1905#ifdef GPUCA_DETERMINISTIC_MODE
1906 if (!mTracking || !mTracking->GetProcessingSettings().deterministicGPUReconstruction)
1907#endif
1908 {
1909 throw std::runtime_error("Need deterministic processing to compare track status");
1910 }
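// Build one status byte per merged track (good fit and, unless MC is disabled, a valid non-fake MC label), to be written to or compared against the binary file track.status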
1911 std::vector<uint8_t> status(mTracking->mIOPtrs.nMergedTracks);
1912 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1913 const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
1914 status[i] = trk.OK() && trk.NClusters() && trk.GetParam().GetNDF() > 0 && (mConfig.noMC || (mTrackMCLabels[i].isValid() && !mTrackMCLabels[i].isFake()));
1915 }
1916 if (mConfig.compareTrackStatus == 1) {
1917 std::ofstream("track.status", std::ios::binary).write((char*)status.data(), status.size() * sizeof(status[0]));
1918 } else if (mConfig.compareTrackStatus == 2) {
1919 std::ifstream f("track.status", std::ios::binary | std::ios::ate);
1920 std::vector<uint8_t> comp(f.tellg());
1921 f.seekg(0);
1922 f.read((char*)comp.data(), comp.size());
1923
1924 if (comp.size() != status.size()) {
1925 throw std::runtime_error("Number of tracks candidates in track fit in track.status and in current reconstruction differ");
1926 }
1927 std::vector<uint32_t> missing, missingComp;
1928 for (uint32_t i = 0; i < status.size(); i++) {
1929 if (status[i] && !comp[i]) {
1930 missingComp.emplace_back(i);
1931 }
1932 if (comp[i] && !status[i]) {
1933 missing.emplace_back(i);
1934 }
1935 }
1936 auto printer = [](std::vector<uint32_t> m, const char* name) {
1937 if (m.size()) {
1938 printf("Missing in %s reconstruction: (%zu)\n", name, m.size());
1939 for (uint32_t i = 0; i < m.size(); i++) {
1940 if (i) {
1941 printf(", ");
1942 }
1943 printf("%d", m[i]);
1944 }
1945 printf("\n");
1946 }
1947 };
1948 printer(missing, "current");
1949 printer(missingComp, "comparison");
1950 }
1951 }
1952
1953 mTrackingScratchBuffer.clear();
1954 mTrackingScratchBuffer.shrink_to_fit();
1955}
1956
1957void GPUQA::GetName(char* fname, int32_t k)
1958{
1959 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
1960 if (k || mConfig.inputHistogramsOnly || mConfig.name.size()) {
1961 if (!(mConfig.inputHistogramsOnly || k)) {
1962 snprintf(fname, 1024, "%s - ", mConfig.name.c_str());
1963 } else if (mConfig.compareInputNames.size() > (unsigned)(k - nNewInput)) {
1964 snprintf(fname, 1024, "%s - ", mConfig.compareInputNames[k - nNewInput].c_str());
1965 } else {
1966 strcpy(fname, mConfig.compareInputs[k - nNewInput].c_str());
1967 if (strlen(fname) > 5 && strcmp(fname + strlen(fname) - 5, ".root") == 0) {
1968 fname[strlen(fname) - 5] = 0;
1969 }
1970 strcat(fname, " - ");
1971 }
1972 } else {
1973 fname[0] = 0;
1974 }
1975}
1976
1977template <class T>
1978T* GPUQA::GetHist(T*& ee, std::vector<std::unique_ptr<TFile>>& tin, int32_t k, int32_t nNewInput)
1979{
1980 T* e = ee;
1981 if ((mConfig.inputHistogramsOnly || k) && (e = dynamic_cast<T*>(tin[k - nNewInput]->Get(e->GetName()))) == nullptr) {
1982 GPUWarning("Missing histogram in input %s: %s", mConfig.compareInputs[k - nNewInput].c_str(), ee->GetName());
1983 return (nullptr);
1984 }
1985 ee = e;
1986 return (e);
1987}
1988
1989void GPUQA::DrawQAHistogramsCleanup()
1990{
1991 clearGarbagageCollector();
1992}
1993
1994void GPUQA::resetHists()
1995{
1996 if (!mQAInitialized) {
1997 throw std::runtime_error("QA not initialized");
1998 }
1999 if (mHaveExternalHists) {
2000 throw std::runtime_error("Cannot reset external hists");
2001 }
2002 for (auto& h : *mHist1D) {
2003 h.Reset();
2004 }
2005 for (auto& h : *mHist2D) {
2006 h.Reset();
2007 }
2008 for (auto& h : *mHist1Dd) {
2009 h.Reset();
2010 }
2011 for (auto& h : *mHistGraph) {
2012 h = TGraphAsymmErrors();
2013 }
2014 mClusterCounts = counts_t();
2015}
2016
2017int32_t GPUQA::DrawQAHistograms(TObjArray* qcout)
2018{
2019 const auto oldRootIgnoreLevel = gErrorIgnoreLevel;
2020 gErrorIgnoreLevel = kWarning;
2021 if (!mQAInitialized) {
2022 throw std::runtime_error("QA not initialized");
2023 }
2024
2025 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
2026 printf("Creating QA Histograms\n");
2027 }
2028
2029 std::vector<Color_t> colorNums(COLORCOUNT);
2030 if (!qcout) {
2031 [[maybe_unused]] static int32_t initColorsInitialized = initColors();
2032 }
2033 for (int32_t i = 0; i < COLORCOUNT; i++) {
2034 colorNums[i] = qcout ? defaultColorNums[i] : mColors[i]->GetNumber();
2035 }
2036
2037 bool mcAvail = mcPresent();
2038 char name[2048], fname[1024];
2039
2040 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
2041 const int32_t ConfigNumInputs = nNewInput + mConfig.compareInputs.size();
2042
2043 std::vector<std::unique_ptr<TFile>> tin;
2044 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
2045 tin.emplace_back(std::make_unique<TFile>(mConfig.compareInputs[i].c_str()));
2046 }
2047 std::unique_ptr<TFile> tout = nullptr;
2048 if (mConfig.output.size()) {
2049 tout = std::make_unique<TFile>(mConfig.output.c_str(), "RECREATE");
2050 }
2051
2052 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2053 float legendSpacingString = 0.025;
2054 for (int32_t i = 0; i < ConfigNumInputs; i++) {
2055 GetName(fname, i);
2056 if (strlen(fname) * 0.006 > legendSpacingString) {
2057 legendSpacingString = strlen(fname) * 0.006;
2058 }
2059 }
2060
2061 // Create Canvas / Pads for Efficiency Histograms
2062 if (mQATasks & taskTrackingEff) {
2063 for (int32_t ii = 0; ii < 6; ii++) {
2064 int32_t i = ii == 5 ? 4 : ii;
2065 snprintf(fname, 1024, "eff_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2066 snprintf(name, 2048, "Efficiency versus %s", VSPARAMETER_NAMES[i]);
2067 mCEff[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
2068 mCEff[ii]->cd();
2069 float dy = 1. / 2.;
2070 mPEff[ii][0] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2071 mPEff[ii][0]->Draw();
2072 mPEff[ii][0]->SetRightMargin(0.04);
2073 mPEff[ii][1] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2074 mPEff[ii][1]->Draw();
2075 mPEff[ii][1]->SetRightMargin(0.04);
2076 mPEff[ii][2] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 0.5, dy * 2 - .001);
2077 mPEff[ii][2]->Draw();
2078 mPEff[ii][2]->SetRightMargin(0.04);
2079 mPEff[ii][3] = createGarbageCollected<TPad>("p3", "", 0.5, dy * 1, 1.0, dy * 2 - .001);
2080 mPEff[ii][3]->Draw();
2081 mPEff[ii][3]->SetRightMargin(0.04);
2082 mLEff[ii] = createGarbageCollected<TLegend>(0.92 - legendSpacingString * 1.45, 0.83 - (0.93 - 0.82) / 2. * (float)ConfigNumInputs, 0.98, 0.849);
2083 SetLegend(mLEff[ii]);
2084 }
2085 }
2086
2087 // Create Canvas / Pads for Resolution Histograms
2088 if (mQATasks & taskTrackingRes) {
2089 for (int32_t ii = 0; ii < 7; ii++) {
2090 int32_t i = ii == 5 ? 4 : ii;
2091 if (ii == 6) {
2092 snprintf(fname, 1024, "res_integral_layout");
2093 snprintf(name, 2048, "Integral Resolution");
2094 } else {
2095 snprintf(fname, 1024, "res_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2096 snprintf(name, 2048, "Resolution versus %s", VSPARAMETER_NAMES[i]);
2097 }
2098 mCRes[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
2099 mCRes[ii]->cd();
2100 gStyle->SetOptFit(1);
2101
2102 float dy = 1. / 2.;
2103 mPRes[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2104 mPRes[ii][3]->Draw();
2105 mPRes[ii][3]->SetRightMargin(0.04);
2106 mPRes[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2107 mPRes[ii][4]->Draw();
2108 mPRes[ii][4]->SetRightMargin(0.04);
2109 mPRes[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2110 mPRes[ii][0]->Draw();
2111 mPRes[ii][0]->SetRightMargin(0.04);
2112 mPRes[ii][0]->SetLeftMargin(0.15);
2113 mPRes[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2114 mPRes[ii][1]->Draw();
2115 mPRes[ii][1]->SetRightMargin(0.04);
2116 mPRes[ii][1]->SetLeftMargin(0.135);
2117 mPRes[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2118 mPRes[ii][2]->Draw();
2119 mPRes[ii][2]->SetRightMargin(0.06);
2120 mPRes[ii][2]->SetLeftMargin(0.135);
2121 if (ii < 6) {
2122 mLRes[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2123 SetLegend(mLRes[ii]);
2124 }
2125 }
2126 }
2127
2128 // Create Canvas / Pads for Pull Histograms
2129 if (mQATasks & taskTrackingResPull) {
2130 for (int32_t ii = 0; ii < 7; ii++) {
2131 int32_t i = ii == 5 ? 4 : ii;
2132
2133 if (ii == 6) {
2134 snprintf(fname, 1024, "pull_integral_layout");
2135 snprintf(name, 2048, "Integral Pull");
2136 } else {
2137 snprintf(fname, 1024, "pull_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2138 snprintf(name, 2048, "Pull versus %s", VSPARAMETER_NAMES[i]);
2139 }
2140 mCPull[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
2141 mCPull[ii]->cd();
2142 gStyle->SetOptFit(1);
2143
2144 float dy = 1. / 2.;
2145 mPPull[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2146 mPPull[ii][3]->Draw();
2147 mPPull[ii][3]->SetRightMargin(0.04);
2148 mPPull[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2149 mPPull[ii][4]->Draw();
2150 mPPull[ii][4]->SetRightMargin(0.04);
2151 mPPull[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2152 mPPull[ii][0]->Draw();
2153 mPPull[ii][0]->SetRightMargin(0.04);
2154 mPPull[ii][0]->SetLeftMargin(0.15);
2155 mPPull[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2156 mPPull[ii][1]->Draw();
2157 mPPull[ii][1]->SetRightMargin(0.04);
2158 mPPull[ii][1]->SetLeftMargin(0.135);
2159 mPPull[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2160 mPPull[ii][2]->Draw();
2161 mPPull[ii][2]->SetRightMargin(0.06);
2162 mPPull[ii][2]->SetLeftMargin(0.135);
2163 if (ii < 6) {
2164 mLPull[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2165 SetLegend(mLPull[ii]);
2166 }
2167 }
2168 }
2169
2170 // Create Canvas for Cluster Histos
2171 if (mQATasks & taskClusterAttach) {
2172 for (int32_t i = 0; i < 3; i++) {
2173 snprintf(fname, 1024, "clusters_%s_layout", CLUSTER_TYPES[i]);
2174 mCClust[i] = createGarbageCollected<TCanvas>(fname, CLUSTER_TITLES[i], 0, 0, 700, 700. * 2. / 3.);
2175 mCClust[i]->cd();
2176 mPClust[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2177 mPClust[i]->Draw();
2178 float y1 = i != 1 ? 0.77 : 0.27, y2 = i != 1 ? 0.9 : 0.42;
2179 mLClust[i] = createGarbageCollected<TLegend>(i == 2 ? 0.1 : (0.65 - legendSpacingString * 1.45), y2 - (y2 - y1) * (ConfigNumInputs + (i != 1) / 2.) + 0.005, i == 2 ? (0.3 + legendSpacingString * 1.45) : 0.9, y2);
2180 SetLegend(mLClust[i]);
2181 }
2182 }
2183
2184 // Create Canvas for track statistic histos
2185 if (mQATasks & taskTrackStatistics) {
2186 mCTracks = createGarbageCollected<TCanvas>("ctrackspt", "Track Pt", 0, 0, 700, 700. * 2. / 3.);
2187 mCTracks->cd();
2188 mPTracks = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2189 mPTracks->Draw();
2190 mLTracks = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2191 SetLegend(mLTracks);
2192
2193 for (int32_t i = 0; i < 2; i++) {
2194 snprintf(name, 2048, "ctrackst0%d", i);
2195 mCT0[i] = createGarbageCollected<TCanvas>(name, "Track T0", 0, 0, 700, 700. * 2. / 3.);
2196 mCT0[i]->cd();
2197 mPT0[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2198 mPT0[i]->Draw();
2199 mLT0[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2200 SetLegend(mLT0[i]);
2201
2202 snprintf(name, 2048, "cncl%d", i);
2203 mCNCl[i] = createGarbageCollected<TCanvas>(name, i ? "Number of clusters (corrected for multiple per row)" : "Number of clusters per track", 0, 0, 700, 700. * 2. / 3.);
2204 mCNCl[i]->cd();
2205 mPNCl[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2206 mPNCl[i]->Draw();
2207 mLNCl[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2208 SetLegend(mLNCl[i]);
2209 }
2210
2211 mCClXY = createGarbageCollected<TCanvas>("clxy", "Number of clusters per X / Y", 0, 0, 700, 700. * 2. / 3.);
2212 mCClXY->cd();
2213 mPClXY = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2214 mPClXY->Draw();
2215 }
2216 }
2217
2218 if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && (mQATasks & taskTrackingEff) && mcPresent()) {
2219 GPUInfo("QA Stats: Eff: Tracks Prim %d (Eta %d, Pt %d) %f%% (%f%%) Sec %d (Eta %d, Pt %d) %f%% (%f%%) - Res: Tracks %d (Eta %d, Pt %d)", (int32_t)mEff[3][1][0][0]->GetEntries(), (int32_t)mEff[3][1][0][3]->GetEntries(), (int32_t)mEff[3][1][0][4]->GetEntries(),
2220 mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights()), mEff[0][1][0][0]->GetSumOfWeights() / std::max(1., mEff[3][1][0][0]->GetSumOfWeights()), (int32_t)mEff[3][1][1][0]->GetEntries(), (int32_t)mEff[3][1][1][3]->GetEntries(),
2221 (int32_t)mEff[3][1][1][4]->GetEntries(), mEff[0][0][1][0]->GetSumOfWeights() / std::max(1., mEff[3][0][1][0]->GetSumOfWeights()), mEff[0][1][1][0]->GetSumOfWeights() / std::max(1., mEff[3][1][1][0]->GetSumOfWeights()), (int32_t)mRes2[0][0]->GetEntries(),
2222 (int32_t)mRes2[0][3]->GetEntries(), (int32_t)mRes2[0][4]->GetEntries());
2223 }
2224
2225 int32_t flagShowVsPtLog = (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) ? 1 : 0;
2226
2227 if (mQATasks & taskTrackingEff) {
2228 // Process / Draw Efficiency Histograms
2229 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2230 int32_t i = ii == 5 ? 4 : ii;
2231 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2232 for (int32_t j = 0; j < 4; j++) {
2233 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2234 mPEff[ii][j]->cd();
2235 if (ii == 5) {
2236 mPEff[ii][j]->SetLogx();
2237 }
2238 }
2239 for (int32_t l = 0; l < 3; l++) {
2240 if (k == 0 && mConfig.inputHistogramsOnly == 0 && ii != 5) {
2241 if (l == 0) {
2242 // Divide eff, compute all for fake/clone
2243 auto oldLevel = gErrorIgnoreLevel;
2244 gErrorIgnoreLevel = kError;
2245 mEffResult[0][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2246 gErrorIgnoreLevel = oldLevel;
2247 mEff[3][j / 2][j % 2][i]->Reset(); // Sum up rec + clone + fake for fake rate
2248 mEff[3][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2249 mEff[3][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2250 mEff[3][j / 2][j % 2][i]->Add(mEff[2][j / 2][j % 2][i]);
2251 mEff[4][j / 2][j % 2][i]->Reset(); // Sum up rec + clone for clone rate
2252 mEff[4][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2253 mEff[4][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2254 } else {
2255 // Divide fake/clone
2256 auto oldLevel = gErrorIgnoreLevel;
2257 gErrorIgnoreLevel = kError;
2258 mEffResult[l][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[l == 1 ? 4 : 3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2259 gErrorIgnoreLevel = oldLevel;
2260 }
2261 }
2262
2263 TGraphAsymmErrors* e = mEffResult[l][j / 2][j % 2][i];
2264
2265 if (!mConfig.inputHistogramsOnly && k == 0) {
2266 if (tout) {
2267 mEff[l][j / 2][j % 2][i]->Write();
2268 e->Write();
2269 if (l == 2) {
 2270 mEff[3][j / 2][j % 2][i]->Write(); // Also store the summed (all) histogram
 2271 mEff[4][j / 2][j % 2][i]->Write(); // Also store the summed (all) histogram
2272 }
2273 }
2274 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2275 continue;
2276 }
2277 e->SetTitle(EFFICIENCY_TITLES[j]);
2278 e->GetYaxis()->SetTitle("(Efficiency)");
2279 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2280
2281 e->SetLineWidth(1);
2282 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2283 SetAxisSize(e);
2284 if (qcout && !mConfig.shipToQCAsCanvas) {
2285 qcout->Add(e);
2286 }
2287 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2288 continue;
2289 }
2290 e->SetMarkerColor(kBlack);
2291 e->SetLineColor(colorNums[(l == 2 ? (ConfigNumInputs * 2 + k) : (k * 2 + l)) % COLORCOUNT]);
2292 e->GetHistogram()->GetYaxis()->SetRangeUser(-0.02, 1.02);
2293 e->Draw(k || l ? "same P" : "AP");
2294 if (j == 0) {
2295 GetName(fname, k);
2296 snprintf(name, 2048, "%s%s", fname, EFF_NAMES[l]);
2297 mLEff[ii]->AddEntry(e, name, "l");
2298 }
2299 }
2300 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2301 continue;
2302 }
2303 mCEff[ii]->cd();
2304 ChangePadTitleSize(mPEff[ii][j], 0.056);
2305 }
2306 }
2307 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2308 continue;
2309 }
2310
2311 mLEff[ii]->Draw();
2312
2313 if (qcout) {
2314 qcout->Add(mCEff[ii]);
2315 }
2316 if (!mConfig.enableLocalOutput) {
2317 continue;
2318 }
2319 doPerfFigure(0.2, 0.295, 0.025);
2320 mCEff[ii]->Print(Form("plots/eff_vs_%s.pdf", VSPARAMETER_NAMES[ii]));
2321 if (mConfig.writeRootFiles) {
2322 mCEff[ii]->Print(Form("plots/eff_vs_%s.root", VSPARAMETER_NAMES[ii]));
2323 }
2324 }
2325 }
2326
2327 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
2328 // Process / Draw Resolution Histograms
2329 TH1D *resIntegral[5] = {}, *pullIntegral[5] = {};
2330 TCanvas* cfit = nullptr;
2331 std::unique_ptr<TF1> customGaus = std::make_unique<TF1>("G", "[0]*exp(-(x-[1])*(x-[1])/(2.*[2]*[2]))");
2332 for (int32_t p = 0; p < 2; p++) {
2333 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2334 continue;
2335 }
2336 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2337 TCanvas* can = p ? mCPull[ii] : mCRes[ii];
2338 TLegend* leg = p ? mLPull[ii] : mLRes[ii];
2339 int32_t i = ii == 5 ? 4 : ii;
2340 for (int32_t j = 0; j < 5; j++) {
2341 TH2F* src = p ? mPull2[j][i] : mRes2[j][i];
2342 TH1F** dst = p ? mPull[j][i] : mRes[j][i];
2343 TH1D*& dstIntegral = p ? pullIntegral[j] : resIntegral[j];
2344 TPad* pad = p ? mPPull[ii][j] : mPRes[ii][j];
2345
2346 if (!mConfig.inputHistogramsOnly && ii != 5) {
2347 if (cfit == nullptr) {
2348 cfit = createGarbageCollected<TCanvas>();
2349 }
2350 cfit->cd();
2351
2352 TAxis* axis = src->GetYaxis();
2353 int32_t nBins = axis->GetNbins();
2354 int32_t integ = 1;
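// Project the 2D histogram onto the residual axis for each bin of the binning variable (merging +-integ neighbouring bins), rebin until sufficiently populated, and extract sigma and mean from a Gaussian fit, falling back to the plain RMS and mean for sparse projections or failed fits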
2355 for (int32_t bin = 1; bin <= nBins; bin++) {
2356 int32_t bin0 = std::max(bin - integ, 0);
2357 int32_t bin1 = std::min(bin + integ, nBins);
2358 std::unique_ptr<TH1D> proj{src->ProjectionX("proj", bin0, bin1)};
2359 proj->ClearUnderflowAndOverflow();
2360 if (proj->GetEntries()) {
2361 uint32_t rebin = 1;
2362 while (proj->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2363 proj->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2364 rebin++;
2365 }
2366
2367 if (proj->GetEntries() < 20 || proj->GetRMS() < 0.00001) {
2368 dst[0]->SetBinContent(bin, proj->GetRMS());
2369 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2370 dst[1]->SetBinContent(bin, proj->GetMean());
2371 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2372 } else {
2373 proj->GetXaxis()->SetRange(0, 0);
2374 proj->GetXaxis()->SetRangeUser(std::max(proj->GetXaxis()->GetXmin(), proj->GetMean() - 3. * proj->GetRMS()), std::min(proj->GetXaxis()->GetXmax(), proj->GetMean() + 3. * proj->GetRMS()));
2375 bool forceLogLike = proj->GetMaximum() < 20;
2376 for (int32_t k = forceLogLike ? 2 : 0; k < 3; k++) {
2377 proj->Fit("gaus", forceLogLike || k == 2 ? "sQl" : k ? "sQww" : "sQ");
2378 TF1* fitFunc = proj->GetFunction("gaus");
2379
2380 if (k && !forceLogLike) {
2381 customGaus->SetParameters(fitFunc->GetParameter(0), fitFunc->GetParameter(1), fitFunc->GetParameter(2));
2382 proj->Fit(customGaus.get(), "sQ");
2383 fitFunc = customGaus.get();
2384 }
2385
2386 const float sigma = fabs(fitFunc->GetParameter(2));
2387 dst[0]->SetBinContent(bin, sigma);
2388 dst[1]->SetBinContent(bin, fitFunc->GetParameter(1));
2389 dst[0]->SetBinError(bin, fitFunc->GetParError(2));
2390 dst[1]->SetBinError(bin, fitFunc->GetParError(1));
2391
2392 const bool fail1 = sigma <= 0.f;
2393 const bool fail2 = fabs(proj->GetMean() - dst[1]->GetBinContent(bin)) > std::min<float>(p ? PULL_AXIS : mConfig.nativeFitResolutions ? RES_AXES_NATIVE[j] : RES_AXES[j], 3.f * proj->GetRMS());
2394 const bool fail3 = dst[0]->GetBinContent(bin) > 3.f * proj->GetRMS() || dst[0]->GetBinError(bin) > 1 || dst[1]->GetBinError(bin) > 1;
2395 const bool fail4 = fitFunc->GetParameter(0) < proj->GetMaximum() / 5.;
2396 const bool fail = fail1 || fail2 || fail3 || fail4;
2397 // if (p == 0 && ii == 4 && j == 2) DrawHisto(proj, Form("Hist_bin_%d-%d_vs_%d____%d_%d___%f-%f___%f-%f___%d.pdf", p, j, ii, bin, k, dst[0]->GetBinContent(bin), proj->GetRMS(), dst[1]->GetBinContent(bin), proj->GetMean(), (int32_t) fail), "");
2398
2399 if (!fail) {
2400 break;
2401 } else if (k >= 2) {
2402 dst[0]->SetBinContent(bin, proj->GetRMS());
2403 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2404 dst[1]->SetBinContent(bin, proj->GetMean());
2405 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2406 }
2407 }
2408 }
2409 } else {
2410 dst[0]->SetBinContent(bin, 0.f);
2411 dst[0]->SetBinError(bin, 0.f);
2412 dst[1]->SetBinContent(bin, 0.f);
2413 dst[1]->SetBinError(bin, 0.f);
2414 }
2415 }
2416 if (ii == 0) {
2417 dstIntegral = src->ProjectionX(mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j], 0, nBins + 1);
2418 uint32_t rebin = 1;
2419 while (dstIntegral->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2420 dstIntegral->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2421 rebin++;
2422 }
2423 }
2424 }
2425 if (ii == 0) {
2426 if (mConfig.inputHistogramsOnly) {
2427 dstIntegral = createGarbageCollected<TH1D>();
2428 }
2429 snprintf(fname, 1024, p ? "IntPull%s" : "IntRes%s", VSPARAMETER_NAMES[j]);
2430 snprintf(name, 2048, p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]);
2431 dstIntegral->SetName(fname);
2432 dstIntegral->SetTitle(name);
2433 }
2434 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2435 pad->cd();
2436 }
2437 int32_t numColor = 0;
2438 float tmpMax = -1000.;
2439 float tmpMin = 1000.;
2440
2441 for (int32_t l = 0; l < 2; l++) {
2442 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2443 TH1F* e = dst[l];
2444 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2445 continue;
2446 }
2447 if (nNewInput && k == 0 && ii != 5) {
2448 if (p == 0) {
2449 e->Scale(mConfig.nativeFitResolutions ? SCALE_NATIVE[j] : SCALE[j]);
2450 }
2451 }
2452 if (ii == 4) {
2453 e->GetXaxis()->SetRangeUser(0.2, PT_MAX);
2454 } else if (LOG_PT_MIN > 0 && ii == 5) {
2455 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2456 } else if (ii == 5) {
2457 e->GetXaxis()->SetRange(1, 0);
2458 }
2459 e->SetMinimum(-1111);
2460 e->SetMaximum(-1111);
2461
2462 if (e->GetMaximum() > tmpMax) {
2463 tmpMax = e->GetMaximum();
2464 }
2465 if (e->GetMinimum() < tmpMin) {
2466 tmpMin = e->GetMinimum();
2467 }
2468 }
2469 }
2470
2471 float tmpSpan;
2472 tmpSpan = tmpMax - tmpMin;
2473 tmpMax += tmpSpan * .02;
2474 tmpMin -= tmpSpan * .02;
2475 if (j == 2 && i < 3) {
2476 tmpMax += tmpSpan * 0.13 * ConfigNumInputs;
2477 }
2478
2479 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2480 for (int32_t l = 0; l < 2; l++) {
2481 TH1F* e = dst[l];
2482 if (!mConfig.inputHistogramsOnly && k == 0) {
2483 snprintf(name, 2048, p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]);
2484 e->SetTitle(name);
2485 e->SetStats(kFALSE);
2486 if (tout) {
2487 if (l == 0) {
2488 mRes2[j][i]->SetOption("colz");
2489 mRes2[j][i]->Write();
2490 }
2491 e->Write();
2492 }
2493 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2494 continue;
2495 }
2496 e->SetMaximum(tmpMax);
2497 e->SetMinimum(tmpMin);
2498 e->SetLineWidth(1);
2499 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2500 SetAxisSize(e);
2501 e->GetYaxis()->SetTitle(p ? AXIS_TITLES_PULL[j] : mConfig.nativeFitResolutions ? AXIS_TITLES_NATIVE[j] : AXIS_TITLES[j]);
2502 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2503 if (LOG_PT_MIN > 0 && ii == 5) {
2504 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2505 }
2506
2507 if (j == 0) {
2508 e->GetYaxis()->SetTitleOffset(1.5);
2509 } else if (j < 3) {
2510 e->GetYaxis()->SetTitleOffset(1.4);
2511 }
2512 if (qcout && !mConfig.shipToQCAsCanvas) {
2513 qcout->Add(e);
2514 }
2515 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2516 continue;
2517 }
2518
2519 e->SetMarkerColor(kBlack);
2520 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2521 e->Draw(k || l ? "same" : "");
2522 if (j == 0) {
2523 GetName(fname, k);
2524 if (p) {
2525 snprintf(name, 2048, "%s%s", fname, l ? "Mean" : "Pull");
2526 } else {
2527 snprintf(name, 2048, "%s%s", fname, l ? "Mean" : "Resolution");
2528 }
2529 leg->AddEntry(e, name, "l");
2530 }
2531 }
2532 }
2533 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2534 continue;
2535 }
2536
2537 if (ii == 5) {
2538 pad->SetLogx();
2539 }
2540 can->cd();
2541 if (j == 4) {
2542 ChangePadTitleSize(pad, 0.056);
2543 }
2544 }
2545 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2546 continue;
2547 }
2548
2549 leg->Draw();
2550
2551 if (qcout) {
2552 qcout->Add(can);
2553 }
2554 if (!mConfig.enableLocalOutput) {
2555 continue;
2556 }
2557 doPerfFigure(0.2, 0.295, 0.025);
2558 can->Print(Form(p ? "plots/pull_vs_%s.pdf" : "plots/res_vs_%s.pdf", VSPARAMETER_NAMES[ii]));
2559 if (mConfig.writeRootFiles) {
2560 can->Print(Form(p ? "plots/pull_vs_%s.root" : "plots/res_vs_%s.root", VSPARAMETER_NAMES[ii]));
2561 }
2562 }
2563 }
2564
 2565 // Process Integral Resolution Histograms
2566 for (int32_t p = 0; p < 2; p++) {
2567 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2568 continue;
2569 }
2570 TCanvas* can = p ? mCPull[6] : mCRes[6];
2571 for (int32_t i = 0; i < 5; i++) {
2572 TPad* pad = p ? mPPull[6][i] : mPRes[6][i];
2573 TH1D* hist = p ? pullIntegral[i] : resIntegral[i];
2574 int32_t numColor = 0;
2575 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2576 pad->cd();
2577 }
2578 if (!mConfig.inputHistogramsOnly && mcAvail) {
2579 TH1D* e = hist;
2580 if (e && e->GetEntries()) {
2581 e->Fit("gaus", "sQ");
2582 }
2583 }
2584
2585 float tmpMax = 0;
2586 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2587 TH1D* e = hist;
2588 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2589 continue;
2590 }
2591 e->SetMaximum(-1111);
2592 if (e->GetMaximum() > tmpMax) {
2593 tmpMax = e->GetMaximum();
2594 }
2595 }
2596
2597 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2598 TH1D* e = hist;
2599 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2600 continue;
2601 }
2602 e->SetMaximum(tmpMax * 1.02);
2603 e->SetMinimum(tmpMax * -0.02);
2604 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2605 e->Write();
2606 }
2607 if (qcout && !mConfig.shipToQCAsCanvas) {
2608 qcout->Add(e);
2609 }
2610 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2611 continue;
2612 }
2613
2614 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2615 e->Draw(k == 0 ? "" : "same");
2616 }
2617 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2618 continue;
2619 }
2620 can->cd();
2621 }
2622 if (qcout) {
2623 qcout->Add(can);
2624 }
2625 if (!mConfig.enableLocalOutput) {
2626 continue;
2627 }
2628
2629 can->Print(p ? "plots/pull_integral.pdf" : "plots/res_integral.pdf");
2630 if (mConfig.writeRootFiles) {
2631 can->Print(p ? "plots/pull_integral.root" : "plots/res_integral.root");
2632 }
2633 }
2634 }
2635
2636 uint64_t attachClusterCounts[N_CLS_HIST];
2637 if (mQATasks & taskClusterAttach) {
2638 // Process Cluster Attachment Histograms
2639 if (mConfig.inputHistogramsOnly == 0) {
2640 for (int32_t i = N_CLS_HIST; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
2641 mClusters[i]->Sumw2(true);
2642 }
2643 double totalVal = 0;
2644 if (!CLUST_HIST_INT_SUM) {
2645 for (int32_t j = 0; j < mClusters[N_CLS_HIST - 1]->GetXaxis()->GetNbins() + 2; j++) {
2646 totalVal += mClusters[N_CLS_HIST - 1]->GetBinContent(j);
2647 }
2648 }
2649 if (totalVal == 0.) {
2650 totalVal = 1.;
2651 }
2652 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2653 double val = 0;
2654 for (int32_t j = 0; j < mClusters[i]->GetXaxis()->GetNbins() + 2; j++) {
2655 val += mClusters[i]->GetBinContent(j);
2656 mClusters[2 * N_CLS_HIST - 1 + i]->SetBinContent(j, val / totalVal);
2657 }
2658 attachClusterCounts[i] = val;
2659 }
2660
2661 if (!CLUST_HIST_INT_SUM) {
2662 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2663 mClusters[2 * N_CLS_HIST - 1 + i]->SetMaximum(1.02);
2664 mClusters[2 * N_CLS_HIST - 1 + i]->SetMinimum(-0.02);
2665 }
2666 }
2667
2668 for (int32_t i = 0; i < N_CLS_HIST - 1; i++) {
2669 auto oldLevel = gErrorIgnoreLevel;
2670 gErrorIgnoreLevel = kError;
2671 mClusters[N_CLS_HIST + i]->Divide(mClusters[i], mClusters[N_CLS_HIST - 1], 1, 1, "B");
2672 gErrorIgnoreLevel = oldLevel;
2673 mClusters[N_CLS_HIST + i]->SetMinimum(-0.02);
2674 mClusters[N_CLS_HIST + i]->SetMaximum(1.02);
2675 }
2676 }
2677
2678 float tmpMax[2] = {0, 0}, tmpMin[2] = {0, 0};
2679 for (int32_t l = 0; l <= CLUST_HIST_INT_SUM; l++) {
2680 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2681 TH1* e = mClusters[l ? (N_CLS_TYPE * N_CLS_HIST - 2) : (N_CLS_HIST - 1)];
2682 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2683 continue;
2684 }
2685 e->SetMinimum(-1111);
2686 e->SetMaximum(-1111);
2687 if (l == 0) {
2688 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2689 }
2690 if (e->GetMaximum() > tmpMax[l]) {
2691 tmpMax[l] = e->GetMaximum();
2692 }
2693 if (e->GetMinimum() < tmpMin[l]) {
2694 tmpMin[l] = e->GetMinimum();
2695 }
2696 }
2697 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2698 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2699 TH1* e = mClusters[l ? (2 * N_CLS_HIST - 1 + i) : i];
2700 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2701 continue;
2702 }
2703 e->SetMaximum(tmpMax[l] * 1.02);
2704 e->SetMinimum(tmpMax[l] * -0.02);
2705 }
2706 }
2707 }
2708
2709 for (int32_t i = 0; i < N_CLS_TYPE; i++) {
2710 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2711 mPClust[i]->cd();
2712 mPClust[i]->SetLogx();
2713 }
2714 int32_t begin = i == 2 ? (2 * N_CLS_HIST - 1) : i == 1 ? N_CLS_HIST : 0;
2715 int32_t end = i == 2 ? (3 * N_CLS_HIST - 1) : i == 1 ? (2 * N_CLS_HIST - 1) : N_CLS_HIST;
2716 int32_t numColor = 0;
2717 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2718 for (int32_t j = end - 1; j >= begin; j--) {
2719 TH1* e = mClusters[j];
2720 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2721 continue;
2722 }
2723
2724 e->SetTitle(CLUSTER_TITLES[i]);
2725 e->GetYaxis()->SetTitle(i == 0 ? "Number of TPC clusters" : i == 1 ? "Fraction of TPC clusters" : CLUST_HIST_INT_SUM ? "Total TPC clusters (integrated)" : "Fraction of TPC clusters (integrated)");
2726 e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
2727 e->GetXaxis()->SetTitleOffset(1.1);
2728 e->GetXaxis()->SetLabelOffset(-0.005);
2729 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2730 e->Write();
2731 }
2732 e->SetStats(kFALSE);
2733 e->SetLineWidth(1);
2734 e->SetLineStyle(CONFIG_DASHED_MARKERS ? j + 1 : 1);
2735 if (i == 0) {
2736 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2737 }
2738 if (qcout && !mConfig.shipToQCAsCanvas) {
2739 qcout->Add(e);
2740 }
2741 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2742 continue;
2743 }
2744
2745 e->SetMarkerColor(kBlack);
2746 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2747 e->Draw(j == end - 1 && k == 0 ? "" : "same");
2748 GetName(fname, k);
2749 snprintf(name, 2048, "%s%s", fname, CLUSTER_NAMES[j - begin]);
2750 mLClust[i]->AddEntry(e, name, "l");
2751 }
2752 }
2753 if (ConfigNumInputs == 1) {
2754 TH1* e = reinterpret_cast<TH1F*>(mClusters[begin + CL_att_adj]->Clone());
2755 e->Add(mClusters[begin + CL_prot], -1);
2756 if (qcout && !mConfig.shipToQCAsCanvas) {
2757 qcout->Add(e);
2758 }
2759 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2760 continue;
2761 }
2762
2763 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2764 e->Draw("same");
2765 mLClust[i]->AddEntry(e, "Removed (Strategy A)", "l");
2766 }
2767 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2768 continue;
2769 }
2770
2771 mLClust[i]->Draw();
2772
2773 if (qcout) {
2774 qcout->Add(mCClust[i]);
2775 }
2776 if (!mConfig.enableLocalOutput) {
2777 continue;
2778 }
2779 doPerfFigure(i != 2 ? 0.37 : 0.6, 0.295, 0.030);
2780 mCClust[i]->cd();
2781 mCClust[i]->Print(i == 2 ? "plots/clusters_integral.pdf" : i == 1 ? "plots/clusters_relative.pdf" : "plots/clusters.pdf");
2782 if (mConfig.writeRootFiles) {
2783 mCClust[i]->Print(i == 2 ? "plots/clusters_integral.root" : i == 1 ? "plots/clusters_relative.root" : "plots/clusters.root");
2784 }
2785 }
2786 }
2787
2788 // Process cluster count statistics
2789 if ((mQATasks & taskClusterCounts) && !mHaveExternalHists && !mConfig.clusterRejectionHistograms && !mConfig.inputHistogramsOnly) {
2790 DoClusterCounts(attachClusterCounts);
2791 }
2792 if ((qcout || tout) && (mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
2793 for (uint32_t i = 0; i < mHistClusterCount.size(); i++) {
2794 if (tout) {
2795 mHistClusterCount[i]->Write();
2796 }
2797 if (qcout) {
2798 qcout->Add(mHistClusterCount[i]);
2799 }
2800 }
2801 }
2802
2803 if (mQATasks & taskTrackStatistics) {
2804 // Process track statistic histograms
2805 float tmpMax = 0.;
2806 for (int32_t k = 0; k < ConfigNumInputs; k++) { // TODO: Simplify this drawing, avoid copy&paste
2807 TH1F* e = mTracks;
2808 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2809 continue;
2810 }
2811 e->SetMaximum(-1111);
2812 if (e->GetMaximum() > tmpMax) {
2813 tmpMax = e->GetMaximum();
2814 }
2815 }
2816 mPTracks->cd();
2817 mPTracks->SetLogx();
2818 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2819 TH1F* e = mTracks;
2820 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2821 continue;
2822 }
2823 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2824 e->Write();
2825 }
2826 e->SetMaximum(tmpMax * 1.02);
2827 e->SetMinimum(tmpMax * -0.02);
2828 e->SetStats(kFALSE);
2829 e->SetLineWidth(1);
2830 e->GetYaxis()->SetTitle("a.u.");
2831 e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
2832 if (qcout) {
2833 qcout->Add(e);
2834 }
2835 e->SetMarkerColor(kBlack);
2836 e->SetLineColor(colorNums[k % COLORCOUNT]);
2837 e->Draw(k == 0 ? "" : "same");
2838 GetName(fname, k);
2839 snprintf(name, 2048, "%sTrack Pt", fname);
2840 mLTracks->AddEntry(e, name, "l");
2841 }
2842 mLTracks->Draw();
2843 mCTracks->cd();
2844 mCTracks->Print("plots/tracks.pdf");
2845 if (mConfig.writeRootFiles) {
2846 mCTracks->Print("plots/tracks.root");
2847 }
2848
2849 for (int32_t i = 0; i < 2; i++) {
2850 tmpMax = 0.;
2851 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2852 TH1F* e = mT0[i];
2853 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2854 continue;
2855 }
2856 e->SetMaximum(-1111);
2857 if (e->GetMaximum() > tmpMax) {
2858 tmpMax = e->GetMaximum();
2859 }
2860 }
2861 mPT0[i]->cd();
2862 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2863 TH1F* e = mT0[i];
2864 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2865 continue;
2866 }
2867 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2868 e->Write();
2869 }
2870 e->SetMaximum(tmpMax * 1.02);
2871 e->SetMinimum(tmpMax * -0.02);
2872 e->SetStats(kFALSE);
2873 e->SetLineWidth(1);
2874 e->GetYaxis()->SetTitle("a.u.");
 2875 e->GetXaxis()->SetTitle(i ? "t0 vs t0_{mc}" : "t0");
2876 if (qcout) {
2877 qcout->Add(e);
2878 }
2879 e->SetMarkerColor(kBlack);
2880 e->SetLineColor(colorNums[k % COLORCOUNT]);
2881 e->Draw(k == 0 ? "" : "same");
2882 GetName(fname, k);
2883 snprintf(name, 2048, "%sTrack T0 %s", fname, i ? "" : "resolution");
2884 mLT0[i]->AddEntry(e, name, "l");
2885 }
2886 mLT0[i]->Draw();
2887 mCT0[i]->cd();
2888 snprintf(name, 2048, "plots/t0%s.pdf", i ? "_res" : "");
2889 mCT0[i]->Print(name);
2890 if (mConfig.writeRootFiles) {
2891 snprintf(name, 2048, "plots/t0%s.root", i ? "_res" : "");
2892 mCT0[i]->Print(name);
2893 }
2894
2895 tmpMax = 0.;
2896 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2897 TH1F* e = mNCl[i];
2898 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2899 continue;
2900 }
2901 e->SetMaximum(-1111);
2902 if (e->GetMaximum() > tmpMax) {
2903 tmpMax = e->GetMaximum();
2904 }
2905 }
2906 mPNCl[i]->cd();
2907 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2908 TH1F* e = mNCl[i];
2909 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2910 continue;
2911 }
2912 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2913 e->Write();
2914 }
2915 e->SetMaximum(tmpMax * 1.02);
2916 e->SetMinimum(tmpMax * -0.02);
2917 e->SetStats(kFALSE);
2918 e->SetLineWidth(1);
2919 e->GetYaxis()->SetTitle("a.u.");
2920 e->GetXaxis()->SetTitle("NClusters");
2921 if (qcout) {
2922 qcout->Add(e);
2923 }
2924 e->SetMarkerColor(kBlack);
2925 e->SetLineColor(colorNums[k % COLORCOUNT]);
2926 e->Draw(k == 0 ? "" : "same");
2927 GetName(fname, k);
2928 snprintf(name, 2048, "%sNClusters%d", fname, i);
2929 mLNCl[i]->AddEntry(e, name, "l");
2930 }
2931 mLNCl[i]->Draw();
2932 mCNCl[i]->cd();
2933 snprintf(name, 2048, "plots/nClusters%s.pdf", i ? "_corrected" : "");
2934 mCNCl[i]->Print(name);
2935 if (mConfig.writeRootFiles) {
2936 snprintf(name, 2048, "plots/nClusters%s.root", i ? "_corrected" : "");
2937 mCNCl[i]->Print(name);
2938 }
2939 }
2940
2941 mPClXY->cd();
2942 mClXY->SetOption("colz");
2943 mClXY->Draw();
2944 mCClXY->cd();
2945 mCClXY->Print("plots/clustersXY.pdf");
2946 if (mConfig.writeRootFiles) {
2947 mCClXY->Print("plots/clustersXY.root");
2948 }
2949 }
2950
2951 if (tout && !mConfig.inputHistogramsOnly && mConfig.writeMCLabels) {
2952 gInterpreter->GenerateDictionary("vector<vector<int32_t>>", "");
2953 tout->WriteObject(&mcEffBuffer, "mcEffBuffer");
2954 tout->WriteObject(&mcLabelBuffer, "mcLabelBuffer");
2955 remove("AutoDict_vector_vector_int__.cxx");
2956 remove("AutoDict_vector_vector_int___cxx_ACLiC_dict_rdict.pcm");
2957 remove("AutoDict_vector_vector_int___cxx.d");
2958 remove("AutoDict_vector_vector_int___cxx.so");
2959 }
2960
2961 if (tout) {
2962 tout->Close();
2963 }
2964 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
2965 tin[i]->Close();
2966 }
2967 if (!qcout) {
2968 clearGarbagageCollector();
2969 }
2970 GPUInfo("GPU TPC QA histograms have been written to %s files", mConfig.writeRootFiles ? ".pdf and .root" : ".pdf");
2971 gErrorIgnoreLevel = oldRootIgnoreLevel;
2972 return (0);
2973}
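
Every per-histogram block in the function above follows the same overlay idiom: a first pass over all configured inputs resets and scans the bin maxima, and a second pass redraws each input's copy of the histogram on a shared pad with a common y-range, writing the first (non-compare) copy to the output file. The following is a minimal sketch of that idiom only; drawOverlay, the histogram vector, and the colour table are hypothetical stand-ins for the members used in this file.

#include "TH1F.h"
#include "TPad.h"
#include <algorithm>
#include <vector>

// Sketch (hypothetical names): normalize the y-range across all inputs, then overlay.
void drawOverlay(std::vector<TH1F*>& hists, TPad* pad, const int* colors, int32_t nColors)
{
  float tmpMax = 0.f;
  for (TH1F* h : hists) {                     // pass 1: find the common maximum
    h->SetMaximum(-1111);                     // drop any previously forced maximum
    tmpMax = std::max(tmpMax, (float)h->GetMaximum());
  }
  pad->cd();
  for (size_t k = 0; k < hists.size(); k++) { // pass 2: draw with a shared range
    TH1F* h = hists[k];
    h->SetMaximum(tmpMax * 1.02f);
    h->SetMinimum(tmpMax * -0.02f);
    h->SetStats(kFALSE);
    h->SetLineWidth(1);
    h->SetLineColor(colors[k % nColors]);
    h->Draw(k == 0 ? "" : "same");            // the first histogram defines the frame
  }
}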
2974
2975void GPUQA::PrintClusterCount(int32_t mode, int32_t& num, const char* name, uint64_t n, uint64_t normalization)
2976{
2977 if (mode == 2) {
2978 // do nothing, just count num
2979 } else if (mode == 1) {
2980 char name2[128];
2981 snprintf(name2, 128, "clusterCount%d_", num);
2982 char* ptr = name2 + strlen(name2);
2983 for (uint32_t i = 0; i < strlen(name); i++) {
2984 if ((name[i] >= 'a' && name[i] <= 'z') || (name[i] >= 'A' && name[i] <= 'Z') || (name[i] >= '0' && name[i] <= '9')) {
2985 *(ptr++) = name[i];
2986 }
2987 }
2988 *ptr = 0;
2989 createHist(mHistClusterCount[num], name2, name, 1000, 0, mConfig.histMaxNClusters, 1000, 0, 100);
2990 } else if (mode == 0) {
2991 if (normalization && mConfig.enableLocalOutput) {
2992 printf("\t%40s: %'12" PRIu64 " (%6.2f%%)\n", name, n, 100.f * n / normalization);
2993 }
2994 if (mConfig.clusterRejectionHistograms) {
2995 float ratio = 100.f * n / std::max<uint64_t>(normalization, 1);
2996 mHistClusterCount[num]->Fill(normalization, ratio, 1);
2997 }
2998 }
2999 num++;
3000}
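
PrintClusterCount is driven entirely by its mode argument: mode 2 only advances num (so a caller can learn how many counters exist before booking anything), mode 1 books one histogram per counter under a sanitized version of the display name, and mode 0 prints the percentage and fills the booked histogram. A hedged, self-contained sketch of the mode 1 name sanitization, with a hypothetical helper name, is:

#include <cstdio>
#include <cstring>

// Sketch: keep only alphanumeric characters of the display name so it can be
// used as a ROOT object name (mirrors the mode == 1 branch above).
static void makeSafeHistName(char* out, size_t outSize, int32_t num, const char* displayName)
{
  snprintf(out, outSize, "clusterCount%d_", num);
  char* ptr = out + strlen(out);
  for (size_t i = 0; displayName[i] && (size_t)(ptr - out) + 1 < outSize; i++) {
    char c = displayName[i];
    if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')) {
      *ptr++ = c;                              // drops spaces, parentheses, '<', '>' etc.
    }
  }
  *ptr = 0;
}
// Example: makeSafeHistName(buf, sizeof(buf), 3, "Removed (Strategy A)") yields "clusterCount3_RemovedStrategyA".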
3001
3002int32_t GPUQA::DoClusterCounts(uint64_t* attachClusterCounts, int32_t mode)
3003{
3004 int32_t num = 0;
3005 if (mcPresent() && (mQATasks & taskClusterAttach) && attachClusterCounts) {
3006 for (int32_t i = 0; i < N_CLS_HIST; i++) {
3007 PrintClusterCount(mode, num, CLUSTER_NAMES[i], attachClusterCounts[i], mClusterCounts.nTotal);
3008 }
3009 PrintClusterCount(mode, num, "Unattached", attachClusterCounts[N_CLS_HIST - 1] - attachClusterCounts[CL_att_adj], mClusterCounts.nTotal);
3010 PrintClusterCount(mode, num, "Removed (Strategy A)", attachClusterCounts[CL_att_adj] - attachClusterCounts[CL_prot], mClusterCounts.nTotal); // Attached + Adjacent (also fake) - protected
3011    PrintClusterCount(mode, num, "Inaccessible", mClusterCounts.nUnaccessible, mClusterCounts.nTotal); // No contribution from track >= 10 MeV, unattached or fake-attached/adjacent
3012 } else {
3013 PrintClusterCount(mode, num, "All Clusters", mClusterCounts.nTotal, mClusterCounts.nTotal);
3014 PrintClusterCount(mode, num, "Used in Physics", mClusterCounts.nPhysics, mClusterCounts.nTotal);
3015 PrintClusterCount(mode, num, "Protected", mClusterCounts.nProt, mClusterCounts.nTotal);
3016 PrintClusterCount(mode, num, "Unattached", mClusterCounts.nUnattached, mClusterCounts.nTotal);
3017 PrintClusterCount(mode, num, "Removed (Strategy A)", mClusterCounts.nTotal - mClusterCounts.nUnattached - mClusterCounts.nProt, mClusterCounts.nTotal);
3018 PrintClusterCount(mode, num, "Removed (Strategy B)", mClusterCounts.nTotal - mClusterCounts.nProt, mClusterCounts.nTotal);
3019 }
3020
3021 PrintClusterCount(mode, num, "Merged Loopers (Track Merging)", mClusterCounts.nMergedLooperConnected, mClusterCounts.nTotal);
3022 PrintClusterCount(mode, num, "Merged Loopers (Afterburner)", mClusterCounts.nMergedLooperUnconnected, mClusterCounts.nTotal);
3023 PrintClusterCount(mode, num, "Looping Legs (other)", mClusterCounts.nLoopers, mClusterCounts.nTotal);
3024 PrintClusterCount(mode, num, "High Inclination Angle", mClusterCounts.nHighIncl, mClusterCounts.nTotal);
3025 PrintClusterCount(mode, num, "Rejected", mClusterCounts.nRejected, mClusterCounts.nTotal);
3026 PrintClusterCount(mode, num, "Tube (> 200 MeV)", mClusterCounts.nTube, mClusterCounts.nTotal);
3027 PrintClusterCount(mode, num, "Tube (< 200 MeV)", mClusterCounts.nTube200, mClusterCounts.nTotal);
3028 PrintClusterCount(mode, num, "Low Pt < 50 MeV", mClusterCounts.nLowPt, mClusterCounts.nTotal);
3029 PrintClusterCount(mode, num, "Low Pt < 200 MeV", mClusterCounts.n200MeV, mClusterCounts.nTotal);
3030
3031 if (mcPresent() && (mQATasks & taskClusterAttach)) {
3032 PrintClusterCount(mode, num, "Tracks > 400 MeV", mClusterCounts.nAbove400, mClusterCounts.nTotal);
3033 PrintClusterCount(mode, num, "Fake Removed (> 400 MeV)", mClusterCounts.nFakeRemove400, mClusterCounts.nAbove400);
3034 PrintClusterCount(mode, num, "Full Fake Removed (> 400 MeV)", mClusterCounts.nFullFakeRemove400, mClusterCounts.nAbove400);
3035 PrintClusterCount(mode, num, "Tracks < 40 MeV", mClusterCounts.nBelow40, mClusterCounts.nTotal);
3036 PrintClusterCount(mode, num, "Fake Protect (< 40 MeV)", mClusterCounts.nFakeProtect40, mClusterCounts.nBelow40);
3037 }
3038 if (mcPresent() && (mQATasks & taskTrackStatistics)) {
3039 PrintClusterCount(mode, num, "Correctly Attached all-trk normalized", mClusterCounts.nCorrectlyAttachedNormalized, mClusterCounts.nTotal);
3040 PrintClusterCount(mode, num, "Correctly Attached non-fake normalized", mClusterCounts.nCorrectlyAttachedNormalizedNonFake, mClusterCounts.nTotal);
3041 }
3042 return num;
3043}
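
Taken together, the two functions form a small three-pass protocol: a call with mode 2 only counts how many entries will be produced, mode 1 books the corresponding rejection histograms, and mode 0 does the actual printing and filling. The call sites are not part of this excerpt, so the sketch below only illustrates the assumed sequence; the container sizing line is hypothetical.

// Assumed usage sketch (the real call sites are outside this excerpt):
// int32_t nCounters = DoClusterCounts(nullptr, 2); // pass 1: count entries only
// mHistClusterCount.resize(nCounters);             // hypothetical: size the histogram container
// DoClusterCounts(nullptr, 1);                     // pass 2: book one histogram per counter
// DoClusterCounts(attachCounts, 0);                // pass 3: print percentages and fill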
3044
3045void* GPUQA::AllocateScratchBuffer(size_t nBytes)
3046{
3047 mTrackingScratchBuffer.resize((nBytes + sizeof(mTrackingScratchBuffer[0]) - 1) / sizeof(mTrackingScratchBuffer[0]));
3048 return mTrackingScratchBuffer.data();
3049}
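
AllocateScratchBuffer sizes the backing vector by rounding the requested byte count up to a whole number of vector elements, so the returned pointer covers at least nBytes. A minimal sketch of the same rounding rule, assuming a plain uint64_t buffer rather than the member container used here:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: round a byte request up to whole elements (ceil division), then
// hand out the start of the storage, which is valid for at least nBytes bytes.
void* allocateScratch(std::vector<uint64_t>& buffer, size_t nBytes)
{
  size_t nElements = (nBytes + sizeof(uint64_t) - 1) / sizeof(uint64_t);
  buffer.resize(nElements);
  return buffer.data();
}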