GPUQA.cxx
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
15#define QA_DEBUG 0
16#define QA_TIMING 0
17
18#include "Rtypes.h" // Include ROOT header first, to use ROOT and disable replacements
19
20#include "TH1F.h"
21#include "TH2F.h"
22#include "TH1D.h"
23#include "TGraphAsymmErrors.h"
24#include "TCanvas.h"
25#include "TPad.h"
26#include "TLegend.h"
27#include "TColor.h"
28#include "TPaveText.h"
29#include "TF1.h"
30#include "TFile.h"
31#include "TTree.h"
32#include "TStyle.h"
33#include "TLatex.h"
34#include "TObjArray.h"
35#include <sys/stat.h>
36
37#include "GPUQA.h"
38#include "GPUTPCDef.h"
39#include "GPUTPCTrackingData.h"
40#include "GPUChainTracking.h"
41#include "GPUChainTrackingGetters.inc"
42#include "GPUTPCTrack.h"
43#include "GPUTPCTracker.h"
44#include "GPUTPCGMMergedTrack.h"
45#include "GPUTPCGMPropagator.h"
47#include "GPUTPCMCInfo.h"
48#include "GPUO2DataTypes.h"
49#include "GPUParam.inc"
51#include "GPUTPCConvertImpl.h"
52#include "TPCFastTransform.h"
54#include "GPUROOTDump.h"
57#include "GPUSettings.h"
58#include "GPUDefMacros.h"
59#ifdef GPUCA_O2_LIB
69#include "TPDGCode.h"
70#include "TParticlePDG.h"
71#include "TDatabasePDG.h"
72#endif
73#include "GPUQAHelper.h"
74#include <algorithm>
75#include <cstdio>
76#include <cinttypes>
77#include <fstream>
78
79#include "utils/timer.h"
80
81#include <oneapi/tbb.h>
82
83using namespace o2::gpu;
84
85namespace o2::gpu
86{
87struct checkClusterStateResult {
88 bool unattached = false;
89 float qpt = 0.f;
90 bool lowPt = false;
91 bool mev200 = false;
92 bool mergedLooperUnconnected = false;
93 bool mergedLooperConnected = false;
94 int32_t id = 0;
95 bool physics = false, protect = false;
96};
97} // namespace o2::gpu
98
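// checkClusterState decodes the attachment word of a single cluster: unattached, attached to a track below
// the q/pT rejection threshold (lowPt), below 200 MeV/c (mev200, i.e. |q/pT| > 5), attached to a merged looper,
// or otherwise physics / protected. With COUNT == true the corresponding counters in *counts are incremented.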
99template <bool COUNT, class T>
100inline checkClusterStateResult GPUQA::checkClusterState(uint32_t attach, T* counts) const
101{
102 checkClusterStateResult r;
103 r.unattached = attach == 0;
104 r.id = attach & gputpcgmmergertypes::attachTrackMask;
105 if (!r.unattached && !(attach & gputpcgmmergertypes::attachProtect)) {
106 r.qpt = fabsf(mTracking->mIOPtrs.mergedTracks[r.id].GetParam().GetQPt());
107 r.lowPt = r.qpt * mTracking->GetParam().qptB5Scaler > mTracking->GetParam().rec.tpc.rejectQPtB5;
108 r.mev200 = r.qpt > 5;
109 r.mergedLooperUnconnected = mTracking->mIOPtrs.mergedTracks[r.id].MergedLooperUnconnected();
110 r.mergedLooperConnected = mTracking->mIOPtrs.mergedTracks[r.id].MergedLooperConnected();
111 }
112 if (r.mev200) {
113 if constexpr (COUNT) {
114 counts->n200MeV++;
115 }
116 }
117 if (r.lowPt) {
118 if constexpr (COUNT) {
119 counts->nLowPt++;
120 }
121 } else if (r.mergedLooperUnconnected) {
122 if constexpr (COUNT) {
123 counts->nMergedLooperUnconnected++;
124 }
125 } else if (r.mergedLooperConnected) {
126 if constexpr (COUNT) {
127 counts->nMergedLooperConnected++;
128 }
129 } else if (attach) {
130 r.protect = !GPUTPCClusterRejection::GetRejectionStatus<COUNT>(attach, r.physics, counts, &r.mev200) && ((attach & gputpcgmmergertypes::attachProtect) || !GPUTPCClusterRejection::IsTrackRejected(mTracking->mIOPtrs.mergedTracks[r.id], mTracking->GetParam()));
131 }
132 return r;
133}
134
135static const GPUSettingsQA& GPUQA_GetConfig(GPUChainTracking* chain)
136{
137 static GPUSettingsQA defaultConfig;
138 if (chain && chain->mConfigQA) {
139 return *chain->mConfigQA;
140 } else {
141 return defaultConfig;
142 }
143}
144
145static const constexpr float LOG_PT_MIN = -1.;
146
147static constexpr float Y_MAX = 40;
148static constexpr float Z_MAX = 100;
149static constexpr float PT_MIN = 0.01; // TODO: Take from Param
150static constexpr float PT_MIN_PRIM = 0.1;
151static constexpr float PT_MIN_CLUST = 0.01;
152static constexpr float PT_MAX = 20;
153static constexpr float ETA_MAX = 1.5;
154static constexpr float ETA_MAX2 = 0.9;
155static constexpr int32_t PADROW_CHECK_MINCLS = 50;
156
157static constexpr bool CLUST_HIST_INT_SUM = false;
158
159static constexpr const int32_t COLORCOUNT = 12;
160
161static const constexpr char* EFF_TYPES[6] = {"Rec", "Clone", "Fake", "All", "RecAndClone", "MC"};
162static const constexpr char* FINDABLE_NAMES[2] = {"All", "Findable"};
163static const constexpr char* PRIM_NAMES[2] = {"Prim", "Sec"};
164static const constexpr char* PARAMETER_NAMES[5] = {"Y", "Z", "#Phi", "#lambda", "Relative #it{p}_{T}"};
165static const constexpr char* PARAMETER_NAMES_NATIVE[5] = {"Y", "Z", "sin(#Phi)", "tan(#lambda)", "q/#it{p}_{T} (curvature)"};
166static const constexpr char* VSPARAMETER_NAMES[6] = {"Y", "Z", "Phi", "Eta", "Pt", "Pt_log"};
167static const constexpr char* EFF_NAMES[3] = {"Efficiency", "Clone Rate", "Fake Rate"};
168static const constexpr char* EFFICIENCY_TITLES[4] = {"Efficiency (Primary Tracks, Findable)", "Efficiency (Secondary Tracks, Findable)", "Efficiency (Primary Tracks)", "Efficiency (Secondary Tracks)"};
169static const constexpr double SCALE[5] = {10., 10., 1000., 1000., 100.};
170static const constexpr double SCALE_NATIVE[5] = {10., 10., 1000., 1000., 1.};
171static const constexpr char* XAXIS_TITLES[5] = {"#it{y}_{mc} (cm)", "#it{z}_{mc} (cm)", "#Phi_{mc} (rad)", "#eta_{mc}", "#it{p}_{Tmc} (GeV/#it{c})"};
172static const constexpr char* AXIS_TITLES[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "#phi-#phi_{mc} (mrad) (Resolution)", "#lambda-#lambda_{mc} (mrad) (Resolution)", "(#it{p}_{T} - #it{p}_{Tmc}) / #it{p}_{Tmc} (%) (Resolution)"};
173static const constexpr char* AXIS_TITLES_NATIVE[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "sin(#phi)-sin(#phi_{mc}) (Resolution)", "tan(#lambda)-tan(#lambda_{mc}) (Resolution)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc}) (Resolution)"};
174static const constexpr char* AXIS_TITLES_PULL[5] = {"#it{y}-#it{y}_{mc}/#sigma_{y} (Pull)", "#it{z}-#it{z}_{mc}/#sigma_{z} (Pull)", "sin(#phi)-sin(#phi_{mc})/#sigma_{sin(#phi)} (Pull)", "tan(#lambda)-tan(#lambda_{mc})/#sigma_{tan(#lambda)} (Pull)",
175 "q*(q/#it{p}_{T} - q/#it{p}_{Tmc})/#sigma_{q/#it{p}_{T}} (Pull)"};
176static const constexpr char* CLUSTER_NAMES[GPUQA::N_CLS_HIST] = {"Correctly attached clusters", "Fake attached clusters", "Attached + adjacent clusters", "Fake adjacent clusters", "Clusters of reconstructed tracks", "Used in Physics", "Protected", "All clusters"};
177static const constexpr char* CLUSTER_TITLES[GPUQA::N_CLS_TYPE] = {"Clusters Pt Distribution / Attachment", "Clusters Pt Distribution / Attachment (relative to all clusters)", "Clusters Pt Distribution / Attachment (integrated)"};
178static const constexpr char* CLUSTER_NAMES_SHORT[GPUQA::N_CLS_HIST] = {"Attached", "Fake", "AttachAdjacent", "FakeAdjacent", "FoundTracks", "Physics", "Protected", "All"};
179static const constexpr char* CLUSTER_TYPES[GPUQA::N_CLS_TYPE] = {"", "Ratio", "Integral"};
180static const constexpr char* REJECTED_NAMES[3] = {"All", "Rejected", "Fraction"};
181static const constexpr int32_t COLORS_HEX[COLORCOUNT] = {0xB03030, 0x00A000, 0x0000C0, 0x9400D3, 0x19BBBF, 0xF25900, 0x7F7F7F, 0xFFD700, 0x07F707, 0x07F7F7, 0xF08080, 0x000000};
182
183static const constexpr int32_t CONFIG_DASHED_MARKERS = 0;
184
185static const constexpr float AXES_MIN[5] = {-Y_MAX, -Z_MAX, 0.f, -ETA_MAX, PT_MIN};
186static const constexpr float AXES_MAX[5] = {Y_MAX, Z_MAX, 2.f * M_PI, ETA_MAX, PT_MAX};
187static const constexpr int32_t AXIS_BINS[5] = {51, 51, 144, 31, 50};
188static const constexpr int32_t RES_AXIS_BINS[] = {1017, 113}; // Consecutive bin sizes, histograms are binned down until the maximum entry is 50, each bin size should evenly divide its predecessor.
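// Note: the two RES_AXIS_BINS values satisfy the divisibility requirement above, since 1017 = 9 * 113.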
189static const constexpr float RES_AXES[5] = {1., 1., 0.03, 0.03, 1.0};
190static const constexpr float RES_AXES_NATIVE[5] = {1., 1., 0.1, 0.1, 5.0};
191static const constexpr float PULL_AXIS = 10.f;
192
193std::vector<TColor*> GPUQA::mColors;
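// initColors registers COLORCOUNT custom ROOT colors with indices 10000 + i, converting each 0xRRGGBB entry of COLORS_HEX into normalized RGB components.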
194int32_t GPUQA::initColors()
195{
196 mColors.reserve(COLORCOUNT);
197 for (int32_t i = 0; i < COLORCOUNT; i++) {
198 float f1 = (float)((COLORS_HEX[i] >> 16) & 0xFF) / (float)0xFF;
199 float f2 = (float)((COLORS_HEX[i] >> 8) & 0xFF) / (float)0xFF;
200 float f3 = (float)((COLORS_HEX[i] >> 0) & 0xFF) / (float)0xFF;
201 mColors.emplace_back(new TColor(10000 + i, f1, f2, f3));
202 }
203 return 0;
204}
205static constexpr Color_t defaultColorNums[COLORCOUNT] = {kRed, kBlue, kGreen, kMagenta, kOrange, kAzure, kBlack, kYellow, kGray, kTeal, kSpring, kPink};
206
207#define TRACK_EXPECTED_REFERENCE_X_DEFAULT 81
208#ifdef GPUCA_TPC_GEOMETRY_O2
209static inline int32_t GPUQA_O2_ConvertFakeLabel(int32_t label) { return label >= 0x7FFFFFFE ? -1 : label; }
210inline uint32_t GPUQA::GetNMCCollissions() const { return mMCInfosCol.size(); }
211inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mMCInfosCol[iCol].num; }
212inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].num; }
213inline uint32_t GPUQA::GetNMCLabels() const { return mClNative->clustersMCTruth ? mClNative->clustersMCTruth->getIndexedSize() : 0; }
214inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mMCInfos[mMCInfosCol[iCol].first + iTrk]; }
215inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return mMCInfos[mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].first + label.getTrackID()]; }
216inline GPUQA::mcLabels_t GPUQA::GetMCLabel(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i); }
217inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return label.size(); }
218inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i).size(); }
219inline GPUQA::mcLabel_t GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mClNative->clustersMCTruth->getLabels(i)[j]; }
220inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(mClNative->clustersMCTruth->getLabels(i)[j].getTrackID()); }
221inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(label[j].getTrackID()); }
222inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return GPUQA_O2_ConvertFakeLabel(label.getTrackID()); }
223inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return mMCEventOffset[mClNative->clustersMCTruth->getLabels(i)[j].getSourceID()] + mClNative->clustersMCTruth->getLabels(i)[j].getEventID(); }
224inline const auto& GPUQA::GetClusterLabels() { return mClNative->clustersMCTruth; }
225inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return 1; }
226inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return 1; }
227inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return 1; }
228inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && mClNative && mClNative->clustersMCTruth && mMCInfos.size(); }
229uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return !label.isValid() ? 0 : (mMCEventOffset[label.getSourceID()] + label.getEventID()); }
230GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? MCCompLabel() : mTrackMCLabels[trackId]; }
231bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return l1.compare(l2) >= 0; }
232#define TRACK_EXPECTED_REFERENCE_X 78
233#else
234inline GPUQA::mcLabelI_t::mcLabelI_t(const GPUQA::mcLabel_t& l) : track(l.fMCID) {}
235inline bool GPUQA::mcLabelI_t::operator==(const GPUQA::mcLabel_t& l) { return AbsLabelID(track) == l.fMCID; }
236inline uint32_t GPUQA::GetNMCCollissions() const { return 1; }
237inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mTracking->mIOPtrs.nMCInfosTPC; }
238inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mTracking->mIOPtrs.nMCInfosTPC; }
239inline uint32_t GPUQA::GetNMCLabels() const { return mTracking->mIOPtrs.nMCLabelsTPC; }
240inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mTracking->mIOPtrs.mcInfosTPC[AbsLabelID(iTrk)]; }
241inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return GetMCTrack(label.fMCID, 0); }
242inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabelI_t& label) { return GetMCTrack(label.track, 0); }
243inline const GPUQA::mcLabels_t& GPUQA::GetMCLabel(uint32_t i) { return mTracking->mIOPtrs.mcLabelsTPC[i]; }
244inline const GPUQA::mcLabel_t& GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j]; }
245inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return 3; }
246inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return 3; }
247inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fMCID; }
248inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fMCID; }
249inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return label.fMCID; }
250inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return 0; }
251
252inline const auto& GPUQA::GetClusterLabels() { return mTracking->mIOPtrs.mcLabelsTPC; }
253inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fWeight; }
254inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fWeight; }
255inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return label.fWeight; }
256inline int32_t GPUQA::FakeLabelID(int32_t id) { return id < 0 ? id : (-2 - id); }
257inline int32_t GPUQA::AbsLabelID(int32_t id) { return id >= 0 ? id : (-id - 2); }
258inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && GetNMCLabels() && GetNMCTracks(0); }
259uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return 0; }
260GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? mcLabelI_t() : mTrackMCLabels[trackId]; }
261bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return AbsLabelID(l1) == AbsLabelID(l2); }
262#define TRACK_EXPECTED_REFERENCE_X TRACK_EXPECTED_REFERENCE_X_DEFAULT
263#endif
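// GetMCTrackObj addresses per-MC-track storage: the outer index is the flattened collision number (mMCEventOffset[source] + event), the inner index the track ID of the label.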
264template <class T>
265inline auto& GPUQA::GetMCTrackObj(T& obj, const GPUQA::mcLabelI_t& l)
266{
267 return obj[mMCEventOffset[l.getSourceID()] + l.getEventID()][l.getTrackID()];
268}
269
270template <>
271auto GPUQA::getHistArray<TH1F>()
272{
273 return std::make_pair(mHist1D, &mHist1D_pos);
274}
275template <>
276auto GPUQA::getHistArray<TH2F>()
277{
278 return std::make_pair(mHist2D, &mHist2D_pos);
279}
280template <>
281auto GPUQA::getHistArray<TH1D>()
282{
283 return std::make_pair(mHist1Dd, &mHist1Dd_pos);
284}
285template <>
286auto GPUQA::getHistArray<TGraphAsymmErrors>()
287{
288 return std::make_pair(mHistGraph, &mHistGraph_pos);
289}
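// createHist either appends a new histogram to the owned vector or, when histograms were supplied externally via
// loadHistograms, checks the next slot against the expected name. The address of the caller's pointer is recorded in
// the position list so it can be re-bound after the vectors reallocate (see the loops at the end of InitQACreateHistograms).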
290template <class T, typename... Args>
291void GPUQA::createHist(T*& h, const char* name, Args... args)
292{
293 const auto& p = getHistArray<T>();
294 if (mHaveExternalHists) {
295 if (p.first->size() <= p.second->size()) {
296 GPUError("Array sizes mismatch: Histograms %lu <= Positions %lu", p.first->size(), p.second->size());
297 throw std::runtime_error("Incoming histogram array incomplete");
298 }
299 if (strcmp((*p.first)[p.second->size()].GetName(), name)) {
300 GPUError("Histogram name mismatch: in array %s, trying to create %s", (*p.first)[p.second->size()].GetName(), name);
301 throw std::runtime_error("Incoming histogram has incorrect name");
302 }
303 } else {
304 if constexpr (std::is_same_v<T, TGraphAsymmErrors>) {
305 p.first->emplace_back();
306 p.first->back().SetName(name);
307 } else {
308 p.first->emplace_back(name, args...);
309 }
310 }
311 h = &((*p.first)[p.second->size()]);
312 p.second->emplace_back(&h);
313}
314
315namespace o2::gpu::internal
316{
317struct GPUQAGarbageCollection {
318 std::tuple<std::vector<std::unique_ptr<TCanvas>>, std::vector<std::unique_ptr<TLegend>>, std::vector<std::unique_ptr<TPad>>, std::vector<std::unique_ptr<TLatex>>, std::vector<std::unique_ptr<TH1D>>> v;
319};
320} // namespace o2::gpu::internal
321
322template <class T, typename... Args>
323T* GPUQA::createGarbageCollected(Args... args)
324{
325 auto& v = std::get<std::vector<std::unique_ptr<T>>>(mGarbageCollector->v);
326 v.emplace_back(std::make_unique<T>(args...));
327 return v.back().get();
328}
329void GPUQA::clearGarbagageCollector()
330{
331 std::get<std::vector<std::unique_ptr<TPad>>>(mGarbageCollector->v).clear(); // Make sure to delete TPad first due to ROOT ownership (std::tuple has no defined order in its destructor)
332 std::apply([](auto&&... args) { ((args.clear()), ...); }, mGarbageCollector->v);
333}
334
335GPUQA::GPUQA(GPUChainTracking* chain, const GPUSettingsQA* config, const GPUParam* param) : mTracking(chain), mConfig(config ? *config : GPUQA_GetConfig(chain)), mParam(param ? param : &chain->GetParam()), mGarbageCollector(std::make_unique<internal::GPUQAGarbageCollection>())
336{
337 mMCEventOffset.resize(1, 0);
338}
339
340GPUQA::~GPUQA()
341{
342 if (mQAInitialized && !mHaveExternalHists) {
343 delete mHist1D;
344 delete mHist2D;
345 delete mHist1Dd;
346 delete mHistGraph;
347 }
348 clearGarbagageCollector(); // Needed to guarantee correct order for ROOT ownership
349}
350
351bool GPUQA::clusterRemovable(int32_t attach, bool prot) const
352{
353 const auto& r = checkClusterState<false>(attach);
354 if (prot) {
355 return r.protect || r.physics;
356 }
357 return (!r.unattached && !r.physics && !r.protect);
358}
359
360template <class T>
361void GPUQA::SetAxisSize(T* e)
362{
363 e->GetYaxis()->SetTitleOffset(1.0);
364 e->GetYaxis()->SetTitleSize(0.045);
365 e->GetYaxis()->SetLabelSize(0.045);
366 e->GetXaxis()->SetTitleOffset(1.03);
367 e->GetXaxis()->SetTitleSize(0.045);
368 e->GetXaxis()->SetLabelOffset(-0.005);
369 e->GetXaxis()->SetLabelSize(0.045);
370}
371
372void GPUQA::SetLegend(TLegend* l, bool bigText)
373{
374 l->SetTextFont(72);
375 l->SetTextSize(bigText ? 0.03 : 0.016);
376 l->SetFillColor(0);
377}
378
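// CreateLogAxis returns nbins + 1 logarithmically spaced bin edges from xmin to xmax (the first edge is xmin itself);
// ownership passes to the caller, which wraps the array in a std::unique_ptr at the call sites.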
379double* GPUQA::CreateLogAxis(int32_t nbins, float xmin, float xmax)
380{
381 float logxmin = std::log10(xmin);
382 float logxmax = std::log10(xmax);
383 float binwidth = (logxmax - logxmin) / nbins;
384
385 double* xbins = new double[nbins + 1];
386
387 xbins[0] = xmin;
388 for (int32_t i = 1; i <= nbins; i++) {
389 xbins[i] = std::pow(10, logxmin + i * binwidth);
390 }
391 return xbins;
392}
393
394void GPUQA::ChangePadTitleSize(TPad* p, float size)
395{
396 p->Update();
397 TPaveText* pt = (TPaveText*)(p->GetPrimitive("title"));
398 if (pt == nullptr) {
399 GPUError("Error changing title");
400 } else {
401 pt->SetTextSize(size);
402 p->Modified();
403 }
404}
405
406void GPUQA::DrawHisto(TH1* histo, char* filename, char* options)
407{
408 TCanvas tmp;
409 tmp.cd();
410 histo->Draw(options);
411 tmp.Print(filename);
412}
413
414void GPUQA::doPerfFigure(float x, float y, float size)
415{
416 if (mConfig.perfFigure == "") {
417 return;
418 }
419 static constexpr const char* str_perf_figure_1 = "ALICE Performance";
420 static constexpr const char* str_perf_figure_2_mc = "MC, Pb#minusPb, #sqrt{s_{NN}} = 5.36 TeV";
421 static constexpr const char* str_perf_figure_2_data = "Pb#minusPb, #sqrt{s_{NN}} = 5.36 TeV";
422 const char* str_perf_figure_2 = (mConfig.perfFigure == "mc" || mConfig.perfFigure == "MC") ? str_perf_figure_2_mc : (mConfig.perfFigure == "data" ? str_perf_figure_2_data : mConfig.perfFigure.c_str());
423
424 TLatex* t = createGarbageCollected<TLatex>(); // TODO: We could perhaps put everything in a legend, to get a white background if there is a grid
425 t->SetNDC(kTRUE);
426 t->SetTextColor(1);
427 t->SetTextSize(size);
428 t->DrawLatex(x, y, str_perf_figure_1);
429 t->SetTextSize(size * 0.8);
430 t->DrawLatex(x, y - 0.01 - size, str_perf_figure_2);
431}
432
433void GPUQA::SetMCTrackRange(int32_t min, int32_t max)
434{
435 mMCTrackMin = min;
436 mMCTrackMax = max;
437}
438
439int32_t GPUQA::InitQACreateHistograms()
440{
441 char name[2048], fname[1024];
442 if (mQATasks & taskTrackingEff) {
443 // Create Efficiency Histograms
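 // mEff is indexed as [type][findable][prim][vs-parameter], with the names taken from EFF_TYPES, FINDABLE_NAMES,
 // PRIM_NAMES and VSPARAMETER_NAMES; the pT axis (l == 4) uses logarithmic binning, and mEffResult exists only for the first four types.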
444 for (int32_t i = 0; i < 6; i++) {
445 for (int32_t j = 0; j < 2; j++) {
446 for (int32_t k = 0; k < 2; k++) {
447 for (int32_t l = 0; l < 5; l++) {
448 snprintf(name, 2048, "%s%s%s%sVs%s", "tracks", EFF_TYPES[i], FINDABLE_NAMES[j], PRIM_NAMES[k], VSPARAMETER_NAMES[l]);
449 if (l == 4) {
450 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], k == 0 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
451 createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], binsPt.get());
452 } else {
453 createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], AXES_MIN[l], AXES_MAX[l]);
454 }
455 if (!mHaveExternalHists) {
456 mEff[i][j][k][l]->Sumw2();
457 }
458 strcat(name, "_eff");
459 if (i < 4) {
460 createHist(mEffResult[i][j][k][l], name);
461 }
462 }
463 }
464 }
465 }
466 }
467
468 // Create Resolution Histograms
469 if (mQATasks & taskTrackingRes) {
470 for (int32_t i = 0; i < 5; i++) {
471 for (int32_t j = 0; j < 5; j++) {
472 snprintf(name, 2048, "rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
473 snprintf(fname, 1024, "mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
474 if (j == 4) {
475 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
476 createHist(mRes[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
477 createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
478 } else {
479 createHist(mRes[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
480 createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
481 }
482 snprintf(name, 2048, "res_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
483 const float* axis = mConfig.nativeFitResolutions ? RES_AXES_NATIVE : RES_AXES;
484 const int32_t nbins = i == 4 && mConfig.nativeFitResolutions ? (10 * RES_AXIS_BINS[0]) : RES_AXIS_BINS[0];
485 if (j == 4) {
486 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
487 createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], binsPt.get());
488 } else {
489 createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
490 }
491 }
492 }
493 }
494
495 // Create Pull Histograms
496 if (mQATasks & taskTrackingResPull) {
497 for (int32_t i = 0; i < 5; i++) {
498 for (int32_t j = 0; j < 5; j++) {
499 snprintf(name, 2048, "pull_rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
500 snprintf(fname, 1024, "pull_mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
501 if (j == 4) {
502 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
503 createHist(mPull[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
504 createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
505 } else {
506 createHist(mPull[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
507 createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
508 }
509 snprintf(name, 2048, "pull_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
510 if (j == 4) {
511 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
512 createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], binsPt.get());
513 } else {
514 createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
515 }
516 }
517 }
518 }
519
520 // Create Cluster Histograms
521 if (mQATasks & taskClusterAttach) {
522 for (int32_t i = 0; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
523 int32_t ioffset = i >= (2 * N_CLS_HIST - 1) ? (2 * N_CLS_HIST - 1) : i >= N_CLS_HIST ? N_CLS_HIST : 0;
524 int32_t itype = i >= (2 * N_CLS_HIST - 1) ? 2 : i >= N_CLS_HIST ? 1 : 0;
525 snprintf(name, 2048, "clusters%s%s", CLUSTER_NAMES_SHORT[i - ioffset], CLUSTER_TYPES[itype]);
526 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
527 createHist(mClusters[i], name, name, AXIS_BINS[4], binsPt.get());
528 }
529
530 createHist(mPadRow[0], "padrow0", "padrow0", GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
531 createHist(mPadRow[1], "padrow1", "padrow1", 100.f, -0.2f, 0.2f, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
532 createHist(mPadRow[2], "padrow2", "padrow2", 100.f, -0.2f, 0.2f, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
533 createHist(mPadRow[3], "padrow3", "padrow3", 100.f, 0, 300000, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
534 }
535
536 if (mQATasks & taskTrackStatistics) {
537 // Create Tracks Histograms
538 for (int32_t i = 0; i < 2; i++) {
539 snprintf(name, 2048, i ? "nrows_with_cluster" : "nclusters");
540 createHist(mNCl[i], name, name, 160, 0, 159);
541 }
542 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
543 createHist(mTrackPt, "tracks_pt", "tracks_pt", AXIS_BINS[4], binsPt.get());
544 const uint32_t maxTime = (mTracking && mTracking->GetParam().continuousMaxTimeBin > 0) ? mTracking->GetParam().continuousMaxTimeBin : TPC_MAX_TIME_BIN_TRIGGERED;
545 createHist(mT0[0], "tracks_t0", "tracks_t0", (maxTime + 1) / 10, 0, maxTime);
546 createHist(mT0[1], "tracks_t0_res", "tracks_t0_res", 1000, -100, 100);
547 createHist(mClXY, "clXY", "clXY", 1000, -250, 250, 1000, -250, 250); // TODO: Pass name only once
548 }
549 if (mQATasks & taskClusterRejection) {
550 const int padCount = GPUTPCGeometry::NPads(GPUCA_ROW_COUNT - 1);
551 for (int32_t i = 0; i < 3; i++) {
552 snprintf(name, 2048, "clrej_%d", i);
553 createHist(mClRej[i], name, name, 2 * padCount, -padCount / 2 + 0.5f, padCount / 2 - 0.5f, GPUCA_ROW_COUNT, 0, GPUCA_ROW_COUNT - 1);
554 }
555 createHist(mClRejP, "clrejp", "clrejp", GPUCA_ROW_COUNT, 0, GPUCA_ROW_COUNT - 1);
556 }
557
558 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
559 int32_t num = DoClusterCounts(nullptr, 2);
560 mHistClusterCount.resize(num);
561 DoClusterCounts(nullptr, 1);
562 }
563
564 for (uint32_t i = 0; i < mHist1D->size(); i++) {
565 *mHist1D_pos[i] = &(*mHist1D)[i];
566 }
567 for (uint32_t i = 0; i < mHist2D->size(); i++) {
568 *mHist2D_pos[i] = &(*mHist2D)[i];
569 }
570 for (uint32_t i = 0; i < mHist1Dd->size(); i++) {
571 *mHist1Dd_pos[i] = &(*mHist1Dd)[i];
572 }
573 for (uint32_t i = 0; i < mHistGraph->size(); i++) {
574 *mHistGraph_pos[i] = &(*mHistGraph)[i];
575 }
576
577 return 0;
578}
579
580int32_t GPUQA::loadHistograms(std::vector<TH1F>& i1, std::vector<TH2F>& i2, std::vector<TH1D>& i3, std::vector<TGraphAsymmErrors>& i4, int32_t tasks)
581{
582 if (tasks == tasksAutomatic) {
583 tasks = tasksDefault;
584 }
585 if (mQAInitialized && (!mHaveExternalHists || tasks != mQATasks)) {
586 throw std::runtime_error("QA not initialized or initialized with different task array");
587 }
588 mHist1D = &i1;
589 mHist2D = &i2;
590 mHist1Dd = &i3;
591 mHistGraph = &i4;
592 mHist1D_pos.clear();
593 mHist2D_pos.clear();
594 mHist1Dd_pos.clear();
595 mHistGraph_pos.clear();
596 mHaveExternalHists = true;
597 if (mConfig.noMC) {
598 tasks &= tasksAllNoQC;
599 }
600 mQATasks = tasks;
601 if (InitQACreateHistograms()) {
602 return 1;
603 }
604 mQAInitialized = true;
605 return 0;
606}
607
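// DumpO2MCData writes three length-prefixed raw arrays (mMCInfos, mMCInfosCol, mMCEventOffset); ReadO2MCData below reads them back in the same order.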
608void GPUQA::DumpO2MCData(const char* filename) const
609{
610 FILE* fp = fopen(filename, "w+b");
611 if (fp == nullptr) {
612 return;
613 }
614 uint32_t n = mMCInfos.size();
615 fwrite(&n, sizeof(n), 1, fp);
616 fwrite(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp);
617 n = mMCInfosCol.size();
618 fwrite(&n, sizeof(n), 1, fp);
619 fwrite(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp);
620 n = mMCEventOffset.size();
621 fwrite(&n, sizeof(n), 1, fp);
622 fwrite(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp);
623 fclose(fp);
624}
625
626int32_t GPUQA::ReadO2MCData(const char* filename)
627{
628 FILE* fp = fopen(filename, "rb");
629 if (fp == nullptr) {
630 return 1;
631 }
632 uint32_t n;
633 uint32_t x;
634 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
635 fclose(fp);
636 return 1;
637 }
638 mMCInfos.resize(n);
639 if (fread(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp) != n) {
640 fclose(fp);
641 return 1;
642 }
643 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
644 fclose(fp);
645 return 1;
646 }
647 mMCInfosCol.resize(n);
648 if (fread(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp) != n) {
649 fclose(fp);
650 return 1;
651 }
652 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
653 fclose(fp);
654 return 1;
655 }
656 mMCEventOffset.resize(n);
657 if (fread(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp) != n) {
658 fclose(fp);
659 return 1;
660 }
661 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
662 printf("Read %ld bytes MC Infos\n", ftell(fp));
663 }
664 fclose(fp);
665 if (mTracking) {
666 CopyO2MCtoIOPtr(&mTracking->mIOPtrs);
667 }
668 return 0;
669}
670
671void GPUQA::CopyO2MCtoIOPtr(GPUTrackingInOutPointers* ptr)
672{
673 ptr->mcInfosTPC = mMCInfos.data();
674 ptr->nMCInfosTPC = mMCInfos.size();
675 ptr->mcInfosTPCCol = mMCInfosCol.data();
676 ptr->nMCInfosTPCCol = mMCInfosCol.size();
677}
678
679void GPUQA::InitO2MCData(GPUTrackingInOutPointers* updateIOPtr)
680{
681#ifdef GPUCA_O2_LIB
682 if (!mO2MCDataLoaded) {
683 HighResTimer timer(mTracking && mTracking->GetProcessingSettings().debugLevel);
684 if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
685 GPUInfo("Start reading O2 Track MC information");
686 }
687 static constexpr float PRIM_MAX_T = 0.01f;
688
689 o2::steer::MCKinematicsReader mcReader("collisioncontext.root");
690 std::vector<int32_t> refId;
691
692 auto dc = o2::steer::DigitizationContext::loadFromFile("collisioncontext.root");
693 const auto& evrec = dc->getEventRecords();
694 const auto& evparts = dc->getEventParts();
695 std::vector<std::vector<float>> evTimeBins(mcReader.getNSources());
696 for (uint32_t i = 0; i < evTimeBins.size(); i++) {
697 evTimeBins[i].resize(mcReader.getNEvents(i), -100.f);
698 }
699 for (uint32_t i = 0; i < evrec.size(); i++) {
700 const auto& ir = evrec[i];
701 for (uint32_t j = 0; j < evparts[i].size(); j++) {
702 const int iSim = evparts[i][j].sourceID;
703 const int iEv = evparts[i][j].entryID;
704 if (iSim == o2::steer::QEDSOURCEID || ir.differenceInBC(o2::raw::HBFUtils::Instance().getFirstIR()) >= 0) {
707 if (evTimeBins[iSim][iEv] >= 0) {
708 throw std::runtime_error("Multiple time bins for same MC collision found");
709 }
710 evTimeBins[iSim][iEv] = timebin;
711 }
712 }
713 }
714
715 uint32_t nSimSources = mcReader.getNSources();
716 mMCEventOffset.resize(nSimSources);
717 uint32_t nSimTotalEvents = 0;
718 uint32_t nSimTotalTracks = 0;
719 for (uint32_t i = 0; i < nSimSources; i++) {
720 mMCEventOffset[i] = nSimTotalEvents;
721 nSimTotalEvents += mcReader.getNEvents(i);
722 }
723
724 mMCInfosCol.resize(nSimTotalEvents);
725 for (int32_t iSim = 0; iSim < mcReader.getNSources(); iSim++) {
726 for (int32_t i = 0; i < mcReader.getNEvents(iSim); i++) {
727 const float timebin = evTimeBins[iSim][i];
728
729 const std::vector<o2::MCTrack>& tracks = mcReader.getTracks(iSim, i);
730 const std::vector<o2::TrackReference>& trackRefs = mcReader.getTrackRefsByEvent(iSim, i);
731
732 refId.resize(tracks.size());
733 std::fill(refId.begin(), refId.end(), -1);
734 for (uint32_t j = 0; j < trackRefs.size(); j++) {
735 if (trackRefs[j].getDetectorId() == o2::detectors::DetID::TPC) {
736 int32_t trkId = trackRefs[j].getTrackID();
737 if (refId[trkId] == -1) {
738 refId[trkId] = j;
739 }
740 }
741 }
742 mMCInfosCol[mMCEventOffset[iSim] + i].first = mMCInfos.size();
743 mMCInfosCol[mMCEventOffset[iSim] + i].num = tracks.size();
744 mMCInfos.resize(mMCInfos.size() + tracks.size());
745 for (uint32_t j = 0; j < tracks.size(); j++) {
746 auto& info = mMCInfos[mMCInfosCol[mMCEventOffset[iSim] + i].first + j];
747 const auto& trk = tracks[j];
748 TParticlePDG* particle = TDatabasePDG::Instance()->GetParticle(trk.GetPdgCode());
749 Int_t pid = -1;
750 if (abs(trk.GetPdgCode()) == kElectron) {
751 pid = 0;
752 }
753 if (abs(trk.GetPdgCode()) == kMuonMinus) {
754 pid = 1;
755 }
756 if (abs(trk.GetPdgCode()) == kPiPlus) {
757 pid = 2;
758 }
759 if (abs(trk.GetPdgCode()) == kKPlus) {
760 pid = 3;
761 }
762 if (abs(trk.GetPdgCode()) == kProton) {
763 pid = 4;
764 }
765
766 info.charge = particle ? particle->Charge() : 0;
767 info.prim = trk.T() < PRIM_MAX_T;
768 info.primDaughters = 0;
769 if (trk.getFirstDaughterTrackId() != -1) {
770 for (int32_t k = trk.getFirstDaughterTrackId(); k <= trk.getLastDaughterTrackId(); k++) {
771 if (tracks[k].T() < PRIM_MAX_T) {
772 info.primDaughters = 1;
773 break;
774 }
775 }
776 }
777 info.pid = pid;
778 info.t0 = timebin;
779 if (refId[j] >= 0) {
780 const auto& trkRef = trackRefs[refId[j]];
781 info.x = trkRef.X();
782 info.y = trkRef.Y();
783 info.z = trkRef.Z();
784 info.pX = trkRef.Px();
785 info.pY = trkRef.Py();
786 info.pZ = trkRef.Pz();
787 info.genRadius = std::sqrt(trk.GetStartVertexCoordinatesX() * trk.GetStartVertexCoordinatesX() + trk.GetStartVertexCoordinatesY() * trk.GetStartVertexCoordinatesY() + trk.GetStartVertexCoordinatesZ() * trk.GetStartVertexCoordinatesZ());
788 } else {
789 info.x = info.y = info.z = info.pX = info.pY = info.pZ = 0;
790 info.genRadius = 0;
791 }
792 }
793 }
794 }
795 if (timer.IsRunning()) {
796 GPUInfo("Finished reading O2 Track MC information (%f seconds)", timer.GetCurrentElapsedTime());
797 }
798 mO2MCDataLoaded = true;
799 }
800 if (updateIOPtr) {
801 CopyO2MCtoIOPtr(updateIOPtr);
802 }
803#endif
804}
805
806int32_t GPUQA::InitQA(int32_t tasks)
807{
808 if (mQAInitialized) {
809 throw std::runtime_error("QA already initialized");
810 }
811 if (tasks == tasksAutomatic) {
812 tasks = tasksDefault;
813 }
814
815 mHist1D = new std::vector<TH1F>;
816 mHist2D = new std::vector<TH2F>;
817 mHist1Dd = new std::vector<TH1D>;
818 mHistGraph = new std::vector<TGraphAsymmErrors>;
819 if (mConfig.noMC) {
820 tasks &= tasksAllNoQC;
821 }
822 mQATasks = tasks;
823
824 if (mTracking->GetProcessingSettings().qcRunFraction != 100.f && mQATasks != taskClusterCounts) {
825 throw std::runtime_error("QA with qcRunFraction only supported for taskClusterCounts");
826 }
827
828 if (mTracking) {
829 mClNative = mTracking->mIOPtrs.clustersNative;
830 }
831
832 if (InitQACreateHistograms()) {
833 return 1;
834 }
835
836 if (mConfig.enableLocalOutput) {
837 mkdir(mConfig.plotsDir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
838 }
839
840#ifdef GPUCA_O2_LIB
841 if (!mConfig.noMC) {
842 InitO2MCData(mTracking ? &mTracking->mIOPtrs : nullptr);
843 }
844#endif
845
846 if (mConfig.matchMCLabels.size()) {
847 uint32_t nFiles = mConfig.matchMCLabels.size();
848 std::vector<std::unique_ptr<TFile>> files;
849 std::vector<std::vector<std::vector<int32_t>>*> labelsBuffer(nFiles);
850 std::vector<std::vector<std::vector<int32_t>>*> effBuffer(nFiles);
851 for (uint32_t i = 0; i < nFiles; i++) {
852 files.emplace_back(std::make_unique<TFile>(mConfig.matchMCLabels[i].c_str()));
853 labelsBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcLabelBuffer");
854 effBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcEffBuffer");
855 if (labelsBuffer[i] == nullptr || effBuffer[i] == nullptr) {
856 GPUError("Error opening / reading from labels file %u/%s: %p %p", i, mConfig.matchMCLabels[i].c_str(), (void*)labelsBuffer[i], (void*)effBuffer[i]);
857 exit(1);
858 }
859 }
860
861 mGoodTracks.resize(labelsBuffer[0]->size());
862 mGoodHits.resize(labelsBuffer[0]->size());
863 for (uint32_t iEvent = 0; iEvent < labelsBuffer[0]->size(); iEvent++) {
864 std::vector<bool> labelsOK((*effBuffer[0])[iEvent].size());
865 for (uint32_t k = 0; k < (*effBuffer[0])[iEvent].size(); k++) {
866 labelsOK[k] = false;
867 for (uint32_t l = 0; l < nFiles; l++) {
868 if ((*effBuffer[0])[iEvent][k] != (*effBuffer[l])[iEvent][k]) {
869 labelsOK[k] = true;
870 break;
871 }
872 }
873 }
874 mGoodTracks[iEvent].resize((*labelsBuffer[0])[iEvent].size());
875 for (uint32_t k = 0; k < (*labelsBuffer[0])[iEvent].size(); k++) {
876 if ((*labelsBuffer[0])[iEvent][k] == MC_LABEL_INVALID) {
877 continue;
878 }
879 mGoodTracks[iEvent][k] = labelsOK[abs((*labelsBuffer[0])[iEvent][k])];
880 }
881 }
882 }
883 mQAInitialized = true;
884 return 0;
885}
886
887void GPUQA::RunQA(bool matchOnly, const std::vector<o2::tpc::TrackTPC>* tracksExternal, const std::vector<o2::MCCompLabel>* tracksExtMC, const o2::tpc::ClusterNativeAccess* clNative)
888{
889 if (!mQAInitialized) {
890 throw std::runtime_error("QA not initialized");
891 }
892 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
893 GPUInfo("Running QA - Mask %d, Efficiency %d, Resolution %d, Pulls %d, Cluster Attachment %d, Track Statistics %d, Cluster Counts %d", mQATasks, (int32_t)(mQATasks & taskTrackingEff), (int32_t)(mQATasks & taskTrackingRes), (int32_t)(mQATasks & taskTrackingResPull), (int32_t)(mQATasks & taskClusterAttach), (int32_t)(mQATasks & taskTrackStatistics), (int32_t)(mQATasks & taskClusterCounts));
894 }
895 if (!clNative && mTracking) {
896 clNative = mTracking->mIOPtrs.clustersNative;
897 }
898 mClNative = clNative;
899
900#ifdef GPUCA_TPC_GEOMETRY_O2
901 uint32_t nSimEvents = GetNMCCollissions();
902 if (mTrackMCLabelsReverse.size() < nSimEvents) {
903 mTrackMCLabelsReverse.resize(nSimEvents);
904 }
905 if (mRecTracks.size() < nSimEvents) {
906 mRecTracks.resize(nSimEvents);
907 }
908 if (mFakeTracks.size() < nSimEvents) {
909 mFakeTracks.resize(nSimEvents);
910 }
911 if (mMCParam.size() < nSimEvents) {
912 mMCParam.resize(nSimEvents);
913 }
914#endif
915
916 // Initialize Arrays
917 uint32_t nReconstructedTracks = 0;
918 if (tracksExternal) {
919#ifdef GPUCA_O2_LIB
920 nReconstructedTracks = tracksExternal->size();
921#endif
922 } else {
923 nReconstructedTracks = mTracking->mIOPtrs.nMergedTracks;
924 }
925 mTrackMCLabels.resize(nReconstructedTracks);
926 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
927 mTrackMCLabelsReverse[iCol].resize(GetNMCTracks(iCol));
928 mRecTracks[iCol].resize(GetNMCTracks(iCol));
929 mFakeTracks[iCol].resize(GetNMCTracks(iCol));
930 mMCParam[iCol].resize(GetNMCTracks(iCol));
931 memset(mRecTracks[iCol].data(), 0, mRecTracks[iCol].size() * sizeof(mRecTracks[iCol][0]));
932 memset(mFakeTracks[iCol].data(), 0, mFakeTracks[iCol].size() * sizeof(mFakeTracks[iCol][0]));
933 for (size_t i = 0; i < mTrackMCLabelsReverse[iCol].size(); i++) {
934 mTrackMCLabelsReverse[iCol][i] = -1;
935 }
936 }
937 if (mQATasks & taskClusterAttach && GetNMCLabels()) {
938 mClusterParam.resize(GetNMCLabels());
939 memset(mClusterParam.data(), 0, mClusterParam.size() * sizeof(mClusterParam[0]));
940 }
941 HighResTimer timer(QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2));
942
943 mNEvents++;
944 if (mConfig.writeMCLabels) {
945 mcEffBuffer.resize(mNEvents);
946 mcLabelBuffer.resize(mNEvents);
947 mcEffBuffer[mNEvents - 1].resize(GetNMCTracks(0));
948 mcLabelBuffer[mNEvents - 1].resize(nReconstructedTracks);
949 }
950
951 bool mcAvail = mcPresent() || tracksExtMC;
952
953 if (mcAvail) { // Assign Track MC Labels
954 if (tracksExternal) {
955#ifdef GPUCA_O2_LIB
956 for (uint32_t i = 0; i < tracksExternal->size(); i++) {
957 mTrackMCLabels[i] = (*tracksExtMC)[i];
958 }
959#endif
960 } else {
961 tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nReconstructedTracks, (QA_DEBUG == 0) ? 32 : nReconstructedTracks), [&](const tbb::blocked_range<uint32_t>& range) {
962 auto acc = GPUTPCTrkLbl<true, mcLabelI_t>(GetClusterLabels(), 1.f - mConfig.recThreshold);
963 for (auto i = range.begin(); i < range.end(); i++) {
964 acc.reset();
965 int32_t nClusters = 0;
966 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
967 std::vector<mcLabel_t> labels;
968 for (uint32_t k = 0; k < track.NClusters(); k++) {
969 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
970 continue;
971 }
972 nClusters++;
973 uint32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
974 if (hitId >= GetNMCLabels()) {
975 GPUError("Invalid hit id %u > %d (nClusters %d)", hitId, GetNMCLabels(), clNative ? clNative->nClustersTotal : 0);
976 throw std::runtime_error("qa error");
977 }
978 acc.addLabel(hitId);
979 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
980 if (GetMCLabelID(hitId, j) >= (int32_t)GetNMCTracks(GetMCLabelCol(hitId, j))) {
981 GPUError("Invalid label %d > %d (hit %d, label %d, col %d)", GetMCLabelID(hitId, j), GetNMCTracks(GetMCLabelCol(hitId, j)), hitId, j, (int32_t)GetMCLabelCol(hitId, j));
982 throw std::runtime_error("qa error");
983 }
984 if (GetMCLabelID(hitId, j) >= 0) {
985 if (QA_DEBUG >= 3 && track.OK()) {
986 GPUInfo("Track %d Cluster %u Label %d: %d (%f)", i, k, j, GetMCLabelID(hitId, j), GetMCLabelWeight(hitId, j));
987 }
988 }
989 }
990 }
991
992 float maxweight, sumweight;
993 int32_t maxcount;
994 auto maxLabel = acc.computeLabel(&maxweight, &sumweight, &maxcount);
995 mTrackMCLabels[i] = maxLabel;
996 if (QA_DEBUG && track.OK() && GetNMCTracks(maxLabel) > (uint32_t)maxLabel.getTrackID()) {
997 const mcInfo_t& mc = GetMCTrack(maxLabel);
998 GPUInfo("Track %d label %d (fake %d) weight %f clusters %d (fitted %d) (%f%% %f%%) Pt %f", i, maxLabel.getTrackID(), (int32_t)(maxLabel.isFake()), maxweight, nClusters, track.NClustersFitted(), 100.f * maxweight / sumweight, 100.f * (float)maxcount / (float)nClusters,
999 std::sqrt(mc.pX * mc.pX + mc.pY * mc.pY));
1000 }
1001 }
1002 });
1003 }
1004 if (timer.IsRunning()) {
1005 GPUInfo("QA Time: Assign Track Labels:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1006 }
1007
1008 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1009 const GPUTPCGMMergedTrack* track = mTracking ? &mTracking->mIOPtrs.mergedTracks[i] : nullptr;
1010 mcLabelI_t label = mTrackMCLabels[i];
1011 if (mQATasks & taskClusterAttach) {
1012 // fill cluster attachment status
1013 if (!track->OK()) {
1014 continue;
1015 }
1016 if (!mTrackMCLabels[i].isValid()) {
1017 for (uint32_t k = 0; k < track->NClusters(); k++) {
1018 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1019 continue;
1020 }
1021 mClusterParam[mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num].fakeAttached++;
1022 }
1023 continue;
1024 }
1025 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1026 for (uint32_t k = 0; k < track->NClusters(); k++) {
1027 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1028 continue;
1029 }
1030 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num;
1031 bool correct = false;
1032 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1033 if (label == GetMCLabel(hitId, j)) {
1034 correct = true;
1035 break;
1036 }
1037 }
1038 if (correct) {
1039 mClusterParam[hitId].attached++;
1040 } else {
1041 mClusterParam[hitId].fakeAttached++;
1042 }
1043 }
1044 }
1045 }
1046
1047 if (mTrackMCLabels[i].isFake()) {
1048 (GetMCTrackObj(mFakeTracks, label))++;
1049 } else if (tracksExternal || !track->MergedLooper()) {
1050 GetMCTrackObj(mRecTracks, label)++;
1051 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1052 int32_t& revLabel = GetMCTrackObj(mTrackMCLabelsReverse, label);
1053 if (tracksExternal) {
1054#ifdef GPUCA_O2_LIB
1055 if (revLabel == -1 || fabsf((*tracksExternal)[i].getZ()) < fabsf((*tracksExternal)[revLabel].getZ())) {
1056 revLabel = i;
1057 }
1058#endif
1059 } else {
1060 const auto* trks = mTracking->mIOPtrs.mergedTracks;
1061 bool comp;
1062 if (revLabel == -1) {
1063 comp = true;
1064 } else {
1065 float shift1 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[i].CSide() * GPUChainTracking::NSECTORS / 2, trks[i].GetParam().GetTOffset());
1066 float shift2 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[revLabel].CSide() * GPUChainTracking::NSECTORS / 2, trks[revLabel].GetParam().GetTOffset());
1067 comp = fabsf(trks[i].GetParam().GetZ() + shift1) < fabsf(trks[revLabel].GetParam().GetZ() + shift2);
1068 }
1069 if (revLabel == -1 || !trks[revLabel].OK() || (trks[i].OK() && comp)) {
1070 revLabel = i;
1071 }
1072 }
1073 }
1074 }
1075 }
1076 if ((mQATasks & taskClusterAttach) && !tracksExternal) {
1077 std::vector<uint8_t> lowestPadRow(mTracking->mIOPtrs.nMergedTracks);
1078 // fill cluster adjacent status
1079 if (mTracking->mIOPtrs.mergedTrackHitAttachment) {
1080 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1081 if (mClusterParam[i].attached == 0 && mClusterParam[i].fakeAttached == 0) {
1082 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1083 if (attach & gputpcgmmergertypes::attachFlagMask) {
1084 int32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1085 mcLabelI_t trackL = mTrackMCLabels[track];
1086 bool fake = true;
1087 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1088 // GPUInfo("Attach %x Track %d / %d:%d", attach, track, j, GetMCLabelID(i, j));
1089 if (trackL == GetMCLabel(i, j)) {
1090 fake = false;
1091 break;
1092 }
1093 }
1094 if (fake) {
1095 mClusterParam[i].fakeAdjacent++;
1096 } else {
1097 mClusterParam[i].adjacent++;
1098 }
1099 }
1100 }
1101 }
1102 }
1103 if (mTracking->mIOPtrs.nMergedTracks && clNative) {
1104 std::fill(lowestPadRow.begin(), lowestPadRow.end(), 255);
1105 for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
1106 for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) {
1107 for (uint32_t iCl = 0; iCl < clNative->nClusters[iSector][iRow]; iCl++) {
1108 int32_t i = clNative->clusterOffset[iSector][iRow] + iCl;
1109 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1110 uint32_t trackId = GetMCTrackObj(mTrackMCLabelsReverse, GetMCLabel(i, j));
1111 if (trackId < lowestPadRow.size() && lowestPadRow[trackId] > iRow) {
1112 lowestPadRow[trackId] = iRow;
1113 }
1114 }
1115 }
1116 }
1117 }
1118 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1119 const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
1120 if (trk.OK() && lowestPadRow[i] != 255 && trk.NClustersFitted() >= PADROW_CHECK_MINCLS && CAMath::Abs(trk.GetParam().GetQPt()) < 1.0) {
1121 const auto& lowestCl = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef()].row < mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + trk.NClusters() - 1].row ? mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef()] : mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + trk.NClusters() - 1];
1122 const int32_t lowestRow = lowestCl.row;
1123 mPadRow[0]->Fill(lowestPadRow[i], lowestRow, 1.f);
1124 mPadRow[1]->Fill(CAMath::ATan2(trk.GetParam().GetY(), trk.GetParam().GetX()), lowestRow, 1.f);
1125 if (lowestPadRow[i] < 10 && lowestRow > lowestPadRow[i] + 3) {
1126 const auto& cl = clNative->clustersLinear[lowestCl.num];
1127 float x, y, z;
1128 mTracking->GetTPCTransformHelper()->Transform(lowestCl.sector, lowestCl.row, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
1129 float phi = CAMath::ATan2(y, x);
1130 mPadRow[2]->Fill(phi, lowestRow, 1.f);
1131 if (CAMath::Abs(phi) < 0.15) {
1132 const float time = cl.getTime();
1133 mPadRow[3]->Fill(mTracking->GetParam().GetUnscaledMult(time), lowestRow, 1.f);
1134 }
1135 }
1136 }
1137 }
1138 }
1139 }
1140
1141 if (mConfig.matchMCLabels.size()) {
1142 mGoodHits[mNEvents - 1].resize(GetNMCLabels());
1143 std::vector<bool> allowMCLabels(GetNMCTracks(0));
1144 for (uint32_t k = 0; k < GetNMCTracks(0); k++) {
1145 allowMCLabels[k] = false;
1146 }
1147 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1148 if (!mGoodTracks[mNEvents - 1][i]) {
1149 continue;
1150 }
1151 if (mConfig.matchDisplayMinPt > 0) {
1152 if (!mTrackMCLabels[i].isValid()) {
1153 continue;
1154 }
1155 const mcInfo_t& info = GetMCTrack(mTrackMCLabels[i]);
1156 if (info.pX * info.pX + info.pY * info.pY < mConfig.matchDisplayMinPt * mConfig.matchDisplayMinPt) {
1157 continue;
1158 }
1159 }
1160
1161 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1162 for (uint32_t j = 0; j < track.NClusters(); j++) {
1163 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + j].num;
1164 if (GetMCLabelNID(hitId)) {
1165 int32_t mcID = GetMCLabelID(hitId, 0);
1166 if (mcID >= 0) {
1167 allowMCLabels[mcID] = true;
1168 }
1169 }
1170 }
1171 }
1172 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1173 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1174 int32_t mcID = GetMCLabelID(i, j);
1175 if (mcID >= 0 && allowMCLabels[mcID]) {
1176 mGoodHits[mNEvents - 1][i] = true;
1177 }
1178 }
1179 }
1180 }
1181 if (timer.IsRunning()) {
1182 GPUInfo("QA Time: Cluster attach status:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1183 }
1184
1185 if (matchOnly) {
1186 return;
1187 }
1188
1189 // Recompute fNWeightCls (might have changed after merging events into timeframes)
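 // Each cluster distributes one unit of weight among its valid MC labels in proportion to the per-label weights, and the shares are summed into nWeightCls of the corresponding MC tracks.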
1190 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1191 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1192 mMCParam[iCol][i].nWeightCls = 0.;
1193 }
1194 }
1195 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1196 float weightTotal = 0.f;
1197 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1198 if (GetMCLabelID(i, j) >= 0) {
1199 weightTotal += GetMCLabelWeight(i, j);
1200 }
1201 }
1202 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1203 if (GetMCLabelID(i, j) >= 0) {
1204 GetMCTrackObj(mMCParam, GetMCLabel(i, j)).nWeightCls += GetMCLabelWeight(i, j) / weightTotal;
1205 }
1206 }
1207 }
1208 if (timer.IsRunning()) {
1209 GPUInfo("QA Time: Compute cluster label weights:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1210 }
1211
1212 // Compute MC Track Parameters for MC Tracks
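 // pt, phi, eta and theta are derived from the MC momentum: phi is mapped into [0, 2pi) via M_PI + atan2(-py, -px), and theta/eta are set to 0 for (near-)zero momentum.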
1213 tbb::parallel_for<uint32_t>(0, GetNMCCollissions(), [&](auto iCol) {
1214 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1215 const mcInfo_t& info = GetMCTrack(i, iCol);
1216 additionalMCParameters& mc2 = mMCParam[iCol][i];
1217 mc2.pt = std::sqrt(info.pX * info.pX + info.pY * info.pY);
1218 mc2.phi = M_PI + std::atan2(-info.pY, -info.pX);
1219 float p = info.pX * info.pX + info.pY * info.pY + info.pZ * info.pZ;
1220 if (p < 1e-18) {
1221 mc2.theta = mc2.eta = 0.f;
1222 } else {
1223 mc2.theta = info.pZ == 0 ? (M_PI / 2) : (std::acos(info.pZ / std::sqrt(p)));
1224 mc2.eta = -std::log(std::tan(0.5 * mc2.theta));
1225 }
1226 if (mConfig.writeMCLabels) {
1227 std::vector<int32_t>& effBuffer = mcEffBuffer[mNEvents - 1];
1228 effBuffer[i] = mRecTracks[iCol][i] * 1000 + mFakeTracks[iCol][i];
1229 }
1230 } // clang-format off
1231 }, tbb::simple_partitioner()); // clang-format on
1232 if (timer.IsRunning()) {
1233 GPUInfo("QA Time: Compute track mc parameters:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1234 }
1235
1236 // Fill Efficiency Histograms
1237 if (mQATasks & taskTrackingEff) {
1238 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1239 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1240 if ((mMCTrackMin != -1 && (int32_t)i < mMCTrackMin) || (mMCTrackMax != -1 && (int32_t)i >= mMCTrackMax)) {
1241 continue;
1242 }
1243 const mcInfo_t& info = GetMCTrack(i, iCol);
1244 const additionalMCParameters& mc2 = mMCParam[iCol][i];
1245 if (mc2.nWeightCls == 0.f) {
1246 continue;
1247 }
1248 const float& mcpt = mc2.pt;
1249 const float& mcphi = mc2.phi;
1250 const float& mceta = mc2.eta;
1251
1252 if (info.primDaughters) {
1253 continue;
1254 }
1255 if (mc2.nWeightCls < mConfig.minNClEff) {
1256 continue;
1257 }
1258 int32_t findable = mc2.nWeightCls >= mConfig.minNClFindable;
1259 if (info.pid < 0) {
1260 continue;
1261 }
1262 if (info.charge == 0.f) {
1263 continue;
1264 }
1265 if (mConfig.filterCharge && info.charge * mConfig.filterCharge < 0) {
1266 continue;
1267 }
1268 if (mConfig.filterPID >= 0 && info.pid != mConfig.filterPID) {
1269 continue;
1270 }
1271
1272 if (fabsf(mceta) > ETA_MAX || mcpt < PT_MIN || mcpt > PT_MAX) {
1273 continue;
1274 }
1275
1276 float alpha = std::atan2(info.y, info.x);
1277 alpha /= M_PI / 9.f;
1278 alpha = std::floor(alpha);
1279 alpha *= M_PI / 9.f;
1280 alpha += M_PI / 18.f;
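 // alpha now points to the centre of the 20 degree TPC sector containing the MC point; localY below is the MC y coordinate rotated into that sector frame.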
1281
1282 float c = std::cos(alpha);
1283 float s = std::sin(alpha);
1284 float localY = -info.x * s + info.y * c;
1285
1286 if (mConfig.dumpToROOT) {
1287 static auto effdump = GPUROOTDump<TNtuple>::getNew("eff", "alpha:x:y:z:mcphi:mceta:mcpt:rec:fake:findable:prim:ncls");
1288 float localX = info.x * c + info.y * s;
1289 effdump.Fill(alpha, localX, localY, info.z, mcphi, mceta, mcpt, mRecTracks[iCol][i], mFakeTracks[iCol][i], findable, info.prim, mc2.nWeightCls);
1290 }
1291
1292 for (int32_t j = 0; j < 6; j++) {
1293 if (j == 3 || j == 4) {
1294 continue;
1295 }
1296 for (int32_t k = 0; k < 2; k++) {
1297 if (k == 0 && findable == 0) {
1298 continue;
1299 }
1300
1301 int32_t val = (j == 0) ? (mRecTracks[iCol][i] ? 1 : 0) : (j == 1) ? (mRecTracks[iCol][i] ? mRecTracks[iCol][i] - 1 : 0) : (j == 2) ? mFakeTracks[iCol][i] : 1;
1302 if (val == 0) {
1303 continue;
1304 }
1305
1306 for (int32_t l = 0; l < 5; l++) {
1307 if (info.prim && mcpt < PT_MIN_PRIM) {
1308 continue;
1309 }
1310 if (l != 3 && fabsf(mceta) > ETA_MAX2) {
1311 continue;
1312 }
1313 if (l < 4 && mcpt < 1.f / mConfig.qpt) {
1314 continue;
1315 }
1316
1317 float pos = l == 0 ? localY : l == 1 ? info.z : l == 2 ? mcphi : l == 3 ? mceta : mcpt;
1318
1319 mEff[j][k][!info.prim][l]->Fill(pos, val);
1320 }
1321 }
1322 }
1323 }
1324 }
1325 if (timer.IsRunning()) {
1326 GPUInfo("QA Time: Fill efficiency histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1327 }
1328 }
1329
1330 // Fill Resolution Histograms
1331 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
1332 GPUTPCGMPropagator prop;
1333 prop.SetMaxSinPhi(.999);
1334 prop.SetMaterialTPC();
1335 prop.SetPolynomialField(&mParam->polynomialField);
1336
1337 for (uint32_t i = 0; i < mTrackMCLabels.size(); i++) {
1338 if (mConfig.writeMCLabels) {
1339 std::vector<int32_t>& labelBuffer = mcLabelBuffer[mNEvents - 1];
1340 labelBuffer[i] = mTrackMCLabels[i].getTrackID();
1341 }
1342 if (mTrackMCLabels[i].isFake()) {
1343 continue;
1344 }
1345 const mcInfo_t& mc1 = GetMCTrack(mTrackMCLabels[i]);
1346 const additionalMCParameters& mc2 = GetMCTrackObj(mMCParam, mTrackMCLabels[i]);
1347
1348 if (mc1.primDaughters) {
1349 continue;
1350 }
1351 if (!tracksExternal) {
1352 if (!mTracking->mIOPtrs.mergedTracks[i].OK()) {
1353 continue;
1354 }
1355 if (mTracking->mIOPtrs.mergedTracks[i].MergedLooper()) {
1356 continue;
1357 }
1358 }
1359 if ((mMCTrackMin != -1 && mTrackMCLabels[i].getTrackID() < mMCTrackMin) || (mMCTrackMax != -1 && mTrackMCLabels[i].getTrackID() >= mMCTrackMax)) {
1360 continue;
1361 }
1362 if (fabsf(mc2.eta) > ETA_MAX || mc2.pt < PT_MIN || mc2.pt > PT_MAX) {
1363 continue;
1364 }
1365 if (mc1.charge == 0.f) {
1366 continue;
1367 }
1368 if (mc1.pid < 0) {
1369 continue;
1370 }
1371 if (mc1.t0 == -100.f) {
1372 continue;
1373 }
1374 if (mConfig.filterCharge && mc1.charge * mConfig.filterCharge < 0) {
1375 continue;
1376 }
1377 if (mConfig.filterPID >= 0 && mc1.pid != mConfig.filterPID) {
1378 continue;
1379 }
1380 if (mc2.nWeightCls < mConfig.minNClRes) {
1381 continue;
1382 }
1383 if (mConfig.resPrimaries == 1 && !mc1.prim) {
1384 continue;
1385 } else if (mConfig.resPrimaries == 2 && mc1.prim) {
1386 continue;
1387 }
1388 if (GetMCTrackObj(mTrackMCLabelsReverse, mTrackMCLabels[i]) != (int32_t)i) {
1389 continue;
1390 }
1391
1392 GPUTPCGMTrackParam param;
1393 float alpha = 0.f;
1394 int32_t side;
1395 if (tracksExternal) {
1396#ifdef GPUCA_O2_LIB
1397 for (int32_t k = 0; k < 5; k++) {
1398 param.Par()[k] = (*tracksExternal)[i].getParams()[k];
1399 }
1400 for (int32_t k = 0; k < 15; k++) {
1401 param.Cov()[k] = (*tracksExternal)[i].getCov()[k];
1402 }
1403 param.X() = (*tracksExternal)[i].getX();
1404 param.TOffset() = (*tracksExternal)[i].getTime0();
1405 alpha = (*tracksExternal)[i].getAlpha();
1406 side = (*tracksExternal)[i].hasBothSidesClusters() ? 2 : ((*tracksExternal)[i].hasCSideClusters() ? 1 : 0);
1407#endif
1408 } else {
1409 param = mTracking->mIOPtrs.mergedTracks[i].GetParam();
1410 alpha = mTracking->mIOPtrs.mergedTracks[i].GetAlpha();
1411 side = mTracking->mIOPtrs.mergedTracks[i].CCE() ? 2 : (mTracking->mIOPtrs.mergedTracks[i].CSide() ? 1 : 0);
1412 }
1413
1414 float mclocal[4]; // Rotated x,y,Px,Py mc-coordinates - the MC data should be rotated since the track is propagated best along x
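// Rotation by the track alpha: x' = x*cos(alpha) + y*sin(alpha), y' = -x*sin(alpha) + y*cos(alpha),
// applied identically to the MC momentum components (px, py).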
1415 float c = std::cos(alpha);
1416 float s = std::sin(alpha);
1417 float x = mc1.x;
1418 float y = mc1.y;
1419 mclocal[0] = x * c + y * s;
1420 mclocal[1] = -x * s + y * c;
1421 float px = mc1.pX;
1422 float py = mc1.pY;
1423 mclocal[2] = px * c + py * s;
1424 mclocal[3] = -px * s + py * c;
1425
1426 if (mclocal[0] < TRACK_EXPECTED_REFERENCE_X - 3) {
1427 continue;
1428 }
1429 if (mclocal[0] > param.GetX() + 20) {
1430 continue;
1431 }
1432 if (param.GetX() > mConfig.maxResX) {
1433 continue;
1434 }
1435
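// getdz returns the track-minus-MC z residual for GPU tracks: without continuous readout simply
// param.Z() - mc.z; with continuous readout the time-offset difference is first converted to an
// equivalent z shift (skipped for tracks with clusters on both sides, side == 2).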
1436 auto getdz = [this, &param, &mc1, &side, tracksExternal]() {
1437 if (tracksExternal) {
1438 return param.GetZ();
1439 }
1440 if (!mParam->continuousMaxTimeBin) {
1441 return param.GetZ() - mc1.z;
1442 }
1443 float shift = side == 2 ? 0 : mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(side * GPUChainTracking::NSECTORS / 2, param.GetTOffset() - mc1.t0);
1444 return param.GetZ() + shift - mc1.z;
1445 };
1446
1447 prop.SetTrack(&param, alpha);
1448 bool inFlyDirection = 0;
1449 if (mConfig.strict) {
1450 const float dx = param.X() - std::max<float>(mclocal[0], TRACK_EXPECTED_REFERENCE_X_DEFAULT); // Limit distance check
1451 const float dy = param.Y() - mclocal[1];
1452 const float dz = getdz();
1453 if (dx * dx + dy * dy + dz * dz > 5.f * 5.f) {
1454 continue;
1455 }
1456 }
1457
1458 if (prop.PropagateToXAlpha(mclocal[0], alpha, inFlyDirection)) {
1459 continue;
1460 }
1461 if (fabsf(param.Y() - mclocal[1]) > (mConfig.strict ? 1.f : 4.f) || fabsf(getdz()) > (mConfig.strict ? 1.f : 4.f)) {
1462 continue;
1463 }
1464 float charge = mc1.charge > 0 ? 1.f : -1.f;
1465
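// Residuals: the "native" variants are taken directly in the internal track parameterization
// (sinPhi, dz/ds, q/pT); the non-native ones are converted to angles and a relative pT deviation.
// Pulls always use the native residuals divided by the diagonal errors of the track covariance.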
1466 float deltaY = param.GetY() - mclocal[1];
1467 float deltaZ = getdz();
1468 float deltaPhiNative = param.GetSinPhi() - mclocal[3] / mc2.pt;
1469 float deltaPhi = std::asin(param.GetSinPhi()) - std::atan2(mclocal[3], mclocal[2]);
1470 float deltaLambdaNative = param.GetDzDs() - mc1.pZ / mc2.pt;
1471 float deltaLambda = std::atan(param.GetDzDs()) - std::atan2(mc1.pZ, mc2.pt);
1472 float deltaPtNative = (param.GetQPt() - charge / mc2.pt) * charge;
1473 float deltaPt = (fabsf(1.f / param.GetQPt()) - mc2.pt) / mc2.pt;
1474
1475 float paramval[5] = {mclocal[1], mc1.z, mc2.phi, mc2.eta, mc2.pt};
1476 float resval[5] = {deltaY, deltaZ, mConfig.nativeFitResolutions ? deltaPhiNative : deltaPhi, mConfig.nativeFitResolutions ? deltaLambdaNative : deltaLambda, mConfig.nativeFitResolutions ? deltaPtNative : deltaPt};
1477 float pullval[5] = {deltaY / std::sqrt(param.GetErr2Y()), deltaZ / std::sqrt(param.GetErr2Z()), deltaPhiNative / std::sqrt(param.GetErr2SinPhi()), deltaLambdaNative / std::sqrt(param.GetErr2DzDs()), deltaPtNative / std::sqrt(param.GetErr2QPt())};
1478
1479 for (int32_t j = 0; j < 5; j++) {
1480 for (int32_t k = 0; k < 5; k++) {
1481 if (k != 3 && fabsf(mc2.eta) > ETA_MAX2) {
1482 continue;
1483 }
1484 if (k < 4 && mc2.pt < 1.f / mConfig.qpt) {
1485 continue;
1486 }
1487 if (mQATasks & taskTrackingRes) {
1488 mRes2[j][k]->Fill(resval[j], paramval[k]);
1489 }
1490 if (mQATasks & taskTrackingResPull) {
1491 mPull2[j][k]->Fill(pullval[j], paramval[k]);
1492 }
1493 }
1494 }
1495 }
1496 if (timer.IsRunning()) {
1497 GPUInfo("QA Time: Fill resolution histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1498 }
1499 }
1500
1501 if ((mQATasks & taskClusterAttach) && !tracksExternal) {
1502 // Fill cluster histograms
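// A cluster attached to several tracks is shared among them, contributing at most unit weight in
// total; for clusters without a valid track label the weight is additionally split among the
// cluster's MC labels according to their label weights.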
1503 for (uint32_t iTrk = 0; iTrk < nReconstructedTracks; iTrk++) {
1504 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[iTrk];
1505 if (!track.OK()) {
1506 continue;
1507 }
1508 if (!mTrackMCLabels[iTrk].isValid()) {
1509 for (uint32_t k = 0; k < track.NClusters(); k++) {
1510 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1511 continue;
1512 }
1513 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1514 float totalWeight = 0.;
1515 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1516 if (GetMCLabelID(hitId, j) >= 0 && GetMCTrackObj(mMCParam, GetMCLabel(hitId, j)).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1517 totalWeight += GetMCLabelWeight(hitId, j);
1518 }
1519 }
1520 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1521 const auto& r = checkClusterState<false>(attach);
1522 if (totalWeight > 0) {
1523 float weight = 1.f / (totalWeight * (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached));
1524 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1525 mcLabelI_t label = GetMCLabel(hitId, j);
1526 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1527 float pt = GetMCTrackObj(mMCParam, label).pt;
1528 if (pt < PT_MIN_CLUST) {
1529 pt = PT_MIN_CLUST;
1530 }
1531 mClusters[CL_fake]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1532 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1533 if (GetMCTrackObj(mRecTracks, label)) {
1534 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1535 }
1536 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1537 if (r.protect || r.physics) {
1538 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1539 }
1540 if (r.physics) {
1541 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1542 }
1543 }
1544 }
1545 } else {
1546 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1547 mClusters[CL_fake]->Fill(0.f, weight);
1548 mClusters[CL_att_adj]->Fill(0.f, weight);
1549 mClusters[CL_all]->Fill(0.f, weight);
1550 mClusterCounts.nUnaccessible += weight;
1551 if (r.protect || r.physics) {
1552 mClusters[CL_prot]->Fill(0.f, weight);
1553 }
1554 if (r.physics) {
1555 mClusters[CL_physics]->Fill(0.f, weight);
1556 }
1557 }
1558 }
1559 continue;
1560 }
1561 mcLabelI_t label = mTrackMCLabels[iTrk];
1562 if (mMCTrackMin != -1 && (label.getTrackID() < mMCTrackMin || label.getTrackID() >= mMCTrackMax)) {
1563 continue;
1564 }
1565 for (uint32_t k = 0; k < track.NClusters(); k++) {
1566 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1567 continue;
1568 }
1569 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1570 float pt = GetMCTrackObj(mMCParam, label).pt;
1571 if (pt < PT_MIN_CLUST) {
1572 pt = PT_MIN_CLUST;
1573 }
1574 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1575 bool correct = false;
1576 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1577 if (label == GetMCLabel(hitId, j)) {
1578 correct = true;
1579 break;
1580 }
1581 }
1582 if (correct) {
1583 mClusters[CL_attached]->Fill(pt, weight);
1584 mClusters[CL_tracks]->Fill(pt, weight);
1585 } else {
1586 mClusters[CL_fake]->Fill(pt, weight);
1587 }
1588 mClusters[CL_att_adj]->Fill(pt, weight);
1589 mClusters[CL_all]->Fill(pt, weight);
1590 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1591 const auto& r = checkClusterState<false>(attach);
1592 if (r.protect || r.physics) {
1593 mClusters[CL_prot]->Fill(pt, weight);
1594 }
1595 if (r.physics) {
1596 mClusters[CL_physics]->Fill(pt, weight);
1597 }
1598 }
1599 }
1600 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1601 if ((mMCTrackMin != -1 && GetMCLabelID(i, 0) < mMCTrackMin) || (mMCTrackMax != -1 && GetMCLabelID(i, 0) >= mMCTrackMax)) {
1602 continue;
1603 }
1604 if (mClusterParam[i].attached || mClusterParam[i].fakeAttached) {
1605 continue;
1606 }
1607 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1608 const auto& r = checkClusterState<false>(attach);
1609 if (mClusterParam[i].adjacent) {
1610 int32_t label = mTracking->mIOPtrs.mergedTrackHitAttachment[i] & gputpcgmmergertypes::attachTrackMask;
1611 if (!mTrackMCLabels[label].isValid()) {
1612 float totalWeight = 0.;
1613 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1614 mcLabelI_t labelT = GetMCLabel(i, j);
1615 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1616 totalWeight += GetMCLabelWeight(i, j);
1617 }
1618 }
1619 float weight = 1.f / totalWeight;
1620 if (totalWeight > 0) {
1621 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1622 mcLabelI_t labelT = GetMCLabel(i, j);
1623 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1624 float pt = GetMCTrackObj(mMCParam, labelT).pt;
1625 if (pt < PT_MIN_CLUST) {
1626 pt = PT_MIN_CLUST;
1627 }
1628 if (GetMCTrackObj(mRecTracks, labelT)) {
1629 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1630 }
1631 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1632 mClusters[CL_fakeAdj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1633 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1634 if (r.protect || r.physics) {
1635 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1636 }
1637 if (r.physics) {
1638 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1639 }
1640 }
1641 }
1642 } else {
1643 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1644 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1645 mClusters[CL_all]->Fill(0.f, 1.f);
1646 mClusterCounts.nUnaccessible++;
1647 if (r.protect || r.physics) {
1648 mClusters[CL_prot]->Fill(0.f, 1.f);
1649 }
1650 if (r.physics) {
1651 mClusters[CL_physics]->Fill(0.f, 1.f);
1652 }
1653 }
1654 } else {
1655 float pt = GetMCTrackObj(mMCParam, mTrackMCLabels[label]).pt;
1656 if (pt < PT_MIN_CLUST) {
1657 pt = PT_MIN_CLUST;
1658 }
1659 mClusters[CL_att_adj]->Fill(pt, 1.f);
1660 mClusters[CL_tracks]->Fill(pt, 1.f);
1661 mClusters[CL_all]->Fill(pt, 1.f);
1662 if (r.protect || r.physics) {
1663 mClusters[CL_prot]->Fill(pt, 1.f);
1664 }
1665 if (r.physics) {
1666 mClusters[CL_physics]->Fill(pt, 1.f);
1667 }
1668 }
1669 } else {
1670 float totalWeight = 0.;
1671 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1672 mcLabelI_t labelT = GetMCLabel(i, j);
1673 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1674 totalWeight += GetMCLabelWeight(i, j);
1675 }
1676 }
1677 if (totalWeight > 0) {
1678 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1679 mcLabelI_t label = GetMCLabel(i, j);
1680 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1681 float pt = GetMCTrackObj(mMCParam, label).pt;
1682 if (pt < PT_MIN_CLUST) {
1683 pt = PT_MIN_CLUST;
1684 }
1685 float weight = GetMCLabelWeight(i, j) / totalWeight;
1686 if (mClusterParam[i].fakeAdjacent) {
1687 mClusters[CL_fakeAdj]->Fill(pt, weight);
1688 }
1689 if (mClusterParam[i].fakeAdjacent) {
1690 mClusters[CL_att_adj]->Fill(pt, weight);
1691 }
1692 if (GetMCTrackObj(mRecTracks, label)) {
1693 mClusters[CL_tracks]->Fill(pt, weight);
1694 }
1695 mClusters[CL_all]->Fill(pt, weight);
1696 if (r.protect || r.physics) {
1697 mClusters[CL_prot]->Fill(pt, weight);
1698 }
1699 if (r.physics) {
1700 mClusters[CL_physics]->Fill(pt, weight);
1701 }
1702 }
1703 }
1704 } else {
1705 if (mClusterParam[i].fakeAdjacent) {
1706 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1707 }
1708 if (mClusterParam[i].fakeAdjacent) {
1709 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1710 }
1711 mClusters[CL_all]->Fill(0.f, 1.f);
1712 mClusterCounts.nUnaccessible++;
1713 if (r.protect || r.physics) {
1714 mClusters[CL_prot]->Fill(0.f, 1.f);
1715 }
1716 if (r.physics) {
1717 mClusters[CL_physics]->Fill(0.f, 1.f);
1718 }
1719 }
1720 }
1721 }
1722
1723 if (timer.IsRunning()) {
1724 GPUInfo("QA Time: Fill cluster histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1725 }
1726 }
1727 } else if (!mConfig.inputHistogramsOnly && !mConfig.noMC && (mQATasks & (taskTrackingEff | taskTrackingRes | taskTrackingResPull | taskClusterAttach))) {
1728 GPUWarning("No MC information available, only running partial TPC QA!");
1729 } // mcAvail
1730
1731 if ((mQATasks & taskTrackStatistics) && !tracksExternal) {
1732 // Fill track statistic histograms
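// clusterAttachCounts accumulates per cluster: [0] how often it was used on a track pad row,
// [1] whether the track MC label matches one of the cluster labels (normalized by the clusters in
// that row), and [2] the same restricted to non-fake track labels; the averages are stored later
// as nCorrectlyAttachedNormalized(NonFake).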
1733 std::vector<std::array<float, 3>> clusterAttachCounts;
1734 if (mcAvail) {
1735 clusterAttachCounts.resize(GetNMCLabels(), {0.f, 0.f, 0.f});
1736 }
1737 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1738 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1739 if (!track.OK()) {
1740 continue;
1741 }
1742 mTrackPt->Fill(1.f / fabsf(track.GetParam().GetQPt()));
1743 mNCl[0]->Fill(track.NClustersFitted());
1744 uint32_t nClCorrected = 0;
1745 const auto& trackClusters = mTracking->mIOPtrs.mergedTrackHits;
1746 uint32_t jNext = 0;
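// The inner loop advances jNext to the first cluster on a different sector/row, so clusters are
// processed one pad row at a time; rowClCount counts the non-rejected clusters in that row.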
1747 for (uint32_t j = 0; j < track.NClusters(); j = jNext) {
1748 uint32_t rowClCount = !(trackClusters[track.FirstClusterRef() + j].state & GPUTPCGMMergedTrackHit::flagReject);
1749 for (jNext = j + 1; jNext < track.NClusters(); jNext++) {
1750 if (trackClusters[track.FirstClusterRef() + j].sector != trackClusters[track.FirstClusterRef() + jNext].sector || trackClusters[track.FirstClusterRef() + j].row != trackClusters[track.FirstClusterRef() + jNext].row) {
1751 break;
1752 }
1753 rowClCount += !(trackClusters[track.FirstClusterRef() + jNext].state & GPUTPCGMMergedTrackHit::flagReject);
1754 }
1755 if (!track.MergedLooper() && rowClCount) {
1756 nClCorrected++;
1757 }
1758 if (mcAvail && rowClCount) {
1759 for (uint32_t k = j; k < jNext; k++) {
1760 const auto& cl = trackClusters[track.FirstClusterRef() + k];
1761 if (cl.state & GPUTPCGMMergedTrackHit::flagReject) {
1762 continue;
1763 }
1764 bool labelOk = false, labelOkNonFake = false;
1765 const mcLabelI_t& trkLabel = mTrackMCLabels[i];
1766 if (trkLabel.isValid() && !trkLabel.isNoise()) {
1767 for (int32_t l = 0; l < GetMCLabelNID(cl.num); l++) {
1768 const mcLabelI_t& clLabel = GetMCLabel(cl.num, l);
1769 if (clLabel.isValid() && !clLabel.isNoise() && CompareIgnoreFake(trkLabel, clLabel)) {
1770 labelOk = true;
1771 if (!trkLabel.isFake()) {
1772 labelOkNonFake = true;
1773 }
1774 break;
1775 }
1776 }
1777 }
1778 clusterAttachCounts[cl.num][0] += 1.0f;
1779 clusterAttachCounts[cl.num][1] += (float)labelOk / rowClCount;
1780 clusterAttachCounts[cl.num][2] += (float)labelOkNonFake / rowClCount;
1781 }
1782 }
1783 }
1784 if (nClCorrected) {
1785 mNCl[1]->Fill(nClCorrected);
1786 }
1787 mT0[0]->Fill(track.GetParam().GetTOffset());
1788 if (mTrackMCLabels.size() && !mTrackMCLabels[i].isFake() && !track.MergedLooper() && !track.CCE()) {
1789 const auto& info = GetMCTrack(mTrackMCLabels[i]);
1790 if (info.t0 != -100.f) {
1791 mT0[1]->Fill(track.GetParam().GetTOffset() - info.t0);
1792 }
1793 }
1794 }
1795 if (mClNative && mTracking && mTracking->GetTPCTransformHelper()) {
1796 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1797 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
1798 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1799 const auto& cl = mClNative->clusters[i][j][k];
1800 float x, y, z;
1801 GPUTPCConvertImpl::convert(*mTracking->GetTPCTransformHelper()->getCorrMap(), mTracking->GetParam(), i, j, cl.getPad(), cl.getTime(), x, y, z);
1802 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1803 mClXY->Fill(x, y);
1804 }
1805 }
1806 }
1807 }
1808 if (mcAvail) {
1809 double clusterAttachNormalizedCount = 0, clusterAttachNormalizedCountNonFake = 0;
1810 for (uint32_t i = 0; i < clusterAttachCounts.size(); i++) {
1811 if (clusterAttachCounts[i][0]) {
1812 clusterAttachNormalizedCount += clusterAttachCounts[i][1] / clusterAttachCounts[i][0];
1813 clusterAttachNormalizedCountNonFake += clusterAttachCounts[i][2] / clusterAttachCounts[i][0];
1814 }
1815 }
1816 mClusterCounts.nCorrectlyAttachedNormalized = clusterAttachNormalizedCount;
1817 mClusterCounts.nCorrectlyAttachedNormalizedNonFake = clusterAttachNormalizedCountNonFake;
1818 clusterAttachCounts.clear();
1819 }
1820
1821 if (timer.IsRunning()) {
1822 GPUInfo("QA Time: Fill track statistics:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1823 }
1824 }
1825
1826 uint32_t nCl = clNative ? clNative->nClustersTotal : mTracking->GetProcessors()->tpcMerger.NMaxClusters();
1827 mClusterCounts.nTotal += nCl;
1828 if (mQATasks & (taskClusterCounts | taskClusterRejection)) {
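// Classify every native cluster via checkClusterState (protected / physics / low-pt / looper ...).
// With MC available, clusters with at least 10% of their MC weight from pt >= 400 MeV/c tracks
// count towards nAbove400 (and nFakeRemove400 if they are attached but neither protected nor
// flagged as physics), while clusters with >= 90% of their weight from pt <= 40 MeV/c tracks
// count towards nBelow40.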
1829 for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
1830 for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) {
1831 for (uint32_t iCl = 0; iCl < clNative->nClusters[iSector][iRow]; iCl++) {
1832 uint32_t i = clNative->clusterOffset[iSector][iRow] + iCl;
1833 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1834 const auto& r = checkClusterState<true>(attach, &mClusterCounts);
1835
1836 if (mQATasks & taskClusterRejection) {
1837 if (mcAvail) {
1838 float totalWeight = 0, weight400 = 0, weight40 = 0;
1839 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1840 const auto& label = GetMCLabel(i, j);
1841 if (GetMCLabelID(label) >= 0) {
1842 totalWeight += GetMCLabelWeight(label);
1843 if (GetMCTrackObj(mMCParam, label).pt >= 0.4) {
1844 weight400 += GetMCLabelWeight(label);
1845 }
1846 if (GetMCTrackObj(mMCParam, label).pt <= 0.04) {
1847 weight40 += GetMCLabelWeight(label);
1848 }
1849 }
1850 }
1851 if (totalWeight > 0 && 10.f * weight400 >= totalWeight) {
1852 if (!r.unattached && !r.protect && !r.physics) {
1853 mClusterCounts.nFakeRemove400++;
1854 int32_t totalFake = weight400 < 0.9f * totalWeight;
1855 if (totalFake) {
1856 mClusterCounts.nFullFakeRemove400++;
1857 }
1858 /*printf("Fake removal (%d): Hit %7d, attached %d lowPt %d looper %d tube200 %d highIncl %d tube %d bad %d recPt %7.2f recLabel %6d", totalFake, i, (int32_t) (mClusterParam[i].attached || mClusterParam[i].fakeAttached),
1859 (int32_t) lowPt, (int32_t) ((attach & gputpcgmmergertypes::attachGoodLeg) == 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) && mev200),
1860 (int32_t) ((attach & gputpcgmmergertypes::attachHighIncl) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachGood) == 0),
1861 fabsf(qpt) > 0 ? 1.f / qpt : 0.f, id);
1862 for (int32_t j = 0;j < GetMCLabelNID(i);j++)
1863 {
1864 //if (GetMCLabelID(i, j) < 0) break;
1865 printf(" - label%d %6d weight %5d", j, GetMCLabelID(i, j), (int32_t) GetMCLabelWeight(i, j));
1866 if (GetMCLabelID(i, j) >= 0) printf(" - pt %7.2f", mMCParam[GetMCLabelID(i, j)].pt);
1867 else printf(" ");
1868 }
1869 printf("\n");*/
1870 }
1871 mClusterCounts.nAbove400++;
1872 }
1873 if (totalWeight > 0 && weight40 >= 0.9 * totalWeight) {
1874 mClusterCounts.nBelow40++;
1875 if (r.protect || r.physics) {
1876 mClusterCounts.nFakeProtect40++;
1877 }
1878 }
1879 }
1880
1881 if (r.physics) {
1882 mClusterCounts.nPhysics++;
1883 }
1884 if (r.protect) {
1885 mClusterCounts.nProt++;
1886 }
1887 if (r.unattached) {
1888 mClusterCounts.nUnattached++;
1889 }
1890 }
1891 if (mQATasks & taskClusterRejection) {
1892 if (mTracking && clNative) {
1893 const auto& cl = clNative->clustersLinear[i];
1894 mClRej[0]->Fill(cl.getPad() - GPUTPCGeometry::NPads(iRow) / 2 + 0.5, iRow, 1.f);
1895 if (!r.unattached && !r.protect) {
1896 mClRej[1]->Fill(cl.getPad() - GPUTPCGeometry::NPads(iRow) / 2 + 0.5, iRow, 1.f);
1897 }
1898 }
1899 }
1900 }
1901 }
1902 }
1903 }
1904
1905 // Process cluster count statistics
1906 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
1907 DoClusterCounts(nullptr);
1908 mClusterCounts = counts_t();
1909 }
1910
1911 if (timer.IsRunning()) {
1912 GPUInfo("QA Time: Cluster Counts:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1913 }
1914
1915 if (mConfig.dumpToROOT && !tracksExternal) {
1916 if (!clNative || !mTracking || !mTracking->mIOPtrs.mergedTrackHitAttachment || !mTracking->mIOPtrs.mergedTracks) {
1917 throw std::runtime_error("Cannot dump non o2::tpc::clusterNative clusters, also need hit attachment and GPU tracks");
1918 }
1919 uint32_t clid = 0;
1920 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1921 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
1922 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1923 const auto& cl = mClNative->clusters[i][j][k];
1924 uint32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[clid];
1925 float x = 0, y = 0, z = 0;
1926 if (attach & gputpcgmmergertypes::attachFlagMask) {
1927 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1928 const auto& trk = mTracking->mIOPtrs.mergedTracks[track];
1929 mTracking->GetTPCTransformHelper()->Transform(i, j, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
1930 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1931 }
1932 uint32_t extState = mTracking->mIOPtrs.mergedTrackHitStates ? mTracking->mIOPtrs.mergedTrackHitStates[clid] : 0;
1933
1934 if (mConfig.dumpToROOT >= 2) {
1935 GPUTPCGMMergedTrack trk;
1936 GPUTPCGMMergedTrackHit trkHit;
1937 memset((void*)&trk, 0, sizeof(trk));
1938 memset((void*)&trkHit, 0, sizeof(trkHit));
1939 if (attach & gputpcgmmergertypes::attachFlagMask) {
1940 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1941 trk = mTracking->mIOPtrs.mergedTracks[track];
1942 for (uint32_t l = 0; l < trk.NClusters(); l++) {
1943 const auto& tmp = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + l];
1944 if (tmp.num == clid) {
1945 trkHit = tmp;
1946 break;
1947 }
1948 }
1949 }
1950 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, GPUTPCGMMergedTrack, GPUTPCGMMergedTrackHit, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "track", "trackHit", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1951 cldump.Fill(cl, trk, trkHit, attach, extState, x, y, z, i, j, mNEvents - 1);
1952 } else {
1953 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1954 cldump.Fill(cl, attach, extState, x, y, z, i, j, mNEvents - 1);
1955 }
1956 clid++;
1957 }
1958 }
1959 }
1960
1961 static auto trkdump = GPUROOTDump<uint32_t, GPUTPCGMMergedTrack>::getNew("nEv", "track", "tracksTree");
1962 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1963 if (mTracking->mIOPtrs.mergedTracks[i].OK()) {
1964 trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.mergedTracks[i]);
1965 }
1966 }
1967
1968 if (mTracking && mTracking->GetProcessingSettings().createO2Output) {
1969 static auto o2trkdump = GPUROOTDump<uint32_t, o2::tpc::TrackTPC>::getNew("nEv", "track", "tracksO2Tree");
1970 for (uint32_t i = 0; i < mTracking->mIOPtrs.nOutputTracksTPCO2; i++) {
1971 o2trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.outputTracksTPCO2[i]);
1972 }
1973 }
1974 }
1975
1976 if (mConfig.compareTrackStatus) {
1977#ifdef GPUCA_DETERMINISTIC_MODE
1978 if (!mTracking || !mTracking->GetProcessingSettings().deterministicGPUReconstruction)
1979#endif
1980 {
1981 throw std::runtime_error("Need deterministic processing to compare track status");
1982 }
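// status[i] records whether merged track i is OK, has clusters, a positive NDF and (unless noMC)
// a non-fake MC label. compareTrackStatus == 1 writes this array to track.status; == 2 reads it
// back and prints the track indices that differ between the two reconstructions.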
1983 std::vector<uint8_t> status(mTracking->mIOPtrs.nMergedTracks);
1984 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1985 const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
1986 status[i] = trk.OK() && trk.NClusters() && trk.GetParam().GetNDF() > 0 && (mConfig.noMC || (mTrackMCLabels[i].isValid() && !mTrackMCLabels[i].isFake()));
1987 }
1988 if (mConfig.compareTrackStatus == 1) {
1989 std::ofstream("track.status", std::ios::binary).write((char*)status.data(), status.size() * sizeof(status[0]));
1990 } else if (mConfig.compareTrackStatus == 2) {
1991 std::ifstream f("track.status", std::ios::binary | std::ios::ate);
1992 std::vector<uint8_t> comp(f.tellg());
1993 f.seekg(0);
1994 f.read((char*)comp.data(), comp.size());
1995
1996 if (comp.size() != status.size()) {
1997 throw std::runtime_error("Number of track candidates in track fit in track.status and in current reconstruction differ");
1998 }
1999 std::vector<uint32_t> missing, missingComp;
2000 for (uint32_t i = 0; i < status.size(); i++) {
2001 if (status[i] && !comp[i]) {
2002 missingComp.emplace_back(i);
2003 }
2004 if (comp[i] && !status[i]) {
2005 missing.emplace_back(i);
2006 }
2007 }
2008 auto printer = [](std::vector<uint32_t> m, const char* name) {
2009 if (m.size()) {
2010 printf("Missing in %s reconstruction: (%zu)\n", name, m.size());
2011 for (uint32_t i = 0; i < m.size(); i++) {
2012 if (i) {
2013 printf(", ");
2014 }
2015 printf("%d", m[i]);
2016 }
2017 printf("\n");
2018 }
2019 };
2020 printer(missing, "current");
2021 printer(missingComp, "comparison");
2022 }
2023 }
2024
2025 mTrackingScratchBuffer.clear();
2026 mTrackingScratchBuffer.shrink_to_fit();
2027}
2028
2029void GPUQA::GetName(char* fname, int32_t k, bool noDash)
2030{
2031 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
2032 if (k || mConfig.inputHistogramsOnly || mConfig.name.size()) {
2033 if (!(mConfig.inputHistogramsOnly || k)) {
2034 snprintf(fname, 1024, "%s%s", mConfig.name.c_str(), noDash ? "" : " - ");
2035 } else if (mConfig.compareInputNames.size() > (unsigned)(k - nNewInput)) {
2036 snprintf(fname, 1024, "%s%s", mConfig.compareInputNames[k - nNewInput].c_str(), noDash ? "" : " - ");
2037 } else {
2038 strcpy(fname, mConfig.compareInputs[k - nNewInput].c_str());
2039 if (strlen(fname) > 5 && strcmp(fname + strlen(fname) - 5, ".root") == 0) {
2040 fname[strlen(fname) - 5] = 0;
2041 }
2042 if (!noDash) {
2043 strcat(fname, " - ");
2044 }
2045 }
2046 } else {
2047 fname[0] = 0;
2048 }
2049}
2050
2051template <class T>
2052T* GPUQA::GetHist(T*& ee, std::vector<std::unique_ptr<TFile>>& tin, int32_t k, int32_t nNewInput)
2053{
2054 T* e = ee;
2055 if ((mConfig.inputHistogramsOnly || k) && (e = dynamic_cast<T*>(tin[k - nNewInput]->Get(e->GetName()))) == nullptr) {
2056 GPUWarning("Missing histogram in input %s: %s", mConfig.compareInputs[k - nNewInput].c_str(), ee->GetName());
2057 return (nullptr);
2058 }
2059 ee = e;
2060 return (e);
2061}
2062
2063void GPUQA::DrawQAHistogramsCleanup()
2064{
2065 clearGarbagageCollector();
2066}
2067
2068void GPUQA::resetHists()
2069{
2070 if (!mQAInitialized) {
2071 throw std::runtime_error("QA not initialized");
2072 }
2073 if (mHaveExternalHists) {
2074 throw std::runtime_error("Cannot reset external hists");
2075 }
2076 for (auto& h : *mHist1D) {
2077 h.Reset();
2078 }
2079 for (auto& h : *mHist2D) {
2080 h.Reset();
2081 }
2082 for (auto& h : *mHist1Dd) {
2083 h.Reset();
2084 }
2085 for (auto& h : *mHistGraph) {
2086 h = TGraphAsymmErrors();
2087 }
2088 mClusterCounts = counts_t();
2089}
2090
2091int32_t GPUQA::DrawQAHistograms(TObjArray* qcout)
2092{
2093 const auto oldRootIgnoreLevel = gErrorIgnoreLevel;
2094 gErrorIgnoreLevel = kWarning;
2095 if (!mQAInitialized) {
2096 throw std::runtime_error("QA not initialized");
2097 }
2098
2099 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
2100 printf("Creating QA Histograms\n");
2101 }
2102
2103 std::vector<Color_t> colorNums(COLORCOUNT);
2104 if (!(qcout || mConfig.writeFileExt == "root" || mConfig.writeFileExt == "C")) {
2105 [[maybe_unused]] static int32_t initColorsInitialized = initColors();
2106 }
2107 for (int32_t i = 0; i < COLORCOUNT; i++) {
2108 colorNums[i] = (qcout || mConfig.writeFileExt == "root" || mConfig.writeFileExt == "C") ? defaultColorNums[i] : mColors[i]->GetNumber();
2109 }
2110
2111 bool mcAvail = mcPresent();
2112 char name[2048], fname[1024];
2113
2114 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
2115 const int32_t ConfigNumInputs = nNewInput + mConfig.compareInputs.size();
2116
2117 std::vector<std::unique_ptr<TFile>> tin;
2118 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
2119 tin.emplace_back(std::make_unique<TFile>(mConfig.compareInputs[i].c_str()));
2120 }
2121 std::unique_ptr<TFile> tout = nullptr;
2122 if (mConfig.output.size()) {
2123 tout = std::make_unique<TFile>(mConfig.output.c_str(), "RECREATE");
2124 }
2125
2126 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2127 float legendSpacingString = 0.025;
2128 for (int32_t i = 0; i < ConfigNumInputs; i++) {
2129 GetName(fname, i);
2130 if (strlen(fname) * 0.006 > legendSpacingString) {
2131 legendSpacingString = strlen(fname) * 0.006;
2132 }
2133 }
2134
2135 // Create Canvas / Pads for Efficiency Histograms
2136 if (mQATasks & taskTrackingEff) {
2137 for (int32_t ii = 0; ii < 6; ii++) {
2138 snprintf(name, 1024, "eff_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2139 mCEff[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2140 mCEff[ii]->cd();
2141 float dy = 1. / 2.;
2142 mPEff[ii][0] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2143 mPEff[ii][0]->Draw();
2144 mPEff[ii][0]->SetRightMargin(0.04);
2145 mPEff[ii][1] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2146 mPEff[ii][1]->Draw();
2147 mPEff[ii][1]->SetRightMargin(0.04);
2148 mPEff[ii][2] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 0.5, dy * 2 - .001);
2149 mPEff[ii][2]->Draw();
2150 mPEff[ii][2]->SetRightMargin(0.04);
2151 mPEff[ii][3] = createGarbageCollected<TPad>("p3", "", 0.5, dy * 1, 1.0, dy * 2 - .001);
2152 mPEff[ii][3]->Draw();
2153 mPEff[ii][3]->SetRightMargin(0.04);
2154 mLEff[ii] = createGarbageCollected<TLegend>(0.92 - legendSpacingString * 1.45, 0.83 - (0.93 - 0.82) / 2. * (float)ConfigNumInputs, 0.98, 0.849);
2155 SetLegend(mLEff[ii]);
2156 }
2157 }
2158
2159 // Create Canvas / Pads for Resolution Histograms
2160 if (mQATasks & taskTrackingRes) {
2161 for (int32_t ii = 0; ii < 7; ii++) {
2162 if (ii == 6) {
2163 snprintf(name, 1024, "res_integral_layout");
2164 } else {
2165 snprintf(name, 1024, "res_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2166 }
2167 mCRes[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2168 mCRes[ii]->cd();
2169 gStyle->SetOptFit(1);
2170
2171 float dy = 1. / 2.;
2172 mPRes[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2173 mPRes[ii][3]->Draw();
2174 mPRes[ii][3]->SetRightMargin(0.04);
2175 mPRes[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2176 mPRes[ii][4]->Draw();
2177 mPRes[ii][4]->SetRightMargin(0.04);
2178 mPRes[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2179 mPRes[ii][0]->Draw();
2180 mPRes[ii][0]->SetRightMargin(0.04);
2181 mPRes[ii][0]->SetLeftMargin(0.15);
2182 mPRes[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2183 mPRes[ii][1]->Draw();
2184 mPRes[ii][1]->SetRightMargin(0.04);
2185 mPRes[ii][1]->SetLeftMargin(0.135);
2186 mPRes[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2187 mPRes[ii][2]->Draw();
2188 mPRes[ii][2]->SetRightMargin(0.06);
2189 mPRes[ii][2]->SetLeftMargin(0.135);
2190 if (ii < 6) {
2191 mLRes[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2192 SetLegend(mLRes[ii]);
2193 }
2194 }
2195 }
2196
2197 // Create Canvas / Pads for Pull Histograms
2198 if (mQATasks & taskTrackingResPull) {
2199 for (int32_t ii = 0; ii < 7; ii++) {
2200 if (ii == 6) {
2201 snprintf(name, 1024, "pull_integral_layout");
2202 } else {
2203 snprintf(name, 1024, "pull_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2204 }
2205 mCPull[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2206 mCPull[ii]->cd();
2207 gStyle->SetOptFit(1);
2208
2209 float dy = 1. / 2.;
2210 mPPull[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2211 mPPull[ii][3]->Draw();
2212 mPPull[ii][3]->SetRightMargin(0.04);
2213 mPPull[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2214 mPPull[ii][4]->Draw();
2215 mPPull[ii][4]->SetRightMargin(0.04);
2216 mPPull[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2217 mPPull[ii][0]->Draw();
2218 mPPull[ii][0]->SetRightMargin(0.04);
2219 mPPull[ii][0]->SetLeftMargin(0.15);
2220 mPPull[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2221 mPPull[ii][1]->Draw();
2222 mPPull[ii][1]->SetRightMargin(0.04);
2223 mPPull[ii][1]->SetLeftMargin(0.135);
2224 mPPull[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2225 mPPull[ii][2]->Draw();
2226 mPPull[ii][2]->SetRightMargin(0.06);
2227 mPPull[ii][2]->SetLeftMargin(0.135);
2228 if (ii < 6) {
2229 mLPull[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2230 SetLegend(mLPull[ii]);
2231 }
2232 }
2233 }
2234
2235 // Create Canvas for Cluster Histos
2236 if (mQATasks & taskClusterAttach) {
2237 for (int32_t i = 0; i < 3; i++) {
2238 snprintf(name, 1024, "clusters_%s_layout", CLUSTER_TYPES[i]);
2239 mCClust[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2240 mCClust[i]->cd();
2241 mPClust[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2242 mPClust[i]->Draw();
2243 float y1 = i != 1 ? 0.77 : 0.27, y2 = i != 1 ? 0.9 : 0.42;
2244 mLClust[i] = createGarbageCollected<TLegend>(i == 2 ? 0.1 : (0.65 - legendSpacingString * 1.45), y2 - (y2 - y1) * (ConfigNumInputs + (i != 1) / 2.) + 0.005, i == 2 ? (0.3 + legendSpacingString * 1.45) : 0.9, y2);
2245 SetLegend(mLClust[i]);
2246 }
2247 }
2248
2249 // Create Canvas for track statistic histos
2250 if (mQATasks & taskTrackStatistics) {
2251 mCTrackPt = createGarbageCollected<TCanvas>("ctrackspt", "ctrackspt", 0, 0, 700, 700. * 2. / 3.);
2252 mCTrackPt->cd();
2253 mPTrackPt = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2254 mPTrackPt->Draw();
2255 mLTrackPt = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.5, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2256 SetLegend(mLTrackPt, true);
2257
2258 for (int32_t i = 0; i < 2; i++) {
2259 snprintf(name, 2048, "ctrackst0%d", i);
2260 mCT0[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2261 mCT0[i]->cd();
2262 mPT0[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2263 mPT0[i]->Draw();
2264 mLT0[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2265 SetLegend(mLT0[i]);
2266
2267 snprintf(name, 2048, "cncl%d", i);
2268 mCNCl[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2269 mCNCl[i]->cd();
2270 mPNCl[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2271 mPNCl[i]->Draw();
2272 mLNCl[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949); // TODO: Fix sizing of legend, and also fix font size
2273 SetLegend(mLNCl[i], true);
2274 }
2275
2276 mCClXY = createGarbageCollected<TCanvas>("clxy", "clxy", 0, 0, 700, 700. * 2. / 3.);
2277 mCClXY->cd();
2278 mPClXY = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2279 mPClXY->Draw();
2280 }
2281
2282 if (mQATasks & taskClusterRejection) {
2283 for (int32_t i = 0; i < 3; i++) {
2284 snprintf(name, 2048, "cnclrej%d", i);
2285 mCClRej[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2286 mCClRej[i]->cd();
2287 mPClRej[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2288 mPClRej[i]->Draw();
2289 }
2290 mCClRejP = createGarbageCollected<TCanvas>("cnclrejp", "cnclrejp", 0, 0, 700, 700. * 2. / 3.);
2291 mCClRejP->cd();
2292 mPClRejP = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2293 mPClRejP->Draw();
2294 }
2295
2296 if (mQATasks & taskClusterAttach) {
2297 for (int32_t i = 0; i < 4; i++) {
2298 snprintf(name, 2048, "cpadrow%d", i);
2299 mCPadRow[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2300 mCPadRow[i]->cd();
2301 mPPadRow[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2302 mPPadRow[i]->Draw();
2303 }
2304 }
2305 }
2306
2307 if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && (mQATasks & taskTrackingEff) && mcPresent()) {
2308 GPUInfo("QA Stats: Eff: Tracks Prim %d (Eta %d, Pt %d) %f%% (%f%%) Sec %d (Eta %d, Pt %d) %f%% (%f%%) - Res: Tracks %d (Eta %d, Pt %d)", (int32_t)mEff[3][1][0][0]->GetEntries(), (int32_t)mEff[3][1][0][3]->GetEntries(), (int32_t)mEff[3][1][0][4]->GetEntries(),
2309 mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights()), mEff[0][1][0][0]->GetSumOfWeights() / std::max(1., mEff[3][1][0][0]->GetSumOfWeights()), (int32_t)mEff[3][1][1][0]->GetEntries(), (int32_t)mEff[3][1][1][3]->GetEntries(),
2310 (int32_t)mEff[3][1][1][4]->GetEntries(), mEff[0][0][1][0]->GetSumOfWeights() / std::max(1., mEff[3][0][1][0]->GetSumOfWeights()), mEff[0][1][1][0]->GetSumOfWeights() / std::max(1., mEff[3][1][1][0]->GetSumOfWeights()), (int32_t)mRes2[0][0]->GetEntries(),
2311 (int32_t)mRes2[0][3]->GetEntries(), (int32_t)mRes2[0][4]->GetEntries());
2312 }
2313
2314 int32_t flagShowVsPtLog = (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) ? 1 : 0;
2315
2316 if (mQATasks & taskTrackingEff) {
2317 // Process / Draw Efficiency Histograms
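// mEff[0..2] hold the found / clone / fake counts, mEff[5] the findable denominator. The
// efficiency graphs are Bayesian ratios (Divide with "cl=0.683 b(1,1) mode"); fake and clone
// rates are divided by the summed denominators rebuilt in mEff[3] (rec+clone+fake) and
// mEff[4] (rec+clone).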
2318 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2319 int32_t i = ii == 5 ? 4 : ii;
2320 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2321 for (int32_t j = 0; j < 4; j++) {
2322 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2323 mPEff[ii][j]->cd();
2324 if (ii == 5) {
2325 mPEff[ii][j]->SetLogx();
2326 }
2327 }
2328 for (int32_t l = 0; l < 3; l++) {
2329 if (k == 0 && mConfig.inputHistogramsOnly == 0 && ii != 5) {
2330 if (l == 0) {
2331 // Divide eff, compute all for fake/clone
2332 auto oldLevel = gErrorIgnoreLevel;
2333 gErrorIgnoreLevel = kError;
2334 mEffResult[0][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[5][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2335 gErrorIgnoreLevel = oldLevel;
2336 mEff[3][j / 2][j % 2][i]->Reset(); // Sum up rec + clone + fake for fake rate
2337 mEff[3][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2338 mEff[3][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2339 mEff[3][j / 2][j % 2][i]->Add(mEff[2][j / 2][j % 2][i]);
2340 mEff[4][j / 2][j % 2][i]->Reset(); // Sum up rec + clone for clone rate
2341 mEff[4][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2342 mEff[4][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2343 } else {
2344 // Divide fake/clone
2345 auto oldLevel = gErrorIgnoreLevel;
2346 gErrorIgnoreLevel = kError;
2347 mEffResult[l][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[l == 1 ? 4 : 3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2348 gErrorIgnoreLevel = oldLevel;
2349 }
2350 }
2351
2352 TGraphAsymmErrors* e = mEffResult[l][j / 2][j % 2][i];
2353
2354 if (!mConfig.inputHistogramsOnly && k == 0) {
2355 if (tout) {
2356 mEff[l][j / 2][j % 2][i]->Write();
2357 e->Write();
2358 if (l == 2) {
2359 mEff[3][j / 2][j % 2][i]->Write(); // Store also all histogram!
2360 mEff[4][j / 2][j % 2][i]->Write(); // Store also all histogram!
2361 }
2362 }
2363 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2364 continue;
2365 }
2366 e->SetTitle(EFFICIENCY_TITLES[j]);
2367 e->GetYaxis()->SetTitle("(Efficiency)");
2368 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2369
2370 e->SetLineWidth(1);
2371 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2372 SetAxisSize(e);
2373 if (qcout && !mConfig.shipToQCAsCanvas) {
2374 qcout->Add(e);
2375 }
2376 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2377 continue;
2378 }
2379 e->SetMarkerColor(kBlack);
2380 e->SetLineColor(colorNums[(k < 3 ? (l * 3 + k) : (k * 3 + l)) % COLORCOUNT]);
2381 e->GetHistogram()->GetYaxis()->SetRangeUser(-0.02, 1.02);
2382 e->Draw(k || l ? "same P" : "AP");
2383 if (j == 0) {
2384 GetName(fname, k);
2385 mLEff[ii]->AddEntry(e, Form("%s%s", fname, EFF_NAMES[l]), "l");
2386 }
2387 }
2388 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2389 continue;
2390 }
2391 mCEff[ii]->cd();
2392 ChangePadTitleSize(mPEff[ii][j], 0.056);
2393 }
2394 }
2395 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2396 continue;
2397 }
2398
2399 mLEff[ii]->Draw();
2400
2401 if (qcout) {
2402 qcout->Add(mCEff[ii]);
2403 }
2404 if (!mConfig.enableLocalOutput) {
2405 continue;
2406 }
2407 doPerfFigure(0.2, 0.295, 0.025);
2408 mCEff[ii]->Print(Form("%s/eff_vs_%s.pdf", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii]));
2409 if (mConfig.writeFileExt != "") {
2410 mCEff[ii]->Print(Form("%s/eff_vs_%s.%s", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii], mConfig.writeFileExt.c_str()));
2411 }
2412 }
2413 }
2414
2415 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
2416 // Process / Draw Resolution Histograms
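// For each bin of the MC parameter the 2D histogram is projected onto the residual axis,
// rebinned until sufficiently populated, and fitted with a Gaussian (with log-likelihood and a
// custom Gaussian as fallbacks); the fitted sigma fills the resolution histogram dst[0] and the
// mean fills dst[1]. Badly populated or failed fits fall back to the projection's RMS and mean.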
2417 TH1D *resIntegral[5] = {}, *pullIntegral[5] = {};
2418 TCanvas* cfit = nullptr;
2419 std::unique_ptr<TF1> customGaus = std::make_unique<TF1>("G", "[0]*exp(-(x-[1])*(x-[1])/(2.*[2]*[2]))");
2420 for (int32_t p = 0; p < 2; p++) {
2421 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2422 continue;
2423 }
2424 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2425 TCanvas* can = p ? mCPull[ii] : mCRes[ii];
2426 TLegend* leg = p ? mLPull[ii] : mLRes[ii];
2427 int32_t i = ii == 5 ? 4 : ii;
2428 for (int32_t j = 0; j < 5; j++) {
2429 TH2F* src = p ? mPull2[j][i] : mRes2[j][i];
2430 TH1F** dst = p ? mPull[j][i] : mRes[j][i];
2431 TH1D*& dstIntegral = p ? pullIntegral[j] : resIntegral[j];
2432 TPad* pad = p ? mPPull[ii][j] : mPRes[ii][j];
2433
2434 if (!mConfig.inputHistogramsOnly && ii != 5) {
2435 if (cfit == nullptr) {
2436 cfit = createGarbageCollected<TCanvas>();
2437 }
2438 cfit->cd();
2439
2440 TAxis* axis = src->GetYaxis();
2441 int32_t nBins = axis->GetNbins();
2442 int32_t integ = 1;
2443 for (int32_t bin = 1; bin <= nBins; bin++) {
2444 int32_t bin0 = std::max(bin - integ, 0);
2445 int32_t bin1 = std::min(bin + integ, nBins);
2446 std::unique_ptr<TH1D> proj{src->ProjectionX("proj", bin0, bin1)};
2447 proj->ClearUnderflowAndOverflow();
2448 if (proj->GetEntries()) {
2449 uint32_t rebin = 1;
2450 while (proj->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2451 proj->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2452 rebin++;
2453 }
2454
2455 if (proj->GetEntries() < 20 || proj->GetRMS() < 0.00001) {
2456 dst[0]->SetBinContent(bin, proj->GetRMS());
2457 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2458 dst[1]->SetBinContent(bin, proj->GetMean());
2459 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2460 } else {
2461 proj->GetXaxis()->SetRange(0, 0);
2462 proj->GetXaxis()->SetRangeUser(std::max(proj->GetXaxis()->GetXmin(), proj->GetMean() - 3. * proj->GetRMS()), std::min(proj->GetXaxis()->GetXmax(), proj->GetMean() + 3. * proj->GetRMS()));
2463 bool forceLogLike = proj->GetMaximum() < 20;
2464 for (int32_t k = forceLogLike ? 2 : 0; k < 3; k++) {
2465 proj->Fit("gaus", forceLogLike || k == 2 ? "sQl" : k ? "sQww" : "sQ");
2466 TF1* fitFunc = proj->GetFunction("gaus");
2467
2468 if (k && !forceLogLike) {
2469 customGaus->SetParameters(fitFunc->GetParameter(0), fitFunc->GetParameter(1), fitFunc->GetParameter(2));
2470 proj->Fit(customGaus.get(), "sQ");
2471 fitFunc = customGaus.get();
2472 }
2473
2474 const float sigma = fabs(fitFunc->GetParameter(2));
2475 dst[0]->SetBinContent(bin, sigma);
2476 dst[1]->SetBinContent(bin, fitFunc->GetParameter(1));
2477 dst[0]->SetBinError(bin, fitFunc->GetParError(2));
2478 dst[1]->SetBinError(bin, fitFunc->GetParError(1));
2479
2480 const bool fail1 = sigma <= 0.f;
2481 const bool fail2 = fabs(proj->GetMean() - dst[1]->GetBinContent(bin)) > std::min<float>(p ? PULL_AXIS : mConfig.nativeFitResolutions ? RES_AXES_NATIVE[j] : RES_AXES[j], 3.f * proj->GetRMS());
2482 const bool fail3 = dst[0]->GetBinContent(bin) > 3.f * proj->GetRMS() || dst[0]->GetBinError(bin) > 1 || dst[1]->GetBinError(bin) > 1;
2483 const bool fail4 = fitFunc->GetParameter(0) < proj->GetMaximum() / 5.;
2484 const bool fail = fail1 || fail2 || fail3 || fail4;
2485 // if (p == 0 && ii == 4 && j == 2) DrawHisto(proj, Form("Hist_bin_%d-%d_vs_%d____%d_%d___%f-%f___%f-%f___%d.pdf", p, j, ii, bin, k, dst[0]->GetBinContent(bin), proj->GetRMS(), dst[1]->GetBinContent(bin), proj->GetMean(), (int32_t) fail), "");
2486
2487 if (!fail) {
2488 break;
2489 } else if (k >= 2) {
2490 dst[0]->SetBinContent(bin, proj->GetRMS());
2491 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2492 dst[1]->SetBinContent(bin, proj->GetMean());
2493 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2494 }
2495 }
2496 }
2497 } else {
2498 dst[0]->SetBinContent(bin, 0.f);
2499 dst[0]->SetBinError(bin, 0.f);
2500 dst[1]->SetBinContent(bin, 0.f);
2501 dst[1]->SetBinError(bin, 0.f);
2502 }
2503 }
2504 if (ii == 0) {
2505 dstIntegral = src->ProjectionX(mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j], 0, nBins + 1);
2506 uint32_t rebin = 1;
2507 while (dstIntegral->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2508 dstIntegral->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2509 rebin++;
2510 }
2511 }
2512 }
2513 if (ii == 0) {
2514 if (mConfig.inputHistogramsOnly) {
2515 dstIntegral = createGarbageCollected<TH1D>();
2516 }
2517 dstIntegral->SetName(Form(p ? "IntPull%s" : "IntRes%s", VSPARAMETER_NAMES[j]));
2518 dstIntegral->SetTitle(Form(p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]));
2519 }
2520 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2521 pad->cd();
2522 }
2523 int32_t numColor = 0;
2524 float tmpMax = -1000.;
2525 float tmpMin = 1000.;
2526
2527 for (int32_t l = 0; l < 2; l++) {
2528 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2529 TH1F* e = dst[l];
2530 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2531 continue;
2532 }
2533 if (nNewInput && k == 0 && ii != 5) {
2534 if (p == 0) {
2535 e->Scale(mConfig.nativeFitResolutions ? SCALE_NATIVE[j] : SCALE[j]);
2536 }
2537 }
2538 if (ii == 4) {
2539 e->GetXaxis()->SetRangeUser(0.2, PT_MAX);
2540 } else if (LOG_PT_MIN > 0 && ii == 5) {
2541 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2542 } else if (ii == 5) {
2543 e->GetXaxis()->SetRange(1, 0);
2544 }
2545 e->SetMinimum(-1111);
2546 e->SetMaximum(-1111);
2547
2548 if (e->GetMaximum() > tmpMax) {
2549 tmpMax = e->GetMaximum();
2550 }
2551 if (e->GetMinimum() < tmpMin) {
2552 tmpMin = e->GetMinimum();
2553 }
2554 }
2555 }
2556
2557 float tmpSpan;
2558 tmpSpan = tmpMax - tmpMin;
2559 tmpMax += tmpSpan * .02;
2560 tmpMin -= tmpSpan * .02;
2561 if (j == 2 && i < 3) {
2562 tmpMax += tmpSpan * 0.13 * ConfigNumInputs;
2563 }
2564
2565 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2566 for (int32_t l = 0; l < 2; l++) {
2567 TH1F* e = dst[l];
2568 if (!mConfig.inputHistogramsOnly && k == 0) {
2569 e->SetTitle(Form(p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]));
2570 e->SetStats(kFALSE);
2571 if (tout) {
2572 if (l == 0) {
2573 mRes2[j][i]->SetOption("colz");
2574 mRes2[j][i]->Write();
2575 }
2576 e->Write();
2577 }
2578 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2579 continue;
2580 }
2581 e->SetMaximum(tmpMax);
2582 e->SetMinimum(tmpMin);
2583 e->SetLineWidth(1);
2584 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2585 SetAxisSize(e);
2586 e->GetYaxis()->SetTitle(p ? AXIS_TITLES_PULL[j] : mConfig.nativeFitResolutions ? AXIS_TITLES_NATIVE[j] : AXIS_TITLES[j]);
2587 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2588 if (LOG_PT_MIN > 0 && ii == 5) {
2589 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2590 }
2591
2592 if (j == 0) {
2593 e->GetYaxis()->SetTitleOffset(1.5);
2594 } else if (j < 3) {
2595 e->GetYaxis()->SetTitleOffset(1.4);
2596 }
2597 if (qcout && !mConfig.shipToQCAsCanvas) {
2598 qcout->Add(e);
2599 }
2600 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2601 continue;
2602 }
2603
2604 e->SetMarkerColor(kBlack);
2605 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2606 e->Draw(k || l ? "same" : "");
2607 if (j == 0) {
2608 GetName(fname, k);
2609 leg->AddEntry(e, Form("%s%s", fname, l ? "Mean" : (p ? "Pull" : "Resolution")), "l");
2610 }
2611 }
2612 }
2613 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2614 continue;
2615 }
2616
2617 if (ii == 5) {
2618 pad->SetLogx();
2619 }
2620 can->cd();
2621 if (j == 4) {
2622 ChangePadTitleSize(pad, 0.056);
2623 }
2624 }
2625 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2626 continue;
2627 }
2628
2629 leg->Draw();
2630
2631 if (qcout) {
2632 qcout->Add(can);
2633 }
2634 if (!mConfig.enableLocalOutput) {
2635 continue;
2636 }
2637 doPerfFigure(0.2, 0.295, 0.025);
2638 can->Print(Form(p ? "%s/pull_vs_%s.pdf" : "%s/res_vs_%s.pdf", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii]));
2639 if (mConfig.writeFileExt != "") {
2640 can->Print(Form(p ? "%s/pull_vs_%s.%s" : "%s/res_vs_%s.%s", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii], mConfig.writeFileExt.c_str()));
2641 }
2642 }
2643 }
2644
2645 // Process Integral Resolution Histograms
2646 for (int32_t p = 0; p < 2; p++) {
2647 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2648 continue;
2649 }
2650 TCanvas* can = p ? mCPull[6] : mCRes[6];
2651 for (int32_t i = 0; i < 5; i++) {
2652 TPad* pad = p ? mPPull[6][i] : mPRes[6][i];
2653 TH1D* hist = p ? pullIntegral[i] : resIntegral[i];
2654 int32_t numColor = 0;
2655 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2656 pad->cd();
2657 }
2658 if (!mConfig.inputHistogramsOnly && mcAvail) {
2659 TH1D* e = hist;
2660 if (e && e->GetEntries()) {
2661 e->Fit("gaus", "sQ");
2662 }
2663 }
2664
2665 float tmpMax = 0;
2666 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2667 TH1D* e = hist;
2668 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2669 continue;
2670 }
2671 e->SetMaximum(-1111);
2672 if (e->GetMaximum() > tmpMax) {
2673 tmpMax = e->GetMaximum();
2674 }
2675 }
2676
2677 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2678 TH1D* e = hist;
2679 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2680 continue;
2681 }
2682 e->SetMaximum(tmpMax * 1.02);
2683 e->SetMinimum(tmpMax * -0.02);
2684 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2685 e->Write();
2686 }
2687 if (qcout && !mConfig.shipToQCAsCanvas) {
2688 qcout->Add(e);
2689 }
2690 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2691 continue;
2692 }
2693
2694 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2695 e->Draw(k == 0 ? "" : "same");
2696 }
2697 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2698 continue;
2699 }
2700 can->cd();
2701 }
2702 if (qcout) {
2703 qcout->Add(can);
2704 }
2705 if (!mConfig.enableLocalOutput) {
2706 continue;
2707 }
2708
2709 can->Print(Form(p ? "%s/pull_integral.pdf" : "%s/res_integral.pdf", mConfig.plotsDir.c_str()));
2710 if (mConfig.writeFileExt != "") {
2711 can->Print(Form(p ? "%s/pull_integral.%s" : "%s/res_integral.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
2712 }
2713 }
2714 }
2715
2716 uint64_t attachClusterCounts[N_CLS_HIST];
2717 if (mQATasks & taskClusterAttach) {
2718 // Process Cluster Attachment Histograms
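// The relative histograms (indices N_CLS_HIST .. 2*N_CLS_HIST-2) divide each category by the
// "all clusters" histogram; the integrated ones (from 2*N_CLS_HIST-1) accumulate the bin contents
// along pt and, unless CLUST_HIST_INT_SUM is set, normalize them to the total cluster count.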
2719 if (mConfig.inputHistogramsOnly == 0) {
2720 for (int32_t i = N_CLS_HIST; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
2721 mClusters[i]->Sumw2(true);
2722 }
2723 double totalVal = 0;
2724 if (!CLUST_HIST_INT_SUM) {
2725 for (int32_t j = 0; j < mClusters[N_CLS_HIST - 1]->GetXaxis()->GetNbins() + 2; j++) {
2726 totalVal += mClusters[N_CLS_HIST - 1]->GetBinContent(j);
2727 }
2728 }
2729 if (totalVal == 0.) {
2730 totalVal = 1.;
2731 }
2732 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2733 double val = 0;
2734 for (int32_t j = 0; j < mClusters[i]->GetXaxis()->GetNbins() + 2; j++) {
2735 val += mClusters[i]->GetBinContent(j);
2736 mClusters[2 * N_CLS_HIST - 1 + i]->SetBinContent(j, val / totalVal);
2737 }
2738 attachClusterCounts[i] = val;
2739 }
2740
2741 if (!CLUST_HIST_INT_SUM) {
2742 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2743 mClusters[2 * N_CLS_HIST - 1 + i]->SetMaximum(1.02);
2744 mClusters[2 * N_CLS_HIST - 1 + i]->SetMinimum(-0.02);
2745 }
2746 }
2747
2748 for (int32_t i = 0; i < N_CLS_HIST - 1; i++) {
2749 auto oldLevel = gErrorIgnoreLevel;
2750 gErrorIgnoreLevel = kError;
2751 mClusters[N_CLS_HIST + i]->Divide(mClusters[i], mClusters[N_CLS_HIST - 1], 1, 1, "B");
2752 gErrorIgnoreLevel = oldLevel;
2753 mClusters[N_CLS_HIST + i]->SetMinimum(-0.02);
2754 mClusters[N_CLS_HIST + i]->SetMaximum(1.02);
2755 }
2756 }
2757
2758 float tmpMax[2] = {0, 0}, tmpMin[2] = {0, 0};
2759 for (int32_t l = 0; l <= CLUST_HIST_INT_SUM; l++) {
2760 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2761 TH1* e = mClusters[l ? (N_CLS_TYPE * N_CLS_HIST - 2) : (N_CLS_HIST - 1)];
2762 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2763 continue;
2764 }
2765 e->SetMinimum(-1111);
2766 e->SetMaximum(-1111);
2767 if (l == 0) {
2768 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2769 }
2770 if (e->GetMaximum() > tmpMax[l]) {
2771 tmpMax[l] = e->GetMaximum();
2772 }
2773 if (e->GetMinimum() < tmpMin[l]) {
2774 tmpMin[l] = e->GetMinimum();
2775 }
2776 }
2777 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2778 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2779 TH1* e = mClusters[l ? (2 * N_CLS_HIST - 1 + i) : i];
2780 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2781 continue;
2782 }
2783 e->SetMaximum(tmpMax[l] * 1.02);
2784 e->SetMinimum(tmpMax[l] * -0.02);
2785 }
2786 }
2787 }
2788
2789 for (int32_t i = 0; i < N_CLS_TYPE; i++) {
2790 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2791 mPClust[i]->cd();
2792 mPClust[i]->SetLogx();
2793 }
2794 int32_t begin = i == 2 ? (2 * N_CLS_HIST - 1) : i == 1 ? N_CLS_HIST : 0;
2795 int32_t end = i == 2 ? (3 * N_CLS_HIST - 1) : i == 1 ? (2 * N_CLS_HIST - 1) : N_CLS_HIST;
2796 int32_t numColor = 0;
2797 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2798 for (int32_t j = end - 1; j >= begin; j--) {
2799 TH1* e = mClusters[j];
2800 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2801 continue;
2802 }
2803
2804 e->SetTitle(mConfig.plotsNoTitle ? "" : CLUSTER_TITLES[i]);
2805 e->GetYaxis()->SetTitle(i == 0 ? "Number of TPC clusters" : i == 1 ? "Fraction of TPC clusters" : CLUST_HIST_INT_SUM ? "Total TPC clusters (integrated)" : "Fraction of TPC clusters (integrated)");
2806 e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
2807 e->GetXaxis()->SetTitleOffset(1.1);
2808 e->GetXaxis()->SetLabelOffset(-0.005);
2809 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2810 e->Write();
2811 }
2812 e->SetStats(kFALSE);
2813 e->SetLineWidth(1);
2814 e->SetLineStyle(CONFIG_DASHED_MARKERS ? j + 1 : 1);
2815 if (i == 0) {
2816 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2817 }
2818 if (qcout && !mConfig.shipToQCAsCanvas) {
2819 qcout->Add(e);
2820 }
2821 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2822 continue;
2823 }
2824
2825 e->SetMarkerColor(kBlack);
2826 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2827 e->Draw(j == end - 1 && k == 0 ? "" : "same");
2828 GetName(fname, k);
2829 mLClust[i]->AddEntry(e, Form("%s%s", fname, CLUSTER_NAMES[j - begin]), "l");
2830 }
2831 }
2832 if (ConfigNumInputs == 1) {
2833 TH1* e = reinterpret_cast<TH1F*>(mClusters[begin + CL_att_adj]->Clone());
2834 e->Add(mClusters[begin + CL_prot], -1);
2835 if (qcout && !mConfig.shipToQCAsCanvas) {
2836 qcout->Add(e);
2837 }
2838 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2839 continue;
2840 }
2841
2842 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2843 e->Draw("same");
2844 mLClust[i]->AddEntry(e, "Removed (Strategy A)", "l");
2845 }
2846 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2847 continue;
2848 }
2849
2850 mLClust[i]->Draw();
2851
2852 if (qcout) {
2853 qcout->Add(mCClust[i]);
2854 }
2855 if (!mConfig.enableLocalOutput) {
2856 continue;
2857 }
2858 doPerfFigure(i == 0 ? 0.37 : (i == 1 ? 0.34 : 0.6), 0.295, 0.030);
2859 mCClust[i]->cd();
2860 mCClust[i]->Print(Form(i == 2 ? "%s/clusters_integral.pdf" : i == 1 ? "%s/clusters_relative.pdf" : "%s/clusters.pdf", mConfig.plotsDir.c_str()));
2861 if (mConfig.writeFileExt != "") {
2862 mCClust[i]->Print(Form(i == 2 ? "%s/clusters_integral.%s" : i == 1 ? "%s/clusters_relative.%s" : "%s/clusters.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
2863 }
2864 }
2865
2866 for (int32_t i = 0; i < 4; i++) {
2867 auto* e = mPadRow[i];
2868 if (tout && !mConfig.inputHistogramsOnly) {
2869 e->Write();
2870 }
2871 mPPadRow[i]->cd();
2872 e->SetOption("colz");
2873 std::string title = "First Track Pad Row (p_{T} > 1GeV, N_{Cl} #geq " + std::to_string(PADROW_CHECK_MINCLS);
2874 if (i >= 2) {
2875 title += ", row_{trk} > row_{MC} + 3, row_{MC} < 10";
2876 }
2877 if (i >= 3) {
2878 title += ", #Phi_{Cl} < 0.15";
2879 }
2880 title += ")";
2881
2882 e->SetTitle(mConfig.plotsNoTitle ? "" : title.c_str());
2883 e->GetXaxis()->SetTitle(i == 3 ? "Local Occupancy" : (i ? "#Phi_{Cl} (sector)" : "First MC Pad Row"));
2884 e->GetYaxis()->SetTitle("First Pad Row");
2885 e->Draw();
2886 mCPadRow[i]->cd();
2887 static const constexpr char* PADROW_NAMES[4] = {"MC", "Phi", "Phi1", "Occ"};
2888 mCPadRow[i]->Print(Form("%s/padRow%s.pdf", mConfig.plotsDir.c_str(), PADROW_NAMES[i]));
2889 if (mConfig.writeFileExt != "") {
2890 mCPadRow[i]->Print(Form("%s/padRow%s.%s", mConfig.plotsDir.c_str(), PADROW_NAMES[i], mConfig.writeFileExt.c_str()));
2891 }
2892 }
2893 }
2894
2895 // Process cluster count statistics
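 // DoClusterCounts is only invoked here if the counters were filled in this run, i.e. not when
 // running on input/externally provided histograms and not when the counts are routed into the
 // cluster-rejection histograms instead.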
2896 if ((mQATasks & taskClusterCounts) && !mHaveExternalHists && !mConfig.clusterRejectionHistograms && !mConfig.inputHistogramsOnly) {
2897 DoClusterCounts(attachClusterCounts);
2898 }
2899 if ((qcout || tout) && (mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
2900 for (uint32_t i = 0; i < mHistClusterCount.size(); i++) {
2901 if (tout) {
2902 mHistClusterCount[i]->Write();
2903 }
2904 if (qcout) {
2905 qcout->Add(mHistClusterCount[i]);
2906 }
2907 }
2908 }
2909
2910 if (mQATasks & taskTrackStatistics) {
2911 // Process track statistics histograms
2912 float tmpMax = 0.;
2913 for (int32_t k = 0; k < ConfigNumInputs; k++) { // TODO: Simplify this drawing, avoid copy&paste
2914 TH1F* e = mTrackPt;
2915 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2916 continue;
2917 }
2918 e->SetMaximum(-1111);
2919 if (e->GetMaximum() > tmpMax) {
2920 tmpMax = e->GetMaximum();
2921 }
2922 }
2923 mPTrackPt->cd();
2924 mPTrackPt->SetLogx();
2925 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2926 TH1F* e = mTrackPt;
2927 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2928 continue;
2929 }
2930 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2931 e->Write();
2932 }
2933 e->SetMaximum(tmpMax * 1.02);
2934 e->SetMinimum(tmpMax * -0.02);
2935 e->SetStats(kFALSE);
2936 e->SetLineWidth(1);
2937 e->SetTitle(mConfig.plotsNoTitle ? "" : "Number of Tracks vs #it{p}_{T}");
2938 e->GetYaxis()->SetTitle("Number of Tracks");
2939 e->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})");
2940 e->GetXaxis()->SetTitleOffset(1.2);
2941 if (qcout) {
2942 qcout->Add(e);
2943 }
2944 e->SetMarkerColor(kBlack);
2945 e->SetLineColor(colorNums[k % COLORCOUNT]);
2946 e->Draw(k == 0 ? "" : "same");
2947 GetName(fname, k, mConfig.inputHistogramsOnly);
2948 mLTrackPt->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s" : "%sTrack #it{p}_{T}", fname), "l");
2949 }
2950 mLTrackPt->Draw();
2951 doPerfFigure(0.63, 0.7, 0.030);
2952 mCTrackPt->cd();
2953 mCTrackPt->Print(Form("%s/tracks.pdf", mConfig.plotsDir.c_str()));
2954 if (mConfig.writeFileExt != "") {
2955 mCTrackPt->Print(Form("%s/tracks.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
2956 }
2957
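 // The same normalize/draw pattern is repeated for the two t_{0} histograms (i == 0: t_{0}
 // distribution, i == 1: t_{0} - t_{0,mc} resolution) and for the per-track cluster histograms below.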
2958 for (int32_t i = 0; i < 2; i++) {
2959 tmpMax = 0.;
2960 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2961 TH1F* e = mT0[i];
2962 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2963 continue;
2964 }
2965 e->SetMaximum(-1111);
2966 if (e->GetMaximum() > tmpMax) {
2967 tmpMax = e->GetMaximum();
2968 }
2969 }
2970 mPT0[i]->cd();
2971 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2972 TH1F* e = mT0[i];
2973 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2974 continue;
2975 }
2976 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2977 e->Write();
2978 }
2979 e->SetMaximum(tmpMax * 1.02);
2980 e->SetMinimum(tmpMax * -0.02);
2981 e->SetStats(kFALSE);
2982 e->SetLineWidth(1);
2983 e->SetTitle(mConfig.plotsNoTitle ? "" : (i ? "Track t_{0} resolution" : "Track t_{0} distribution"));
2984 e->GetYaxis()->SetTitle("a.u.");
2985 e->GetXaxis()->SetTitle(i ? "t_{0} - t_{0, mc}" : "t_{0}");
2986 if (qcout) {
2987 qcout->Add(e);
2988 }
2989 e->SetMarkerColor(kBlack);
2990 e->SetLineColor(colorNums[k % COLORCOUNT]);
2991 e->Draw(k == 0 ? "" : "same");
2992 GetName(fname, k, mConfig.inputHistogramsOnly);
2993 mLT0[i]->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s (%s)" : "%sTrack t_{0} %s", fname, i ? "resolution" : "distribution"), "l");
2994 }
2995 mLT0[i]->Draw();
2996 doPerfFigure(0.63, 0.7, 0.030);
2997 mCT0[i]->cd();
2998 mCT0[i]->Print(Form("%s/t0%s.pdf", mConfig.plotsDir.c_str(), i ? "_res" : ""));
2999 if (mConfig.writeFileExt != "") {
3000 mCT0[i]->Print(Form("%s/t0%s.%s", mConfig.plotsDir.c_str(), i ? "_res" : "", mConfig.writeFileExt.c_str()));
3001 }
3002
3003 tmpMax = 0.;
3004 for (int32_t k = 0; k < ConfigNumInputs; k++) {
3005 TH1F* e = mNCl[i];
3006 if (GetHist(e, tin, k, nNewInput) == nullptr) {
3007 continue;
3008 }
3009 e->SetMaximum(-1111);
3010 if (e->GetMaximum() > tmpMax) {
3011 tmpMax = e->GetMaximum();
3012 }
3013 }
3014 mPNCl[i]->cd();
3015 for (int32_t k = 0; k < ConfigNumInputs; k++) {
3016 TH1F* e = mNCl[i];
3017 if (GetHist(e, tin, k, nNewInput) == nullptr) {
3018 continue;
3019 }
3020 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
3021 e->Write();
3022 }
3023 e->SetMaximum(tmpMax * 1.02);
3024 e->SetMinimum(tmpMax * -0.02);
3025 e->SetStats(kFALSE);
3026 e->SetLineWidth(1);
3027 e->SetTitle(mConfig.plotsNoTitle ? "" : (i ? "Number of Rows with attached Cluster" : "Number of Clusters"));
3028 e->GetYaxis()->SetTitle("a.u.");
3029 e->GetXaxis()->SetTitle(i ? "N_{Rows with Clusters}" : "N_{Clusters}");
3030 if (qcout) {
3031 qcout->Add(e);
3032 }
3033 e->SetMarkerColor(kBlack);
3034 e->SetLineColor(colorNums[k % COLORCOUNT]);
3035 e->Draw(k == 0 ? "" : "same");
3036 GetName(fname, k, mConfig.inputHistogramsOnly);
3037 mLNCl[i]->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s" : (i ? "%sN_{Rows with Clusters}" : "%sN_{Clusters}"), fname), "l");
3038 }
3039 mLNCl[i]->Draw();
3040 doPerfFigure(0.6, 0.7, 0.030);
3041 mCNCl[i]->cd();
3042 mCNCl[i]->Print(Form("%s/nClusters%s.pdf", mConfig.plotsDir.c_str(), i ? "_corrected" : ""));
3043 if (mConfig.writeFileExt != "") {
3044 mCNCl[i]->Print(Form("%s/nClusters%s.%s", mConfig.plotsDir.c_str(), i ? "_corrected" : "", mConfig.writeFileExt.c_str()));
3045 }
3046 }
3047
3048 mPClXY->cd(); // TODO: This should become a separate task category
3049 mClXY->SetOption("colz");
3050 mClXY->Draw();
3051 mCClXY->cd();
3052 mCClXY->Print(Form("%s/clustersXY.pdf", mConfig.plotsDir.c_str()));
3053 if (mConfig.writeFileExt != "") {
3054 mCClXY->Print(Form("%s/clustersXY.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
3055 }
3056 }
3057
3058 if (mQATasks & taskClusterRejection) {
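 // mClRej[2] is filled as the ratio of mClRej[1] over mClRej[0]; judging from the axis labels
 // below, this presumably corresponds to rejected clusters divided by all clusters.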
3059 mClRej[2]->Divide(mClRej[1], mClRej[0]);
3060
3061 for (int32_t i = 0; i < 3; i++) {
3062 if (tout && !mConfig.inputHistogramsOnly) {
3063 mClRej[i]->Write();
3064 }
3065 mPClRej[i]->cd();
3066 mClRej[i]->SetTitle(mConfig.plotsNoTitle ? "" : REJECTED_NAMES[i]);
3067 mClRej[i]->SetOption("colz");
3068 mClRej[i]->Draw();
3069 mCClRej[i]->cd();
3070 mCClRej[i]->Print(Form("%s/clustersRej%d%s.pdf", mConfig.plotsDir.c_str(), i, REJECTED_NAMES[i]));
3071 if (mConfig.writeFileExt != "") {
3072 mCClRej[i]->Print(Form("%s/clustersRej%d%s.%s", mConfig.plotsDir.c_str(), i, REJECTED_NAMES[i], mConfig.writeFileExt.c_str()));
3073 }
3074 }
3075
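 // Per input, project the two 2D rejection histograms onto their pad-row axis and divide them,
 // giving the fraction of rejected clusters per pad row.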
3076 mPClRejP->cd();
3077 for (int32_t k = 0; k < ConfigNumInputs; k++) {
3078 auto* tmp = mClRej[0];
3079 if (GetHist(tmp, tin, k, nNewInput) == nullptr) {
3080 continue;
3081 }
3082 TH1D* proj1 = tmp->ProjectionY(Form("clrejptmp1%d", k)); // TODO: Clean up names
3083 proj1->SetDirectory(nullptr);
3084 tmp = mClRej[1];
3085 if (GetHist(tmp, tin, k, nNewInput) == nullptr) {
3086 continue;
3087 }
3088 TH1D* proj2 = tmp->ProjectionY(Form("clrejptmp2%d", k));
3089 proj2->SetDirectory(nullptr);
3090
3091 auto* e = mClRejP;
3092 if (GetHist(e, tin, k, nNewInput) == nullptr) {
3093 continue;
3094 }
3095 e->Divide(proj2, proj1);
3096 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
3097 e->Write();
3098 }
3099 delete proj1;
3100 delete proj2;
3101 e->SetMinimum(-0.02);
3102 e->SetMaximum(0.22);
3103 e->SetTitle(mConfig.plotsNoTitle ? "" : "Rejected Clusters");
3104 e->GetXaxis()->SetTitle("Pad Row");
3105 e->GetYaxis()->SetTitle("Rejected Clusters (fraction)");
3106 e->Draw(k == 0 ? "" : "same");
3107 }
3108 mPClRejP->Print(Form("%s/clustersRejProjected.pdf", mConfig.plotsDir.c_str()));
3109 if (mConfig.writeFileExt != "") {
3110 mPClRejP->Print(Form("%s/clustersRejProjected.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
3111 }
3112 }
3113
3114 if (tout && !mConfig.inputHistogramsOnly && mConfig.writeMCLabels) {
3115 gInterpreter->GenerateDictionary("vector<vector<int32_t>>", "");
3116 tout->WriteObject(&mcEffBuffer, "mcEffBuffer");
3117 tout->WriteObject(&mcLabelBuffer, "mcLabelBuffer");
3118 remove("AutoDict_vector_vector_int__.cxx");
3119 remove("AutoDict_vector_vector_int___cxx_ACLiC_dict_rdict.pcm");
3120 remove("AutoDict_vector_vector_int___cxx.d");
3121 remove("AutoDict_vector_vector_int___cxx.so");
3122 }
3123
3124 if (tout) {
3125 tout->Close();
3126 }
3127 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
3128 tin[i]->Close();
3129 }
3130 if (!qcout) {
3131 clearGarbagageCollector();
3132 }
3133 GPUInfo("GPU TPC QA histograms have been written to pdf%s%s files", mConfig.writeFileExt == "" ? "" : " and ", mConfig.writeFileExt.c_str());
3134 gErrorIgnoreLevel = oldRootIgnoreLevel;
3135 return (0);
3136}
3137
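// Helper used in three passes (see the mode switch below): mode == 2 only advances the counter
// index, mode == 1 books the corresponding 2D cluster-count histogram with a name sanitized to
// alphanumerics (e.g. "Removed (Strategy A)" becomes "clusterCount<num>_RemovedStrategyA"), and
// mode == 0 prints the count as a percentage of 'normalization' and/or fills the histogram with
// (normalization, percentage).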
3138void GPUQA::PrintClusterCount(int32_t mode, int32_t& num, const char* name, uint64_t n, uint64_t normalization)
3139{
3140 if (mode == 2) {
3141 // do nothing, just count num
3142 } else if (mode == 1) {
3143 char name2[128];
3144 snprintf(name2, 128, "clusterCount%d_", num);
3145 char* ptr = name2 + strlen(name2);
3146 for (uint32_t i = 0; i < strlen(name); i++) {
3147 if ((name[i] >= 'a' && name[i] <= 'z') || (name[i] >= 'A' && name[i] <= 'Z') || (name[i] >= '0' && name[i] <= '9')) {
3148 *(ptr++) = name[i];
3149 }
3150 }
3151 *ptr = 0;
3152 createHist(mHistClusterCount[num], name2, name, 1000, 0, mConfig.histMaxNClusters, 1000, 0, 100);
3153 } else if (mode == 0) {
3154 if (normalization && mConfig.enableLocalOutput) {
3155 for (uint32_t i = 0; i < 1 + (mTextDump != nullptr); i++) {
3156 fprintf(i ? mTextDump : stdout, "\t%40s: %'12" PRIu64 " (%6.2f%%)\n", name, n, 100.f * n / normalization);
3157 }
3158 }
3159 if (mConfig.clusterRejectionHistograms) {
3160 float ratio = 100.f * n / std::max<uint64_t>(normalization, 1);
3161 mHistClusterCount[num]->Fill(normalization, ratio, 1);
3162 }
3163 }
3164 num++;
3165}
3166
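// Walks through all cluster-count categories via PrintClusterCount; depending on 'mode' this
// prints the counts (normalized to the total number of clusters unless a different reference is
// passed), books the corresponding histograms, or merely counts the categories, and returns the
// number of categories. With MC information and the cluster-attachment task enabled, the
// per-category attachment counts are reported; otherwise only the global rejection counters.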
3167int32_t GPUQA::DoClusterCounts(uint64_t* attachClusterCounts, int32_t mode)
3168{
3169 if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && mConfig.plotsDir != "") {
3170 mTextDump = fopen((mConfig.plotsDir + "/clusterCounts.txt").c_str(), "w+");
3171 }
3172 int32_t num = 0;
3173 if (mcPresent() && (mQATasks & taskClusterAttach) && attachClusterCounts) {
3174 for (int32_t i = 0; i < N_CLS_HIST; i++) { // TODO: Check that these counts are still printed correctly!
3175 PrintClusterCount(mode, num, CLUSTER_NAMES[i], attachClusterCounts[i], mClusterCounts.nTotal);
3176 }
3177 PrintClusterCount(mode, num, "Unattached", attachClusterCounts[N_CLS_HIST - 1] - attachClusterCounts[CL_att_adj], mClusterCounts.nTotal);
3178 PrintClusterCount(mode, num, "Removed (Strategy A)", attachClusterCounts[CL_att_adj] - attachClusterCounts[CL_prot], mClusterCounts.nTotal); // Attached + Adjacent (also fake) - protected
3179 PrintClusterCount(mode, num, "Unaccessible", mClusterCounts.nUnaccessible, mClusterCounts.nTotal); // No contribution from track >= 10 MeV, unattached or fake-attached/adjacent
3180 } else {
3181 PrintClusterCount(mode, num, "All Clusters", mClusterCounts.nTotal, mClusterCounts.nTotal);
3182 PrintClusterCount(mode, num, "Used in Physics", mClusterCounts.nPhysics, mClusterCounts.nTotal);
3183 PrintClusterCount(mode, num, "Protected", mClusterCounts.nProt, mClusterCounts.nTotal);
3184 PrintClusterCount(mode, num, "Unattached", mClusterCounts.nUnattached, mClusterCounts.nTotal);
3185 PrintClusterCount(mode, num, "Removed (Strategy A)", mClusterCounts.nTotal - mClusterCounts.nUnattached - mClusterCounts.nProt, mClusterCounts.nTotal);
3186 PrintClusterCount(mode, num, "Removed (Strategy B)", mClusterCounts.nTotal - mClusterCounts.nProt, mClusterCounts.nTotal);
3187 }
3188
3189 PrintClusterCount(mode, num, "Merged Loopers (Track Merging)", mClusterCounts.nMergedLooperConnected, mClusterCounts.nTotal);
3190 PrintClusterCount(mode, num, "Merged Loopers (Afterburner)", mClusterCounts.nMergedLooperUnconnected, mClusterCounts.nTotal);
3191 PrintClusterCount(mode, num, "Looping Legs (other)", mClusterCounts.nLoopers, mClusterCounts.nTotal);
3192 PrintClusterCount(mode, num, "High Inclination Angle", mClusterCounts.nHighIncl, mClusterCounts.nTotal);
3193 PrintClusterCount(mode, num, "Rejected", mClusterCounts.nRejected, mClusterCounts.nTotal);
3194 PrintClusterCount(mode, num, "Tube (> 200 MeV)", mClusterCounts.nTube, mClusterCounts.nTotal);
3195 PrintClusterCount(mode, num, "Tube (< 200 MeV)", mClusterCounts.nTube200, mClusterCounts.nTotal);
3196 PrintClusterCount(mode, num, "Low Pt < 50 MeV", mClusterCounts.nLowPt, mClusterCounts.nTotal);
3197 PrintClusterCount(mode, num, "Low Pt < 200 MeV", mClusterCounts.n200MeV, mClusterCounts.nTotal);
3198
3199 if (mcPresent() && (mQATasks & taskClusterAttach)) {
3200 PrintClusterCount(mode, num, "Tracks > 400 MeV", mClusterCounts.nAbove400, mClusterCounts.nTotal);
3201 PrintClusterCount(mode, num, "Fake Removed (> 400 MeV)", mClusterCounts.nFakeRemove400, mClusterCounts.nAbove400);
3202 PrintClusterCount(mode, num, "Full Fake Removed (> 400 MeV)", mClusterCounts.nFullFakeRemove400, mClusterCounts.nAbove400);
3203 PrintClusterCount(mode, num, "Tracks < 40 MeV", mClusterCounts.nBelow40, mClusterCounts.nTotal);
3204 PrintClusterCount(mode, num, "Fake Protect (< 40 MeV)", mClusterCounts.nFakeProtect40, mClusterCounts.nBelow40);
3205 }
3206 if (mcPresent() && (mQATasks & taskTrackStatistics)) {
3207 PrintClusterCount(mode, num, "Correctly Attached all-trk normalized", mClusterCounts.nCorrectlyAttachedNormalized, mClusterCounts.nTotal);
3208 PrintClusterCount(mode, num, "Correctly Attached non-fake normalized", mClusterCounts.nCorrectlyAttachedNormalizedNonFake, mClusterCounts.nTotal);
3209 }
3210 if (mTextDump) {
3211 fclose(mTextDump);
3212 mTextDump = nullptr;
3213 }
3214 return num;
3215}
3216
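// Provides reusable scratch memory for the tracking chain: the requested size is rounded up to a
// whole number of mTrackingScratchBuffer elements and the vector is resized accordingly.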
3217void* GPUQA::AllocateScratchBuffer(size_t nBytes)
3218{
3219 mTrackingScratchBuffer.resize((nBytes + sizeof(mTrackingScratchBuffer[0]) - 1) / sizeof(mTrackingScratchBuffer[0]));
3220 return mTrackingScratchBuffer.data();
3221}