Project
Loading...
Searching...
No Matches
GPUQA.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
15#define QA_DEBUG 0
16#define QA_TIMING 0
17
18#include "Rtypes.h" // Include ROOT header first, to use ROOT and disable replacements
19
20#include "TH1F.h"
21#include "TH2F.h"
22#include "TH1D.h"
23#include "TGraphAsymmErrors.h"
24#include "TCanvas.h"
25#include "TPad.h"
26#include "TLegend.h"
27#include "TColor.h"
28#include "TPaveText.h"
29#include "TF1.h"
30#include "TFile.h"
31#include "TTree.h"
32#include "TStyle.h"
33#include "TLatex.h"
34#include "TObjArray.h"
35#include <sys/stat.h>
36
37#include "GPUQA.h"
38#include "GPUTPCDef.h"
39#include "GPUTPCTrackingData.h"
40#include "GPUChainTracking.h"
41#include "GPUChainTrackingGetters.inc"
42#include "GPUTPCTrack.h"
43#include "GPUTPCTracker.h"
44#include "GPUTPCGMMergedTrack.h"
45#include "GPUTPCGMPropagator.h"
47#include "GPUTPCMCInfo.h"
48#include "GPUO2DataTypes.h"
49#include "GPUParam.inc"
51#include "GPUTPCConvertImpl.h"
52#include "TPCFastTransformPOD.h"
53#include "GPUROOTDump.h"
56#include "GPUSettings.h"
57#include "GPUDefMacros.h"
58#ifndef GPUCA_STANDALONE
68#include "TPDGCode.h"
69#include "TParticlePDG.h"
70#include "TDatabasePDG.h"
71#endif
72#include "GPUQAHelper.h"
73#include <algorithm>
74#include <cstdio>
75#include <cinttypes>
76#include <fstream>
77
78#include "utils/timer.h"
79
80#include <oneapi/tbb.h>
81
82using namespace o2::gpu;
83
84namespace o2::gpu
85{
87 bool unattached = false;
88 float qpt = 0.f;
89 bool lowPt = false;
90 bool mev200 = false;
93 int32_t id = 0;
94 bool physics = false, protect = false;
95};
96} // namespace o2::gpu
97
// Classify the attachment state of a single cluster from its packed attachment
// word, and (only when COUNT is true) bump the matching per-category counters
// in *counts.
//
// NOTE(review): the declaration/initialization of the local result struct `r`
// (including how r.id is derived from `attach`) is elided in this view of the
// file; the visible code reads r.id as an index into mergedTracks, so it is
// presumably the attached track index extracted from the attachment word --
// confirm against the full source.
template <bool COUNT, class T>
inline checkClusterStateResult GPUQA::checkClusterState(uint32_t attach, T* counts) const
{
  r.unattached = attach == 0; // attach == 0: no track uses this cluster
  if (!r.unattached && !(attach & gputpcgmmergertypes::attachProtect)) {
    // Attached and not protected: look up the owning merged track's properties.
    r.qpt = fabsf(mTracking->mIOPtrs.mergedTracks[r.id].GetParam().GetQPt());
    // Above the q/pT rejection threshold (scaled to the B=0.5T reference field).
    r.lowPt = r.qpt * mTracking->GetParam().qptB5Scaler > mTracking->GetParam().rec.tpc.rejectQPtB5;
    r.mev200 = r.qpt > 5; // |q/pT| > 5 <=> pT below 200 MeV/c
    r.mergedLooperUnconnected = mTracking->mIOPtrs.mergedTracks[r.id].MergedLooperUnconnected();
    r.mergedLooperConnected = mTracking->mIOPtrs.mergedTracks[r.id].MergedLooperConnected();
  }
  // n200MeV is tallied independently; the categories below it are mutually
  // exclusive (else-if chain).
  if (r.mev200) {
    if constexpr (COUNT) {
      counts->n200MeV++;
    }
  }
  if (r.lowPt) {
    if constexpr (COUNT) {
      counts->nLowPt++;
    }
  } else if (r.mergedLooperUnconnected) {
    if constexpr (COUNT) {
      counts->nMergedLooperUnconnected++;
    }
  } else if (r.mergedLooperConnected) {
    if constexpr (COUNT) {
      counts->nMergedLooperConnected++;
    }
  } else if (attach) {
    // Attached and in none of the special categories: the cluster is protected
    // unless the rejection status machinery rejects it, and either the protect
    // flag is set or the owning track itself is not rejected.
    r.protect = !GPUTPCClusterRejection::GetRejectionStatus<COUNT>(attach, r.physics, counts, &r.mev200) && ((attach & gputpcgmmergertypes::attachProtect) || !GPUTPCClusterRejection::IsTrackRejected(mTracking->mIOPtrs.mergedTracks[r.id], mTracking->GetParam()));
  }
  return r;
}
133
134static const GPUSettingsQA& GPUQA_GetConfig(GPUChainTracking* chain)
135{
136 static GPUSettingsQA defaultConfig;
137 if (chain && chain->mConfigQA) {
138 return *chain->mConfigQA;
139 } else {
140 return defaultConfig;
141 }
142}
143
// ---- Axis ranges and binning for the QA histograms ----
static const constexpr float LOG_PT_MIN = -1.;

static constexpr float Y_MAX = 40;
static constexpr float Z_MAX = 100;
static constexpr float PT_MIN = 0.01; // TODO: Take from Param
static constexpr float PT_MIN_PRIM = 0.1;
static constexpr float PT_MIN_CLUST = 0.01;
static constexpr float PT_MAX = 20;
static constexpr float ETA_MAX = 1.5;
static constexpr float ETA_MAX2 = 0.9;
static constexpr int32_t PADROW_CHECK_MINCLS = 50; // minimum clusters for the pad-row check histograms

static constexpr bool CLUST_HIST_INT_SUM = false;

static constexpr const int32_t COLORCOUNT = 12; // size of the custom color palette below

// ---- Histogram / axis label tables (indexed in parallel with the mEff /
// mRes / mPull / mClusters arrays created in InitQACreateHistograms) ----
static const constexpr char* EFF_TYPES[6] = {"Rec", "Clone", "Fake", "All", "RecAndClone", "MC"};
static const constexpr char* FINDABLE_NAMES[2] = {"All", "Findable"};
static const constexpr char* PRIM_NAMES[2] = {"Prim", "Sec"};
static const constexpr char* PARAMETER_NAMES[5] = {"Y", "Z", "#Phi", "#lambda", "Relative #it{p}_{T}"};
static const constexpr char* PARAMETER_NAMES_NATIVE[5] = {"Y", "Z", "sin(#Phi)", "tan(#lambda)", "q/#it{p}_{T} (curvature)"};
static const constexpr char* VSPARAMETER_NAMES[6] = {"Y", "Z", "Phi", "Eta", "Pt", "Pt_log"};
static const constexpr char* EFF_NAMES[3] = {"Efficiency", "Clone Rate", "Fake Rate"};
static const constexpr char* EFFICIENCY_TITLES[4] = {"Efficiency (Primary Tracks, Findable)", "Efficiency (Secondary Tracks, Findable)", "Efficiency (Primary Tracks)", "Efficiency (Secondary Tracks)"};
static const constexpr double SCALE[5] = {10., 10., 1000., 1000., 100.}; // unit scaling (cm->mm, rad->mrad, fraction->%) for resolution plots
static const constexpr double SCALE_NATIVE[5] = {10., 10., 1000., 1000., 1.};
static const constexpr char* XAXIS_TITLES[5] = {"#it{y}_{mc} (cm)", "#it{z}_{mc} (cm)", "#Phi_{mc} (rad)", "#eta_{mc}", "#it{p}_{Tmc} (GeV/#it{c})"};
static const constexpr char* AXIS_TITLES[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "#phi-#phi_{mc} (mrad) (Resolution)", "#lambda-#lambda_{mc} (mrad) (Resolution)", "(#it{p}_{T} - #it{p}_{Tmc}) / #it{p}_{Tmc} (%) (Resolution)"};
static const constexpr char* AXIS_TITLES_NATIVE[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "sin(#phi)-sin(#phi_{mc}) (Resolution)", "tan(#lambda)-tan(#lambda_{mc}) (Resolution)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc}) (Resolution)"};
static const constexpr char* AXIS_TITLES_PULL[5] = {"#it{y}-#it{y}_{mc}/#sigma_{y} (Pull)", "#it{z}-#it{z}_{mc}/#sigma_{z} (Pull)", "sin(#phi)-sin(#phi_{mc})/#sigma_{sin(#phi)} (Pull)", "tan(#lambda)-tan(#lambda_{mc})/#sigma_{tan(#lambda)} (Pull)",
                                                   "q*(q/#it{p}_{T} - q/#it{p}_{Tmc})/#sigma_{q/#it{p}_{T}} (Pull)"};
static const constexpr char* CLUSTER_NAMES[GPUQA::N_CLS_HIST] = {"Correctly attached clusters", "Fake attached clusters", "Attached + adjacent clusters", "Fake adjacent clusters", "Clusters of reconstructed tracks", "Used in Physics", "Protected", "All clusters"};
static const constexpr char* CLUSTER_TITLES[GPUQA::N_CLS_TYPE] = {"Clusters Pt Distribution / Attachment", "Clusters Pt Distribution / Attachment (relative to all clusters)", "Clusters Pt Distribution / Attachment (integrated)"};
static const constexpr char* CLUSTER_NAMES_SHORT[GPUQA::N_CLS_HIST] = {"Attached", "Fake", "AttachAdjacent", "FakeAdjacent", "FoundTracks", "Physics", "Protected", "All"};
static const constexpr char* CLUSTER_TYPES[GPUQA::N_CLS_TYPE] = {"", "Ratio", "Integral"};
static const constexpr char* REJECTED_NAMES[3] = {"All", "Rejected", "Fraction"};
// RGB palette (one entry per COLORCOUNT) used by initColors().
static const constexpr int32_t COLORS_HEX[COLORCOUNT] = {0xB03030, 0x00A000, 0x0000C0, 0x9400D3, 0x19BBBF, 0xF25900, 0x7F7F7F, 0xFFD700, 0x07F707, 0x07F7F7, 0xF08080, 0x000000};

static const constexpr int32_t CONFIG_DASHED_MARKERS = 0;

// Per-parameter (Y, Z, Phi, Eta, Pt) axis limits and bin counts.
static const constexpr float AXES_MIN[5] = {-Y_MAX, -Z_MAX, 0.f, -ETA_MAX, PT_MIN};
static const constexpr float AXES_MAX[5] = {Y_MAX, Z_MAX, 2.f * M_PI, ETA_MAX, PT_MAX};
static const constexpr int32_t AXIS_BINS[5] = {51, 51, 144, 31, 50};
static const constexpr int32_t RES_AXIS_BINS[] = {1017, 113}; // Consecutive bin sizes, histograms are binned down until the maximum entry is 50, each bin size should evenly divide its predecessor.
static const constexpr float RES_AXES[5] = {1., 1., 0.03, 0.03, 1.0};
static const constexpr float RES_AXES_NATIVE[5] = {1., 1., 0.1, 0.1, 5.0};
static const constexpr float PULL_AXIS = 10.f;
191
192std::vector<TColor*> GPUQA::mColors;
193int32_t GPUQA::initColors()
194{
195 mColors.reserve(COLORCOUNT);
196 for (int32_t i = 0; i < COLORCOUNT; i++) {
197 float f1 = (float)((COLORS_HEX[i] >> 16) & 0xFF) / (float)0xFF;
198 float f2 = (float)((COLORS_HEX[i] >> 8) & 0xFF) / (float)0xFF;
199 float f3 = (float)((COLORS_HEX[i] >> 0) & 0xFF) / (float)0xFF;
200 mColors.emplace_back(new TColor(10000 + i, f1, f2, f3));
201 }
202 return 0;
203}
204static constexpr Color_t defaultColorNums[COLORCOUNT] = {kRed, kBlue, kGreen, kMagenta, kOrange, kAzure, kBlack, kYellow, kGray, kTeal, kSpring, kPink};
205
#define TRACK_EXPECTED_REFERENCE_X_DEFAULT 81
#ifndef GPUCA_RUN2 // Run 3 implementation
// O2 marks fake/noise labels with track ids >= 0x7FFFFFFE; normalize those to -1.
static inline int32_t GPUQA_O2_ConvertFakeLabel(int32_t label) { return label >= 0x7FFFFFFE ? -1 : label; }
// Accessors over the locally stored MC info vectors; collisions of all
// simulation sources are flattened into one index via mMCEventOffset.
inline uint32_t GPUQA::GetNMCCollissions() const { return mMCInfosCol.size(); }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mMCInfosCol[iCol].num; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].num; }
inline uint32_t GPUQA::GetNMCLabels() const { return mClNative->clustersMCTruth ? mClNative->clustersMCTruth->getIndexedSize() : 0; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mMCInfos[mMCInfosCol[iCol].first + iTrk]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return mMCInfos[mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].first + label.getTrackID()]; }
inline GPUQA::mcLabels_t GPUQA::GetMCLabel(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i); }
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return label.size(); }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i).size(); }
inline GPUQA::mcLabel_t GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mClNative->clustersMCTruth->getLabels(i)[j]; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(mClNative->clustersMCTruth->getLabels(i)[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(label[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return GPUQA_O2_ConvertFakeLabel(label.getTrackID()); }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return mMCEventOffset[mClNative->clustersMCTruth->getLabels(i)[j].getSourceID()] + mClNative->clustersMCTruth->getLabels(i)[j].getEventID(); }
inline const auto& GPUQA::GetClusterLabels() { return mClNative->clustersMCTruth; }
// Run 3 MC labels carry no per-cluster weight; report a constant weight of 1.
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return 1; }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && mClNative && mClNative->clustersMCTruth && mMCInfos.size(); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return !label.isValid() ? 0 : (mMCEventOffset[label.getSourceID()] + label.getEventID()); }
GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? MCCompLabel() : mTrackMCLabels[trackId]; }
bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return l1.compare(l2) >= 0; }
#define TRACK_EXPECTED_REFERENCE_X 78
#else // Run 2 implementation
// Run 2 equivalents of the accessors above, reading directly from mIOPtrs.
inline GPUQA::mcLabelI_t::mcLabelI_t(const GPUQA::mcLabel_t& l) : track(l.fMCID) {}
inline bool GPUQA::mcLabelI_t::operator==(const GPUQA::mcLabel_t& l) { return AbsLabelID(track) == l.fMCID; }
inline uint32_t GPUQA::GetNMCCollissions() const { return 1; }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCLabels() const { return mTracking->mIOPtrs.nMCLabelsTPC; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mTracking->mIOPtrs.mcInfosTPC[AbsLabelID(iTrk)]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return GetMCTrack(label.fMCID, 0); }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabelI_t& label) { return GetMCTrack(label.track, 0); }
inline const GPUQA::mcLabels_t& GPUQA::GetMCLabel(uint32_t i) { return mTracking->mIOPtrs.mcLabelsTPC[i]; }
inline const GPUQA::mcLabel_t& GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j]; }
// Run 2 labels use a fixed-size array of 3 cluster-label slots.
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return 3; }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return 3; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return label.fMCID; }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return 0; }
inline const auto& GPUQA::GetClusterLabels() { return mTracking->mIOPtrs.mcLabelsTPC; }
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return label.fWeight; }
// Fake labels are encoded as negative ids via id -> -2 - id (decoded by AbsLabelID).
inline int32_t GPUQA::FakeLabelID(int32_t id) { return id < 0 ? id : (-2 - id); }
inline int32_t GPUQA::AbsLabelID(int32_t id) { return id >= 0 ? id : (-id - 2); }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && GetNMCLabels() && GetNMCTracks(0); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return 0; }
GPUQA::mcLabelI_t GPUQA::GetMCTrackLabel(uint32_t trackId) const { return trackId >= mTrackMCLabels.size() ? mcLabelI_t() : mTrackMCLabels[trackId]; }
bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return AbsLabelID(l1.track) == AbsLabelID(l2.track); }
#define TRACK_EXPECTED_REFERENCE_X TRACK_EXPECTED_REFERENCE_X_DEFAULT
#endif
// Resolve the per-track slot in a nested container indexed as
// [global collision index][track id], where the global collision index is the
// event offset of the label's simulation source plus its event id.
template <class T>
inline auto& GPUQA::GetMCTrackObj(T& obj, const GPUQA::mcLabelI_t& l)
{
  return obj[mMCEventOffset[l.getSourceID()] + l.getEventID()][l.getTrackID()];
}
267
// getHistArray<T> maps a histogram type to the (storage vector, position
// vector) pair that createHist() uses for histograms of that type.
template <>
auto GPUQA::getHistArray<TH1F>()
{
  return std::make_pair(mHist1D, &mHist1D_pos);
}
template <>
auto GPUQA::getHistArray<TH2F>()
{
  return std::make_pair(mHist2D, &mHist2D_pos);
}
template <>
auto GPUQA::getHistArray<TH1D>()
{
  return std::make_pair(mHist1Dd, &mHist1Dd_pos);
}
template <>
auto GPUQA::getHistArray<TGraphAsymmErrors>()
{
  return std::make_pair(mHistGraph, &mHistGraph_pos);
}
288template <class T, typename... Args>
289void GPUQA::createHist(T*& h, const char* name, Args... args)
290{
291 const auto& p = getHistArray<T>();
292 if (mHaveExternalHists) {
293 if (p.first->size() <= p.second->size()) {
294 GPUError("Array sizes mismatch: Histograms %lu <= Positions %lu", p.first->size(), p.second->size());
295 throw std::runtime_error("Incoming histogram array incomplete");
296 }
297 if (strcmp((*p.first)[p.second->size()].GetName(), name)) {
298 GPUError("Histogram name mismatch: in array %s, trying to create %s", (*p.first)[p.second->size()].GetName(), name);
299 throw std::runtime_error("Incoming histogram has incorrect name");
300 }
301 } else {
302 if constexpr (std::is_same_v<T, TGraphAsymmErrors>) {
303 p.first->emplace_back();
304 p.first->back().SetName(name);
305 } else {
306 p.first->emplace_back(name, args...);
307 }
308 }
309 h = &((*p.first)[p.second->size()]);
310 p.second->emplace_back(&h);
311}
312
313namespace o2::gpu::internal
314{
316 std::tuple<std::vector<std::unique_ptr<TCanvas>>, std::vector<std::unique_ptr<TLegend>>, std::vector<std::unique_ptr<TPad>>, std::vector<std::unique_ptr<TLatex>>, std::vector<std::unique_ptr<TH1D>>> v;
317};
318} // namespace o2::gpu::internal
319
320template <class T, typename... Args>
321T* GPUQA::createGarbageCollected(Args... args)
322{
323 auto& v = std::get<std::vector<std::unique_ptr<T>>>(mGarbageCollector->v);
324 v.emplace_back(std::make_unique<T>(args...));
325 return v.back().get();
326}
// Delete every ROOT object owned by the garbage collector.
void GPUQA::clearGarbagageCollector()
{
  std::get<std::vector<std::unique_ptr<TPad>>>(mGarbageCollector->v).clear(); // Make sure to delete TPad first due to ROOT ownership (std::tuple has no defined order in its destructor)
  std::apply([](auto&&... args) { ((args.clear()), ...); }, mGarbageCollector->v); // Then clear the remaining vectors in tuple order
}
332
// Construct the QA module for a tracking chain. config and param may be null,
// in which case they are taken from the chain (or a default config is used).
// NOTE(review): when chain is nullptr a non-null param must be supplied, since
// the fallback dereferences chain->GetParam() -- confirm callers obey this.
GPUQA::GPUQA(GPUChainTracking* chain, const GPUSettingsQA* config, const GPUParam* param) : mTracking(chain), mConfig(config ? *config : GPUQA_GetConfig(chain)), mParam(param ? param : &chain->GetParam()), mGarbageCollector(std::make_unique<internal::GPUQAGarbageCollection>())
{
  mMCEventOffset.resize(1, 0); // Start with a single collision at offset 0 until real MC data is loaded
}
337
339{
340 if (mQAInitialized && !mHaveExternalHists) {
341 delete mHist1D;
342 delete mHist2D;
343 delete mHist1Dd;
344 delete mHistGraph;
345 }
346 clearGarbagageCollector(); // Needed to guarantee correct order for ROOT ownership
347}
348
349bool GPUQA::clusterRemovable(int32_t attach, bool prot) const
350{
351 const auto& r = checkClusterState<false>(attach);
352 if (prot) {
353 return r.protect || r.physics;
354 }
355 return (!r.unattached && !r.physics && !r.protect);
356}
357
358template <class T>
359void GPUQA::SetAxisSize(T* e)
360{
361 e->GetYaxis()->SetTitleOffset(1.0);
362 e->GetYaxis()->SetTitleSize(0.045);
363 e->GetYaxis()->SetLabelSize(0.045);
364 e->GetXaxis()->SetTitleOffset(1.03);
365 e->GetXaxis()->SetTitleSize(0.045);
366 e->GetXaxis()->SetLabelOffset(-0.005);
367 e->GetXaxis()->SetLabelSize(0.045);
368}
369
370void GPUQA::SetLegend(TLegend* l, bool bigText)
371{
372 l->SetTextFont(72);
373 l->SetTextSize(bigText ? 0.03 : 0.016);
374 l->SetFillColor(0);
375}
376
377double* GPUQA::CreateLogAxis(int32_t nbins, float xmin, float xmax)
378{
379 float logxmin = std::log10(xmin);
380 float logxmax = std::log10(xmax);
381 float binwidth = (logxmax - logxmin) / nbins;
382
383 double* xbins = new double[nbins + 1];
384
385 xbins[0] = xmin;
386 for (int32_t i = 1; i <= nbins; i++) {
387 xbins[i] = std::pow(10, logxmin + i * binwidth);
388 }
389 return xbins;
390}
391
392void GPUQA::ChangePadTitleSize(TPad* p, float size)
393{
394 p->Update();
395 TPaveText* pt = (TPaveText*)(p->GetPrimitive("title"));
396 if (pt == nullptr) {
397 GPUError("Error changing title");
398 } else {
399 pt->SetTextSize(size);
400 p->Modified();
401 }
402}
403
404void GPUQA::DrawHisto(TH1* histo, char* filename, char* options)
405{
406 TCanvas tmp;
407 tmp.cd();
408 histo->Draw(options);
409 tmp.Print(filename);
410}
411
412void GPUQA::doPerfFigure(float x, float y, float size)
413{
414 if (mConfig.perfFigure == "") {
415 return;
416 }
417 static constexpr const char* str_perf_figure_1 = "ALICE Performance";
418 static constexpr const char* str_perf_figure_2_mc = "MC, Pb#minusPb, #sqrt{s_{NN}} = 5.36 TeV";
419 static constexpr const char* str_perf_figure_2_data = "Pb#minusPb, #sqrt{s_{NN}} = 5.36 TeV";
420 const char* str_perf_figure_2 = (mConfig.perfFigure == "mc" || mConfig.perfFigure == "MC") ? str_perf_figure_2_mc : (mConfig.perfFigure == "data" ? str_perf_figure_2_data : mConfig.perfFigure.c_str());
421
422 TLatex* t = createGarbageCollected<TLatex>(); // TODO: We could perhaps put everything in a legend, to get a white background if there is a grid
423 t->SetNDC(kTRUE);
424 t->SetTextColor(1);
425 t->SetTextSize(size);
426 t->DrawLatex(x, y, str_perf_figure_1);
427 t->SetTextSize(size * 0.8);
428 t->DrawLatex(x, y - 0.01 - size, str_perf_figure_2);
429}
430
// Restrict QA processing to a sub-range of MC track indices.
void GPUQA::SetMCTrackRange(int32_t min, int32_t max)
{
  mMCTrackMin = min; // first MC track index to process
  mMCTrackMax = max; // upper bound; NOTE(review): inclusivity is not visible here -- confirm at the use sites
}
436
437int32_t GPUQA::InitQACreateHistograms()
438{
439 char name[2048], fname[1024];
440 if (mQATasks & taskTrackingEff) {
441 // Create Efficiency Histograms
442 for (int32_t i = 0; i < 6; i++) {
443 for (int32_t j = 0; j < 2; j++) {
444 for (int32_t k = 0; k < 2; k++) {
445 for (int32_t l = 0; l < 5; l++) {
446 snprintf(name, 2048, "%s%s%s%sVs%s", "tracks", EFF_TYPES[i], FINDABLE_NAMES[j], PRIM_NAMES[k], VSPARAMETER_NAMES[l]);
447 if (l == 4) {
448 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], k == 0 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
449 createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], binsPt.get());
450 } else {
451 createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], AXES_MIN[l], AXES_MAX[l]);
452 }
453 if (!mHaveExternalHists) {
454 mEff[i][j][k][l]->Sumw2();
455 }
456 strcat(name, "_eff");
457 if (i < 4) {
458 createHist(mEffResult[i][j][k][l], name);
459 }
460 }
461 }
462 }
463 }
464 }
465
466 // Create Resolution Histograms
467 if (mQATasks & taskTrackingRes) {
468 for (int32_t i = 0; i < 5; i++) {
469 for (int32_t j = 0; j < 5; j++) {
470 snprintf(name, 2048, "rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
471 snprintf(fname, 1024, "mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
472 if (j == 4) {
473 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
474 createHist(mRes[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
475 createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
476 } else {
477 createHist(mRes[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
478 createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
479 }
480 snprintf(name, 2048, "res_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
481 const float* axis = mConfig.nativeFitResolutions ? RES_AXES_NATIVE : RES_AXES;
482 const int32_t nbins = i == 4 && mConfig.nativeFitResolutions ? (10 * RES_AXIS_BINS[0]) : RES_AXIS_BINS[0];
483 if (j == 4) {
484 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
485 createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], binsPt.get());
486 } else {
487 createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
488 }
489 }
490 }
491 }
492
493 // Create Pull Histograms
494 if (mQATasks & taskTrackingResPull) {
495 for (int32_t i = 0; i < 5; i++) {
496 for (int32_t j = 0; j < 5; j++) {
497 snprintf(name, 2048, "pull_rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
498 snprintf(fname, 1024, "pull_mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
499 if (j == 4) {
500 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
501 createHist(mPull[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
502 createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
503 } else {
504 createHist(mPull[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
505 createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
506 }
507 snprintf(name, 2048, "pull_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
508 if (j == 4) {
509 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
510 createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], binsPt.get());
511 } else {
512 createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
513 }
514 }
515 }
516 }
517
518 // Create Cluster Histograms
519 if (mQATasks & taskClusterAttach) {
520 for (int32_t i = 0; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
521 int32_t ioffset = i >= (2 * N_CLS_HIST - 1) ? (2 * N_CLS_HIST - 1) : i >= N_CLS_HIST ? N_CLS_HIST : 0;
522 int32_t itype = i >= (2 * N_CLS_HIST - 1) ? 2 : i >= N_CLS_HIST ? 1 : 0;
523 snprintf(name, 2048, "clusters%s%s", CLUSTER_NAMES_SHORT[i - ioffset], CLUSTER_TYPES[itype]);
524 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
525 createHist(mClusters[i], name, name, AXIS_BINS[4], binsPt.get());
526 }
527
528 createHist(mPadRow[0], "padrow0", "padrow0", GPUTPCGeometry::NROWS - PADROW_CHECK_MINCLS, 0, GPUTPCGeometry::NROWS - 1 - PADROW_CHECK_MINCLS, GPUTPCGeometry::NROWS - PADROW_CHECK_MINCLS, 0, GPUTPCGeometry::NROWS - 1 - PADROW_CHECK_MINCLS);
529 createHist(mPadRow[1], "padrow1", "padrow1", 100.f, -0.2f, 0.2f, GPUTPCGeometry::NROWS - PADROW_CHECK_MINCLS, 0, GPUTPCGeometry::NROWS - 1 - PADROW_CHECK_MINCLS);
530 createHist(mPadRow[2], "padrow2", "padrow2", 100.f, -0.2f, 0.2f, GPUTPCGeometry::NROWS - PADROW_CHECK_MINCLS, 0, GPUTPCGeometry::NROWS - 1 - PADROW_CHECK_MINCLS);
531 createHist(mPadRow[3], "padrow3", "padrow3", 100.f, 0, 300000, GPUTPCGeometry::NROWS - PADROW_CHECK_MINCLS, 0, GPUTPCGeometry::NROWS - 1 - PADROW_CHECK_MINCLS);
532 }
533
534 if (mQATasks & taskTrackStatistics) {
535 // Create Tracks Histograms
536 for (int32_t i = 0; i < 2; i++) {
537 snprintf(name, 2048, i ? "nrows_with_cluster" : "nclusters");
538 createHist(mNCl[i], name, name, GPUTPCGeometry::NROWS + 1, 0, GPUTPCGeometry::NROWS);
539 }
540 std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
541 createHist(mTrackPt, "tracks_pt", "tracks_pt", AXIS_BINS[4], binsPt.get());
542 const uint32_t maxTime = (mTracking && mTracking->GetParam().continuousMaxTimeBin > 0) ? mTracking->GetParam().continuousMaxTimeBin : constants::TPC_MAX_TIME_BIN_TRIGGERED;
543 createHist(mT0[0], "tracks_t0", "tracks_t0", (maxTime + 1) / 10, 0, maxTime);
544 createHist(mT0[1], "tracks_t0_res", "tracks_t0_res", 1000, -100, 100);
545 createHist(mClXY, "clXY", "clXY", 1000, -250, 250, 1000, -250, 250); // TODO: Pass name only once
546 }
547 if (mQATasks & taskClusterRejection) {
548 const int padCount = GPUTPCGeometry::NPads(GPUTPCGeometry::NROWS - 1);
549 for (int32_t i = 0; i < 3; i++) {
550 snprintf(name, 2048, "clrej_%d", i);
551 createHist(mClRej[i], name, name, 2 * padCount, -padCount / 2 + 0.5f, padCount / 2 - 0.5f, GPUTPCGeometry::NROWS, 0, GPUTPCGeometry::NROWS - 1);
552 }
553 createHist(mClRejP, "clrejp", "clrejp", GPUTPCGeometry::NROWS, 0, GPUTPCGeometry::NROWS - 1);
554 }
555
556 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
557 int32_t num = DoClusterCounts(nullptr, 2);
558 mHistClusterCount.resize(num);
559 DoClusterCounts(nullptr, 1);
560 }
561
562 for (uint32_t i = 0; i < mHist1D->size(); i++) {
563 *mHist1D_pos[i] = &(*mHist1D)[i];
564 }
565 for (uint32_t i = 0; i < mHist2D->size(); i++) {
566 *mHist2D_pos[i] = &(*mHist2D)[i];
567 }
568 for (uint32_t i = 0; i < mHist1Dd->size(); i++) {
569 *mHist1Dd_pos[i] = &(*mHist1Dd)[i];
570 }
571 for (uint32_t i = 0; i < mHistGraph->size(); i++) {
572 *mHistGraph_pos[i] = &(*mHistGraph)[i];
573 }
574
575 return 0;
576}
577
// Bind externally owned histogram arrays (e.g. from the QC framework) to this
// QA instance and (re)create/validate the histograms for the given task mask.
// Returns 0 on success, 1 if histogram creation failed; throws if already
// initialized with a different task set.
int32_t GPUQA::loadHistograms(std::vector<TH1F>& i1, std::vector<TH2F>& i2, std::vector<TH1D>& i3, std::vector<TGraphAsymmErrors>& i4, int32_t tasks)
{
  if (tasks == tasksAutomatic) {
    // NOTE(review): the statement resolving tasksAutomatic to a concrete task
    // mask is elided in this view of the file -- confirm against the full source.
  }
  if (mQAInitialized && (!mHaveExternalHists || tasks != mQATasks)) {
    throw std::runtime_error("QA not initialized or initialized with different task array");
  }
  mHist1D = &i1;
  mHist2D = &i2;
  mHist1Dd = &i3;
  mHistGraph = &i4;
  mHist1D_pos.clear();
  mHist2D_pos.clear();
  mHist1Dd_pos.clear();
  mHistGraph_pos.clear();
  mHaveExternalHists = true;
  if (mConfig.noMC) {
    tasks &= tasksAllNoQC; // without MC only the non-QC tasks can run
  }
  mQATasks = tasks;
  if (InitQACreateHistograms()) {
    return 1;
  }
  mQAInitialized = true;
  return 0;
}
605
606void GPUQA::DumpO2MCData(const char* filename) const
607{
608 FILE* fp = fopen(filename, "w+b");
609 if (fp == nullptr) {
610 return;
611 }
612 uint32_t n = mMCInfos.size();
613 fwrite(&n, sizeof(n), 1, fp);
614 fwrite(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp);
615 n = mMCInfosCol.size();
616 fwrite(&n, sizeof(n), 1, fp);
617 fwrite(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp);
618 n = mMCEventOffset.size();
619 fwrite(&n, sizeof(n), 1, fp);
620 fwrite(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp);
621 fclose(fp);
622}
623
624int32_t GPUQA::ReadO2MCData(const char* filename)
625{
626 FILE* fp = fopen(filename, "rb");
627 if (fp == nullptr) {
628 return 1;
629 }
630 uint32_t n;
631 uint32_t x;
632 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
633 fclose(fp);
634 return 1;
635 }
636 mMCInfos.resize(n);
637 if (fread(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp) != n) {
638 fclose(fp);
639 return 1;
640 }
641 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
642 fclose(fp);
643 return 1;
644 }
645 mMCInfosCol.resize(n);
646 if (fread(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp) != n) {
647 fclose(fp);
648 return 1;
649 }
650 if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
651 fclose(fp);
652 return 1;
653 }
654 mMCEventOffset.resize(n);
655 if (fread(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp) != n) {
656 fclose(fp);
657 return 1;
658 }
659 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
660 printf("Read %ld bytes MC Infos\n", ftell(fp));
661 }
662 fclose(fp);
663 if (mTracking) {
664 CopyO2MCtoIOPtr(&mTracking->mIOPtrs);
665 }
666 return 0;
667}
668
// Point the tracking chain's IO structure at the locally owned MC info
// vectors. The pointers remain valid only while this GPUQA instance's vectors
// are not reallocated or destroyed.
void GPUQA::CopyO2MCtoIOPtr(GPUTrackingInOutPointers* ptr)
{
  ptr->mcInfosTPC = mMCInfos.data();
  ptr->nMCInfosTPC = mMCInfos.size();
  ptr->mcInfosTPCCol = mMCInfosCol.data();
  ptr->nMCInfosTPCCol = mMCInfosCol.size();
}
676
// Load MC kinematics and TPC track references from the O2 simulation output
// ("collisioncontext.root") into the flat per-collision structures
// (mMCInfos / mMCInfosCol / mMCEventOffset) used by the QA, and optionally
// publish them into the tracking IO pointers.
// @param updateIOPtr if non-null, CopyO2MCtoIOPtr() is called on it afterwards
void GPUQA::InitO2MCData(GPUTrackingInOutPointers* updateIOPtr)
{
#ifndef GPUCA_STANDALONE
  if (!mO2MCDataLoaded) { // load only once; later calls only refresh the IO pointers
    HighResTimer timer(mTracking && mTracking->GetProcessingSettings().debugLevel);
    if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
      GPUInfo("Start reading O2 Track MC information");
    }
    static constexpr float PRIM_MAX_T = 0.01f; // production-time threshold below which a track counts as primary

    o2::steer::MCKinematicsReader mcReader("collisioncontext.root");
    std::vector<int32_t> refId; // per MC track: index of its first TPC track reference, -1 if none

    auto dc = o2::steer::DigitizationContext::loadFromFile("collisioncontext.root");
    const auto& evrec = dc->getEventRecords();
    const auto& evparts = dc->getEventParts();
    // Time bin per (simulation source, event); -100.f marks "no time assigned".
    std::vector<std::vector<float>> evTimeBins(mcReader.getNSources());
    for (uint32_t i = 0; i < evTimeBins.size(); i++) {
      evTimeBins[i].resize(mcReader.getNEvents(i), -100.f);
    }
    for (uint32_t i = 0; i < evrec.size(); i++) {
      const auto& ir = evrec[i];
      for (uint32_t j = 0; j < evparts[i].size(); j++) {
        const int iSim = evparts[i][j].sourceID;
        const int iEv = evparts[i][j].entryID;
        if (iSim == o2::steer::QEDSOURCEID || ir.differenceInBC(o2::raw::HBFUtils::Instance().getFirstIR()) >= 0) {
          // NOTE(review): `timebin` used below is presumably derived from `ir` just here,
          // but its computation is not visible in this view — confirm against upstream.
          if (evTimeBins[iSim][iEv] >= 0) {
            throw std::runtime_error("Multiple time bins for same MC collision found");
          }
          evTimeBins[iSim][iEv] = timebin;
        }
      }
    }

    uint32_t nSimSources = mcReader.getNSources();
    mMCEventOffset.resize(nSimSources);
    uint32_t nSimTotalEvents = 0;
    uint32_t nSimTotalTracks = 0; // NOTE(review): not used in the visible part of this function
    for (uint32_t i = 0; i < nSimSources; i++) {
      mMCEventOffset[i] = nSimTotalEvents; // prefix sum: global collision index = offset[source] + local event
      nSimTotalEvents += mcReader.getNEvents(i);
    }

    mMCInfosCol.resize(nSimTotalEvents);
    for (int32_t iSim = 0; iSim < mcReader.getNSources(); iSim++) {
      for (int32_t i = 0; i < mcReader.getNEvents(iSim); i++) {
        const float timebin = evTimeBins[iSim][i];

        const std::vector<o2::MCTrack>& tracks = mcReader.getTracks(iSim, i);
        const std::vector<o2::TrackReference>& trackRefs = mcReader.getTrackRefsByEvent(iSim, i);

        // For each MC track remember the first track reference inside the TPC.
        refId.resize(tracks.size());
        std::fill(refId.begin(), refId.end(), -1);
        for (uint32_t j = 0; j < trackRefs.size(); j++) {
          if (trackRefs[j].getDetectorId() == o2::detectors::DetID::TPC) {
            int32_t trkId = trackRefs[j].getTrackID();
            if (refId[trkId] == -1) {
              refId[trkId] = j;
            }
          }
        }
        // Record where this collision's tracks start in the flat mMCInfos array.
        mMCInfosCol[mMCEventOffset[iSim] + i].first = mMCInfos.size();
        mMCInfosCol[mMCEventOffset[iSim] + i].num = tracks.size();
        mMCInfos.resize(mMCInfos.size() + tracks.size());
        for (uint32_t j = 0; j < tracks.size(); j++) {
          auto& info = mMCInfos[mMCInfosCol[mMCEventOffset[iSim] + i].first + j];
          const auto& trk = tracks[j];
          TParticlePDG* particle = TDatabasePDG::Instance()->GetParticle(trk.GetPdgCode());
          // Map the PDG code to the internal PID index: 0=e, 1=mu, 2=pi, 3=K, 4=p, -1=other.
          Int_t pid = -1;
          if (abs(trk.GetPdgCode()) == kElectron) {
            pid = 0;
          }
          if (abs(trk.GetPdgCode()) == kMuonMinus) {
            pid = 1;
          }
          if (abs(trk.GetPdgCode()) == kPiPlus) {
            pid = 2;
          }
          if (abs(trk.GetPdgCode()) == kKPlus) {
            pid = 3;
          }
          if (abs(trk.GetPdgCode()) == kProton) {
            pid = 4;
          }

          info.charge = particle ? particle->Charge() : 0; // 0 if the PDG code is unknown to ROOT
          info.prim = trk.T() < PRIM_MAX_T;
          // Flag whether any daughter was produced early enough to count as primary.
          info.primDaughters = 0;
          if (trk.getFirstDaughterTrackId() != -1) {
            for (int32_t k = trk.getFirstDaughterTrackId(); k <= trk.getLastDaughterTrackId(); k++) {
              if (tracks[k].T() < PRIM_MAX_T) {
                info.primDaughters = 1;
                break;
              }
            }
          }
          info.pid = pid;
          info.t0 = timebin;
          if (refId[j] >= 0) {
            // Use the kinematics at the first TPC track reference as the MC reference point.
            const auto& trkRef = trackRefs[refId[j]];
            info.x = trkRef.X();
            info.y = trkRef.Y();
            info.z = trkRef.Z();
            info.pX = trkRef.Px();
            info.pY = trkRef.Py();
            info.pZ = trkRef.Pz();
            // Distance of the production vertex from the origin (despite the name, a 3D radius).
            info.genRadius = std::sqrt(trk.GetStartVertexCoordinatesX() * trk.GetStartVertexCoordinatesX() + trk.GetStartVertexCoordinatesY() * trk.GetStartVertexCoordinatesY() + trk.GetStartVertexCoordinatesZ() * trk.GetStartVertexCoordinatesZ());
          } else {
            // No TPC reference: zero out the kinematics.
            info.x = info.y = info.z = info.pX = info.pY = info.pZ = 0;
            info.genRadius = 0;
          }
        }
      }
    }
    if (timer.IsRunning()) {
      GPUInfo("Finished reading O2 Track MC information (%f seconds)", timer.GetCurrentElapsedTime());
    }
    mO2MCDataLoaded = true;
  }
  if (updateIOPtr) {
    CopyO2MCtoIOPtr(updateIOPtr);
  }
#endif
}
803
804int32_t GPUQA::InitQA(int32_t tasks)
805{
806 if (mQAInitialized) {
807 throw std::runtime_error("QA already initialized");
808 }
809 if (tasks == tasksAutomatic) {
810 tasks = tasksDefault;
811 }
812
813 mHist1D = new std::vector<TH1F>;
814 mHist2D = new std::vector<TH2F>;
815 mHist1Dd = new std::vector<TH1D>;
816 mHistGraph = new std::vector<TGraphAsymmErrors>;
817 if (mConfig.noMC) {
818 tasks &= tasksAllNoQC;
819 }
820 mQATasks = tasks;
821
822 if (mTracking->GetProcessingSettings().qcRunFraction != 100.f && mQATasks != taskClusterCounts) {
823 throw std::runtime_error("QA with qcRunFraction only supported for taskClusterCounts");
824 }
825
826 if (mTracking) {
827 mClNative = mTracking->mIOPtrs.clustersNative;
828 }
829
830 if (InitQACreateHistograms()) {
831 return 1;
832 }
833
834 if (mConfig.enableLocalOutput) {
835 mkdir(mConfig.plotsDir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
836 }
837
838#ifndef GPUCA_STANDALONE
839 if (!mConfig.noMC) {
840 InitO2MCData(mTracking ? &mTracking->mIOPtrs : nullptr);
841 }
842#endif
843
844 if (mConfig.matchMCLabels.size()) {
845 uint32_t nFiles = mConfig.matchMCLabels.size();
846 std::vector<std::unique_ptr<TFile>> files;
847 std::vector<std::vector<std::vector<int32_t>>*> labelsBuffer(nFiles);
848 std::vector<std::vector<std::vector<int32_t>>*> effBuffer(nFiles);
849 for (uint32_t i = 0; i < nFiles; i++) {
850 files.emplace_back(std::make_unique<TFile>(mConfig.matchMCLabels[i].c_str()));
851 labelsBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcLabelBuffer");
852 effBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcEffBuffer");
853 if (labelsBuffer[i] == nullptr || effBuffer[i] == nullptr) {
854 GPUError("Error opening / reading from labels file %u/%s: %p %p", i, mConfig.matchMCLabels[i].c_str(), (void*)labelsBuffer[i], (void*)effBuffer[i]);
855 exit(1);
856 }
857 }
858
859 mGoodTracks.resize(labelsBuffer[0]->size());
860 mGoodHits.resize(labelsBuffer[0]->size());
861 for (uint32_t iEvent = 0; iEvent < labelsBuffer[0]->size(); iEvent++) {
862 std::vector<bool> labelsOK((*effBuffer[0])[iEvent].size());
863 for (uint32_t k = 0; k < (*effBuffer[0])[iEvent].size(); k++) {
864 labelsOK[k] = false;
865 for (uint32_t l = 0; l < nFiles; l++) {
866 if ((*effBuffer[0])[iEvent][k] != (*effBuffer[l])[iEvent][k]) {
867 labelsOK[k] = true;
868 break;
869 }
870 }
871 }
872 mGoodTracks[iEvent].resize((*labelsBuffer[0])[iEvent].size());
873 for (uint32_t k = 0; k < (*labelsBuffer[0])[iEvent].size(); k++) {
874 if ((*labelsBuffer[0])[iEvent][k] == MC_LABEL_INVALID) {
875 continue;
876 }
877 mGoodTracks[iEvent][k] = labelsOK[abs((*labelsBuffer[0])[iEvent][k])];
878 }
879 }
880 }
881 mQAInitialized = true;
882 return 0;
883}
884
885void GPUQA::RunQA(bool matchOnly, const std::vector<o2::tpc::TrackTPC>* tracksExternal, const std::vector<o2::MCCompLabel>* tracksExtMC, const o2::tpc::ClusterNativeAccess* clNative)
886{
887 if (!mQAInitialized) {
888 throw std::runtime_error("QA not initialized");
889 }
890 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
891 GPUInfo("Running QA - Mask %d, Efficiency %d, Resolution %d, Pulls %d, Cluster Attachment %d, Track Statistics %d, Cluster Counts %d", mQATasks, (int32_t)(mQATasks & taskTrackingEff), (int32_t)(mQATasks & taskTrackingRes), (int32_t)(mQATasks & taskTrackingResPull), (int32_t)(mQATasks & taskClusterAttach), (int32_t)(mQATasks & taskTrackStatistics), (int32_t)(mQATasks & taskClusterCounts));
892 }
893 if (!clNative && mTracking) {
894 clNative = mTracking->mIOPtrs.clustersNative;
895 }
896 mClNative = clNative;
897
898#ifndef GPUCA_RUN2
899 uint32_t nSimEvents = GetNMCCollissions();
900 if (mTrackMCLabelsReverse.size() < nSimEvents) {
901 mTrackMCLabelsReverse.resize(nSimEvents);
902 }
903 if (mRecTracks.size() < nSimEvents) {
904 mRecTracks.resize(nSimEvents);
905 }
906 if (mFakeTracks.size() < nSimEvents) {
907 mFakeTracks.resize(nSimEvents);
908 }
909 if (mMCParam.size() < nSimEvents) {
910 mMCParam.resize(nSimEvents);
911 }
912#endif
913
914 // Initialize Arrays
915 uint32_t nReconstructedTracks = 0;
916 if (tracksExternal) {
917#ifndef GPUCA_STANDALONE
918 nReconstructedTracks = tracksExternal->size();
919#endif
920 } else {
921 nReconstructedTracks = mTracking->mIOPtrs.nMergedTracks;
922 }
923 mTrackMCLabels.resize(nReconstructedTracks);
924 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
925 mTrackMCLabelsReverse[iCol].resize(GetNMCTracks(iCol));
926 mRecTracks[iCol].resize(GetNMCTracks(iCol));
927 mFakeTracks[iCol].resize(GetNMCTracks(iCol));
928 mMCParam[iCol].resize(GetNMCTracks(iCol));
929 memset(mRecTracks[iCol].data(), 0, mRecTracks[iCol].size() * sizeof(mRecTracks[iCol][0]));
930 memset(mFakeTracks[iCol].data(), 0, mFakeTracks[iCol].size() * sizeof(mFakeTracks[iCol][0]));
931 for (size_t i = 0; i < mTrackMCLabelsReverse[iCol].size(); i++) {
932 mTrackMCLabelsReverse[iCol][i] = -1;
933 }
934 }
935 if (mQATasks & taskClusterAttach && GetNMCLabels()) {
936 mClusterParam.resize(GetNMCLabels());
937 memset(mClusterParam.data(), 0, mClusterParam.size() * sizeof(mClusterParam[0]));
938 }
939 HighResTimer timer(QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2));
940
941 mNEvents++;
942 if (mConfig.writeMCLabels) {
943 mcEffBuffer.resize(mNEvents);
944 mcLabelBuffer.resize(mNEvents);
945 mcEffBuffer[mNEvents - 1].resize(GetNMCTracks(0));
946 mcLabelBuffer[mNEvents - 1].resize(nReconstructedTracks);
947 }
948
949 bool mcAvail = mcPresent() || tracksExtMC;
950
951 if (mcAvail) { // Assign Track MC Labels
952 if (tracksExternal) {
953#ifndef GPUCA_STANDALONE
954 for (uint32_t i = 0; i < tracksExternal->size(); i++) {
955 mTrackMCLabels[i] = (*tracksExtMC)[i];
956 }
957#endif
958 } else {
959 tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nReconstructedTracks, (QA_DEBUG == 0) ? 32 : nReconstructedTracks), [&](const tbb::blocked_range<uint32_t>& range) {
960 auto acc = GPUTPCTrkLbl<true, mcLabelI_t>(GetClusterLabels(), 1.f - mConfig.recThreshold);
961 for (auto i = range.begin(); i < range.end(); i++) {
962 acc.reset();
963 int32_t nClusters = 0;
964 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
965 std::vector<mcLabel_t> labels;
966 for (uint32_t k = 0; k < track.NClusters(); k++) {
967 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
968 continue;
969 }
970 nClusters++;
971 uint32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
972 if (hitId >= GetNMCLabels()) {
973 GPUError("Invalid hit id %u > %d (nClusters %d)", hitId, GetNMCLabels(), clNative ? clNative->nClustersTotal : 0);
974 throw std::runtime_error("qa error");
975 }
976 acc.addLabel(hitId);
977 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
978 if (GetMCLabelID(hitId, j) >= (int32_t)GetNMCTracks(GetMCLabelCol(hitId, j))) {
979 GPUError("Invalid label %d > %d (hit %d, label %d, col %d)", GetMCLabelID(hitId, j), GetNMCTracks(GetMCLabelCol(hitId, j)), hitId, j, (int32_t)GetMCLabelCol(hitId, j));
980 throw std::runtime_error("qa error");
981 }
982 if (GetMCLabelID(hitId, j) >= 0) {
983 if (QA_DEBUG >= 3 && track.OK()) {
984 GPUInfo("Track %d Cluster %u Label %d: %d (%f)", i, k, j, GetMCLabelID(hitId, j), GetMCLabelWeight(hitId, j));
985 }
986 }
987 }
988 }
989
990 float maxweight, sumweight;
991 int32_t maxcount;
992 auto maxLabel = acc.computeLabel(&maxweight, &sumweight, &maxcount);
993 mTrackMCLabels[i] = maxLabel;
994 if (QA_DEBUG && track.OK() && GetNMCTracks(maxLabel) > (uint32_t)maxLabel.getTrackID()) {
995 const mcInfo_t& mc = GetMCTrack(maxLabel);
996 GPUInfo("Track %d label %d (fake %d) weight %f clusters %d (fitted %d) (%f%% %f%%) Pt %f", i, maxLabel.getTrackID(), (int32_t)(maxLabel.isFake()), maxweight, nClusters, track.NClustersFitted(), 100.f * maxweight / sumweight, 100.f * (float)maxcount / (float)nClusters,
997 std::sqrt(mc.pX * mc.pX + mc.pY * mc.pY));
998 }
999 }
1000 });
1001 }
1002 if (timer.IsRunning()) {
1003 GPUInfo("QA Time: Assign Track Labels:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1004 }
1005
1006 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1007 const GPUTPCGMMergedTrack* track = mTracking ? &mTracking->mIOPtrs.mergedTracks[i] : nullptr;
1008 mcLabelI_t label = mTrackMCLabels[i];
1009 if (mQATasks & taskClusterAttach) {
1010 // fill cluster attachment status
1011 if (!track->OK()) {
1012 continue;
1013 }
1014 if (!mTrackMCLabels[i].isValid()) {
1015 for (uint32_t k = 0; k < track->NClusters(); k++) {
1016 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1017 continue;
1018 }
1019 mClusterParam[mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num].fakeAttached++;
1020 }
1021 continue;
1022 }
1023 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1024 for (uint32_t k = 0; k < track->NClusters(); k++) {
1025 if (mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1026 continue;
1027 }
1028 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num;
1029 bool correct = false;
1030 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1031 if (label == GetMCLabel(hitId, j)) {
1032 correct = true;
1033 break;
1034 }
1035 }
1036 if (correct) {
1037 mClusterParam[hitId].attached++;
1038 } else {
1039 mClusterParam[hitId].fakeAttached++;
1040 }
1041 }
1042 }
1043 }
1044
1045 if (mTrackMCLabels[i].isFake()) {
1046 (GetMCTrackObj(mFakeTracks, label))++;
1047 } else if (tracksExternal || !track->MergedLooper()) {
1048 GetMCTrackObj(mRecTracks, label)++;
1049 if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
1050 int32_t& revLabel = GetMCTrackObj(mTrackMCLabelsReverse, label);
1051 if (tracksExternal) {
1052#ifndef GPUCA_STANDALONE
1053 if (revLabel == -1 || fabsf((*tracksExternal)[i].getZ()) < fabsf((*tracksExternal)[revLabel].getZ())) {
1054 revLabel = i;
1055 }
1056#endif
1057 } else {
1058 const auto* trks = mTracking->mIOPtrs.mergedTracks;
1059 bool comp;
1060 if (revLabel == -1) {
1061 comp = true;
1062 } else {
1063 float shift1 = mTracking->GetTPCTransform()->convDeltaTimeToDeltaZinTimeFrame(trks[i].CSide() * GPUChainTracking::NSECTORS / 2, trks[i].GetParam().GetTOffset());
1064 float shift2 = mTracking->GetTPCTransform()->convDeltaTimeToDeltaZinTimeFrame(trks[revLabel].CSide() * GPUChainTracking::NSECTORS / 2, trks[revLabel].GetParam().GetTOffset());
1065 comp = fabsf(trks[i].GetParam().GetZ() + shift1) < fabsf(trks[revLabel].GetParam().GetZ() + shift2);
1066 }
1067 if (revLabel == -1 || !trks[revLabel].OK() || (trks[i].OK() && comp)) {
1068 revLabel = i;
1069 }
1070 }
1071 }
1072 }
1073 }
1074 if ((mQATasks & taskClusterAttach) && !tracksExternal) {
1075 std::vector<uint8_t> lowestPadRow(mTracking->mIOPtrs.nMergedTracks);
1076 // fill cluster adjacent status
1077 if (mTracking->mIOPtrs.mergedTrackHitAttachment) {
1078 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1079 if (mClusterParam[i].attached == 0 && mClusterParam[i].fakeAttached == 0) {
1080 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1082 int32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1083 mcLabelI_t trackL = mTrackMCLabels[track];
1084 bool fake = true;
1085 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1086 // GPUInfo("Attach %x Track %d / %d:%d", attach, track, j, GetMCLabelID(i, j));
1087 if (trackL == GetMCLabel(i, j)) {
1088 fake = false;
1089 break;
1090 }
1091 }
1092 if (fake) {
1093 mClusterParam[i].fakeAdjacent++;
1094 } else {
1095 mClusterParam[i].adjacent++;
1096 }
1097 }
1098 }
1099 }
1100 }
1101 if (mTracking->mIOPtrs.nMergedTracks && clNative) {
1102 std::fill(lowestPadRow.begin(), lowestPadRow.end(), 255);
1103 for (uint32_t iSector = 0; iSector < GPUTPCGeometry::NSECTORS; iSector++) {
1104 for (uint32_t iRow = 0; iRow < GPUTPCGeometry::NROWS; iRow++) {
1105 for (uint32_t iCl = 0; iCl < clNative->nClusters[iSector][iRow]; iCl++) {
1106 int32_t i = clNative->clusterOffset[iSector][iRow] + iCl;
1107 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1108 uint32_t trackId = GetMCTrackObj(mTrackMCLabelsReverse, GetMCLabel(i, j));
1109 if (trackId < lowestPadRow.size() && lowestPadRow[trackId] > iRow) {
1110 lowestPadRow[trackId] = iRow;
1111 }
1112 }
1113 }
1114 }
1115 }
1116 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1117 const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
1118 if (trk.OK() && lowestPadRow[i] != 255 && trk.NClustersFitted() >= PADROW_CHECK_MINCLS && CAMath::Abs(trk.GetParam().GetQPt()) < 1.0) {
1119 const auto& lowestCl = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef()].row < mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + trk.NClusters() - 1].row ? mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef()] : mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + trk.NClusters() - 1];
1120 const int32_t lowestRow = lowestCl.row;
1121 mPadRow[0]->Fill(lowestPadRow[i], lowestRow, 1.f);
1122 mPadRow[1]->Fill(CAMath::ATan2(trk.GetParam().GetY(), trk.GetParam().GetX()), lowestRow, 1.f);
1123 if (lowestPadRow[i] < 10 && lowestRow > lowestPadRow[i] + 3) {
1124 const auto& cl = clNative->clustersLinear[lowestCl.num];
1125 float x, y, z;
1126 mTracking->GetTPCTransform()->Transform(lowestCl.sector, lowestCl.row, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
1127 float phi = CAMath::ATan2(y, x);
1128 mPadRow[2]->Fill(phi, lowestRow, 1.f);
1129 if (CAMath::Abs(phi) < 0.15) {
1130 const float time = cl.getTime();
1131 mPadRow[3]->Fill(mTracking->GetParam().GetUnscaledMult(time), lowestRow, 1.f);
1132 }
1133 }
1134 }
1135 }
1136 }
1137 }
1138
1139 if (mConfig.matchMCLabels.size()) {
1140 mGoodHits[mNEvents - 1].resize(GetNMCLabels());
1141 std::vector<bool> allowMCLabels(GetNMCTracks(0));
1142 for (uint32_t k = 0; k < GetNMCTracks(0); k++) {
1143 allowMCLabels[k] = false;
1144 }
1145 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1146 if (!mGoodTracks[mNEvents - 1][i]) {
1147 continue;
1148 }
1149 if (mConfig.matchDisplayMinPt > 0) {
1150 if (!mTrackMCLabels[i].isValid()) {
1151 continue;
1152 }
1153 const mcInfo_t& info = GetMCTrack(mTrackMCLabels[i]);
1154 if (info.pX * info.pX + info.pY * info.pY < mConfig.matchDisplayMinPt * mConfig.matchDisplayMinPt) {
1155 continue;
1156 }
1157 }
1158
1159 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1160 for (uint32_t j = 0; j < track.NClusters(); j++) {
1161 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + j].num;
1162 if (GetMCLabelNID(hitId)) {
1163 int32_t mcID = GetMCLabelID(hitId, 0);
1164 if (mcID >= 0) {
1165 allowMCLabels[mcID] = true;
1166 }
1167 }
1168 }
1169 }
1170 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1171 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1172 int32_t mcID = GetMCLabelID(i, j);
1173 if (mcID >= 0 && allowMCLabels[mcID]) {
1174 mGoodHits[mNEvents - 1][i] = true;
1175 }
1176 }
1177 }
1178 }
1179 if (timer.IsRunning()) {
1180 GPUInfo("QA Time: Cluster attach status:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1181 }
1182
1183 if (matchOnly) {
1184 return;
1185 }
1186
1187 // Recompute fNWeightCls (might have changed after merging events into timeframes)
1188 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1189 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1190 mMCParam[iCol][i].nWeightCls = 0.;
1191 }
1192 }
1193 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1194 float weightTotal = 0.f;
1195 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1196 if (GetMCLabelID(i, j) >= 0) {
1197 weightTotal += GetMCLabelWeight(i, j);
1198 }
1199 }
1200 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1201 if (GetMCLabelID(i, j) >= 0) {
1202 GetMCTrackObj(mMCParam, GetMCLabel(i, j)).nWeightCls += GetMCLabelWeight(i, j) / weightTotal;
1203 }
1204 }
1205 }
1206 if (timer.IsRunning()) {
1207 GPUInfo("QA Time: Compute cluster label weights:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1208 }
1209
1210 // Compute MC Track Parameters for MC Tracks
1211 tbb::parallel_for<uint32_t>(0, GetNMCCollissions(), [&](auto iCol) {
1212 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1213 const mcInfo_t& info = GetMCTrack(i, iCol);
1214 additionalMCParameters& mc2 = mMCParam[iCol][i];
1215 mc2.pt = std::sqrt(info.pX * info.pX + info.pY * info.pY);
1216 mc2.phi = M_PI + std::atan2(-info.pY, -info.pX);
1217 float p = info.pX * info.pX + info.pY * info.pY + info.pZ * info.pZ;
1218 if (p < 1e-18) {
1219 mc2.theta = mc2.eta = 0.f;
1220 } else {
1221 mc2.theta = info.pZ == 0 ? (M_PI / 2) : (std::acos(info.pZ / std::sqrt(p)));
1222 mc2.eta = -std::log(std::tan(0.5 * mc2.theta));
1223 }
1224 if (mConfig.writeMCLabels) {
1225 std::vector<int32_t>& effBuffer = mcEffBuffer[mNEvents - 1];
1226 effBuffer[i] = mRecTracks[iCol][i] * 1000 + mFakeTracks[iCol][i];
1227 }
1228 } // clang-format off
1229 }, tbb::simple_partitioner()); // clang-format on
1230 if (timer.IsRunning()) {
1231 GPUInfo("QA Time: Compute track mc parameters:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1232 }
1233
1234 // Fill Efficiency Histograms
1235 if (mQATasks & taskTrackingEff) {
1236 for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
1237 for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
1238 if ((mMCTrackMin != -1 && (int32_t)i < mMCTrackMin) || (mMCTrackMax != -1 && (int32_t)i >= mMCTrackMax)) {
1239 continue;
1240 }
1241 const mcInfo_t& info = GetMCTrack(i, iCol);
1242 const additionalMCParameters& mc2 = mMCParam[iCol][i];
1243 if (mc2.nWeightCls == 0.f) {
1244 continue;
1245 }
1246 const float& mcpt = mc2.pt;
1247 const float& mcphi = mc2.phi;
1248 const float& mceta = mc2.eta;
1249
1250 if (info.primDaughters) {
1251 continue;
1252 }
1253 if (mc2.nWeightCls < mConfig.minNClEff) {
1254 continue;
1255 }
1256 int32_t findable = mc2.nWeightCls >= mConfig.minNClFindable;
1257 if (info.pid < 0) {
1258 continue;
1259 }
1260 if (info.charge == 0.f) {
1261 continue;
1262 }
1263 if (mConfig.filterCharge && info.charge * mConfig.filterCharge < 0) {
1264 continue;
1265 }
1266 if (mConfig.filterPID >= 0 && info.pid != mConfig.filterPID) {
1267 continue;
1268 }
1269
1270 if (fabsf(mceta) > ETA_MAX || mcpt < PT_MIN || mcpt > PT_MAX) {
1271 continue;
1272 }
1273
1274 float alpha = std::atan2(info.y, info.x);
1275 alpha /= M_PI / 9.f;
1276 alpha = std::floor(alpha);
1277 alpha *= M_PI / 9.f;
1278 alpha += M_PI / 18.f;
1279
1280 float c = std::cos(alpha);
1281 float s = std::sin(alpha);
1282 float localY = -info.x * s + info.y * c;
1283
1284 if (mConfig.dumpToROOT) {
1285 static auto effdump = GPUROOTDump<TNtuple>::getNew("eff", "alpha:x:y:z:mcphi:mceta:mcpt:rec:fake:findable:prim:ncls");
1286 float localX = info.x * c + info.y * s;
1287 effdump.Fill(alpha, localX, localY, info.z, mcphi, mceta, mcpt, mRecTracks[iCol][i], mFakeTracks[iCol][i], findable, info.prim, mc2.nWeightCls);
1288 }
1289
1290 for (int32_t j = 0; j < 6; j++) {
1291 if (j == 3 || j == 4) {
1292 continue;
1293 }
1294 for (int32_t k = 0; k < 2; k++) {
1295 if (k == 0 && findable == 0) {
1296 continue;
1297 }
1298
1299 int32_t val = (j == 0) ? (mRecTracks[iCol][i] ? 1 : 0) : (j == 1) ? (mRecTracks[iCol][i] ? mRecTracks[iCol][i] - 1 : 0) : (j == 2) ? mFakeTracks[iCol][i] : 1;
1300 if (val == 0) {
1301 continue;
1302 }
1303
1304 for (int32_t l = 0; l < 5; l++) {
1305 if (info.prim && mcpt < PT_MIN_PRIM) {
1306 continue;
1307 }
1308 if (l != 3 && fabsf(mceta) > ETA_MAX2) {
1309 continue;
1310 }
1311 if (l < 4 && mcpt < 1.f / mConfig.qpt) {
1312 continue;
1313 }
1314
1315 float pos = l == 0 ? localY : l == 1 ? info.z : l == 2 ? mcphi : l == 3 ? mceta : mcpt;
1316
1317 mEff[j][k][!info.prim][l]->Fill(pos, val);
1318 }
1319 }
1320 }
1321 }
1322 }
1323 if (timer.IsRunning()) {
1324 GPUInfo("QA Time: Fill efficiency histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1325 }
1326 }
1327
1328 // Fill Resolution Histograms
1329 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
1330 GPUTPCGMPropagator prop;
1331 prop.SetMaxSinPhi(.999);
1332 prop.SetMaterialTPC();
1333 prop.SetPolynomialField(&mParam->polynomialField);
1334
1335 for (uint32_t i = 0; i < mTrackMCLabels.size(); i++) {
1336 if (mConfig.writeMCLabels) {
1337 std::vector<int32_t>& labelBuffer = mcLabelBuffer[mNEvents - 1];
1338 labelBuffer[i] = mTrackMCLabels[i].getTrackID();
1339 }
1340 if (mTrackMCLabels[i].isFake()) {
1341 continue;
1342 }
1343 const mcInfo_t& mc1 = GetMCTrack(mTrackMCLabels[i]);
1344 const additionalMCParameters& mc2 = GetMCTrackObj(mMCParam, mTrackMCLabels[i]);
1345
1346 if (mc1.primDaughters) {
1347 continue;
1348 }
1349 if (!tracksExternal) {
1350 if (!mTracking->mIOPtrs.mergedTracks[i].OK()) {
1351 continue;
1352 }
1353 if (mTracking->mIOPtrs.mergedTracks[i].MergedLooper()) {
1354 continue;
1355 }
1356 }
1357 if ((mMCTrackMin != -1 && mTrackMCLabels[i].getTrackID() < mMCTrackMin) || (mMCTrackMax != -1 && mTrackMCLabels[i].getTrackID() >= mMCTrackMax)) {
1358 continue;
1359 }
1360 if (fabsf(mc2.eta) > ETA_MAX || mc2.pt < PT_MIN || mc2.pt > PT_MAX) {
1361 continue;
1362 }
1363 if (mc1.charge == 0.f) {
1364 continue;
1365 }
1366 if (mc1.pid < 0) {
1367 continue;
1368 }
1369#ifndef GPUCA_RUN2
1370 if (mc1.t0 == -100.f) {
1371 continue;
1372 }
1373#endif
1374 if (mConfig.filterCharge && mc1.charge * mConfig.filterCharge < 0) {
1375 continue;
1376 }
1377 if (mConfig.filterPID >= 0 && mc1.pid != mConfig.filterPID) {
1378 continue;
1379 }
1380 if (mc2.nWeightCls < mConfig.minNClRes) {
1381 continue;
1382 }
1383 if (mConfig.resPrimaries == 1 && !mc1.prim) {
1384 continue;
1385 } else if (mConfig.resPrimaries == 2 && mc1.prim) {
1386 continue;
1387 }
1388 if (GetMCTrackObj(mTrackMCLabelsReverse, mTrackMCLabels[i]) != (int32_t)i) {
1389 continue;
1390 }
1391
1393 float alpha = 0.f;
1394 int32_t side;
1395 if (tracksExternal) {
1396#ifndef GPUCA_STANDALONE
1397 for (int32_t k = 0; k < 5; k++) {
1398 param.Par()[k] = (*tracksExternal)[i].getParams()[k];
1399 }
1400 for (int32_t k = 0; k < 15; k++) {
1401 param.Cov()[k] = (*tracksExternal)[i].getCov()[k];
1402 }
1403 param.X() = (*tracksExternal)[i].getX();
1404 param.TOffset() = (*tracksExternal)[i].getTime0();
1405 alpha = (*tracksExternal)[i].getAlpha();
1406 side = (*tracksExternal)[i].hasBothSidesClusters() ? 2 : ((*tracksExternal)[i].hasCSideClusters() ? 1 : 0);
1407#endif
1408 } else {
1409 param = mTracking->mIOPtrs.mergedTracks[i].GetParam();
1410 alpha = mTracking->mIOPtrs.mergedTracks[i].GetAlpha();
1411 side = mTracking->mIOPtrs.mergedTracks[i].CCE() ? 2 : (mTracking->mIOPtrs.mergedTracks[i].CSide() ? 1 : 0);
1412 }
1413
1414 float mclocal[4]; // Rotated x,y,Px,Py mc-coordinates - the MC data should be rotated since the track is propagated best along x
1415 float c = std::cos(alpha);
1416 float s = std::sin(alpha);
1417 float x = mc1.x;
1418 float y = mc1.y;
1419 mclocal[0] = x * c + y * s;
1420 mclocal[1] = -x * s + y * c;
1421 float px = mc1.pX;
1422 float py = mc1.pY;
1423 mclocal[2] = px * c + py * s;
1424 mclocal[3] = -px * s + py * c;
1425
1426 if (mclocal[0] < TRACK_EXPECTED_REFERENCE_X - 3) {
1427 continue;
1428 }
1429 if (mclocal[0] > param.GetX() + 20) {
1430 continue;
1431 }
1432 if (param.GetX() > mConfig.maxResX) {
1433 continue;
1434 }
1435
1436 auto getdz = [this, &param, &mc1, &side, tracksExternal]() {
1437 if (tracksExternal) {
1438 return param.GetZ();
1439 }
1440 if (!mParam->continuousMaxTimeBin) {
1441 return param.GetZ() - mc1.z;
1442 }
1443 float shift = 0;
1444#ifndef GPUCA_RUN2
1445 if (side != 2) {
1446 shift = mTracking->GetTPCTransform()->convDeltaTimeToDeltaZinTimeFrame(side * GPUChainTracking::NSECTORS / 2, param.GetTOffset() - mc1.t0);
1447 }
1448#endif
1449 return param.GetZ() + shift - mc1.z;
1450 };
1451
1452 prop.SetTrack(&param, alpha);
1453 bool inFlyDirection = 0;
1454 if (mConfig.strict) {
1455 const float dx = param.X() - std::max<float>(mclocal[0], TRACK_EXPECTED_REFERENCE_X_DEFAULT); // Limit distance check
1456 const float dy = param.Y() - mclocal[1];
1457 const float dz = getdz();
1458 if (dx * dx + dy * dy + dz * dz > 5.f * 5.f) {
1459 continue;
1460 }
1461 }
1462
1463 if (prop.PropagateToXAlpha(mclocal[0], alpha, inFlyDirection)) {
1464 continue;
1465 }
1466 if (fabsf(param.Y() - mclocal[1]) > (mConfig.strict ? 1.f : 4.f) || fabsf(getdz()) > (mConfig.strict ? 1.f : 4.f)) {
1467 continue;
1468 }
1469 float charge = mc1.charge > 0 ? 1.f : -1.f;
1470
1471 float deltaY = param.GetY() - mclocal[1];
1472 float deltaZ = getdz();
1473 float deltaPhiNative = param.GetSinPhi() - mclocal[3] / mc2.pt;
1474 float deltaPhi = std::asin(param.GetSinPhi()) - std::atan2(mclocal[3], mclocal[2]);
1475 float deltaLambdaNative = param.GetDzDs() - mc1.pZ / mc2.pt;
1476 float deltaLambda = std::atan(param.GetDzDs()) - std::atan2(mc1.pZ, mc2.pt);
1477 float deltaPtNative = (param.GetQPt() - charge / mc2.pt) * charge;
1478 float deltaPt = (fabsf(1.f / param.GetQPt()) - mc2.pt) / mc2.pt;
1479
1480 float paramval[5] = {mclocal[1], mc1.z, mc2.phi, mc2.eta, mc2.pt};
1481 float resval[5] = {deltaY, deltaZ, mConfig.nativeFitResolutions ? deltaPhiNative : deltaPhi, mConfig.nativeFitResolutions ? deltaLambdaNative : deltaLambda, mConfig.nativeFitResolutions ? deltaPtNative : deltaPt};
1482 float pullval[5] = {deltaY / std::sqrt(param.GetErr2Y()), deltaZ / std::sqrt(param.GetErr2Z()), deltaPhiNative / std::sqrt(param.GetErr2SinPhi()), deltaLambdaNative / std::sqrt(param.GetErr2DzDs()), deltaPtNative / std::sqrt(param.GetErr2QPt())};
1483
1484 for (int32_t j = 0; j < 5; j++) {
1485 for (int32_t k = 0; k < 5; k++) {
1486 if (k != 3 && fabsf(mc2.eta) > ETA_MAX2) {
1487 continue;
1488 }
1489 if (k < 4 && mc2.pt < 1.f / mConfig.qpt) {
1490 continue;
1491 }
1492 if (mQATasks & taskTrackingRes) {
1493 mRes2[j][k]->Fill(resval[j], paramval[k]);
1494 }
1495 if (mQATasks & taskTrackingResPull) {
1496 mPull2[j][k]->Fill(pullval[j], paramval[k]);
1497 }
1498 }
1499 }
1500 }
1501 if (timer.IsRunning()) {
1502 GPUInfo("QA Time: Fill resolution histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1503 }
1504 }
1505
1506 if ((mQATasks & taskClusterAttach) && !tracksExternal) {
1507 // Fill cluster histograms
1508 for (uint32_t iTrk = 0; iTrk < nReconstructedTracks; iTrk++) {
1509 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[iTrk];
1510 if (!track.OK()) {
1511 continue;
1512 }
1513 if (!mTrackMCLabels[iTrk].isValid()) {
1514 for (uint32_t k = 0; k < track.NClusters(); k++) {
1515 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1516 continue;
1517 }
1518 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1519 float totalWeight = 0.;
1520 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1521 if (GetMCLabelID(hitId, j) >= 0 && GetMCTrackObj(mMCParam, GetMCLabel(hitId, j)).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1522 totalWeight += GetMCLabelWeight(hitId, j);
1523 }
1524 }
1525 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1526 const auto& r = checkClusterState<false>(attach);
1527 if (totalWeight > 0) {
1528 float weight = 1.f / (totalWeight * (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached));
1529 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1530 mcLabelI_t label = GetMCLabel(hitId, j);
1531 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1532 float pt = GetMCTrackObj(mMCParam, label).pt;
1533 if (pt < PT_MIN_CLUST) {
1534 pt = PT_MIN_CLUST;
1535 }
1536 mClusters[CL_fake]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1537 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1538 if (GetMCTrackObj(mRecTracks, label)) {
1539 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1540 }
1541 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1542 if (r.protect || r.physics) {
1543 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1544 }
1545 if (r.physics) {
1546 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
1547 }
1548 }
1549 }
1550 } else {
1551 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1552 mClusters[CL_fake]->Fill(0.f, weight);
1553 mClusters[CL_att_adj]->Fill(0.f, weight);
1554 mClusters[CL_all]->Fill(0.f, weight);
1555 mClusterCounts.nUnaccessible += weight;
1556 if (r.protect || r.physics) {
1557 mClusters[CL_prot]->Fill(0.f, weight);
1558 }
1559 if (r.physics) {
1560 mClusters[CL_physics]->Fill(0.f, weight);
1561 }
1562 }
1563 }
1564 continue;
1565 }
1566 mcLabelI_t label = mTrackMCLabels[iTrk];
1567 if (mMCTrackMin != -1 && (label.getTrackID() < mMCTrackMin || label.getTrackID() >= mMCTrackMax)) {
1568 continue;
1569 }
1570 for (uint32_t k = 0; k < track.NClusters(); k++) {
1571 if (mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].state & GPUTPCGMMergedTrackHit::flagReject) {
1572 continue;
1573 }
1574 int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
1575 float pt = GetMCTrackObj(mMCParam, label).pt;
1576 if (pt < PT_MIN_CLUST) {
1577 pt = PT_MIN_CLUST;
1578 }
1579 float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
1580 bool correct = false;
1581 for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
1582 if (label == GetMCLabel(hitId, j)) {
1583 correct = true;
1584 break;
1585 }
1586 }
1587 if (correct) {
1588 mClusters[CL_attached]->Fill(pt, weight);
1589 mClusters[CL_tracks]->Fill(pt, weight);
1590 } else {
1591 mClusters[CL_fake]->Fill(pt, weight);
1592 }
1593 mClusters[CL_att_adj]->Fill(pt, weight);
1594 mClusters[CL_all]->Fill(pt, weight);
1595 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
1596 const auto& r = checkClusterState<false>(attach);
1597 if (r.protect || r.physics) {
1598 mClusters[CL_prot]->Fill(pt, weight);
1599 }
1600 if (r.physics) {
1601 mClusters[CL_physics]->Fill(pt, weight);
1602 }
1603 }
1604 }
1605 for (uint32_t i = 0; i < GetNMCLabels(); i++) {
1606 if ((mMCTrackMin != -1 && GetMCLabelID(i, 0) < mMCTrackMin) || (mMCTrackMax != -1 && GetMCLabelID(i, 0) >= mMCTrackMax)) {
1607 continue;
1608 }
1609 if (mClusterParam[i].attached || mClusterParam[i].fakeAttached) {
1610 continue;
1611 }
1612 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1613 const auto& r = checkClusterState<false>(attach);
1614 if (mClusterParam[i].adjacent) {
1615 int32_t label = mTracking->mIOPtrs.mergedTrackHitAttachment[i] & gputpcgmmergertypes::attachTrackMask;
1616 if (!mTrackMCLabels[label].isValid()) {
1617 float totalWeight = 0.;
1618 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1619 mcLabelI_t labelT = GetMCLabel(i, j);
1620 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1621 totalWeight += GetMCLabelWeight(i, j);
1622 }
1623 }
1624 float weight = 1.f / totalWeight;
1625 if (totalWeight > 0) {
1626 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1627 mcLabelI_t labelT = GetMCLabel(i, j);
1628 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1629 float pt = GetMCTrackObj(mMCParam, labelT).pt;
1630 if (pt < PT_MIN_CLUST) {
1631 pt = PT_MIN_CLUST;
1632 }
1633 if (GetMCTrackObj(mRecTracks, labelT)) {
1634 mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1635 }
1636 mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1637 mClusters[CL_fakeAdj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1638 mClusters[CL_all]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1639 if (r.protect || r.physics) {
1640 mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1641 }
1642 if (r.physics) {
1643 mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(i, j) * weight);
1644 }
1645 }
1646 }
1647 } else {
1648 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1649 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1650 mClusters[CL_all]->Fill(0.f, 1.f);
1651 mClusterCounts.nUnaccessible++;
1652 if (r.protect || r.physics) {
1653 mClusters[CL_prot]->Fill(0.f, 1.f);
1654 }
1655 if (r.physics) {
1656 mClusters[CL_physics]->Fill(0.f, 1.f);
1657 }
1658 }
1659 } else {
1660 float pt = GetMCTrackObj(mMCParam, mTrackMCLabels[label]).pt;
1661 if (pt < PT_MIN_CLUST) {
1662 pt = PT_MIN_CLUST;
1663 }
1664 mClusters[CL_att_adj]->Fill(pt, 1.f);
1665 mClusters[CL_tracks]->Fill(pt, 1.f);
1666 mClusters[CL_all]->Fill(pt, 1.f);
1667 if (r.protect || r.physics) {
1668 mClusters[CL_prot]->Fill(pt, 1.f);
1669 }
1670 if (r.physics) {
1671 mClusters[CL_physics]->Fill(pt, 1.f);
1672 }
1673 }
1674 } else {
1675 float totalWeight = 0.;
1676 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1677 mcLabelI_t labelT = GetMCLabel(i, j);
1678 if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1679 totalWeight += GetMCLabelWeight(i, j);
1680 }
1681 }
1682 if (totalWeight > 0) {
1683 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1684 mcLabelI_t label = GetMCLabel(i, j);
1685 if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
1686 float pt = GetMCTrackObj(mMCParam, label).pt;
1687 if (pt < PT_MIN_CLUST) {
1688 pt = PT_MIN_CLUST;
1689 }
1690 float weight = GetMCLabelWeight(i, j) / totalWeight;
1691 if (mClusterParam[i].fakeAdjacent) {
1692 mClusters[CL_fakeAdj]->Fill(pt, weight);
1693 }
1694 if (mClusterParam[i].fakeAdjacent) {
1695 mClusters[CL_att_adj]->Fill(pt, weight);
1696 }
1697 if (GetMCTrackObj(mRecTracks, label)) {
1698 mClusters[CL_tracks]->Fill(pt, weight);
1699 }
1700 mClusters[CL_all]->Fill(pt, weight);
1701 if (r.protect || r.physics) {
1702 mClusters[CL_prot]->Fill(pt, weight);
1703 }
1704 if (r.physics) {
1705 mClusters[CL_physics]->Fill(pt, weight);
1706 }
1707 }
1708 }
1709 } else {
1710 if (mClusterParam[i].fakeAdjacent) {
1711 mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
1712 }
1713 if (mClusterParam[i].fakeAdjacent) {
1714 mClusters[CL_att_adj]->Fill(0.f, 1.f);
1715 }
1716 mClusters[CL_all]->Fill(0.f, 1.f);
1717 mClusterCounts.nUnaccessible++;
1718 if (r.protect || r.physics) {
1719 mClusters[CL_prot]->Fill(0.f, 1.f);
1720 }
1721 if (r.physics) {
1722 mClusters[CL_physics]->Fill(0.f, 1.f);
1723 }
1724 }
1725 }
1726 }
1727
1728 if (timer.IsRunning()) {
1729 GPUInfo("QA Time: Fill cluster histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1730 }
1731 }
1732 } else if (!mConfig.inputHistogramsOnly && !mConfig.noMC && (mQATasks & (taskTrackingEff | taskTrackingRes | taskTrackingResPull | taskClusterAttach))) {
1733 GPUWarning("No MC information available, only running partial TPC QA!");
1734 } // mcAvail
1735
1736 if ((mQATasks & taskTrackStatistics) && !tracksExternal) {
1737 // Fill track statistic histograms
1738 std::vector<std::array<float, 3>> clusterAttachCounts;
1739 if (mcAvail) {
1740 clusterAttachCounts.resize(GetNMCLabels(), {0.f, 0.f});
1741 }
1742 for (uint32_t i = 0; i < nReconstructedTracks; i++) {
1743 const GPUTPCGMMergedTrack& track = mTracking->mIOPtrs.mergedTracks[i];
1744 if (!track.OK()) {
1745 continue;
1746 }
1747 mTrackPt->Fill(1.f / fabsf(track.GetParam().GetQPt()));
1748 mNCl[0]->Fill(track.NClustersFitted());
1749 uint32_t nClCorrected = 0;
1750 const auto& trackClusters = mTracking->mIOPtrs.mergedTrackHits;
1751 uint32_t jNext = 0;
1752 for (uint32_t j = 0; j < track.NClusters(); j = jNext) {
1753 uint32_t rowClCount = !(trackClusters[track.FirstClusterRef() + j].state & GPUTPCGMMergedTrackHit::flagReject);
1754 for (jNext = j + 1; j < track.NClusters(); jNext++) {
1755 if (trackClusters[track.FirstClusterRef() + j].sector != trackClusters[track.FirstClusterRef() + jNext].sector || trackClusters[track.FirstClusterRef() + j].row != trackClusters[track.FirstClusterRef() + jNext].row) {
1756 break;
1757 }
1758 rowClCount += !(trackClusters[track.FirstClusterRef() + jNext].state & GPUTPCGMMergedTrackHit::flagReject);
1759 }
1760 if (!track.MergedLooper() && rowClCount) {
1761 nClCorrected++;
1762 }
1763 if (mcAvail && rowClCount) {
1764 for (uint32_t k = j; k < jNext; k++) {
1765 const auto& cl = trackClusters[track.FirstClusterRef() + k];
1766 if (cl.state & GPUTPCGMMergedTrackHit::flagReject) {
1767 continue;
1768 }
1769 bool labelOk = false, labelOkNonFake = false;
1770 const mcLabelI_t& trkLabel = mTrackMCLabels[i];
1771 if (trkLabel.isValid() && !trkLabel.isNoise()) {
1772 for (int32_t l = 0; l < GetMCLabelNID(cl.num); l++) {
1773 const mcLabelI_t& clLabel = GetMCLabel(cl.num, l);
1774 if (clLabel.isValid() && !clLabel.isNoise() && CompareIgnoreFake(trkLabel, clLabel)) {
1775 labelOk = true;
1776 if (!trkLabel.isFake()) {
1777 labelOkNonFake = true;
1778 }
1779 break;
1780 }
1781 }
1782 }
1783 clusterAttachCounts[cl.num][0] += 1.0f;
1784 clusterAttachCounts[cl.num][1] += (float)labelOk / rowClCount;
1785 clusterAttachCounts[cl.num][2] += (float)labelOkNonFake / rowClCount;
1786 }
1787 }
1788 }
1789 if (nClCorrected) {
1790 mNCl[1]->Fill(nClCorrected);
1791 }
1792 mT0[0]->Fill(track.GetParam().GetTOffset());
1793#ifndef GPUCA_RUN2
1794 if (mTrackMCLabels.size() && !mTrackMCLabels[i].isFake() && !track.MergedLooper() && !track.CCE()) {
1795 const auto& info = GetMCTrack(mTrackMCLabels[i]);
1796 if (info.t0 != -100.f) {
1797 mT0[1]->Fill(track.GetParam().GetTOffset() - info.t0);
1798 }
1799 }
1800#endif
1801 }
1802 if (mClNative && mTracking && mTracking->GetTPCTransform()) {
1803 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1804 for (uint32_t j = 0; j < GPUTPCGeometry::NROWS; j++) {
1805 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1806 const auto& cl = mClNative->clusters[i][j][k];
1807 float x, y, z;
1808 GPUTPCConvertImpl::convert(*mTracking->GetTPCTransform(), mTracking->GetParam(), i, j, cl.getPad(), cl.getTime(), x, y, z);
1809 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1810 mClXY->Fill(x, y);
1811 }
1812 }
1813 }
1814 }
1815 if (mcAvail) {
1816 double clusterAttachNormalizedCount = 0, clusterAttachNormalizedCountNonFake = 0;
1817 for (uint32_t i = 0; i < clusterAttachCounts.size(); i++) {
1818 if (clusterAttachCounts[i][0]) {
1819 clusterAttachNormalizedCount += clusterAttachCounts[i][1] / clusterAttachCounts[i][0];
1820 clusterAttachNormalizedCountNonFake += clusterAttachCounts[i][2] / clusterAttachCounts[i][0];
1821 }
1822 }
1823 mClusterCounts.nCorrectlyAttachedNormalized = clusterAttachNormalizedCount;
1824 mClusterCounts.nCorrectlyAttachedNormalizedNonFake = clusterAttachNormalizedCountNonFake;
1825 clusterAttachCounts.clear();
1826 }
1827
1828 if (timer.IsRunning()) {
1829 GPUInfo("QA Time: Fill track statistics:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1830 }
1831 }
1832
1833 uint32_t nCl = clNative ? clNative->nClustersTotal : mTracking->GetProcessors()->tpcMerger.NMaxClusters();
1834 mClusterCounts.nTotal += nCl;
1835 if (mQATasks & (taskClusterCounts | taskClusterRejection)) {
1836 for (uint32_t iSector = 0; iSector < GPUTPCGeometry::NSECTORS; iSector++) {
1837 for (uint32_t iRow = 0; iRow < GPUTPCGeometry::NROWS; iRow++) {
1838 for (uint32_t iCl = 0; iCl < clNative->nClusters[iSector][iRow]; iCl++) {
1839 uint32_t i = clNative->clusterOffset[iSector][iRow] + iCl;
1840 int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
1841 const auto& r = checkClusterState<true>(attach, &mClusterCounts);
1842
1843 if (mQATasks & taskClusterRejection) {
1844 if (mcAvail) {
1845 float totalWeight = 0, weight400 = 0, weight40 = 0;
1846 for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
1847 const auto& label = GetMCLabel(i, j);
1848 if (GetMCLabelID(label) >= 0) {
1849 totalWeight += GetMCLabelWeight(label);
1850 if (GetMCTrackObj(mMCParam, label).pt >= 0.4) {
1851 weight400 += GetMCLabelWeight(label);
1852 }
1853 if (GetMCTrackObj(mMCParam, label).pt <= 0.04) {
1854 weight40 += GetMCLabelWeight(label);
1855 }
1856 }
1857 }
1858 if (totalWeight > 0 && 10.f * weight400 >= totalWeight) {
1859 if (!r.unattached && !r.protect && !r.physics) {
1860 mClusterCounts.nFakeRemove400++;
1861 int32_t totalFake = weight400 < 0.9f * totalWeight;
1862 if (totalFake) {
1863 mClusterCounts.nFullFakeRemove400++;
1864 }
1865 /*printf("Fake removal (%d): Hit %7d, attached %d lowPt %d looper %d tube200 %d highIncl %d tube %d bad %d recPt %7.2f recLabel %6d", totalFake, i, (int32_t) (mClusterParam[i].attached || mClusterParam[i].fakeAttached),
1866 (int32_t) lowPt, (int32_t) ((attach & gputpcgmmergertypes::attachGoodLeg) == 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) && mev200),
1867 (int32_t) ((attach & gputpcgmmergertypes::attachHighIncl) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachTube) != 0), (int32_t) ((attach & gputpcgmmergertypes::attachGood) == 0),
1868 fabsf(qpt) > 0 ? 1.f / qpt : 0.f, id);
1869 for (int32_t j = 0;j < GetMCLabelNID(i);j++)
1870 {
1871 //if (GetMCLabelID(i, j) < 0) break;
1872 printf(" - label%d %6d weight %5d", j, GetMCLabelID(i, j), (int32_t) GetMCLabelWeight(i, j));
1873 if (GetMCLabelID(i, j) >= 0) printf(" - pt %7.2f", mMCParam[GetMCLabelID(i, j)].pt);
1874 else printf(" ");
1875 }
1876 printf("\n");*/
1877 }
1878 mClusterCounts.nAbove400++;
1879 }
1880 if (totalWeight > 0 && weight40 >= 0.9 * totalWeight) {
1881 mClusterCounts.nBelow40++;
1882 if (r.protect || r.physics) {
1883 mClusterCounts.nFakeProtect40++;
1884 }
1885 }
1886 }
1887
1888 if (r.physics) {
1889 mClusterCounts.nPhysics++;
1890 }
1891 if (r.protect) {
1892 mClusterCounts.nProt++;
1893 }
1894 if (r.unattached) {
1895 mClusterCounts.nUnattached++;
1896 }
1897 }
1898 if (mQATasks & taskClusterRejection) {
1899 if (mTracking && clNative) {
1900 const auto& cl = clNative->clustersLinear[i];
1901 mClRej[0]->Fill(cl.getPad() - GPUTPCGeometry::NPads(iRow) / 2 + 0.5, iRow, 1.f);
1902 if (!r.unattached && !r.protect) {
1903 mClRej[1]->Fill(cl.getPad() - GPUTPCGeometry::NPads(iRow) / 2 + 0.5, iRow, 1.f);
1904 }
1905 }
1906 }
1907 }
1908 }
1909 }
1910 }
1911
1912 // Process cluster count statistics
1913 if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
1914 DoClusterCounts(nullptr);
1915 mClusterCounts = counts_t();
1916 }
1917
1918 if (timer.IsRunning()) {
1919 GPUInfo("QA Time: Cluster Counts:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
1920 }
1921
1922 if (mConfig.dumpToROOT && !tracksExternal) {
1923 if (!clNative || !mTracking || !mTracking->mIOPtrs.mergedTrackHitAttachment || !mTracking->mIOPtrs.mergedTracks) {
1924 throw std::runtime_error("Cannot dump non o2::tpc::clusterNative clusters, need also hit attachmend and GPU tracks");
1925 }
1926 uint32_t clid = 0;
1927 for (uint32_t i = 0; i < GPUChainTracking::NSECTORS; i++) {
1928 for (uint32_t j = 0; j < GPUTPCGeometry::NROWS; j++) {
1929 for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
1930 const auto& cl = mClNative->clusters[i][j][k];
1931 uint32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[clid];
1932 float x = 0, y = 0, z = 0;
1934 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1935 const auto& trk = mTracking->mIOPtrs.mergedTracks[track];
1936 mTracking->GetTPCTransform()->Transform(i, j, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
1937 mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
1938 }
1939 uint32_t extState = mTracking->mIOPtrs.mergedTrackHitStates ? mTracking->mIOPtrs.mergedTrackHitStates[clid] : 0;
1940
1941 if (mConfig.dumpToROOT >= 2) {
1944 memset((void*)&trk, 0, sizeof(trk));
1945 memset((void*)&trkHit, 0, sizeof(trkHit));
1947 uint32_t track = attach & gputpcgmmergertypes::attachTrackMask;
1948 trk = mTracking->mIOPtrs.mergedTracks[track];
1949 for (uint32_t l = 0; l < trk.NClusters(); l++) {
1950 const auto& tmp = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + l];
1951 if (tmp.num == clid) {
1952 trkHit = tmp;
1953 break;
1954 }
1955 }
1956 }
1957 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, GPUTPCGMMergedTrack, GPUTPCGMMergedTrackHit, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "track", "trackHit", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1958 cldump.Fill(cl, trk, trkHit, attach, extState, x, y, z, i, j, mNEvents - 1);
1959 } else {
1960 static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
1961 cldump.Fill(cl, attach, extState, x, y, z, i, j, mNEvents - 1);
1962 }
1963 clid++;
1964 }
1965 }
1966 }
1967
1968 static auto trkdump = GPUROOTDump<uint32_t, GPUTPCGMMergedTrack>::getNew("nEv", "track", "tracksTree");
1969 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1970 if (mTracking->mIOPtrs.mergedTracks[i].OK()) {
1971 trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.mergedTracks[i]);
1972 }
1973 }
1974
1975 if (mTracking && mTracking->GetProcessingSettings().createO2Output) {
1976 static auto o2trkdump = GPUROOTDump<uint32_t, o2::tpc::TrackTPC>::getNew("nEv", "track", "tracksO2Tree");
1977 for (uint32_t i = 0; i < mTracking->mIOPtrs.nOutputTracksTPCO2; i++) {
1978 o2trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.outputTracksTPCO2[i]);
1979 }
1980 }
1981 }
1982
1983 if (mConfig.compareTrackStatus) {
1984#ifdef GPUCA_DETERMINISTIC_MODE
1985 if (!mTracking || !mTracking->GetProcessingSettings().deterministicGPUReconstruction)
1986#endif
1987 {
1988 throw std::runtime_error("Need deterministic processing to compare track status");
1989 }
1990 std::vector<uint8_t> status(mTracking->mIOPtrs.nMergedTracks);
1991 for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
1992 const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
1993 status[i] = trk.OK() && trk.NClusters() && trk.GetParam().GetNDF() > 0 && (mConfig.noMC || (mTrackMCLabels[i].isValid() && !mTrackMCLabels[i].isFake()));
1994 }
1995 if (mConfig.compareTrackStatus == 1) {
1996 std::ofstream("track.status", std::ios::binary).write((char*)status.data(), status.size() * sizeof(status[0]));
1997 } else if (mConfig.compareTrackStatus == 2) {
1998 std::ifstream f("track.status", std::ios::binary | std::ios::ate);
1999 std::vector<uint8_t> comp(f.tellg());
2000 f.seekg(0);
2001 f.read((char*)comp.data(), comp.size());
2002
2003 if (comp.size() != status.size()) {
2004 throw std::runtime_error("Number of tracks candidates in track fit in track.status and in current reconstruction differ");
2005 }
2006 std::vector<uint32_t> missing, missingComp;
2007 for (uint32_t i = 0; i < status.size(); i++) {
2008 if (status[i] && !comp[i]) {
2009 missingComp.emplace_back(i);
2010 }
2011 if (comp[i] && !status[i]) {
2012 missing.emplace_back(i);
2013 }
2014 }
2015 auto printer = [](std::vector<uint32_t> m, const char* name) {
2016 if (m.size()) {
2017 printf("Missing in %s reconstruction: (%zu)\n", name, m.size());
2018 for (uint32_t i = 0; i < m.size(); i++) {
2019 if (i) {
2020 printf(", ");
2021 }
2022 printf("%d", m[i]);
2023 }
2024 printf("\n");
2025 }
2026 };
2027 printer(missing, "current");
2028 printer(missingComp, "comparison");
2029 }
2030 }
2031
2032 mTrackingScratchBuffer.clear();
2033 mTrackingScratchBuffer.shrink_to_fit();
2034}
2035
2036void GPUQA::GetName(char* fname, int32_t k, bool noDash)
2037{
2038 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
2039 if (k || mConfig.inputHistogramsOnly || mConfig.name.size()) {
2040 if (!(mConfig.inputHistogramsOnly || k)) {
2041 snprintf(fname, 1024, "%s%s", mConfig.name.c_str(), noDash ? "" : " - ");
2042 } else if (mConfig.compareInputNames.size() > (unsigned)(k - nNewInput)) {
2043 snprintf(fname, 1024, "%s%s", mConfig.compareInputNames[k - nNewInput].c_str(), noDash ? "" : " - ");
2044 } else {
2045 strcpy(fname, mConfig.compareInputs[k - nNewInput].c_str());
2046 if (strlen(fname) > 5 && strcmp(fname + strlen(fname) - 5, ".root") == 0) {
2047 fname[strlen(fname) - 5] = 0;
2048 }
2049 if (!noDash) {
2050 strcat(fname, " - ");
2051 }
2052 }
2053 } else {
2054 fname[0] = 0;
2055 }
2056}
2057
2058template <class T>
2059T* GPUQA::GetHist(T*& ee, std::vector<std::unique_ptr<TFile>>& tin, int32_t k, int32_t nNewInput)
2060{
2061 T* e = ee;
2062 if ((mConfig.inputHistogramsOnly || k) && (e = dynamic_cast<T*>(tin[k - nNewInput]->Get(e->GetName()))) == nullptr) {
2063 GPUWarning("Missing histogram in input %s: %s", mConfig.compareInputs[k - nNewInput].c_str(), ee->GetName());
2064 return (nullptr);
2065 }
2066 ee = e;
2067 return (e);
2068}
2069
// Release all ROOT objects (canvases, pads, legends, ...) that were allocated via
// createGarbageCollected<> during DrawQAHistograms by clearing the garbage collector.
void GPUQA::DrawQAHistogramsCleanup()
{
  clearGarbagageCollector();
}
2074
2075void GPUQA::resetHists()
2076{
2077 if (!mQAInitialized) {
2078 throw std::runtime_error("QA not initialized");
2079 }
2080 if (mHaveExternalHists) {
2081 throw std::runtime_error("Cannot reset external hists");
2082 }
2083 for (auto& h : *mHist1D) {
2084 h.Reset();
2085 }
2086 for (auto& h : *mHist2D) {
2087 h.Reset();
2088 }
2089 for (auto& h : *mHist1Dd) {
2090 h.Reset();
2091 }
2092 for (auto& h : *mHistGraph) {
2093 h = TGraphAsymmErrors();
2094 }
2095 mClusterCounts = counts_t();
2096}
2097
2098int32_t GPUQA::DrawQAHistograms(TObjArray* qcout)
2099{
2100 const auto oldRootIgnoreLevel = gErrorIgnoreLevel;
2101 gErrorIgnoreLevel = kWarning;
2102 if (!mQAInitialized) {
2103 throw std::runtime_error("QA not initialized");
2104 }
2105
2106 if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
2107 printf("Creating QA Histograms\n");
2108 }
2109
2110 std::vector<Color_t> colorNums(COLORCOUNT);
2111 if (!(qcout || mConfig.writeFileExt == "root" || mConfig.writeFileExt == "C")) {
2112 [[maybe_unused]] static int32_t initColorsInitialized = initColors();
2113 }
2114 for (int32_t i = 0; i < COLORCOUNT; i++) {
2115 colorNums[i] = (qcout || mConfig.writeFileExt == "root" || mConfig.writeFileExt == "C") ? defaultColorNums[i] : mColors[i]->GetNumber();
2116 }
2117
2118 bool mcAvail = mcPresent();
2119 char name[2048], fname[1024];
2120
2121 const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
2122 const int32_t ConfigNumInputs = nNewInput + mConfig.compareInputs.size();
2123
2124 std::vector<std::unique_ptr<TFile>> tin;
2125 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
2126 tin.emplace_back(std::make_unique<TFile>(mConfig.compareInputs[i].c_str()));
2127 }
2128 std::unique_ptr<TFile> tout = nullptr;
2129 if (mConfig.output.size()) {
2130 tout = std::make_unique<TFile>(mConfig.output.c_str(), "RECREATE");
2131 }
2132
2133 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2134 float legendSpacingString = 0.025;
2135 for (int32_t i = 0; i < ConfigNumInputs; i++) {
2136 GetName(fname, i);
2137 if (strlen(fname) * 0.006 > legendSpacingString) {
2138 legendSpacingString = strlen(fname) * 0.006;
2139 }
2140 }
2141
2142 // Create Canvas / Pads for Efficiency Histograms
2143 if (mQATasks & taskTrackingEff) {
2144 for (int32_t ii = 0; ii < 6; ii++) {
2145 snprintf(name, 1024, "eff_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2146 mCEff[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2147 mCEff[ii]->cd();
2148 float dy = 1. / 2.;
2149 mPEff[ii][0] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2150 mPEff[ii][0]->Draw();
2151 mPEff[ii][0]->SetRightMargin(0.04);
2152 mPEff[ii][1] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2153 mPEff[ii][1]->Draw();
2154 mPEff[ii][1]->SetRightMargin(0.04);
2155 mPEff[ii][2] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 0.5, dy * 2 - .001);
2156 mPEff[ii][2]->Draw();
2157 mPEff[ii][2]->SetRightMargin(0.04);
2158 mPEff[ii][3] = createGarbageCollected<TPad>("p3", "", 0.5, dy * 1, 1.0, dy * 2 - .001);
2159 mPEff[ii][3]->Draw();
2160 mPEff[ii][3]->SetRightMargin(0.04);
2161 mLEff[ii] = createGarbageCollected<TLegend>(0.92 - legendSpacingString * 1.45, 0.83 - (0.93 - 0.82) / 2. * (float)ConfigNumInputs, 0.98, 0.849);
2162 SetLegend(mLEff[ii]);
2163 }
2164 }
2165
2166 // Create Canvas / Pads for Resolution Histograms
2167 if (mQATasks & taskTrackingRes) {
2168 for (int32_t ii = 0; ii < 7; ii++) {
2169 if (ii == 6) {
2170 snprintf(name, 1024, "res_integral_layout");
2171 } else {
2172 snprintf(name, 1024, "res_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2173 }
2174 mCRes[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2175 mCRes[ii]->cd();
2176 gStyle->SetOptFit(1);
2177
2178 float dy = 1. / 2.;
2179 mPRes[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2180 mPRes[ii][3]->Draw();
2181 mPRes[ii][3]->SetRightMargin(0.04);
2182 mPRes[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2183 mPRes[ii][4]->Draw();
2184 mPRes[ii][4]->SetRightMargin(0.04);
2185 mPRes[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2186 mPRes[ii][0]->Draw();
2187 mPRes[ii][0]->SetRightMargin(0.04);
2188 mPRes[ii][0]->SetLeftMargin(0.15);
2189 mPRes[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2190 mPRes[ii][1]->Draw();
2191 mPRes[ii][1]->SetRightMargin(0.04);
2192 mPRes[ii][1]->SetLeftMargin(0.135);
2193 mPRes[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2194 mPRes[ii][2]->Draw();
2195 mPRes[ii][2]->SetRightMargin(0.06);
2196 mPRes[ii][2]->SetLeftMargin(0.135);
2197 if (ii < 6) {
2198 mLRes[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2199 SetLegend(mLRes[ii]);
2200 }
2201 }
2202 }
2203
2204 // Create Canvas / Pads for Pull Histograms
2205 if (mQATasks & taskTrackingResPull) {
2206 for (int32_t ii = 0; ii < 7; ii++) {
2207 if (ii == 6) {
2208 snprintf(name, 1024, "pull_integral_layout");
2209 } else {
2210 snprintf(name, 1024, "pull_vs_%s_layout", VSPARAMETER_NAMES[ii]);
2211 }
2212 mCPull[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2213 mCPull[ii]->cd();
2214 gStyle->SetOptFit(1);
2215
2216 float dy = 1. / 2.;
2217 mPPull[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
2218 mPPull[ii][3]->Draw();
2219 mPPull[ii][3]->SetRightMargin(0.04);
2220 mPPull[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
2221 mPPull[ii][4]->Draw();
2222 mPPull[ii][4]->SetRightMargin(0.04);
2223 mPPull[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
2224 mPPull[ii][0]->Draw();
2225 mPPull[ii][0]->SetRightMargin(0.04);
2226 mPPull[ii][0]->SetLeftMargin(0.15);
2227 mPPull[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
2228 mPPull[ii][1]->Draw();
2229 mPPull[ii][1]->SetRightMargin(0.04);
2230 mPPull[ii][1]->SetLeftMargin(0.135);
2231 mPPull[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
2232 mPPull[ii][2]->Draw();
2233 mPPull[ii][2]->SetRightMargin(0.06);
2234 mPPull[ii][2]->SetLeftMargin(0.135);
2235 if (ii < 6) {
2236 mLPull[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2237 SetLegend(mLPull[ii]);
2238 }
2239 }
2240 }
2241
2242 // Create Canvas for Cluster Histos
2243 if (mQATasks & taskClusterAttach) {
2244 for (int32_t i = 0; i < 3; i++) {
2245 snprintf(name, 1024, "clusters_%s_layout", CLUSTER_TYPES[i]);
2246 mCClust[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2247 mCClust[i]->cd();
2248 mPClust[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2249 mPClust[i]->Draw();
2250 float y1 = i != 1 ? 0.77 : 0.27, y2 = i != 1 ? 0.9 : 0.42;
2251 mLClust[i] = createGarbageCollected<TLegend>(i == 2 ? 0.1 : (0.65 - legendSpacingString * 1.45), y2 - (y2 - y1) * (ConfigNumInputs + (i != 1) / 2.) + 0.005, i == 2 ? (0.3 + legendSpacingString * 1.45) : 0.9, y2);
2252 SetLegend(mLClust[i]);
2253 }
2254 }
2255
2256 // Create Canvas for track statistic histos
2257 if (mQATasks & taskTrackStatistics) {
2258 mCTrackPt = createGarbageCollected<TCanvas>("ctrackspt", "ctrackspt", 0, 0, 700, 700. * 2. / 3.);
2259 mCTrackPt->cd();
2260 mPTrackPt = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2261 mPTrackPt->Draw();
2262 mLTrackPt = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.5, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2263 SetLegend(mLTrackPt, true);
2264
2265 for (int32_t i = 0; i < 2; i++) {
2266 snprintf(name, 2048, "ctrackst0%d", i);
2267 mCT0[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2268 mCT0[i]->cd();
2269 mPT0[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2270 mPT0[i]->Draw();
2271 mLT0[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
2272 SetLegend(mLT0[i]);
2273
2274 snprintf(name, 2048, "cncl%d", i);
2275 mCNCl[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2276 mCNCl[i]->cd();
2277 mPNCl[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2278 mPNCl[i]->Draw();
2279 mLNCl[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949); // TODO: Fix sizing of legend, and also fix font size
2280 SetLegend(mLNCl[i], true);
2281 }
2282
2283 mCClXY = createGarbageCollected<TCanvas>("clxy", "clxy", 0, 0, 700, 700. * 2. / 3.);
2284 mCClXY->cd();
2285 mPClXY = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2286 mPClXY->Draw();
2287 }
2288
2289 if (mQATasks & taskClusterRejection) {
2290 for (int32_t i = 0; i < 3; i++) {
2291 snprintf(name, 2048, "cnclrej%d", i);
2292 mCClRej[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2293 mCClRej[i]->cd();
2294 mPClRej[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2295 mPClRej[i]->Draw();
2296 }
2297 mCClRejP = createGarbageCollected<TCanvas>("cnclrejp", "cnclrejp", 0, 0, 700, 700. * 2. / 3.);
2298 mCClRejP->cd();
2299 mPClRejP = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2300 mPClRejP->Draw();
2301 }
2302
2303 if (mQATasks & taskClusterAttach) {
2304 for (int32_t i = 0; i < 4; i++) {
2305 snprintf(name, 2048, "cpadrow%d", i);
2306 mCPadRow[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
2307 mCPadRow[i]->cd();
2308 mPPadRow[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
2309 mPPadRow[i]->Draw();
2310 }
2311 }
2312 }
2313
2314 if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && (mQATasks & taskTrackingEff) && mcPresent()) {
2315 GPUInfo("QA Stats: Eff: Tracks Prim %d (Eta %d, Pt %d) %f%% (%f%%) Sec %d (Eta %d, Pt %d) %f%% (%f%%) - Res: Tracks %d (Eta %d, Pt %d)", (int32_t)mEff[3][1][0][0]->GetEntries(), (int32_t)mEff[3][1][0][3]->GetEntries(), (int32_t)mEff[3][1][0][4]->GetEntries(),
2316 mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights()), mEff[0][1][0][0]->GetSumOfWeights() / std::max(1., mEff[3][1][0][0]->GetSumOfWeights()), (int32_t)mEff[3][1][1][0]->GetEntries(), (int32_t)mEff[3][1][1][3]->GetEntries(),
2317 (int32_t)mEff[3][1][1][4]->GetEntries(), mEff[0][0][1][0]->GetSumOfWeights() / std::max(1., mEff[3][0][1][0]->GetSumOfWeights()), mEff[0][1][1][0]->GetSumOfWeights() / std::max(1., mEff[3][1][1][0]->GetSumOfWeights()), (int32_t)mRes2[0][0]->GetEntries(),
2318 (int32_t)mRes2[0][3]->GetEntries(), (int32_t)mRes2[0][4]->GetEntries());
2319 }
2320
2321 int32_t flagShowVsPtLog = (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) ? 1 : 0;
2322
2323 if (mQATasks & taskTrackingEff) {
2324 // Process / Draw Efficiency Histograms
2325 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2326 int32_t i = ii == 5 ? 4 : ii;
2327 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2328 for (int32_t j = 0; j < 4; j++) {
2329 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2330 mPEff[ii][j]->cd();
2331 if (ii == 5) {
2332 mPEff[ii][j]->SetLogx();
2333 }
2334 }
2335 for (int32_t l = 0; l < 3; l++) {
2336 if (k == 0 && mConfig.inputHistogramsOnly == 0 && ii != 5) {
2337 if (l == 0) {
2338 // Divide eff, compute all for fake/clone
2339 auto oldLevel = gErrorIgnoreLevel;
2340 gErrorIgnoreLevel = kError;
2341 mEffResult[0][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[5][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2342 gErrorIgnoreLevel = oldLevel;
2343 mEff[3][j / 2][j % 2][i]->Reset(); // Sum up rec + clone + fake for fake rate
2344 mEff[3][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2345 mEff[3][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2346 mEff[3][j / 2][j % 2][i]->Add(mEff[2][j / 2][j % 2][i]);
2347 mEff[4][j / 2][j % 2][i]->Reset(); // Sum up rec + clone for clone rate
2348 mEff[4][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
2349 mEff[4][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
2350 } else {
2351 // Divide fake/clone
2352 auto oldLevel = gErrorIgnoreLevel;
2353 gErrorIgnoreLevel = kError;
2354 mEffResult[l][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[l == 1 ? 4 : 3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
2355 gErrorIgnoreLevel = oldLevel;
2356 }
2357 }
2358
2359 TGraphAsymmErrors* e = mEffResult[l][j / 2][j % 2][i];
2360
2361 if (!mConfig.inputHistogramsOnly && k == 0) {
2362 if (tout) {
2363 mEff[l][j / 2][j % 2][i]->Write();
2364 e->Write();
2365 if (l == 2) {
2366 mEff[3][j / 2][j % 2][i]->Write(); // Store also all histogram!
2367 mEff[4][j / 2][j % 2][i]->Write(); // Store also all histogram!
2368 }
2369 }
2370 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2371 continue;
2372 }
2373 e->SetTitle(EFFICIENCY_TITLES[j]);
2374 e->GetYaxis()->SetTitle("(Efficiency)");
2375 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2376
2377 e->SetLineWidth(1);
2378 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2379 SetAxisSize(e);
2380 if (qcout && !mConfig.shipToQCAsCanvas) {
2381 qcout->Add(e);
2382 }
2383 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2384 continue;
2385 }
2386 e->SetMarkerColor(kBlack);
2387 e->SetLineColor(colorNums[(k < 3 ? (l * 3 + k) : (k * 3 + l)) % COLORCOUNT]);
2388 e->GetHistogram()->GetYaxis()->SetRangeUser(-0.02, 1.02);
2389 e->Draw(k || l ? "same P" : "AP");
2390 if (j == 0) {
2391 GetName(fname, k);
2392 mLEff[ii]->AddEntry(e, Form("%s%s", fname, EFF_NAMES[l]), "l");
2393 }
2394 }
2395 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2396 continue;
2397 }
2398 mCEff[ii]->cd();
2399 ChangePadTitleSize(mPEff[ii][j], 0.056);
2400 }
2401 }
2402 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2403 continue;
2404 }
2405
2406 mLEff[ii]->Draw();
2407
2408 if (qcout) {
2409 qcout->Add(mCEff[ii]);
2410 }
2411 if (!mConfig.enableLocalOutput) {
2412 continue;
2413 }
2414 doPerfFigure(0.2, 0.295, 0.025);
2415 mCEff[ii]->Print(Form("%s/eff_vs_%s.pdf", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii]));
2416 if (mConfig.writeFileExt != "") {
2417 mCEff[ii]->Print(Form("%s/eff_vs_%s.%s", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii], mConfig.writeFileExt.c_str()));
2418 }
2419 }
2420 }
2421
2422 if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
2423 // Process / Draw Resolution Histograms
2424 TH1D *resIntegral[5] = {}, *pullIntegral[5] = {};
2425 TCanvas* cfit = nullptr;
2426 std::unique_ptr<TF1> customGaus = std::make_unique<TF1>("G", "[0]*exp(-(x-[1])*(x-[1])/(2.*[2]*[2]))");
2427 for (int32_t p = 0; p < 2; p++) {
2428 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2429 continue;
2430 }
2431 for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
2432 TCanvas* can = p ? mCPull[ii] : mCRes[ii];
2433 TLegend* leg = p ? mLPull[ii] : mLRes[ii];
2434 int32_t i = ii == 5 ? 4 : ii;
2435 for (int32_t j = 0; j < 5; j++) {
2436 TH2F* src = p ? mPull2[j][i] : mRes2[j][i];
2437 TH1F** dst = p ? mPull[j][i] : mRes[j][i];
2438 TH1D*& dstIntegral = p ? pullIntegral[j] : resIntegral[j];
2439 TPad* pad = p ? mPPull[ii][j] : mPRes[ii][j];
2440
2441 if (!mConfig.inputHistogramsOnly && ii != 5) {
2442 if (cfit == nullptr) {
2443 cfit = createGarbageCollected<TCanvas>();
2444 }
2445 cfit->cd();
2446
2447 TAxis* axis = src->GetYaxis();
2448 int32_t nBins = axis->GetNbins();
2449 int32_t integ = 1;
2450 for (int32_t bin = 1; bin <= nBins; bin++) {
2451 int32_t bin0 = std::max(bin - integ, 0);
2452 int32_t bin1 = std::min(bin + integ, nBins);
2453 std::unique_ptr<TH1D> proj{src->ProjectionX("proj", bin0, bin1)};
2454 proj->ClearUnderflowAndOverflow();
2455 if (proj->GetEntries()) {
2456 uint32_t rebin = 1;
2457 while (proj->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2458 proj->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2459 rebin++;
2460 }
2461
2462 if (proj->GetEntries() < 20 || proj->GetRMS() < 0.00001) {
2463 dst[0]->SetBinContent(bin, proj->GetRMS());
2464 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2465 dst[1]->SetBinContent(bin, proj->GetMean());
2466 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2467 } else {
2468 proj->GetXaxis()->SetRange(0, 0);
2469 proj->GetXaxis()->SetRangeUser(std::max(proj->GetXaxis()->GetXmin(), proj->GetMean() - 3. * proj->GetRMS()), std::min(proj->GetXaxis()->GetXmax(), proj->GetMean() + 3. * proj->GetRMS()));
2470 bool forceLogLike = proj->GetMaximum() < 20;
2471 for (int32_t k = forceLogLike ? 2 : 0; k < 3; k++) {
2472 proj->Fit("gaus", forceLogLike || k == 2 ? "sQl" : k ? "sQww" : "sQ");
2473 TF1* fitFunc = proj->GetFunction("gaus");
2474
2475 if (k && !forceLogLike) {
2476 customGaus->SetParameters(fitFunc->GetParameter(0), fitFunc->GetParameter(1), fitFunc->GetParameter(2));
2477 proj->Fit(customGaus.get(), "sQ");
2478 fitFunc = customGaus.get();
2479 }
2480
2481 const float sigma = fabs(fitFunc->GetParameter(2));
2482 dst[0]->SetBinContent(bin, sigma);
2483 dst[1]->SetBinContent(bin, fitFunc->GetParameter(1));
2484 dst[0]->SetBinError(bin, fitFunc->GetParError(2));
2485 dst[1]->SetBinError(bin, fitFunc->GetParError(1));
2486
2487 const bool fail1 = sigma <= 0.f;
2488 const bool fail2 = fabs(proj->GetMean() - dst[1]->GetBinContent(bin)) > std::min<float>(p ? PULL_AXIS : mConfig.nativeFitResolutions ? RES_AXES_NATIVE[j] : RES_AXES[j], 3.f * proj->GetRMS());
2489 const bool fail3 = dst[0]->GetBinContent(bin) > 3.f * proj->GetRMS() || dst[0]->GetBinError(bin) > 1 || dst[1]->GetBinError(bin) > 1;
2490 const bool fail4 = fitFunc->GetParameter(0) < proj->GetMaximum() / 5.;
2491 const bool fail = fail1 || fail2 || fail3 || fail4;
2492 // if (p == 0 && ii == 4 && j == 2) DrawHisto(proj, Form("Hist_bin_%d-%d_vs_%d____%d_%d___%f-%f___%f-%f___%d.pdf", p, j, ii, bin, k, dst[0]->GetBinContent(bin), proj->GetRMS(), dst[1]->GetBinContent(bin), proj->GetMean(), (int32_t) fail), "");
2493
2494 if (!fail) {
2495 break;
2496 } else if (k >= 2) {
2497 dst[0]->SetBinContent(bin, proj->GetRMS());
2498 dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2499 dst[1]->SetBinContent(bin, proj->GetMean());
2500 dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
2501 }
2502 }
2503 }
2504 } else {
2505 dst[0]->SetBinContent(bin, 0.f);
2506 dst[0]->SetBinError(bin, 0.f);
2507 dst[1]->SetBinContent(bin, 0.f);
2508 dst[1]->SetBinError(bin, 0.f);
2509 }
2510 }
2511 if (ii == 0) {
2512 dstIntegral = src->ProjectionX(mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j], 0, nBins + 1);
2513 uint32_t rebin = 1;
2514 while (dstIntegral->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
2515 dstIntegral->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
2516 rebin++;
2517 }
2518 }
2519 }
2520 if (ii == 0) {
2521 if (mConfig.inputHistogramsOnly) {
2522 dstIntegral = createGarbageCollected<TH1D>();
2523 }
2524 dstIntegral->SetName(Form(p ? "IntPull%s" : "IntRes%s", VSPARAMETER_NAMES[j]));
2525 dstIntegral->SetTitle(Form(p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]));
2526 }
2527 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2528 pad->cd();
2529 }
2530 int32_t numColor = 0;
2531 float tmpMax = -1000.;
2532 float tmpMin = 1000.;
2533
2534 for (int32_t l = 0; l < 2; l++) {
2535 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2536 TH1F* e = dst[l];
2537 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2538 continue;
2539 }
2540 if (nNewInput && k == 0 && ii != 5) {
2541 if (p == 0) {
2542 e->Scale(mConfig.nativeFitResolutions ? SCALE_NATIVE[j] : SCALE[j]);
2543 }
2544 }
2545 if (ii == 4) {
2546 e->GetXaxis()->SetRangeUser(0.2, PT_MAX);
2547 } else if (LOG_PT_MIN > 0 && ii == 5) {
2548 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2549 } else if (ii == 5) {
2550 e->GetXaxis()->SetRange(1, 0);
2551 }
2552 e->SetMinimum(-1111);
2553 e->SetMaximum(-1111);
2554
2555 if (e->GetMaximum() > tmpMax) {
2556 tmpMax = e->GetMaximum();
2557 }
2558 if (e->GetMinimum() < tmpMin) {
2559 tmpMin = e->GetMinimum();
2560 }
2561 }
2562 }
2563
2564 float tmpSpan;
2565 tmpSpan = tmpMax - tmpMin;
2566 tmpMax += tmpSpan * .02;
2567 tmpMin -= tmpSpan * .02;
2568 if (j == 2 && i < 3) {
2569 tmpMax += tmpSpan * 0.13 * ConfigNumInputs;
2570 }
2571
2572 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2573 for (int32_t l = 0; l < 2; l++) {
2574 TH1F* e = dst[l];
2575 if (!mConfig.inputHistogramsOnly && k == 0) {
2576 e->SetTitle(Form(p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]));
2577 e->SetStats(kFALSE);
2578 if (tout) {
2579 if (l == 0) {
2580 mRes2[j][i]->SetOption("colz");
2581 mRes2[j][i]->Write();
2582 }
2583 e->Write();
2584 }
2585 } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
2586 continue;
2587 }
2588 e->SetMaximum(tmpMax);
2589 e->SetMinimum(tmpMin);
2590 e->SetLineWidth(1);
2591 e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
2592 SetAxisSize(e);
2593 e->GetYaxis()->SetTitle(p ? AXIS_TITLES_PULL[j] : mConfig.nativeFitResolutions ? AXIS_TITLES_NATIVE[j] : AXIS_TITLES[j]);
2594 e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
2595 if (LOG_PT_MIN > 0 && ii == 5) {
2596 e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
2597 }
2598
2599 if (j == 0) {
2600 e->GetYaxis()->SetTitleOffset(1.5);
2601 } else if (j < 3) {
2602 e->GetYaxis()->SetTitleOffset(1.4);
2603 }
2604 if (qcout && !mConfig.shipToQCAsCanvas) {
2605 qcout->Add(e);
2606 }
2607 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2608 continue;
2609 }
2610
2611 e->SetMarkerColor(kBlack);
2612 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2613 e->Draw(k || l ? "same" : "");
2614 if (j == 0) {
2615 GetName(fname, k);
2616 leg->AddEntry(e, Form("%s%s", fname, l ? "Mean" : (p ? "Pull" : "Resolution")), "l");
2617 }
2618 }
2619 }
2620 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2621 continue;
2622 }
2623
2624 if (ii == 5) {
2625 pad->SetLogx();
2626 }
2627 can->cd();
2628 if (j == 4) {
2629 ChangePadTitleSize(pad, 0.056);
2630 }
2631 }
2632 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2633 continue;
2634 }
2635
2636 leg->Draw();
2637
2638 if (qcout) {
2639 qcout->Add(can);
2640 }
2641 if (!mConfig.enableLocalOutput) {
2642 continue;
2643 }
2644 doPerfFigure(0.2, 0.295, 0.025);
2645 can->Print(Form(p ? "%s/pull_vs_%s.pdf" : "%s/res_vs_%s.pdf", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii]));
2646 if (mConfig.writeFileExt != "") {
2647 can->Print(Form(p ? "%s/pull_vs_%s.%s" : "%s/res_vs_%s.%s", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii], mConfig.writeFileExt.c_str()));
2648 }
2649 }
2650 }
2651
2652 // Process Integral Resolution Histogreams
2653 for (int32_t p = 0; p < 2; p++) {
2654 if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
2655 continue;
2656 }
2657 TCanvas* can = p ? mCPull[6] : mCRes[6];
2658 for (int32_t i = 0; i < 5; i++) {
2659 TPad* pad = p ? mPPull[6][i] : mPRes[6][i];
2660 TH1D* hist = p ? pullIntegral[i] : resIntegral[i];
2661 int32_t numColor = 0;
2662 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2663 pad->cd();
2664 }
2665 if (!mConfig.inputHistogramsOnly && mcAvail) {
2666 TH1D* e = hist;
2667 if (e && e->GetEntries()) {
2668 e->Fit("gaus", "sQ");
2669 }
2670 }
2671
2672 float tmpMax = 0;
2673 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2674 TH1D* e = hist;
2675 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2676 continue;
2677 }
2678 e->SetMaximum(-1111);
2679 if (e->GetMaximum() > tmpMax) {
2680 tmpMax = e->GetMaximum();
2681 }
2682 }
2683
2684 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2685 TH1D* e = hist;
2686 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2687 continue;
2688 }
2689 e->SetMaximum(tmpMax * 1.02);
2690 e->SetMinimum(tmpMax * -0.02);
2691 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2692 e->Write();
2693 }
2694 if (qcout && !mConfig.shipToQCAsCanvas) {
2695 qcout->Add(e);
2696 }
2697 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2698 continue;
2699 }
2700
2701 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2702 e->Draw(k == 0 ? "" : "same");
2703 }
2704 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2705 continue;
2706 }
2707 can->cd();
2708 }
2709 if (qcout) {
2710 qcout->Add(can);
2711 }
2712 if (!mConfig.enableLocalOutput) {
2713 continue;
2714 }
2715
2716 can->Print(Form(p ? "%s/pull_integral.pdf" : "%s/res_integral.pdf", mConfig.plotsDir.c_str()));
2717 if (mConfig.writeFileExt != "") {
2718 can->Print(Form(p ? "%s/pull_integral.%s" : "%s/res_integral.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
2719 }
2720 }
2721 }
2722
2723 uint64_t attachClusterCounts[N_CLS_HIST];
2724 if (mQATasks & taskClusterAttach) {
2725 // Process Cluster Attachment Histograms
2726 if (mConfig.inputHistogramsOnly == 0) {
2727 for (int32_t i = N_CLS_HIST; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
2728 mClusters[i]->Sumw2(true);
2729 }
2730 double totalVal = 0;
2731 if (!CLUST_HIST_INT_SUM) {
2732 for (int32_t j = 0; j < mClusters[N_CLS_HIST - 1]->GetXaxis()->GetNbins() + 2; j++) {
2733 totalVal += mClusters[N_CLS_HIST - 1]->GetBinContent(j);
2734 }
2735 }
2736 if (totalVal == 0.) {
2737 totalVal = 1.;
2738 }
2739 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2740 double val = 0;
2741 for (int32_t j = 0; j < mClusters[i]->GetXaxis()->GetNbins() + 2; j++) {
2742 val += mClusters[i]->GetBinContent(j);
2743 mClusters[2 * N_CLS_HIST - 1 + i]->SetBinContent(j, val / totalVal);
2744 }
2745 attachClusterCounts[i] = val;
2746 }
2747
2748 if (!CLUST_HIST_INT_SUM) {
2749 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2750 mClusters[2 * N_CLS_HIST - 1 + i]->SetMaximum(1.02);
2751 mClusters[2 * N_CLS_HIST - 1 + i]->SetMinimum(-0.02);
2752 }
2753 }
2754
2755 for (int32_t i = 0; i < N_CLS_HIST - 1; i++) {
2756 auto oldLevel = gErrorIgnoreLevel;
2757 gErrorIgnoreLevel = kError;
2758 mClusters[N_CLS_HIST + i]->Divide(mClusters[i], mClusters[N_CLS_HIST - 1], 1, 1, "B");
2759 gErrorIgnoreLevel = oldLevel;
2760 mClusters[N_CLS_HIST + i]->SetMinimum(-0.02);
2761 mClusters[N_CLS_HIST + i]->SetMaximum(1.02);
2762 }
2763 }
2764
2765 float tmpMax[2] = {0, 0}, tmpMin[2] = {0, 0};
2766 for (int32_t l = 0; l <= CLUST_HIST_INT_SUM; l++) {
2767 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2768 TH1* e = mClusters[l ? (N_CLS_TYPE * N_CLS_HIST - 2) : (N_CLS_HIST - 1)];
2769 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2770 continue;
2771 }
2772 e->SetMinimum(-1111);
2773 e->SetMaximum(-1111);
2774 if (l == 0) {
2775 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2776 }
2777 if (e->GetMaximum() > tmpMax[l]) {
2778 tmpMax[l] = e->GetMaximum();
2779 }
2780 if (e->GetMinimum() < tmpMin[l]) {
2781 tmpMin[l] = e->GetMinimum();
2782 }
2783 }
2784 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2785 for (int32_t i = 0; i < N_CLS_HIST; i++) {
2786 TH1* e = mClusters[l ? (2 * N_CLS_HIST - 1 + i) : i];
2787 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2788 continue;
2789 }
2790 e->SetMaximum(tmpMax[l] * 1.02);
2791 e->SetMinimum(tmpMax[l] * -0.02);
2792 }
2793 }
2794 }
2795
2796 for (int32_t i = 0; i < N_CLS_TYPE; i++) {
2797 if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
2798 mPClust[i]->cd();
2799 mPClust[i]->SetLogx();
2800 }
2801 int32_t begin = i == 2 ? (2 * N_CLS_HIST - 1) : i == 1 ? N_CLS_HIST : 0;
2802 int32_t end = i == 2 ? (3 * N_CLS_HIST - 1) : i == 1 ? (2 * N_CLS_HIST - 1) : N_CLS_HIST;
2803 int32_t numColor = 0;
2804 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2805 for (int32_t j = end - 1; j >= begin; j--) {
2806 TH1* e = mClusters[j];
2807 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2808 continue;
2809 }
2810
2811 e->SetTitle(mConfig.plotsNoTitle ? "" : CLUSTER_TITLES[i]);
2812 e->GetYaxis()->SetTitle(i == 0 ? "Number of TPC clusters" : i == 1 ? "Fraction of TPC clusters" : CLUST_HIST_INT_SUM ? "Total TPC clusters (integrated)" : "Fraction of TPC clusters (integrated)");
2813 e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
2814 e->GetXaxis()->SetTitleOffset(1.1);
2815 e->GetXaxis()->SetLabelOffset(-0.005);
2816 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2817 e->Write();
2818 }
2819 e->SetStats(kFALSE);
2820 e->SetLineWidth(1);
2821 e->SetLineStyle(CONFIG_DASHED_MARKERS ? j + 1 : 1);
2822 if (i == 0) {
2823 e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
2824 }
2825 if (qcout && !mConfig.shipToQCAsCanvas) {
2826 qcout->Add(e);
2827 }
2828 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2829 continue;
2830 }
2831
2832 e->SetMarkerColor(kBlack);
2833 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2834 e->Draw(j == end - 1 && k == 0 ? "" : "same");
2835 GetName(fname, k);
2836 mLClust[i]->AddEntry(e, Form("%s%s", fname, CLUSTER_NAMES[j - begin]), "l");
2837 }
2838 }
2839 if (ConfigNumInputs == 1) {
2840 TH1* e = reinterpret_cast<TH1F*>(mClusters[begin + CL_att_adj]->Clone());
2841 e->Add(mClusters[begin + CL_prot], -1);
2842 if (qcout && !mConfig.shipToQCAsCanvas) {
2843 qcout->Add(e);
2844 }
2845 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2846 continue;
2847 }
2848
2849 e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
2850 e->Draw("same");
2851 mLClust[i]->AddEntry(e, "Removed (Strategy A)", "l");
2852 }
2853 if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
2854 continue;
2855 }
2856
2857 mLClust[i]->Draw();
2858
2859 if (qcout) {
2860 qcout->Add(mCClust[i]);
2861 }
2862 if (!mConfig.enableLocalOutput) {
2863 continue;
2864 }
2865 doPerfFigure(i == 0 ? 0.37 : (i == 1 ? 0.34 : 0.6), 0.295, 0.030);
2866 mCClust[i]->cd();
2867 mCClust[i]->Print(Form(i == 2 ? "%s/clusters_integral.pdf" : i == 1 ? "%s/clusters_relative.pdf" : "%s/clusters.pdf", mConfig.plotsDir.c_str()));
2868 if (mConfig.writeFileExt != "") {
2869 mCClust[i]->Print(Form(i == 2 ? "%s/clusters_integral.%s" : i == 1 ? "%s/clusters_relative.%s" : "%s/clusters.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
2870 }
2871 }
2872
2873 for (int32_t i = 0; i < 4; i++) {
2874 auto* e = mPadRow[i];
2875 if (tout && !mConfig.inputHistogramsOnly) {
2876 e->Write();
2877 }
2878 mPPadRow[i]->cd();
2879 e->SetOption("colz");
2880 std::string title = "First Track Pad Row (p_{T} > 1GeV, N_{Cl} #geq " + std::to_string(PADROW_CHECK_MINCLS);
2881 if (i >= 2) {
2882 title += ", row_{trk} > row_{MC} + 3, row_{MC} < 10";
2883 }
2884 if (i >= 3) {
2885 title += ", #Phi_{Cl} < 0.15";
2886 }
2887 title += ")";
2888
2889 e->SetTitle(mConfig.plotsNoTitle ? "" : title.c_str());
2890 e->GetXaxis()->SetTitle(i == 3 ? "Local Occupancy" : (i ? "#Phi_{Cl} (sector)" : "First MC Pad Row"));
2891 e->GetYaxis()->SetTitle("First Pad Row");
2892 e->Draw();
2893 mCPadRow[i]->cd();
2894 static const constexpr char* PADROW_NAMES[4] = {"MC", "Phi", "Phi1", "Occ"};
2895 mCPadRow[i]->Print(Form("%s/padRow%s.pdf", mConfig.plotsDir.c_str(), PADROW_NAMES[i]));
2896 if (mConfig.writeFileExt != "") {
2897 mCPadRow[i]->Print(Form("%s/padRow%s.%s", mConfig.plotsDir.c_str(), PADROW_NAMES[i], mConfig.writeFileExt.c_str()));
2898 }
2899 }
2900 }
2901
2902 // Process cluster count statistics
2903 if ((mQATasks & taskClusterCounts) && !mHaveExternalHists && !mConfig.clusterRejectionHistograms && !mConfig.inputHistogramsOnly) {
2904 DoClusterCounts(attachClusterCounts);
2905 }
2906 if ((qcout || tout) && (mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
2907 for (uint32_t i = 0; i < mHistClusterCount.size(); i++) {
2908 if (tout) {
2909 mHistClusterCount[i]->Write();
2910 }
2911 if (qcout) {
2912 qcout->Add(mHistClusterCount[i]);
2913 }
2914 }
2915 }
2916
2917 if (mQATasks & taskTrackStatistics) {
2918 // Process track statistic histograms
2919 float tmpMax = 0.;
2920 for (int32_t k = 0; k < ConfigNumInputs; k++) { // TODO: Simplify this drawing, avoid copy&paste
2921 TH1F* e = mTrackPt;
2922 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2923 continue;
2924 }
2925 e->SetMaximum(-1111);
2926 if (e->GetMaximum() > tmpMax) {
2927 tmpMax = e->GetMaximum();
2928 }
2929 }
2930 mPTrackPt->cd();
2931 mPTrackPt->SetLogx();
2932 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2933 TH1F* e = mTrackPt;
2934 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2935 continue;
2936 }
2937 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2938 e->Write();
2939 }
2940 e->SetMaximum(tmpMax * 1.02);
2941 e->SetMinimum(tmpMax * -0.02);
2942 e->SetStats(kFALSE);
2943 e->SetLineWidth(1);
2944 e->SetTitle(mConfig.plotsNoTitle ? "" : "Number of Tracks vs #it{p}_{T}");
2945 e->GetYaxis()->SetTitle("Number of Tracks");
2946 e->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})");
2947 e->GetXaxis()->SetTitleOffset(1.2);
2948 if (qcout) {
2949 qcout->Add(e);
2950 }
2951 e->SetMarkerColor(kBlack);
2952 e->SetLineColor(colorNums[k % COLORCOUNT]);
2953 e->Draw(k == 0 ? "" : "same");
2954 GetName(fname, k, mConfig.inputHistogramsOnly);
2955 mLTrackPt->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s" : "%sTrack #it{p}_{T}", fname), "l");
2956 }
2957 mLTrackPt->Draw();
2958 doPerfFigure(0.63, 0.7, 0.030);
2959 mCTrackPt->cd();
2960 mCTrackPt->Print(Form("%s/tracks.pdf", mConfig.plotsDir.c_str()));
2961 if (mConfig.writeFileExt != "") {
2962 mCTrackPt->Print(Form("%s/tracks.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
2963 }
2964
2965 for (int32_t i = 0; i < 2; i++) {
2966 tmpMax = 0.;
2967 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2968 TH1F* e = mT0[i];
2969 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2970 continue;
2971 }
2972 e->SetMaximum(-1111);
2973 if (e->GetMaximum() > tmpMax) {
2974 tmpMax = e->GetMaximum();
2975 }
2976 }
2977 mPT0[i]->cd();
2978 for (int32_t k = 0; k < ConfigNumInputs; k++) {
2979 TH1F* e = mT0[i];
2980 if (GetHist(e, tin, k, nNewInput) == nullptr) {
2981 continue;
2982 }
2983 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
2984 e->Write();
2985 }
2986 e->SetMaximum(tmpMax * 1.02);
2987 e->SetMinimum(tmpMax * -0.02);
2988 e->SetStats(kFALSE);
2989 e->SetLineWidth(1);
2990 e->SetTitle(mConfig.plotsNoTitle ? "" : (i ? "Track t_{0} resolution" : "Track t_{0} distribution"));
2991 e->GetYaxis()->SetTitle("a.u.");
2992 e->GetXaxis()->SetTitle(i ? "t_{0} - t_{0, mc}" : "t_{0}");
2993 if (qcout) {
2994 qcout->Add(e);
2995 }
2996 e->SetMarkerColor(kBlack);
2997 e->SetLineColor(colorNums[k % COLORCOUNT]);
2998 e->Draw(k == 0 ? "" : "same");
2999 GetName(fname, k, mConfig.inputHistogramsOnly);
3000 mLT0[i]->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s (%s)" : "%sTrack t_{0} %s", fname, i ? "" : "resolution"), "l");
3001 }
3002 mLT0[i]->Draw();
3003 doPerfFigure(0.63, 0.7, 0.030);
3004 mCT0[i]->cd();
3005 mCT0[i]->Print(Form("%s/t0%s.pdf", mConfig.plotsDir.c_str(), i ? "_res" : ""));
3006 if (mConfig.writeFileExt != "") {
3007 mCT0[i]->Print(Form("%s/t0%s.%s", mConfig.plotsDir.c_str(), i ? "_res" : "", mConfig.writeFileExt.c_str()));
3008 }
3009
3010 tmpMax = 0.;
3011 for (int32_t k = 0; k < ConfigNumInputs; k++) {
3012 TH1F* e = mNCl[i];
3013 if (GetHist(e, tin, k, nNewInput) == nullptr) {
3014 continue;
3015 }
3016 e->SetMaximum(-1111);
3017 if (e->GetMaximum() > tmpMax) {
3018 tmpMax = e->GetMaximum();
3019 }
3020 }
3021 mPNCl[i]->cd();
3022 for (int32_t k = 0; k < ConfigNumInputs; k++) {
3023 TH1F* e = mNCl[i];
3024 if (GetHist(e, tin, k, nNewInput) == nullptr) {
3025 continue;
3026 }
3027 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
3028 e->Write();
3029 }
3030 e->SetMaximum(tmpMax * 1.02);
3031 e->SetMinimum(tmpMax * -0.02);
3032 e->SetStats(kFALSE);
3033 e->SetLineWidth(1);
3034 e->SetTitle(mConfig.plotsNoTitle ? "" : (i ? "Number of Rows with attached Cluster" : "Number of Clusters"));
3035 e->GetYaxis()->SetTitle("a.u.");
3036 e->GetXaxis()->SetTitle(i ? "N_{Rows with Clusters}" : "N_{Clusters}");
3037 if (qcout) {
3038 qcout->Add(e);
3039 }
3040 e->SetMarkerColor(kBlack);
3041 e->SetLineColor(colorNums[k % COLORCOUNT]);
3042 e->Draw(k == 0 ? "" : "same");
3043 GetName(fname, k, mConfig.inputHistogramsOnly);
3044 mLNCl[i]->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s" : (i ? "%sN_{Clusters}" : "%sN_{Rows with Clusters}"), fname), "l");
3045 }
3046 mLNCl[i]->Draw();
3047 doPerfFigure(0.6, 0.7, 0.030);
3048 mCNCl[i]->cd();
3049 mCNCl[i]->Print(Form("%s/nClusters%s.pdf", mConfig.plotsDir.c_str(), i ? "_corrected" : ""));
3050 if (mConfig.writeFileExt != "") {
3051 mCNCl[i]->Print(Form("%s/nClusters%s.%s", mConfig.plotsDir.c_str(), i ? "_corrected" : "", mConfig.writeFileExt.c_str()));
3052 }
3053 }
3054
3055 mPClXY->cd(); // TODO: This should become a separate task category
3056 mClXY->SetOption("colz");
3057 mClXY->Draw();
3058 mCClXY->cd();
3059 mCClXY->Print(Form("%s/clustersXY.pdf", mConfig.plotsDir.c_str()));
3060 if (mConfig.writeFileExt != "") {
3061 mCClXY->Print(Form("%s/clustersXY.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
3062 }
3063 }
3064
3065 if (mQATasks & taskClusterRejection) {
3066 mClRej[2]->Divide(mClRej[1], mClRej[0]);
3067
3068 for (int32_t i = 0; i < 3; i++) {
3069 if (tout && !mConfig.inputHistogramsOnly) {
3070 mClRej[i]->Write();
3071 }
3072 mPClRej[i]->cd();
3073 mClRej[i]->SetTitle(mConfig.plotsNoTitle ? "" : REJECTED_NAMES[i]);
3074 mClRej[i]->SetOption("colz");
3075 mClRej[i]->Draw();
3076 mCClRej[i]->cd();
3077 mCClRej[i]->Print(Form("%s/clustersRej%d%s.pdf", mConfig.plotsDir.c_str(), i, REJECTED_NAMES[i]));
3078 if (mConfig.writeFileExt != "") {
3079 mCClRej[i]->Print(Form("%s/clustersRej%d%s.%s", mConfig.plotsDir.c_str(), i, REJECTED_NAMES[i], mConfig.writeFileExt.c_str()));
3080 }
3081 }
3082
3083 mPClRejP->cd();
3084 for (int32_t k = 0; k < ConfigNumInputs; k++) {
3085 auto* tmp = mClRej[0];
3086 if (GetHist(tmp, tin, k, nNewInput) == nullptr) {
3087 continue;
3088 }
3089 TH1D* proj1 = tmp->ProjectionY(Form("clrejptmp1%d", k)); // TODO: Clean up names
3090 proj1->SetDirectory(nullptr);
3091 tmp = mClRej[1];
3092 if (GetHist(tmp, tin, k, nNewInput) == nullptr) {
3093 continue;
3094 }
3095 TH1D* proj2 = tmp->ProjectionY(Form("clrejptmp2%d", k));
3096 proj2->SetDirectory(nullptr);
3097
3098 auto* e = mClRejP;
3099 if (GetHist(e, tin, k, nNewInput) == nullptr) {
3100 continue;
3101 }
3102 e->Divide(proj2, proj1);
3103 if (tout && !mConfig.inputHistogramsOnly && k == 0) {
3104 e->Write();
3105 }
3106 delete proj1;
3107 delete proj2;
3108 e->SetMinimum(-0.02);
3109 e->SetMaximum(0.22);
3110 e->SetTitle(mConfig.plotsNoTitle ? "" : "Rejected Clusters");
3111 e->GetXaxis()->SetTitle("Pad Row");
3112 e->GetYaxis()->SetTitle("Rejected Clusters (fraction)");
3113 e->Draw(k == 0 ? "" : "same");
3114 }
3115 mPClRejP->Print(Form("%s/clustersRejProjected.pdf", mConfig.plotsDir.c_str()));
3116 if (mConfig.writeFileExt != "") {
3117 mPClRejP->Print(Form("%s/clustersRejProjected.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
3118 }
3119 }
3120
3121 if (tout && !mConfig.inputHistogramsOnly && mConfig.writeMCLabels) {
3122 gInterpreter->GenerateDictionary("vector<vector<int32_t>>", "");
3123 tout->WriteObject(&mcEffBuffer, "mcEffBuffer");
3124 tout->WriteObject(&mcLabelBuffer, "mcLabelBuffer");
3125 remove("AutoDict_vector_vector_int__.cxx");
3126 remove("AutoDict_vector_vector_int___cxx_ACLiC_dict_rdict.pcm");
3127 remove("AutoDict_vector_vector_int___cxx.d");
3128 remove("AutoDict_vector_vector_int___cxx.so");
3129 }
3130
3131 if (tout) {
3132 tout->Close();
3133 }
3134 for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
3135 tin[i]->Close();
3136 }
3137 if (!qcout) {
3138 clearGarbagageCollector();
3139 }
3140 GPUInfo("GPU TPC QA histograms have been written to pdf%s%s files", mConfig.writeFileExt == "" ? "" : " and ", mConfig.writeFileExt.c_str());
3141 gErrorIgnoreLevel = oldRootIgnoreLevel;
3142 return (0);
3143}
3144
3145void GPUQA::PrintClusterCount(int32_t mode, int32_t& num, const char* name, uint64_t n, uint64_t normalization)
3146{
3147 if (mode == 2) {
3148 // do nothing, just count num
3149 } else if (mode == 1) {
3150 char name2[128];
3151 snprintf(name2, 128, "clusterCount%d_", num);
3152 char* ptr = name2 + strlen(name2);
3153 for (uint32_t i = 0; i < strlen(name); i++) {
3154 if ((name[i] >= 'a' && name[i] <= 'z') || (name[i] >= 'A' && name[i] <= 'Z') || (name[i] >= '0' && name[i] <= '9')) {
3155 *(ptr++) = name[i];
3156 }
3157 }
3158 *ptr = 0;
3159 createHist(mHistClusterCount[num], name2, name, 1000, 0, mConfig.histMaxNClusters, 1000, 0, 100);
3160 } else if (mode == 0) {
3161 if (normalization && mConfig.enableLocalOutput) {
3162 for (uint32_t i = 0; i < 1 + (mTextDump != nullptr); i++) {
3163 fprintf(i ? mTextDump : stdout, "\t%40s: %'12" PRIu64 " (%6.2f%%)\n", name, n, 100.f * n / normalization);
3164 }
3165 }
3166 if (mConfig.clusterRejectionHistograms) {
3167 float ratio = 100.f * n / std::max<uint64_t>(normalization, 1);
3168 mHistClusterCount[num]->Fill(normalization, ratio, 1);
3169 }
3170 }
3171 num++;
3172}
3173
3174int32_t GPUQA::DoClusterCounts(uint64_t* attachClusterCounts, int32_t mode)
3175{
3176 if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && mConfig.plotsDir != "") {
3177 mTextDump = fopen((mConfig.plotsDir + "/clusterCounts.txt").c_str(), "w+");
3178 }
3179 int32_t num = 0;
3180 if (mcPresent() && (mQATasks & taskClusterAttach) && attachClusterCounts) {
3181 for (int32_t i = 0; i < N_CLS_HIST; i++) { // TODO: Check that these counts are still printed correctly!
3182 PrintClusterCount(mode, num, CLUSTER_NAMES[i], attachClusterCounts[i], mClusterCounts.nTotal);
3183 }
3184 PrintClusterCount(mode, num, "Unattached", attachClusterCounts[N_CLS_HIST - 1] - attachClusterCounts[CL_att_adj], mClusterCounts.nTotal);
3185 PrintClusterCount(mode, num, "Removed (Strategy A)", attachClusterCounts[CL_att_adj] - attachClusterCounts[CL_prot], mClusterCounts.nTotal); // Attached + Adjacent (also fake) - protected
3186 PrintClusterCount(mode, num, "Unaccessible", mClusterCounts.nUnaccessible, mClusterCounts.nTotal); // No contribution from track >= 10 MeV, unattached or fake-attached/adjacent
3187 } else {
3188 PrintClusterCount(mode, num, "All Clusters", mClusterCounts.nTotal, mClusterCounts.nTotal);
3189 PrintClusterCount(mode, num, "Used in Physics", mClusterCounts.nPhysics, mClusterCounts.nTotal);
3190 PrintClusterCount(mode, num, "Protected", mClusterCounts.nProt, mClusterCounts.nTotal);
3191 PrintClusterCount(mode, num, "Unattached", mClusterCounts.nUnattached, mClusterCounts.nTotal);
3192 PrintClusterCount(mode, num, "Removed (Strategy A)", mClusterCounts.nTotal - mClusterCounts.nUnattached - mClusterCounts.nProt, mClusterCounts.nTotal);
3193 PrintClusterCount(mode, num, "Removed (Strategy B)", mClusterCounts.nTotal - mClusterCounts.nProt, mClusterCounts.nTotal);
3194 }
3195
3196 PrintClusterCount(mode, num, "Merged Loopers (Track Merging)", mClusterCounts.nMergedLooperConnected, mClusterCounts.nTotal);
3197 PrintClusterCount(mode, num, "Merged Loopers (Afterburner)", mClusterCounts.nMergedLooperUnconnected, mClusterCounts.nTotal);
3198 PrintClusterCount(mode, num, "Looping Legs (other)", mClusterCounts.nLoopers, mClusterCounts.nTotal);
3199 PrintClusterCount(mode, num, "High Inclination Angle", mClusterCounts.nHighIncl, mClusterCounts.nTotal);
3200 PrintClusterCount(mode, num, "Rejected", mClusterCounts.nRejected, mClusterCounts.nTotal);
3201 PrintClusterCount(mode, num, "Tube (> 200 MeV)", mClusterCounts.nTube, mClusterCounts.nTotal);
3202 PrintClusterCount(mode, num, "Tube (< 200 MeV)", mClusterCounts.nTube200, mClusterCounts.nTotal);
3203 PrintClusterCount(mode, num, "Low Pt < 50 MeV", mClusterCounts.nLowPt, mClusterCounts.nTotal);
3204 PrintClusterCount(mode, num, "Low Pt < 200 MeV", mClusterCounts.n200MeV, mClusterCounts.nTotal);
3205
3206 if (mcPresent() && (mQATasks & taskClusterAttach)) {
3207 PrintClusterCount(mode, num, "Tracks > 400 MeV", mClusterCounts.nAbove400, mClusterCounts.nTotal);
3208 PrintClusterCount(mode, num, "Fake Removed (> 400 MeV)", mClusterCounts.nFakeRemove400, mClusterCounts.nAbove400);
3209 PrintClusterCount(mode, num, "Full Fake Removed (> 400 MeV)", mClusterCounts.nFullFakeRemove400, mClusterCounts.nAbove400);
3210 PrintClusterCount(mode, num, "Tracks < 40 MeV", mClusterCounts.nBelow40, mClusterCounts.nTotal);
3211 PrintClusterCount(mode, num, "Fake Protect (< 40 MeV)", mClusterCounts.nFakeProtect40, mClusterCounts.nBelow40);
3212 }
3213 if (mcPresent() && (mQATasks & taskTrackStatistics)) {
3214 PrintClusterCount(mode, num, "Correctly Attached all-trk normalized", mClusterCounts.nCorrectlyAttachedNormalized, mClusterCounts.nTotal);
3215 PrintClusterCount(mode, num, "Correctly Attached non-fake normalized", mClusterCounts.nCorrectlyAttachedNormalizedNonFake, mClusterCounts.nTotal);
3216 }
3217 if (mTextDump) {
3218 fclose(mTextDump);
3219 mTextDump = nullptr;
3220 }
3221 return num;
3222}
3223
3224void* GPUQA::AllocateScratchBuffer(size_t nBytes)
3225{
3226 mTrackingScratchBuffer.resize((nBytes + sizeof(mTrackingScratchBuffer[0]) - 1) / sizeof(mTrackingScratchBuffer[0]));
3227 return mTrackingScratchBuffer.data();
3228}
std::vector< std::string > labels
A const (ready only) version of MCTruthContainer.
int16_t charge
Definition RawEventData.h:5
int16_t time
Definition RawEventData.h:4
int32_t i
#define TRACK_EXPECTED_REFERENCE_X_DEFAULT
Definition GPUQA.cxx:206
#define TRACK_EXPECTED_REFERENCE_X
Definition GPUQA.cxx:231
#define QA_DEBUG
Definition GPUQA.cxx:15
#define QA_TIMING
Definition GPUQA.cxx:16
int16_t Color_t
Definition GPUQA.h:33
uint32_t iSector
GPUChain * chain
Definition of the MCTrack class.
Definition of the Names Generator class.
uint16_t pos
Definition RawData.h:3
uint32_t j
Definition RawData.h:0
uint32_t side
Definition RawData.h:0
uint16_t pid
Definition RawData.h:2
uint32_t c
Definition RawData.h:2
POD correction map.
TBranch * ptr
int nClusters
double num
Class for time synchronization of RawReader instances.
static constexpr ID TPC
Definition DetID.h:64
static constexpr int32_t NSECTORS
Definition GPUChain.h:58
int32_t ReadO2MCData(const char *filename)
Definition GPUQA.h:55
bool clusterRemovable(int32_t attach, bool prot) const
Definition GPUQA.h:53
~GPUQA()=default
Definition GPUQA.cxx:338
void * AllocateScratchBuffer(size_t nBytes)
Definition GPUQA.h:56
void SetMCTrackRange(int32_t min, int32_t max)
Definition GPUQA.h:48
mcLabelI_t GetMCTrackLabel(uint32_t trackId) const
Definition GPUQA.h:52
int32_t DrawQAHistograms()
Definition GPUQA.h:47
int32_t InitQA(int32_t tasks=0)
Definition GPUQA.h:45
void DumpO2MCData(const char *filename) const
Definition GPUQA.h:54
int32_t mcLabelI_t
Definition GPUQA.h:44
void RunQA(bool matchOnly=false)
Definition GPUQA.h:46
GPUQA(void *chain)
Definition GPUQA.h:42
static GPUROOTDump< T, Args... > getNew(const char *name1, Names... names)
Definition GPUROOTDump.h:64
static constexpr uint32_t NROWS
static constexpr uint32_t NSECTORS
static DigitizationContext * loadFromFile(std::string_view filename="")
GLdouble n
Definition glcorearb.h:1982
GLfloat GLfloat GLfloat alpha
Definition glcorearb.h:279
GLint GLenum GLint x
Definition glcorearb.h:403
const GLfloat * m
Definition glcorearb.h:4066
GLenum mode
Definition glcorearb.h:266
GLenum src
Definition glcorearb.h:1767
GLuint GLfloat GLfloat GLfloat GLfloat y1
Definition glcorearb.h:5034
GLsizeiptr size
Definition glcorearb.h:659
GLuint GLuint end
Definition glcorearb.h:469
const GLdouble * v
Definition glcorearb.h:832
GLuint const GLchar * name
Definition glcorearb.h:781
GLdouble f
Definition glcorearb.h:310
GLuint GLuint GLfloat weight
Definition glcorearb.h:5477
GLenum GLint * range
Definition glcorearb.h:1899
GLint y
Definition glcorearb.h:270
GLenum GLenum dst
Definition glcorearb.h:1767
GLboolean * data
Definition glcorearb.h:298
GLuint GLsizei const GLchar * label
Definition glcorearb.h:2519
GLuint GLfloat * val
Definition glcorearb.h:1582
GLboolean r
Definition glcorearb.h:1233
GLenum GLfloat param
Definition glcorearb.h:271
GLuint id
Definition glcorearb.h:650
GLdouble GLdouble GLdouble z
Definition glcorearb.h:843
float float float y2
Definition MathUtils.h:44
int int int float float float int nCl
constexpr int LHCBCPERTIMEBIN
Definition Constants.h:38
Enum< T >::Iterator begin(Enum< T >)
Definition Defs.h:156
value_T f3
Definition TrackUtils.h:93
value_T f1
Definition TrackUtils.h:91
value_T f2
Definition TrackUtils.h:92
struct o2::upgrades_utils::@469 tracks
structure to keep trigger-related info
std::string to_string(gsl::span< T, Size > span)
Definition common.h:52
std::string filename()
bool isValid(std::string alias)
int64_t differenceInBC(const InteractionRecord &other) const
std::tuple< std::vector< std::unique_ptr< TCanvas > >, std::vector< std::unique_ptr< TLegend > >, std::vector< std::unique_ptr< TPad > >, std::vector< std::unique_ptr< TLatex > >, std::vector< std::unique_ptr< TH1D > > > v
Definition GPUQA.cxx:316
IR getFirstIRofTF(const IR &rec) const
get 1st IR of TF corresponding to the 1st sampled orbit (in MC)
Definition HBFUtils.h:71
IR getFirstIR() const
Definition HBFUtils.h:47
unsigned int nClusters[constants::MAXSECTOR][constants::MAXGLOBALPADROW]
unsigned int clusterOffset[constants::MAXSECTOR][constants::MAXGLOBALPADROW]
const ClusterNative * clustersLinear
constexpr size_t min
constexpr size_t max
o2::InteractionRecord ir(0, 0)
vec clear()
o2::InteractionRecord ir0(3, 5)