23#include "TGraphAsymmErrors.h"
41#include "GPUChainTrackingGetters.inc"
50#include "GPUParam.inc"
69#include "TParticlePDG.h"
70#include "TDatabasePDG.h"
80#include <oneapi/tbb.h>
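// Helper macros that classify the attachment state of a single cluster
// (unattached, low-pt, merged looper, protected, used in physics) for the cluster rejection statistics.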
#ifdef GPUCA_MERGER_BY_MC_LABEL
#define CHECK_CLUSTER_STATE_INIT_LEG_BY_MC()                                        \
  if (!unattached && mTrackMCLabels[id].isValid()) {                                \
    int32_t mcLabel = mTrackMCLabels[id].getTrackID();                              \
    int32_t mcEvent = mTrackMCLabels[id].getEventID();                              \
    int32_t mcSource = mTrackMCLabels[id].getSourceID();                            \
    if (mTrackMCLabelsReverse[mMCEventOffset[mcSource] + mcEvent][mcLabel] != id) { \
      attach &= (~gputpcgmmergertypes::attachGoodLeg);                              \
    }                                                                               \
  }
#else
#define CHECK_CLUSTER_STATE_INIT_LEG_BY_MC()
#endif

#define CHECK_CLUSTER_STATE_INIT()                                                                \
  bool unattached = attach == 0;                                                                  \
  float qpt = 0;                                                                                  \
  bool lowPt = false;                                                                             \
  bool mev200 = false;                                                                            \
  bool mergedLooper = false;                                                                      \
  int32_t id = attach & gputpcgmmergertypes::attachTrackMask;                                     \
  if (!unattached) {                                                                              \
    qpt = fabsf(mTracking->mIOPtrs.mergedTracks[id].GetParam().GetQPt());                         \
    lowPt = qpt * mTracking->GetParam().qptB5Scaler > mTracking->GetParam().rec.tpc.rejectQPtB5;  \
    mev200 = qpt > 5;                                                                             \
    mergedLooper = mTracking->mIOPtrs.mergedTracks[id].MergedLooper();                            \
  }                                                                                               \
  bool physics = false, protect = false;                                                          \
  CHECK_CLUSTER_STATE_INIT_LEG_BY_MC();

#define CHECK_CLUSTER_STATE()                                                                              \
  CHECK_CLUSTER_STATE_INIT()                                                                               \
  if (mev200) {                                                                                            \
    mClusterCounts.n200MeV++;                                                                              \
  }                                                                                                        \
  if (lowPt) {                                                                                             \
    mClusterCounts.nLowPt++;                                                                               \
  } else if (mergedLooper) {                                                                               \
    mClusterCounts.nMergedLooper++;                                                                        \
  } else {                                                                                                 \
    GPUTPCClusterRejection::GetProtectionStatus<true>(attach, physics, protect, &mClusterCounts, &mev200); \
  }

#define CHECK_CLUSTER_STATE_NOCOUNT()                                             \
  CHECK_CLUSTER_STATE_INIT()                                                      \
  if (!lowPt && !mergedLooper) {                                                  \
    GPUTPCClusterRejection::GetProtectionStatus<false>(attach, physics, protect); \
  }

static GPUSettingsQA defaultConfig;
  return *chain->mConfigQA;
  return defaultConfig;
static const constexpr bool PERF_FIGURE = 0;
static const constexpr float LOG_PT_MIN = -1.;
static constexpr float Y_MAX = 40;
static constexpr float Z_MAX = 100;
static constexpr float PT_MIN_PRIM = 0.1;
static constexpr float PT_MAX = 20;
static constexpr float ETA_MAX = 1.5;
static constexpr float ETA_MAX2 = 0.9;
static constexpr float MIN_WEIGHT_CLS = 40;
static constexpr float FINDABLE_WEIGHT_CLS = 70;
static constexpr bool CLUST_HIST_INT_SUM = false;
static constexpr const int32_t COLORCOUNT = 12;
static const constexpr char* EFF_TYPES[4] = {"Rec", "Clone", "Fake", "All"};
static const constexpr char* FINDABLE_NAMES[2] = {"", "Findable"};
static const constexpr char* PRIM_NAMES[2] = {"Prim", "Sec"};
static const constexpr char* PARAMETER_NAMES[5] = {"Y", "Z", "#Phi", "#lambda", "Relative #it{p}_{T}"};
static const constexpr char* PARAMETER_NAMES_NATIVE[5] = {"Y", "Z", "sin(#Phi)", "tan(#lambda)", "q/#it{p}_{T} (curvature)"};
static const constexpr char* VSPARAMETER_NAMES[6] = {"Y", "Z", "Phi", "Eta", "Pt", "Pt_log"};
static const constexpr char* EFF_NAMES[3] = {"Efficiency", "Clone Rate", "Fake Rate"};
static const constexpr char* EFFICIENCY_TITLES[4] = {"Efficiency (Primary Tracks, Findable)", "Efficiency (Secondary Tracks, Findable)", "Efficiency (Primary Tracks)", "Efficiency (Secondary Tracks)"};
static const constexpr double SCALE[5] = {10., 10., 1000., 1000., 100.};
static const constexpr double SCALE_NATIVE[5] = {10., 10., 1000., 1000., 1.};
static const constexpr char* XAXIS_TITLES[5] = {"#it{y}_{mc} (cm)", "#it{z}_{mc} (cm)", "#Phi_{mc} (rad)", "#eta_{mc}", "#it{p}_{Tmc} (GeV/#it{c})"};
static const constexpr char* AXIS_TITLES[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "#phi-#phi_{mc} (mrad) (Resolution)", "#lambda-#lambda_{mc} (mrad) (Resolution)", "(#it{p}_{T} - #it{p}_{Tmc}) / #it{p}_{Tmc} (%) (Resolution)"};
static const constexpr char* AXIS_TITLES_NATIVE[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "sin(#phi)-sin(#phi_{mc}) (Resolution)", "tan(#lambda)-tan(#lambda_{mc}) (Resolution)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc}) (Resolution)"};
static const constexpr char* AXIS_TITLES_PULL[5] = {"#it{y}-#it{y}_{mc}/#sigma_{y} (Pull)", "#it{z}-#it{z}_{mc}/#sigma_{z} (Pull)", "sin(#phi)-sin(#phi_{mc})/#sigma_{sin(#phi)} (Pull)", "tan(#lambda)-tan(#lambda_{mc})/#sigma_{tan(#lambda)} (Pull)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc})/#sigma_{q/#it{p}_{T}} (Pull)"};
static const constexpr char* CLUSTER_NAMES[GPUQA::N_CLS_HIST] = {"Correctly attached clusters", "Fake attached clusters", "Attached + adjacent clusters", "Fake adjacent clusters", "Clusters of reconstructed tracks", "Used in Physics", "Protected", "All clusters"};
static const constexpr char* CLUSTER_TITLES[GPUQA::N_CLS_TYPE] = {"Clusters Pt Distribution / Attachment", "Clusters Pt Distribution / Attachment (relative to all clusters)", "Clusters Pt Distribution / Attachment (integrated)"};
static const constexpr char* CLUSTER_NAMES_SHORT[GPUQA::N_CLS_HIST] = {"Attached", "Fake", "AttachAdjacent", "FakeAdjacent", "FoundTracks", "Physics", "Protected", "All"};
static const constexpr char* CLUSTER_TYPES[GPUQA::N_CLS_TYPE] = {"", "Ratio", "Integral"};
static const constexpr int32_t COLORS_HEX[COLORCOUNT] = {0xB03030, 0x00A000, 0x0000C0, 0x9400D3, 0x19BBBF, 0xF25900, 0x7F7F7F, 0xFFD700, 0x07F707, 0x07F7F7, 0xF08080, 0x000000};
static const constexpr int32_t CONFIG_DASHED_MARKERS = 0;
static const constexpr float AXES_MIN[5] = {-Y_MAX, -Z_MAX, 0.f, -ETA_MAX, PT_MIN};
static const constexpr float AXES_MAX[5] = {Y_MAX, Z_MAX, 2.f * M_PI, ETA_MAX, PT_MAX};
static const constexpr int32_t AXIS_BINS[5] = {51, 51, 144, 31, 50};
static const constexpr int32_t RES_AXIS_BINS[] = {1017, 113};
static const constexpr float RES_AXES[5] = {1., 1., 0.03, 0.03, 1.0};
static const constexpr float RES_AXES_NATIVE[5] = {1., 1., 0.1, 0.1, 5.0};
static const constexpr float PULL_AXIS = 10.f;
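// initColors() registers the COLORS_HEX palette above as ROOT TColor objects; defaultColorNums lists the
// corresponding standard ROOT colors used when drawing into a QC output (qcout).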
std::vector<TColor*> GPUQA::mColors;
int32_t GPUQA::initColors()
  mColors.reserve(COLORCOUNT);
  for (int32_t i = 0; i < COLORCOUNT; i++) {
    float f1 = (float)((COLORS_HEX[i] >> 16) & 0xFF) / (float)0xFF;
    float f2 = (float)((COLORS_HEX[i] >> 8) & 0xFF) / (float)0xFF;
    float f3 = (float)((COLORS_HEX[i] >> 0) & 0xFF) / (float)0xFF;
    mColors.emplace_back(new TColor(10000 + i, f1, f2, f3));

static constexpr Color_t defaultColorNums[COLORCOUNT] = {kRed, kBlue, kGreen, kMagenta, kOrange, kAzure, kBlack, kYellow, kGray, kTeal, kSpring, kPink};
#define TRACK_EXPECTED_REFERENCE_X_DEFAULT 81
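// MC-truth access wrappers: the O2 geometry build reads labels from the ClusterNative MC-truth container,
// the legacy build below reads them from the AliRoot-style TPC IO pointers.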
#ifdef GPUCA_TPC_GEOMETRY_O2
static inline int32_t GPUQA_O2_ConvertFakeLabel(int32_t label) { return label >= 0x7FFFFFFE ? -1 : label; }
inline uint32_t GPUQA::GetNMCCollissions() const { return mMCInfosCol.size(); }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mMCInfosCol[iCol].num; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].num; }
inline uint32_t GPUQA::GetNMCLabels() const { return mClNative->clustersMCTruth ? mClNative->clustersMCTruth->getIndexedSize() : 0; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mMCInfos[mMCInfosCol[iCol].first + iTrk]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return mMCInfos[mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].first + label.getTrackID()]; }
inline GPUQA::mcLabels_t GPUQA::GetMCLabel(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i); }
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return label.size(); }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i).size(); }
inline GPUQA::mcLabel_t GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mClNative->clustersMCTruth->getLabels(i)[j]; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(mClNative->clustersMCTruth->getLabels(i)[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(label[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return GPUQA_O2_ConvertFakeLabel(label.getTrackID()); }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return mMCEventOffset[mClNative->clustersMCTruth->getLabels(i)[j].getSourceID()] + mClNative->clustersMCTruth->getLabels(i)[j].getEventID(); }
inline const auto& GPUQA::GetClusterLabels() { return mClNative->clustersMCTruth; }
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return 1; }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && mClNative && mClNative->clustersMCTruth && mMCInfos.size(); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return !label.isValid() ? 0 : (mMCEventOffset[label.getSourceID()] + label.getEventID()); }
#define TRACK_EXPECTED_REFERENCE_X 78
#else
inline GPUQA::mcLabelI_t::mcLabelI_t(const GPUQA::mcLabel_t& l) : track(l.fMCID) {}
inline bool GPUQA::mcLabelI_t::operator==(const GPUQA::mcLabel_t& l) { return AbsLabelID(track) == l.fMCID; }
inline uint32_t GPUQA::GetNMCCollissions() const { return 1; }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCLabels() const { return mTracking->mIOPtrs.nMCLabelsTPC; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mTracking->mIOPtrs.mcInfosTPC[AbsLabelID(iTrk)]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return GetMCTrack(label.fMCID, 0); }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabelI_t& label) { return GetMCTrack(label.track, 0); }
inline const GPUQA::mcLabels_t& GPUQA::GetMCLabel(uint32_t i) { return mTracking->mIOPtrs.mcLabelsTPC[i]; }
inline const GPUQA::mcLabel_t& GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j]; }
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return 3; }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return 3; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return label.fMCID; }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return 0; }
inline const auto& GPUQA::GetClusterLabels() { return mTracking->mIOPtrs.mcLabelsTPC; }
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return label.fWeight; }
inline int32_t GPUQA::FakeLabelID(int32_t id) { return id < 0 ? id : (-2 - id); }
inline int32_t GPUQA::AbsLabelID(int32_t id) { return id >= 0 ? id : (-id - 2); }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && GetNMCLabels() && GetNMCTracks(0); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return 0; }
#define TRACK_EXPECTED_REFERENCE_X TRACK_EXPECTED_REFERENCE_X_DEFAULT
#endif
  return obj[mMCEventOffset[l.getSourceID()] + l.getEventID()][l.getTrackID()];

auto GPUQA::getHistArray<TH1F>()
  return std::make_pair(mHist1D, &mHist1D_pos);

auto GPUQA::getHistArray<TH2F>()
  return std::make_pair(mHist2D, &mHist2D_pos);

auto GPUQA::getHistArray<TH1D>()
  return std::make_pair(mHist1Dd, &mHist1Dd_pos);

auto GPUQA::getHistArray<TGraphAsymmErrors>()
  return std::make_pair(mHistGraph, &mHistGraph_pos);
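// createHist() books a histogram of type T or, when histograms were supplied externally via loadHistograms(),
// only checks that the next external histogram carries the expected name and records its position.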
template <class T, typename... Args>
void GPUQA::createHist(T*& h, const char* name, Args... args)
  const auto& p = getHistArray<T>();
  if (mHaveExternalHists) {
    if (p.first->size() <= p.second->size()) {
      GPUError("Array sizes mismatch: Histograms %lu <= Positions %lu", p.first->size(), p.second->size());
      throw std::runtime_error("Incoming histogram array incomplete");
    if (strcmp((*p.first)[p.second->size()].GetName(), name)) {
      GPUError("Histogram name mismatch: in array %s, trying to create %s", (*p.first)[p.second->size()].GetName(), name);
      throw std::runtime_error("Incoming histogram has incorrect name");
    if constexpr (std::is_same_v<T, TGraphAsymmErrors>) {
      p.first->emplace_back();
      p.first->back().SetName(name);
      p.first->emplace_back(name, args...);
  h = &((*p.first)[p.second->size()]);
  p.second->emplace_back(&h);
  std::tuple<std::vector<std::unique_ptr<TCanvas>>, std::vector<std::unique_ptr<TLegend>>, std::vector<std::unique_ptr<TPad>>, std::vector<std::unique_ptr<TLatex>>, std::vector<std::unique_ptr<TH1D>>> v;

template <class T, typename... Args>
T* GPUQA::createGarbageCollected(Args... args)
  auto& v = std::get<std::vector<std::unique_ptr<T>>>(mGarbageCollector->v);
  v.emplace_back(std::make_unique<T>(args...));
  return v.back().get();

void GPUQA::clearGarbagageCollector()
  std::get<std::vector<std::unique_ptr<TPad>>>(mGarbageCollector->v).clear();
  std::apply([](auto&&... args) { ((args.clear()), ...); }, mGarbageCollector->v);

  mMCEventOffset.resize(1, 0);

  if (mQAInitialized && !mHaveExternalHists) {
  clearGarbagageCollector();

  return protect || physics;
  return (!unattached && !physics && !protect);

void GPUQA::SetAxisSize(T* e)
  e->GetYaxis()->SetTitleOffset(1.0);
  e->GetYaxis()->SetTitleSize(0.045);
  e->GetYaxis()->SetLabelSize(0.045);
  e->GetXaxis()->SetTitleOffset(1.03);
  e->GetXaxis()->SetTitleSize(0.045);
  e->GetXaxis()->SetLabelOffset(-0.005);
  e->GetXaxis()->SetLabelSize(0.045);

void GPUQA::SetLegend(TLegend* l)
  l->SetTextSize(0.016);
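// CreateLogAxis() returns nbins + 1 bin edges spaced uniformly in log10 between xmin and xmax; the caller owns the array.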
double* GPUQA::CreateLogAxis(int32_t nbins, float xmin, float xmax)
  float logxmin = std::log10(xmin);
  float logxmax = std::log10(xmax);
  float binwidth = (logxmax - logxmin) / nbins;
  double* xbins = new double[nbins + 1];
  for (int32_t i = 1; i <= nbins; i++) {
    xbins[i] = std::pow(10, logxmin + i * binwidth);

void GPUQA::ChangePadTitleSize(TPad* p, float size)
  TPaveText* pt = (TPaveText*)(p->GetPrimitive("title"));
    GPUError("Error changing title");
    pt->SetTextSize(size);

void GPUQA::DrawHisto(TH1* histo, char* filename, char* options)
  histo->Draw(options);

void GPUQA::doPerfFigure(float x, float y, float size)
  TLatex* t = createGarbageCollected<TLatex>();
  t->SetTextSize(size);
  t->DrawLatex(x, y, str_perf_figure_1);
  t->DrawLatex(x, y - 0.01 - size, str_perf_figure_2);
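// InitQACreateHistograms() books the histograms of all enabled QA tasks (efficiency, resolution, pulls,
// cluster attachment, track statistics, cluster counts) and wires up the histogram position pointers.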
int32_t GPUQA::InitQACreateHistograms()
  char name[2048], fname[1024];
  if (mQATasks & taskTrackingEff) {
    for (int32_t i = 0; i < 4; i++) {
      for (int32_t j = 0; j < 2; j++) {
        for (int32_t k = 0; k < 2; k++) {
          for (int32_t l = 0; l < 5; l++) {
            snprintf(name, 2048, "%s%s%s%sVs%s", "tracks", EFF_TYPES[i], FINDABLE_NAMES[j], PRIM_NAMES[k], VSPARAMETER_NAMES[l]);
            std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], k == 0 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
            createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], binsPt.get());
            createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], AXES_MIN[l], AXES_MAX[l]);
            if (!mHaveExternalHists) {
              mEff[i][j][k][l]->Sumw2();
            strcat(name, "_eff");
            createHist(mEffResult[i][j][k][l], name);

  if (mQATasks & taskTrackingRes) {
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 5; j++) {
        snprintf(name, 2048, "rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        snprintf(fname, 1024, "mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
        createHist(mRes[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
        createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
        createHist(mRes[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        snprintf(name, 2048, "res_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        const float* axis = mConfig.nativeFitResolutions ? RES_AXES_NATIVE : RES_AXES;
        const int32_t nbins = i == 4 && mConfig.nativeFitResolutions ? (10 * RES_AXIS_BINS[0]) : RES_AXIS_BINS[0];
        std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
        createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], binsPt.get());
        createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);

  if (mQATasks & taskTrackingResPull) {
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 5; j++) {
        snprintf(name, 2048, "pull_rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        snprintf(fname, 1024, "pull_mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
        createHist(mPull[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
        createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
        createHist(mPull[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        snprintf(name, 2048, "pull_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
        createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], binsPt.get());
        createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);

  if (mQATasks & taskClusterAttach) {
    for (int32_t i = 0; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
      int32_t ioffset = i >= (2 * N_CLS_HIST - 1) ? (2 * N_CLS_HIST - 1) : i >= N_CLS_HIST ? N_CLS_HIST : 0;
      int32_t itype = i >= (2 * N_CLS_HIST - 1) ? 2 : i >= N_CLS_HIST ? 1 : 0;
      snprintf(name, 2048, "clusters%s%s", CLUSTER_NAMES_SHORT[i - ioffset], CLUSTER_TYPES[itype]);
      std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
      createHist(mClusters[i], name, name, AXIS_BINS[4], binsPt.get());

  if (mQATasks & taskTrackStatistics) {
    for (int32_t i = 0; i < 2; i++) {
      snprintf(name, 2048, i ? "nrows_with_cluster" : "nclusters");
      createHist(mNCl[i], name, name, 160, 0, 159);
    snprintf(name, 2048, "tracks");
    std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
    createHist(mTracks, name, name, AXIS_BINS[4], binsPt.get());
    createHist(mClXY, "clXY", "clXY", 1000, -250, 250, 1000, -250, 250);

  if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
    int32_t num = DoClusterCounts(nullptr, 2);
    mHistClusterCount.resize(num);
    DoClusterCounts(nullptr, 1);

  for (uint32_t i = 0; i < mHist1D->size(); i++) {
    *mHist1D_pos[i] = &(*mHist1D)[i];
  for (uint32_t i = 0; i < mHist2D->size(); i++) {
    *mHist2D_pos[i] = &(*mHist2D)[i];
  for (uint32_t i = 0; i < mHist1Dd->size(); i++) {
    *mHist1Dd_pos[i] = &(*mHist1Dd)[i];
  for (uint32_t i = 0; i < mHistGraph->size(); i++) {
    *mHistGraph_pos[i] = &(*mHistGraph)[i];
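// loadHistograms() attaches externally owned histogram vectors (e.g. provided by the QC framework) instead of creating local ones.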
int32_t GPUQA::loadHistograms(std::vector<TH1F>& i1, std::vector<TH2F>& i2, std::vector<TH1D>& i3, std::vector<TGraphAsymmErrors>& i4, int32_t tasks)
  tasks = taskDefaultPostprocess;
  if (mQAInitialized && (!mHaveExternalHists || tasks != mQATasks)) {
    throw std::runtime_error("QA not initialized or initialized with different task array");
  mHist1Dd_pos.clear();
  mHistGraph_pos.clear();
  mHaveExternalHists = true;
  if (InitQACreateHistograms()) {
  mQAInitialized = true;
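// Below: serialization of the condensed MC information (mMCInfos, mMCInfosCol, mMCEventOffset) to and from a flat binary file.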
  uint32_t n = mMCInfos.size();
  fwrite(&n, sizeof(n), 1, fp);
  fwrite(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp);
  n = mMCInfosCol.size();
  fwrite(&n, sizeof(n), 1, fp);
  fwrite(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp);
  n = mMCEventOffset.size();
  fwrite(&n, sizeof(n), 1, fp);
  fwrite(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp);

  if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
  if (fread(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp) != n) {
  if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
  mMCInfosCol.resize(n);
  if (fread(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp) != n) {
  if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
  mMCEventOffset.resize(n);
  if (fread(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp) != n) {
  if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
    printf("Read %ld bytes MC Infos\n", ftell(fp));
  CopyO2MCtoIOPtr(&mTracking->mIOPtrs);

  ptr->mcInfosTPC = mMCInfos.data();
  ptr->nMCInfosTPC = mMCInfos.size();
  ptr->mcInfosTPCCol = mMCInfosCol.data();
  ptr->nMCInfosTPCCol = mMCInfosCol.size();
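// InitO2MCData() condenses the O2 MC kinematics (tracks and track references per source / event) into the flat
// mMCInfos / mMCInfosCol structures, indexed via mMCEventOffset.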
  if (!mO2MCDataLoaded) {
    if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
      GPUInfo("Start reading O2 Track MC information");
    static constexpr float PRIM_MAX_T = 0.01f;
    std::vector<int32_t> refId;
    auto evrec = dc->getEventRecords();
    uint32_t nSimSources = mcReader.getNSources();
    mMCEventOffset.resize(nSimSources);
    uint32_t nSimTotalEvents = 0;
    uint32_t nSimTotalTracks = 0;
    for (uint32_t i = 0; i < nSimSources; i++) {
      mMCEventOffset[i] = nSimTotalEvents;
      nSimTotalEvents += mcReader.getNEvents(i);
    mMCInfosCol.resize(nSimTotalEvents);
    for (int32_t iSim = 0; iSim < mcReader.getNSources(); iSim++) {
      for (int32_t i = 0; i < mcReader.getNEvents(iSim); i++) {
        const std::vector<o2::MCTrack>& tracks = mcReader.getTracks(iSim, i);
        const std::vector<o2::TrackReference>& trackRefs = mcReader.getTrackRefsByEvent(iSim, i);
        refId.resize(tracks.size());
        std::fill(refId.begin(), refId.end(), -1);
        for (uint32_t j = 0; j < trackRefs.size(); j++) {
          int32_t trkId = trackRefs[j].getTrackID();
          if (refId[trkId] == -1) {
        mMCInfosCol[mMCEventOffset[iSim] + i].first = mMCInfos.size();
        mMCInfosCol[mMCEventOffset[iSim] + i].num = tracks.size();
        mMCInfos.resize(mMCInfos.size() + tracks.size());
        for (uint32_t j = 0; j < tracks.size(); j++) {
          auto& info = mMCInfos[mMCInfosCol[mMCEventOffset[iSim] + i].first + j];
          TParticlePDG* particle = TDatabasePDG::Instance()->GetParticle(trk.GetPdgCode());
          if (abs(trk.GetPdgCode()) == kElectron) {
          if (abs(trk.GetPdgCode()) == kMuonMinus) {
          if (abs(trk.GetPdgCode()) == kPiPlus) {
          if (abs(trk.GetPdgCode()) == kKPlus) {
          if (abs(trk.GetPdgCode()) == kProton) {
          info.charge = particle ? particle->Charge() : 0;
          info.prim = trk.T() < PRIM_MAX_T;
          info.primDaughters = 0;
          if (trk.getFirstDaughterTrackId() != -1) {
            for (int32_t k = trk.getFirstDaughterTrackId(); k <= trk.getLastDaughterTrackId(); k++) {
              if (tracks[k].T() < PRIM_MAX_T) {
                info.primDaughters = 1;
          const auto& trkRef = trackRefs[refId[j]];
          info.pX = trkRef.Px();
          info.pY = trkRef.Py();
          info.pZ = trkRef.Pz();
          info.genRadius = std::sqrt(trk.GetStartVertexCoordinatesX() * trk.GetStartVertexCoordinatesX() + trk.GetStartVertexCoordinatesY() * trk.GetStartVertexCoordinatesY() + trk.GetStartVertexCoordinatesZ() * trk.GetStartVertexCoordinatesZ());
          info.x = info.y = info.z = info.pX = info.pY = info.pZ = 0;
    if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
    mO2MCDataLoaded = true;
  CopyO2MCtoIOPtr(updateIOPtr);
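// QA initialization: allocate the histogram vectors, create the histograms, optionally load the MC-label matching
// buffers from the comparison files, and mark the QA as initialized.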
  if (mQAInitialized) {
    throw std::runtime_error("QA already initialized");
  mHist1D = new std::vector<TH1F>;
  mHist2D = new std::vector<TH2F>;
  mHist1Dd = new std::vector<TH1D>;
  mHistGraph = new std::vector<TGraphAsymmErrors>;
  if (mTracking->GetProcessingSettings().qcRunFraction != 100.f && mQATasks != taskClusterCounts) {
    throw std::runtime_error("QA with qcRunFraction only supported for taskClusterCounts");
  mClNative = mTracking->mIOPtrs.clustersNative;
  if (InitQACreateHistograms()) {
  if (mConfig.enableLocalOutput) {
    mkdir("plots", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
  InitO2MCData(mTracking ? &mTracking->mIOPtrs : nullptr);
  if (mConfig.matchMCLabels.size()) {
    uint32_t nFiles = mConfig.matchMCLabels.size();
    std::vector<std::unique_ptr<TFile>> files;
    std::vector<std::vector<std::vector<int32_t>>*> labelsBuffer(nFiles);
    std::vector<std::vector<std::vector<int32_t>>*> effBuffer(nFiles);
    for (uint32_t i = 0; i < nFiles; i++) {
      files.emplace_back(std::make_unique<TFile>(mConfig.matchMCLabels[i].c_str()));
      labelsBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcLabelBuffer");
      effBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcEffBuffer");
      if (labelsBuffer[i] == nullptr || effBuffer[i] == nullptr) {
        GPUError("Error opening / reading from labels file %u/%s: %p %p", i, mConfig.matchMCLabels[i].c_str(), (void*)labelsBuffer[i], (void*)effBuffer[i]);
    mGoodTracks.resize(labelsBuffer[0]->size());
    mGoodHits.resize(labelsBuffer[0]->size());
    for (uint32_t iEvent = 0; iEvent < labelsBuffer[0]->size(); iEvent++) {
      std::vector<bool> labelsOK((*effBuffer[0])[iEvent].size());
      for (uint32_t k = 0; k < (*effBuffer[0])[iEvent].size(); k++) {
        for (uint32_t l = 0; l < nFiles; l++) {
          if ((*effBuffer[0])[iEvent][k] != (*effBuffer[l])[iEvent][k]) {
      mGoodTracks[iEvent].resize((*labelsBuffer[0])[iEvent].size());
      for (uint32_t k = 0; k < (*labelsBuffer[0])[iEvent].size(); k++) {
        if ((*labelsBuffer[0])[iEvent][k] == MC_LABEL_INVALID) {
        mGoodTracks[iEvent][k] = labelsOK[abs((*labelsBuffer[0])[iEvent][k])];
  mQAInitialized = true;
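// Main QA pass: assign an MC label to every reconstructed track, then fill the efficiency, resolution / pull,
// cluster-attachment, track-statistics and cluster-count histograms for the enabled tasks.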
  if (!mQAInitialized) {
    throw std::runtime_error("QA not initialized");
  if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
    GPUInfo("Running QA - Mask %d, Efficiency %d, Resolution %d, Pulls %d, Cluster Attachment %d, Track Statistics %d, Cluster Counts %d", mQATasks, (int32_t)(mQATasks & taskTrackingEff), (int32_t)(mQATasks & taskTrackingRes), (int32_t)(mQATasks & taskTrackingResPull), (int32_t)(mQATasks & taskClusterAttach), (int32_t)(mQATasks & taskTrackStatistics), (int32_t)(mQATasks & taskClusterCounts));
  if (!clNative && mTracking) {
    clNative = mTracking->mIOPtrs.clustersNative;
  mClNative = clNative;

#ifdef GPUCA_TPC_GEOMETRY_O2
  uint32_t nSimEvents = GetNMCCollissions();
  if (mTrackMCLabelsReverse.size() < nSimEvents) {
    mTrackMCLabelsReverse.resize(nSimEvents);
  if (mRecTracks.size() < nSimEvents) {
    mRecTracks.resize(nSimEvents);
  if (mFakeTracks.size() < nSimEvents) {
    mFakeTracks.resize(nSimEvents);
  if (mMCParam.size() < nSimEvents) {
    mMCParam.resize(nSimEvents);

  uint32_t nReconstructedTracks = 0;
  if (tracksExternal) {
    nReconstructedTracks = tracksExternal->size();
    nReconstructedTracks = mTracking->mIOPtrs.nMergedTracks;
  mTrackMCLabels.resize(nReconstructedTracks);
  for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
    mTrackMCLabelsReverse[iCol].resize(GetNMCTracks(iCol));
    mRecTracks[iCol].resize(GetNMCTracks(iCol));
    mFakeTracks[iCol].resize(GetNMCTracks(iCol));
    mMCParam[iCol].resize(GetNMCTracks(iCol));
    memset(mRecTracks[iCol].data(), 0, mRecTracks[iCol].size() * sizeof(mRecTracks[iCol][0]));
    memset(mFakeTracks[iCol].data(), 0, mFakeTracks[iCol].size() * sizeof(mFakeTracks[iCol][0]));
    for (size_t i = 0; i < mTrackMCLabelsReverse[iCol].size(); i++) {
      mTrackMCLabelsReverse[iCol][i] = -1;
  if (mQATasks & taskClusterAttach && GetNMCLabels()) {
    mClusterParam.resize(GetNMCLabels());
    memset(mClusterParam.data(), 0, mClusterParam.size() * sizeof(mClusterParam[0]));
  if (mConfig.writeMCLabels) {
    mcEffBuffer.resize(mNEvents);
    mcLabelBuffer.resize(mNEvents);
    mcEffBuffer[mNEvents - 1].resize(GetNMCTracks(0));
    mcLabelBuffer[mNEvents - 1].resize(nReconstructedTracks);
  bool mcAvail = mcPresent() || tracksExtMC;

  if (tracksExternal) {
    for (uint32_t i = 0; i < tracksExternal->size(); i++) {
      mTrackMCLabels[i] = (*tracksExtMC)[i];
  tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nReconstructedTracks, (QA_DEBUG == 0) ? 32 : nReconstructedTracks), [&](const tbb::blocked_range<uint32_t>& range) {
    auto acc = GPUTPCTrkLbl<true, mcLabelI_t>(GetClusterLabels(), 1.f - mConfig.recThreshold);
    std::vector<mcLabel_t> labels;
    for (uint32_t k = 0; k < track.NClusters(); k++) {
      uint32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
      if (hitId >= GetNMCLabels()) {
        GPUError("Invalid hit id %u > %d (nClusters %d)", hitId, GetNMCLabels(), mTracking->mIOPtrs.clustersNative ? mTracking->mIOPtrs.clustersNative->nClustersTotal : 0);
        throw std::runtime_error("qa error");
      for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
        if (GetMCLabelID(hitId, j) >= (int32_t)GetNMCTracks(GetMCLabelCol(hitId, j))) {
          GPUError("Invalid label %d > %d (hit %d, label %d, col %d)", GetMCLabelID(hitId, j), GetNMCTracks(GetMCLabelCol(hitId, j)), hitId, j, (int32_t)GetMCLabelCol(hitId, j));
          throw std::runtime_error("qa error");
        if (GetMCLabelID(hitId, j) >= 0) {
          GPUInfo("Track %d Cluster %u Label %d: %d (%f)", i, k, j, GetMCLabelID(hitId, j), GetMCLabelWeight(hitId, j));
    float maxweight, sumweight;
    auto maxLabel = acc.computeLabel(&maxweight, &sumweight, &maxcount);
    mTrackMCLabels[i] = maxLabel;
    if (QA_DEBUG && track.OK() && GetNMCTracks(maxLabel) > (uint32_t)maxLabel.getTrackID()) {
      const mcInfo_t& mc = GetMCTrack(maxLabel);
      GPUInfo("Track %d label %d (fake %d) weight %f clusters %d (fitted %d) (%f%% %f%%) Pt %f", i, maxLabel.getTrackID(), (int32_t)(maxLabel.isFake()), maxweight, nClusters, track.NClustersFitted(), 100.f * maxweight / sumweight, 100.f * (float)maxcount / (float)nClusters, std::sqrt(mc.pX * mc.pX + mc.pY * mc.pY));
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
  for (uint32_t i = 0; i < nReconstructedTracks; i++) {
    mcLabelI_t label = mTrackMCLabels[i];
    if (mQATasks & taskClusterAttach) {
      for (uint32_t k = 0; k < track->NClusters(); k++) {
        mClusterParam[mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num].fakeAttached++;
      if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
        for (uint32_t k = 0; k < track->NClusters(); k++) {
          int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num;
          bool correct = false;
          for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
            if (label == GetMCLabel(hitId, j)) {
            mClusterParam[hitId].attached++;
            mClusterParam[hitId].fakeAttached++;
    if (mTrackMCLabels[i].isFake()) {
      (GetMCTrackObj(mFakeTracks, label))++;
    } else if (tracksExternal || !track->MergedLooper()) {
      GetMCTrackObj(mRecTracks, label)++;
      if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
        int32_t& revLabel = GetMCTrackObj(mTrackMCLabelsReverse, label);
        if (tracksExternal) {
          if (revLabel == -1 || fabsf((*tracksExternal)[i].getZ()) < fabsf((*tracksExternal)[revLabel].getZ())) {
          const auto* trks = mTracking->mIOPtrs.mergedTracks;
          if (revLabel == -1) {
          } else if (mTracking->GetParam().par.earlyTpcTransform) {
            comp = fabsf(trks[i].GetParam().GetZ() + trks[i].GetParam().GetTZOffset()) < fabsf(trks[revLabel].GetParam().GetZ() + trks[revLabel].GetParam().GetTZOffset());
            float shift1 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[i].CSide() * GPUChainTracking::NSECTORS / 2, trks[i].GetParam().GetTZOffset());
            float shift2 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[revLabel].CSide() * GPUChainTracking::NSECTORS / 2, trks[revLabel].GetParam().GetTZOffset());
            comp = fabsf(trks[i].GetParam().GetZ() + shift1) < fabsf(trks[revLabel].GetParam().GetZ() + shift2);
          if (revLabel == -1 || !trks[revLabel].OK() || (trks[i].OK() && comp)) {

  if ((mQATasks & taskClusterAttach) && mTracking->mIOPtrs.mergedTrackHitAttachment) {
    for (uint32_t i = 0; i < GetNMCLabels(); i++) {
      if (mClusterParam[i].attached == 0 && mClusterParam[i].fakeAttached == 0) {
        int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
          mcLabelI_t trackL = mTrackMCLabels[track];
          for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
            if (trackL == GetMCLabel(i, j)) {
            mClusterParam[i].fakeAdjacent++;
            mClusterParam[i].adjacent++;

  if (mConfig.matchMCLabels.size()) {
    mGoodHits[mNEvents - 1].resize(GetNMCLabels());
    std::vector<bool> allowMCLabels(GetNMCTracks(0));
    for (uint32_t k = 0; k < GetNMCTracks(0); k++) {
      allowMCLabels[k] = false;
    for (uint32_t i = 0; i < nReconstructedTracks; i++) {
      if (!mGoodTracks[mNEvents - 1][i]) {
      if (mConfig.matchDisplayMinPt > 0) {
        if (!mTrackMCLabels[i].isValid()) {
        const mcInfo_t& info = GetMCTrack(mTrackMCLabels[i]);
        if (info.pX * info.pX + info.pY * info.pY < mConfig.matchDisplayMinPt * mConfig.matchDisplayMinPt) {
      for (uint32_t j = 0; j < track.NClusters(); j++) {
        int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + j].num;
        if (GetMCLabelNID(hitId)) {
          int32_t mcID = GetMCLabelID(hitId, 0);
          allowMCLabels[mcID] = true;
    for (uint32_t i = 0; i < GetNMCLabels(); i++) {
      for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
        int32_t mcID = GetMCLabelID(i, j);
        if (mcID >= 0 && allowMCLabels[mcID]) {
          mGoodHits[mNEvents - 1][i] = true;
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
  for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
    for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
      mMCParam[iCol][i].nWeightCls = 0.;
  for (uint32_t i = 0; i < GetNMCLabels(); i++) {
    float weightTotal = 0.f;
    for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
      if (GetMCLabelID(i, j) >= 0) {
        weightTotal += GetMCLabelWeight(i, j);
    for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
      if (GetMCLabelID(i, j) >= 0) {
        GetMCTrackObj(mMCParam, GetMCLabel(i, j)).nWeightCls += GetMCLabelWeight(i, j) / weightTotal;
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
    GPUInfo("QA Time: Compute cluster label weights:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);

  tbb::parallel_for<uint32_t>(0, GetNMCCollissions(), [&](auto iCol) {
    for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
      const mcInfo_t& info = GetMCTrack(i, iCol);
      additionalMCParameters& mc2 = mMCParam[iCol][i];
      mc2.pt = std::sqrt(info.pX * info.pX + info.pY * info.pY);
      mc2.phi = M_PI + std::atan2(-info.pY, -info.pX);
      float p = info.pX * info.pX + info.pY * info.pY + info.pZ * info.pZ;
        mc2.theta = mc2.eta = 0.f;
        mc2.theta = info.pZ == 0 ? (M_PI / 2) : (std::acos(info.pZ / std::sqrt(p)));
        mc2.eta = -std::log(std::tan(0.5 * mc2.theta));
      if (mConfig.writeMCLabels) {
        std::vector<int32_t>& effBuffer = mcEffBuffer[mNEvents - 1];
        effBuffer[i] = mRecTracks[iCol][i] * 1000 + mFakeTracks[iCol][i];
  }, tbb::simple_partitioner());
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
    GPUInfo("QA Time: Compute track mc parameters:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
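// Efficiency task: loop over all MC tracks, apply acceptance and quality cuts, and fill the
// rec / clone / fake / all counters versus Y, Z, phi, eta and pt.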
  if (mQATasks & taskTrackingEff) {
    for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
      for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
        if ((mMCTrackMin != -1 && (int32_t)i < mMCTrackMin) || (mMCTrackMax != -1 && (int32_t)i >= mMCTrackMax)) {
        const mcInfo_t& info = GetMCTrack(i, iCol);
        const additionalMCParameters& mc2 = mMCParam[iCol][i];
        if (mc2.nWeightCls == 0.f) {
        const float& mcpt = mc2.pt;
        const float& mcphi = mc2.phi;
        const float& mceta = mc2.eta;
        if (info.primDaughters) {
        if (mc2.nWeightCls < MIN_WEIGHT_CLS) {
        int32_t findable = mc2.nWeightCls >= FINDABLE_WEIGHT_CLS;
        if (info.charge == 0.f) {
        if (mConfig.filterCharge && info.charge * mConfig.filterCharge < 0) {
        if (mConfig.filterPID >= 0 && info.pid != mConfig.filterPID) {
        if (fabsf(mceta) > ETA_MAX || mcpt < PT_MIN || mcpt > PT_MAX) {
        float alpha = std::atan2(info.y, info.x);
        alpha /= M_PI / 9.f;
        alpha *= M_PI / 9.f;
        alpha += M_PI / 18.f;
        float c = std::cos(alpha);
        float s = std::sin(alpha);
        float localY = -info.x * s + info.y * c;
        if (mConfig.dumpToROOT) {
          float localX = info.x * c + info.y * s;
          effdump.Fill(alpha, localX, localY, info.z, mcphi, mceta, mcpt, mRecTracks[iCol][i], mFakeTracks[iCol][i], findable, info.prim);
        for (int32_t j = 0; j < 4; j++) {
          for (int32_t k = 0; k < 2; k++) {
            if (k == 0 && findable == 0) {
            int32_t val = (j == 0) ? (mRecTracks[iCol][i] ? 1 : 0) : (j == 1) ? (mRecTracks[iCol][i] ? mRecTracks[iCol][i] - 1 : 0) : (j == 2) ? mFakeTracks[iCol][i] : 1;
            for (int32_t l = 0; l < 5; l++) {
              if (info.prim && mcpt < PT_MIN_PRIM) {
              if (l != 3 && fabsf(mceta) > ETA_MAX2) {
              if (l < 4 && mcpt < 1.f / mConfig.qpt) {
              float pos = l == 0 ? localY : l == 1 ? info.z : l == 2 ? mcphi : l == 3 ? mceta : mcpt;
              mEff[j][k][!info.prim][l]->Fill(pos, val);
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
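// Resolution / pull task: propagate each matched track to the MC reference position and histogram the
// parameter residuals and pulls versus the MC parameters.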
  if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
    prop.SetMaxSinPhi(.999);
    prop.SetMaterialTPC();
    prop.SetPolynomialField(&mParam->polynomialField);
    for (uint32_t i = 0; i < mTrackMCLabels.size(); i++) {
      if (mConfig.writeMCLabels) {
        std::vector<int32_t>& labelBuffer = mcLabelBuffer[mNEvents - 1];
        labelBuffer[i] = mTrackMCLabels[i].getTrackID();
      if (mTrackMCLabels[i].isFake()) {
      const mcInfo_t& mc1 = GetMCTrack(mTrackMCLabels[i]);
      const additionalMCParameters& mc2 = GetMCTrackObj(mMCParam, mTrackMCLabels[i]);
      if (mc1.primDaughters) {
      if (!tracksExternal) {
        if (!mTracking->mIOPtrs.mergedTracks[i].OK()) {
        if (mTracking->mIOPtrs.mergedTracks[i].MergedLooper()) {
      if ((mMCTrackMin != -1 && mTrackMCLabels[i].getTrackID() < mMCTrackMin) || (mMCTrackMax != -1 && mTrackMCLabels[i].getTrackID() >= mMCTrackMax)) {
      if (fabsf(mc2.eta) > ETA_MAX || mc2.pt < PT_MIN || mc2.pt > PT_MAX) {
      if (mc1.charge == 0.f) {
      if (mConfig.filterCharge && mc1.charge * mConfig.filterCharge < 0) {
      if (mConfig.filterPID >= 0 && mc1.pid != mConfig.filterPID) {
      if (mc2.nWeightCls < MIN_WEIGHT_CLS) {
      if (mConfig.resPrimaries == 1 && !mc1.prim) {
      } else if (mConfig.resPrimaries == 2 && mc1.prim) {
      if (GetMCTrackObj(mTrackMCLabelsReverse, mTrackMCLabels[i]) != (int32_t)i) {
      if (tracksExternal) {
        for (int32_t k = 0; k < 5; k++) {
          param.Par()[k] = (*tracksExternal)[i].getParams()[k];
        for (int32_t k = 0; k < 15; k++) {
          param.Cov()[k] = (*tracksExternal)[i].getCov()[k];
        param.X() = (*tracksExternal)[i].getX();
        param.TZOffset() = (*tracksExternal)[i].getTime0();
        alpha = (*tracksExternal)[i].getAlpha();
        side = (*tracksExternal)[i].hasBothSidesClusters() ? 2 : ((*tracksExternal)[i].hasCSideClusters() ? 1 : 0);
        param = mTracking->mIOPtrs.mergedTracks[i].GetParam();
        alpha = mTracking->mIOPtrs.mergedTracks[i].GetAlpha();
        side = mTracking->mIOPtrs.mergedTracks[i].CCE() ? 2 : (mTracking->mIOPtrs.mergedTracks[i].CSide() ? 1 : 0);
      float c = std::cos(alpha);
      float s = std::sin(alpha);
      mclocal[0] = x * c + y * s;
      mclocal[1] = -x * s + y * c;
      mclocal[2] = px * c + py * s;
      mclocal[3] = -px * s + py * c;
      if (mclocal[0] > param.GetX() + 20) {
      if (param.GetX() > mConfig.maxResX) {
      auto getdz = [this, &param, &mc1, &side, tracksExternal]() {
        if (tracksExternal) {
          return param.GetZ();
        if (!mParam->continuousMaxTimeBin) {
          return param.GetZ() - mc1.z;
#ifdef GPUCA_TPC_GEOMETRY_O2
        if (!mParam->par.earlyTpcTransform) {
          return param.GetZ() + shift - mc1.z;
      bool inFlyDirection = 0;
      if (mConfig.strict) {
        const float dy = param.Y() - mclocal[1];
        const float dz = getdz();
        if (dx * dx + dy * dy + dz * dz > 5.f * 5.f) {
      if (prop.PropagateToXAlpha(mclocal[0], alpha, inFlyDirection)) {
      if (fabsf(param.Y() - mclocal[1]) > (mConfig.strict ? 1.f : 4.f) || fabsf(getdz()) > (mConfig.strict ? 1.f : 4.f)) {
      float charge = mc1.charge > 0 ? 1.f : -1.f;
      float deltaY = param.GetY() - mclocal[1];
      float deltaZ = getdz();
      float deltaPhiNative = param.GetSinPhi() - mclocal[3] / mc2.pt;
      float deltaPhi = std::asin(param.GetSinPhi()) - std::atan2(mclocal[3], mclocal[2]);
      float deltaLambdaNative = param.GetDzDs() - mc1.pZ / mc2.pt;
      float deltaLambda = std::atan(param.GetDzDs()) - std::atan2(mc1.pZ, mc2.pt);
      float deltaPt = (fabsf(1.f / param.GetQPt()) - mc2.pt) / mc2.pt;
      float paramval[5] = {mclocal[1], mc1.z, mc2.phi, mc2.eta, mc2.pt};
      float resval[5] = {deltaY, deltaZ, mConfig.nativeFitResolutions ? deltaPhiNative : deltaPhi, mConfig.nativeFitResolutions ? deltaLambdaNative : deltaLambda, mConfig.nativeFitResolutions ? deltaPtNative : deltaPt};
      float pullval[5] = {deltaY / std::sqrt(param.GetErr2Y()), deltaZ / std::sqrt(param.GetErr2Z()), deltaPhiNative / std::sqrt(param.GetErr2SinPhi()), deltaLambdaNative / std::sqrt(param.GetErr2DzDs()), deltaPtNative / std::sqrt(param.GetErr2QPt())};
      for (int32_t j = 0; j < 5; j++) {
        for (int32_t k = 0; k < 5; k++) {
          if (k != 3 && fabsf(mc2.eta) > ETA_MAX2) {
          if (k < 4 && mc2.pt < 1.f / mConfig.qpt) {
          if (mQATasks & taskTrackingRes) {
            mRes2[j][k]->Fill(resval[j], paramval[k]);
          if (mQATasks & taskTrackingResPull) {
            mPull2[j][k]->Fill(pullval[j], paramval[k]);
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
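// Cluster-attachment task: classify every cluster (attached, fake, adjacent, fake adjacent, protected,
// used in physics) and fill the per-pt cluster histograms.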
  if (mQATasks & taskClusterAttach) {
    for (uint32_t iTrk = 0; iTrk < nReconstructedTracks; iTrk++) {
      if (!mTrackMCLabels[iTrk].isValid()) {
        for (uint32_t k = 0; k < track.NClusters(); k++) {
          int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
          float totalWeight = 0.;
          for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
            totalWeight += GetMCLabelWeight(hitId, j);
          int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
          if (totalWeight > 0) {
            float weight = 1.f / (totalWeight * (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached));
            for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
              mcLabelI_t label = GetMCLabel(hitId, j);
              float pt = GetMCTrackObj(mMCParam, label).pt;
              if (pt < PT_MIN_CLUST) {
              mClusters[CL_fake]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
              mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
              if (GetMCTrackObj(mRecTracks, label)) {
                mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
              mClusters[CL_all]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
              if (protect || physics) {
                mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
                mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
            float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
            mClusters[CL_fake]->Fill(0.f, weight);
            mClusters[CL_att_adj]->Fill(0.f, weight);
            mClusters[CL_all]->Fill(0.f, weight);
            mClusterCounts.nUnaccessible += weight;
            if (protect || physics) {
              mClusters[CL_prot]->Fill(0.f, weight);
              mClusters[CL_physics]->Fill(0.f, weight);

      mcLabelI_t label = mTrackMCLabels[iTrk];
      if (mMCTrackMin != -1 && (label.getTrackID() < mMCTrackMin || label.getTrackID() >= mMCTrackMax)) {
      for (uint32_t k = 0; k < track.NClusters(); k++) {
        int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
        float pt = GetMCTrackObj(mMCParam, label).pt;
        if (pt < PT_MIN_CLUST) {
        float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
        bool correct = false;
        for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
          if (label == GetMCLabel(hitId, j)) {
          mClusters[CL_attached]->Fill(pt, weight);
          mClusters[CL_tracks]->Fill(pt, weight);
          mClusters[CL_fake]->Fill(pt, weight);
        mClusters[CL_att_adj]->Fill(pt, weight);
        mClusters[CL_all]->Fill(pt, weight);
        int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
        if (protect || physics) {
          mClusters[CL_prot]->Fill(pt, weight);
          mClusters[CL_physics]->Fill(pt, weight);

    for (uint32_t i = 0; i < GetNMCLabels(); i++) {
      if ((mMCTrackMin != -1 && GetMCLabelID(i, 0) < mMCTrackMin) || (mMCTrackMax != -1 && GetMCLabelID(i, 0) >= mMCTrackMax)) {
      if (mClusterParam[i].attached || mClusterParam[i].fakeAttached) {
      int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
      if (mClusterParam[i].adjacent) {
        float totalWeight = 0.;
        for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
          mcLabelI_t labelT = GetMCLabel(i, j);
          totalWeight += GetMCLabelWeight(i, j);
        float weight = 1.f / totalWeight;
        if (totalWeight > 0) {
          for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
            mcLabelI_t labelT = GetMCLabel(i, j);
            float pt = GetMCTrackObj(mMCParam, labelT).pt;
            if (pt < PT_MIN_CLUST) {
            if (GetMCTrackObj(mRecTracks, labelT)) {
              mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(i, j) * weight);
            mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
            mClusters[CL_fakeAdj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
            mClusters[CL_all]->Fill(pt, GetMCLabelWeight(i, j) * weight);
            if (protect || physics) {
              mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(i, j) * weight);
              mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(i, j) * weight);
          mClusters[CL_att_adj]->Fill(0.f, 1.f);
          mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
          mClusters[CL_all]->Fill(0.f, 1.f);
          mClusterCounts.nUnaccessible++;
          if (protect || physics) {
            mClusters[CL_prot]->Fill(0.f, 1.f);
            mClusters[CL_physics]->Fill(0.f, 1.f);
        float pt = GetMCTrackObj(mMCParam, mTrackMCLabels[label]).pt;
        if (pt < PT_MIN_CLUST) {
        mClusters[CL_att_adj]->Fill(pt, 1.f);
        mClusters[CL_tracks]->Fill(pt, 1.f);
        mClusters[CL_all]->Fill(pt, 1.f);
        if (protect || physics) {
          mClusters[CL_prot]->Fill(pt, 1.f);
          mClusters[CL_physics]->Fill(pt, 1.f);
        float totalWeight = 0.;
        for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
          mcLabelI_t labelT = GetMCLabel(i, j);
          totalWeight += GetMCLabelWeight(i, j);
        if (totalWeight > 0) {
          for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
            mcLabelI_t label = GetMCLabel(i, j);
            float pt = GetMCTrackObj(mMCParam, label).pt;
            if (pt < PT_MIN_CLUST) {
            float weight = GetMCLabelWeight(i, j) / totalWeight;
            if (mClusterParam[i].fakeAdjacent) {
              mClusters[CL_fakeAdj]->Fill(pt, weight);
            if (mClusterParam[i].fakeAdjacent) {
              mClusters[CL_att_adj]->Fill(pt, weight);
            if (GetMCTrackObj(mRecTracks, label)) {
              mClusters[CL_tracks]->Fill(pt, weight);
            mClusters[CL_all]->Fill(pt, weight);
            if (protect || physics) {
              mClusters[CL_prot]->Fill(pt, weight);
              mClusters[CL_physics]->Fill(pt, weight);
          if (mClusterParam[i].fakeAdjacent) {
            mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
          if (mClusterParam[i].fakeAdjacent) {
            mClusters[CL_att_adj]->Fill(0.f, 1.f);
          mClusters[CL_all]->Fill(0.f, 1.f);
          mClusterCounts.nUnaccessible++;
          if (protect || physics) {
            mClusters[CL_prot]->Fill(0.f, 1.f);
            mClusters[CL_physics]->Fill(0.f, 1.f);

    if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
  } else if (!mConfig.inputHistogramsOnly && !mConfig.noMC && (mQATasks & (taskTrackingEff | taskTrackingRes | taskTrackingResPull | taskClusterAttach))) {
    GPUWarning("No MC information available, only running partial TPC QA!");
  // Track-statistics task: pt spectrum of the reconstructed tracks plus cluster and row counts per track.
  if (mQATasks & taskTrackStatistics) {
    for (uint32_t i = 0; i < nReconstructedTracks; i++) {
      mTracks->Fill(1.f / fabsf(track.GetParam().GetQPt()));
      mNCl[0]->Fill(track.NClustersFitted());
      uint32_t nClCorrected = 0;
      int32_t lastSector = -1, lastRow = -1;
      const auto& trackClusters = mTracking->mIOPtrs.mergedTrackHits;
      for (uint32_t j = 0; j < track.NClusters(); j++) {
        if (trackClusters[track.FirstClusterRef() + j].sector == lastSector && trackClusters[track.FirstClusterRef() + j].row == lastRow) {
        if (trackClusters[track.FirstClusterRef() + j].leg != trackClusters[track.FirstClusterRef() + track.NClusters() - 1].leg) {
        lastSector = trackClusters[track.FirstClusterRef() + j].sector;
        lastRow = trackClusters[track.FirstClusterRef() + j].row;
      mNCl[1]->Fill(nClCorrected);
    if (mClNative && mTracking && mTracking->GetTPCTransformHelper()) {
      for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
        const auto& cl = mClNative->clusters[i][j][k];
        GPUTPCConvertImpl::convert(*mTracking->GetTPCTransformHelper()->getCorrMap(), mTracking->GetParam(), i, j, cl.getPad(), cl.getTime(), x, y, z);
        mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
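// Cluster rejection statistics: per-cluster bookkeeping of attachment, protection and rejection,
// split by MC pt (above 400 MeV, below 40 MeV).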
  uint32_t nCl = clNative ? clNative->nClustersTotal : mTracking->GetProcessors()->tpcMerger.NMaxClusters();
  mClusterCounts.nTotal += nCl;
  if (mQATasks & taskClusterCounts) {
    for (uint32_t i = 0; i < nCl; i++) {
      int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
      float totalWeight = 0, weight400 = 0, weight40 = 0;
      for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
        const auto& label = GetMCLabel(i, j);
        if (GetMCLabelID(label) >= 0) {
          totalWeight += GetMCLabelWeight(label);
          if (GetMCTrackObj(mMCParam, label).pt >= 0.4) {
            weight400 += GetMCLabelWeight(label);
          if (GetMCTrackObj(mMCParam, label).pt <= 0.04) {
            weight40 += GetMCLabelWeight(label);
      if (totalWeight > 0 && 10.f * weight400 >= totalWeight) {
        if (!unattached && !protect && !physics) {
          mClusterCounts.nFakeRemove400++;
          int32_t totalFake = weight400 < 0.9f * totalWeight;
            mClusterCounts.nFullFakeRemove400++;
        mClusterCounts.nAbove400++;
      if (totalWeight > 0 && weight40 >= 0.9 * totalWeight) {
        mClusterCounts.nBelow40++;
        if (protect || physics) {
          mClusterCounts.nFakeProtect40++;
        mClusterCounts.nPhysics++;
      if (physics || protect) {
        mClusterCounts.nProt++;
        mClusterCounts.nUnattached++;

  if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
    DoClusterCounts(nullptr);
    mClusterCounts = counts_t();
  if (QA_TIMING || (mTracking && mTracking->GetProcessingSettings().debugLevel >= 3)) {
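// Optional ROOT dump of all clusters (and, for dumpToROOT >= 2, the attached track and track hit) for offline debugging.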
  if (mConfig.dumpToROOT) {
    if (!clNative || !mTracking || !mTracking->mIOPtrs.mergedTrackHitAttachment || !mTracking->mIOPtrs.mergedTracks) {
      throw std::runtime_error("Cannot dump non o2::tpc::clusterNative clusters, need also hit attachment and GPU tracks");
    for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
      const auto& cl = mClNative->clusters[i][j][k];
      uint32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[clid];
      float x = 0, y = 0, z = 0;
        const auto& trk = mTracking->mIOPtrs.mergedTracks[track];
        mTracking->GetTPCTransformHelper()->Transform(i, j, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTZOffset());
        mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
      uint32_t extState = mTracking->mIOPtrs.mergedTrackHitStates ? mTracking->mIOPtrs.mergedTrackHitStates[clid] : 0;
      if (mConfig.dumpToROOT >= 2) {
        memset((void*)&trk, 0, sizeof(trk));
        memset((void*)&trkHit, 0, sizeof(trkHit));
          trk = mTracking->mIOPtrs.mergedTracks[track];
          for (uint32_t l = 0; l < trk.NClusters(); l++) {
            const auto& tmp = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + l];
            if (tmp.num == clid) {
        static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, GPUTPCGMMergedTrack, GPUTPCGMMergedTrackHit, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "track", "trackHit", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
        cldump.Fill(cl, trk, trkHit, attach, extState, x, y, z, i, j, mNEvents - 1);
        static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
        cldump.Fill(cl, attach, extState, x, y, z, i, j, mNEvents - 1);

  for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
    if (mTracking->mIOPtrs.mergedTracks[i].OK()) {
      trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.mergedTracks[i]);
  if (mTracking && mTracking->GetProcessingSettings().createO2Output) {
    for (uint32_t i = 0; i < mTracking->mIOPtrs.nOutputTracksTPCO2; i++) {
      o2trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.outputTracksTPCO2[i]);
  mTrackingScratchBuffer.clear();
  mTrackingScratchBuffer.shrink_to_fit();
void GPUQA::GetName(char* fname, int32_t k)
  const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
  if (k || mConfig.inputHistogramsOnly || mConfig.name.size()) {
    if (!(mConfig.inputHistogramsOnly || k)) {
      snprintf(fname, 1024, "%s - ", mConfig.name.c_str());
    } else if (mConfig.compareInputNames.size() > (unsigned)(k - nNewInput)) {
      snprintf(fname, 1024, "%s - ", mConfig.compareInputNames[k - nNewInput].c_str());
      strcpy(fname, mConfig.compareInputs[k - nNewInput].c_str());
      if (strlen(fname) > 5 && strcmp(fname + strlen(fname) - 5, ".root") == 0) {
        fname[strlen(fname) - 5] = 0;
      strcat(fname, " - ");

T* GPUQA::GetHist(T*& ee, std::vector<std::unique_ptr<TFile>>& tin, int32_t k, int32_t nNewInput)
  if ((mConfig.inputHistogramsOnly || k) && (e = dynamic_cast<T*>(tin[k - nNewInput]->Get(e->GetName()))) == nullptr) {
    GPUWarning("Missing histogram in input %s: %s", mConfig.compareInputs[k - nNewInput].c_str(), ee->GetName());

void GPUQA::DrawQAHistogramsCleanup()
  clearGarbagageCollector();

void GPUQA::resetHists()
  if (!mQAInitialized) {
    throw std::runtime_error("QA not initialized");
  if (mHaveExternalHists) {
    throw std::runtime_error("Cannot reset external hists");
  for (auto& h : *mHist1D) {
  for (auto& h : *mHist2D) {
  for (auto& h : *mHist1Dd) {
  for (auto& h : *mHistGraph) {
    h = TGraphAsymmErrors();
  mClusterCounts = counts_t();
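// Final plot creation: set ROOT verbosity, prepare colors, open optional comparison inputs and the output file,
// and build the canvas / pad layout for the efficiency and resolution plots.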
int32_t GPUQA::DrawQAHistograms()
{
  const auto oldRootIgnoreLevel = gErrorIgnoreLevel;
  gErrorIgnoreLevel = kWarning;
  if (!mQAInitialized) {
    throw std::runtime_error("QA not initialized");
  }

  if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
    printf("Creating QA Histograms\n");
  }

  std::vector<Color_t> colorNums(COLORCOUNT);
  static int32_t initColorsInitialized = initColors();
  (void)initColorsInitialized;
  for (int32_t i = 0; i < COLORCOUNT; i++) {
    colorNums[i] = qcout ? defaultColorNums[i] : mColors[i]->GetNumber();
  }

  bool mcAvail = mcPresent();
  char name[2048], fname[1024];

  const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
  const int32_t ConfigNumInputs = nNewInput + mConfig.compareInputs.size();

  std::vector<std::unique_ptr<TFile>> tin;
  for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
    tin.emplace_back(std::make_unique<TFile>(mConfig.compareInputs[i].c_str()));
  }
  std::unique_ptr<TFile> tout = nullptr;
  if (mConfig.output.size()) {
    tout = std::make_unique<TFile>(mConfig.output.c_str(), "RECREATE");
  }

  if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
    float legendSpacingString = 0.025;
    for (int32_t i = 0; i < ConfigNumInputs; i++) {
      GetName(fname, i);
      if (strlen(fname) * 0.006 > legendSpacingString) {
        legendSpacingString = strlen(fname) * 0.006;
      }
    }
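    // Create canvases, pads and legends for the efficiency plots (one canvas per variable to plot against).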
    if (mQATasks & taskTrackingEff) {
      for (int32_t ii = 0; ii < 6; ii++) {
        int32_t i = ii == 5 ? 4 : ii;
        snprintf(fname, 1024, "eff_vs_%s_layout", VSPARAMETER_NAMES[ii]);
        snprintf(name, 2048, "Efficiency versus %s", VSPARAMETER_NAMES[i]);
        mCEff[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
        mCEff[ii]->cd();
        float dy = 1. / 2.;
        mPEff[ii][0] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
        mPEff[ii][0]->Draw();
        mPEff[ii][0]->SetRightMargin(0.04);
        mPEff[ii][1] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
        mPEff[ii][1]->Draw();
        mPEff[ii][1]->SetRightMargin(0.04);
        mPEff[ii][2] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 0.5, dy * 2 - .001);
        mPEff[ii][2]->Draw();
        mPEff[ii][2]->SetRightMargin(0.04);
        mPEff[ii][3] = createGarbageCollected<TPad>("p3", "", 0.5, dy * 1, 1.0, dy * 2 - .001);
        mPEff[ii][3]->Draw();
        mPEff[ii][3]->SetRightMargin(0.04);
        mLEff[ii] = createGarbageCollected<TLegend>(0.92 - legendSpacingString * 1.45, 0.83 - (0.93 - 0.82) / 2. * (float)ConfigNumInputs, 0.98, 0.849);
        SetLegend(mLEff[ii]);
      }
    }
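    // Same canvas / pad layout for the resolution plots, plus one extra canvas (index 6) for the integral resolution.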
    if (mQATasks & taskTrackingRes) {
      for (int32_t ii = 0; ii < 7; ii++) {
        int32_t i = ii == 5 ? 4 : ii;
        if (ii == 6) {
          snprintf(fname, 1024, "res_integral_layout");
          snprintf(name, 2048, "Integral Resolution");
        } else {
          snprintf(fname, 1024, "res_vs_%s_layout", VSPARAMETER_NAMES[ii]);
          snprintf(name, 2048, "Resolution versus %s", VSPARAMETER_NAMES[i]);
        }
        mCRes[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
        mCRes[ii]->cd();
        gStyle->SetOptFit(1);
        float dy = 1. / 2.;
        mPRes[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
        mPRes[ii][3]->Draw();
        mPRes[ii][3]->SetRightMargin(0.04);
        mPRes[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
        mPRes[ii][4]->Draw();
        mPRes[ii][4]->SetRightMargin(0.04);
        mPRes[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
        mPRes[ii][0]->Draw();
        mPRes[ii][0]->SetRightMargin(0.04);
        mPRes[ii][0]->SetLeftMargin(0.15);
        mPRes[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
        mPRes[ii][1]->Draw();
        mPRes[ii][1]->SetRightMargin(0.04);
        mPRes[ii][1]->SetLeftMargin(0.135);
        mPRes[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
        mPRes[ii][2]->Draw();
        mPRes[ii][2]->SetRightMargin(0.06);
        mPRes[ii][2]->SetLeftMargin(0.135);
        mLRes[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
        SetLegend(mLRes[ii]);
      }
    }
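    // Identical layout for the pull plots, again with canvas index 6 reserved for the integral pulls.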
    if (mQATasks & taskTrackingResPull) {
      for (int32_t ii = 0; ii < 7; ii++) {
        int32_t i = ii == 5 ? 4 : ii;
        if (ii == 6) {
          snprintf(fname, 1024, "pull_integral_layout");
          snprintf(name, 2048, "Integral Pull");
        } else {
          snprintf(fname, 1024, "pull_vs_%s_layout", VSPARAMETER_NAMES[ii]);
          snprintf(name, 2048, "Pull versus %s", VSPARAMETER_NAMES[i]);
        }
        mCPull[ii] = createGarbageCollected<TCanvas>(fname, name, 0, 0, 700, 700. * 2. / 3.);
        mCPull[ii]->cd();
        gStyle->SetOptFit(1);
        float dy = 1. / 2.;
        mPPull[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
        mPPull[ii][3]->Draw();
        mPPull[ii][3]->SetRightMargin(0.04);
        mPPull[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
        mPPull[ii][4]->Draw();
        mPPull[ii][4]->SetRightMargin(0.04);
        mPPull[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
        mPPull[ii][0]->Draw();
        mPPull[ii][0]->SetRightMargin(0.04);
        mPPull[ii][0]->SetLeftMargin(0.15);
        mPPull[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
        mPPull[ii][1]->Draw();
        mPPull[ii][1]->SetRightMargin(0.04);
        mPPull[ii][1]->SetLeftMargin(0.135);
        mPPull[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
        mPPull[ii][2]->Draw();
        mPPull[ii][2]->SetRightMargin(0.06);
        mPPull[ii][2]->SetLeftMargin(0.135);
        mLPull[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
        SetLegend(mLPull[ii]);
      }
    }
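    // One canvas per cluster-histogram type (counts, relative fraction, integrated fraction).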
    if (mQATasks & taskClusterAttach) {
      for (int32_t i = 0; i < 3; i++) {
        snprintf(fname, 1024, "clusters_%s_layout", CLUSTER_TYPES[i]);
        mCClust[i] = createGarbageCollected<TCanvas>(fname, CLUSTER_TITLES[i], 0, 0, 700, 700. * 2. / 3.);
        mCClust[i]->cd();
        mPClust[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
        mPClust[i]->Draw();
        float y1 = i != 1 ? 0.77 : 0.27, y2 = i != 1 ? 0.9 : 0.42;
        mLClust[i] = createGarbageCollected<TLegend>(i == 2 ? 0.1 : (0.65 - legendSpacingString * 1.45), y2 - (y2 - y1) * (ConfigNumInputs + (i != 1) / 2.) + 0.005, i == 2 ? (0.3 + legendSpacingString * 1.45) : 0.9, y2);
        SetLegend(mLClust[i]);
      }
    }
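    // Canvases for the track statistics: track pT spectrum, cluster-per-track counts, and the cluster X/Y map.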
    if (mQATasks & taskTrackStatistics) {
      mCTracks = createGarbageCollected<TCanvas>("ctracks", "Track Pt", 0, 0, 700, 700. * 2. / 3.);
      mCTracks->cd();
      mPTracks = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
      mPTracks->Draw();
      mLTracks = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
      SetLegend(mLTracks);

      for (int32_t i = 0; i < 2; i++) {
        snprintf(name, 2048, "cncl%d Pull", i);
        mCNCl[i] = createGarbageCollected<TCanvas>(name, i ? "Number of clusters (corrected for multiple per row)" : "Number of clusters per track", 0, 0, 700, 700. * 2. / 3.);
        mCNCl[i]->cd();
        mPNCl[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
        mPNCl[i]->Draw();
        mLNCl[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
        SetLegend(mLNCl[i]);
      }

      mCClXY = createGarbageCollected<TCanvas>("clxy", "Number of clusters per X / Y", 0, 0, 700, 700. * 2. / 3.);
      mCClXY->cd();
      mPClXY = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
      mPClXY->Draw();
    }
  }
  if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && (mQATasks & taskTrackingEff) && mcPresent()) {
    GPUInfo("QA Stats: Eff: Tracks Prim %d (Eta %d, Pt %d) %f%% (%f%%) Sec %d (Eta %d, Pt %d) %f%% (%f%%) - Res: Tracks %d (Eta %d, Pt %d)", (int32_t)mEff[3][1][0][0]->GetEntries(), (int32_t)mEff[3][1][0][3]->GetEntries(), (int32_t)mEff[3][1][0][4]->GetEntries(),
            mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights()), mEff[0][1][0][0]->GetSumOfWeights() / std::max(1., mEff[3][1][0][0]->GetSumOfWeights()), (int32_t)mEff[3][1][1][0]->GetEntries(), (int32_t)mEff[3][1][1][3]->GetEntries(),
            (int32_t)mEff[3][1][1][4]->GetEntries(), mEff[0][0][1][0]->GetSumOfWeights() / std::max(1., mEff[3][0][1][0]->GetSumOfWeights()), mEff[0][1][1][0]->GetSumOfWeights() / std::max(1., mEff[3][1][1][0]->GetSumOfWeights()), (int32_t)mRes2[0][0]->GetEntries(),
            (int32_t)mRes2[0][3]->GetEntries(), (int32_t)mRes2[0][4]->GetEntries());
  }

  int32_t flagShowVsPtLog = (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) ? 1 : 0;
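  // Process and draw the efficiency histograms: for the new input the efficiency, clone and fake rates are
  // computed here via TGraphAsymmErrors::Divide with binomial errors; compare inputs are read back from file.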
  if (mQATasks & taskTrackingEff) {
    for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
      int32_t i = ii == 5 ? 4 : ii;
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        for (int32_t j = 0; j < 4; j++) {
          if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
            mPEff[ii][j]->cd();
            if (ii == 5) {
              mPEff[ii][j]->SetLogx();
            }
          }
          for (int32_t l = 0; l < 3; l++) {
            if (k == 0 && mConfig.inputHistogramsOnly == 0 && ii != 5) {
              if (l == 0) {
                auto oldLevel = gErrorIgnoreLevel;
                gErrorIgnoreLevel = kError;
                mEffResult[0][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
                gErrorIgnoreLevel = oldLevel;
                mEff[3][j / 2][j % 2][i]->Reset();
                mEff[3][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
                mEff[3][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
                mEff[3][j / 2][j % 2][i]->Add(mEff[2][j / 2][j % 2][i]);
              } else {
                auto oldLevel = gErrorIgnoreLevel;
                gErrorIgnoreLevel = kError;
                mEffResult[l][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
                gErrorIgnoreLevel = oldLevel;
              }
            }

            TGraphAsymmErrors* e = mEffResult[l][j / 2][j % 2][i];

            if (!mConfig.inputHistogramsOnly && k == 0) {
              if (tout) {
                mEff[l][j / 2][j % 2][i]->Write();
                mEff[3][j / 2][j % 2][i]->Write();
              }
            } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
              continue;
            }
            e->SetTitle(EFFICIENCY_TITLES[j]);
            e->GetYaxis()->SetTitle("(Efficiency)");
            e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
            e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
            if (qcout && !mConfig.shipToQCAsCanvas) {
              qcout->Add(e);
            }
            if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
              continue;
            }
            e->SetMarkerColor(kBlack);
            e->SetLineColor(colorNums[(l == 2 ? (ConfigNumInputs * 2 + k) : (k * 2 + l)) % COLORCOUNT]);
            e->GetHistogram()->GetYaxis()->SetRangeUser(-0.02, 1.02);
            e->Draw(k || l ? "same P" : "AP");
            if (j == 0) {
              GetName(fname, k);
              snprintf(name, 2048, "%s%s", fname, EFF_NAMES[l]);
              mLEff[ii]->AddEntry(e, name, "l");
            }
          }
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
            continue;
          }
          ChangePadTitleSize(mPEff[ii][j], 0.056);
        }
      }
      if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        continue;
      }
      mLEff[ii]->Draw();
      if (qcout) {
        qcout->Add(mCEff[ii]);
      }
      if (!mConfig.enableLocalOutput) {
        continue;
      }
      doPerfFigure(0.2, 0.295, 0.025);
      mCEff[ii]->Print(Form("plots/eff_vs_%s.pdf", VSPARAMETER_NAMES[ii]));
      if (mConfig.writeRootFiles) {
        mCEff[ii]->Print(Form("plots/eff_vs_%s.root", VSPARAMETER_NAMES[ii]));
      }
    }
  }
  if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
    TH1D *resIntegral[5] = {}, *pullIntegral[5] = {};
    TCanvas* cfit = nullptr;
    std::unique_ptr<TF1> customGaus = std::make_unique<TF1>("G", "[0]*exp(-(x-[1])*(x-[1])/(2.*[2]*[2]))");
    for (int32_t p = 0; p < 2; p++) {
      if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
        continue;
      }
      for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
        TCanvas* can = p ? mCPull[ii] : mCRes[ii];
        TLegend* leg = p ? mLPull[ii] : mLRes[ii];
        int32_t i = ii == 5 ? 4 : ii;
        for (int32_t j = 0; j < 5; j++) {
          TH2F* src = p ? mPull2[j][i] : mRes2[j][i];
          TH1F** dst = p ? mPull[j][i] : mRes[j][i];
          TH1D*& dstIntegral = p ? pullIntegral[j] : resIntegral[j];
          TPad* pad = p ? mPPull[ii][j] : mPRes[ii][j];

          if (!mConfig.inputHistogramsOnly && ii != 5) {
            if (cfit == nullptr) {
              cfit = createGarbageCollected<TCanvas>();
            }
            TAxis* axis = src->GetYaxis();
            int32_t nBins = axis->GetNbins();
            const int32_t integ = 1; // assumed half-width of the sliding projection window
            for (int32_t bin = 1; bin <= nBins; bin++) {
              int32_t bin0 = std::max(bin - integ, 0);
              int32_t bin1 = std::min(bin + integ, nBins);
              std::unique_ptr<TH1D> proj{src->ProjectionX("proj", bin0, bin1)};
              proj->ClearUnderflowAndOverflow();
              if (proj->GetEntries()) {
                uint32_t rebin = 1;
                while (proj->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
                  proj->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
                  rebin++;
                }
                if (proj->GetEntries() < 20 || proj->GetRMS() < 0.00001) {
                  dst[0]->SetBinContent(bin, proj->GetRMS());
                  dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                  dst[1]->SetBinContent(bin, proj->GetMean());
                  dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                } else {
                  proj->GetXaxis()->SetRange(0, 0);
                  proj->GetXaxis()->SetRangeUser(std::max(proj->GetXaxis()->GetXmin(), proj->GetMean() - 3. * proj->GetRMS()), std::min(proj->GetXaxis()->GetXmax(), proj->GetMean() + 3. * proj->GetRMS()));
                  bool forceLogLike = proj->GetMaximum() < 20;
                  for (int32_t k = forceLogLike ? 2 : 0; k < 3; k++) {
                    proj->Fit("gaus", forceLogLike || k == 2 ? "sQl" : k ? "sQww" : "sQ");
                    TF1* fitFunc = proj->GetFunction("gaus");
                    if (k && !forceLogLike) {
                      customGaus->SetParameters(fitFunc->GetParameter(0), fitFunc->GetParameter(1), fitFunc->GetParameter(2));
                      proj->Fit(customGaus.get(), "sQ");
                      fitFunc = customGaus.get();
                    }
                    const float sigma = fabs(fitFunc->GetParameter(2));
                    dst[0]->SetBinContent(bin, sigma);
                    dst[1]->SetBinContent(bin, fitFunc->GetParameter(1));
                    dst[0]->SetBinError(bin, fitFunc->GetParError(2));
                    dst[1]->SetBinError(bin, fitFunc->GetParError(1));
                    const bool fail1 = sigma <= 0.f;
                    const bool fail2 = fabs(proj->GetMean() - dst[1]->GetBinContent(bin)) > std::min<float>(p ? PULL_AXIS : mConfig.nativeFitResolutions ? RES_AXES_NATIVE[j] : RES_AXES[j], 3.f * proj->GetRMS());
                    const bool fail3 = dst[0]->GetBinContent(bin) > 3.f * proj->GetRMS() || dst[0]->GetBinError(bin) > 1 || dst[1]->GetBinError(bin) > 1;
                    const bool fail4 = fitFunc->GetParameter(0) < proj->GetMaximum() / 5.;
                    const bool fail = fail1 || fail2 || fail3 || fail4;
                    if (!fail) {
                      break;
                    } else if (k >= 2) {
                      dst[0]->SetBinContent(bin, proj->GetRMS());
                      dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                      dst[1]->SetBinContent(bin, proj->GetMean());
                      dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                    }
                  }
                }
              } else {
                dst[0]->SetBinContent(bin, 0.f);
                dst[0]->SetBinError(bin, 0.f);
                dst[1]->SetBinContent(bin, 0.f);
                dst[1]->SetBinError(bin, 0.f);
              }
            }
            dstIntegral = src->ProjectionX(mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j], 0, nBins + 1);
            uint32_t rebin = 1;
            while (dstIntegral->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
              dstIntegral->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
              rebin++;
            }
          }
          if (mConfig.inputHistogramsOnly) {
            dstIntegral = createGarbageCollected<TH1D>();
          }
          snprintf(fname, 1024, p ? "IntPull%s" : "IntRes%s", VSPARAMETER_NAMES[j]);
          snprintf(name, 2048, p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]);
          dstIntegral->SetName(fname);
          dstIntegral->SetTitle(name);

          if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
            pad->cd();
          }
          int32_t numColor = 0;
          float tmpMax = -1000.;
          float tmpMin = 1000.;

          for (int32_t l = 0; l < 2; l++) {
            for (int32_t k = 0; k < ConfigNumInputs; k++) {
              TH1F* e = dst[l];
              if (GetHist(e, tin, k, nNewInput) == nullptr) {
                continue;
              }
              if (nNewInput && k == 0 && ii != 5) {
                e->Scale(mConfig.nativeFitResolutions ? SCALE_NATIVE[j] : SCALE[j]);
              }
              if (ii == 4) {
                e->GetXaxis()->SetRangeUser(0.2, PT_MAX);
              } else if (LOG_PT_MIN > 0 && ii == 5) {
                e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
              } else if (ii == 5) {
                e->GetXaxis()->SetRange(1, 0);
              }
              e->SetMinimum(-1111);
              e->SetMaximum(-1111);
              if (e->GetMaximum() > tmpMax) {
                tmpMax = e->GetMaximum();
              }
              if (e->GetMinimum() < tmpMin) {
                tmpMin = e->GetMinimum();
              }
            }
          }

          float tmpSpan = tmpMax - tmpMin;
          tmpMax += tmpSpan * .02;
          tmpMin -= tmpSpan * .02;
          if (j == 2 && i < 3) {
            tmpMax += tmpSpan * 0.13 * ConfigNumInputs;
          }

          for (int32_t k = 0; k < ConfigNumInputs; k++) {
            for (int32_t l = 0; l < 2; l++) {
              TH1F* e = dst[l];
              if (!mConfig.inputHistogramsOnly && k == 0) {
                snprintf(name, 2048, p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]);
                e->SetStats(kFALSE);
                if (tout) {
                  mRes2[j][i]->SetOption("colz");
                  mRes2[j][i]->Write();
                  e->Write();
                }
              } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
                continue;
              }
              e->SetMaximum(tmpMax);
              e->SetMinimum(tmpMin);
              e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
              e->GetYaxis()->SetTitle(p ? AXIS_TITLES_PULL[j] : mConfig.nativeFitResolutions ? AXIS_TITLES_NATIVE[j] : AXIS_TITLES[j]);
              e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
              if (LOG_PT_MIN > 0 && ii == 5) {
                e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
              }
              if (j == 0) {
                e->GetYaxis()->SetTitleOffset(1.5);
              } else {
                e->GetYaxis()->SetTitleOffset(1.4);
              }
              if (qcout && !mConfig.shipToQCAsCanvas) {
                qcout->Add(e);
              }
              if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
                continue;
              }
              e->SetMarkerColor(kBlack);
              e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
              e->Draw(k || l ? "same" : "");
              if (j == 0) {
                GetName(fname, k);
                if (p) {
                  snprintf(name, 2048, "%s%s", fname, l ? "Mean" : "Pull");
                } else {
                  snprintf(name, 2048, "%s%s", fname, l ? "Mean" : "Resolution");
                }
                leg->AddEntry(e, name, "l");
              }
            }
          }
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
            continue;
          }
          ChangePadTitleSize(pad, 0.056);
        }
        if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          continue;
        }
        leg->Draw();
        if (qcout) {
          qcout->Add(can);
        }
        if (!mConfig.enableLocalOutput) {
          continue;
        }
        doPerfFigure(0.2, 0.295, 0.025);
        can->Print(Form(p ? "plots/pull_vs_%s.pdf" : "plots/res_vs_%s.pdf", VSPARAMETER_NAMES[ii]));
        if (mConfig.writeRootFiles) {
          can->Print(Form(p ? "plots/pull_vs_%s.root" : "plots/res_vs_%s.root", VSPARAMETER_NAMES[ii]));
        }
      }
    }
    for (int32_t p = 0; p < 2; p++) {
      if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
        continue;
      }
      TCanvas* can = p ? mCPull[6] : mCRes[6];
      for (int32_t i = 0; i < 5; i++) {
        TPad* pad = p ? mPPull[6][i] : mPRes[6][i];
        TH1D* hist = p ? pullIntegral[i] : resIntegral[i];
        int32_t numColor = 0;
        if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
          pad->cd();
        }
        if (!mConfig.inputHistogramsOnly && mcAvail) {
          TH1D* e = hist;
          if (e && e->GetEntries()) {
            e->Fit("gaus", "sQ");
          }
        }

        float tmpMax = 0.;
        for (int32_t k = 0; k < ConfigNumInputs; k++) {
          TH1D* e = hist;
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
            continue;
          }
          e->SetMaximum(-1111);
          if (e->GetMaximum() > tmpMax) {
            tmpMax = e->GetMaximum();
          }
        }

        for (int32_t k = 0; k < ConfigNumInputs; k++) {
          TH1D* e = hist;
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
            continue;
          }
          e->SetMaximum(tmpMax * 1.02);
          e->SetMinimum(tmpMax * -0.02);
          if (tout && !mConfig.inputHistogramsOnly && k == 0) {
            e->Write();
          }
          if (qcout && !mConfig.shipToQCAsCanvas) {
            qcout->Add(e);
          }
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
            continue;
          }
          e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
          e->Draw(k == 0 ? "" : "same");
        }
        if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          continue;
        }
      }
      if (!mConfig.enableLocalOutput) {
        continue;
      }
      can->Print(p ? "plots/pull_integral.pdf" : "plots/res_integral.pdf");
      if (mConfig.writeRootFiles) {
        can->Print(p ? "plots/pull_integral.root" : "plots/res_integral.root");
      }
    }
  }
  uint64_t attachClusterCounts[N_CLS_HIST];
  if (mQATasks & taskClusterAttach) {
    if (mConfig.inputHistogramsOnly == 0) {
      for (int32_t i = N_CLS_HIST; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
        mClusters[i]->Sumw2(true);
      }
      double totalVal = 0;
      if (!CLUST_HIST_INT_SUM) {
        for (int32_t j = 0; j < mClusters[N_CLS_HIST - 1]->GetXaxis()->GetNbins() + 2; j++) {
          totalVal += mClusters[N_CLS_HIST - 1]->GetBinContent(j);
        }
      }
      if (totalVal == 0.) {
        totalVal = 1.;
      }
      for (int32_t i = 0; i < N_CLS_HIST; i++) {
        double val = 0;
        for (int32_t j = 0; j < mClusters[i]->GetXaxis()->GetNbins() + 2; j++) {
          val += mClusters[i]->GetBinContent(j);
          mClusters[2 * N_CLS_HIST - 1 + i]->SetBinContent(j, val / totalVal);
        }
        attachClusterCounts[i] = val;
      }

      if (!CLUST_HIST_INT_SUM) {
        for (int32_t i = 0; i < N_CLS_HIST; i++) {
          mClusters[2 * N_CLS_HIST - 1 + i]->SetMaximum(1.02);
          mClusters[2 * N_CLS_HIST - 1 + i]->SetMinimum(-0.02);
        }
      }

      for (int32_t i = 0; i < N_CLS_HIST - 1; i++) {
        auto oldLevel = gErrorIgnoreLevel;
        gErrorIgnoreLevel = kError;
        mClusters[N_CLS_HIST + i]->Divide(mClusters[i], mClusters[N_CLS_HIST - 1], 1, 1, "B");
        gErrorIgnoreLevel = oldLevel;
        mClusters[N_CLS_HIST + i]->SetMinimum(-0.02);
        mClusters[N_CLS_HIST + i]->SetMaximum(1.02);
      }
    }

    float tmpMax[2] = {0, 0}, tmpMin[2] = {0, 0};
    for (int32_t l = 0; l <= CLUST_HIST_INT_SUM; l++) {
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        TH1* e = mClusters[l ? (N_CLS_TYPE * N_CLS_HIST - 2) : (N_CLS_HIST - 1)];
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
          continue;
        }
        e->SetMinimum(-1111);
        e->SetMaximum(-1111);
        e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
        if (e->GetMaximum() > tmpMax[l]) {
          tmpMax[l] = e->GetMaximum();
        }
        if (e->GetMinimum() < tmpMin[l]) {
          tmpMin[l] = e->GetMinimum();
        }
      }
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        for (int32_t i = 0; i < N_CLS_HIST; i++) {
          TH1* e = mClusters[l ? (2 * N_CLS_HIST - 1 + i) : i];
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
            continue;
          }
          e->SetMaximum(tmpMax[l] * 1.02);
          e->SetMinimum(tmpMax[l] * -0.02);
        }
      }
    }

    for (int32_t i = 0; i < N_CLS_TYPE; i++) {
      if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
        mPClust[i]->cd();
        mPClust[i]->SetLogx();
      }
      int32_t begin = i == 2 ? (2 * N_CLS_HIST - 1) : i == 1 ? N_CLS_HIST : 0;
      int32_t end = i == 2 ? (3 * N_CLS_HIST - 1) : i == 1 ? (2 * N_CLS_HIST - 1) : N_CLS_HIST;
      int32_t numColor = 0;
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        for (int32_t j = end - 1; j >= begin; j--) {
          TH1* e = mClusters[j];
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
            continue;
          }
          e->SetTitle(CLUSTER_TITLES[i]);
          e->GetYaxis()->SetTitle(i == 0 ? "Number of TPC clusters" : i == 1 ? "Fraction of TPC clusters" : CLUST_HIST_INT_SUM ? "Total TPC clusters (integrated)" : "Fraction of TPC clusters (integrated)");
          e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
          e->GetXaxis()->SetTitleOffset(1.1);
          e->GetXaxis()->SetLabelOffset(-0.005);
          if (tout && !mConfig.inputHistogramsOnly && k == 0) {
            e->Write();
          }
          e->SetStats(kFALSE);
          e->SetLineStyle(CONFIG_DASHED_MARKERS ? j + 1 : 1);
          if (i == 0) {
            e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
          }
          if (qcout && !mConfig.shipToQCAsCanvas) {
            qcout->Add(e);
          }
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
            continue;
          }
          e->SetMarkerColor(kBlack);
          e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
          e->Draw(j == end - 1 && k == 0 ? "" : "same");
          GetName(fname, k);
          snprintf(name, 2048, "%s%s", fname, CLUSTER_NAMES[j - begin]);
          mLClust[i]->AddEntry(e, name, "l");
        }
      }
      if (ConfigNumInputs == 1) {
        TH1* e = reinterpret_cast<TH1F*>(mClusters[begin + CL_att_adj]->Clone());
        e->Add(mClusters[begin + CL_prot], -1);
        if (qcout && !mConfig.shipToQCAsCanvas) {
          qcout->Add(e);
        }
        if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          continue;
        }
        e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
        e->Draw("same");
        mLClust[i]->AddEntry(e, "Removed (Strategy A)", "l");
      }
      if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        continue;
      }
      mLClust[i]->Draw();
      if (qcout) {
        qcout->Add(mCClust[i]);
      }
      if (!mConfig.enableLocalOutput) {
        continue;
      }
      doPerfFigure(i != 2 ? 0.37 : 0.6, 0.295, 0.030);
      mCClust[i]->Print(i == 2 ? "plots/clusters_integral.pdf" : i == 1 ? "plots/clusters_relative.pdf" : "plots/clusters.pdf");
      if (mConfig.writeRootFiles) {
        mCClust[i]->Print(i == 2 ? "plots/clusters_integral.root" : i == 1 ? "plots/clusters_relative.root" : "plots/clusters.root");
      }
    }
  }
  if ((mQATasks & taskClusterCounts) && !mHaveExternalHists && !mConfig.clusterRejectionHistograms && !mConfig.inputHistogramsOnly) {
    DoClusterCounts(attachClusterCounts);
  }
  if ((qcout || tout) && (mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
    for (uint32_t i = 0; i < mHistClusterCount.size(); i++) {
      if (tout) {
        mHistClusterCount[i]->Write();
      }
      if (qcout) {
        qcout->Add(mHistClusterCount[i]);
      }
    }
  }
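  // Process the track statistics histograms: track pT spectrum, cluster-per-track distributions, and cluster X/Y map.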
  if (mQATasks & taskTrackStatistics) {
    // Track pT spectrum
    float tmpMax = 0.;
    for (int32_t k = 0; k < ConfigNumInputs; k++) {
      TH1F* e = mTracks; // assumption: member holding the track pT histogram
      if (GetHist(e, tin, k, nNewInput) == nullptr) {
        continue;
      }
      e->SetMaximum(-1111);
      if (e->GetMaximum() > tmpMax) {
        tmpMax = e->GetMaximum();
      }
    }

    mPTracks->cd();
    mPTracks->SetLogx();
    for (int32_t k = 0; k < ConfigNumInputs; k++) {
      TH1F* e = mTracks; // assumption: see above
      if (GetHist(e, tin, k, nNewInput) == nullptr) {
        continue;
      }
      if (tout && !mConfig.inputHistogramsOnly && k == 0) {
        e->Write();
      }
      e->SetMaximum(tmpMax * 1.02);
      e->SetMinimum(tmpMax * -0.02);
      e->SetStats(kFALSE);
      e->GetYaxis()->SetTitle("a.u.");
      e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
      e->SetMarkerColor(kBlack);
      e->SetLineColor(colorNums[k % COLORCOUNT]);
      e->Draw(k == 0 ? "" : "same");
      GetName(fname, k);
      snprintf(name, 2048, "%sTrack Pt", fname);
      mLTracks->AddEntry(e, name, "l");
    }
    mLTracks->Draw();
    mCTracks->Print("plots/tracks.pdf");
    if (mConfig.writeRootFiles) {
      mCTracks->Print("plots/tracks.root");
    }

    // Number of clusters per track (raw and corrected for multiple clusters per row)
    for (int32_t i = 0; i < 2; i++) {
      float tmpMax = 0.;
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        TH1F* e = mNCl[i]; // assumption: member holding the cluster-count histograms
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
          continue;
        }
        e->SetMaximum(-1111);
        if (e->GetMaximum() > tmpMax) {
          tmpMax = e->GetMaximum();
        }
      }
      mPNCl[i]->cd();
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        TH1F* e = mNCl[i]; // assumption: see above
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
          continue;
        }
        if (tout && !mConfig.inputHistogramsOnly && k == 0) {
          e->Write();
        }
        e->SetMaximum(tmpMax * 1.02);
        e->SetMinimum(tmpMax * -0.02);
        e->SetStats(kFALSE);
        e->GetYaxis()->SetTitle("a.u.");
        e->GetXaxis()->SetTitle("NClusters");
        e->SetMarkerColor(kBlack);
        e->SetLineColor(colorNums[k % COLORCOUNT]);
        e->Draw(k == 0 ? "" : "same");
        GetName(fname, k);
        snprintf(name, 2048, "%sNClusters%d", fname, i);
        mLNCl[i]->AddEntry(e, name, "l");
      }
      mLNCl[i]->Draw();
      snprintf(name, 2048, "plots/nClusters%s.pdf", i ? "_corrected" : "");
      mCNCl[i]->Print(name);
      if (mConfig.writeRootFiles) {
        snprintf(name, 2048, "plots/nClusters%s.root", i ? "_corrected" : "");
        mCNCl[i]->Print(name);
      }
    }

    // Cluster occupancy in X / Y
    mPClXY->cd();
    mClXY->SetOption("colz");
    mClXY->Draw();
    mCClXY->Print("plots/clustersXY.pdf");
    if (mConfig.writeRootFiles) {
      mCClXY->Print("plots/clustersXY.root");
    }
  }
  if (tout && !mConfig.inputHistogramsOnly && mConfig.writeMCLabels) {
    gInterpreter->GenerateDictionary("vector<vector<int32_t>>", "");
    tout->WriteObject(&mcEffBuffer, "mcEffBuffer");
    tout->WriteObject(&mcLabelBuffer, "mcLabelBuffer");
    remove("AutoDict_vector_vector_int__.cxx");
    remove("AutoDict_vector_vector_int___cxx_ACLiC_dict_rdict.pcm");
    remove("AutoDict_vector_vector_int___cxx.d");
    remove("AutoDict_vector_vector_int___cxx.so");
  }

  if (tout) {
    tout->Close();
  }
  for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
    tin[i]->Close();
  }
  clearGarbagageCollector();
  GPUInfo("GPU TPC QA histograms have been written to %s files", mConfig.writeRootFiles ? ".pdf and .root" : ".pdf");
  gErrorIgnoreLevel = oldRootIgnoreLevel;
  return 0;
}
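// PrintClusterCount() handles one counter in one of two modes: mode 1 books the corresponding
// cluster-count histogram, mode 0 prints the counter (as an absolute value and as a percentage of
// the normalization) and fills the histogram if cluster rejection histograms are enabled.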
void GPUQA::PrintClusterCount(int32_t mode, int32_t& num, const char* name, uint64_t n, uint64_t normalization)
{
  if (mode == 1) {
    char name2[128];
    snprintf(name2, 128, "clusterCount%d_", num);
    char* ptr = name2 + strlen(name2);
    for (uint32_t i = 0; i < strlen(name); i++) {
      if (isalnum((unsigned char)name[i])) { // assumption: keep only characters that are safe in a ROOT object name
        *(ptr++) = name[i];
      }
    }
    *ptr = 0;
    createHist(mHistClusterCount[num], name2, name, 1000, 0, mConfig.histMaxNClusters, 1000, 0, 100);
  } else if (mode == 0) {
    if (normalization && mConfig.enableLocalOutput) {
      printf("\t%35s: %'12" PRIu64 " (%6.2f%%)\n", name, n, 100.f * n / normalization);
    }
    if (mConfig.clusterRejectionHistograms) {
      float ratio = 100.f * n / std::max<uint64_t>(normalization, 1);
      mHistClusterCount[num]->Fill(normalization, ratio, 1);
    }
  }
  num++;
}
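// DoClusterCounts() runs PrintClusterCount() over all cluster counters: the attachment categories
// (when MC is available), the global rejection/protection counters, and the MC-based fake-removal counters.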
int32_t GPUQA::DoClusterCounts(uint64_t* attachClusterCounts, int32_t mode)
{
  int32_t num = 0;
  if (mcPresent() && (mQATasks & taskClusterAttach) && attachClusterCounts) {
    for (int32_t i = 0; i < N_CLS_HIST; i++) {
      PrintClusterCount(mode, num, CLUSTER_NAMES[i], attachClusterCounts[i], mClusterCounts.nTotal);
    }
    PrintClusterCount(mode, num, "Unattached", attachClusterCounts[N_CLS_HIST - 1] - attachClusterCounts[CL_att_adj], mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Removed (Strategy A)", attachClusterCounts[CL_att_adj] - attachClusterCounts[CL_prot], mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Unaccessible", mClusterCounts.nUnaccessible, mClusterCounts.nTotal);
  }

  PrintClusterCount(mode, num, "All Clusters", mClusterCounts.nTotal, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Used in Physics", mClusterCounts.nPhysics, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Protected", mClusterCounts.nProt, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Unattached", mClusterCounts.nUnattached, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Removed (Strategy A)", mClusterCounts.nTotal - mClusterCounts.nUnattached - mClusterCounts.nProt, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Removed (Strategy B)", mClusterCounts.nTotal - mClusterCounts.nProt, mClusterCounts.nTotal);

  PrintClusterCount(mode, num, "Merged Loopers (Afterburner)", mClusterCounts.nMergedLooper, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "High Inclination Angle", mClusterCounts.nHighIncl, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Rejected", mClusterCounts.nRejected, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Tube (> 200 MeV)", mClusterCounts.nTube, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Tube (< 200 MeV)", mClusterCounts.nTube200, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Looping Legs", mClusterCounts.nLoopers, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Low Pt < 50 MeV", mClusterCounts.nLowPt, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Low Pt < 200 MeV", mClusterCounts.n200MeV, mClusterCounts.nTotal);

  if (mcPresent() && (mQATasks & taskClusterAttach)) {
    PrintClusterCount(mode, num, "Tracks > 400 MeV", mClusterCounts.nAbove400, mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Fake Removed (> 400 MeV)", mClusterCounts.nFakeRemove400, mClusterCounts.nAbove400);
    PrintClusterCount(mode, num, "Full Fake Removed (> 400 MeV)", mClusterCounts.nFullFakeRemove400, mClusterCounts.nAbove400);
    PrintClusterCount(mode, num, "Tracks < 40 MeV", mClusterCounts.nBelow40, mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Fake Protect (< 40 MeV)", mClusterCounts.nFakeProtect40, mClusterCounts.nBelow40);
  }
  return num;
}
void* GPUQA::AllocateScratchBuffer(size_t nBytes)
{
  mTrackingScratchBuffer.resize((nBytes + sizeof(mTrackingScratchBuffer[0]) - 1) / sizeof(mTrackingScratchBuffer[0]));
  return mTrackingScratchBuffer.data();
}