#include "TGraphAsymmErrors.h"
#include "GPUChainTrackingGetters.inc"
#include "GPUParam.inc"
#include "TParticlePDG.h"
#include "TDatabasePDG.h"
#include <oneapi/tbb.h>
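// checkClusterState<COUNT>: derive per-cluster attachment / rejection flags from the merger attach word;
// with COUNT == true the corresponding global cluster counters are incremented as a side effect.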
template <bool COUNT, class T>
  r.qpt = fabsf(mTracking->mIOPtrs.mergedTracks[r.id].GetParam().GetQPt());
  r.lowPt = r.qpt * mTracking->GetParam().qptB5Scaler > mTracking->GetParam().rec.tpc.rejectQPtB5;
  r.mev200 = r.qpt > 5;
  r.mergedLooperUnconnected = mTracking->mIOPtrs.mergedTracks[r.id].MergedLooperUnconnected();
  r.mergedLooperConnected = mTracking->mIOPtrs.mergedTracks[r.id].MergedLooperConnected();
    if constexpr (COUNT) {
    if constexpr (COUNT) {
  } else if (r.mergedLooperUnconnected) {
    if constexpr (COUNT) {
      counts->nMergedLooperUnconnected++;
  } else if (r.mergedLooperConnected) {
    if constexpr (COUNT) {
      counts->nMergedLooperConnected++;
  r.protect = !GPUTPCClusterRejection::GetRejectionStatus<COUNT>(attach, r.physics, counts, &r.mev200) && ((attach & gputpcgmmergertypes::attachProtect) || !GPUTPCClusterRejection::IsTrackRejected(mTracking->mIOPtrs.mergedTracks[r.id], mTracking->GetParam()));
  static GPUSettingsQA defaultConfig;
    return *chain->mConfigQA;
  return defaultConfig;
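// Binning, axis ranges, and label constants shared by all QA histograms.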
static const constexpr float LOG_PT_MIN = -1.;
static constexpr float Y_MAX = 40;
static constexpr float Z_MAX = 100;
static constexpr float PT_MIN = 0.01;
static constexpr float PT_MIN_PRIM = 0.1;
static constexpr float PT_MIN_CLUST = 0.01;
static constexpr float PT_MAX = 20;
static constexpr float ETA_MAX = 1.5;
static constexpr float ETA_MAX2 = 0.9;
static constexpr int32_t PADROW_CHECK_MINCLS = 50;
static constexpr bool CLUST_HIST_INT_SUM = false;
static constexpr const int32_t COLORCOUNT = 12;
static const constexpr char* EFF_TYPES[6] = {"Rec", "Clone", "Fake", "All", "RecAndClone", "MC"};
static const constexpr char* FINDABLE_NAMES[2] = {"All", "Findable"};
static const constexpr char* PRIM_NAMES[2] = {"Prim", "Sec"};
static const constexpr char* PARAMETER_NAMES[5] = {"Y", "Z", "#Phi", "#lambda", "Relative #it{p}_{T}"};
static const constexpr char* PARAMETER_NAMES_NATIVE[5] = {"Y", "Z", "sin(#Phi)", "tan(#lambda)", "q/#it{p}_{T} (curvature)"};
static const constexpr char* VSPARAMETER_NAMES[6] = {"Y", "Z", "Phi", "Eta", "Pt", "Pt_log"};
static const constexpr char* EFF_NAMES[3] = {"Efficiency", "Clone Rate", "Fake Rate"};
static const constexpr char* EFFICIENCY_TITLES[4] = {"Efficiency (Primary Tracks, Findable)", "Efficiency (Secondary Tracks, Findable)", "Efficiency (Primary Tracks)", "Efficiency (Secondary Tracks)"};
static const constexpr double SCALE[5] = {10., 10., 1000., 1000., 100.};
static const constexpr double SCALE_NATIVE[5] = {10., 10., 1000., 1000., 1.};
static const constexpr char* XAXIS_TITLES[5] = {"#it{y}_{mc} (cm)", "#it{z}_{mc} (cm)", "#Phi_{mc} (rad)", "#eta_{mc}", "#it{p}_{Tmc} (GeV/#it{c})"};
static const constexpr char* AXIS_TITLES[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "#phi-#phi_{mc} (mrad) (Resolution)", "#lambda-#lambda_{mc} (mrad) (Resolution)", "(#it{p}_{T} - #it{p}_{Tmc}) / #it{p}_{Tmc} (%) (Resolution)"};
static const constexpr char* AXIS_TITLES_NATIVE[5] = {"#it{y}-#it{y}_{mc} (mm) (Resolution)", "#it{z}-#it{z}_{mc} (mm) (Resolution)", "sin(#phi)-sin(#phi_{mc}) (Resolution)", "tan(#lambda)-tan(#lambda_{mc}) (Resolution)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc}) (Resolution)"};
static const constexpr char* AXIS_TITLES_PULL[5] = {"#it{y}-#it{y}_{mc}/#sigma_{y} (Pull)", "#it{z}-#it{z}_{mc}/#sigma_{z} (Pull)", "sin(#phi)-sin(#phi_{mc})/#sigma_{sin(#phi)} (Pull)", "tan(#lambda)-tan(#lambda_{mc})/#sigma_{tan(#lambda)} (Pull)", "q*(q/#it{p}_{T} - q/#it{p}_{Tmc})/#sigma_{q/#it{p}_{T}} (Pull)"};
static const constexpr char* CLUSTER_NAMES[GPUQA::N_CLS_HIST] = {"Correctly attached clusters", "Fake attached clusters", "Attached + adjacent clusters", "Fake adjacent clusters", "Clusters of reconstructed tracks", "Used in Physics", "Protected", "All clusters"};
static const constexpr char* CLUSTER_TITLES[GPUQA::N_CLS_TYPE] = {"Clusters Pt Distribution / Attachment", "Clusters Pt Distribution / Attachment (relative to all clusters)", "Clusters Pt Distribution / Attachment (integrated)"};
static const constexpr char* CLUSTER_NAMES_SHORT[GPUQA::N_CLS_HIST] = {"Attached", "Fake", "AttachAdjacent", "FakeAdjacent", "FoundTracks", "Physics", "Protected", "All"};
static const constexpr char* CLUSTER_TYPES[GPUQA::N_CLS_TYPE] = {"", "Ratio", "Integral"};
static const constexpr char* REJECTED_NAMES[3] = {"All", "Rejected", "Fraction"};
static const constexpr int32_t COLORS_HEX[COLORCOUNT] = {0xB03030, 0x00A000, 0x0000C0, 0x9400D3, 0x19BBBF, 0xF25900, 0x7F7F7F, 0xFFD700, 0x07F707, 0x07F7F7, 0xF08080, 0x000000};
static const constexpr int32_t CONFIG_DASHED_MARKERS = 0;
static const constexpr float AXES_MIN[5] = {-Y_MAX, -Z_MAX, 0.f, -ETA_MAX, PT_MIN};
static const constexpr float AXES_MAX[5] = {Y_MAX, Z_MAX, 2.f * M_PI, ETA_MAX, PT_MAX};
static const constexpr int32_t AXIS_BINS[5] = {51, 51, 144, 31, 50};
static const constexpr int32_t RES_AXIS_BINS[] = {1017, 113};
static const constexpr float RES_AXES[5] = {1., 1., 0.03, 0.03, 1.0};
static const constexpr float RES_AXES_NATIVE[5] = {1., 1., 0.1, 0.1, 5.0};
static const constexpr float PULL_AXIS = 10.f;
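// Translate the COLORS_HEX palette into ROOT TColor objects (color indices 10000+); defaultColorNums
// lists the fallback ROOT color enums.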
std::vector<TColor*> GPUQA::mColors;
int32_t GPUQA::initColors()
  mColors.reserve(COLORCOUNT);
  for (int32_t i = 0; i < COLORCOUNT; i++) {
    float f1 = (float)((COLORS_HEX[i] >> 16) & 0xFF) / (float)0xFF;
    float f2 = (float)((COLORS_HEX[i] >> 8) & 0xFF) / (float)0xFF;
    float f3 = (float)((COLORS_HEX[i] >> 0) & 0xFF) / (float)0xFF;
    mColors.emplace_back(new TColor(10000 + i, f1, f2, f3));
static constexpr Color_t defaultColorNums[COLORCOUNT] = {kRed, kBlue, kGreen, kMagenta, kOrange, kAzure, kBlack, kYellow, kGray, kTeal, kSpring, kPink};
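// O2 geometry: MC-truth accessors backed by the ClusterNative MC truth container, with
// (sourceID, eventID) pairs flattened into a single collision index via mMCEventOffset.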
#define TRACK_EXPECTED_REFERENCE_X_DEFAULT 81
#ifdef GPUCA_TPC_GEOMETRY_O2
static inline int32_t GPUQA_O2_ConvertFakeLabel(int32_t label) { return label >= 0x7FFFFFFE ? -1 : label; }
inline uint32_t GPUQA::GetNMCCollissions() const { return mMCInfosCol.size(); }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mMCInfosCol[iCol].num; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].num; }
inline uint32_t GPUQA::GetNMCLabels() const { return mClNative->clustersMCTruth ? mClNative->clustersMCTruth->getIndexedSize() : 0; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mMCInfos[mMCInfosCol[iCol].first + iTrk]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return mMCInfos[mMCInfosCol[mMCEventOffset[label.getSourceID()] + label.getEventID()].first + label.getTrackID()]; }
inline GPUQA::mcLabels_t GPUQA::GetMCLabel(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i); }
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return label.size(); }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return mClNative->clustersMCTruth->getLabels(i).size(); }
inline GPUQA::mcLabel_t GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mClNative->clustersMCTruth->getLabels(i)[j]; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(mClNative->clustersMCTruth->getLabels(i)[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return GPUQA_O2_ConvertFakeLabel(label[j].getTrackID()); }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return GPUQA_O2_ConvertFakeLabel(label.getTrackID()); }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return mMCEventOffset[mClNative->clustersMCTruth->getLabels(i)[j].getSourceID()] + mClNative->clustersMCTruth->getLabels(i)[j].getEventID(); }
inline const auto& GPUQA::GetClusterLabels() { return mClNative->clustersMCTruth; }
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return 1; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return 1; }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && mClNative && mClNative->clustersMCTruth && mMCInfos.size(); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return !label.isValid() ? 0 : (mMCEventOffset[label.getSourceID()] + label.getEventID()); }
bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return l1.compare(l2) >= 0; }
#define TRACK_EXPECTED_REFERENCE_X 78
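// Non-O2 geometry: MC truth comes from the legacy IO pointers with a fixed three labels per
// cluster; fake labels are encoded and decoded by FakeLabelID / AbsLabelID.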
inline GPUQA::mcLabelI_t::mcLabelI_t(const GPUQA::mcLabel_t& l) : track(l.fMCID) {}
inline bool GPUQA::mcLabelI_t::operator==(const GPUQA::mcLabel_t& l) { return AbsLabelID(track) == l.fMCID; }
inline uint32_t GPUQA::GetNMCCollissions() const { return 1; }
inline uint32_t GPUQA::GetNMCTracks(int32_t iCol) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCTracks(const mcLabelI_t& label) const { return mTracking->mIOPtrs.nMCInfosTPC; }
inline uint32_t GPUQA::GetNMCLabels() const { return mTracking->mIOPtrs.nMCLabelsTPC; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(uint32_t iTrk, uint32_t iCol) { return mTracking->mIOPtrs.mcInfosTPC[AbsLabelID(iTrk)]; }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabel_t& label) { return GetMCTrack(label.fMCID, 0); }
inline const GPUQA::mcInfo_t& GPUQA::GetMCTrack(const mcLabelI_t& label) { return GetMCTrack(label.track, 0); }
inline const GPUQA::mcLabels_t& GPUQA::GetMCLabel(uint32_t i) { return mTracking->mIOPtrs.mcLabelsTPC[i]; }
inline const GPUQA::mcLabel_t& GPUQA::GetMCLabel(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j]; }
inline int32_t GPUQA::GetMCLabelNID(const mcLabels_t& label) { return 3; }
inline int32_t GPUQA::GetMCLabelNID(uint32_t i) { return 3; }
inline int32_t GPUQA::GetMCLabelID(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fMCID; }
inline int32_t GPUQA::GetMCLabelID(const mcLabel_t& label) { return label.fMCID; }
inline uint32_t GPUQA::GetMCLabelCol(uint32_t i, uint32_t j) { return 0; }
inline const auto& GPUQA::GetClusterLabels() { return mTracking->mIOPtrs.mcLabelsTPC; }
inline float GPUQA::GetMCLabelWeight(uint32_t i, uint32_t j) { return mTracking->mIOPtrs.mcLabelsTPC[i].fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabels_t& label, uint32_t j) { return label.fClusterID[j].fWeight; }
inline float GPUQA::GetMCLabelWeight(const mcLabel_t& label) { return label.fWeight; }
inline int32_t GPUQA::FakeLabelID(int32_t id) { return id < 0 ? id : (-2 - id); }
inline int32_t GPUQA::AbsLabelID(int32_t id) { return id >= 0 ? id : (-id - 2); }
inline bool GPUQA::mcPresent() { return !mConfig.noMC && mTracking && GetNMCLabels() && GetNMCTracks(0); }
uint32_t GPUQA::GetMCLabelCol(const mcLabel_t& label) const { return 0; }
bool GPUQA::CompareIgnoreFake(const mcLabelI_t& l1, const mcLabelI_t& l2) { return AbsLabelID(l1) == AbsLabelID(l2); }
#define TRACK_EXPECTED_REFERENCE_X TRACK_EXPECTED_REFERENCE_X_DEFAULT
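// Histogram bookkeeping: getHistArray<T> returns the storage vector and the position list for each
// supported ROOT histogram type.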
  return obj[mMCEventOffset[l.getSourceID()] + l.getEventID()][l.getTrackID()];
auto GPUQA::getHistArray<TH1F>()
  return std::make_pair(mHist1D, &mHist1D_pos);
auto GPUQA::getHistArray<TH2F>()
  return std::make_pair(mHist2D, &mHist2D_pos);
auto GPUQA::getHistArray<TH1D>()
  return std::make_pair(mHist1Dd, &mHist1Dd_pos);
auto GPUQA::getHistArray<TGraphAsymmErrors>()
  return std::make_pair(mHistGraph, &mHistGraph_pos);
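// createHist either re-uses the next externally provided histogram (validating count and name)
// or emplaces a newly constructed one, and records a back-pointer for later re-binding.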
template <class T, typename... Args>
void GPUQA::createHist(T*& h, const char* name, Args... args)
  const auto& p = getHistArray<T>();
  if (mHaveExternalHists) {
    if (p.first->size() <= p.second->size()) {
      GPUError("Array sizes mismatch: Histograms %lu <= Positions %lu", p.first->size(), p.second->size());
      throw std::runtime_error("Incoming histogram array incomplete");
    if (strcmp((*p.first)[p.second->size()].GetName(), name)) {
      GPUError("Histogram name mismatch: in array %s, trying to create %s", (*p.first)[p.second->size()].GetName(), name);
      throw std::runtime_error("Incoming histogram has incorrect name");
    if constexpr (std::is_same_v<T, TGraphAsymmErrors>) {
      p.first->emplace_back();
      p.first->back().SetName(name);
      p.first->emplace_back(name, args...);
  h = &((*p.first)[p.second->size()]);
  p.second->emplace_back(&h);
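// Garbage collector for transient ROOT drawing objects; TPads are cleared explicitly before the rest.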
  std::tuple<std::vector<std::unique_ptr<TCanvas>>, std::vector<std::unique_ptr<TLegend>>, std::vector<std::unique_ptr<TPad>>, std::vector<std::unique_ptr<TLatex>>, std::vector<std::unique_ptr<TH1D>>> v;
template <class T, typename... Args>
T* GPUQA::createGarbageCollected(Args... args)
  auto& v = std::get<std::vector<std::unique_ptr<T>>>(mGarbageCollector->v);
  v.emplace_back(std::make_unique<T>(args...));
  return v.back().get();
void GPUQA::clearGarbagageCollector()
  std::get<std::vector<std::unique_ptr<TPad>>>(mGarbageCollector->v).clear();
  std::apply([](auto&&... args) { ((args.clear()), ...); }, mGarbageCollector->v);
  mMCEventOffset.resize(1, 0);
  if (mQAInitialized && !mHaveExternalHists) {
    clearGarbagageCollector();
  const auto& r = checkClusterState<false>(attach);
  return r.protect || r.physics;
  return (!r.unattached && !r.physics && !r.protect);
void GPUQA::SetAxisSize(T* e)
  e->GetYaxis()->SetTitleOffset(1.0);
  e->GetYaxis()->SetTitleSize(0.045);
  e->GetYaxis()->SetLabelSize(0.045);
  e->GetXaxis()->SetTitleOffset(1.03);
  e->GetXaxis()->SetTitleSize(0.045);
  e->GetXaxis()->SetLabelOffset(-0.005);
  e->GetXaxis()->SetLabelSize(0.045);
void GPUQA::SetLegend(TLegend* l, bool bigText)
  l->SetTextSize(bigText ? 0.03 : 0.016);
double* GPUQA::CreateLogAxis(int32_t nbins, float xmin, float xmax)
  float logxmin = std::log10(xmin);
  float logxmax = std::log10(xmax);
  float binwidth = (logxmax - logxmin) / nbins;
  double* xbins = new double[nbins + 1];
  for (int32_t i = 1; i <= nbins; i++) {
    xbins[i] = std::pow(10, logxmin + i * binwidth);
void GPUQA::ChangePadTitleSize(TPad* p, float size)
  TPaveText* pt = (TPaveText*)(p->GetPrimitive("title"));
    GPUError("Error changing title");
    pt->SetTextSize(size);
void GPUQA::DrawHisto(TH1* histo, char* filename, char* options)
  histo->Draw(options);
void GPUQA::doPerfFigure(float x, float y, float size)
  const char* str_perf_figure_1 = "ALICE Performance";
  const char* str_perf_figure_2_mc = "MC, Pb#minusPb, #sqrt{s_{NN}} = 5.36 TeV";
  const char* str_perf_figure_2_data = "Pb#minusPb, #sqrt{s_{NN}} = 5.36 TeV";
  if (mConfig.perfFigure == 0) {
  TLatex* t = createGarbageCollected<TLatex>();
  t->SetTextSize(size);
  t->DrawLatex(x, y, str_perf_figure_1);
  t->SetTextSize(size * 0.8);
  t->DrawLatex(x, y - 0.01 - size, mConfig.perfFigure > 0 ? str_perf_figure_2_mc : str_perf_figure_2_data);
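// Create all QA histograms for the tasks enabled in mQATasks (efficiency, resolution, pulls,
// cluster attachment, track statistics, cluster rejection / counts).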
int32_t GPUQA::InitQACreateHistograms()
  char name[2048], fname[1024];
  if (mQATasks & taskTrackingEff) {
    for (int32_t i = 0; i < 6; i++) {
      for (int32_t j = 0; j < 2; j++) {
        for (int32_t k = 0; k < 2; k++) {
          for (int32_t l = 0; l < 5; l++) {
            snprintf(name, 2048, "%s%s%s%sVs%s", "tracks", EFF_TYPES[i], FINDABLE_NAMES[j], PRIM_NAMES[k], VSPARAMETER_NAMES[l]);
              std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], k == 0 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
              createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], binsPt.get());
              createHist(mEff[i][j][k][l], name, name, AXIS_BINS[l], AXES_MIN[l], AXES_MAX[l]);
            if (!mHaveExternalHists) {
              mEff[i][j][k][l]->Sumw2();
            strcat(name, "_eff");
            createHist(mEffResult[i][j][k][l], name);
  if (mQATasks & taskTrackingRes) {
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 5; j++) {
        snprintf(name, 2048, "rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        snprintf(fname, 1024, "mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
          createHist(mRes[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
          createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
          createHist(mRes[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
          createHist(mRes[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        snprintf(name, 2048, "res_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        const float* axis = mConfig.nativeFitResolutions ? RES_AXES_NATIVE : RES_AXES;
        const int32_t nbins = i == 4 && mConfig.nativeFitResolutions ? (10 * RES_AXIS_BINS[0]) : RES_AXIS_BINS[0];
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], mConfig.resPrimaries == 1 ? PT_MIN_PRIM : AXES_MIN[4], AXES_MAX[4])};
          createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], binsPt.get());
          createHist(mRes2[i][j], name, name, nbins, -axis[i], axis[i], AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
  if (mQATasks & taskTrackingResPull) {
    for (int32_t i = 0; i < 5; i++) {
      for (int32_t j = 0; j < 5; j++) {
        snprintf(name, 2048, "pull_rms_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
        snprintf(fname, 1024, "pull_mean_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
          createHist(mPull[i][j][0], name, name, AXIS_BINS[j], binsPt.get());
          createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], binsPt.get());
          createHist(mPull[i][j][0], name, name, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
          createHist(mPull[i][j][1], fname, fname, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
        snprintf(name, 2048, "pull_%s_vs_%s", VSPARAMETER_NAMES[i], VSPARAMETER_NAMES[j]);
          std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], AXES_MIN[4], AXES_MAX[4])};
          createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], binsPt.get());
          createHist(mPull2[i][j], name, name, RES_AXIS_BINS[0], -PULL_AXIS, PULL_AXIS, AXIS_BINS[j], AXES_MIN[j], AXES_MAX[j]);
  if (mQATasks & taskClusterAttach) {
    for (int32_t i = 0; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
      int32_t ioffset = i >= (2 * N_CLS_HIST - 1) ? (2 * N_CLS_HIST - 1) : i >= N_CLS_HIST ? N_CLS_HIST : 0;
      int32_t itype = i >= (2 * N_CLS_HIST - 1) ? 2 : i >= N_CLS_HIST ? 1 : 0;
      snprintf(name, 2048, "clusters%s%s", CLUSTER_NAMES_SHORT[i - ioffset], CLUSTER_TYPES[itype]);
      std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
      createHist(mClusters[i], name, name, AXIS_BINS[4], binsPt.get());
    createHist(mPadRow[1], "padrow1", "padrow1", 100.f, -0.2f, 0.2f, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
    createHist(mPadRow[2], "padrow2", "padrow2", 100.f, -0.2f, 0.2f, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
    createHist(mPadRow[3], "padrow3", "padrow3", 100.f, 0, 300000, GPUCA_ROW_COUNT - PADROW_CHECK_MINCLS, 0, GPUCA_ROW_COUNT - 1 - PADROW_CHECK_MINCLS);
  if (mQATasks & taskTrackStatistics) {
    for (int32_t i = 0; i < 2; i++) {
      snprintf(name, 2048, i ? "nrows_with_cluster" : "nclusters");
      createHist(mNCl[i], name, name, 160, 0, 159);
    std::unique_ptr<double[]> binsPt{CreateLogAxis(AXIS_BINS[4], PT_MIN_CLUST, PT_MAX)};
    createHist(mTracks, "tracks_pt", "tracks_pt", AXIS_BINS[4], binsPt.get());
    const uint32_t maxTime = (mTracking && mTracking->GetParam().continuousMaxTimeBin > 0) ? mTracking->GetParam().continuousMaxTimeBin : TPC_MAX_TIME_BIN_TRIGGERED;
    createHist(mT0[0], "tracks_t0", "tracks_t0", (maxTime + 1) / 10, 0, maxTime);
    createHist(mT0[1], "tracks_t0_res", "tracks_t0_res", 1000, -100, 100);
    createHist(mClXY, "clXY", "clXY", 1000, -250, 250, 1000, -250, 250);
  if (mQATasks & taskClusterRejection) {
    for (int32_t i = 0; i < 3; i++) {
      snprintf(name, 2048, "clrej_%d", i);
  if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
    int32_t num = DoClusterCounts(nullptr, 2);
    mHistClusterCount.resize(num);
    DoClusterCounts(nullptr, 1);
  for (uint32_t i = 0; i < mHist1D->size(); i++) {
    *mHist1D_pos[i] = &(*mHist1D)[i];
  for (uint32_t i = 0; i < mHist2D->size(); i++) {
    *mHist2D_pos[i] = &(*mHist2D)[i];
  for (uint32_t i = 0; i < mHist1Dd->size(); i++) {
    *mHist1Dd_pos[i] = &(*mHist1Dd)[i];
  for (uint32_t i = 0; i < mHistGraph->size(); i++) {
    *mHistGraph_pos[i] = &(*mHistGraph)[i];
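// loadHistograms: adopt externally owned histogram vectors instead of creating new ones.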
int32_t GPUQA::loadHistograms(std::vector<TH1F>& i1, std::vector<TH2F>& i2, std::vector<TH1D>& i3, std::vector<TGraphAsymmErrors>& i4, int32_t tasks)
  if (tasks == tasksAutomatic) {
  if (mQAInitialized && (!mHaveExternalHists || tasks != mQATasks)) {
    throw std::runtime_error("QA not initialized or initialized with different task array");
  mHist1Dd_pos.clear();
  mHistGraph_pos.clear();
  mHaveExternalHists = true;
  if (InitQACreateHistograms()) {
  mQAInitialized = true;
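// DumpO2MCData / ReadO2MCData: binary snapshot of the preprocessed MC info (track infos,
// per-collision ranges, event offsets).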
  uint32_t n = mMCInfos.size();
  fwrite(&n, sizeof(n), 1, fp);
  fwrite(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp);
  n = mMCInfosCol.size();
  fwrite(&n, sizeof(n), 1, fp);
  fwrite(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp);
  n = mMCEventOffset.size();
  fwrite(&n, sizeof(n), 1, fp);
  fwrite(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp);
  if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
  if (fread(mMCInfos.data(), sizeof(mMCInfos[0]), n, fp) != n) {
  if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
  mMCInfosCol.resize(n);
  if (fread(mMCInfosCol.data(), sizeof(mMCInfosCol[0]), n, fp) != n) {
  if ((x = fread(&n, sizeof(n), 1, fp)) != 1) {
  mMCEventOffset.resize(n);
  if (fread(mMCEventOffset.data(), sizeof(mMCEventOffset[0]), n, fp) != n) {
  if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
    printf("Read %ld bytes MC Infos\n", ftell(fp));
    CopyO2MCtoIOPtr(&mTracking->mIOPtrs);
  ptr->mcInfosTPC = mMCInfos.data();
  ptr->nMCInfosTPC = mMCInfos.size();
  ptr->mcInfosTPCCol = mMCInfosCol.data();
  ptr->nMCInfosTPCCol = mMCInfosCol.size();
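// InitO2MCData: read MC kinematics and track references once per run and convert them into the
// flat mMCInfos / mMCInfosCol layout used by the QA.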
  if (!mO2MCDataLoaded) {
    HighResTimer timer(mTracking && mTracking->GetProcessingSettings().debugLevel);
    if (mTracking && mTracking->GetProcessingSettings().debugLevel) {
      GPUInfo("Start reading O2 Track MC information");
    static constexpr float PRIM_MAX_T = 0.01f;
    std::vector<int32_t> refId;
    const auto& evrec = dc->getEventRecords();
    const auto& evparts = dc->getEventParts();
    std::vector<std::vector<float>> evTimeBins(mcReader.getNSources());
    for (uint32_t i = 0; i < evTimeBins.size(); i++) {
      evTimeBins[i].resize(mcReader.getNEvents(i), -100.f);
    for (uint32_t i = 0; i < evrec.size(); i++) {
      const auto& ir = evrec[i];
      for (uint32_t j = 0; j < evparts[i].size(); j++) {
        const int iSim = evparts[i][j].sourceID;
        const int iEv = evparts[i][j].entryID;
        if (evTimeBins[iSim][iEv] >= 0) {
          throw std::runtime_error("Multiple time bins for same MC collision found");
        evTimeBins[iSim][iEv] = timebin;
    uint32_t nSimSources = mcReader.getNSources();
    mMCEventOffset.resize(nSimSources);
    uint32_t nSimTotalEvents = 0;
    uint32_t nSimTotalTracks = 0;
    for (uint32_t i = 0; i < nSimSources; i++) {
      mMCEventOffset[i] = nSimTotalEvents;
      nSimTotalEvents += mcReader.getNEvents(i);
    mMCInfosCol.resize(nSimTotalEvents);
    for (int32_t iSim = 0; iSim < mcReader.getNSources(); iSim++) {
      for (int32_t i = 0; i < mcReader.getNEvents(iSim); i++) {
        const float timebin = evTimeBins[iSim][i];
        const std::vector<o2::MCTrack>& tracks = mcReader.getTracks(iSim, i);
        const std::vector<o2::TrackReference>& trackRefs = mcReader.getTrackRefsByEvent(iSim, i);
        refId.resize(tracks.size());
        std::fill(refId.begin(), refId.end(), -1);
        for (uint32_t j = 0; j < trackRefs.size(); j++) {
          int32_t trkId = trackRefs[j].getTrackID();
          if (refId[trkId] == -1) {
        mMCInfosCol[mMCEventOffset[iSim] + i].first = mMCInfos.size();
        mMCInfosCol[mMCEventOffset[iSim] + i].num = tracks.size();
        mMCInfos.resize(mMCInfos.size() + tracks.size());
        for (uint32_t j = 0; j < tracks.size(); j++) {
          auto& info = mMCInfos[mMCInfosCol[mMCEventOffset[iSim] + i].first + j];
          TParticlePDG* particle = TDatabasePDG::Instance()->GetParticle(trk.GetPdgCode());
          if (abs(trk.GetPdgCode()) == kElectron) {
          if (abs(trk.GetPdgCode()) == kMuonMinus) {
          if (abs(trk.GetPdgCode()) == kPiPlus) {
          if (abs(trk.GetPdgCode()) == kKPlus) {
          if (abs(trk.GetPdgCode()) == kProton) {
          info.charge = particle ? particle->Charge() : 0;
          info.prim = trk.T() < PRIM_MAX_T;
          info.primDaughters = 0;
          if (trk.getFirstDaughterTrackId() != -1) {
            for (int32_t k = trk.getFirstDaughterTrackId(); k <= trk.getLastDaughterTrackId(); k++) {
              if (tracks[k].T() < PRIM_MAX_T) {
                info.primDaughters = 1;
            const auto& trkRef = trackRefs[refId[j]];
            info.pX = trkRef.Px();
            info.pY = trkRef.Py();
            info.pZ = trkRef.Pz();
            info.genRadius = std::sqrt(trk.GetStartVertexCoordinatesX() * trk.GetStartVertexCoordinatesX() + trk.GetStartVertexCoordinatesY() * trk.GetStartVertexCoordinatesY() + trk.GetStartVertexCoordinatesZ() * trk.GetStartVertexCoordinatesZ());
            info.x = info.y = info.z = info.pX = info.pY = info.pZ = 0;
    if (timer.IsRunning()) {
      GPUInfo("Finished reading O2 Track MC information (%f seconds)", timer.GetCurrentElapsedTime());
    mO2MCDataLoaded = true;
    CopyO2MCtoIOPtr(updateIOPtr);
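// InitQA: allocate histogram storage, create histograms, and optionally load MC data and
// MC-label match files.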
  if (mQAInitialized) {
    throw std::runtime_error("QA already initialized");
  if (tasks == tasksAutomatic) {
  mHist1D = new std::vector<TH1F>;
  mHist2D = new std::vector<TH2F>;
  mHist1Dd = new std::vector<TH1D>;
  mHistGraph = new std::vector<TGraphAsymmErrors>;
  if (mTracking->GetProcessingSettings().qcRunFraction != 100.f && mQATasks != taskClusterCounts) {
    throw std::runtime_error("QA with qcRunFraction only supported for taskClusterCounts");
  mClNative = mTracking->mIOPtrs.clustersNative;
  if (InitQACreateHistograms()) {
  if (mConfig.enableLocalOutput) {
    mkdir(mConfig.plotsDir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    InitO2MCData(mTracking ? &mTracking->mIOPtrs : nullptr);
  if (mConfig.matchMCLabels.size()) {
    uint32_t nFiles = mConfig.matchMCLabels.size();
    std::vector<std::unique_ptr<TFile>> files;
    std::vector<std::vector<std::vector<int32_t>>*> labelsBuffer(nFiles);
    std::vector<std::vector<std::vector<int32_t>>*> effBuffer(nFiles);
    for (uint32_t i = 0; i < nFiles; i++) {
      files.emplace_back(std::make_unique<TFile>(mConfig.matchMCLabels[i].c_str()));
      labelsBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcLabelBuffer");
      effBuffer[i] = (std::vector<std::vector<int32_t>>*)files[i]->Get("mcEffBuffer");
      if (labelsBuffer[i] == nullptr || effBuffer[i] == nullptr) {
        GPUError("Error opening / reading from labels file %u/%s: %p %p", i, mConfig.matchMCLabels[i].c_str(), (void*)labelsBuffer[i], (void*)effBuffer[i]);
    mGoodTracks.resize(labelsBuffer[0]->size());
    mGoodHits.resize(labelsBuffer[0]->size());
    for (uint32_t iEvent = 0; iEvent < labelsBuffer[0]->size(); iEvent++) {
      std::vector<bool> labelsOK((*effBuffer[0])[iEvent].size());
      for (uint32_t k = 0; k < (*effBuffer[0])[iEvent].size(); k++) {
        for (uint32_t l = 0; l < nFiles; l++) {
          if ((*effBuffer[0])[iEvent][k] != (*effBuffer[l])[iEvent][k]) {
      mGoodTracks[iEvent].resize((*labelsBuffer[0])[iEvent].size());
      for (uint32_t k = 0; k < (*labelsBuffer[0])[iEvent].size(); k++) {
        if ((*labelsBuffer[0])[iEvent][k] == MC_LABEL_INVALID) {
        mGoodTracks[iEvent][k] = labelsOK[abs((*labelsBuffer[0])[iEvent][k])];
  mQAInitialized = true;
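// RunQA: main per-event entry point; fills all enabled QA histograms for the current tracking
// output (or for externally provided tracks).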
  if (!mQAInitialized) {
    throw std::runtime_error("QA not initialized");
  if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
    GPUInfo("Running QA - Mask %d, Efficiency %d, Resolution %d, Pulls %d, Cluster Attachment %d, Track Statistics %d, Cluster Counts %d", mQATasks, (int32_t)(mQATasks & taskTrackingEff), (int32_t)(mQATasks & taskTrackingRes), (int32_t)(mQATasks & taskTrackingResPull), (int32_t)(mQATasks & taskClusterAttach), (int32_t)(mQATasks & taskTrackStatistics), (int32_t)(mQATasks & taskClusterCounts));
  if (!clNative && mTracking) {
    clNative = mTracking->mIOPtrs.clustersNative;
  mClNative = clNative;
#ifdef GPUCA_TPC_GEOMETRY_O2
  uint32_t nSimEvents = GetNMCCollissions();
  if (mTrackMCLabelsReverse.size() < nSimEvents) {
    mTrackMCLabelsReverse.resize(nSimEvents);
  if (mRecTracks.size() < nSimEvents) {
    mRecTracks.resize(nSimEvents);
  if (mFakeTracks.size() < nSimEvents) {
    mFakeTracks.resize(nSimEvents);
  if (mMCParam.size() < nSimEvents) {
    mMCParam.resize(nSimEvents);
  uint32_t nReconstructedTracks = 0;
  if (tracksExternal) {
    nReconstructedTracks = tracksExternal->size();
    nReconstructedTracks = mTracking->mIOPtrs.nMergedTracks;
  mTrackMCLabels.resize(nReconstructedTracks);
  for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
    mTrackMCLabelsReverse[iCol].resize(GetNMCTracks(iCol));
    mRecTracks[iCol].resize(GetNMCTracks(iCol));
    mFakeTracks[iCol].resize(GetNMCTracks(iCol));
    mMCParam[iCol].resize(GetNMCTracks(iCol));
    memset(mRecTracks[iCol].data(), 0, mRecTracks[iCol].size() * sizeof(mRecTracks[iCol][0]));
    memset(mFakeTracks[iCol].data(), 0, mFakeTracks[iCol].size() * sizeof(mFakeTracks[iCol][0]));
    for (size_t i = 0; i < mTrackMCLabelsReverse[iCol].size(); i++) {
      mTrackMCLabelsReverse[iCol][i] = -1;
  if (mQATasks & taskClusterAttach && GetNMCLabels()) {
    mClusterParam.resize(GetNMCLabels());
    memset(mClusterParam.data(), 0, mClusterParam.size() * sizeof(mClusterParam[0]));
  if (mConfig.writeMCLabels) {
    mcEffBuffer.resize(mNEvents);
    mcLabelBuffer.resize(mNEvents);
    mcEffBuffer[mNEvents - 1].resize(GetNMCTracks(0));
    mcLabelBuffer[mNEvents - 1].resize(nReconstructedTracks);
  bool mcAvail = mcPresent() || tracksExtMC;
    if (tracksExternal) {
      for (uint32_t i = 0; i < tracksExternal->size(); i++) {
        mTrackMCLabels[i] = (*tracksExtMC)[i];
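      // Assign an MC label to every reconstructed track, either copied from the external input or
      // computed from the cluster MC truth via GPUTPCTrkLbl.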
      tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nReconstructedTracks, (QA_DEBUG == 0) ? 32 : nReconstructedTracks), [&](const tbb::blocked_range<uint32_t>& range) {
        auto acc = GPUTPCTrkLbl<true, mcLabelI_t>(GetClusterLabels(), 1.f - mConfig.recThreshold);
          std::vector<mcLabel_t> labels;
          for (uint32_t k = 0; k < track.NClusters(); k++) {
            uint32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
            if (hitId >= GetNMCLabels()) {
              GPUError("Invalid hit id %u > %d (nClusters %d)", hitId, GetNMCLabels(), clNative ? clNative->nClustersTotal : 0);
              throw std::runtime_error("qa error");
            for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
              if (GetMCLabelID(hitId, j) >= (int32_t)GetNMCTracks(GetMCLabelCol(hitId, j))) {
                GPUError("Invalid label %d > %d (hit %d, label %d, col %d)", GetMCLabelID(hitId, j), GetNMCTracks(GetMCLabelCol(hitId, j)), hitId, j, (int32_t)GetMCLabelCol(hitId, j));
                throw std::runtime_error("qa error");
              if (GetMCLabelID(hitId, j) >= 0) {
                  GPUInfo("Track %d Cluster %u Label %d: %d (%f)", i, k, j, GetMCLabelID(hitId, j), GetMCLabelWeight(hitId, j));
          float maxweight, sumweight;
          auto maxLabel = acc.computeLabel(&maxweight, &sumweight, &maxcount);
          mTrackMCLabels[i] = maxLabel;
          if (QA_DEBUG && track.OK() && GetNMCTracks(maxLabel) > (uint32_t)maxLabel.getTrackID()) {
            const mcInfo_t& mc = GetMCTrack(maxLabel);
            GPUInfo("Track %d label %d (fake %d) weight %f clusters %d (fitted %d) (%f%% %f%%) Pt %f", i, maxLabel.getTrackID(), (int32_t)(maxLabel.isFake()), maxweight, nClusters, track.NClustersFitted(), 100.f * maxweight / sumweight, 100.f * (float)maxcount / (float)nClusters,
                    std::sqrt(mc.pX * mc.pX + mc.pY * mc.pY));
    if (timer.IsRunning()) {
      GPUInfo("QA Time: Assign Track Labels:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
    for (uint32_t i = 0; i < nReconstructedTracks; i++) {
      mcLabelI_t label = mTrackMCLabels[i];
      if (mQATasks & taskClusterAttach) {
        if (!mTrackMCLabels[i].isValid()) {
          for (uint32_t k = 0; k < track->NClusters(); k++) {
            mClusterParam[mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num].fakeAttached++;
        if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
          for (uint32_t k = 0; k < track->NClusters(); k++) {
            int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track->FirstClusterRef() + k].num;
            bool correct = false;
            for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
              if (label == GetMCLabel(hitId, j)) {
              mClusterParam[hitId].attached++;
              mClusterParam[hitId].fakeAttached++;
      if (mTrackMCLabels[i].isFake()) {
        (GetMCTrackObj(mFakeTracks, label))++;
      } else if (tracksExternal || !track->MergedLooper()) {
        GetMCTrackObj(mRecTracks, label)++;
        if (mMCTrackMin == -1 || (label.getTrackID() >= mMCTrackMin && label.getTrackID() < mMCTrackMax)) {
          int32_t& revLabel = GetMCTrackObj(mTrackMCLabelsReverse, label);
          if (tracksExternal) {
            if (revLabel == -1 || fabsf((*tracksExternal)[i].getZ()) < fabsf((*tracksExternal)[revLabel].getZ())) {
            const auto* trks = mTracking->mIOPtrs.mergedTracks;
            if (revLabel == -1) {
              float shift1 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[i].CSide() * GPUChainTracking::NSECTORS / 2, trks[i].GetParam().GetTOffset());
              float shift2 = mTracking->GetTPCTransformHelper()->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(trks[revLabel].CSide() * GPUChainTracking::NSECTORS / 2, trks[revLabel].GetParam().GetTOffset());
              comp = fabsf(trks[i].GetParam().GetZ() + shift1) < fabsf(trks[revLabel].GetParam().GetZ() + shift2);
            if (revLabel == -1 || !trks[revLabel].OK() || (trks[i].OK() && comp)) {
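    // Mark clusters adjacent to tracks and fill the pad-row cross-check histograms from the lowest
    // attached cluster row per track.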
    if ((mQATasks & taskClusterAttach) && !tracksExternal) {
      std::vector<uint8_t> lowestPadRow(mTracking->mIOPtrs.nMergedTracks);
      if (mTracking->mIOPtrs.mergedTrackHitAttachment) {
        for (uint32_t i = 0; i < GetNMCLabels(); i++) {
          if (mClusterParam[i].attached == 0 && mClusterParam[i].fakeAttached == 0) {
            int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
              mcLabelI_t trackL = mTrackMCLabels[track];
              for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
                if (trackL == GetMCLabel(i, j)) {
                mClusterParam[i].fakeAdjacent++;
                mClusterParam[i].adjacent++;
      if (mTracking->mIOPtrs.nMergedTracks && clNative) {
        std::fill(lowestPadRow.begin(), lowestPadRow.end(), 255);
        for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
            for (uint32_t iCl = 0; iCl < clNative->nClusters[iSector][iRow]; iCl++) {
              for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
                uint32_t trackId = GetMCTrackObj(mTrackMCLabelsReverse, GetMCLabel(i, j));
                if (trackId < lowestPadRow.size() && lowestPadRow[trackId] > iRow) {
                  lowestPadRow[trackId] = iRow;
        for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
          const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
          if (trk.OK() && lowestPadRow[i] != 255 && trk.NClustersFitted() >= PADROW_CHECK_MINCLS && CAMath::Abs(trk.GetParam().GetQPt()) < 1.0) {
            const auto& lowestCl = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef()].row < mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + trk.NClusters() - 1].row ? mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef()] : mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + trk.NClusters() - 1];
            const int32_t lowestRow = lowestCl.row;
            mPadRow[0]->Fill(lowestPadRow[i], lowestRow, 1.f);
            mPadRow[1]->Fill(CAMath::ATan2(trk.GetParam().GetY(), trk.GetParam().GetX()), lowestRow, 1.f);
            if (lowestPadRow[i] < 10 && lowestRow > lowestPadRow[i] + 3) {
              mTracking->GetTPCTransformHelper()->Transform(lowestCl.sector, lowestCl.row, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
              float phi = CAMath::ATan2(y, x);
              mPadRow[2]->Fill(phi, lowestRow, 1.f);
              if (CAMath::Abs(phi) < 0.15) {
                const float time = cl.getTime();
                mPadRow[3]->Fill(mTracking->GetParam().GetUnscaledMult(time), lowestRow, 1.f);
    if (mConfig.matchMCLabels.size()) {
      mGoodHits[mNEvents - 1].resize(GetNMCLabels());
      std::vector<bool> allowMCLabels(GetNMCTracks(0));
      for (uint32_t k = 0; k < GetNMCTracks(0); k++) {
        allowMCLabels[k] = false;
      for (uint32_t i = 0; i < nReconstructedTracks; i++) {
        if (!mGoodTracks[mNEvents - 1][i]) {
        if (mConfig.matchDisplayMinPt > 0) {
          if (!mTrackMCLabels[i].isValid()) {
          const mcInfo_t& info = GetMCTrack(mTrackMCLabels[i]);
          if (info.pX * info.pX + info.pY * info.pY < mConfig.matchDisplayMinPt * mConfig.matchDisplayMinPt) {
        for (uint32_t j = 0; j < track.NClusters(); j++) {
          int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + j].num;
          if (GetMCLabelNID(hitId)) {
            int32_t mcID = GetMCLabelID(hitId, 0);
              allowMCLabels[mcID] = true;
      for (uint32_t i = 0; i < GetNMCLabels(); i++) {
        for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
          int32_t mcID = GetMCLabelID(i, j);
          if (mcID >= 0 && allowMCLabels[mcID]) {
            mGoodHits[mNEvents - 1][i] = true;
    if (timer.IsRunning()) {
      GPUInfo("QA Time: Cluster attach status:\t\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
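    // Accumulate the weighted number of clusters per MC track and derive pt, phi, theta and eta for
    // each MC particle.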
    for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
      for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
        mMCParam[iCol][i].nWeightCls = 0.;
    for (uint32_t i = 0; i < GetNMCLabels(); i++) {
      float weightTotal = 0.f;
      for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
        if (GetMCLabelID(i, j) >= 0) {
          weightTotal += GetMCLabelWeight(i, j);
      for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
        if (GetMCLabelID(i, j) >= 0) {
          GetMCTrackObj(mMCParam, GetMCLabel(i, j)).nWeightCls += GetMCLabelWeight(i, j) / weightTotal;
    if (timer.IsRunning()) {
      GPUInfo("QA Time: Compute cluster label weights:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
    tbb::parallel_for<uint32_t>(0, GetNMCCollissions(), [&](auto iCol) {
      for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
        const mcInfo_t& info = GetMCTrack(i, iCol);
        additionalMCParameters& mc2 = mMCParam[iCol][i];
        mc2.pt = std::sqrt(info.pX * info.pX + info.pY * info.pY);
        mc2.phi = M_PI + std::atan2(-info.pY, -info.pX);
        float p = info.pX * info.pX + info.pY * info.pY + info.pZ * info.pZ;
          mc2.theta = mc2.eta = 0.f;
          mc2.theta = info.pZ == 0 ? (M_PI / 2) : (std::acos(info.pZ / std::sqrt(p)));
          mc2.eta = -std::log(std::tan(0.5 * mc2.theta));
        if (mConfig.writeMCLabels) {
          std::vector<int32_t>& effBuffer = mcEffBuffer[mNEvents - 1];
          effBuffer[i] = mRecTracks[iCol][i] * 1000 + mFakeTracks[iCol][i];
    }, tbb::simple_partitioner());
    if (timer.IsRunning()) {
      GPUInfo("QA Time: Compute track mc parameters:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
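    // Fill efficiency, clone-rate and fake-rate histograms versus Y, Z, phi, eta and pt.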
    if (mQATasks & taskTrackingEff) {
      for (uint32_t iCol = 0; iCol < GetNMCCollissions(); iCol++) {
        for (uint32_t i = 0; i < GetNMCTracks(iCol); i++) {
          if ((mMCTrackMin != -1 && (int32_t)i < mMCTrackMin) || (mMCTrackMax != -1 && (int32_t)i >= mMCTrackMax)) {
          const mcInfo_t& info = GetMCTrack(i, iCol);
          const additionalMCParameters& mc2 = mMCParam[iCol][i];
          if (mc2.nWeightCls == 0.f) {
          const float& mcpt = mc2.pt;
          const float& mcphi = mc2.phi;
          const float& mceta = mc2.eta;
          if (info.primDaughters) {
          if (mc2.nWeightCls < mConfig.minNClEff) {
          int32_t findable = mc2.nWeightCls >= mConfig.minNClFindable;
          if (info.charge == 0.f) {
          if (mConfig.filterCharge && info.charge * mConfig.filterCharge < 0) {
          if (mConfig.filterPID >= 0 && info.pid != mConfig.filterPID) {
          if (fabsf(mceta) > ETA_MAX || mcpt < PT_MIN || mcpt > PT_MAX) {
          float alpha = std::atan2(info.y, info.x);
          alpha /= M_PI / 9.f;
          alpha *= M_PI / 9.f;
          alpha += M_PI / 18.f;
          float c = std::cos(alpha);
          float s = std::sin(alpha);
          float localY = -info.x * s + info.y * c;
          if (mConfig.dumpToROOT) {
            float localX = info.x * c + info.y * s;
            effdump.Fill(alpha, localX, localY, info.z, mcphi, mceta, mcpt, mRecTracks[iCol][i], mFakeTracks[iCol][i], findable, info.prim, mc2.nWeightCls);
          for (int32_t j = 0; j < 6; j++) {
            if (j == 3 || j == 4) {
            for (int32_t k = 0; k < 2; k++) {
              if (k == 0 && findable == 0) {
              int32_t val = (j == 0) ? (mRecTracks[iCol][i] ? 1 : 0) : (j == 1) ? (mRecTracks[iCol][i] ? mRecTracks[iCol][i] - 1 : 0) : (j == 2) ? mFakeTracks[iCol][i] : 1;
              for (int32_t l = 0; l < 5; l++) {
                if (info.prim && mcpt < PT_MIN_PRIM) {
                if (l != 3 && fabsf(mceta) > ETA_MAX2) {
                if (l < 4 && mcpt < 1.f / mConfig.qpt) {
                float pos = l == 0 ? localY : l == 1 ? info.z : l == 2 ? mcphi : l == 3 ? mceta : mcpt;
                mEff[j][k][!info.prim][l]->Fill(pos, val);
      if (timer.IsRunning()) {
        GPUInfo("QA Time: Fill efficiency histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
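    // Propagate each matched track to the MC reference position and fill resolution and pull
    // histograms.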
    if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
      prop.SetMaxSinPhi(.999);
      prop.SetMaterialTPC();
      prop.SetPolynomialField(&mParam->polynomialField);
      for (uint32_t i = 0; i < mTrackMCLabels.size(); i++) {
        if (mConfig.writeMCLabels) {
          std::vector<int32_t>& labelBuffer = mcLabelBuffer[mNEvents - 1];
          labelBuffer[i] = mTrackMCLabels[i].getTrackID();
        if (mTrackMCLabels[i].isFake()) {
        const mcInfo_t& mc1 = GetMCTrack(mTrackMCLabels[i]);
        const additionalMCParameters& mc2 = GetMCTrackObj(mMCParam, mTrackMCLabels[i]);
        if (mc1.primDaughters) {
        if (!tracksExternal) {
          if (!mTracking->mIOPtrs.mergedTracks[i].OK()) {
          if (mTracking->mIOPtrs.mergedTracks[i].MergedLooper()) {
        if ((mMCTrackMin != -1 && mTrackMCLabels[i].getTrackID() < mMCTrackMin) || (mMCTrackMax != -1 && mTrackMCLabels[i].getTrackID() >= mMCTrackMax)) {
        if (fabsf(mc2.eta) > ETA_MAX || mc2.pt < PT_MIN || mc2.pt > PT_MAX) {
        if (mc1.charge == 0.f) {
        if (mc1.t0 == -100.f) {
        if (mConfig.filterCharge && mc1.charge * mConfig.filterCharge < 0) {
        if (mConfig.filterPID >= 0 && mc1.pid != mConfig.filterPID) {
        if (mc2.nWeightCls < mConfig.minNClRes) {
        if (mConfig.resPrimaries == 1 && !mc1.prim) {
        } else if (mConfig.resPrimaries == 2 && mc1.prim) {
        if (GetMCTrackObj(mTrackMCLabelsReverse, mTrackMCLabels[i]) != (int32_t)i) {
        if (tracksExternal) {
          for (int32_t k = 0; k < 5; k++) {
            param.Par()[k] = (*tracksExternal)[i].getParams()[k];
          for (int32_t k = 0; k < 15; k++) {
            param.Cov()[k] = (*tracksExternal)[i].getCov()[k];
          param.X() = (*tracksExternal)[i].getX();
          param.TOffset() = (*tracksExternal)[i].getTime0();
          alpha = (*tracksExternal)[i].getAlpha();
          side = (*tracksExternal)[i].hasBothSidesClusters() ? 2 : ((*tracksExternal)[i].hasCSideClusters() ? 1 : 0);
          param = mTracking->mIOPtrs.mergedTracks[i].GetParam();
          alpha = mTracking->mIOPtrs.mergedTracks[i].GetAlpha();
          side = mTracking->mIOPtrs.mergedTracks[i].CCE() ? 2 : (mTracking->mIOPtrs.mergedTracks[i].CSide() ? 1 : 0);
        float c = std::cos(alpha);
        float s = std::sin(alpha);
        mclocal[0] = x * c + y * s;
        mclocal[1] = -x * s + y * c;
        mclocal[2] = px * c + py * s;
        mclocal[3] = -px * s + py * c;
        if (mclocal[0] > param.GetX() + 20) {
        if (param.GetX() > mConfig.maxResX) {
        auto getdz = [this, &param, &mc1, &side, tracksExternal]() {
          if (tracksExternal) {
            return param.GetZ();
          if (!mParam->continuousMaxTimeBin) {
            return param.GetZ() - mc1.z;
          return param.GetZ() + shift - mc1.z;
        bool inFlyDirection = 0;
        if (mConfig.strict) {
          const float dy = param.Y() - mclocal[1];
          const float dz = getdz();
          if (dx * dx + dy * dy + dz * dz > 5.f * 5.f) {
        if (prop.PropagateToXAlpha(mclocal[0], alpha, inFlyDirection)) {
        if (fabsf(param.Y() - mclocal[1]) > (mConfig.strict ? 1.f : 4.f) || fabsf(getdz()) > (mConfig.strict ? 1.f : 4.f)) {
        float charge = mc1.charge > 0 ? 1.f : -1.f;
        float deltaY = param.GetY() - mclocal[1];
        float deltaZ = getdz();
        float deltaPhiNative = param.GetSinPhi() - mclocal[3] / mc2.pt;
        float deltaPhi = std::asin(param.GetSinPhi()) - std::atan2(mclocal[3], mclocal[2]);
        float deltaLambdaNative = param.GetDzDs() - mc1.pZ / mc2.pt;
        float deltaLambda = std::atan(param.GetDzDs()) - std::atan2(mc1.pZ, mc2.pt);
        float deltaPt = (fabsf(1.f / param.GetQPt()) - mc2.pt) / mc2.pt;
        float paramval[5] = {mclocal[1], mc1.z, mc2.phi, mc2.eta, mc2.pt};
        float resval[5] = {deltaY, deltaZ, mConfig.nativeFitResolutions ? deltaPhiNative : deltaPhi, mConfig.nativeFitResolutions ? deltaLambdaNative : deltaLambda, mConfig.nativeFitResolutions ? deltaPtNative : deltaPt};
        float pullval[5] = {deltaY / std::sqrt(param.GetErr2Y()), deltaZ / std::sqrt(param.GetErr2Z()), deltaPhiNative / std::sqrt(param.GetErr2SinPhi()), deltaLambdaNative / std::sqrt(param.GetErr2DzDs()), deltaPtNative / std::sqrt(param.GetErr2QPt())};
        for (int32_t j = 0; j < 5; j++) {
          for (int32_t k = 0; k < 5; k++) {
            if (k != 3 && fabsf(mc2.eta) > ETA_MAX2) {
            if (k < 4 && mc2.pt < 1.f / mConfig.qpt) {
            if (mQATasks & taskTrackingRes) {
              mRes2[j][k]->Fill(resval[j], paramval[k]);
            if (mQATasks & taskTrackingResPull) {
              mPull2[j][k]->Fill(pullval[j], paramval[k]);
      if (timer.IsRunning()) {
        GPUInfo("QA Time: Fill resolution histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
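    // Fill the cluster-attachment pt spectra (attached, fake, adjacent, protected, physics, all).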
    if ((mQATasks & taskClusterAttach) && !tracksExternal) {
      for (uint32_t iTrk = 0; iTrk < nReconstructedTracks; iTrk++) {
        if (!mTrackMCLabels[iTrk].isValid()) {
          for (uint32_t k = 0; k < track.NClusters(); k++) {
            int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
            float totalWeight = 0.;
            for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
              if (GetMCLabelID(hitId, j) >= 0 && GetMCTrackObj(mMCParam, GetMCLabel(hitId, j)).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
                totalWeight += GetMCLabelWeight(hitId, j);
            int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
            const auto& r = checkClusterState<false>(attach);
            if (totalWeight > 0) {
              float weight = 1.f / (totalWeight * (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached));
              for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
                mcLabelI_t label = GetMCLabel(hitId, j);
                if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
                  float pt = GetMCTrackObj(mMCParam, label).pt;
                  if (pt < PT_MIN_CLUST) {
                  mClusters[CL_fake]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
                  mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
                  if (GetMCTrackObj(mRecTracks, label)) {
                    mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
                  mClusters[CL_all]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
                  if (r.protect || r.physics) {
                    mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
                    mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(hitId, j) * weight);
              float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
              mClusters[CL_fake]->Fill(0.f, weight);
              mClusters[CL_att_adj]->Fill(0.f, weight);
              mClusters[CL_all]->Fill(0.f, weight);
              mClusterCounts.nUnaccessible += weight;
              if (r.protect || r.physics) {
                mClusters[CL_prot]->Fill(0.f, weight);
                mClusters[CL_physics]->Fill(0.f, weight);
        mcLabelI_t label = mTrackMCLabels[iTrk];
        if (mMCTrackMin != -1 && (label.getTrackID() < mMCTrackMin || label.getTrackID() >= mMCTrackMax)) {
        for (uint32_t k = 0; k < track.NClusters(); k++) {
          int32_t hitId = mTracking->mIOPtrs.mergedTrackHits[track.FirstClusterRef() + k].num;
          float pt = GetMCTrackObj(mMCParam, label).pt;
          if (pt < PT_MIN_CLUST) {
          float weight = 1.f / (mClusterParam[hitId].attached + mClusterParam[hitId].fakeAttached);
          bool correct = false;
          for (int32_t j = 0; j < GetMCLabelNID(hitId); j++) {
            if (label == GetMCLabel(hitId, j)) {
            mClusters[CL_attached]->Fill(pt, weight);
            mClusters[CL_tracks]->Fill(pt, weight);
            mClusters[CL_fake]->Fill(pt, weight);
          mClusters[CL_att_adj]->Fill(pt, weight);
          mClusters[CL_all]->Fill(pt, weight);
          int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[hitId];
          const auto& r = checkClusterState<false>(attach);
          if (r.protect || r.physics) {
            mClusters[CL_prot]->Fill(pt, weight);
            mClusters[CL_physics]->Fill(pt, weight);
      for (uint32_t i = 0; i < GetNMCLabels(); i++) {
        if ((mMCTrackMin != -1 && GetMCLabelID(i, 0) < mMCTrackMin) || (mMCTrackMax != -1 && GetMCLabelID(i, 0) >= mMCTrackMax)) {
        if (mClusterParam[i].attached || mClusterParam[i].fakeAttached) {
        int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
        const auto& r = checkClusterState<false>(attach);
        if (mClusterParam[i].adjacent) {
          float totalWeight = 0.;
          for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
            mcLabelI_t labelT = GetMCLabel(i, j);
            if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
              totalWeight += GetMCLabelWeight(i, j);
          float weight = 1.f / totalWeight;
          if (totalWeight > 0) {
            for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
              mcLabelI_t labelT = GetMCLabel(i, j);
              if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
                float pt = GetMCTrackObj(mMCParam, labelT).pt;
                if (pt < PT_MIN_CLUST) {
                if (GetMCTrackObj(mRecTracks, labelT)) {
                  mClusters[CL_tracks]->Fill(pt, GetMCLabelWeight(i, j) * weight);
                mClusters[CL_att_adj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
                mClusters[CL_fakeAdj]->Fill(pt, GetMCLabelWeight(i, j) * weight);
                mClusters[CL_all]->Fill(pt, GetMCLabelWeight(i, j) * weight);
                if (r.protect || r.physics) {
                  mClusters[CL_prot]->Fill(pt, GetMCLabelWeight(i, j) * weight);
                  mClusters[CL_physics]->Fill(pt, GetMCLabelWeight(i, j) * weight);
            mClusters[CL_att_adj]->Fill(0.f, 1.f);
            mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
            mClusters[CL_all]->Fill(0.f, 1.f);
            mClusterCounts.nUnaccessible++;
            if (r.protect || r.physics) {
              mClusters[CL_prot]->Fill(0.f, 1.f);
              mClusters[CL_physics]->Fill(0.f, 1.f);
            float pt = GetMCTrackObj(mMCParam, mTrackMCLabels[label]).pt;
            if (pt < PT_MIN_CLUST) {
            mClusters[CL_att_adj]->Fill(pt, 1.f);
            mClusters[CL_tracks]->Fill(pt, 1.f);
            mClusters[CL_all]->Fill(pt, 1.f);
            if (r.protect || r.physics) {
              mClusters[CL_prot]->Fill(pt, 1.f);
              mClusters[CL_physics]->Fill(pt, 1.f);
          float totalWeight = 0.;
          for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
            mcLabelI_t labelT = GetMCLabel(i, j);
            if (!labelT.isFake() && GetMCTrackObj(mMCParam, labelT).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
              totalWeight += GetMCLabelWeight(i, j);
          if (totalWeight > 0) {
            for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
              mcLabelI_t label = GetMCLabel(i, j);
              if (!label.isFake() && GetMCTrackObj(mMCParam, label).pt > 1.f / mTracking->GetParam().rec.maxTrackQPtB5) {
                float pt = GetMCTrackObj(mMCParam, label).pt;
                if (pt < PT_MIN_CLUST) {
                float weight = GetMCLabelWeight(i, j) / totalWeight;
                if (mClusterParam[i].fakeAdjacent) {
                  mClusters[CL_fakeAdj]->Fill(pt, weight);
                if (mClusterParam[i].fakeAdjacent) {
                  mClusters[CL_att_adj]->Fill(pt, weight);
                if (GetMCTrackObj(mRecTracks, label)) {
                  mClusters[CL_tracks]->Fill(pt, weight);
                mClusters[CL_all]->Fill(pt, weight);
                if (r.protect || r.physics) {
                  mClusters[CL_prot]->Fill(pt, weight);
                  mClusters[CL_physics]->Fill(pt, weight);
            if (mClusterParam[i].fakeAdjacent) {
              mClusters[CL_fakeAdj]->Fill(0.f, 1.f);
            if (mClusterParam[i].fakeAdjacent) {
              mClusters[CL_att_adj]->Fill(0.f, 1.f);
            mClusters[CL_all]->Fill(0.f, 1.f);
            mClusterCounts.nUnaccessible++;
            if (r.protect || r.physics) {
              mClusters[CL_prot]->Fill(0.f, 1.f);
              mClusters[CL_physics]->Fill(0.f, 1.f);
      if (timer.IsRunning()) {
        GPUInfo("QA Time: Fill cluster histograms:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
  } else if (!mConfig.inputHistogramsOnly && !mConfig.noMC && (mQATasks & (taskTrackingEff | taskTrackingRes | taskTrackingResPull | taskClusterAttach))) {
    GPUWarning("No MC information available, only running partial TPC QA!");
  if ((mQATasks & taskTrackStatistics) && !tracksExternal) {
    std::vector<std::array<float, 3>> clusterAttachCounts;
    clusterAttachCounts.resize(GetNMCLabels(), {0.f, 0.f, 0.f});
    for (uint32_t i = 0; i < nReconstructedTracks; i++) {
      mTracks->Fill(1.f / fabsf(track.GetParam().GetQPt()));
      mNCl[0]->Fill(track.NClustersFitted());
      uint32_t nClCorrected = 0;
      const auto& trackClusters = mTracking->mIOPtrs.mergedTrackHits;
      for (uint32_t j = 0; j < track.NClusters(); j = jNext) {
        for (jNext = j + 1; jNext < track.NClusters(); jNext++) { // scan ahead until the sector / row changes
          if (trackClusters[track.FirstClusterRef() + j].sector != trackClusters[track.FirstClusterRef() + jNext].sector || trackClusters[track.FirstClusterRef() + j].row != trackClusters[track.FirstClusterRef() + jNext].row) {
        if (!track.MergedLooper() && rowClCount) {
        if (mcAvail && rowClCount) {
          for (uint32_t k = j; k < jNext; k++) {
            const auto& cl = trackClusters[track.FirstClusterRef() + k];
            bool labelOk = false, labelOkNonFake = false;
            const mcLabelI_t& trkLabel = mTrackMCLabels[i];
            if (trkLabel.isValid() && !trkLabel.isNoise()) {
              for (int32_t l = 0; l < GetMCLabelNID(cl.num); l++) {
                const mcLabelI_t& clLabel = GetMCLabel(cl.num, l);
                if (clLabel.isValid() && !clLabel.isNoise() && CompareIgnoreFake(trkLabel, clLabel)) {
                  if (!trkLabel.isFake()) {
                    labelOkNonFake = true;
            clusterAttachCounts[cl.num][0] += 1.0f;
            clusterAttachCounts[cl.num][1] += (float)labelOk / rowClCount;
            clusterAttachCounts[cl.num][2] += (float)labelOkNonFake / rowClCount;
      mNCl[1]->Fill(nClCorrected);
      mT0[0]->Fill(track.GetParam().GetTOffset());
      if (mTrackMCLabels.size() && !mTrackMCLabels[i].isFake() && !track.MergedLooper() && !track.CCE()) {
        const auto& info = GetMCTrack(mTrackMCLabels[i]);
        if (info.t0 != -100.f) {
          mT0[1]->Fill(track.GetParam().GetTOffset() - info.t0);
    if (mClNative && mTracking && mTracking->GetTPCTransformHelper()) {
      for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
        const auto& cl = mClNative->clusters[i][j][k];
        GPUTPCConvertImpl::convert(*mTracking->GetTPCTransformHelper()->getCorrMap(), mTracking->GetParam(), i, j, cl.getPad(), cl.getTime(), x, y, z);
        mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
    double clusterAttachNormalizedCount = 0, clusterAttachNormalizedCountNonFake = 0;
    for (uint32_t i = 0; i < clusterAttachCounts.size(); i++) {
      if (clusterAttachCounts[i][0]) {
        clusterAttachNormalizedCount += clusterAttachCounts[i][1] / clusterAttachCounts[i][0];
        clusterAttachNormalizedCountNonFake += clusterAttachCounts[i][2] / clusterAttachCounts[i][0];
    mClusterCounts.nCorrectlyAttachedNormalized = clusterAttachNormalizedCount;
    mClusterCounts.nCorrectlyAttachedNormalizedNonFake = clusterAttachNormalizedCountNonFake;
    clusterAttachCounts.clear();
    if (timer.IsRunning()) {
      GPUInfo("QA Time: Fill track statistics:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
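// Illustrative sketch (not part of the original source): the clusterAttachCounts bookkeeping
// above reduces, per cluster, to "fraction of attachments that carry the correct MC label",
// summed over all clusters. A standalone version of the final normalization loop, with a
// hypothetical `counts` vector of {nAttached, nCorrect, nCorrectNonFake} triplets, would be:
//
//   double normalized = 0., normalizedNonFake = 0.;
//   for (const auto& c : counts) {
//     if (c[0] > 0) {
//       normalized += c[1] / c[0];
//       normalizedNonFake += c[2] / c[0];
//     }
//   }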
  uint32_t nCl = clNative ? clNative->nClustersTotal : mTracking->GetProcessors()->tpcMerger.NMaxClusters();
  mClusterCounts.nTotal += nCl;
  if (mQATasks & (taskClusterCounts | taskClusterRejection)) {
    for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
      for (uint32_t iCl = 0; iCl < clNative->nClusters[iSector][iRow]; iCl++) {
        int32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[i];
        const auto& r = checkClusterState<true>(attach, &mClusterCounts);
        if (mQATasks & taskClusterRejection) {
          float totalWeight = 0, weight400 = 0, weight40 = 0;
          for (int32_t j = 0; j < GetMCLabelNID(i); j++) {
            const auto& label = GetMCLabel(i, j);
            if (GetMCLabelID(label) >= 0) {
              totalWeight += GetMCLabelWeight(label);
              if (GetMCTrackObj(mMCParam, label).pt >= 0.4) {
                weight400 += GetMCLabelWeight(label);
              if (GetMCTrackObj(mMCParam, label).pt <= 0.04) {
                weight40 += GetMCLabelWeight(label);
          if (totalWeight > 0 && 10.f * weight400 >= totalWeight) {
            if (!r.unattached && !r.protect && !r.physics) {
              mClusterCounts.nFakeRemove400++;
              int32_t totalFake = weight400 < 0.9f * totalWeight;
                mClusterCounts.nFullFakeRemove400++;
            mClusterCounts.nAbove400++;
          if (totalWeight > 0 && weight40 >= 0.9 * totalWeight) {
            mClusterCounts.nBelow40++;
            if (r.protect || r.physics) {
              mClusterCounts.nFakeProtect40++;
          mClusterCounts.nPhysics++;
          mClusterCounts.nProt++;
          mClusterCounts.nUnattached++;
        if (mQATasks & taskClusterRejection) {
          if (mTracking && clNative) {
            mClRej[0]->Fill(cl.getPad() - GPUTPCGeometry::NPads(iRow) / 2 + 0.5, iRow, 1.f);
            if (!r.unattached && !r.protect) {
              mClRej[1]->Fill(cl.getPad() - GPUTPCGeometry::NPads(iRow) / 2 + 0.5, iRow, 1.f);
    if ((mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
      DoClusterCounts(nullptr);
      mClusterCounts = counts_t();
    if (timer.IsRunning()) {
      GPUInfo("QA Time: Cluster Counts:\t%6.0f us", timer.GetCurrentElapsedTime(true) * 1e6);
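// Illustrative note (not part of the original source): the MC-weight thresholds above are
// plain fractions of a cluster's total label weight. "10.f * weight400 >= totalWeight"
// selects clusters with at least 10% of their weight from tracks with pt >= 0.4 GeV, and
// "weight40 >= 0.9 * totalWeight" selects clusters dominated (90%) by tracks with
// pt <= 0.04 GeV. A minimal standalone check with hypothetical values:
//
//   float totalWeight = 1.0f, weight400 = 0.15f;                                // hypothetical
//   bool countsAsAbove400 = totalWeight > 0 && 10.f * weight400 >= totalWeight; // true: >= 10%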
  if (mConfig.dumpToROOT && !tracksExternal) {
    if (!clNative || !mTracking || !mTracking->mIOPtrs.mergedTrackHitAttachment || !mTracking->mIOPtrs.mergedTracks) {
      throw std::runtime_error("Cannot dump non o2::tpc::clusterNative clusters, need also hit attachment and GPU tracks");
    }
        for (uint32_t k = 0; k < mClNative->nClusters[i][j]; k++) {
          const auto& cl = mClNative->clusters[i][j][k];
          uint32_t attach = mTracking->mIOPtrs.mergedTrackHitAttachment[clid];
          float x = 0, y = 0, z = 0;
            const auto& trk = mTracking->mIOPtrs.mergedTracks[track];
            mTracking->GetTPCTransformHelper()->Transform(i, j, cl.getPad(), cl.getTime(), x, y, z, trk.GetParam().GetTOffset());
            mTracking->GetParam().Sector2Global(i, x, y, z, &x, &y, &z);
          uint32_t extState = mTracking->mIOPtrs.mergedTrackHitStates ? mTracking->mIOPtrs.mergedTrackHitStates[clid] : 0;
          if (mConfig.dumpToROOT >= 2) {
            memset((void*)&trk, 0, sizeof(trk));
            memset((void*)&trkHit, 0, sizeof(trkHit));
              trk = mTracking->mIOPtrs.mergedTracks[track];
              for (uint32_t l = 0; l < trk.NClusters(); l++) {
                const auto& tmp = mTracking->mIOPtrs.mergedTrackHits[trk.FirstClusterRef() + l];
                if (tmp.num == clid) {
            static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, GPUTPCGMMergedTrack, GPUTPCGMMergedTrackHit, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "track", "trackHit", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
            cldump.Fill(cl, trk, trkHit, attach, extState, x, y, z, i, j, mNEvents - 1);
            static auto cldump = GPUROOTDump<o2::tpc::ClusterNative, uint32_t, uint32_t, float, float, float, uint32_t, uint32_t, uint32_t>::getNew("cluster", "attach", "extState", "x", "y", "z", "sector", "row", "nEv", "clusterTree");
            cldump.Fill(cl, attach, extState, x, y, z, i, j, mNEvents - 1);
    for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
      if (mTracking->mIOPtrs.mergedTracks[i].OK()) {
        trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.mergedTracks[i]);
    if (mTracking && mTracking->GetProcessingSettings().createO2Output) {
      for (uint32_t i = 0; i < mTracking->mIOPtrs.nOutputTracksTPCO2; i++) {
        o2trkdump.Fill(mNEvents - 1, mTracking->mIOPtrs.outputTracksTPCO2[i]);
  if (mConfig.compareTrackStatus) {
#ifdef GPUCA_DETERMINISTIC_MODE
    if (!mTracking || !mTracking->GetProcessingSettings().deterministicGPUReconstruction)
      throw std::runtime_error("Need deterministic processing to compare track status");
    std::vector<uint8_t> status(mTracking->mIOPtrs.nMergedTracks);
    for (uint32_t i = 0; i < mTracking->mIOPtrs.nMergedTracks; i++) {
      const auto& trk = mTracking->mIOPtrs.mergedTracks[i];
      status[i] = trk.OK() && trk.NClusters() && trk.GetParam().GetNDF() > 0 && (mConfig.noMC || (mTrackMCLabels[i].isValid() && !mTrackMCLabels[i].isFake()));
    if (mConfig.compareTrackStatus == 1) {
      std::ofstream("track.status", std::ios::binary).write((char*)status.data(), status.size() * sizeof(status[0]));
    } else if (mConfig.compareTrackStatus == 2) {
      std::ifstream f("track.status", std::ios::binary | std::ios::ate);
      std::vector<uint8_t> comp(f.tellg());
      f.read((char*)comp.data(), comp.size());
      if (comp.size() != status.size()) {
        throw std::runtime_error("Number of track candidates in track fit in track.status and in current reconstruction differ");
      std::vector<uint32_t> missing, missingComp;
      for (uint32_t i = 0; i < status.size(); i++) {
        if (status[i] && !comp[i]) {
          missingComp.emplace_back(i);
        if (comp[i] && !status[i]) {
          missing.emplace_back(i);
      auto printer = [](std::vector<uint32_t> m, const char* name) {
        printf("Missing in %s reconstruction: (%zu)\n", name, m.size());
        for (uint32_t i = 0; i < m.size(); i++) {
      printer(missing, "current");
      printer(missingComp, "comparison");
  mTrackingScratchBuffer.clear();
  mTrackingScratchBuffer.shrink_to_fit();
void GPUQA::GetName(char* fname, int32_t k, bool noDash)
{
  const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
  if (k || mConfig.inputHistogramsOnly || mConfig.name.size()) {
    if (!(mConfig.inputHistogramsOnly || k)) {
      snprintf(fname, 1024, "%s%s", mConfig.name.c_str(), noDash ? "" : " - ");
    } else if (mConfig.compareInputNames.size() > (unsigned)(k - nNewInput)) {
      snprintf(fname, 1024, "%s%s", mConfig.compareInputNames[k - nNewInput].c_str(), noDash ? "" : " - ");
      strcpy(fname, mConfig.compareInputs[k - nNewInput].c_str());
      if (strlen(fname) > 5 && strcmp(fname + strlen(fname) - 5, ".root") == 0) {
        fname[strlen(fname) - 5] = 0;
      strcat(fname, " - ");
T* GPUQA::GetHist(T*& ee, std::vector<std::unique_ptr<TFile>>& tin, int32_t k, int32_t nNewInput)
{
  if ((mConfig.inputHistogramsOnly || k) && (e = dynamic_cast<T*>(tin[k - nNewInput]->Get(e->GetName()))) == nullptr) {
    GPUWarning("Missing histogram in input %s: %s", mConfig.compareInputs[k - nNewInput].c_str(), ee->GetName());
void GPUQA::DrawQAHistogramsCleanup()
{
  clearGarbagageCollector();
}

void GPUQA::resetHists()
{
  if (!mQAInitialized) {
    throw std::runtime_error("QA not initialized");
  }
  if (mHaveExternalHists) {
    throw std::runtime_error("Cannot reset external hists");
  }
  for (auto& h : *mHist1D) {
  for (auto& h : *mHist2D) {
  for (auto& h : *mHist1Dd) {
  for (auto& h : *mHistGraph) {
    h = TGraphAsymmErrors();
  }
  mClusterCounts = counts_t();
}
  const auto oldRootIgnoreLevel = gErrorIgnoreLevel;
  gErrorIgnoreLevel = kWarning;
  if (!mQAInitialized) {
    throw std::runtime_error("QA not initialized");
  }
  if (mTracking && mTracking->GetProcessingSettings().debugLevel >= 2) {
    printf("Creating QA Histograms\n");
  }
  std::vector<Color_t> colorNums(COLORCOUNT);
  if (!(qcout || mConfig.writeFileExt == "root" || mConfig.writeFileExt == "C")) {
    [[maybe_unused]] static int32_t initColorsInitialized = initColors();
  }
  for (int32_t i = 0; i < COLORCOUNT; i++) {
    colorNums[i] = (qcout || mConfig.writeFileExt == "root" || mConfig.writeFileExt == "C") ? defaultColorNums[i] : mColors[i]->GetNumber();
  }
  bool mcAvail = mcPresent();
  char name[2048], fname[1024];

  const int32_t nNewInput = mConfig.inputHistogramsOnly ? 0 : 1;
  const int32_t ConfigNumInputs = nNewInput + mConfig.compareInputs.size();

  std::vector<std::unique_ptr<TFile>> tin;
  for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
    tin.emplace_back(std::make_unique<TFile>(mConfig.compareInputs[i].c_str()));
  }
  std::unique_ptr<TFile> tout = nullptr;
  if (mConfig.output.size()) {
    tout = std::make_unique<TFile>(mConfig.output.c_str(), "RECREATE");
  }

  if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
    float legendSpacingString = 0.025;
    for (int32_t i = 0; i < ConfigNumInputs; i++) {
      if (strlen(fname) * 0.006 > legendSpacingString) {
        legendSpacingString = strlen(fname) * 0.006;
    if (mQATasks & taskTrackingEff) {
      for (int32_t ii = 0; ii < 6; ii++) {
        snprintf(name, 1024, "eff_vs_%s_layout", VSPARAMETER_NAMES[ii]);
        mCEff[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        mPEff[ii][0] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
        mPEff[ii][0]->Draw();
        mPEff[ii][0]->SetRightMargin(0.04);
        mPEff[ii][1] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
        mPEff[ii][1]->Draw();
        mPEff[ii][1]->SetRightMargin(0.04);
        mPEff[ii][2] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 0.5, dy * 2 - .001);
        mPEff[ii][2]->Draw();
        mPEff[ii][2]->SetRightMargin(0.04);
        mPEff[ii][3] = createGarbageCollected<TPad>("p3", "", 0.5, dy * 1, 1.0, dy * 2 - .001);
        mPEff[ii][3]->Draw();
        mPEff[ii][3]->SetRightMargin(0.04);
        mLEff[ii] = createGarbageCollected<TLegend>(0.92 - legendSpacingString * 1.45, 0.83 - (0.93 - 0.82) / 2. * (float)ConfigNumInputs, 0.98, 0.849);
        SetLegend(mLEff[ii]);
      }
    }
    if (mQATasks & taskTrackingRes) {
      for (int32_t ii = 0; ii < 7; ii++) {
          snprintf(name, 1024, "res_integral_layout");
          snprintf(name, 1024, "res_vs_%s_layout", VSPARAMETER_NAMES[ii]);
        mCRes[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        gStyle->SetOptFit(1);

        mPRes[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
        mPRes[ii][3]->Draw();
        mPRes[ii][3]->SetRightMargin(0.04);
        mPRes[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
        mPRes[ii][4]->Draw();
        mPRes[ii][4]->SetRightMargin(0.04);
        mPRes[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
        mPRes[ii][0]->Draw();
        mPRes[ii][0]->SetRightMargin(0.04);
        mPRes[ii][0]->SetLeftMargin(0.15);
        mPRes[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
        mPRes[ii][1]->Draw();
        mPRes[ii][1]->SetRightMargin(0.04);
        mPRes[ii][1]->SetLeftMargin(0.135);
        mPRes[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
        mPRes[ii][2]->Draw();
        mPRes[ii][2]->SetRightMargin(0.06);
        mPRes[ii][2]->SetLeftMargin(0.135);
        mLRes[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
        SetLegend(mLRes[ii]);
      }
    }
    if (mQATasks & taskTrackingResPull) {
      for (int32_t ii = 0; ii < 7; ii++) {
          snprintf(name, 1024, "pull_integral_layout");
          snprintf(name, 1024, "pull_vs_%s_layout", VSPARAMETER_NAMES[ii]);
        mCPull[ii] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        gStyle->SetOptFit(1);

        mPPull[ii][3] = createGarbageCollected<TPad>("p0", "", 0.0, dy * 0, 0.5, dy * 1);
        mPPull[ii][3]->Draw();
        mPPull[ii][3]->SetRightMargin(0.04);
        mPPull[ii][4] = createGarbageCollected<TPad>("p1", "", 0.5, dy * 0, 1.0, dy * 1);
        mPPull[ii][4]->Draw();
        mPPull[ii][4]->SetRightMargin(0.04);
        mPPull[ii][0] = createGarbageCollected<TPad>("p2", "", 0.0, dy * 1, 1. / 3., dy * 2 - .001);
        mPPull[ii][0]->Draw();
        mPPull[ii][0]->SetRightMargin(0.04);
        mPPull[ii][0]->SetLeftMargin(0.15);
        mPPull[ii][1] = createGarbageCollected<TPad>("p3", "", 1. / 3., dy * 1, 2. / 3., dy * 2 - .001);
        mPPull[ii][1]->Draw();
        mPPull[ii][1]->SetRightMargin(0.04);
        mPPull[ii][1]->SetLeftMargin(0.135);
        mPPull[ii][2] = createGarbageCollected<TPad>("p4", "", 2. / 3., dy * 1, 1.0, dy * 2 - .001);
        mPPull[ii][2]->Draw();
        mPPull[ii][2]->SetRightMargin(0.06);
        mPPull[ii][2]->SetLeftMargin(0.135);
        mLPull[ii] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
        SetLegend(mLPull[ii]);
      }
    }
    if (mQATasks & taskClusterAttach) {
      for (int32_t i = 0; i < 3; i++) {
        snprintf(name, 1024, "clusters_%s_layout", CLUSTER_TYPES[i]);
        mCClust[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        mPClust[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
        float y1 = i != 1 ? 0.77 : 0.27, y2 = i != 1 ? 0.9 : 0.42;
        mLClust[i] = createGarbageCollected<TLegend>(i == 2 ? 0.1 : (0.65 - legendSpacingString * 1.45), y2 - (y2 - y1) * (ConfigNumInputs + (i != 1) / 2.) + 0.005, i == 2 ? (0.3 + legendSpacingString * 1.45) : 0.9, y2);
        SetLegend(mLClust[i]);
      }
    }
    if (mQATasks & taskTrackStatistics) {
      mCTracks = createGarbageCollected<TCanvas>("ctrackspt", "ctrackspt", 0, 0, 700, 700. * 2. / 3.);
      mPTracks = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
      mLTracks = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.5, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
      SetLegend(mLTracks, true);

      for (int32_t i = 0; i < 2; i++) {
        snprintf(name, 2048, "ctrackst0%d", i);
        mCT0[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        mPT0[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
        mLT0[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);

        snprintf(name, 2048, "cncl%d", i);
        mCNCl[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        mPNCl[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
        mLNCl[i] = createGarbageCollected<TLegend>(0.9 - legendSpacingString * 1.45, 0.93 - (0.93 - 0.86) / 2. * (float)ConfigNumInputs, 0.98, 0.949);
        SetLegend(mLNCl[i], true);
      }
      mCClXY = createGarbageCollected<TCanvas>("clxy", "clxy", 0, 0, 700, 700. * 2. / 3.);
      mPClXY = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
    }
    if (mQATasks & taskClusterRejection) {
      for (int32_t i = 0; i < 3; i++) {
        snprintf(name, 2048, "cnclrej%d", i);
        mCClRej[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        mPClRej[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
      }
      mCClRejP = createGarbageCollected<TCanvas>("cnclrejp", "cnclrejp", 0, 0, 700, 700. * 2. / 3.);
      mPClRejP = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
    }
    if (mQATasks & taskClusterAttach) {
      for (int32_t i = 0; i < 4; i++) {
        snprintf(name, 2048, "cpadrow%d", i);
        mCPadRow[i] = createGarbageCollected<TCanvas>(name, name, 0, 0, 700, 700. * 2. / 3.);
        mPPadRow[i] = createGarbageCollected<TPad>("p0", "", 0.0, 0.0, 1.0, 1.0);
        mPPadRow[i]->Draw();
      }
    }
  }

  if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && (mQATasks & taskTrackingEff) && mcPresent()) {
    GPUInfo("QA Stats: Eff: Tracks Prim %d (Eta %d, Pt %d) %f%% (%f%%) Sec %d (Eta %d, Pt %d) %f%% (%f%%) - Res: Tracks %d (Eta %d, Pt %d)", (int32_t)mEff[3][1][0][0]->GetEntries(), (int32_t)mEff[3][1][0][3]->GetEntries(), (int32_t)mEff[3][1][0][4]->GetEntries(),
            mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights()), mEff[0][1][0][0]->GetSumOfWeights() / std::max(1., mEff[3][1][0][0]->GetSumOfWeights()), (int32_t)mEff[3][1][1][0]->GetEntries(), (int32_t)mEff[3][1][1][3]->GetEntries(),
            (int32_t)mEff[3][1][1][4]->GetEntries(), mEff[0][0][1][0]->GetSumOfWeights() / std::max(1., mEff[3][0][1][0]->GetSumOfWeights()), mEff[0][1][1][0]->GetSumOfWeights() / std::max(1., mEff[3][1][1][0]->GetSumOfWeights()), (int32_t)mRes2[0][0]->GetEntries(),
            (int32_t)mRes2[0][3]->GetEntries(), (int32_t)mRes2[0][4]->GetEntries());
  }
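// Illustrative note (not part of the original source): the percentages printed in the
// "QA Stats" line above are weighted ratios of reconstructed entries over all MC entries,
// guarded against an empty denominator, e.g. the first primary-track number:
//
//   double effPrim = mEff[0][0][0][0]->GetSumOfWeights() / std::max(1., mEff[3][0][0][0]->GetSumOfWeights());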
  int32_t flagShowVsPtLog = (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) ? 1 : 0;

  if (mQATasks & taskTrackingEff) {
    for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
      int32_t i = ii == 5 ? 4 : ii;
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        for (int32_t j = 0; j < 4; j++) {
          if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
              mPEff[ii][j]->SetLogx();
          for (int32_t l = 0; l < 3; l++) {
            if (k == 0 && mConfig.inputHistogramsOnly == 0 && ii != 5) {
                auto oldLevel = gErrorIgnoreLevel;
                gErrorIgnoreLevel = kError;
                mEffResult[0][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[5][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
                gErrorIgnoreLevel = oldLevel;
                mEff[3][j / 2][j % 2][i]->Reset();
                mEff[3][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
                mEff[3][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
                mEff[3][j / 2][j % 2][i]->Add(mEff[2][j / 2][j % 2][i]);
                mEff[4][j / 2][j % 2][i]->Reset();
                mEff[4][j / 2][j % 2][i]->Add(mEff[0][j / 2][j % 2][i]);
                mEff[4][j / 2][j % 2][i]->Add(mEff[1][j / 2][j % 2][i]);
                auto oldLevel = gErrorIgnoreLevel;
                gErrorIgnoreLevel = kError;
                mEffResult[l][j / 2][j % 2][i]->Divide(mEff[l][j / 2][j % 2][i], mEff[l == 1 ? 4 : 3][j / 2][j % 2][i], "cl=0.683 b(1,1) mode");
                gErrorIgnoreLevel = oldLevel;
            TGraphAsymmErrors* e = mEffResult[l][j / 2][j % 2][i];
            if (!mConfig.inputHistogramsOnly && k == 0) {
                mEff[l][j / 2][j % 2][i]->Write();
                mEff[3][j / 2][j % 2][i]->Write();
                mEff[4][j / 2][j % 2][i]->Write();
            } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
            e->SetTitle(EFFICIENCY_TITLES[j]);
            e->GetYaxis()->SetTitle("(Efficiency)");
            e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
            e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
            if (qcout && !mConfig.shipToQCAsCanvas) {
            if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
            e->SetMarkerColor(kBlack);
            e->SetLineColor(colorNums[(k < 3 ? (l * 3 + k) : (k * 3 + l)) % COLORCOUNT]);
            e->GetHistogram()->GetYaxis()->SetRangeUser(-0.02, 1.02);
            e->Draw(k || l ? "same P" : "AP");
              mLEff[ii]->AddEntry(e, Form("%s%s", fname, EFF_NAMES[l]), "l");
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          ChangePadTitleSize(mPEff[ii][j], 0.056);
      if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        qcout->Add(mCEff[ii]);
      if (!mConfig.enableLocalOutput) {
      doPerfFigure(0.2, 0.295, 0.025);
      mCEff[ii]->Print(Form("%s/eff_vs_%s.pdf", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii]));
      if (mConfig.writeFileExt != "") {
        mCEff[ii]->Print(Form("%s/eff_vs_%s.%s", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii], mConfig.writeFileExt.c_str()));
  if (mQATasks & (taskTrackingRes | taskTrackingResPull)) {
    TH1D *resIntegral[5] = {}, *pullIntegral[5] = {};
    TCanvas* cfit = nullptr;
    std::unique_ptr<TF1> customGaus = std::make_unique<TF1>("G", "[0]*exp(-(x-[1])*(x-[1])/(2.*[2]*[2]))");
    for (int32_t p = 0; p < 2; p++) {
      if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
      for (int32_t ii = 0; ii < 5 + flagShowVsPtLog; ii++) {
        TCanvas* can = p ? mCPull[ii] : mCRes[ii];
        TLegend* leg = p ? mLPull[ii] : mLRes[ii];
        int32_t i = ii == 5 ? 4 : ii;
        for (int32_t j = 0; j < 5; j++) {
          TH2F* src = p ? mPull2[j][i] : mRes2[j][i];
          TH1F** dst = p ? mPull[j][i] : mRes[j][i];
          TH1D*& dstIntegral = p ? pullIntegral[j] : resIntegral[j];
          TPad* pad = p ? mPPull[ii][j] : mPRes[ii][j];
          if (!mConfig.inputHistogramsOnly && ii != 5) {
            if (cfit == nullptr) {
              cfit = createGarbageCollected<TCanvas>();
            TAxis* axis = src->GetYaxis();
            int32_t nBins = axis->GetNbins();
            for (int32_t bin = 1; bin <= nBins; bin++) {
              int32_t bin0 = std::max(bin - integ, 0);
              int32_t bin1 = std::min(bin + integ, nBins);
              std::unique_ptr<TH1D> proj{src->ProjectionX("proj", bin0, bin1)};
              proj->ClearUnderflowAndOverflow();
              if (proj->GetEntries()) {
                while (proj->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
                  proj->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
                if (proj->GetEntries() < 20 || proj->GetRMS() < 0.00001) {
                  dst[0]->SetBinContent(bin, proj->GetRMS());
                  dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                  dst[1]->SetBinContent(bin, proj->GetMean());
                  dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                  proj->GetXaxis()->SetRange(0, 0);
                  proj->GetXaxis()->SetRangeUser(std::max(proj->GetXaxis()->GetXmin(), proj->GetMean() - 3. * proj->GetRMS()), std::min(proj->GetXaxis()->GetXmax(), proj->GetMean() + 3. * proj->GetRMS()));
                  bool forceLogLike = proj->GetMaximum() < 20;
                  for (int32_t k = forceLogLike ? 2 : 0; k < 3; k++) {
                    proj->Fit("gaus", forceLogLike || k == 2 ? "sQl" : k ? "sQww" : "sQ");
                    TF1* fitFunc = proj->GetFunction("gaus");
                    if (k && !forceLogLike) {
                      customGaus->SetParameters(fitFunc->GetParameter(0), fitFunc->GetParameter(1), fitFunc->GetParameter(2));
                      proj->Fit(customGaus.get(), "sQ");
                      fitFunc = customGaus.get();
                    const float sigma = fabs(fitFunc->GetParameter(2));
                    dst[0]->SetBinContent(bin, sigma);
                    dst[1]->SetBinContent(bin, fitFunc->GetParameter(1));
                    dst[0]->SetBinError(bin, fitFunc->GetParError(2));
                    dst[1]->SetBinError(bin, fitFunc->GetParError(1));
                    const bool fail1 = sigma <= 0.f;
                    const bool fail2 = fabs(proj->GetMean() - dst[1]->GetBinContent(bin)) > std::min<float>(p ? PULL_AXIS : mConfig.nativeFitResolutions ? RES_AXES_NATIVE[j] : RES_AXES[j], 3.f * proj->GetRMS());
                    const bool fail3 = dst[0]->GetBinContent(bin) > 3.f * proj->GetRMS() || dst[0]->GetBinError(bin) > 1 || dst[1]->GetBinError(bin) > 1;
                    const bool fail4 = fitFunc->GetParameter(0) < proj->GetMaximum() / 5.;
                    const bool fail = fail1 || fail2 || fail3 || fail4;
                    } else if (k >= 2) {
                      dst[0]->SetBinContent(bin, proj->GetRMS());
                      dst[0]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                      dst[1]->SetBinContent(bin, proj->GetMean());
                      dst[1]->SetBinError(bin, std::sqrt(proj->GetRMS()));
                dst[0]->SetBinContent(bin, 0.f);
                dst[0]->SetBinError(bin, 0.f);
                dst[1]->SetBinContent(bin, 0.f);
                dst[1]->SetBinError(bin, 0.f);
            dstIntegral = src->ProjectionX(mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j], 0, nBins + 1);
            while (dstIntegral->GetMaximum() < 50 && rebin < sizeof(RES_AXIS_BINS) / sizeof(RES_AXIS_BINS[0])) {
              dstIntegral->Rebin(RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin]);
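// Illustrative note (not part of the original source): RES_AXIS_BINS holds successively
// coarser bin counts, so RES_AXIS_BINS[rebin - 1] / RES_AXIS_BINS[rebin] is the integer
// grouping factor handed to TH1::Rebin(). With hypothetical values {100, 50, 25} the first
// iteration merges pairs of bins:
//
//   int32_t bins[3] = {100, 50, 25};    // hypothetical stand-in for RES_AXIS_BINS
//   int32_t factor = bins[0] / bins[1]; // == 2 -> proj->Rebin(2)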
          if (mConfig.inputHistogramsOnly) {
            dstIntegral = createGarbageCollected<TH1D>();
          dstIntegral->SetName(Form(p ? "IntPull%s" : "IntRes%s", VSPARAMETER_NAMES[j]));
          dstIntegral->SetTitle(Form(p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]));
          if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
          int32_t numColor = 0;
          float tmpMax = -1000.;
          float tmpMin = 1000.;

          for (int32_t l = 0; l < 2; l++) {
            for (int32_t k = 0; k < ConfigNumInputs; k++) {
              if (GetHist(e, tin, k, nNewInput) == nullptr) {
              if (nNewInput && k == 0 && ii != 5) {
                e->Scale(mConfig.nativeFitResolutions ? SCALE_NATIVE[j] : SCALE[j]);
                e->GetXaxis()->SetRangeUser(0.2, PT_MAX);
              } else if (LOG_PT_MIN > 0 && ii == 5) {
                e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
              } else if (ii == 5) {
                e->GetXaxis()->SetRange(1, 0);
              e->SetMinimum(-1111);
              e->SetMaximum(-1111);

              if (e->GetMaximum() > tmpMax) {
                tmpMax = e->GetMaximum();
              if (e->GetMinimum() < tmpMin) {
                tmpMin = e->GetMinimum();

          tmpSpan = tmpMax - tmpMin;
          tmpMax += tmpSpan * .02;
          tmpMin -= tmpSpan * .02;
          if (j == 2 && i < 3) {
            tmpMax += tmpSpan * 0.13 * ConfigNumInputs;

          for (int32_t k = 0; k < ConfigNumInputs; k++) {
            for (int32_t l = 0; l < 2; l++) {
              if (!mConfig.inputHistogramsOnly && k == 0) {
                e->SetTitle(Form(p ? "%s Pull" : "%s Resolution", p || mConfig.nativeFitResolutions ? PARAMETER_NAMES_NATIVE[j] : PARAMETER_NAMES[j]));
                e->SetStats(kFALSE);
                  mRes2[j][i]->SetOption("colz");
                  mRes2[j][i]->Write();
              } else if (GetHist(e, tin, k, nNewInput) == nullptr) {
              e->SetMaximum(tmpMax);
              e->SetMinimum(tmpMin);
              e->SetLineStyle(CONFIG_DASHED_MARKERS ? k + 1 : 1);
              e->GetYaxis()->SetTitle(p ? AXIS_TITLES_PULL[j] : mConfig.nativeFitResolutions ? AXIS_TITLES_NATIVE[j] : AXIS_TITLES[j]);
              e->GetXaxis()->SetTitle(XAXIS_TITLES[i]);
              if (LOG_PT_MIN > 0 && ii == 5) {
                e->GetXaxis()->SetRangeUser(LOG_PT_MIN, PT_MAX);
                e->GetYaxis()->SetTitleOffset(1.5);
                e->GetYaxis()->SetTitleOffset(1.4);
              if (qcout && !mConfig.shipToQCAsCanvas) {
              if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
              e->SetMarkerColor(kBlack);
              e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
              e->Draw(k || l ? "same" : "");
                leg->AddEntry(e, Form("%s%s", fname, l ? "Mean" : (p ? "Pull" : "Resolution")), "l");
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          ChangePadTitleSize(pad, 0.056);
        if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        if (!mConfig.enableLocalOutput) {
        doPerfFigure(0.2, 0.295, 0.025);
        can->Print(Form(p ? "%s/pull_vs_%s.pdf" : "%s/res_vs_%s.pdf", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii]));
        if (mConfig.writeFileExt != "") {
          can->Print(Form(p ? "%s/pull_vs_%s.%s" : "%s/res_vs_%s.%s", mConfig.plotsDir.c_str(), VSPARAMETER_NAMES[ii], mConfig.writeFileExt.c_str()));
    for (int32_t p = 0; p < 2; p++) {
      if ((p == 0 && (mQATasks & taskTrackingRes) == 0) || (p == 1 && (mQATasks & taskTrackingResPull) == 0)) {
      TCanvas* can = p ? mCPull[6] : mCRes[6];
      for (int32_t i = 0; i < 5; i++) {
        TPad* pad = p ? mPPull[6][i] : mPRes[6][i];
        TH1D* hist = p ? pullIntegral[i] : resIntegral[i];
        int32_t numColor = 0;
        if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
        if (!mConfig.inputHistogramsOnly && mcAvail) {
          if (e && e->GetEntries()) {
            e->Fit("gaus", "sQ");
        for (int32_t k = 0; k < ConfigNumInputs; k++) {
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
          e->SetMaximum(-1111);
          if (e->GetMaximum() > tmpMax) {
            tmpMax = e->GetMaximum();
        for (int32_t k = 0; k < ConfigNumInputs; k++) {
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
          e->SetMaximum(tmpMax * 1.02);
          e->SetMinimum(tmpMax * -0.02);
          if (tout && !mConfig.inputHistogramsOnly && k == 0) {
          if (qcout && !mConfig.shipToQCAsCanvas) {
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
          e->Draw(k == 0 ? "" : "same");
        if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        if (!mConfig.enableLocalOutput) {
        can->Print(Form(p ? "%s/pull_integral.pdf" : "%s/res_integral.pdf", mConfig.plotsDir.c_str()));
        if (mConfig.writeFileExt != "") {
          can->Print(Form(p ? "%s/pull_integral.%s" : "%s/res_integral.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
  uint64_t attachClusterCounts[N_CLS_HIST];
  if (mQATasks & taskClusterAttach) {
    if (mConfig.inputHistogramsOnly == 0) {
      for (int32_t i = N_CLS_HIST; i < N_CLS_TYPE * N_CLS_HIST - 1; i++) {
        mClusters[i]->Sumw2(true);
      double totalVal = 0;
      if (!CLUST_HIST_INT_SUM) {
        for (int32_t j = 0; j < mClusters[N_CLS_HIST - 1]->GetXaxis()->GetNbins() + 2; j++) {
          totalVal += mClusters[N_CLS_HIST - 1]->GetBinContent(j);
      if (totalVal == 0.) {
      for (int32_t i = 0; i < N_CLS_HIST; i++) {
        for (int32_t j = 0; j < mClusters[i]->GetXaxis()->GetNbins() + 2; j++) {
          val += mClusters[i]->GetBinContent(j);
          mClusters[2 * N_CLS_HIST - 1 + i]->SetBinContent(j, val / totalVal);
        attachClusterCounts[i] = val;
      if (!CLUST_HIST_INT_SUM) {
        for (int32_t i = 0; i < N_CLS_HIST; i++) {
          mClusters[2 * N_CLS_HIST - 1 + i]->SetMaximum(1.02);
          mClusters[2 * N_CLS_HIST - 1 + i]->SetMinimum(-0.02);
      for (int32_t i = 0; i < N_CLS_HIST - 1; i++) {
        auto oldLevel = gErrorIgnoreLevel;
        gErrorIgnoreLevel = kError;
        mClusters[N_CLS_HIST + i]->Divide(mClusters[i], mClusters[N_CLS_HIST - 1], 1, 1, "B");
        gErrorIgnoreLevel = oldLevel;
        mClusters[N_CLS_HIST + i]->SetMinimum(-0.02);
        mClusters[N_CLS_HIST + i]->SetMaximum(1.02);

    float tmpMax[2] = {0, 0}, tmpMin[2] = {0, 0};
    for (int32_t l = 0; l <= CLUST_HIST_INT_SUM; l++) {
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        TH1* e = mClusters[l ? (N_CLS_TYPE * N_CLS_HIST - 2) : (N_CLS_HIST - 1)];
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
        e->SetMinimum(-1111);
        e->SetMaximum(-1111);
        e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
        if (e->GetMaximum() > tmpMax[l]) {
          tmpMax[l] = e->GetMaximum();
        if (e->GetMinimum() < tmpMin[l]) {
          tmpMin[l] = e->GetMinimum();
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        for (int32_t i = 0; i < N_CLS_HIST; i++) {
          TH1* e = mClusters[l ? (2 * N_CLS_HIST - 1 + i) : i];
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
          e->SetMaximum(tmpMax[l] * 1.02);
          e->SetMinimum(tmpMax[l] * -0.02);
    for (int32_t i = 0; i < N_CLS_TYPE; i++) {
      if (mConfig.enableLocalOutput || mConfig.shipToQCAsCanvas) {
        mPClust[i]->SetLogx();
      int32_t begin = i == 2 ? (2 * N_CLS_HIST - 1) : i == 1 ? N_CLS_HIST : 0;
      int32_t end = i == 2 ? (3 * N_CLS_HIST - 1) : i == 1 ? (2 * N_CLS_HIST - 1) : N_CLS_HIST;
      int32_t numColor = 0;
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
          TH1* e = mClusters[j];
          if (GetHist(e, tin, k, nNewInput) == nullptr) {
          e->SetTitle(CLUSTER_TITLES[i]);
          e->GetYaxis()->SetTitle(i == 0 ? "Number of TPC clusters" : i == 1 ? "Fraction of TPC clusters" : CLUST_HIST_INT_SUM ? "Total TPC clusters (integrated)" : "Fraction of TPC clusters (integrated)");
          e->GetXaxis()->SetTitle("#it{p}_{Tmc} (GeV/#it{c})");
          e->GetXaxis()->SetTitleOffset(1.1);
          e->GetXaxis()->SetLabelOffset(-0.005);
          if (tout && !mConfig.inputHistogramsOnly && k == 0) {
          e->SetStats(kFALSE);
          e->SetLineStyle(CONFIG_DASHED_MARKERS ? j + 1 : 1);
          e->GetXaxis()->SetRange(2, AXIS_BINS[4]);
          if (qcout && !mConfig.shipToQCAsCanvas) {
          if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
          e->SetMarkerColor(kBlack);
          e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
          e->Draw(j == end - 1 && k == 0 ? "" : "same");
            mLClust[i]->AddEntry(e, Form("%s%s", fname, CLUSTER_NAMES[j - begin]), "l");
      if (ConfigNumInputs == 1) {
        TH1* e = reinterpret_cast<TH1F*>(mClusters[begin + CL_att_adj]->Clone());
        e->Add(mClusters[begin + CL_prot], -1);
        if (qcout && !mConfig.shipToQCAsCanvas) {
        if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        e->SetLineColor(colorNums[numColor++ % COLORCOUNT]);
        mLClust[i]->AddEntry(e, "Removed (Strategy A)", "l");
      if (!mConfig.enableLocalOutput && !mConfig.shipToQCAsCanvas) {
        qcout->Add(mCClust[i]);
      if (!mConfig.enableLocalOutput) {
      doPerfFigure(i == 0 ? 0.37 : (i == 1 ? 0.34 : 0.6), 0.295, 0.030);
      mCClust[i]->Print(Form(i == 2 ? "%s/clusters_integral.pdf" : i == 1 ? "%s/clusters_relative.pdf" : "%s/clusters.pdf", mConfig.plotsDir.c_str()));
      if (mConfig.writeFileExt != "") {
        mCClust[i]->Print(Form(i == 2 ? "%s/clusters_integral.%s" : i == 1 ? "%s/clusters_relative.%s" : "%s/clusters.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
    for (int32_t i = 0; i < 4; i++) {
      auto* e = mPadRow[i];
      if (tout && !mConfig.inputHistogramsOnly) {
      e->SetOption("colz");
      std::string title = "First Track Pad Row (p_{T} > 1GeV, N_{Cl} #geq " + std::to_string(PADROW_CHECK_MINCLS);
        title += ", row_{trk} > row_{MC} + 3, row_{MC} < 10";
        title += ", #Phi_{Cl} < 0.15";
      e->SetTitle(title.c_str());
      e->GetXaxis()->SetTitle(i == 3 ? "Local Occupancy" : (i ? "#Phi_{Cl} (sector)" : "First MC Pad Row"));
      e->GetYaxis()->SetTitle("First Pad Row");
      static const constexpr char* PADROW_NAMES[4] = {"MC", "Phi", "Phi1", "Occ"};
      mCPadRow[i]->Print(Form("%s/padRow%s.pdf", mConfig.plotsDir.c_str(), PADROW_NAMES[i]));
      if (mConfig.writeFileExt != "") {
        mCPadRow[i]->Print(Form("%s/padRow%s.%s", mConfig.plotsDir.c_str(), PADROW_NAMES[i], mConfig.writeFileExt.c_str()));
  if ((mQATasks & taskClusterCounts) && !mHaveExternalHists && !mConfig.clusterRejectionHistograms && !mConfig.inputHistogramsOnly) {
    DoClusterCounts(attachClusterCounts);
  }
  if ((qcout || tout) && (mQATasks & taskClusterCounts) && mConfig.clusterRejectionHistograms) {
    for (uint32_t i = 0; i < mHistClusterCount.size(); i++) {
        mHistClusterCount[i]->Write();
        qcout->Add(mHistClusterCount[i]);
  if (mQATasks & taskTrackStatistics) {
    for (int32_t k = 0; k < ConfigNumInputs; k++) {
      if (GetHist(e, tin, k, nNewInput) == nullptr) {
      e->SetMaximum(-1111);
      if (e->GetMaximum() > tmpMax) {
        tmpMax = e->GetMaximum();

    mPTracks->SetLogx();
    for (int32_t k = 0; k < ConfigNumInputs; k++) {
      if (GetHist(e, tin, k, nNewInput) == nullptr) {
      if (tout && !mConfig.inputHistogramsOnly && k == 0) {
      e->SetMaximum(tmpMax * 1.02);
      e->SetMinimum(tmpMax * -0.02);
      e->SetStats(kFALSE);
      e->SetTitle("Number of Tracks vs #it{p}_{T}");
      e->GetYaxis()->SetTitle("Number of Tracks");
      e->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})");
      e->SetMarkerColor(kBlack);
      e->SetLineColor(colorNums[k % COLORCOUNT]);
      e->Draw(k == 0 ? "" : "same");
      GetName(fname, k, mConfig.inputHistogramsOnly);
      mLTracks->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s" : "%sTrack #it{p}_{T}", fname), "l");
    doPerfFigure(0.63, 0.7, 0.030);
    mCTracks->Print(Form("%s/tracks.pdf", mConfig.plotsDir.c_str()));
    if (mConfig.writeFileExt != "") {
      mCTracks->Print(Form("%s/tracks.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
    for (int32_t i = 0; i < 2; i++) {
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
        e->SetMaximum(-1111);
        if (e->GetMaximum() > tmpMax) {
          tmpMax = e->GetMaximum();
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
        if (tout && !mConfig.inputHistogramsOnly && k == 0) {
        e->SetMaximum(tmpMax * 1.02);
        e->SetMinimum(tmpMax * -0.02);
        e->SetStats(kFALSE);
        e->SetTitle(i ? "Track t_{0} resolution" : "Track t_{0} distribution");
        e->GetYaxis()->SetTitle("a.u.");
        e->GetXaxis()->SetTitle(i ? "t_{0} - t_{0, mc}" : "t_{0}");
        e->SetMarkerColor(kBlack);
        e->SetLineColor(colorNums[k % COLORCOUNT]);
        e->Draw(k == 0 ? "" : "same");
        GetName(fname, k, mConfig.inputHistogramsOnly);
        mLT0[i]->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s (%s)" : "%sTrack t_{0} %s", fname, i ? "" : "resolution"), "l");
      doPerfFigure(0.63, 0.7, 0.030);
      mCT0[i]->Print(Form("%s/t0%s.pdf", mConfig.plotsDir.c_str(), i ? "_res" : ""));
      if (mConfig.writeFileExt != "") {
        mCT0[i]->Print(Form("%s/t0%s.%s", mConfig.plotsDir.c_str(), i ? "_res" : "", mConfig.writeFileExt.c_str()));
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
        e->SetMaximum(-1111);
        if (e->GetMaximum() > tmpMax) {
          tmpMax = e->GetMaximum();
      for (int32_t k = 0; k < ConfigNumInputs; k++) {
        if (GetHist(e, tin, k, nNewInput) == nullptr) {
        if (tout && !mConfig.inputHistogramsOnly && k == 0) {
        e->SetMaximum(tmpMax * 1.02);
        e->SetMinimum(tmpMax * -0.02);
        e->SetStats(kFALSE);
        e->SetTitle(i ? "Number of Rows with attached Cluster" : "Number of Clusters");
        e->GetYaxis()->SetTitle("a.u.");
        e->GetXaxis()->SetTitle(i ? "N_{Rows with Clusters}" : "N_{Clusters}");
        e->SetMarkerColor(kBlack);
        e->SetLineColor(colorNums[k % COLORCOUNT]);
        e->Draw(k == 0 ? "" : "same");
        GetName(fname, k, mConfig.inputHistogramsOnly);
        mLNCl[i]->AddEntry(e, Form(mConfig.inputHistogramsOnly ? "%s" : (i ? "%sN_{Clusters}" : "%sN_{Rows with Clusters}"), fname), "l");
      doPerfFigure(0.6, 0.7, 0.030);
      mCNCl[i]->Print(Form("%s/nClusters%s.pdf", mConfig.plotsDir.c_str(), i ? "_corrected" : ""));
      if (mConfig.writeFileExt != "") {
        mCNCl[i]->Print(Form("%s/nClusters%s.%s", mConfig.plotsDir.c_str(), i ? "_corrected" : "", mConfig.writeFileExt.c_str()));

    mClXY->SetOption("colz");
    mCClXY->Print(Form("%s/clustersXY.pdf", mConfig.plotsDir.c_str()));
    if (mConfig.writeFileExt != "") {
      mCClXY->Print(Form("%s/clustersXY.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
  if (mQATasks & taskClusterRejection) {
    mClRej[2]->Divide(mClRej[1], mClRej[0]);
    for (int32_t i = 0; i < 3; i++) {
      if (tout && !mConfig.inputHistogramsOnly) {
      mClRej[i]->SetTitle(REJECTED_NAMES[i]);
      mClRej[i]->SetOption("colz");
      mCClRej[i]->Print(Form("%s/clustersRej%d%s.pdf", mConfig.plotsDir.c_str(), i, REJECTED_NAMES[i]));
      if (mConfig.writeFileExt != "") {
        mCClRej[i]->Print(Form("%s/clustersRej%d%s.%s", mConfig.plotsDir.c_str(), i, REJECTED_NAMES[i], mConfig.writeFileExt.c_str()));

    for (int32_t k = 0; k < ConfigNumInputs; k++) {
      auto* tmp = mClRej[0];
      if (GetHist(tmp, tin, k, nNewInput) == nullptr) {
      TH1D* proj1 = tmp->ProjectionY(Form("clrejptmp1%d", k));
      proj1->SetDirectory(nullptr);
      if (GetHist(tmp, tin, k, nNewInput) == nullptr) {
      TH1D* proj2 = tmp->ProjectionY(Form("clrejptmp2%d", k));
      proj2->SetDirectory(nullptr);
      if (GetHist(e, tin, k, nNewInput) == nullptr) {
      e->Divide(proj2, proj1);
      if (tout && !mConfig.inputHistogramsOnly && k == 0) {
      e->SetMinimum(-0.02);
      e->SetMaximum(0.22);
      e->SetTitle("Rejected Clusters");
      e->GetXaxis()->SetTitle("Pad Row");
      e->GetYaxis()->SetTitle("Rejected Clusters (fraction)");
      e->Draw(k == 0 ? "" : "same");
    mPClRejP->Print(Form("%s/clustersRejProjected.pdf", mConfig.plotsDir.c_str()));
    if (mConfig.writeFileExt != "") {
      mPClRejP->Print(Form("%s/clustersRejProjected.%s", mConfig.plotsDir.c_str(), mConfig.writeFileExt.c_str()));
  if (tout && !mConfig.inputHistogramsOnly && mConfig.writeMCLabels) {
    gInterpreter->GenerateDictionary("vector<vector<int32_t>>", "");
    tout->WriteObject(&mcEffBuffer, "mcEffBuffer");
    tout->WriteObject(&mcLabelBuffer, "mcLabelBuffer");
    remove("AutoDict_vector_vector_int__.cxx");
    remove("AutoDict_vector_vector_int___cxx_ACLiC_dict_rdict.pcm");
    remove("AutoDict_vector_vector_int___cxx.d");
    remove("AutoDict_vector_vector_int___cxx.so");
  }

  for (uint32_t i = 0; i < mConfig.compareInputs.size(); i++) {
  clearGarbagageCollector();
  GPUInfo("GPU TPC QA histograms have been written to pdf%s%s files", mConfig.writeFileExt == "" ? "" : " and ", mConfig.writeFileExt.c_str());
  gErrorIgnoreLevel = oldRootIgnoreLevel;
void GPUQA::PrintClusterCount(int32_t mode, int32_t& num, const char* name, uint64_t n, uint64_t normalization)
{
  } else if (mode == 1) {
    snprintf(name2, 128, "clusterCount%d_", num);
    char* ptr = name2 + strlen(name2);
    for (uint32_t i = 0; i < strlen(name); i++) {
    createHist(mHistClusterCount[num], name2, name, 1000, 0, mConfig.histMaxNClusters, 1000, 0, 100);
  } else if (mode == 0) {
    if (normalization && mConfig.enableLocalOutput) {
      for (uint32_t i = 0; i < 1 + (mTextDump != nullptr); i++) {
        fprintf(i ? mTextDump : stdout, "\t%40s: %'12" PRIu64 " (%6.2f%%)\n", name, n, 100.f * n / normalization);
    if (mConfig.clusterRejectionHistograms) {
      float ratio = 100.f * n / std::max<uint64_t>(normalization, 1);
      mHistClusterCount[num]->Fill(normalization, ratio, 1);
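// Illustrative usage note (not part of the original source): DoClusterCounts() below forwards
// its own `mode` argument to every PrintClusterCount() call, so the mode == 1 branch above only
// books the per-category histogram while mode == 0 prints the count and fills it. A hypothetical
// booking pass followed by a printing pass would look like:
//
//   int32_t num = 0;
//   PrintClusterCount(1, num, "Physics", 0, 0);             // book histogram only
//   PrintClusterCount(0, num, "Physics", nPhysics, nTotal); // print count and fill histogram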
int32_t GPUQA::DoClusterCounts(uint64_t* attachClusterCounts, int32_t mode)
{
  if (mConfig.enableLocalOutput && !mConfig.inputHistogramsOnly && mConfig.plotsDir != "") {
    mTextDump = fopen((mConfig.plotsDir + "/clusterCounts.txt").c_str(), "w+");
  }
  if (mcPresent() && (mQATasks & taskClusterAttach) && attachClusterCounts) {
    for (int32_t i = 0; i < N_CLS_HIST; i++) {
      PrintClusterCount(mode, num, CLUSTER_NAMES[i], attachClusterCounts[i], mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Unattached", attachClusterCounts[N_CLS_HIST - 1] - attachClusterCounts[CL_att_adj], mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Removed (Strategy A)", attachClusterCounts[CL_att_adj] - attachClusterCounts[CL_prot], mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Unaccessible", mClusterCounts.nUnaccessible, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "All Clusters", mClusterCounts.nTotal, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Used in Physics", mClusterCounts.nPhysics, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Protected", mClusterCounts.nProt, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Unattached", mClusterCounts.nUnattached, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Removed (Strategy A)", mClusterCounts.nTotal - mClusterCounts.nUnattached - mClusterCounts.nProt, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Removed (Strategy B)", mClusterCounts.nTotal - mClusterCounts.nProt, mClusterCounts.nTotal);

  PrintClusterCount(mode, num, "Merged Loopers (Track Merging)", mClusterCounts.nMergedLooperConnected, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Merged Loopers (Afterburner)", mClusterCounts.nMergedLooperUnconnected, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Looping Legs (other)", mClusterCounts.nLoopers, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "High Inclination Angle", mClusterCounts.nHighIncl, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Rejected", mClusterCounts.nRejected, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Tube (> 200 MeV)", mClusterCounts.nTube, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Tube (< 200 MeV)", mClusterCounts.nTube200, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Low Pt < 50 MeV", mClusterCounts.nLowPt, mClusterCounts.nTotal);
  PrintClusterCount(mode, num, "Low Pt < 200 MeV", mClusterCounts.n200MeV, mClusterCounts.nTotal);

  if (mcPresent() && (mQATasks & taskClusterAttach)) {
    PrintClusterCount(mode, num, "Tracks > 400 MeV", mClusterCounts.nAbove400, mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Fake Removed (> 400 MeV)", mClusterCounts.nFakeRemove400, mClusterCounts.nAbove400);
    PrintClusterCount(mode, num, "Full Fake Removed (> 400 MeV)", mClusterCounts.nFullFakeRemove400, mClusterCounts.nAbove400);
    PrintClusterCount(mode, num, "Tracks < 40 MeV", mClusterCounts.nBelow40, mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Fake Protect (< 40 MeV)", mClusterCounts.nFakeProtect40, mClusterCounts.nBelow40);
  }
  if (mcPresent() && (mQATasks & taskTrackStatistics)) {
    PrintClusterCount(mode, num, "Correctly Attached all-trk normalized", mClusterCounts.nCorrectlyAttachedNormalized, mClusterCounts.nTotal);
    PrintClusterCount(mode, num, "Correctly Attached non-fake normalized", mClusterCounts.nCorrectlyAttachedNormalizedNonFake, mClusterCounts.nTotal);
  }
  mTextDump = nullptr;
void* GPUQA::AllocateScratchBuffer(size_t nBytes)
{
  mTrackingScratchBuffer.resize((nBytes + sizeof(mTrackingScratchBuffer[0]) - 1) / sizeof(mTrackingScratchBuffer[0]));
  return mTrackingScratchBuffer.data();
}
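// Illustrative note (not part of the original source): the resize above uses the usual integer
// ceiling-division idiom, rounding the element count up so that at least nBytes are available,
// e.g. for a hypothetical 4-byte element type:
//
//   size_t nBytes = 10;
//   size_t elem = 4;                        // hypothetical element size
//   size_t n = (nBytes + elem - 1) / elem;  // == 3 elements -> 12 bytes of scratch space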