// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file GPUChainTrackingDebugAndProfiling.cxx
/// \author David Rohr

#include "GPUChainTracking.h"
#include "GPUConstantMem.h"
#include "GPUTPCClusterFilter.h"
#include <map>
#include <memory>
#include <string>
#include <numeric>

#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
#include "bitmapfile.h"
#endif

#define PROFILE_MAX_SIZE (100 * 1024 * 1024)

using namespace o2::gpu;

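// Pack three 8-bit channels into a 32-bit pixel, r in the least-significant byte (the
// same byte order as the Win32 RGB macro); the value is written verbatim into the
// 32-bit BMP below.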
static inline uint32_t RGB(uint8_t r, uint8_t g, uint8_t b) { return (uint32_t)r | ((uint32_t)g << 8) | ((uint32_t)b << 16); }

int32_t GPUChainTracking::PrepareProfile()
{
#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
  char* tmpMem = (char*)mRec->AllocateDirectMemory(PROFILE_MAX_SIZE, GPUMemoryResource::MEMORY_GPU);
  processorsShadow()->tpcTrackers[0].mStageAtSync = tmpMem;
  runKernel<GPUMemClean16>({{BlockCount(), ThreadCount(), -1}}, tmpMem, PROFILE_MAX_SIZE);
#endif
  return 0;
}

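// Dump the per-thread tracklet-constructor profile collected via PrepareProfile() as
// profile.txt and profile.bmp. Each bitmap row is one sync point; every warp contributes
// its 32 lanes plus one white separator column. Lane colors encode the stage recorded at
// the sync: 1 = red, 2 = green, 3 = blue, 4 = yellow, 0 (idle) = black.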
int32_t GPUChainTracking::DoProfile()
{
#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
  std::unique_ptr<char[]> stageAtSync{new char[PROFILE_MAX_SIZE]};
  mRec->GPUMemCpy(stageAtSync.get(), processorsShadow()->tpcTrackers[0].mStageAtSync, PROFILE_MAX_SIZE, -1, false);

  FILE* fp = fopen("profile.txt", "w+");
  FILE* fp2 = fopen("profile.bmp", "w+b");

  const int32_t bmpheight = 8192;
  BITMAPFILEHEADER bmpFH;
  BITMAPINFOHEADER bmpIH;
  memset(&bmpFH, 0, sizeof(bmpFH));
  memset(&bmpIH, 0, sizeof(bmpIH));

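  // Image width: 33 pixel columns per 32-thread warp (32 lanes + 1 white separator),
  // minus the trailing separator. Note that bfSize counts pixels rather than bytes here;
  // most BMP readers ignore that field, so the image still displays correctly.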
  bmpFH.bfType = 19778; //"BM"
  bmpFH.bfSize = sizeof(bmpFH) + sizeof(bmpIH) + (ConstructorBlockCount() * ConstructorThreadCount() / 32 * 33 - 1) * bmpheight;
  bmpFH.bfOffBits = sizeof(bmpFH) + sizeof(bmpIH);

  bmpIH.biSize = sizeof(bmpIH);
  bmpIH.biWidth = ConstructorBlockCount() * ConstructorThreadCount() / 32 * 33 - 1;
  bmpIH.biHeight = bmpheight;
  bmpIH.biPlanes = 1;
  bmpIH.biBitCount = 32;

  fwrite(&bmpFH, 1, sizeof(bmpFH), fp2);
  fwrite(&bmpIH, 1, sizeof(bmpIH), fp2);

  [[maybe_unused]] int32_t nEmptySync = 0;
  for (uint32_t i = 0; i < bmpheight * ConstructorBlockCount() * ConstructorThreadCount(); i += ConstructorBlockCount() * ConstructorThreadCount()) {
    int32_t fEmpty = 1;
    for (uint32_t j = 0; j < ConstructorBlockCount() * ConstructorThreadCount(); j++) {
      fprintf(fp, "%d\t", stageAtSync[i + j]);
      int32_t color = 0;
      if (stageAtSync[i + j] == 1) {
        color = RGB(255, 0, 0);
      }
      if (stageAtSync[i + j] == 2) {
        color = RGB(0, 255, 0);
      }
      if (stageAtSync[i + j] == 3) {
        color = RGB(0, 0, 255);
      }
      if (stageAtSync[i + j] == 4) {
        color = RGB(255, 255, 0);
      }
      fwrite(&color, 1, sizeof(int32_t), fp2);
      if (j > 0 && j % 32 == 0) {
        color = RGB(255, 255, 255);
        fwrite(&color, 1, 4, fp2);
      }
      if (stageAtSync[i + j]) {
        fEmpty = 0;
      }
    }
    fprintf(fp, "\n");
    if (fEmpty) {
      nEmptySync++;
    } else {
      nEmptySync = 0;
    }
    // if (nEmptySync == GPUCA_SCHED_ROW_STEP + 2) break;
  }

  fclose(fp);
  fclose(fp2);
#endif
  return 0;
}

namespace
{
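// Accumulates per-allocation fill statistics: the largest single use (nMax), the worst
// single fill fraction relative to the allocated bound (maxUse), and the summed use and
// bound over all contributing allocations.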
struct GPUChainTrackingMemUsage {
  void add(size_t n, size_t bound)
  {
    nMax = std::max(nMax, n);
    maxUse = std::max(n / std::max<double>(bound, 1.), maxUse);
    nSum += n;
    nBoundSum += bound;
    count++;
  }
  size_t nMax = 0;
  size_t nSum = 0;
  size_t nBoundSum = 0;
  double maxUse = 0.;
  uint32_t count = 0;
};

void addToMap(std::string name, std::map<std::string, GPUChainTrackingMemUsage>& map, uint64_t n, uint64_t bound)
{
  GPUChainTrackingMemUsage& obj = map.insert({name, {}}).first->second;
  obj.add(n, bound);
}
} // namespace

void GPUChainTracking::PrintMemoryStatistics()
{
  std::map<std::string, GPUChainTrackingMemUsage> usageMap;
  for (int32_t i = 0; i < NSECTORS; i++) {
#ifdef GPUCA_TPC_GEOMETRY_O2
    if (processors()->tpcClusterer[i].mPmemory) {
      addToMap("TPC Clusterer Sector Peaks", usageMap, processors()->tpcClusterer[i].mPmemory->counters.nPeaks, processors()->tpcClusterer[i].mNMaxPeaks);
      addToMap("TPC Clusterer Sector Clusters", usageMap, processors()->tpcClusterer[i].mPmemory->counters.nClusters, processors()->tpcClusterer[i].mNMaxClusters);
    }
#endif
    addToMap("TPC Sector Start Hits", usageMap, *processors()->tpcTrackers[i].NStartHits(), processors()->tpcTrackers[i].NMaxStartHits());
    addToMap("TPC Sector Tracklets", usageMap, *processors()->tpcTrackers[i].NTracklets(), processors()->tpcTrackers[i].NMaxTracklets());
    addToMap("TPC Sector TrackletHits", usageMap, *processors()->tpcTrackers[i].NRowHits(), processors()->tpcTrackers[i].NMaxRowHits());
    addToMap("TPC Sector Tracks", usageMap, *processors()->tpcTrackers[i].NTracks(), processors()->tpcTrackers[i].NMaxTracks());
    addToMap("TPC Sector TrackHits", usageMap, *processors()->tpcTrackers[i].NTrackHits(), processors()->tpcTrackers[i].NMaxTrackHits());
  }
  addToMap("TPC Clusterer Clusters", usageMap, mRec->MemoryScalers()->nTPCHits, mRec->MemoryScalers()->NTPCClusters(mRec->MemoryScalers()->nTPCdigits));
  if (processors()->tpcMerger.Memory()) {
    addToMap("TPC Tracks", usageMap, processors()->tpcMerger.NMergedTracks(), processors()->tpcMerger.NMaxTracks());
    addToMap("TPC TrackHits", usageMap, processors()->tpcMerger.NMergedTrackClusters(), processors()->tpcMerger.NMaxMergedTrackClusters());
  }

  if (mRec->GetProcessingSettings().createO2Output) {
    addToMap("TPC O2 Tracks", usageMap, processors()->tpcMerger.NOutputTracksTPCO2(), processors()->tpcMerger.NOutputTracksTPCO2());
    addToMap("TPC O2 ClusRefs", usageMap, processors()->tpcMerger.NOutputClusRefsTPCO2(), processors()->tpcMerger.NOutputClusRefsTPCO2());
  }

#ifdef GPUCA_TPC_GEOMETRY_O2
  if (processors()->tpcCompressor.mOutput) {
    addToMap("TPC ComprCache HitsAttached", usageMap, processors()->tpcCompressor.mOutput->nAttachedClusters, processors()->tpcCompressor.mMaxTrackClusters);
    addToMap("TPC ComprCache HitsUnattached", usageMap, processors()->tpcCompressor.mOutput->nUnattachedClusters, processors()->tpcCompressor.mMaxClustersInCache);
    addToMap("TPC ComprCache Tracks", usageMap, processors()->tpcCompressor.mOutput->nTracks, processors()->tpcCompressor.mMaxTracks);
  }
#endif

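  // Columns: summed use / summed bound (overall fill% / worst single fill% / number of
  // contributing allocations / largest single use).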
  for (auto& elem : usageMap) {
    printf("Mem Usage %-30s : %'14zu / %'14zu (%3.0f%% / %3.0f%% / count %3u / max %'14zu)\n", elem.first.c_str(), elem.second.nSum, elem.second.nBoundSum, 100. * elem.second.nSum / std::max<size_t>(1, elem.second.nBoundSum), 100. * elem.second.maxUse, elem.second.count, elem.second.nMax);
  }
}

void GPUChainTracking::PrintMemoryRelations()
{
  for (int32_t i = 0; i < NSECTORS; i++) {
    GPUInfo("MEMREL StartHits NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NStartHits());
    GPUInfo("MEMREL Tracklets NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTracklets());
    GPUInfo("MEMREL TrackletHits NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NRowHits());
    GPUInfo("MEMREL SectorTracks NCl %d NTrk %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTracks());
    GPUInfo("MEMREL SectorTrackHits NCl %d NTrkH %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTrackHits());
  }
  if (processors()->tpcMerger.Memory()) {
    GPUInfo("MEMREL Tracks NCl %d NTrk %d", processors()->tpcMerger.NMaxClusters(), processors()->tpcMerger.NMergedTracks());
    GPUInfo("MEMREL TrackHits NCl %d NTrkH %d", processors()->tpcMerger.NMaxClusters(), processors()->tpcMerger.NMergedTrackClusters());
  }
}

void GPUChainTracking::PrepareDebugOutput()
{
#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
  const auto& threadContext = GetThreadContext();
  if (mRec->IsGPU()) {
    SetupGPUProcessor(&processors()->debugOutput, false);
    WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->debugOutput - (char*)processors(), &processorsShadow()->debugOutput, sizeof(processors()->debugOutput), -1);
    memset(processors()->debugOutput.memory(), 0, processors()->debugOutput.memorySize() * sizeof(processors()->debugOutput.memory()[0]));
  }
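  // Clear the debug buffer on the device (or in host memory when running on the CPU).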
  runKernel<GPUMemClean16>({{BlockCount(), ThreadCount(), 0, RecoStep::TPCSectorTracking}}, (mRec->IsGPU() ? processorsShadow() : processors())->debugOutput.memory(), processorsShadow()->debugOutput.memorySize() * sizeof(processors()->debugOutput.memory()[0]));
#endif
}

void GPUChainTracking::PrintDebugOutput()
{
#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
  const auto& threadContext = GetThreadContext();
  TransferMemoryResourcesToHost(RecoStep::NoRecoStep, &processors()->debugOutput, -1);
  processors()->debugOutput.Print();
#endif
}

void GPUChainTracking::PrintOutputStat()
{
  int32_t nTracks = 0, nAttachedClusters = 0, nAttachedClustersFitted = 0, nAdjacentClusters = 0;
  uint32_t nCls = GetProcessingSettings().doublePipeline ? mIOPtrs.clustersNative->nClustersTotal : processors()->tpcMerger.NMaxClusters();
  if (GetProcessingSettings().createO2Output > 1) {
    nTracks = mIOPtrs.nOutputTracksTPCO2;
    nAttachedClusters = mIOPtrs.nMergedTrackHits;
  } else {
    for (uint32_t k = 0; k < mIOPtrs.nMergedTracks; k++) {
      if (mIOPtrs.mergedTracks[k].OK()) {
        if (!mIOPtrs.mergedTracks[k].MergedLooper()) {
          nTracks++;
        }
        nAttachedClusters += mIOPtrs.mergedTracks[k].NClusters();
        nAttachedClustersFitted += mIOPtrs.mergedTracks[k].NClustersFitted();
      }
    }
    for (uint32_t k = 0; k < nCls; k++) {
      int32_t attach = mIOPtrs.mergedTrackHitAttachment[k];
      if (attach & gputpcgmmergertypes::attachFlagMask) { // cluster associated with some track
        nAdjacentClusters++;
      }
    }
  }

  char trdText[1024] = "";
  if (GetRecoSteps() & RecoStep::TRDTracking) {
    int32_t nTRDTracks = 0;
    int32_t nTRDTracklets = 0;
    for (uint32_t k = 0; k < mIOPtrs.nTRDTracks; k++) {
      if (mIOPtrs.trdTracksO2) {
        auto& trk = mIOPtrs.trdTracksO2[k];
        nTRDTracklets += trk.getNtracklets();
        nTRDTracks += trk.getNtracklets() != 0;
      } else {
        auto& trk = mIOPtrs.trdTracks[k];
        nTRDTracklets += trk.getNtracklets();
        nTRDTracks += trk.getNtracklets() != 0;
      }
    }
    snprintf(trdText, 1024, " - TRD Tracker reconstructed %d tracks (%d tracklets)", nTRDTracks, nTRDTracklets);
  }
  GPUInfo("Output Tracks: %d (%d / %d / %d / %d clusters (fitted / attached / adjacent / total) - %s format)%s", nTracks, nAttachedClustersFitted, nAttachedClusters, nAdjacentClusters, nCls, GetProcessingSettings().createO2Output > 1 ? "O2" : "GPU", trdText);
}

void GPUChainTracking::OutputSanityCheck()
{
  size_t nErrors = 0;

  for (uint32_t i = 0; i < mIOPtrs.nOutputTracksTPCO2; i++) {
    const auto& trk = mIOPtrs.outputTracksTPCO2[i];
    const auto& ref = trk.getClusterRef();
    if (ref.getFirstEntry() > mIOPtrs.nOutputClusRefsTPCO2) {
      if (nErrors++ < 1000) {
        GPUError("Invalid getFirst() entry in cluster reference: %u > %u", ref.getFirstEntry(), mIOPtrs.nOutputClusRefsTPCO2);
        continue;
      }
    }
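    // Each cluster reference uses one uint32_t index plus two packed bytes (sector, row),
    // i.e. 3/2 uint32_t words per cluster, rounded up; hence the (entries * 3 + 1) / 2.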
    if (ref.getFirstEntry() + (ref.getEntries() * 3 + 1) / 2 > mIOPtrs.nOutputClusRefsTPCO2) {
      if (nErrors++ < 1000) {
        GPUError("Invalid getEntries() entry in cluster reference: %u > %u", ref.getFirstEntry() + (ref.getEntries() * 3 + 1) / 2, mIOPtrs.nOutputClusRefsTPCO2);
        continue;
      }
    }
    for (int32_t j = 0; j < trk.getNClusters(); j++) {
      uint8_t sector, row;
      uint32_t cl;
      trk.getClusterReference(mIOPtrs.outputClusRefsTPCO2, j, sector, row, cl);
      if (sector >= GPUCA_NSECTORS || row >= GPUCA_ROW_COUNT) {
        if (nErrors++ < 1000) {
          GPUError("Invalid sector / row %d / %d", (int32_t)sector, (int32_t)row);
          continue;
        }
      }
      if (cl >= mIOPtrs.clustersNative->nClusters[sector][row]) {
        if (nErrors++ < 1000) {
          GPUError("Invalid cluster index %d >= %d", cl, mIOPtrs.clustersNative->nClusters[sector][row]);
        }
      }
    }
  }

  if (nErrors == 0) {
    GPUInfo("Sanity check passed");
  } else {
    GPUError("Sanity check found %zu errors", nErrors);
  }
}

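// Filters TPC clusters on the CPU in two passes and compacts the survivors into a
// caller-provided buffer. The allocator callback receives the surviving cluster count
// from the counting pass and must return storage for that many clusters. A minimal
// (hypothetical) usage sketch, assuming a std::vector as backing store:
//
//   std::vector<o2::tpc::ClusterNative> buffer;
//   RunTPCClusterFilter(clusters, [&buffer](size_t n) {
//     buffer.resize(n); // keep the storage alive as long as `clusters` is used
//     return buffer.data();
//   }, true);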
void GPUChainTracking::RunTPCClusterFilter(o2::tpc::ClusterNativeAccess* clusters, std::function<o2::tpc::ClusterNative*(size_t)> allocator, bool applyClusterCuts)
{
  const uint8_t filterType = GetProcessingSettings().tpcApplyClusterFilterOnCPU;
  GPUTPCClusterFilter clusterFilter(*clusters, filterType);
  o2::tpc::ClusterNative* outputBuffer = nullptr;
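  // Two phases: phase 0 only counts the surviving clusters (to size the output buffer
  // via the allocator), phase 1 writes them compacted into outputBuffer and updates the
  // per-row cluster counts.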
  for (int32_t iPhase = 0; iPhase < 2; iPhase++) {
    uint32_t countTotal = 0;
    for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
      for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) {
        uint32_t count = 0;
        for (uint32_t k = 0; k < clusters->nClusters[iSector][iRow]; k++) {
          o2::tpc::ClusterNative cl = clusters->clusters[iSector][iRow][k];
          bool keep = true;
          if (applyClusterCuts) {
            keep = keep && cl.qTot > param().rec.tpc.cfQTotCutoff && cl.qMax > param().rec.tpc.cfQMaxCutoff;
            keep = keep && (!(cl.getFlags() & o2::tpc::ClusterNative::flagSingle) || ((cl.sigmaPadPacked || cl.qMax > param().rec.tpc.cfQMaxCutoffSinglePad) && (cl.sigmaTimePacked || cl.qMax > param().rec.tpc.cfQMaxCutoffSingleTime)));
          }
          if (param().tpcCutTimeBin > 0) {
            keep = keep && cl.getTime() < param().tpcCutTimeBin;
          }
          keep = keep && (!filterType || clusterFilter.filter(iSector, iRow, cl));
          if (iPhase && keep) {
            outputBuffer[countTotal] = cl;
          }
          count += keep;
          countTotal += keep;
        }
        if (iPhase) {
          clusters->nClusters[iSector][iRow] = count;
        }
      }
    }
    if (iPhase) {
      clusters->clustersLinear = outputBuffer;
      clusters->setOffsetPtrs();
    } else {
      outputBuffer = allocator(countTotal);
    }
  }
}

void GPUChainTracking::DumpClusters(std::ostream& out, const o2::tpc::ClusterNativeAccess* clusters)
{
  out << "\nTPC Clusters:\n";
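  // Per-cluster columns: timeFlagsPacked (hex), padPacked, sigmaTimePacked, sigmaPadPacked, qMax, qTot.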
  for (uint32_t iSec = 0; iSec < GPUCA_NSECTORS; iSec++) {
    out << "TPCClusters - Sector " << iSec << "\n";
    for (uint32_t i = 0; i < GPUCA_ROW_COUNT; i++) {
      out << " Row: " << i << ": " << clusters->nClusters[iSec][i] << " clusters:\n";
      for (uint32_t j = 0; j < clusters->nClusters[iSec][i]; j++) {
        const auto& cl = clusters->clusters[iSec][i][j];
        out << " " << std::hex << cl.timeFlagsPacked << std::dec << " " << cl.padPacked << " " << int32_t{cl.sigmaTimePacked} << " " << int32_t{cl.sigmaPadPacked} << " " << cl.qMax << " " << cl.qTot << "\n";
      }
    }
  }
}

void GPUChainTracking::DebugSortCompressedClusters(o2::tpc::CompressedClustersFlat* cls)
{
  auto& c = *cls;
  std::vector<uint32_t> sorted(c.nTracks), offsets(c.nTracks);
  std::iota(sorted.begin(), sorted.end(), 0);
  auto sorter = [&c](const auto a, const auto b) {
    return std::tie(c.sliceA[a], c.rowA[a], c.timeA[a], c.padA[a], c.qPtA[a]) <
           std::tie(c.sliceA[b], c.rowA[b], c.timeA[b], c.padA[b], c.qPtA[b]);
  };
  std::sort(sorted.begin(), sorted.end(), sorter);
  uint32_t offset = 0;
  for (uint32_t i = 0; i < c.nTracks; i++) {
    offsets[i] = offset;
    offset += c.nTrackClusters[i];
  }

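  // Apply the sorted permutation to each per-track array: copy the array aside, then
  // write the tracks' blocks back in sorted order. getOffset/getSize select the source
  // block of track j for the respective array layout (one entry per attached cluster,
  // one per cluster minus the first, or one per track).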
  auto sortArray = [&c, &sorted, &offsets](auto* src, size_t totalSize, auto getOffset, auto getSize) {
    auto buf = std::make_unique<std::remove_reference_t<decltype(src[0])>[]>(totalSize);
    memcpy(buf.get(), src, totalSize * sizeof(*src));
    uint32_t targetOffset = 0;
    for (uint32_t i = 0; i < c.nTracks; i++) {
      const uint32_t j = sorted[i];
      memcpy(src + targetOffset, buf.get() + getOffset(offsets[j], j), getSize(j) * sizeof(*src));
      targetOffset += getSize(j);
    }
  };
  auto sortMultiple = [&sortArray](size_t totalSize, auto getOffset, auto getSize, auto&&... arrays) {
    (..., sortArray(std::forward<decltype(arrays)>(arrays), totalSize, getOffset, getSize));
  };
  auto getFullOffset = [](uint32_t off, uint32_t ind) { return off; };
  auto getReducedOffset = [](uint32_t off, uint32_t ind) { return off - ind; };
  auto getIndex = [](uint32_t off, uint32_t ind) { return ind; };
  auto getN = [&c](uint32_t j) { return c.nTrackClusters[j]; };
  auto getN1 = [&c](uint32_t j) { return c.nTrackClusters[j] - 1; };
  auto get1 = [](uint32_t j) { return 1; };

  sortMultiple(c.nAttachedClusters, getFullOffset, getN, c.qTotA, c.qMaxA, c.flagsA, c.sigmaPadA, c.sigmaTimeA);
  sortMultiple(c.nAttachedClustersReduced, getReducedOffset, getN1, c.rowDiffA, c.sliceLegDiffA, c.padResA, c.timeResA);
  sortMultiple(c.nTracks, getIndex, get1, c.qPtA, c.rowA, c.sliceA, c.timeA, c.padA, c.nTrackClusters); // NOTE: This must be last, since nTrackClusters is used for handling the arrays above!
}

void GPUChainTracking::DumpRawTPCData() // NOTE: original function header lost in extraction; name assumed
{
  std::string dirName = mRec->getDebugFolder("tpc_raw");
  if (dirName == "") {
    return;
  }
  GPUTrackingInOutPointers ioPtrs;
  if (mIOPtrs.tpcZS) {
    ioPtrs.tpcZS = mIOPtrs.tpcZS;
  } else if (mIOPtrs.tpcPackedDigits) {
    ioPtrs.tpcPackedDigits = mIOPtrs.tpcPackedDigits;
  } else if (mIOPtrs.clustersNative) {
    ioPtrs.clustersNative = mIOPtrs.clustersNative;
  }

  GPUInfo("Doing debug raw dump");
  mRec->DumpSettings((dirName + "/").c_str());
  DumpData((dirName + "/event.0.dump").c_str(), &ioPtrs);
}