GPUChainTrackingDebugAndProfiling.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

#include "GPUChainTracking.h"
#include "GPUConstantMem.h"
#include "GPUTPCClusterFilter.h"
#include <map>
#include <memory>
#include <string>
#include <numeric>

#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
#include "bitmapfile.h"
#endif

#define PROFILE_MAX_SIZE (100 * 1024 * 1024)

using namespace o2::gpu;

static inline uint32_t RGB(uint8_t r, uint8_t g, uint8_t b) { return (uint32_t)r | ((uint32_t)g << 8) | ((uint32_t)b << 16); }

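// Tracklet-constructor profiling support (only compiled with GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE):
// every GPU thread records the stage it has reached at each synchronization point into the
// mStageAtSync buffer. PrepareProfile() allocates and zeroes that buffer on the device before the run.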
int32_t GPUChainTracking::PrepareProfile()
{
#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
  char* tmpMem = (char*)mRec->AllocateDirectMemory(PROFILE_MAX_SIZE, GPUMemoryResource::MEMORY_GPU);
  processorsShadow()->tpcTrackers[0].mStageAtSync = tmpMem;
  runKernel<GPUMemClean16>({{BlockCount(), ThreadCount(), -1}}, tmpMem, PROFILE_MAX_SIZE);
#endif
  return 0;
}

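// Converts the recorded stages into profile.txt (one tab-separated value per thread and sync point)
// and profile.bmp, where each bitmap row is one synchronization point and each pixel one thread:
// red = stage 1, green = stage 2, blue = stage 3, yellow = stage 4, black = idle, with a white
// separator column after every 32 threads (one warp).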
int32_t GPUChainTracking::DoProfile()
{
#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
  std::unique_ptr<char[]> stageAtSync{new char[PROFILE_MAX_SIZE]};
  mRec->GPUMemCpy(stageAtSync.get(), processorsShadow()->tpcTrackers[0].mStageAtSync, PROFILE_MAX_SIZE, -1, false);

  FILE* fp = fopen("profile.txt", "w+");
  FILE* fp2 = fopen("profile.bmp", "w+b");

  const int32_t bmpheight = 8192;
  BITMAPFILEHEADER bmpFH;
  BITMAPINFOHEADER bmpIH;
  memset(&bmpFH, 0, sizeof(bmpFH));
  memset(&bmpIH, 0, sizeof(bmpIH));

  bmpFH.bfType = 19778; //"BM"
  bmpFH.bfSize = sizeof(bmpFH) + sizeof(bmpIH) + (ConstructorBlockCount() * ConstructorThreadCount() / 32 * 33 - 1) * bmpheight;
  bmpFH.bfOffBits = sizeof(bmpFH) + sizeof(bmpIH);

  bmpIH.biSize = sizeof(bmpIH);
  bmpIH.biWidth = ConstructorBlockCount() * ConstructorThreadCount() / 32 * 33 - 1;
  bmpIH.biHeight = bmpheight;
  bmpIH.biPlanes = 1;
  bmpIH.biBitCount = 32;

  fwrite(&bmpFH, 1, sizeof(bmpFH), fp2);
  fwrite(&bmpIH, 1, sizeof(bmpIH), fp2);

  [[maybe_unused]] int32_t nEmptySync = 0;
  for (uint32_t i = 0; i < bmpheight * ConstructorBlockCount() * ConstructorThreadCount(); i += ConstructorBlockCount() * ConstructorThreadCount()) {
    int32_t fEmpty = 1;
    for (uint32_t j = 0; j < ConstructorBlockCount() * ConstructorThreadCount(); j++) {
      fprintf(fp, "%d\t", stageAtSync[i + j]);
      int32_t color = 0;
      if (stageAtSync[i + j] == 1) {
        color = RGB(255, 0, 0);
      }
      if (stageAtSync[i + j] == 2) {
        color = RGB(0, 255, 0);
      }
      if (stageAtSync[i + j] == 3) {
        color = RGB(0, 0, 255);
      }
      if (stageAtSync[i + j] == 4) {
        color = RGB(255, 255, 0);
      }
      fwrite(&color, 1, sizeof(int32_t), fp2);
      if (j > 0 && j % 32 == 0) {
        color = RGB(255, 255, 255);
        fwrite(&color, 1, 4, fp2);
      }
      if (stageAtSync[i + j]) {
        fEmpty = 0;
      }
    }
    fprintf(fp, "\n");
    if (fEmpty) {
      nEmptySync++;
    } else {
      nEmptySync = 0;
    }
    // if (nEmptySync == GPUCA_SCHED_ROW_STEP + 2) break;
  }

  fclose(fp);
  fclose(fp2);
#endif
  return 0;
}

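// Per-buffer accumulator for PrintMemoryStatistics(): tracks the largest single occupancy (nMax),
// the summed occupancy and summed allocation bound over all instances (nSum / nBoundSum), the number
// of instances (count), and the worst per-instance fill fraction (maxUse).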
namespace
{
struct GPUChainTrackingMemUsage {
  void add(size_t n, size_t bound)
  {
    nMax = std::max(nMax, n);
    maxUse = std::max(n / std::max<double>(bound, 1.), maxUse);
    nSum += n;
    nBoundSum += bound;
    count++;
  }
  size_t nMax = 0;
  size_t nSum = 0;
  size_t nBoundSum = 0;
  double maxUse = 0.;
  uint32_t count = 0;
};

void addToMap(std::string name, std::map<std::string, GPUChainTrackingMemUsage>& map, uint64_t n, uint64_t bound)
{
  GPUChainTrackingMemUsage& obj = map.insert({name, {}}).first->second;
  obj.add(n, bound);
}
} // namespace

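// Prints one line per buffer type with the summed occupancy versus the summed allocation bound,
// the overall and worst-case fill percentages, the number of contributing instances, and the
// largest single instance.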
void GPUChainTracking::PrintMemoryStatistics()
{
  std::map<std::string, GPUChainTrackingMemUsage> usageMap;
  for (int32_t i = 0; i < NSECTORS; i++) {
#ifdef GPUCA_TPC_GEOMETRY_O2
    addToMap("TPC Clusterer Sector Peaks", usageMap, processors()->tpcClusterer[i].mPmemory->counters.nPeaks, processors()->tpcClusterer[i].mNMaxPeaks);
    addToMap("TPC Clusterer Sector Clusters", usageMap, processors()->tpcClusterer[i].mPmemory->counters.nClusters, processors()->tpcClusterer[i].mNMaxClusters);
#endif
    addToMap("TPC Sector Start Hits", usageMap, *processors()->tpcTrackers[i].NStartHits(), processors()->tpcTrackers[i].NMaxStartHits());
    addToMap("TPC Sector Tracklets", usageMap, *processors()->tpcTrackers[i].NTracklets(), processors()->tpcTrackers[i].NMaxTracklets());
    addToMap("TPC Sector TrackletHits", usageMap, *processors()->tpcTrackers[i].NRowHits(), processors()->tpcTrackers[i].NMaxRowHits());
    addToMap("TPC Sector Tracks", usageMap, *processors()->tpcTrackers[i].NTracks(), processors()->tpcTrackers[i].NMaxTracks());
    addToMap("TPC Sector TrackHits", usageMap, *processors()->tpcTrackers[i].NTrackHits(), processors()->tpcTrackers[i].NMaxTrackHits());
  }
  addToMap("TPC Clusterer Clusters", usageMap, mRec->MemoryScalers()->nTPCHits, mRec->MemoryScalers()->NTPCClusters(mRec->MemoryScalers()->nTPCdigits));
  addToMap("TPC Tracks", usageMap, processors()->tpcMerger.NMergedTracks(), processors()->tpcMerger.NMaxTracks());
  addToMap("TPC TrackHits", usageMap, processors()->tpcMerger.NMergedTrackClusters(), processors()->tpcMerger.NMaxMergedTrackClusters());

  if (mRec->GetProcessingSettings().createO2Output) {
    addToMap("TPC O2 Tracks", usageMap, processors()->tpcMerger.NOutputTracksTPCO2(), processors()->tpcMerger.NOutputTracksTPCO2());
    addToMap("TPC O2 ClusRefs", usageMap, processors()->tpcMerger.NOutputClusRefsTPCO2(), processors()->tpcMerger.NOutputClusRefsTPCO2());
  }

#ifdef GPUCA_TPC_GEOMETRY_O2
  addToMap("TPC ComprCache HitsAttached", usageMap, processors()->tpcCompressor.mOutput->nAttachedClusters, processors()->tpcCompressor.mMaxTrackClusters);
  addToMap("TPC ComprCache HitsUnattached", usageMap, processors()->tpcCompressor.mOutput->nUnattachedClusters, processors()->tpcCompressor.mMaxClustersInCache);
  addToMap("TPC ComprCache Tracks", usageMap, processors()->tpcCompressor.mOutput->nTracks, processors()->tpcCompressor.mMaxTracks);
#endif

  for (auto& elem : usageMap) {
    printf("Mem Usage %-30s : %'14zu / %'14zu (%3.0f%% / %3.0f%% / count %3u / max %'14zu)\n", elem.first.c_str(), elem.second.nSum, elem.second.nBoundSum, 100. * elem.second.nSum / std::max<size_t>(1, elem.second.nBoundSum), 100. * elem.second.maxUse, elem.second.count, elem.second.nMax);
  }
}

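// Logs raw "MEMREL" pairs (total number of clusters versus number of produced objects) per sector
// and for the merger, presumably as input for tuning the GPUMemorySizeScalers used to size buffers.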
void GPUChainTracking::PrintMemoryRelations()
{
  for (int32_t i = 0; i < NSECTORS; i++) {
    GPUInfo("MEMREL StartHits NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NStartHits());
    GPUInfo("MEMREL Tracklets NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTracklets());
    GPUInfo("MEMREL TrackletHits NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NRowHits());
    GPUInfo("MEMREL SectorTracks NCl %d NTrk %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTracks());
    GPUInfo("MEMREL SectorTrackHits NCl %d NTrkH %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTrackHits());
  }
  GPUInfo("MEMREL Tracks NCl %d NTrk %d", processors()->tpcMerger.NMaxClusters(), processors()->tpcMerger.NMergedTracks());
  GPUInfo("MEMREL TrackHits NCl %d NTrkH %d", processors()->tpcMerger.NMaxClusters(), processors()->tpcMerger.NMergedTrackClusters());
}

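// With GPUCA_KERNEL_DEBUGGER_OUTPUT enabled, sets up the debugOutput processor (and registers it in
// constant memory when running on a GPU) and clears its memory, so that kernels can write debug
// information into it.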
void GPUChainTracking::PrepareDebugOutput()
{
#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
  const auto& threadContext = GetThreadContext();
  if (mRec->IsGPU()) {
    SetupGPUProcessor(&processors()->debugOutput, false);
    WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->debugOutput - (char*)processors(), &processorsShadow()->debugOutput, sizeof(processors()->debugOutput), -1);
    memset(processors()->debugOutput.memory(), 0, processors()->debugOutput.memorySize() * sizeof(processors()->debugOutput.memory()[0]));
  }
  runKernel<GPUMemClean16>({{BlockCount(), ThreadCount(), 0, RecoStep::TPCSectorTracking}}, (mRec->IsGPU() ? processorsShadow() : processors())->debugOutput.memory(), processorsShadow()->debugOutput.memorySize() * sizeof(processors()->debugOutput.memory()[0]));
#endif
}

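// Transfers the kernel debug-output buffer back to the host and prints its contents.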
void GPUChainTracking::PrintDebugOutput()
{
#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
  const auto& threadContext = GetThreadContext();
  TransferMemoryResourcesToHost(RecoStep::NoRecoStep, &processors()->debugOutput, -1);
  processors()->debugOutput.Print();
#endif
}

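// Prints a one-line summary of the reconstruction output: the number of output tracks and the
// fitted / attached / adjacent / total cluster counts, plus TRD track and tracklet counts when
// TRD tracking was run.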
void GPUChainTracking::PrintOutputStat()
{
  int32_t nTracks = 0, nAttachedClusters = 0, nAttachedClustersFitted = 0, nAdjacentClusters = 0;
  uint32_t nCls = GetProcessingSettings().doublePipeline ? mIOPtrs.clustersNative->nClustersTotal : processors()->tpcMerger.NMaxClusters();
  if (GetProcessingSettings().createO2Output > 1) {
    nTracks = mIOPtrs.nOutputTracksTPCO2;
    nAttachedClusters = mIOPtrs.nMergedTrackHits;
  } else {
    for (uint32_t k = 0; k < mIOPtrs.nMergedTracks; k++) {
      if (mIOPtrs.mergedTracks[k].OK()) {
        if (!mIOPtrs.mergedTracks[k].MergedLooper()) {
          nTracks++;
        }
        nAttachedClusters += mIOPtrs.mergedTracks[k].NClusters();
        nAttachedClustersFitted += mIOPtrs.mergedTracks[k].NClustersFitted();
      }
    }
    for (uint32_t k = 0; k < nCls; k++) {
      int32_t attach = mIOPtrs.mergedTrackHitAttachment[k];
      if (attach & gputpcgmmergertypes::attachFlagMask) {
        nAdjacentClusters++;
      }
    }
  }

  char trdText[1024] = "";
  if (GetRecoSteps() & RecoStep::TRDTracking) {
    int32_t nTRDTracks = 0;
    int32_t nTRDTracklets = 0;
    for (uint32_t k = 0; k < mIOPtrs.nTRDTracks; k++) {
      if (mIOPtrs.trdTracksO2) {
        auto& trk = mIOPtrs.trdTracksO2[k];
        nTRDTracklets += trk.getNtracklets();
        nTRDTracks += trk.getNtracklets() != 0;
      } else {
        auto& trk = mIOPtrs.trdTracks[k];
        nTRDTracklets += trk.getNtracklets();
        nTRDTracks += trk.getNtracklets() != 0;
      }
    }
    snprintf(trdText, 1024, " - TRD Tracker reconstructed %d tracks (%d tracklets)", nTRDTracks, nTRDTracklets);
  }
  GPUInfo("Output Tracks: %d (%d / %d / %d / %d clusters (fitted / attached / adjacent / total) - %s format)%s", nTracks, nAttachedClustersFitted, nAttachedClusters, nAdjacentClusters, nCls, GetProcessingSettings().createO2Output > 1 ? "O2" : "GPU", trdText);
}

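// Validates the O2-format output: every track's cluster reference must stay inside the cluster
// reference buffer, and every referenced sector / row / cluster index must exist in the native
// cluster structure. The (getEntries() * 3 + 1) / 2 term presumably accounts for the packing of
// the references (a 32-bit cluster index plus one byte of sector and one byte of row per cluster,
// stored in a common 32-bit array). At most 1000 errors are printed.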
void GPUChainTracking::OutputSanityCheck()
{
  size_t nErrors = 0;

  for (uint32_t i = 0; i < mIOPtrs.nOutputTracksTPCO2; i++) {
    const auto& trk = mIOPtrs.outputTracksTPCO2[i];
    const auto& ref = trk.getClusterRef();
    if (ref.getFirstEntry() > mIOPtrs.nOutputClusRefsTPCO2) {
      if (nErrors++ < 1000) {
        GPUError("Invalid getFirstEntry() in cluster reference: %u > %u", ref.getFirstEntry(), mIOPtrs.nOutputClusRefsTPCO2);
        continue;
      }
    }
    if (ref.getFirstEntry() + (ref.getEntries() * 3 + 1) / 2 > mIOPtrs.nOutputClusRefsTPCO2) {
      if (nErrors++ < 1000) {
        GPUError("Invalid getEntries() in cluster reference: %u > %u", ref.getFirstEntry() + (ref.getEntries() * 3 + 1) / 2, mIOPtrs.nOutputClusRefsTPCO2);
        continue;
      }
    }
    for (int32_t j = 0; j < trk.getNClusters(); j++) {
      uint8_t sector, row;
      uint32_t cl;
      trk.getClusterReference(mIOPtrs.outputClusRefsTPCO2, j, sector, row, cl);
      if (sector >= GPUCA_NSECTORS || row >= GPUCA_ROW_COUNT) {
        if (nErrors++ < 1000) {
          GPUError("Invalid sector / row %d / %d", (int32_t)sector, (int32_t)row);
          continue;
        }
      }
      if (cl >= mIOPtrs.clustersNative->nClusters[sector][row]) {
        if (nErrors++ < 1000) {
          GPUError("Invalid cluster index %d >= %d", cl, mIOPtrs.clustersNative->nClusters[sector][row]);
        }
      }
    }
  }

  if (nErrors == 0) {
    GPUInfo("Sanity check passed");
  } else {
    GPUError("Sanity check found %zu errors", nErrors);
  }
}

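// Two-phase CPU cluster filter: phase 0 applies the charge / single-pad / time-bin cuts (and the
// optional GPUTPCClusterFilter) only to count surviving clusters and requests an output buffer of
// that size from the caller-provided allocator; phase 1 repeats the selection, writes the survivors
// into the buffer, and updates the per-row counts and offset pointers of the ClusterNativeAccess.
//
// Hypothetical usage sketch (the vector and lambda are illustrative, not part of this file):
//   std::vector<o2::tpc::ClusterNative> filtered;
//   RunTPCClusterFilter(clusterAccess, [&filtered](size_t n) { filtered.resize(n); return filtered.data(); }, true);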
void GPUChainTracking::RunTPCClusterFilter(o2::tpc::ClusterNativeAccess* clusters, std::function<o2::tpc::ClusterNative*(size_t)> allocator, bool applyClusterCuts)
{
  const uint8_t filterType = GetProcessingSettings().tpcApplyClusterFilterOnCPU;
  GPUTPCClusterFilter clusterFilter(*clusters, filterType);
  o2::tpc::ClusterNative* outputBuffer = nullptr;
  for (int32_t iPhase = 0; iPhase < 2; iPhase++) {
    uint32_t countTotal = 0;
    for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
      for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) {
        uint32_t count = 0;
        for (uint32_t k = 0; k < clusters->nClusters[iSector][iRow]; k++) {
          o2::tpc::ClusterNative cl = clusters->clusters[iSector][iRow][k];
          bool keep = true;
          if (applyClusterCuts) {
            keep = keep && cl.qTot > param().rec.tpc.cfQTotCutoff && cl.qMax > param().rec.tpc.cfQMaxCutoff;
            keep = keep && (!(cl.getFlags() & o2::tpc::ClusterNative::flagSingle) || ((cl.sigmaPadPacked || cl.qMax > param().rec.tpc.cfQMaxCutoffSinglePad) && (cl.sigmaTimePacked || cl.qMax > param().rec.tpc.cfQMaxCutoffSingleTime)));
          }
          if (param().tpcCutTimeBin > 0) {
            keep = keep && cl.getTime() < param().tpcCutTimeBin;
          }
          keep = keep && (!filterType || clusterFilter.filter(iSector, iRow, cl));
          if (iPhase && keep) {
            outputBuffer[countTotal] = cl;
          }
          count += keep;
          countTotal += keep;
        }
        if (iPhase) {
          clusters->nClusters[iSector][iRow] = count;
        }
      }
    }
    if (iPhase) {
      clusters->clustersLinear = outputBuffer;
      clusters->setOffsetPtrs();
    } else {
      outputBuffer = allocator(countTotal);
    }
  }
}

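// Text dump of all native TPC clusters, grouped by sector and pad row: packed time/flags (hex),
// packed pad, sigma time, sigma pad, qMax and qTot, one line per cluster.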
void GPUChainTracking::DumpClusters(std::ostream& out, const o2::tpc::ClusterNativeAccess* clusters)
{
  out << "\nTPC Clusters:\n";
  for (uint32_t iSec = 0; iSec < GPUCA_NSECTORS; iSec++) {
    out << "TPCClusters - Sector " << iSec << "\n";
    for (uint32_t i = 0; i < GPUCA_ROW_COUNT; i++) {
      out << " Row: " << i << ": " << clusters->nClusters[iSec][i] << " clusters:\n";
      for (uint32_t j = 0; j < clusters->nClusters[iSec][i]; j++) {
        const auto& cl = clusters->clusters[iSec][i][j];
        out << " " << std::hex << cl.timeFlagsPacked << std::dec << " " << cl.padPacked << " " << int32_t{cl.sigmaTimePacked} << " " << int32_t{cl.sigmaPadPacked} << " " << cl.qMax << " " << cl.qTot << "\n";
      }
    }
  }
}

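// Brings the compressed-cluster output into a canonical order: tracks are sorted by the sector, row,
// time, pad and q/pt of their first cluster, and all per-track and per-attached-cluster arrays are
// permuted consistently, presumably so that compressed output from different runs or devices can be
// compared directly.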
void GPUChainTracking::DebugSortCompressedClusters(o2::tpc::CompressedClustersFlat* cls)
{
  auto& c = *cls;
  std::vector<uint32_t> sorted(c.nTracks), offsets(c.nTracks);
  std::iota(sorted.begin(), sorted.end(), 0);
  auto sorter = [&c](const auto a, const auto b) {
    return std::tie(c.sliceA[a], c.rowA[a], c.timeA[a], c.padA[a], c.qPtA[a]) <
           std::tie(c.sliceA[b], c.rowA[b], c.timeA[b], c.padA[b], c.qPtA[b]);
  };
  std::sort(sorted.begin(), sorted.end(), sorter);
  uint32_t offset = 0;
  for (uint32_t i = 0; i < c.nTracks; i++) {
    offsets[i] = offset;
    offset += c.nTrackClusters[i];
  }

  auto sortArray = [&c, &sorted, &offsets](auto* src, size_t totalSize, auto getOffset, auto getSize) {
    auto buf = std::make_unique<std::remove_reference_t<decltype(src[0])>[]>(totalSize);
    memcpy(buf.get(), src, totalSize * sizeof(*src));
    uint32_t targetOffset = 0;
    for (uint32_t i = 0; i < c.nTracks; i++) {
      const uint32_t j = sorted[i];
      memcpy(src + targetOffset, buf.get() + getOffset(offsets[j], j), getSize(j) * sizeof(*src));
      targetOffset += getSize(j);
    }
  };
  auto sortMultiple = [&sortArray](size_t totalSize, auto getOffset, auto getSize, auto&&... arrays) {
    (..., sortArray(std::forward<decltype(arrays)>(arrays), totalSize, getOffset, getSize));
  };
  auto getFullOffset = [](uint32_t off, uint32_t ind) { return off; };
  auto getReducedOffset = [](uint32_t off, uint32_t ind) { return off - ind; };
  auto getIndex = [](uint32_t off, uint32_t ind) { return ind; };
  auto getN = [&c](uint32_t j) { return c.nTrackClusters[j]; };
  auto getN1 = [&c](uint32_t j) { return c.nTrackClusters[j] - 1; };
  auto get1 = [](uint32_t j) { return 1; };

  sortMultiple(c.nAttachedClusters, getFullOffset, getN, c.qTotA, c.qMaxA, c.flagsA, c.sigmaPadA, c.sigmaTimeA);
  sortMultiple(c.nAttachedClustersReduced, getReducedOffset, getN1, c.rowDiffA, c.sliceLegDiffA, c.padResA, c.timeResA);
  sortMultiple(c.nTracks, getIndex, get1, c.qPtA, c.rowA, c.sliceA, c.timeA, c.padA, c.nTrackClusters); // NOTE: This must be last, since nTrackClusters is used for handling the arrays above!
}

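// Debug helper that dumps the raw TPC input (zero-suppressed pages, packed digits, or native
// clusters, whichever is available in mIOPtrs) together with the current settings into the
// "tpc_raw" debug folder, if one is configured.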
{
  std::string dirName = mRec->getDebugFolder("tpc_raw");
  if (dirName == "") {
    return;
  }
  GPUTrackingInOutPointers ioPtrs;
  if (mIOPtrs.tpcZS) {
    ioPtrs.tpcZS = mIOPtrs.tpcZS;
  } else if (mIOPtrs.tpcPackedDigits) {
    ioPtrs.tpcPackedDigits = mIOPtrs.tpcPackedDigits;
  } else if (mIOPtrs.clustersNative) {
    ioPtrs.clustersNative = mIOPtrs.clustersNative;
  }

  GPUInfo("Doing debug raw dump");
  mRec->DumpSettings((dirName + "/").c_str());
  DumpData((dirName + "/event.0.dump").c_str(), &ioPtrs);
}