// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file GPUChainTrackingDebugAndProfiling.cxx

#include "GPUChainTracking.h"
#include "GPUConstantMem.h"
#include "GPUTPCClusterFilter.h"
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <functional>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <vector>

#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
#include "bitmapfile.h"
#endif

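// Size of the scratch buffer for the tracklet-constructor profile dump below:
// one byte per GPU thread and synchronization point, capped at 100 MiB.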
#define PROFILE_MAX_SIZE (100 * 1024 * 1024)

using namespace o2::gpu;

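// Packs r into the lowest byte (r | g << 8 | b << 16). Since 32-bit BMP pixels
// are stored in B,G,R,A byte order, the red and blue channels effectively come
// out swapped in the bitmap; harmless here, as the stage colors only need to
// be distinguishable, not color-accurate.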
static inline uint32_t RGB(uint8_t r, uint8_t g, uint8_t b) { return (uint32_t)r | ((uint32_t)g << 8) | ((uint32_t)b << 16); }

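// Attaches a GPU-side profile buffer to the sector-0 tracker so the tracklet
// constructor can record its pipeline stage at every synchronization point.
// Only active in builds with GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE.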
int32_t GPUChainTracking::PrepareProfile() // NOTE: signature reconstructed, elided in the extracted listing
{
#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
  char* tmpMem = (char*)mRec->AllocateDirectMemory(PROFILE_MAX_SIZE, GPUMemoryResource::MEMORY_GPU); // NOTE: allocation reconstructed; the memory-type argument is an assumption
  processorsShadow()->tpcTrackers[0].mStageAtSync = tmpMem;
  runKernel<GPUMemClean16>({{BlockCount(), ThreadCount(), -1}}, tmpMem, PROFILE_MAX_SIZE);
#endif
  return 0;
}

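// Copies the profile buffer back from the GPU and renders it twice: as a
// tab-separated matrix (profile.txt) and as a 32-bit BMP (profile.bmp) with
// one pixel per thread and sync point, one color per stage, and a white
// separator column after every warp of 32 threads.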
int32_t GPUChainTracking::DoProfile() // NOTE: signature reconstructed, elided in the extracted listing
{
#ifdef GPUCA_TRACKLET_CONSTRUCTOR_DO_PROFILE
  std::unique_ptr<char[]> stageAtSync{new char[PROFILE_MAX_SIZE]};
  mRec->GPUMemCpy(stageAtSync.get(), processorsShadow()->tpcTrackers[0].mStageAtSync, PROFILE_MAX_SIZE, -1, false);

  FILE* fp = fopen("profile.txt", "w+");
  FILE* fp2 = fopen("profile.bmp", "w+b");

  const int32_t bmpheight = 8192;
  BITMAPFILEHEADER bmpFH;
  BITMAPINFOHEADER bmpIH;
  memset(&bmpFH, 0, sizeof(bmpFH));
  memset(&bmpIH, 0, sizeof(bmpIH));

  bmpFH.bfType = 19778; //"BM"
  bmpFH.bfSize = sizeof(bmpFH) + sizeof(bmpIH) + (ConstructorBlockCount() * ConstructorThreadCount() / 32 * 33 - 1) * bmpheight;
  bmpFH.bfOffBits = sizeof(bmpFH) + sizeof(bmpIH);

  bmpIH.biSize = sizeof(bmpIH);
  bmpIH.biWidth = ConstructorBlockCount() * ConstructorThreadCount() / 32 * 33 - 1;
  bmpIH.biHeight = bmpheight;
  bmpIH.biPlanes = 1;
  bmpIH.biBitCount = 32;

  fwrite(&bmpFH, 1, sizeof(bmpFH), fp2);
  fwrite(&bmpIH, 1, sizeof(bmpIH), fp2);

  int32_t nEmptySync = 0;
  for (uint32_t i = 0; i < bmpheight * ConstructorBlockCount() * ConstructorThreadCount(); i += ConstructorBlockCount() * ConstructorThreadCount()) {
    int32_t fEmpty = 1;
    for (uint32_t j = 0; j < ConstructorBlockCount() * ConstructorThreadCount(); j++) {
      fprintf(fp, "%d\t", stageAtSync[i + j]);
      int32_t color = 0;
      if (stageAtSync[i + j] == 1) {
        color = RGB(255, 0, 0);
      }
      if (stageAtSync[i + j] == 2) {
        color = RGB(0, 255, 0);
      }
      if (stageAtSync[i + j] == 3) {
        color = RGB(0, 0, 255);
      }
      if (stageAtSync[i + j] == 4) {
        color = RGB(255, 255, 0);
      }
      fwrite(&color, 1, sizeof(int32_t), fp2);
      if (j > 0 && j % 32 == 0) {
        color = RGB(255, 255, 255);
        fwrite(&color, 1, 4, fp2);
      }
      if (stageAtSync[i + j]) {
        fEmpty = 0;
      }
    }
    fprintf(fp, "\n");
    if (fEmpty) {
      nEmptySync++;
    } else {
      nEmptySync = 0;
    }
    (void)nEmptySync;
    // if (nEmptySync == GPUCA_SCHED_ROW_STEP + 2) break;
  }

  fclose(fp);
  fclose(fp2);
#endif
  return 0;
}

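// Helpers for PrintMemoryStatistics below: aggregate, per buffer type, the
// actual occupancy versus the allocated bound over all calls to addToMap.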
namespace
{
struct GPUChainTrackingMemUsage {
  void add(size_t n, size_t bound)
  {
    nMax = std::max(nMax, n);
    maxUse = std::max(n / std::max<double>(bound, 1.), maxUse);
    nSum += n;
    nBoundSum += bound;
    count++;
  }
  size_t nMax = 0; // explicitly initialized: add() reads nMax before the first assignment
  size_t nSum = 0;
  size_t nBoundSum = 0;
  double maxUse = 0.;
  uint32_t count = 0;
};

void addToMap(std::string name, std::map<std::string, GPUChainTrackingMemUsage>& map, uint64_t n, uint64_t bound)
{
  GPUChainTrackingMemUsage& obj = map.insert({name, {}}).first->second;
  obj.add(n, bound);
}
} // namespace

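// Prints one line per buffer type with the summed occupancy versus the summed
// allocation bound over all sectors, the worst single-buffer fill fraction,
// the number of contributing buffers, and the largest single occupancy.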
void GPUChainTracking::PrintMemoryStatistics() // NOTE: signature reconstructed, elided in the extracted listing
{
  std::map<std::string, GPUChainTrackingMemUsage> usageMap;
  for (int32_t i = 0; i < NSECTORS; i++) {
#ifdef GPUCA_TPC_GEOMETRY_O2
    addToMap("TPC Clusterer Sector Peaks", usageMap, processors()->tpcClusterer[i].mPmemory->counters.nPeaks, processors()->tpcClusterer[i].mNMaxPeaks);
    addToMap("TPC Clusterer Sector Clusters", usageMap, processors()->tpcClusterer[i].mPmemory->counters.nClusters, processors()->tpcClusterer[i].mNMaxClusters);
#endif
    addToMap("TPC Sector Start Hits", usageMap, *processors()->tpcTrackers[i].NStartHits(), processors()->tpcTrackers[i].NMaxStartHits());
    addToMap("TPC Sector Tracklets", usageMap, *processors()->tpcTrackers[i].NTracklets(), processors()->tpcTrackers[i].NMaxTracklets());
    addToMap("TPC Sector TrackletHits", usageMap, *processors()->tpcTrackers[i].NRowHits(), processors()->tpcTrackers[i].NMaxRowHits());
    addToMap("TPC Sector Tracks", usageMap, *processors()->tpcTrackers[i].NTracks(), processors()->tpcTrackers[i].NMaxTracks());
    addToMap("TPC Sector TrackHits", usageMap, *processors()->tpcTrackers[i].NTrackHits(), processors()->tpcTrackers[i].NMaxTrackHits());
  }
  addToMap("TPC Clusterer Clusters", usageMap, mRec->MemoryScalers()->nTPCHits, mRec->MemoryScalers()->NTPCClusters(mRec->MemoryScalers()->nTPCdigits));
  addToMap("TPC Tracks", usageMap, processors()->tpcMerger.NMergedTracks(), processors()->tpcMerger.NMaxTracks());
  addToMap("TPC TrackHits", usageMap, processors()->tpcMerger.NMergedTrackClusters(), processors()->tpcMerger.NMaxMergedTrackClusters());

  if (mRec->GetProcessingSettings().createO2Output) {
    addToMap("TPC O2 Tracks", usageMap, processors()->tpcMerger.NOutputTracksTPCO2(), processors()->tpcMerger.NOutputTracksTPCO2());
    addToMap("TPC O2 ClusRefs", usageMap, processors()->tpcMerger.NOutputClusRefsTPCO2(), processors()->tpcMerger.NOutputClusRefsTPCO2());
  }

#ifdef GPUCA_TPC_GEOMETRY_O2
  addToMap("TPC ComprCache HitsAttached", usageMap, processors()->tpcCompressor.mOutput->nAttachedClusters, processors()->tpcCompressor.mMaxTrackClusters);
  addToMap("TPC ComprCache HitsUnattached", usageMap, processors()->tpcCompressor.mOutput->nUnattachedClusters, processors()->tpcCompressor.mMaxClustersInCache);
  addToMap("TPC ComprCache Tracks", usageMap, processors()->tpcCompressor.mOutput->nTracks, processors()->tpcCompressor.mMaxTracks);
#endif

  for (auto& elem : usageMap) {
    printf("Mem Usage %-30s : %'14zu / %'14zu (%3.0f%% / %3.0f%% / count %3u / max %'14zu)\n", elem.first.c_str(), elem.second.nSum, elem.second.nBoundSum, 100. * elem.second.nSum / std::max<size_t>(1, elem.second.nBoundSum), 100. * elem.second.maxUse, elem.second.count, elem.second.nMax);
  }
}

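// Logs raw multiplicity relations (clusters versus start hits, tracklets,
// tracks, ...) per sector and for the merger in a greppable "MEMREL" format.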
void GPUChainTracking::PrintMemoryRelations() // NOTE: signature reconstructed, elided in the extracted listing
{
  for (int32_t i = 0; i < NSECTORS; i++) {
    GPUInfo("MEMREL StartHits NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NStartHits());
    GPUInfo("MEMREL Tracklets NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTracklets());
    GPUInfo("MEMREL TrackletHits NCl %d NTrkl %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NRowHits());
    GPUInfo("MEMREL SectorTracks NCl %d NTrk %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTracks());
    GPUInfo("MEMREL SectorTrackHits NCl %d NTrkH %d", processors()->tpcTrackers[i].NHitsTotal(), *processors()->tpcTrackers[i].NTrackHits());
  }
  GPUInfo("MEMREL Tracks NCl %d NTrk %d", processors()->tpcMerger.NMaxClusters(), processors()->tpcMerger.NMergedTracks());
  GPUInfo("MEMREL TrackHits NCl %d NTrkH %d", processors()->tpcMerger.NMaxClusters(), processors()->tpcMerger.NMergedTrackClusters());
}

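// Clears the kernel-debugger output buffer on host and device before a run.
// Only active in builds with GPUCA_KERNEL_DEBUGGER_OUTPUT.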
void GPUChainTracking::PrepareDebugOutput() // NOTE: signature reconstructed, elided in the extracted listing
{
#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
  const auto& threadContext = GetThreadContext();
  if (mRec->IsGPU()) {
    SetupGPUProcessor(&processors()->debugOutput, false);
    WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->debugOutput - (char*)processors(), &processorsShadow()->debugOutput, sizeof(processors()->debugOutput), -1);
    memset(processors()->debugOutput.memory(), 0, processors()->debugOutput.memorySize() * sizeof(processors()->debugOutput.memory()[0]));
  }
  runKernel<GPUMemClean16>({{BlockCount(), ThreadCount(), 0, RecoStep::TPCSectorTracking}}, (mRec->IsGPU() ? processorsShadow() : processors())->debugOutput.memory(), processorsShadow()->debugOutput.memorySize() * sizeof(processors()->debugOutput.memory()[0]));
#endif
}

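// Fetches the kernel-debugger buffer back to the host and prints its contents.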
void GPUChainTracking::PrintDebugOutput() // NOTE: signature reconstructed, elided in the extracted listing
{
#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
  const auto& threadContext = GetThreadContext();
  TransferMemoryResourcesToHost(RecoStep::NoRecoStep, &processors()->debugOutput, -1);
  processors()->debugOutput.Print();
#endif
}

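// Summarizes the reconstruction output: accepted tracks, fitted / attached /
// adjacent cluster counts, and, if TRD tracking ran, TRD track and tracklet
// totals.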
void GPUChainTracking::PrintOutputStat() // NOTE: signature reconstructed, elided in the extracted listing
{
  int32_t nTracks = 0, nAttachedClusters = 0, nAttachedClustersFitted = 0, nAdjacentClusters = 0;
  uint32_t nCls = GetProcessingSettings().doublePipeline ? mIOPtrs.clustersNative->nClustersTotal : processors()->tpcMerger.NMaxClusters();
  if (GetProcessingSettings().createO2Output > 1) {
    nTracks = mIOPtrs.nOutputTracksTPCO2;
    nAttachedClusters = mIOPtrs.nMergedTrackHits;
  } else {
    for (uint32_t k = 0; k < mIOPtrs.nMergedTracks; k++) {
      if (mIOPtrs.mergedTracks[k].OK()) {
        nTracks++;
        nAttachedClusters += mIOPtrs.mergedTracks[k].NClusters();
        nAttachedClustersFitted += mIOPtrs.mergedTracks[k].NClustersFitted();
      }
    }
    for (uint32_t k = 0; k < nCls; k++) {
      int32_t attach = mIOPtrs.mergedTrackHitAttachment[k];
      if (attach & gputpcgmmergertypes::attachFlagMask) { // NOTE: condition reconstructed, elided in the listing; assumes any attachment flag marks the cluster as adjacent
        nAdjacentClusters++;
      }
    }
  }

  char trdText[1024] = "";
  if (GetRecoSteps() & RecoStep::TRDTracking) { // NOTE: condition reconstructed, elided in the listing; the TRD summary is only built when TRD tracking was part of the reconstruction
    int32_t nTRDTracks = 0;
    int32_t nTRDTracklets = 0;
    for (uint32_t k = 0; k < mIOPtrs.nTRDTracks; k++) {
      if (mIOPtrs.trdTracksO2) {
        auto& trk = mIOPtrs.trdTracksO2[k];
        nTRDTracklets += trk.getNtracklets();
        nTRDTracks += trk.getNtracklets() != 0;
      } else {
        auto& trk = mIOPtrs.trdTracks[k];
        nTRDTracklets += trk.getNtracklets();
        nTRDTracks += trk.getNtracklets() != 0;
      }
    }
    snprintf(trdText, 1024, " - TRD Tracker reconstructed %d tracks (%d tracklets)", nTRDTracks, nTRDTracklets);
  }
  GPUInfo("Output Tracks: %d (%d / %d / %d / %d clusters (fitted / attached / adjacent / total) - %s format)%s", nTracks, nAttachedClustersFitted, nAttachedClusters, nAdjacentClusters, nCls, GetProcessingSettings().createO2Output > 1 ? "O2" : "GPU", trdText);
}

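// Validates the O2-format output tracks: every cluster reference must lie
// within the cluster-reference buffer, and each referenced sector / row /
// cluster index must exist in the native cluster arrays. At most 1000 errors
// are reported individually.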
void GPUChainTracking::SanityCheck()
{
  size_t nErrors = 0;

  for (uint32_t i = 0; i < mIOPtrs.nOutputTracksTPCO2; i++) {
    const auto& trk = mIOPtrs.outputTracksTPCO2[i];
    const auto& ref = trk.getClusterRef();
    if (ref.getFirstEntry() > mIOPtrs.nOutputClusRefsTPCO2) {
      if (nErrors++ < 1000) {
        GPUError("Invalid getFirstEntry() in cluster reference: %u > %u", ref.getFirstEntry(), mIOPtrs.nOutputClusRefsTPCO2);
      }
      continue; // skip the track even when the error message is suppressed
    }
    if (ref.getFirstEntry() + (ref.getEntries() * 3 + 1) / 2 > mIOPtrs.nOutputClusRefsTPCO2) {
      if (nErrors++ < 1000) {
        GPUError("Invalid getEntries() in cluster reference: %u > %u", ref.getFirstEntry() + (ref.getEntries() * 3 + 1) / 2, mIOPtrs.nOutputClusRefsTPCO2);
      }
      continue;
    }
    for (int32_t j = 0; j < trk.getNClusters(); j++) {
      uint8_t sector, row;
      uint32_t cl;
      trk.getClusterReference(mIOPtrs.outputClusRefsTPCO2, j, sector, row, cl);
      if (sector >= GPUCA_NSECTORS || row >= GPUCA_ROW_COUNT) {
        if (nErrors++ < 1000) {
          GPUError("Invalid sector / row %d / %d", (int32_t)sector, (int32_t)row);
        }
        continue; // do not index clustersNative with an out-of-range sector / row
      }
      if (cl >= mIOPtrs.clustersNative->nClusters[sector][row]) {
        if (nErrors++ < 1000) {
          GPUError("Invalid cluster index %u >= %u", cl, mIOPtrs.clustersNative->nClusters[sector][row]);
        }
      }
    }
  }

  if (nErrors == 0) {
    GPUInfo("Sanity check passed");
  } else {
    GPUError("Sanity check found %zu errors", nErrors);
  }
}

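// Filters native TPC clusters on the CPU in two passes: pass 0 only counts the
// surviving clusters so the caller-provided allocator can size the output
// buffer, pass 1 writes the survivors and updates the per-row counters and
// access pointers in place. A minimal usage sketch (buffer is a hypothetical
// caller-side vector, not part of this file):
//   std::vector<o2::tpc::ClusterNative> buffer;
//   RunTPCClusterFilter(clusters, [&](size_t n) { buffer.resize(n); return buffer.data(); }, true);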
void GPUChainTracking::RunTPCClusterFilter(o2::tpc::ClusterNativeAccess* clusters, std::function<o2::tpc::ClusterNative*(size_t)> allocator, bool applyClusterCuts)
{
  const uint8_t filterType = GetProcessingSettings().tpcApplyClusterFilterOnCPU;
  GPUTPCClusterFilter clusterFilter(*clusters, filterType);
  o2::tpc::ClusterNative* outputBuffer = nullptr;
  for (int32_t iPhase = 0; iPhase < 2; iPhase++) {
    uint32_t countTotal = 0;
    for (uint32_t iSector = 0; iSector < GPUCA_NSECTORS; iSector++) {
      for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) {
        uint32_t count = 0;
        for (uint32_t k = 0; k < clusters->nClusters[iSector][iRow]; k++) {
          o2::tpc::ClusterNative cl = clusters->clusters[iSector][iRow][k];
          bool keep = true;
          if (applyClusterCuts) {
            keep = keep && cl.qTot > param().rec.tpc.cfQTotCutoff && cl.qMax > param().rec.tpc.cfQMaxCutoff;
            keep = keep && (!(cl.getFlags() & o2::tpc::ClusterNative::flagSingle) || ((cl.sigmaPadPacked || cl.qMax > param().rec.tpc.cfQMaxCutoffSinglePad) && (cl.sigmaTimePacked || cl.qMax > param().rec.tpc.cfQMaxCutoffSingleTime)));
          }
          if (param().tpcCutTimeBin > 0) {
            keep = keep && cl.getTime() < param().tpcCutTimeBin;
          }
          keep = keep && (!filterType || clusterFilter.filter(iSector, iRow, cl));
          if (iPhase && keep) {
            outputBuffer[countTotal] = cl;
          }
          count += keep;
          countTotal += keep;
        }
        if (iPhase) {
          clusters->nClusters[iSector][iRow] = count;
        }
      }
    }
    if (iPhase) {
      clusters->clustersLinear = outputBuffer;
      clusters->setOffsetPtrs();
    } else {
      outputBuffer = allocator(countTotal);
    }
  }
}

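// Streams all native TPC clusters, grouped by sector and row, as plain text
// for offline inspection and diffing.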
void GPUChainTracking::DumpClusters(std::ostream& out, const o2::tpc::ClusterNativeAccess* clusters)
{
  out << "\nTPC Clusters:\n";
  for (uint32_t iSec = 0; iSec < GPUCA_NSECTORS; iSec++) {
    out << "TPCClusters - Sector " << iSec << "\n";
    for (uint32_t i = 0; i < GPUCA_ROW_COUNT; i++) {
      out << " Row: " << i << ": " << clusters->nClusters[iSec][i] << " clusters:\n";
      for (uint32_t j = 0; j < clusters->nClusters[iSec][i]; j++) {
        const auto& cl = clusters->clusters[iSec][i][j];
        out << " " << std::hex << cl.timeFlagsPacked << std::dec << " " << cl.padPacked << " " << int32_t{cl.sigmaTimePacked} << " " << int32_t{cl.sigmaPadPacked} << " " << cl.qMax << " " << cl.qTot << "\n";
      }
    }
  }
}

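// Permutes the compressed-cluster arrays into a canonical track order (sorted
// by sector, row, time, pad and q/pt of the track anchor point) so that
// outputs of different runs can be compared bit by bit. Each array is permuted
// out of place via a temporary copy; nTrackClusters must be permuted last
// because the other permutations still need the original per-track sizes.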
void GPUChainTracking::DebugSortCompressedClusters(o2::tpc::CompressedClustersFlat* cls)
{
  auto& c = *cls; // NOTE: alias reconstructed, elided in the listing; the body below addresses the arrays through c
  std::vector<uint32_t> sorted(c.nTracks), offsets(c.nTracks);
  std::iota(sorted.begin(), sorted.end(), 0);
  auto sorter = [&c](const auto a, const auto b) {
    return std::tie(c.sliceA[a], c.rowA[a], c.timeA[a], c.padA[a], c.qPtA[a]) <
           std::tie(c.sliceA[b], c.rowA[b], c.timeA[b], c.padA[b], c.qPtA[b]);
  };
  std::sort(sorted.begin(), sorted.end(), sorter);
  uint32_t offset = 0;
  for (uint32_t i = 0; i < c.nTracks; i++) {
    offsets[i] = offset;
    offset += c.nTrackClusters[i];
  }

  auto sortArray = [&c, &sorted, &offsets](auto* src, size_t totalSize, auto getOffset, auto getSize) {
    auto buf = std::make_unique<std::remove_reference_t<decltype(src[0])>[]>(totalSize);
    memcpy(buf.get(), src, totalSize * sizeof(*src));
    uint32_t targetOffset = 0;
    for (uint32_t i = 0; i < c.nTracks; i++) {
      const uint32_t j = sorted[i];
      memcpy(src + targetOffset, buf.get() + getOffset(offsets[j], j), getSize(j) * sizeof(*src));
      targetOffset += getSize(j);
    }
  };
  auto sortMultiple = [&sortArray](size_t totalSize, auto getOffset, auto getSize, auto&&... arrays) {
    (..., sortArray(std::forward<decltype(arrays)>(arrays), totalSize, getOffset, getSize));
  };
  auto getFullOffset = [](uint32_t off, uint32_t ind) { return off; };
  auto getReducedOffset = [](uint32_t off, uint32_t ind) { return off - ind; };
  auto getIndex = [](uint32_t off, uint32_t ind) { return ind; };
  auto getN = [&c](uint32_t j) { return c.nTrackClusters[j]; };
  auto getN1 = [&c](uint32_t j) { return c.nTrackClusters[j] - 1; };
  auto get1 = [](uint32_t j) { return 1; };

  sortMultiple(c.nAttachedClusters, getFullOffset, getN, c.qTotA, c.qMaxA, c.flagsA, c.sigmaPadA, c.sigmaTimeA);
  sortMultiple(c.nAttachedClustersReduced, getReducedOffset, getN1, c.rowDiffA, c.sliceLegDiffA, c.padResA, c.timeResA);
  sortMultiple(c.nTracks, getIndex, get1, c.qPtA, c.rowA, c.sliceA, c.timeA, c.padA, c.nTrackClusters); // NOTE: This must be last, since nTrackClusters is used for handling the arrays above!
}

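// Dumps the current raw TPC input (ZS pages, packed digits, or native
// clusters, whichever is present, in that order of preference) together with
// the reconstruction settings into the debug folder as a replayable dump.
// The function name below is hypothetical; the actual signature was elided in
// the extracted listing.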
void GPUChainTracking::DumpRawTPCData() // NOTE: hypothetical name, see comment above
{
  std::string dirName = mRec->getDebugFolder("tpc_raw");
  if (dirName == "") {
    return;
  }
  GPUTrackingInOutPointers ioPtrs; // NOTE: declaration reconstructed, elided in the listing; only the raw input that is present is forwarded
  if (mIOPtrs.tpcZS) {
    ioPtrs.tpcZS = mIOPtrs.tpcZS;
  } else if (mIOPtrs.tpcPackedDigits) {
    ioPtrs.tpcPackedDigits = mIOPtrs.tpcPackedDigits; // NOTE: reconstructed by analogy with the tpcZS branch
  } else if (mIOPtrs.clustersNative) {
    ioPtrs.clustersNative = mIOPtrs.clustersNative; // NOTE: reconstructed by analogy with the tpcZS branch
  }

  GPUInfo("Doing debug raw dump");
  mRec->DumpSettings((dirName + "/").c_str());
  DumpData((dirName + "/event.0.dump").c_str(), &ioPtrs);
}