Clusterer.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

#include <algorithm>
#include <TTree.h>
#include "Framework/Logger.h"

#ifdef WITH_OPENMP
#include <omp.h>
#endif
using namespace o2::itsmft;

//__________________________________________________
void Clusterer::process(int nThreads, PixelReader& reader, CompClusCont* compClus,
                        PatternCont* patterns, ROFRecCont* vecROFRec, MCTruth* labelsCl)
{
#ifdef _PERFORM_TIMING_
  mTimer.Start(kFALSE);
#endif
  nThreads = std::max(nThreads, 1);
  auto autoDecode = reader.getDecodeNextAuto();
  o2::InteractionRecord lastIR{};
  do {
    if (autoDecode) {
      reader.setDecodeNextAuto(false); // internally do not autodecode
      if (!reader.decodeNextTrigger()) {
        break; // on the fly decoding was requested, but there were no data left
      }
    }
    if (reader.getInteractionRecord().isDummy()) {
      continue; // No IR info was found
    }
    if (!lastIR.isDummy() && lastIR >= reader.getInteractionRecord()) {
      const int MaxErrLog = 2;
      static int errLocCount = 0;
      if (errLocCount++ < MaxErrLog) {
        LOGP(warn, "Impossible ROF IR {}, does not exceed previous {}, discarding in clusterization", reader.getInteractionRecord().asString(), lastIR.asString());
      }
      continue;
    }
    lastIR = reader.getInteractionRecord();
    // pre-fetch all non-empty chips of current ROF
    ChipPixelData* curChipData = nullptr;
    mFiredChipsPtr.clear();
    size_t nPix = 0;
    while ((curChipData = reader.getNextChipData(mChips))) {
      mFiredChipsPtr.push_back(curChipData);
      nPix += curChipData->getData().size();
    }
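    // The fired-chip pointers collected above are what the worker threads index into below:
    // each thread is handed a contiguous range of entries of mFiredChipsPtr; nPix is the
    // total number of fired pixels in this ROF.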

    auto& rof = vecROFRec->emplace_back(reader.getInteractionRecord(), vecROFRec->size(), compClus->size(), 0); // create new ROF

    uint16_t nFired = mFiredChipsPtr.size();
    if (!nFired) {
      if (autoDecode) {
        continue;
      }
      break; // just 1 ROF was asked to be processed
    }
    nThreads = std::min<int>(nFired, nThreads);
#ifndef WITH_OPENMP
    nThreads = 1;
#endif
    uint16_t chipStep = nThreads > 1 ? (nThreads == 2 ? 20 : 10) : nFired;
    int dynGrp = std::min(4, std::max(1, nThreads / 2));
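    // Work-splitting heuristic for the OpenMP loop below: every iteration handles a batch of
    // chipStep fired chips (20 per batch with 2 threads, 10 with more, the whole ROF when
    // running single-threaded), and the dynamic schedule hands out up to dynGrp batches at a
    // time, presumably to amortize scheduling overhead while balancing uneven chip occupancies.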
    if (nThreads > mThreads.size()) {
      int oldSz = mThreads.size();
      mThreads.resize(nThreads);
      for (int i = oldSz; i < nThreads; i++) {
        mThreads[i] = std::make_unique<ClustererThread>(this, i);
      }
    }
#ifdef WITH_OPENMP
#pragma omp parallel for schedule(dynamic, dynGrp) num_threads(nThreads)
    //>> start of MT region
    for (uint16_t ic = 0; ic < nFired; ic += chipStep) {
      auto ith = omp_get_thread_num();
      if (nThreads > 1) {
        mThreads[ith]->process(ic, std::min(chipStep, uint16_t(nFired - ic)),
                               &mThreads[ith]->compClusters,
                               patterns ? &mThreads[ith]->patterns : nullptr,
                               labelsCl ? reader.getDigitsMCTruth() : nullptr,
                               labelsCl ? &mThreads[ith]->labels : nullptr, rof);
      } else { // put directly to the destination
        mThreads[0]->process(0, nFired, compClus, patterns, labelsCl ? reader.getDigitsMCTruth() : nullptr, labelsCl, rof);
      }
    }
    //<< end of MT region
#else
    mThreads[0]->process(0, nFired, compClus, patterns, labelsCl ? reader.getDigitsMCTruth() : nullptr, labelsCl, rof);
#endif
    // merge the output of all threads into the final destination, preserving chip order
    if (nThreads > 1) {
#ifdef _PERFORM_TIMING_
      mTimerMerge.Start(false);
#endif
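      // Merge strategy: each ThreadStat entry recorded by a worker describes a contiguous range
      // of fired chips and where the corresponding clusters/patterns start in that thread's
      // private buffers. The entries are sorted by first chip, and the while-loop below walks
      // the chips in order, appending one block at a time, so the merged output follows the
      // same chip order as a single-threaded pass.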
      size_t nClTot = 0, nPattTot = 0;
      int chid = 0, thrStatIdx[nThreads];
      for (int ith = 0; ith < nThreads; ith++) {
        std::sort(mThreads[ith]->stats.begin(), mThreads[ith]->stats.end(), [](const ThreadStat& a, const ThreadStat& b) { return a.firstChip < b.firstChip; });
        thrStatIdx[ith] = 0;
        nClTot += mThreads[ith]->compClusters.size();
        nPattTot += mThreads[ith]->patterns.size();
      }
      compClus->reserve(nClTot);
      if (patterns) {
        patterns->reserve(nPattTot);
      }
      while (chid < nFired) {
        for (int ith = 0; ith < nThreads; ith++) {
          if (thrStatIdx[ith] >= mThreads[ith]->stats.size()) {
            continue;
          }
          const auto& stat = mThreads[ith]->stats[thrStatIdx[ith]];
          if (stat.firstChip == chid) {
            thrStatIdx[ith]++;
            chid += stat.nChips; // next chip to look for
            if (stat.nClus > 0) {
              const auto clbeg = mThreads[ith]->compClusters.begin() + stat.firstClus;
              auto szold = compClus->size();
              compClus->insert(compClus->end(), clbeg, clbeg + stat.nClus);
              if (patterns) {
                const auto ptbeg = mThreads[ith]->patterns.begin() + stat.firstPatt;
                patterns->insert(patterns->end(), ptbeg, ptbeg + stat.nPatt);
              }
              if (labelsCl) {
                labelsCl->mergeAtBack(mThreads[ith]->labels, stat.firstClus, stat.nClus);
              }
            }
          }
        }
      }
      for (int ith = 0; ith < nThreads; ith++) {
        mThreads[ith]->patterns.clear();
        mThreads[ith]->compClusters.clear();
        mThreads[ith]->labels.clear();
        mThreads[ith]->stats.clear();
      }
#ifdef _PERFORM_TIMING_
      mTimerMerge.Stop();
#endif
    } else {
      mThreads[0]->stats.clear();
    }
    rof.setNEntries(compClus->size() - rof.getFirstEntry()); // update
  } while (autoDecode);
  reader.setDecodeNextAuto(autoDecode); // restore setting
#ifdef _PERFORM_TIMING_
  mTimer.Stop();
#endif
}
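
// Illustrative usage of the entry point above (a sketch, not part of this file; it assumes a
// concrete PixelReader implementation, e.g. a digit reader, already configured with input data,
// and the container aliases defined in Clusterer.h):
//
//   o2::itsmft::Clusterer clusterer;
//   CompClusCont clusters; // compact clusters
//   PatternCont patterns;  // explicit pixel patterns
//   ROFRecCont rofs;       // one record per readout frame
//   reader.setDecodeNextAuto(true); // let process() loop over all available ROFs
//   clusterer.process(4, reader, &clusters, &patterns, &rofs, nullptr); // 4 threads, no MC labels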

//__________________________________________________
void Clusterer::ClustererThread::process(uint16_t chip, uint16_t nChips, CompClusCont* compClusPtr, PatternCont* patternsPtr,
                                         const ConstMCTruth* labelsDigPtr, MCTruth* labelsClPtr, const ROFRecord& rofPtr)
{
  if (stats.empty() || stats.back().firstChip + stats.back().nChips != chip) { // there is a jump, register new block
    stats.emplace_back(ThreadStat{.firstChip = chip, .nChips = 0, .firstClus = uint32_t(compClusPtr->size()), .firstPatt = patternsPtr ? uint32_t(patternsPtr->size()) : 0, .nClus = 0, .nPatt = 0});
  }
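  // Each ThreadStat entry thus describes one contiguous block of chips handled by this thread,
  // together with the offsets of its clusters and patterns in the thread-local buffers;
  // Clusterer::process() later uses these blocks to stitch the per-thread output back into chip order.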
  for (int ic = 0; ic < nChips; ic++) {
    auto* curChipData = parent->mFiredChipsPtr[chip + ic];
    auto chipID = curChipData->getChipID();
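    // Overflow handling: if the same chip was fired in the previous ROF less than
    // mMaxBCSeparationToMask bunch crossings ago, pixels of the current data that repeat hits
    // of the previous ROF (optionally only within mMaxRowColDiffToMask rows/columns) are
    // masked, so that a hit spilling over the ROF boundary is not clustered twice.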
    if (parent->mMaxBCSeparationToMask > 0) { // mask pixels fired from the previous ROF
      const auto& chipInPrevROF = parent->mChipsOld[chipID];
      if (std::abs(rofPtr.getBCData().differenceInBC(chipInPrevROF.getInteractionRecord())) < parent->mMaxBCSeparationToMask) {
        parent->mMaxRowColDiffToMask ? curChipData->maskFiredInSample(parent->mChipsOld[chipID], parent->mMaxRowColDiffToMask) : curChipData->maskFiredInSample(parent->mChipsOld[chipID]);
      }
    }
    auto nclus0 = compClusPtr->size();
    auto validPixID = curChipData->getFirstUnmasked();
    auto npix = curChipData->getData().size();
    if (validPixID < npix) { // chip data may have all of its pixels masked!
      auto valp = validPixID++;
      if (validPixID == npix) { // special case of a single pixel fired on the chip
        finishChipSingleHitFast(valp, curChipData, compClusPtr, patternsPtr, labelsDigPtr, labelsClPtr);
      } else {
        initChip(curChipData, valp);
        for (; validPixID < npix; validPixID++) {
          if (!curChipData->getData()[validPixID].isMasked()) {
            updateChip(curChipData, validPixID);
          }
        }
        finishChip(curChipData, compClusPtr, patternsPtr, labelsDigPtr, labelsClPtr);
      }
    }
    if (parent->mMaxBCSeparationToMask > 0) { // current chip data will be used in the next ROF to mask overflow pixels
      parent->mChipsOld[chipID].swap(*curChipData);
    }
  }
  auto& currStat = stats.back();
  currStat.nChips += nChips;
  currStat.nClus = compClusPtr->size() - currStat.firstClus;
  currStat.nPatt = patternsPtr ? (patternsPtr->size() - currStat.firstPatt) : 0;
}

//__________________________________________________
void Clusterer::ClustererThread::finishChip(ChipPixelData* curChipData, CompClusCont* compClusPtr,
                                            PatternCont* patternsPtr, const ConstMCTruth* labelsDigPtr, MCTruth* labelsClusPtr)
{
  const auto& pixData = curChipData->getData();
  int nPreclusters = preClusters.size();
  // account for possible reindexing of preClusters: Id2 might have been reindexed to Id1, which later was reindexed to Id0
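  // A single pass suffices because an index always points to an entry with a smaller index,
  // which has already been resolved in an earlier iteration of this loop.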
  for (int i = 1; i < nPreclusters; i++) {
    if (preClusters[i].index != i) { // reindexing is always done towards smallest index
      preClusters[i].index = preClusters[preClusters[i].index].index;
    }
  }
  for (int i1 = 0; i1 < nPreclusters; ++i1) {
    auto& preCluster = preClusters[i1];
    auto ci = preCluster.index;
    if (ci < 0) {
      continue;
    }
    BBox bbox(curChipData->getChipID());
    int nlab = 0;
    int next = preCluster.head;
    pixArrBuff.clear();
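    // The pixels of a precluster form a singly linked list: every entry of the thread-local
    // 'pixels' vector is a (link, pixel-index) pair, 'head' points to the start of the chain
    // and a link of -1 terminates it.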
    while (next >= 0) {
      const auto& pixEntry = pixels[next];
      const auto pix = pixData[pixEntry.second];
      pixArrBuff.push_back(pix); // needed for cluster topology
      bbox.adjust(pix.getRowDirect(), pix.getCol());
      if (labelsClusPtr) {
        if (parent->mSquashingDepth) { // the MCtruth for this pixel is stored in chip data: due to squashing we lose contiguity
          fetchMCLabels(curChipData->getOrderedPixId(pixEntry.second), labelsDigPtr, nlab);
        } else { // the MCtruth for this pixel is at curChipData->startID+pixEntry.second
          fetchMCLabels(pixEntry.second + curChipData->getStartID(), labelsDigPtr, nlab);
        }
      }
      next = pixEntry.first;
    }
    preCluster.index = -1;
    for (int i2 = i1 + 1; i2 < nPreclusters; ++i2) {
      auto& preCluster2 = preClusters[i2];
      if (preCluster2.index != ci) {
        continue;
      }
      next = preCluster2.head;
      while (next >= 0) {
        const auto& pixEntry = pixels[next];
        const auto pix = pixData[pixEntry.second]; // PixelData
        pixArrBuff.push_back(pix); // needed for cluster topology
        bbox.adjust(pix.getRowDirect(), pix.getCol());
        if (labelsClusPtr) {
          if (parent->mSquashingDepth) { // the MCtruth for this pixel is stored in chip data: due to squashing we lose contiguity
            fetchMCLabels(curChipData->getOrderedPixId(pixEntry.second), labelsDigPtr, nlab);
          } else { // the MCtruth for this pixel is at curChipData->startID+pixEntry.second
            fetchMCLabels(pixEntry.second + curChipData->getStartID(), labelsDigPtr, nlab);
          }
        }
        next = pixEntry.first;
      }
      preCluster2.index = -1;
    }
    if (bbox.isAcceptableSize()) {
      parent->streamCluster(pixArrBuff, &labelsBuff, bbox, parent->mPattIdConverter, compClusPtr, patternsPtr, labelsClusPtr, nlab);
    } else {
      auto warnLeft = MaxHugeClusWarn - parent->mNHugeClus;
      if (!parent->mDropHugeClusters) {
        if (warnLeft > 0) {
          LOGP(warn, "Splitting a huge cluster: chipID {}, rows {}:{} cols {}:{}{}", bbox.chipID, bbox.rowMin, bbox.rowMax, bbox.colMin, bbox.colMax,
               warnLeft == 1 ? " (Further warnings will be muted)" : "");
#ifdef WITH_OPENMP
#pragma omp critical
#endif
          {
            parent->mNHugeClus++;
          }
        }
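        // The oversized cluster is split into tiles of at most MaxRowSpan x MaxColSpan pixels:
        // the truncated box bboxT sweeps the full bounding box column band by column band and,
        // within each band, row band by row band, streaming the pixels of every non-empty tile
        // as a separate cluster (the trailing 'true' argument presumably flags it as a split piece).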
        BBox bboxT(bbox); // truncated box
        std::vector<PixelData> pixbuf;
        do {
          bboxT.rowMin = bbox.rowMin;
          bboxT.colMax = std::min(bbox.colMax, uint16_t(bboxT.colMin + o2::itsmft::ClusterPattern::MaxColSpan - 1));
          do { // Select a subset of pixels fitting the reduced bounding box
            bboxT.rowMax = std::min(bbox.rowMax, uint16_t(bboxT.rowMin + o2::itsmft::ClusterPattern::MaxRowSpan - 1));
            for (const auto& pix : pixArrBuff) {
              if (bboxT.isInside(pix.getRowDirect(), pix.getCol())) {
                pixbuf.push_back(pix);
              }
            }
            if (!pixbuf.empty()) { // Stream a piece of cluster only if the reduced bounding box is not empty
              parent->streamCluster(pixbuf, &labelsBuff, bboxT, parent->mPattIdConverter, compClusPtr, patternsPtr, labelsClusPtr, nlab, true);
              pixbuf.clear();
            }
            bboxT.rowMin = bboxT.rowMax + 1;
          } while (bboxT.rowMin < bbox.rowMax);
          bboxT.colMin = bboxT.colMax + 1;
        } while (bboxT.colMin < bbox.colMax);
      }
    }
  }
}

//__________________________________________________
void Clusterer::ClustererThread::finishChipSingleHitFast(uint32_t hit, ChipPixelData* curChipData, CompClusCont* compClusPtr,
                                                         PatternCont* patternsPtr, const ConstMCTruth* labelsDigPtr, MCTruth* labelsClusPtr)
{
  auto pix = curChipData->getData()[hit];
  uint16_t row = pix.getRowDirect(), col = pix.getCol();

  if (labelsClusPtr) { // MC labels were requested
    int nlab = 0;
    fetchMCLabels(curChipData->getStartID() + hit, labelsDigPtr, nlab);
    auto cnt = compClusPtr->size();
    for (int i = nlab; i--;) {
      labelsClusPtr->addElement(cnt, labelsBuff[i]);
    }
  }

  // add to compact clusters, which must always be filled
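  // A single fired pixel is a 1x1 pattern whose only set pixel sits at row 0, column 0 of the
  // pattern, i.e. in the most significant bit of the first pattern byte, hence 0x1 << (7 - (0 % 8)).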
  unsigned char patt[ClusterPattern::MaxPatternBytes]{0x1 << (7 - (0 % 8))}; // unrolled 1 hit version of full loop in finishChip
  uint16_t pattID = (parent->mPattIdConverter.size() == 0) ? CompCluster::InvalidPatternID : parent->mPattIdConverter.findGroupID(1, 1, patt);
  if ((pattID == CompCluster::InvalidPatternID || parent->mPattIdConverter.isGroup(pattID)) && patternsPtr) {
    patternsPtr->emplace_back(1); // rowspan
    patternsPtr->emplace_back(1); // colspan
    patternsPtr->insert(patternsPtr->end(), std::begin(patt), std::begin(patt) + 1);
  }
  compClusPtr->emplace_back(row, col, pattID, curChipData->getChipID());
}

//__________________________________________________
Clusterer::Clusterer() : mPattIdConverter()
{
#ifdef _PERFORM_TIMING_
  mTimer.Stop();
  mTimer.Reset();
  mTimerMerge.Stop();
  mTimerMerge.Reset();
#endif
}

//__________________________________________________
void Clusterer::ClustererThread::initChip(const ChipPixelData* curChipData, uint32_t first)
{
  // init chip with the 1st unmasked pixel (entry "first" of the chip data)
  prev = column1 + 1;
  curr = column2 + 1;
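  // prev and curr point one element past the start of the two column buffers (sized NRows + 2),
  // so the row - 1 and row + 1 neighbour lookups in updateChip never index out of bounds.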
  resetColumn(curr);
  pixels.clear();
  preClusters.clear();
  auto pix = curChipData->getData()[first];
  currCol = pix.getCol();
  curr[pix.getRowDirect()] = 0; // can use getRowDirect since the pixel is not masked
  // start the first pre-cluster
  preClusters.emplace_back();
  pixels.emplace_back(-1, first); // id of current pixel
  noLeftCol = true;
}

//__________________________________________________
void Clusterer::ClustererThread::updateChip(const ChipPixelData* curChipData, uint32_t ip)
{
  const auto pix = curChipData->getData()[ip];
  uint16_t row = pix.getRowDirect(); // can use getRowDirect since the pixel is not masked
  if (currCol != pix.getCol()) { // switch the buffers
    swapColumnBuffers();
    resetColumn(curr);
    noLeftCol = false;
    if (pix.getCol() > currCol + 1) {
      // no connection with previous column, this pixel cannot belong to any of the
      // existing preclusters, create a new precluster and flag to check only the row above for next pixels of this column
      currCol = pix.getCol();
      addNewPrecluster(ip, row);
      noLeftCol = true;
      return;
    }
    currCol = pix.getCol();
  }

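  // Neighbour search for the single-pass connected-component labelling: the candidates are the
  // pixel just below in the current column (curr[row - 1]) and, when a left column exists, the
  // pixel in the previous column (prev[row]); with _ALLOW_DIAGONAL_ALPIDE_CLUSTERS_ the diagonal
  // neighbours prev[row - 1] and prev[row + 1] are considered as well. The pixel joins the
  // neighbouring precluster with the smallest index, and any other touching preclusters are
  // relabelled to that same index.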
  if (noLeftCol) { // check only the row above
    if (curr[row - 1] >= 0) {
      expandPreCluster(ip, row, curr[row - 1]); // attach to the precluster of the previous row
    } else {
      addNewPrecluster(ip, row); // start new precluster
    }
  } else {
    // the row above should always be checked
    int nnb = 0, lowestIndex = curr[row - 1], lowestNb = 0, *nbrCol[4], nbrRow[4];
    if (lowestIndex >= 0) {
      nbrCol[nnb] = curr;
      nbrRow[nnb++] = row - 1;
    } else {
      lowestIndex = 0x7ffff;
      lowestNb = -1;
    }
#ifdef _ALLOW_DIAGONAL_ALPIDE_CLUSTERS_
    for (int i : {-1, 0, 1}) {
      auto v = prev[row + i];
      if (v >= 0) {
        nbrCol[nnb] = prev;
        nbrRow[nnb] = row + i;
        if (v < lowestIndex) {
          lowestIndex = v;
          lowestNb = nnb;
        }
        nnb++;
      }
    }
#else
    if (prev[row] >= 0) {
      nbrCol[nnb] = prev;
      nbrRow[nnb] = row;
      if (prev[row] < lowestIndex) {
        lowestIndex = prev[row];
        lowestNb = nnb;
      }
      nnb++;
    }
#endif
    if (!nnb) { // no neighbours, create new precluster
      addNewPrecluster(ip, row); // start new precluster
    } else {
      expandPreCluster(ip, row, lowestIndex); // attach to the adjacent precluster with the smallest index
      if (nnb > 1) {
        for (int inb = 0; inb < nnb; inb++) { // reassign the precluster indices to the smallest one, propagating the updated value to the column caches
          auto& prevIndex = (nbrCol[inb])[nbrRow[inb]];
          prevIndex = preClusters[prevIndex].index = lowestIndex;
        }
      }
    }
  }
}

//__________________________________________________
void Clusterer::ClustererThread::fetchMCLabels(int digID, const ConstMCTruth* labelsDig, int& nfilled)
{
  // transfer MC labels to cluster
  if (nfilled >= MaxLabels) {
    return;
  }
  const auto& lbls = labelsDig->getLabels(digID);
  for (int i = lbls.size(); i--;) {
    int ic = nfilled;
    for (; ic--;) { // check if the label is already present
      if (labelsBuff[ic] == lbls[i]) {
        return; // label is found, do nothing
      }
    }
    labelsBuff[nfilled++] = lbls[i];
    if (nfilled >= MaxLabels) {
      break;
    }
  }
  //
}

//__________________________________________________
void Clusterer::clear()
{
  // reset
#ifdef _PERFORM_TIMING_
  mTimer.Stop();
  mTimer.Reset();
  mTimerMerge.Stop();
  mTimerMerge.Reset();
#endif
}

//__________________________________________________
void Clusterer::print(bool showsTiming) const
{
  // print settings
  if (mSquashingLayerDepth.empty()) {
    LOGP(info, "Clusterizer squashes overflow pixels separated by {} BC and <= {} in row/col seeking down to {} neighbour ROFs", mMaxBCSeparationToSquash, mMaxRowColDiffToMask, mSquashingDepth);
  } else {
    LOGP(info, "Clusterizer squashes overflow pixels <= {} in row/col", mMaxRowColDiffToMask);
    for (size_t i{0}; i < mSquashingLayerDepth.size(); ++i) {
      LOGP(info, "\tlay:{} separated by {} BC seeking down to {} neighbour ROFs", i, mMaxBCSeparationToSquashLayer[i], mSquashingLayerDepth[i]);
    }
  }
  LOGP(info, "Clusterizer masks overflow pixels separated by < {} BC and <= {} in row/col", mMaxBCSeparationToMask, mMaxRowColDiffToMask);
  LOGP(info, "Clusterizer does {} drop huge clusters", mDropHugeClusters ? "" : "not");

  if (showsTiming) {
#ifdef _PERFORM_TIMING_
    auto& tmr = const_cast<TStopwatch&>(mTimer); // ugly but this is what root does internally
    auto& tmrm = const_cast<TStopwatch&>(mTimerMerge);
    LOG(info) << "Inclusive clusterization timing (w/o disk IO): Cpu: " << tmr.CpuTime()
              << " Real: " << tmr.RealTime() << " s in " << tmr.Counter() << " slots";
    LOG(info) << "Threads output merging timing                : Cpu: " << tmrm.CpuTime()
              << " Real: " << tmrm.RealTime() << " s in " << tmrm.Counter() << " slots";

#endif
  }
}

//__________________________________________________
void Clusterer::reset()
{
  // reset for new run
  clear();
  mNHugeClus = 0;
}