Clusterer.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file Clusterer.cxx
/// \brief Implementation of the ITS cluster finder
#include <algorithm>
#include <TTree.h>
#include "Framework/Logger.h"
#include "ITSMFTReconstruction/Clusterer.h"
#include "SimulationDataFormat/MCTruthContainer.h"

#ifdef WITH_OPENMP
#include <omp.h>
#endif
using namespace o2::itsmft;

//__________________________________________________
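// Clusterization entry point: loop over the readout frames (ROFs) delivered by the PixelReader,
// pre-fetch all fired chips of the current ROF, clusterize them (in parallel when built
// WITH_OPENMP) and merge the per-thread outputs into the caller-provided containers.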
void Clusterer::process(int nThreads, PixelReader& reader, CompClusCont* compClus,
                        PatternCont* patterns, ROFRecCont* vecROFRec, MCTruth* labelsCl)
{
#ifdef _PERFORM_TIMING_
  mTimer.Start(kFALSE);
#endif
  if (nThreads < 1) {
    nThreads = 1;
  }
  auto autoDecode = reader.getDecodeNextAuto();
  int rofcount{0};
  o2::InteractionRecord lastIR{};
  do {
    if (autoDecode) {
      reader.setDecodeNextAuto(false); // internally do not autodecode
      if (!reader.decodeNextTrigger()) {
        break; // on the fly decoding was requested, but there were no data left
      }
    }
    if (reader.getInteractionRecord().isDummy()) {
      continue; // No IR info was found
    }
    if (!lastIR.isDummy() && lastIR >= reader.getInteractionRecord()) {
      const int MaxErrLog = 2;
      static int errLocCount = 0;
      if (errLocCount++ < MaxErrLog) {
        LOGP(warn, "Impossible ROF IR {}, does not exceed previous {}, discarding in clusterization", reader.getInteractionRecord().asString(), lastIR.asString());
      }
      continue;
    }
    lastIR = reader.getInteractionRecord();
    // pre-fetch all non-empty chips of current ROF
    ChipPixelData* curChipData = nullptr;
    mFiredChipsPtr.clear();
    size_t nPix = 0;
    while ((curChipData = reader.getNextChipData(mChips))) {
      mFiredChipsPtr.push_back(curChipData);
      nPix += curChipData->getData().size();
    }

    auto& rof = vecROFRec->emplace_back(reader.getInteractionRecord(), vecROFRec->size(), compClus->size(), 0); // create new ROF

    uint16_t nFired = mFiredChipsPtr.size();
    if (!nFired) {
      if (autoDecode) {
        continue;
      }
      break; // just 1 ROF was asked to be processed
    }
    if (nFired < nThreads) {
      nThreads = nFired;
    }
#ifndef WITH_OPENMP
    nThreads = 1;
#endif
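    // Work is split into batches of chipStep consecutive chips per task: with more threads smaller
    // batches are used to even out the load, dynGrp is the chunk size of the dynamic OpenMP
    // schedule, and the thread pool below is grown lazily up to nThreads workers.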
    uint16_t chipStep = nThreads > 1 ? (nThreads == 2 ? 20 : 10) : nFired;
    int dynGrp = std::min(4, std::max(1, nThreads / 2));
    if (nThreads > mThreads.size()) {
      int oldSz = mThreads.size();
      mThreads.resize(nThreads);
      for (int i = oldSz; i < nThreads; i++) {
        mThreads[i] = std::make_unique<ClustererThread>(this, i);
      }
    }
#ifdef WITH_OPENMP
#pragma omp parallel for schedule(dynamic, dynGrp) num_threads(nThreads)
    //>> start of MT region
    for (uint16_t ic = 0; ic < nFired; ic += chipStep) {
      auto ith = omp_get_thread_num();
      if (nThreads > 1) {
        mThreads[ith]->process(ic, std::min(chipStep, uint16_t(nFired - ic)),
                               &mThreads[ith]->compClusters,
                               patterns ? &mThreads[ith]->patterns : nullptr,
                               labelsCl ? reader.getDigitsMCTruth() : nullptr,
                               labelsCl ? &mThreads[ith]->labels : nullptr, rof);
      } else { // put directly to the destination
        mThreads[0]->process(0, nFired, compClus, patterns, labelsCl ? reader.getDigitsMCTruth() : nullptr, labelsCl, rof);
      }
    }
    //<< end of MT region
#else
    mThreads[0]->process(0, nFired, compClus, patterns, labelsCl ? reader.getDigitsMCTruth() : nullptr, labelsCl, rof);
#endif
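    // Each worker recorded one ThreadStat block per contiguous range of chips it processed; the
    // blocks are sorted by first chip and stitched together in increasing chip order, so clusters,
    // patterns and MC labels end up in the same order as in a single-threaded pass.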
    // copy data of all threads but the 1st one to final destination
    if (nThreads > 1) {
#ifdef _PERFORM_TIMING_
      mTimerMerge.Start(false);
#endif
      size_t nClTot = 0, nPattTot = 0;
      int chid = 0, thrStatIdx[nThreads];
      for (int ith = 0; ith < nThreads; ith++) {
        std::sort(mThreads[ith]->stats.begin(), mThreads[ith]->stats.end(), [](const ThreadStat& a, const ThreadStat& b) { return a.firstChip < b.firstChip; });
        thrStatIdx[ith] = 0;
        nClTot += mThreads[ith]->compClusters.size();
        nPattTot += mThreads[ith]->patterns.size();
      }
      compClus->reserve(nClTot);
      if (patterns) {
        patterns->reserve(nPattTot);
      }
      while (chid < nFired) {
        for (int ith = 0; ith < nThreads; ith++) {
          if (thrStatIdx[ith] >= mThreads[ith]->stats.size()) {
            continue;
          }
          const auto& stat = mThreads[ith]->stats[thrStatIdx[ith]];
          if (stat.firstChip == chid) {
            thrStatIdx[ith]++;
            chid += stat.nChips; // next chip to look
            if (stat.nClus > 0) {
              const auto clbeg = mThreads[ith]->compClusters.begin() + stat.firstClus;
              auto szold = compClus->size();
              compClus->insert(compClus->end(), clbeg, clbeg + stat.nClus);
              if (patterns) {
                const auto ptbeg = mThreads[ith]->patterns.begin() + stat.firstPatt;
                patterns->insert(patterns->end(), ptbeg, ptbeg + stat.nPatt);
              }
              if (labelsCl) {
                labelsCl->mergeAtBack(mThreads[ith]->labels, stat.firstClus, stat.nClus);
              }
            }
          }
        }
      }
      for (int ith = 0; ith < nThreads; ith++) {
        mThreads[ith]->patterns.clear();
        mThreads[ith]->compClusters.clear();
        mThreads[ith]->labels.clear();
        mThreads[ith]->stats.clear();
      }
#ifdef _PERFORM_TIMING_
      mTimerMerge.Stop();
#endif
    } else {
      mThreads[0]->stats.clear();
    }
    rof.setNEntries(compClus->size() - rof.getFirstEntry()); // update
  } while (autoDecode);
  reader.setDecodeNextAuto(autoDecode); // restore setting
#ifdef _PERFORM_TIMING_
  mTimer.Stop();
#endif
}

//__________________________________________________
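// Clusterize a contiguous batch of nChips fired chips starting at position `chip` of mFiredChipsPtr:
// optionally mask pixels already fired in the previous ROF (overflow pixels), run the column-wise
// pre-clustering over the remaining pixels and record a ThreadStat block telling the parent where
// this batch's clusters and patterns start, so that the thread outputs can be merged in chip order.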
void Clusterer::ClustererThread::process(uint16_t chip, uint16_t nChips, CompClusCont* compClusPtr, PatternCont* patternsPtr,
                                         const ConstMCTruth* labelsDigPtr, MCTruth* labelsClPtr, const ROFRecord& rofPtr)
{
  if (stats.empty() || stats.back().firstChip + stats.back().nChips != chip) { // there is a jump, register new block
    stats.emplace_back(ThreadStat{chip, 0, uint32_t(compClusPtr->size()), patternsPtr ? uint32_t(patternsPtr->size()) : 0, 0, 0});
  }
  for (int ic = 0; ic < nChips; ic++) {
    auto* curChipData = parent->mFiredChipsPtr[chip + ic];
    auto chipID = curChipData->getChipID();
    if (parent->mMaxBCSeparationToMask > 0) { // mask pixels fired from the previous ROF
      const auto& chipInPrevROF = parent->mChipsOld[chipID];
      if (std::abs(rofPtr.getBCData().differenceInBC(chipInPrevROF.getInteractionRecord())) < parent->mMaxBCSeparationToMask) {
        parent->mMaxRowColDiffToMask ? curChipData->maskFiredInSample(parent->mChipsOld[chipID], parent->mMaxRowColDiffToMask) : curChipData->maskFiredInSample(parent->mChipsOld[chipID]);
      }
    }
    auto nclus0 = compClusPtr->size();
    auto validPixID = curChipData->getFirstUnmasked();
    auto npix = curChipData->getData().size();
    if (validPixID < npix) { // chip data may have all of its pixels masked!
      auto valp = validPixID++;
      if (validPixID == npix) { // special case of a single pixel fired on the chip
        finishChipSingleHitFast(valp, curChipData, compClusPtr, patternsPtr, labelsDigPtr, labelsClPtr);
      } else {
        initChip(curChipData, valp);
        for (; validPixID < npix; validPixID++) {
          if (!curChipData->getData()[validPixID].isMasked()) {
            updateChip(curChipData, validPixID);
          }
        }
        finishChip(curChipData, compClusPtr, patternsPtr, labelsDigPtr, labelsClPtr);
      }
    }
    if (parent->mMaxBCSeparationToMask > 0) { // current chip data will be used in the next ROF to mask overflow pixels
      parent->mChipsOld[chipID].swap(*curChipData);
    }
  }
  auto& currStat = stats.back();
  currStat.nChips += nChips;
  currStat.nClus = compClusPtr->size() - currStat.firstClus;
  currStat.nPatt = patternsPtr ? (patternsPtr->size() - currStat.firstPatt) : 0;
}

//__________________________________________________
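// Convert the pre-clusters collected for the current chip into compact clusters: pre-clusters
// re-indexed to the same head are combined, their pixels gathered into pixArrBuff together with a
// bounding box and (optionally) MC labels, and streamed out. Clusters whose bounding box exceeds
// the maximal pattern span are either dropped or split into acceptable tiles.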
void Clusterer::ClustererThread::finishChip(ChipPixelData* curChipData, CompClusCont* compClusPtr,
                                            PatternCont* patternsPtr, const ConstMCTruth* labelsDigPtr, MCTruth* labelsClusPtr)
{
  const auto& pixData = curChipData->getData();
  int nPreclusters = preClusters.size();
  // account for the eventual reindexing of preClusters: Id2 might have been reindexed to Id1, which later was reindexed to Id0
  for (int i = 1; i < nPreclusters; i++) {
    if (preClusters[i].index != i) { // reindexing is always done towards smallest index
      preClusters[i].index = preClusters[preClusters[i].index].index;
    }
  }
  for (int i1 = 0; i1 < nPreclusters; ++i1) {
    auto& preCluster = preClusters[i1];
    auto ci = preCluster.index;
    if (ci < 0) {
      continue;
    }
    BBox bbox(curChipData->getChipID());
    int nlab = 0;
    int next = preCluster.head;
    pixArrBuff.clear();
    while (next >= 0) {
      const auto& pixEntry = pixels[next];
      const auto pix = pixData[pixEntry.second];
      pixArrBuff.push_back(pix); // needed for cluster topology
      bbox.adjust(pix.getRowDirect(), pix.getCol());
      if (labelsClusPtr) {
        if (parent->mSquashingDepth) { // the MCtruth for this pixel is stored in chip data: due to squashing we lose contiguity
          fetchMCLabels(curChipData->getOrderedPixId(pixEntry.second), labelsDigPtr, nlab);
        } else { // the MCtruth for this pixel is at curChipData->startID+pixEntry.second
          fetchMCLabels(pixEntry.second + curChipData->getStartID(), labelsDigPtr, nlab);
        }
      }
      next = pixEntry.first;
    }
    preCluster.index = -1;
    for (int i2 = i1 + 1; i2 < nPreclusters; ++i2) {
      auto& preCluster2 = preClusters[i2];
      if (preCluster2.index != ci) {
        continue;
      }
      next = preCluster2.head;
      while (next >= 0) {
        const auto& pixEntry = pixels[next];
        const auto pix = pixData[pixEntry.second]; // PixelData
        pixArrBuff.push_back(pix); // needed for cluster topology
        bbox.adjust(pix.getRowDirect(), pix.getCol());
        if (labelsClusPtr) {
          if (parent->mSquashingDepth) { // the MCtruth for this pixel is stored in chip data: due to squashing we lose contiguity
            fetchMCLabels(curChipData->getOrderedPixId(pixEntry.second), labelsDigPtr, nlab);
          } else { // the MCtruth for this pixel is at curChipData->startID+pixEntry.second
            fetchMCLabels(pixEntry.second + curChipData->getStartID(), labelsDigPtr, nlab);
          }
        }
        next = pixEntry.first;
      }
      preCluster2.index = -1;
    }
    if (bbox.isAcceptableSize()) {
      parent->streamCluster(pixArrBuff, &labelsBuff, bbox, parent->mPattIdConverter, compClusPtr, patternsPtr, labelsClusPtr, nlab);
    } else {
      auto warnLeft = MaxHugeClusWarn - parent->mNHugeClus;
      if (!parent->mDropHugeClusters) {
        if (warnLeft > 0) {
          LOGP(warn, "Splitting a huge cluster: chipID {}, rows {}:{} cols {}:{}{}", bbox.chipID, bbox.rowMin, bbox.rowMax, bbox.colMin, bbox.colMax,
               warnLeft == 1 ? " (Further warnings will be muted)" : "");
#ifdef WITH_OPENMP
#pragma omp critical
#endif
          {
            parent->mNHugeClus++;
          }
        }
        BBox bboxT(bbox); // truncated box
        std::vector<PixelData> pixbuf;
        do {
          bboxT.rowMin = bbox.rowMin;
          bboxT.colMax = std::min(bbox.colMax, uint16_t(bboxT.colMin + o2::itsmft::ClusterPattern::MaxColSpan - 1));
          do { // Select a subset of pixels fitting the reduced bounding box
            bboxT.rowMax = std::min(bbox.rowMax, uint16_t(bboxT.rowMin + o2::itsmft::ClusterPattern::MaxRowSpan - 1));
            for (const auto& pix : pixArrBuff) {
              if (bboxT.isInside(pix.getRowDirect(), pix.getCol())) {
                pixbuf.push_back(pix);
              }
            }
            if (!pixbuf.empty()) { // Stream a piece of cluster only if the reduced bounding box is not empty
              parent->streamCluster(pixbuf, &labelsBuff, bboxT, parent->mPattIdConverter, compClusPtr, patternsPtr, labelsClusPtr, nlab, true);
              pixbuf.clear();
            }
            bboxT.rowMin = bboxT.rowMax + 1;
          } while (bboxT.rowMin < bbox.rowMax);
          bboxT.colMin = bboxT.colMax + 1;
        } while (bboxT.colMin < bbox.colMax);
      }
    }
  }
}

//__________________________________________________
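// Fast path for a chip with a single fired pixel: no pre-clustering is needed, the 1x1 pattern is
// built directly and the compact cluster (plus optional pattern and MC labels) is stored.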
void Clusterer::ClustererThread::finishChipSingleHitFast(uint32_t hit, ChipPixelData* curChipData, CompClusCont* compClusPtr,
                                                         PatternCont* patternsPtr, const ConstMCTruth* labelsDigPtr, MCTruth* labelsClusPtr)
{
  auto pix = curChipData->getData()[hit];
  uint16_t row = pix.getRowDirect(), col = pix.getCol();

  if (labelsClusPtr) { // MC labels were requested
    int nlab = 0;
    fetchMCLabels(curChipData->getStartID() + hit, labelsDigPtr, nlab);
    auto cnt = compClusPtr->size();
    for (int i = nlab; i--;) {
      labelsClusPtr->addElement(cnt, labelsBuff[i]);
    }
  }

  // add to compact clusters, which must be always filled
  unsigned char patt[ClusterPattern::MaxPatternBytes]{0x1 << (7 - (0 % 8))}; // unrolled 1 hit version of full loop in finishChip
  uint16_t pattID = (parent->mPattIdConverter.size() == 0) ? CompCluster::InvalidPatternID : parent->mPattIdConverter.findGroupID(1, 1, patt);
  if ((pattID == CompCluster::InvalidPatternID || parent->mPattIdConverter.isGroup(pattID)) && patternsPtr) {
    patternsPtr->emplace_back(1); // rowspan
    patternsPtr->emplace_back(1); // colspan
    patternsPtr->insert(patternsPtr->end(), std::begin(patt), std::begin(patt) + 1);
  }
  compClusPtr->emplace_back(row, col, pattID, curChipData->getChipID());
}

//__________________________________________________
Clusterer::Clusterer() : mPattIdConverter()
{
#ifdef _PERFORM_TIMING_
  mTimer.Stop();
  mTimer.Reset();
  mTimerMerge.Stop();
  mTimerMerge.Reset();
#endif
}

//__________________________________________________
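// The column-wise pre-clustering uses two row-indexed buffers: `curr` holds the pre-cluster indices
// assigned to pixels of the column being processed and `prev` those of the previous column; they are
// backed by column1/column2 (sized NRows + 2) and offset by one entry so that row-1/row+1 lookups
// stay in bounds, and they are swapped when moving to the next column.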
void Clusterer::ClustererThread::initChip(const ChipPixelData* curChipData, uint32_t first)
{
  // init chip with the 1st unmasked pixel (entry "from" in the mChipData)
  prev = column1 + 1;
  curr = column2 + 1;
  resetColumn(curr);
  pixels.clear();
  preClusters.clear();
  auto pix = curChipData->getData()[first];
  currCol = pix.getCol();
  curr[pix.getRowDirect()] = 0; // can use getRowDirect since the pixel is not masked
  // start the first pre-cluster
  preClusters.emplace_back();
  pixels.emplace_back(-1, first); // id of current pixel
  noLeftCol = true;
}

//__________________________________________________
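// Attach pixel `ip` to an existing pre-cluster or start a new one: the pixel is checked for
// connection to the previous row of the same column and to the previous column (also diagonally when
// _ALLOW_DIAGONAL_ALPIDE_CLUSTERS_ is defined). When several neighbouring pre-clusters are found,
// they are re-labelled to the smallest index, which finishChip later uses to merge them.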
void Clusterer::ClustererThread::updateChip(const ChipPixelData* curChipData, uint32_t ip)
{
  const auto pix = curChipData->getData()[ip];
  uint16_t row = pix.getRowDirect(); // can use getRowDirect since the pixel is not masked
  if (currCol != pix.getCol()) { // switch the buffers
    swapColumnBuffers();
    resetColumn(curr);
    noLeftCol = false;
    if (pix.getCol() > currCol + 1) {
      // no connection with previous column, this pixel cannot belong to any of the
      // existing preclusters, create a new precluster and flag to check only the row above for next pixels of this column
      currCol = pix.getCol();
      addNewPrecluster(ip, row);
      noLeftCol = true;
      return;
    }
    currCol = pix.getCol();
  }

  if (noLeftCol) { // check only the row above
    if (curr[row - 1] >= 0) {
      expandPreCluster(ip, row, curr[row - 1]); // attach to the precluster of the previous row
    } else {
      addNewPrecluster(ip, row); // start new precluster
    }
  } else {
    // row above should be always checked
    int nnb = 0, lowestIndex = curr[row - 1], lowestNb = 0, *nbrCol[4], nbrRow[4];
    if (lowestIndex >= 0) {
      nbrCol[nnb] = curr;
      nbrRow[nnb++] = row - 1;
    } else {
      lowestIndex = 0x7ffff;
      lowestNb = -1;
    }
#ifdef _ALLOW_DIAGONAL_ALPIDE_CLUSTERS_
    for (int i : {-1, 0, 1}) {
      auto v = prev[row + i];
      if (v >= 0) {
        nbrCol[nnb] = prev;
        nbrRow[nnb] = row + i;
        if (v < lowestIndex) {
          lowestIndex = v;
          lowestNb = nnb;
        }
        nnb++;
      }
    }
#else
    if (prev[row] >= 0) {
      nbrCol[nnb] = prev;
      nbrRow[nnb] = row;
      if (prev[row] < lowestIndex) {
        lowestIndex = prev[row];
        lowestNb = nnb;
      }
      nnb++;
    }
#endif
    if (!nnb) { // no neighbours, create new precluster
      addNewPrecluster(ip, row); // start new precluster
    } else {
      expandPreCluster(ip, row, lowestIndex); // attach to the adjacent precluster with smallest index
      if (nnb > 1) {
        for (int inb = 0; inb < nnb; inb++) { // reassign precluster index to smallest one, replicating updated values to columns caches
          auto& prevIndex = (nbrCol[inb])[nbrRow[inb]];
          prevIndex = preClusters[prevIndex].index = lowestIndex;
        }
      }
    }
  }
}

//__________________________________________________
void Clusterer::ClustererThread::fetchMCLabels(int digID, const ConstMCTruth* labelsDig, int& nfilled)
{
  // transfer MC labels to cluster
  if (nfilled >= MaxLabels) {
    return;
  }
  const auto& lbls = labelsDig->getLabels(digID);
  for (int i = lbls.size(); i--;) {
    int ic = nfilled;
    for (; ic--;) { // check if the label is already present
      if (labelsBuff[ic] == lbls[i]) {
        return; // label is found, do nothing
      }
    }
    labelsBuff[nfilled++] = lbls[i];
    if (nfilled >= MaxLabels) {
      break;
    }
  }
  //
}

//__________________________________________________
void Clusterer::clear()
{
  // reset
#ifdef _PERFORM_TIMING_
  mTimer.Stop();
  mTimer.Reset();
  mTimerMerge.Stop();
  mTimerMerge.Reset();
#endif
}

//__________________________________________________
void Clusterer::print() const
{
  // print settings
  LOGP(info, "Clusterizer squashes overflow pixels separated by {} BC and <= {} in row/col seeking down to {} neighbour ROFs", mMaxBCSeparationToSquash, mMaxRowColDiffToMask, mSquashingDepth);
  LOGP(info, "Clusterizer masks overflow pixels separated by < {} BC and <= {} in row/col", mMaxBCSeparationToMask, mMaxRowColDiffToMask);
  LOGP(info, "Clusterizer does {} drop huge clusters", mDropHugeClusters ? "" : "not");

#ifdef _PERFORM_TIMING_
  auto& tmr = const_cast<TStopwatch&>(mTimer); // ugly but this is what root does internally
  auto& tmrm = const_cast<TStopwatch&>(mTimerMerge);
  LOG(info) << "Inclusive clusterization timing (w/o disk IO): Cpu: " << tmr.CpuTime()
            << " Real: " << tmr.RealTime() << " s in " << tmr.Counter() << " slots";
  LOG(info) << "Threads output merging timing: Cpu: " << tmrm.CpuTime()
            << " Real: " << tmrm.RealTime() << " s in " << tmrm.Counter() << " slots";

#endif
}

//__________________________________________________
void Clusterer::reset()
{
  // reset for new run
  clear();
  mNHugeClus = 0;
}