// NOTE(review): this chunk is the interior of a GPU start-hit-finder kernel;
// the enclosing function header, closing braces, and the matching #else/#endif
// of the preprocessor blocks lie outside this view. The stray leading numbers
// fused into several lines (e.g. "29", "37") look like original line numbers
// introduced by a broken extraction — left untouched so the code stays
// byte-identical. Verify against the upstream file before editing further.
// Cache the number of hits of the current row (s.mIRow) in shared state.
29 s.mNHits = tracker.mData.mRows[s.mIRow].mNHits;
// Strided loop: each of nThreads threads processes every nThreads-th hit.
37 for (int32_t ih = iThread; ih < s.mNHits; ih += nThreads) {
// Base offset of this row's hits in the flat link arrays.
// NOTE(review): `row` is declared outside this chunk — presumably the row
// object for s.mIRow; confirm in the surrounding code.
38 int64_t lHitNumberOffset =
row.mHitNumberOffset;
// Index of the linked hit in the row above, or CALINK_INVAL if none.
39 uint32_t linkUpData = tracker.mData.mLinkUpData[lHitNumberOffset + ih];
// Candidate start hit: no downward link, but an upward link whose target
// itself links further up (i.e. a chain of at least two upward links).
41 if (tracker.mData.mLinkDownData[lHitNumberOffset + ih] ==
CALINK_INVAL && linkUpData !=
CALINK_INVAL && tracker.mData.mLinkUpData[rowUp.mHitNumberOffset + linkUpData] !=
CALINK_INVAL) {
42#ifdef GPUCA_SORT_STARTHITS
// Sorted variant: first count this row's start hits via a shared-memory
// atomic counter; the global offset is claimed later (see L64 block).
44 uint32_t nextRowStartHits = CAMath::AtomicAddShared(&s.mNRowStartHits, 1u);
45 if (nextRowStartHits >= tracker.mNMaxRowStartHits) {
// Overflow: raise a per-row error (sector*1000 + row id) and clamp the
// shared counter back to the maximum so later code sees a sane value.
46 tracker.raiseError(GPUErrors::ERROR_ROWSTARTHIT_OVERFLOW, tracker.ISector() * 1000 + s.mIRow, nextRowStartHits, tracker.mNMaxRowStartHits);
47 CAMath::AtomicExchShared(&s.mNRowStartHits, tracker.mNMaxRowStartHits);
// Unsorted variant (presumably the #else branch — the directive is outside
// this chunk): append directly to the global start-hit counter.
52 uint32_t nextRowStartHits = CAMath::AtomicAdd(&tracker.mCommonMem->nStartHits, 1u);
53 if (nextRowStartHits >= tracker.mNMaxStartHits) {
// Global overflow: report and clamp the global counter to its maximum.
54 tracker.raiseError(GPUErrors::ERROR_STARTHIT_OVERFLOW, tracker.ISector() * 1000 + s.mIRow, nextRowStartHits, tracker.mNMaxStartHits);
55 CAMath::AtomicExch(&tracker.mCommonMem->nStartHits, tracker.mNMaxStartHits);
// Store the accepted seed start point as (row index, hit index).
// NOTE(review): where `startHits` points (per-row vs global buffer) depends
// on the GPUCA_SORT_STARTHITS configuration set up outside this chunk.
59 startHits[nextRowStartHits].Set(s.mIRow, ih);
64#ifdef GPUCA_SORT_STARTHITS
// Sorted-variant epilogue (presumably executed once per row after the loop):
// reserve a contiguous range in the global counter for this row's start hits
// and record the per-row count for the subsequent sorting/compaction step.
66 uint32_t nOffset = CAMath::AtomicAdd(&tracker.mCommonMem->nStartHits, s.mNRowStartHits);
67 tracker.mRowStartHitCountOffset[s.mIRow] = s.mNRowStartHits;
// If the reserved range would exceed the global capacity, report and clamp.
68 if (nOffset + s.mNRowStartHits > tracker.mNMaxStartHits) {
69 tracker.raiseError(GPUErrors::ERROR_STARTHIT_OVERFLOW, tracker.ISector() * 1000 + s.mIRow, nOffset + s.mNRowStartHits, tracker.mNMaxStartHits);
70 CAMath::AtomicExch(&tracker.mCommonMem->nStartHits, tracker.mNMaxStartHits);