// NOTE(review): extracted fragment of a GPU kernel body (the numbers "28", "33", ...
// at the start of each statement are embedded original line numbers from extraction,
// not code). The chunk is non-contiguous: closing braces, an apparent `else` between
// the shared-counter and global-counter paths, and the statements following each
// overflow handler (presumably a break/continue and a barrier) are NOT visible here.
// Comments below describe only what the visible statements do.

// Cache the number of hits in this thread block's assigned row.
28 s.mNHits = tracker.mData.mRows[s.mIRow].mNHits;
// Thread-stride loop: each of the nThreads threads visits every nThreads-th hit.
33 for (uint32_t ih = iThread; ih < s.mNHits; ih += nThreads) {
// Base offset of this row's hits in the flat per-hit link arrays.
// (Statement is split across two physical lines in the extracted text.)
34 int64_t lHitNumberOffset =
row.mHitNumberOffset;
// Index of the hit this one links to in the row above (CALINK_INVAL = no link).
35 uint32_t linkUpData = tracker.mData.mLinkUpData[lHitNumberOffset + ih];
// A hit qualifies as a tracklet start hit when:
//   - it has NO link to the row below (it is the head of a link chain), AND
//   - it links upward to a valid hit, AND
//   - that upward neighbor itself links further up
// i.e. the visible chain is at least 3 hits long.
37 if (tracker.mData.mLinkDownData[lHitNumberOffset + ih] ==
CALINK_INVAL && linkUpData !=
CALINK_INVAL && tracker.mData.mLinkUpData[rowUp.mHitNumberOffset + linkUpData] !=
CALINK_INVAL) {
39 uint32_t nextRowStartHits;
// Compile-time switch: with GPUCA_PAR_SORT_STARTHITS, start hits are first
// collected into a per-row temporary buffer using a shared-memory counter
// (and merged into the global array later); the other (not fully visible)
// path writes directly to the global start-hit array.
40 if constexpr (GPUCA_PAR_SORT_STARTHITS > 0) {
// Per-row temp buffer for this row.
41 startHits = tracker.mTrackletTmpStartHits + s.mIRow * tracker.mNMaxRowStartHits;
// Reserve a slot via an atomic on the (presumably shared-memory) per-row counter.
42 nextRowStartHits = CAMath::AtomicAddShared(&s.mNRowStartHits, 1u);
// Overflow: report it and clamp the counter back to the maximum so it does
// not keep growing past the buffer size. (The statement that skips the
// out-of-bounds Set() below is presumably on a line not visible here —
// TODO confirm against the full file.)
43 if (nextRowStartHits >= tracker.mNMaxRowStartHits) {
44 tracker.raiseError(GPUErrors::ERROR_ROWSTARTHIT_OVERFLOW, tracker.ISector() * 1000 + s.mIRow, nextRowStartHits, tracker.mNMaxRowStartHits);
45 CAMath::AtomicExchShared(&s.mNRowStartHits, tracker.mNMaxRowStartHits);
// (Alternate, unsorted path — the `else` joining it to the branch above is on a
// missing line.) Write directly into the global start-hit array, reserving a
// slot with a global atomic on the shared event-wide counter.
49 startHits = tracker.mTrackletStartHits;
50 nextRowStartHits = CAMath::AtomicAdd(&tracker.mCommonMem->nStartHits, 1u);
// Same overflow handling as above, but on the global counter/limit.
51 if (nextRowStartHits >= tracker.mNMaxStartHits) {
52 tracker.raiseError(GPUErrors::ERROR_STARTHIT_OVERFLOW, tracker.ISector() * 1000 + s.mIRow, nextRowStartHits, tracker.mNMaxStartHits);
53 CAMath::AtomicExch(&tracker.mCommonMem->nStartHits, tracker.mNMaxStartHits);
// Record the start hit: (row index, hit index within the row).
57 startHits[nextRowStartHits].Set(s.mIRow, ih);
// Post-loop phase for the sorted variant (presumably after a barrier and guarded
// to a single thread — the guard lines are not visible here): reserve a
// contiguous range in the global start-hit array for this row's collected hits,
// and publish the per-row count for the later merge/sort step.
62 if constexpr (GPUCA_PAR_SORT_STARTHITS > 0) {
64 uint32_t nOffset = CAMath::AtomicAdd(&tracker.mCommonMem->nStartHits, s.mNRowStartHits);
65 tracker.mRowStartHitCountOffset[s.mIRow] = s.mNRowStartHits;
// If the reserved range would exceed the global capacity, report and clamp the
// global counter to the maximum.
66 if (nOffset + s.mNRowStartHits > tracker.mNMaxStartHits) {
67 tracker.raiseError(GPUErrors::ERROR_STARTHIT_OVERFLOW, tracker.ISector() * 1000 + s.mIRow, nOffset + s.mNRowStartHits, tracker.mNMaxStartHits);
68 CAMath::AtomicExch(&tracker.mCommonMem->nStartHits, tracker.mNMaxStartHits);