#include "GPUTPCCompressionKernels.inc"
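// Compression kernels: attached clusters are stored as residuals with respect to
// their track, unattached clusters are sorted per pad row and difference-encoded,
// and the gather kernels copy the per-step output into the contiguous
// CompressedClusters buffers.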
  bool rejectTrk = CAMath::Abs(trk.GetParam().GetQPt() * processors.param.qptB5Scaler) > processors.param.rec.tpc.rejectQPtB5 || trk.MergedLooper();
  uint32_t nClustersStored = 0;
  uint8_t lastRow = 0, lastSector = 0;
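  // Walk the track's clusters from last to first; each stored cluster is written either
  // in absolute coordinates (first cluster) or relative to the propagated track state.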
  for (int32_t k = trk.NClusters() - 1; k >= 0; k--) {
    int32_t hitId = hit.num;
    float x = geo.Row2X(hit.row);
    float y = track.LinearPad2Y(hit.sector, orgCl.getPad(), geo.PadWidth(hit.row), geo.NPads(hit.row));
    float z = geo.LinearTime2Z(hit.sector, orgCl.getTime());
    if (nClustersStored) {
      if (lastLeg != hit.leg && track.Mirror()) {
      if (track.Propagate(geo.Row2X(hit.row), param.SectorParam[hit.sector].Alpha)) {
    int32_t cidx = trk.FirstClusterRef() + nClustersStored++;
    if (nClustersStored == 1) {
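      // First stored cluster of a track: write absolute row/time plus the track q/pt
      // quantized to 8 bits; |q/pt| < 20 maps linearly onto 0..254 (127 ~ 0), larger
      // values saturate at 0 or 254.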
      uint8_t qpt = fabs(trk.GetParam().GetQPt()) < 20.f ? (trk.GetParam().GetQPt() * (127.f / 20.f) + 127.5f) : (trk.GetParam().GetQPt() > 0 ? 254 : 0);
      c.qPtA[myTrack] = qpt;
      c.rowA[myTrack] = hit.row;
      c.timeA[myTrack] = orgCl.getTimePacked();
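      // Subsequent clusters: row and sector are stored relative to the previous cluster,
      // with a leg change flagged by adding NSECTORS to the sector difference.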
      uint32_t sector = hit.sector;
      if (lastSector > sector) {
      sector -= lastSector;
      c.rowDiffA[cidx] = row;
      c.sliceLegDiffA[cidx] = (hit.leg == lastLeg ? 0 : compressor.NSECTORS) + sector;
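      // Pad and time residuals: difference between the measured cluster and the track
      // extrapolation, kept in the packed fixed-point format of the cluster.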
      float pad = CAMath::Max(0.f, CAMath::Min((float)geo.NPads(GPUCA_ROW_COUNT - 1), track.LinearY2Pad(hit.sector, track.Y(), geo.PadWidth(hit.row), geo.NPads(hit.row))));
      c.padResA[cidx] = orgCl.padPacked - orgCl.packPad(pad);
      float time = CAMath::Max(0.f, geo.LinearZ2Time(hit.sector, track.Z() + zOffset));
      c.timeResA[cidx] = (orgCl.getTimePacked() - orgCl.packTime(time)) & 0xFFFFFF;
    uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
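    // Charges and cluster widths are truncated to a configurable number of
    // significant bits before storing (lossy, controlled via param).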
    compressor.truncateSignificantBitsChargeMax(qmax, param);
    compressor.truncateSignificantBitsCharge(qtot, param);
    compressor.truncateSignificantBitsWidth(sigmapad, param);
    compressor.truncateSignificantBitsWidth(sigmatime, param);
    c.qTotA[cidx] = qtot;
    c.qMaxA[cidx] = qmax;
    c.sigmaPadA[cidx] = sigmapad;
    c.sigmaTimeA[cidx] = sigmatime;
    c.flagsA[cidx] = orgCl.getFlags();
    if (k && track.Filter(y, z - zOffset, hit.row)) {
  if (nClustersStored) {
    c.nTrackClusters[myTrack] = nClustersStored;
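// Comparison functors for sorting unattached clusters within a row; the different
// specializations order by time, by pad, or by a coarse time/pad grouping (packed
// values with the lowest 3 bits masked), as selected by the compression sort order.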
  return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();

  return mClsPtr[a].padPacked < mClsPtr[b].padPacked;

  if (mClsPtr[a].getTimePacked() >> 3 == mClsPtr[b].getTimePacked() >> 3) {
    return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
  return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();

  if (mClsPtr[a].padPacked >> 3 == mClsPtr[b].padPacked >> 3) {
    return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
  return mClsPtr[a].padPacked < mClsPtr[b].padPacked;

  if (mClsPtr[a].getTimePacked() != mClsPtr[b].getTimePacked()) {
    return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
  if (mClsPtr[a].padPacked != mClsPtr[b].padPacked) {
    return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
  return mClsPtr[a].qTot < mClsPtr[b].qTot;
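// Unattached-cluster compression: each block works on one pad row, selects the
// clusters to keep, collects their indices in a shared-memory sort buffer, sorts
// them and difference-encodes pad and time along the sorted order.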
  uint32_t* sortBuffer = smem.sortBuffer;
  const uint32_t idOffset = clusters->clusterOffset[iSector][iRow];
  const uint32_t idOffsetOut = clusters->clusterOffset[iSector][iRow] * compressor.mMaxClusterFactorBase1024 / 1024;
  const uint32_t idOffsetOutMax = ((const uint32_t*)clusters->clusterOffset[iSector])[iRow + 1] * compressor.mMaxClusterFactorBase1024 / 1024;
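  // Output offsets are scaled by mMaxClusterFactorBase1024 (a factor in units of
  // 1/1024) to bound the space available for this row's output.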
  if (iThread == nThreads - 1) {
  uint32_t totalCount = 0;
  for (uint32_t i = iThread; i < nn + nThreads; i += nThreads) {
    const int32_t idx = idOffset + i;
    int32_t storeCluster = 0;
    if (i >= clusters->nClusters[iSector][iRow]) {
    if (compressor.mClusterStatus[idx]) {
    int32_t attach = ioPtrs.mergedTrackHitAttachment[idx];
    bool unattached = attach == 0;
    auto& trk = ioPtrs.mergedTracks[id];
    if (CAMath::Abs(trk.GetParam().GetQPt() * processors.param.qptB5Scaler) > processors.param.rec.tpc.rejectQPtB5 || trk.MergedLooper()) {
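    // A block-wide inclusive prefix sum over the store flags gives each thread its
    // slot in the shared-memory sort buffer.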
    int32_t myIndex = work_group_scan_inclusive_add(storeCluster);
    int32_t storeLater = -1;
      sortBuffer[smem.nCount + myIndex - 1] = i;
    if (iThread == nThreads - 1) {
      smem.nCount += myIndex;
    if (idOffsetOut + totalCount + count > idOffsetOutMax) {
      if (iThread == nThreads - 1) {
        compressor.raiseError(GPUErrors::ERROR_COMPRESSION_ROW_HIT_OVERFLOW, iSector * 1000 + iRow, idOffsetOut + totalCount + count, idOffsetOutMax);
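    // Sort the buffered cluster indices; in deterministic mode a fixed comparator is
    // used, otherwise the comparator matching the configured sort order is selected.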
#ifdef GPUCA_DETERMINISTIC_MODE
    CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
    CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
    CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>(clusters->clusters[iSector][iRow]));
    CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>(clusters->clusters[iSector][iRow]));
    CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>(clusters->clusters[iSector][iRow]));
    int32_t outidx = idOffsetOut + totalCount + j;
    int32_t preId = j != 0 ? (int32_t)sortBuffer[j - 1] : (totalCount != 0 ? (int32_t)smem.lastIndex : -1);
    GPUTPCCompression_EncodeUnattached(param.rec.tpc.compressionTypeMask, orgCl, c.timeDiffU[outidx], c.padDiffU[outidx], preId == -1 ? nullptr : &clusters->clusters[iSector][iRow][preId]);
    uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
    uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
    compressor.truncateSignificantBitsChargeMax(qmax, param);
    compressor.truncateSignificantBitsCharge(qtot, param);
    compressor.truncateSignificantBitsWidth(sigmapad, param);
    compressor.truncateSignificantBitsWidth(sigmatime, param);
    c.qTotU[outidx] = qtot;
    c.qMaxU[outidx] = qmax;
    c.sigmaPadU[outidx] = sigmapad;
    c.sigmaTimeU[outidx] = sigmatime;
    c.flagsU[outidx] = orgCl.getFlags();
    if (storeLater >= 0) {
      sortBuffer[storeLater] = i;
    if (iThread == nThreads - 1 && count) {
      smem.lastIndex = sortBuffer[count - 1];
      smem.nCount -= count;
  if (iThread == nThreads - 1) {
    CAMath::AtomicAdd(&compressor.mMemory->nStoredUnattachedClusters, totalCount);
  return buf128[iWarp];
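// Returns true when ptr is sufficiently aligned for type T; if S's alignment already
// guarantees T's alignment, the check reduces to a compile-time constant.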
template <typename T, typename S>
  if constexpr (alignof(S) >= alignof(T)) {
    static_cast<void>(ptr);
  return reinterpret_cast<size_t>(ptr) % alignof(T) == 0;
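// compressorMemcpy dispatch: choose the widest vector width (128/64/32/16 bit) that
// still gives every thread at least one full vector per pass, otherwise fall back to
// the element-wise copy.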
  constexpr const int32_t vec128Elems = CpyVector<uint8_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint8_t, Vec64>::Size;
  constexpr const int32_t vec32Elems = CpyVector<uint8_t, Vec32>::Size;
  constexpr const int32_t vec16Elems = CpyVector<uint8_t, Vec16>::Size;
  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec64>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec32Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec32>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec16Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec16>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
  constexpr const int32_t vec128Elems = CpyVector<uint16_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint16_t, Vec64>::Size;
  constexpr const int32_t vec32Elems = CpyVector<uint16_t, Vec32>::Size;
  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec64>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec32Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec32>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
  constexpr const int32_t vec128Elems = CpyVector<uint32_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint32_t, Vec64>::Size;
  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint32_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint32_t, Vec64>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
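// Vectorised copy: align the destination with a scalar prologue, copy full vectors
// (staging element-by-element when the source is not vector-aligned), then handle
// the remaining elements with the basic copy.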
template <typename Scalar, typename BaseVector>
  if (not isAlignedTo<BaseVector>(dst)) {
    size_t dsti = reinterpret_cast<size_t>(dst);
    int32_t offset = (alignof(BaseVector) - dsti % alignof(BaseVector)) / sizeof(Scalar);
    compressorMemcpyBasic(dst, src, offset, nThreads, iThread);
  BaseVector* GPUrestrict() dstAligned = reinterpret_cast<BaseVector*>(dst);
  using CpyVec = CpyVector<Scalar, BaseVector>;
  uint32_t sizeAligned = size / CpyVec::Size;
  if (isAlignedTo<BaseVector>(src)) {
    const BaseVector* GPUrestrict() srcAligned = reinterpret_cast<const BaseVector*>(src);
    compressorMemcpyBasic(dstAligned, srcAligned, sizeAligned, nThreads, iThread);
    for (uint32_t i = iThread; i < sizeAligned; i += nThreads) {
      for (uint32_t j = 0; j < CpyVec::Size; j++) {
        buf.elems[j] = src[i * CpyVec::Size + j];
      dstAligned[i] = buf.all;
  int32_t leftovers = size % CpyVec::Size;
  compressorMemcpyBasic(dst + size - leftovers, src + size - leftovers, leftovers, nThreads, iThread);
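// Basic copy: the range is split into contiguous chunks per block, and the threads of
// each block stride over their chunk [start, end).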
  uint32_t start = (size + nBlocks - 1) / nBlocks * iBlock + iThread;
  uint32_t end = CAMath::Min(size, (size + nBlocks - 1) / nBlocks * (iBlock + 1));
template <typename V, typename T, typename S>
  uint32_t dstOffset = 0;
  T* bufT = reinterpret_cast<T*>(buf);
  constexpr const int32_t bufSize = GPUCA_WARP_SIZE;
  constexpr const int32_t bufTSize = bufSize * sizeof(V) / sizeof(T);
  for (uint32_t i = 0; i < nEntries; i++) {
    uint32_t srcOffset = (srcOffsets[i] * scaleBase1024 / 1024) + diff;
    uint32_t srcSize = nums[i] - diff;
    if (dstAligned == nullptr) {
      if (not isAlignedTo<V>(dst)) {
        size_t dsti = reinterpret_cast<size_t>(dst);
        uint32_t offset = (alignof(V) - dsti % alignof(V)) / sizeof(T);
        compressorMemcpyBasic(dst, src + srcOffset, offset, nLanes, iLane);
      if (isAlignedTo<V>(dst)) {
        dstAligned = reinterpret_cast<V*>(dst);
    while (srcPos < srcSize) {
      uint32_t shmElemsLeft = bufTSize - shmPos;
      uint32_t srcElemsLeft = srcSize - srcPos;
      uint32_t size = CAMath::Min(srcElemsLeft, shmElemsLeft);
      compressorMemcpyBasic(bufT + shmPos, src + srcOffset + srcPos, size, nLanes, iLane);
      if (shmPos >= bufTSize) {
        compressorMemcpyBasic(dstAligned + dstOffset, buf, bufSize, nLanes, iLane);
  compressorMemcpyBasic(reinterpret_cast<T*>(dstAligned + dstOffset), bufT, shmPos, nLanes, iLane);
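// calculateWarpOffsets: sums the cluster counts preceding this block and prefix-sums
// the per-warp counts in shared memory, so every warp knows where its output starts.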
  uint32_t blockOffset = 0;
  int32_t iThread = nLanes * iWarp + iLane;
  int32_t nThreads = nLanes * nWarps;
  uint32_t blockStart = work_group_broadcast(start, 0);
  for (uint32_t i = iThread; i < blockStart; i += nThreads) {
    blockOffset += nums[i];
  blockOffset = work_group_reduce_add(blockOffset);
  for (uint32_t i = start + iLane; i < end; i += nLanes) {
  if (iWarp > -1 && iLane == nLanes - 1) {
    smem.warpOffset[iWarp] = offset;
  offset = (iWarp <= 0) ? 0 : smem.warpOffset[iWarp - 1];
  return offset + blockOffset;
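// Unbuffered gather: rows and tracks are divided among the warps of the block; each
// warp copies its share of the per-column arrays with compressorMemcpy.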
  int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  int32_t iWarp = iThread / GPUCA_WARP_SIZE;
  int32_t nLanes = GPUCA_WARP_SIZE;
  int32_t iLane = iThread % GPUCA_WARP_SIZE;
  uint32_t rowsPerWarp = (nRows + nWarps - 1) / nWarps;
  uint32_t rowStart = rowsPerWarp * iWarp;
  uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
  if (rowStart >= nRows) {
  uint32_t rowsOffset = calculateWarpOffsets(smem, compressor.mPtrs.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
  for (uint32_t i = sectorStart; i <= sectorEnd && i < compressor.NSECTORS; i++) {
    for (uint32_t j = ((i == sectorStart) ? sectorRowStart : 0); j < ((i == sectorEnd) ? sectorRowEnd : GPUCA_ROW_COUNT); j++) {
  uint32_t trackStart = tracksPerWarp * iWarp;
  uint32_t tracksOffset = calculateWarpOffsets(smem, compressor.mPtrs.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
  for (uint32_t i = trackStart; i < trackEnd; i += nLanes) {
    uint32_t nTrackClusters = 0;
    uint32_t srcOffset = 0;
    if (i + iLane < trackEnd) {
    smem.unbuffered.sizes[iWarp][iLane] = nTrackClusters;
    smem.unbuffered.srcOffsets[iWarp][iLane] = srcOffset;
    uint32_t elems = (i + nLanes < trackEnd) ? nLanes : (trackEnd - i);
    for (uint32_t j = 0; j < elems; j++) {
      nTrackClusters = smem.unbuffered.sizes[iWarp][j];
      srcOffset = smem.unbuffered.srcOffsets[iWarp][j];
      uint32_t idx = i + j;
      compressorMemcpy(compressor.mOutput->qTotA + tracksOffset, compressor.mPtrs.qTotA + srcOffset, nTrackClusters, nLanes, iLane);
      compressorMemcpy(compressor.mOutput->qMaxA + tracksOffset, compressor.mPtrs.qMaxA + srcOffset, nTrackClusters, nLanes, iLane);
      compressorMemcpy(compressor.mOutput->flagsA + tracksOffset, compressor.mPtrs.flagsA + srcOffset, nTrackClusters, nLanes, iLane);
      compressorMemcpy(compressor.mOutput->rowDiffA + tracksOffset - idx, compressor.mPtrs.rowDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
      compressorMemcpy(compressor.mOutput->padResA + tracksOffset - idx, compressor.mPtrs.padResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
      compressorMemcpy(compressor.mOutput->timeResA + tracksOffset - idx, compressor.mPtrs.timeResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
      tracksOffset += nTrackClusters;
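// Buffered gather (template parameter V selects the staging vector width): work is
// distributed over all blocks in units of global warps, and every column is copied
// through the shared-memory buffer of compressorMemcpyBuffered.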
  int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  int32_t iWarp = iThread / GPUCA_WARP_SIZE;
  int32_t nGlobalWarps = nWarps * nBlocks;
  int32_t iGlobalWarp = nWarps * iBlock + iWarp;
  int32_t nLanes = GPUCA_WARP_SIZE;
  int32_t iLane = iThread % GPUCA_WARP_SIZE;
  auto& input = compressor.mPtrs;
  uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
  uint32_t rowStart = rowsPerWarp * iGlobalWarp;
  uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
  if (rowStart >= nRows) {
  rowsPerWarp = rowEnd - rowStart;
  uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
  uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
  uint32_t trackStart = tracksPerWarp * iGlobalWarp;
  uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
  if (trackStart >= nStoredTracks) {
  tracksPerWarp = trackEnd - trackStart;
  uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
  const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
  const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;
  auto* buf = smem.getBuffer<V>(iWarp);
  compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
  compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  uint32_t tracksOffsetDiff = tracksOffset - trackStart;
  compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
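// Multi-block gather: one branch copies the counter and per-track header arrays,
// blocks with odd index gather the unattached-cluster columns, and the remaining
// blocks gather the attached-cluster columns.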
  const auto& input = compressor.mPtrs;
  auto* output = compressor.mOutput;
  const int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  const int32_t iWarp = iThread / GPUCA_WARP_SIZE;
  const int32_t nLanes = GPUCA_WARP_SIZE;
  const int32_t iLane = iThread % GPUCA_WARP_SIZE;
    compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
  } else if (iBlock & 1) {
    const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
    const uint32_t iGlobalWarp = nWarps * (iBlock - 1) / 2 + iWarp;
    uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
    uint32_t rowStart = rowsPerWarp * iGlobalWarp;
    uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
    if (rowStart >= nRows) {
    rowsPerWarp = rowEnd - rowStart;
    const uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
    const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
    const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;
    compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
    const uint32_t iGlobalWarp = nWarps * (iBlock / 2 - 1) + iWarp;
    const uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
    uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
    uint32_t trackStart = tracksPerWarp * iGlobalWarp;
    uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
    if (trackStart >= nStoredTracks) {
    tracksPerWarp = trackEnd - trackStart;
    const uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
    const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
    const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;
    compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    uint32_t tracksOffsetDiff = tracksOffset - trackStart;
    compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
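// Kernel entry points: the buffered gather variants are instantiated for 32-, 64- and
// 128-bit staging vectors, and gatherMulti distributes the work over multiple blocks.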
  gatherBuffered<Vec32>(nBlocks, nThreads, iBlock, iThread, smem, processors);
  gatherBuffered<Vec64>(nBlocks, nThreads, iBlock, iThread, smem, processors);
  gatherBuffered<Vec128>(nBlocks, nThreads, iBlock, iThread, smem, processors);
  gatherMulti(nBlocks, nThreads, iBlock, iThread, smem, processors);