GPUTPCCompressionKernels.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file GPUTPCCompressionKernels.cxx
/// \author David Rohr

#include "GPUTPCCompressionKernels.h"
#include "GPUConstantMem.h"
#include "GPUO2DataTypes.h"
#include "GPUParam.h"
#include "GPUCommonAlgorithm.h"
#include "GPUTPCCompressionTrackModel.h"
#include "GPUTPCClusterRejection.h"
#include "GPUTPCCompressionKernels.inc"

using namespace o2::gpu;
using namespace o2::tpc;

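// Step 0 (attached clusters): one thread per merged track. Clusters whose main
// attachment is this track are either rejected (depending on the rejection
// strategy) or encoded with the track-model fit, storing row/sector/pad/time
// residuals. Either way they are flagged in mClusterStatus so that step 1
// does not store them again as unattached clusters.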
template <>
GPUdii() void GPUTPCCompressionKernels::Thread<GPUTPCCompressionKernels::step0attached>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& processors)
{
  const GPUTrackingInOutPointers& GPUrestrict() ioPtrs = processors.ioPtrs;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = ioPtrs.clustersNative;
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const GPUParam& GPUrestrict() param = processors.param;

  int32_t myTrack = 0;
  for (uint32_t i = get_global_id(0); i < ioPtrs.nMergedTracks; i += get_global_size(0)) {
    const GPUTPCGMMergedTrack& GPUrestrict() trk = ioPtrs.mergedTracks[i];
    if (!trk.OK()) {
      continue;
    }
    bool rejectTrk = CAMath::Abs(trk.GetParam().GetQPt() * processors.param.qptB5Scaler) > processors.param.rec.tpc.rejectQPtB5 || trk.MergedLooper();
    uint32_t nClustersStored = 0;
    CompressedClustersPtrs& GPUrestrict() c = compressor.mPtrs;
    uint8_t lastRow = 0, lastSector = 0;
    GPUTPCCompressionTrackModel track;
    float zOffset = 0;
    for (int32_t k = trk.NClusters() - 1; k >= 0; k--) {
      const GPUTPCGMMergedTrackHit& GPUrestrict() hit = ioPtrs.mergedTrackHits[trk.FirstClusterRef() + k];
      if (hit.state & GPUTPCGMMergedTrackHit::flagReject) {
        continue;
      }

      int32_t hitId = hit.num;
      int32_t attach = ioPtrs.mergedTrackHitAttachment[hitId];
      if ((attach & gputpcgmmergertypes::attachTrackMask) != i) {
        continue; // Main attachment to different track
      }
      bool rejectCluster = processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA && (rejectTrk || GPUTPCClusterRejection::GetIsRejected(attach));
      if (rejectCluster) {
        compressor.mClusterStatus[hitId] = 1; // Cluster rejected, do not store
        continue;
      }

      if (!(param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTrackModel)) {
        continue; // No track model compression
      }
      const ClusterNative& GPUrestrict() orgCl = clusters->clusters[hit.sector][hit.row][hit.num - clusters->clusterOffset[hit.sector][hit.row]];
      constexpr GPUTPCGeometry geo;
      float x = geo.Row2X(hit.row);
      float y = track.LinearPad2Y(hit.sector, orgCl.getPad(), geo.PadWidth(hit.row), geo.NPads(hit.row));
      float z = geo.LinearTime2Z(hit.sector, orgCl.getTime());
      if (nClustersStored) {
        if ((hit.sector < GPUCA_NSECTORS) ^ (lastSector < GPUCA_NSECTORS)) {
          break;
        }
        if (track.Propagate(geo.Row2X(hit.row), param.SectorParam[hit.sector].Alpha)) {
          break;
        }
      }

      compressor.mClusterStatus[hitId] = 1; // Cluster compressed in track model, do not store as difference

      int32_t cidx = trk.FirstClusterRef() + nClustersStored++;
      if (nClustersStored == 1) {
        // Quantize the track q/pt into 8 bits over the range [-20, 20]
        uint8_t qpt = fabs(trk.GetParam().GetQPt()) < 20.f ? (trk.GetParam().GetQPt() * (127.f / 20.f) + 127.5f) : (trk.GetParam().GetQPt() > 0 ? 254 : 0);
        zOffset = z;
        track.Init(x, y, z - zOffset, param.SectorParam[hit.sector].Alpha, qpt, param);

        myTrack = CAMath::AtomicAdd(&compressor.mMemory->nStoredTracks, 1u);
        compressor.mAttachedClusterFirstIndex[myTrack] = trk.FirstClusterRef();
        c.qPtA[myTrack] = qpt;
        c.rowA[myTrack] = hit.row;
        c.sliceA[myTrack] = hit.sector;
        c.timeA[myTrack] = orgCl.getTimePacked();
        c.padA[myTrack] = orgCl.padPacked;
      } else {
        uint32_t row = hit.row;
        uint32_t sector = hit.sector;

        if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionDifferences) {
          // Store row / sector as wrap-around differences to the previous cluster
          if (lastRow > row) {
            row += GPUCA_ROW_COUNT;
          }
          row -= lastRow;
          if (lastSector > sector) {
            sector += compressor.NSECTORS;
          }
          sector -= lastSector;
        }
        c.rowDiffA[cidx] = row;
        c.sliceLegDiffA[cidx] = sector;
        float pad = CAMath::Max(0.f, CAMath::Min((float)geo.NPads(GPUCA_ROW_COUNT - 1), track.LinearY2Pad(hit.sector, track.Y(), geo.PadWidth(hit.row), geo.NPads(hit.row))));
        c.padResA[cidx] = orgCl.padPacked - orgCl.packPad(pad);
        float time = CAMath::Max(0.f, geo.LinearZ2Time(hit.sector, track.Z() + zOffset));
        c.timeResA[cidx] = (orgCl.getTimePacked() - orgCl.packTime(time)) & 0xFFFFFF;
      }
      uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
      uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
      if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTruncate) {
        compressor.truncateSignificantBitsChargeMax(qmax, param);
        compressor.truncateSignificantBitsCharge(qtot, param);
        compressor.truncateSignificantBitsWidth(sigmapad, param);
        compressor.truncateSignificantBitsWidth(sigmatime, param);
      }
      c.qTotA[cidx] = qtot;
      c.qMaxA[cidx] = qmax;
      c.sigmaPadA[cidx] = sigmapad;
      c.sigmaTimeA[cidx] = sigmatime;
      c.flagsA[cidx] = orgCl.getFlags();
      if (k && track.Filter(y, z - zOffset, hit.row)) {
        break;
      }
      lastRow = hit.row;
      lastSector = hit.sector;
    }
    if (nClustersStored) {
      CAMath::AtomicAdd(&compressor.mMemory->nStoredAttachedClusters, nClustersStored);
      c.nTrackClusters[myTrack] = nClustersStored;
    }
  }
}

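// Comparison functors used to sort unattached clusters within a row before
// difference encoding. The sort order is selected at runtime via
// param.rec.tpc.compressionSortOrder (except in deterministic mode).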
template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>::operator()(uint32_t a, uint32_t b) const
{
  return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
}

template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>::operator()(uint32_t a, uint32_t b) const
{
  return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
}

template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>::operator()(uint32_t a, uint32_t b) const
{
  if (mClsPtr[a].getTimePacked() >> 3 == mClsPtr[b].getTimePacked() >> 3) {
    return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
  }
  return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
}

template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>::operator()(uint32_t a, uint32_t b) const
{
  if (mClsPtr[a].padPacked >> 3 == mClsPtr[b].padPacked >> 3) {
    return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
  }
  return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
}

template <> // Deterministic comparison
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<4>::operator()(uint32_t a, uint32_t b) const
{
  if (mClsPtr[a].getTimePacked() != mClsPtr[b].getTimePacked()) {
    return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
  }
  if (mClsPtr[a].padPacked != mClsPtr[b].padPacked) {
    return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
  }
  return mClsPtr[a].qTot < mClsPtr[b].qTot;
}

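// Returns true if the cluster at index idx must not be stored as an unattached
// cluster: it was already handled in step 0, or it fails the configured
// rejection strategy (strategy B drops all unattached clusters, strategy A
// drops clusters attached to rejected, high-q/pt, or looping tracks).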
GPUd() bool GPUTPCCompression::rejectCluster(int32_t idx, GPUParam& GPUrestrict() param, const GPUTrackingInOutPointers& GPUrestrict() ioPtrs)
{
  if (mClusterStatus[idx]) {
    return true;
  }
  int32_t attach = ioPtrs.mergedTrackHitAttachment[idx];
  bool unattached = attach == 0;

  if (unattached) {
    if (param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyB) {
      return true;
    }
  } else if (param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA) {
    if (GPUTPCClusterRejection::GetIsRejected(attach)) {
      return true;
    }
    int32_t id = attach & gputpcgmmergertypes::attachTrackMask;
    auto& trk = ioPtrs.mergedTracks[id];
    if (CAMath::Abs(trk.GetParam().GetQPt() * param.qptB5Scaler) > param.rec.tpc.rejectQPtB5 || trk.MergedLooper()) {
      return true;
    }
  }
  return false;
}

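// Step 1 (unattached clusters): one block per sector-row. Surviving clusters
// are collected into a shared-memory buffer, optionally sorted, and then
// difference-encoded against the previous cluster of the same row, processing
// the row in chunks of GPUCA_TPC_COMP_CHUNK_SIZE entries.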
template <>
GPUdii() void GPUTPCCompressionKernels::Thread<GPUTPCCompressionKernels::step1unattached>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  const GPUTrackingInOutPointers& GPUrestrict() ioPtrs = processors.ioPtrs;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = ioPtrs.clustersNative;
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  GPUParam& GPUrestrict() param = processors.param;
  uint32_t* sortBuffer = smem.sortBuffer;
  for (int32_t iSectorRow = iBlock; iSectorRow < GPUCA_NSECTORS * GPUCA_ROW_COUNT; iSectorRow += nBlocks) {
    const uint32_t iSector = iSectorRow / GPUCA_ROW_COUNT;
    const uint32_t iRow = iSectorRow % GPUCA_ROW_COUNT;
    const uint32_t idOffset = clusters->clusterOffset[iSector][iRow];
    const uint32_t idOffsetOut = clusters->clusterOffset[iSector][iRow] * compressor.mMaxClusterFactorBase1024 / 1024; // 32 bit enough for number of clusters per row * 1024
    const uint32_t idOffsetOutMax = ((const uint32_t*)clusters->clusterOffset[iSector])[iRow + 1] * compressor.mMaxClusterFactorBase1024 / 1024; // Array out of bounds access is ok, since it goes to the correct nClustersTotal
    if (iThread == nThreads - 1) {
      smem.nCount = 0;
    }
    uint32_t totalCount = 0;
    GPUbarrier();

    CompressedClustersPtrs& GPUrestrict() c = compressor.mPtrs;

    const uint32_t nn = CAMath::nextMultipleOf<GPUCA_GET_THREAD_COUNT(GPUCA_LB_GPUTPCCompressionKernels_step1unattached)>(clusters->nClusters[iSector][iRow]);
    for (uint32_t i = iThread; i < nn + nThreads; i += nThreads) {
      const int32_t idx = idOffset + i;
      int32_t storeCluster = i < clusters->nClusters[iSector][iRow] && !compressor.rejectCluster(idx, param, ioPtrs);

      GPUbarrier();
      int32_t myIndex = work_group_scan_inclusive_add(storeCluster);
      int32_t storeLater = -1;
      if (storeCluster) {
        if (smem.nCount + myIndex <= GPUCA_TPC_COMP_CHUNK_SIZE) {
          sortBuffer[smem.nCount + myIndex - 1] = i;
        } else {
          storeLater = smem.nCount + myIndex - 1 - GPUCA_TPC_COMP_CHUNK_SIZE;
        }
      }
      GPUbarrier();
      if (iThread == nThreads - 1) {
        smem.nCount += myIndex;
      }
      GPUbarrier();

      if (smem.nCount < GPUCA_TPC_COMP_CHUNK_SIZE && i < nn) {
        continue;
      }

      uint32_t count = CAMath::Min(smem.nCount, (uint32_t)GPUCA_TPC_COMP_CHUNK_SIZE);
      if (idOffsetOut + totalCount + count > idOffsetOutMax) {
        if (iThread == nThreads - 1) {
          compressor.raiseError(GPUErrors::ERROR_COMPRESSION_ROW_HIT_OVERFLOW, iSector * 1000 + iRow, idOffsetOut + totalCount + count, idOffsetOutMax);
        }
        break;
      }
      if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionDifferences) {
#ifdef GPUCA_GPUCODE
        static_assert(GPUCA_GET_THREAD_COUNT(GPUCA_LB_GPUTPCCompressionKernels_step1unattached) * 2 <= GPUCA_TPC_COMP_CHUNK_SIZE);
#endif
#ifdef GPUCA_DETERMINISTIC_MODE // Not using GPUCA_DETERMINISTIC_CODE, which is enforced in TPC compression
        CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
#else // GPUCA_DETERMINISTIC_MODE
        if (param.rec.tpc.compressionSortOrder == GPUSettings::SortZPadTime) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
        } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortZTimePad) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>(clusters->clusters[iSector][iRow]));
        } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortPad) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>(clusters->clusters[iSector][iRow]));
        } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortTime) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>(clusters->clusters[iSector][iRow]));
        }
#endif // GPUCA_DETERMINISTIC_MODE
        GPUbarrier();
      }

      for (uint32_t j = get_local_id(0); j < count; j += get_local_size(0)) {
        int32_t outidx = idOffsetOut + totalCount + j;
        const ClusterNative& GPUrestrict() orgCl = clusters->clusters[iSector][iRow][sortBuffer[j]];

        int32_t preId = j != 0 ? (int32_t)sortBuffer[j - 1] : (totalCount != 0 ? (int32_t)smem.lastIndex : -1);
        GPUTPCCompression_EncodeUnattached(param.rec.tpc.compressionTypeMask, orgCl, c.timeDiffU[outidx], c.padDiffU[outidx], preId == -1 ? nullptr : &clusters->clusters[iSector][iRow][preId]);

        uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
        uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
        if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTruncate) {
          compressor.truncateSignificantBitsChargeMax(qmax, param);
          compressor.truncateSignificantBitsCharge(qtot, param);
          compressor.truncateSignificantBitsWidth(sigmapad, param);
          compressor.truncateSignificantBitsWidth(sigmatime, param);
        }
        c.qTotU[outidx] = qtot;
        c.qMaxU[outidx] = qmax;
        c.sigmaPadU[outidx] = sigmapad;
        c.sigmaTimeU[outidx] = sigmatime;
        c.flagsU[outidx] = orgCl.getFlags();
      }

      GPUbarrier();
      if (storeLater >= 0) {
        sortBuffer[storeLater] = i;
      }
      totalCount += count;
      if (iThread == nThreads - 1 && count) {
        smem.lastIndex = sortBuffer[count - 1];
        smem.nCount -= count;
      }
    }

    if (iThread == nThreads - 1) {
      c.nSliceRowClusters[iSector * GPUCA_ROW_COUNT + iRow] = totalCount;
      CAMath::AtomicAdd(&compressor.mMemory->nStoredUnattachedClusters, totalCount);
    }
    GPUbarrier();
  }
}

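// Per-warp shared-memory staging buffers for the gather kernels, selected by
// vector type.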
template <>
GPUdi() GPUTPCCompressionGatherKernels::Vec32* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec32>(int32_t iWarp)
{
  return buf32[iWarp];
}

template <>
GPUdi() GPUTPCCompressionGatherKernels::Vec64* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec64>(int32_t iWarp)
{
  return buf64[iWarp];
}

template <>
GPUdi() GPUTPCCompressionGatherKernels::Vec128* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec128>(int32_t iWarp)
{
  return buf128[iWarp];
}

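// Check whether ptr is suitably aligned for type T (trivially true if the
// pointee type's alignment already guarantees it).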
template <typename T, typename S>
GPUdi() bool GPUTPCCompressionGatherKernels::isAlignedTo(const S* ptr)
{
  if constexpr (alignof(S) >= alignof(T)) {
    static_cast<void>(ptr);
    return true;
  } else {
    return reinterpret_cast<size_t>(ptr) % alignof(T) == 0;
  }
}

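// compressorMemcpy: parallel copy of one compressed-cluster array into the
// flat output buffer. The specializations pick the widest vectorized copy that
// still gives every thread at least one vector element, and otherwise fall
// back to the element-wise copy.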
template <>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint8_t>(uint8_t* GPUrestrict() dst, const uint8_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  constexpr const int32_t vec128Elems = CpyVector<uint8_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint8_t, Vec64>::Size;
  constexpr const int32_t vec32Elems = CpyVector<uint8_t, Vec32>::Size;
  constexpr const int32_t vec16Elems = CpyVector<uint8_t, Vec16>::Size;

  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec64>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec32Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec32>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec16Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec16>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
}

template <>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint16_t>(uint16_t* GPUrestrict() dst, const uint16_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  constexpr const int32_t vec128Elems = CpyVector<uint16_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint16_t, Vec64>::Size;
  constexpr const int32_t vec32Elems = CpyVector<uint16_t, Vec32>::Size;

  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec64>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec32Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec32>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
}

template <>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint32_t>(uint32_t* GPUrestrict() dst, const uint32_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  constexpr const int32_t vec128Elems = CpyVector<uint32_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint32_t, Vec64>::Size;

  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint32_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint32_t, Vec64>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
}

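// Vectorized copy: align dst to the vector type first, copy the bulk as
// BaseVector loads/stores (element-wise gather on the source side if src is
// misaligned), and copy the remaining tail element-wise.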
template <typename Scalar, typename BaseVector>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyVectorised(Scalar* dst, const Scalar* src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  if (not isAlignedTo<BaseVector>(dst)) {
    size_t dsti = reinterpret_cast<size_t>(dst);
    int32_t offset = (alignof(BaseVector) - dsti % alignof(BaseVector)) / sizeof(Scalar);
    compressorMemcpyBasic(dst, src, offset, nThreads, iThread);
    src += offset;
    dst += offset;
    size -= offset;
  }

  BaseVector* GPUrestrict() dstAligned = reinterpret_cast<BaseVector*>(dst);

  using CpyVec = CpyVector<Scalar, BaseVector>;
  uint32_t sizeAligned = size / CpyVec::Size;

  if (isAlignedTo<BaseVector>(src)) {
    const BaseVector* GPUrestrict() srcAligned = reinterpret_cast<const BaseVector*>(src);
    compressorMemcpyBasic(dstAligned, srcAligned, sizeAligned, nThreads, iThread);
  } else {
    for (uint32_t i = iThread; i < sizeAligned; i += nThreads) {
      CpyVec buf;
      for (uint32_t j = 0; j < CpyVec::Size; j++) {
        buf.elems[j] = src[i * CpyVec::Size + j];
      }
      dstAligned[i] = buf.all;
    }
  }

  int32_t leftovers = size % CpyVec::Size;
  compressorMemcpyBasic(dst + size - leftovers, src + size - leftovers, leftovers, nThreads, iThread);
}

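// Plain strided copy: the index range is split evenly between blocks and each
// thread copies every nThreads-th element of its block's share.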
template <typename T>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyBasic(T* GPUrestrict() dst, const T* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread, int32_t nBlocks, int32_t iBlock)
{
  uint32_t start = (size + nBlocks - 1) / nBlocks * iBlock + iThread;
  uint32_t end = CAMath::Min(size, (size + nBlocks - 1) / nBlocks * (iBlock + 1));
  for (uint32_t i = start; i < end; i += nThreads) {
    dst[i] = src[i];
  }
}

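// Buffered gather: concatenates nEntries variable-length source ranges (given
// by nums and srcOffsets) into dst. Data is staged in a per-warp shared-memory
// buffer of GPUCA_WARP_SIZE vectors so that the writes to dst are
// vector-aligned; diff skips the first element(s) of each range, and
// scaleBase1024 rescales the source offsets (used for the unattached-cluster
// arrays).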
template <typename V, typename T, typename S>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyBuffered(V* buf, T* GPUrestrict() dst, const T* GPUrestrict() src, const S* GPUrestrict() nums, const uint32_t* GPUrestrict() srcOffsets, uint32_t nEntries, int32_t nLanes, int32_t iLane, int32_t diff, size_t scaleBase1024)
{
  int32_t shmPos = 0;
  uint32_t dstOffset = 0;
  V* GPUrestrict() dstAligned = nullptr;

  T* bufT = reinterpret_cast<T*>(buf);
  constexpr const int32_t bufSize = GPUCA_WARP_SIZE;
  constexpr const int32_t bufTSize = bufSize * sizeof(V) / sizeof(T);

  for (uint32_t i = 0; i < nEntries; i++) {
    uint32_t srcPos = 0;
    uint32_t srcOffset = (srcOffsets[i] * scaleBase1024 / 1024) + diff;
    uint32_t srcSize = nums[i] - diff;

    if (dstAligned == nullptr) {
      if (not isAlignedTo<V>(dst)) {
        size_t dsti = reinterpret_cast<size_t>(dst);
        uint32_t offset = (alignof(V) - dsti % alignof(V)) / sizeof(T);
        offset = CAMath::Min<uint32_t>(offset, srcSize);
        compressorMemcpyBasic(dst, src + srcOffset, offset, nLanes, iLane);
        dst += offset;
        srcPos += offset;
      }
      if (isAlignedTo<V>(dst)) {
        dstAligned = reinterpret_cast<V*>(dst);
      }
    }
    while (srcPos < srcSize) {
      uint32_t shmElemsLeft = bufTSize - shmPos;
      uint32_t srcElemsLeft = srcSize - srcPos;
      uint32_t size = CAMath::Min(srcElemsLeft, shmElemsLeft);
      compressorMemcpyBasic(bufT + shmPos, src + srcOffset + srcPos, size, nLanes, iLane);
      srcPos += size;
      shmPos += size;
      GPUbarrierWarp();

      if (shmPos >= bufTSize) {
        compressorMemcpyBasic(dstAligned + dstOffset, buf, bufSize, nLanes, iLane);
        dstOffset += bufSize;
        shmPos = 0;
        GPUbarrierWarp();
      }
    }
  }

  compressorMemcpyBasic(reinterpret_cast<T*>(dstAligned + dstOffset), bufT, shmPos, nLanes, iLane);
}

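// Computes the output offset at which this warp starts writing: the total of
// nums[] before the block's first entry plus the totals of the ranges handled
// by the preceding warps of this block (accumulated via shared memory).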
template <typename T>
GPUdi() uint32_t GPUTPCCompressionGatherKernels::calculateWarpOffsets(GPUSharedMemory& smem, T* nums, uint32_t start, uint32_t end, int32_t nWarps, int32_t iWarp, int32_t nLanes, int32_t iLane)
{
  uint32_t blockOffset = 0;
  int32_t iThread = nLanes * iWarp + iLane;
  int32_t nThreads = nLanes * nWarps;
  uint32_t blockStart = work_group_broadcast(start, 0);
  for (uint32_t i = iThread; i < blockStart; i += nThreads) {
    blockOffset += nums[i];
  }
  blockOffset = work_group_reduce_add(blockOffset);

  uint32_t offset = 0;
  for (uint32_t i = start + iLane; i < end; i += nLanes) {
    offset += nums[i];
  }
  offset = work_group_scan_inclusive_add(offset);
  if (iWarp > -1 && iLane == nLanes - 1) {
    smem.warpOffset[iWarp] = offset;
  }
  GPUbarrier();
  offset = (iWarp <= 0) ? 0 : smem.warpOffset[iWarp - 1];
  GPUbarrier();

  return offset + blockOffset;
}

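// Gather variant without shared-memory staging: block 0 copies the per-track
// scalars and the unattached-cluster arrays, block 1 copies the per-cluster
// arrays of the attached clusters, both using plain compressorMemcpy.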
template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::unbuffered>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;

  int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  int32_t iWarp = iThread / GPUCA_WARP_SIZE;

  int32_t nLanes = GPUCA_WARP_SIZE;
  int32_t iLane = iThread % GPUCA_WARP_SIZE;

  if (iBlock == 0) {

    uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
    uint32_t rowsPerWarp = (nRows + nWarps - 1) / nWarps;
    uint32_t rowStart = rowsPerWarp * iWarp;
    uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
    if (rowStart >= nRows) {
      rowStart = 0;
      rowEnd = 0;
    }

    uint32_t rowsOffset = calculateWarpOffsets(smem, compressor.mPtrs.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);

    compressorMemcpy(compressor.mOutput->nSliceRowClusters, compressor.mPtrs.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->nTrackClusters, compressor.mPtrs.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->qPtA, compressor.mPtrs.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->rowA, compressor.mPtrs.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->sliceA, compressor.mPtrs.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->timeA, compressor.mPtrs.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->padA, compressor.mPtrs.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);

    uint32_t sectorStart = rowStart / GPUCA_ROW_COUNT;
    uint32_t sectorEnd = rowEnd / GPUCA_ROW_COUNT;

    uint32_t sectorRowStart = rowStart % GPUCA_ROW_COUNT;
    uint32_t sectorRowEnd = rowEnd % GPUCA_ROW_COUNT;

    for (uint32_t i = sectorStart; i <= sectorEnd && i < compressor.NSECTORS; i++) {
      for (uint32_t j = ((i == sectorStart) ? sectorRowStart : 0); j < ((i == sectorEnd) ? sectorRowEnd : GPUCA_ROW_COUNT); j++) {
        uint32_t nClusters = compressor.mPtrs.nSliceRowClusters[i * GPUCA_ROW_COUNT + j];
        uint32_t clusterOffsetInCache = clusters->clusterOffset[i][j] * compressor.mMaxClusterFactorBase1024 / 1024;
        compressorMemcpy(compressor.mOutput->qTotU + rowsOffset, compressor.mPtrs.qTotU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->qMaxU + rowsOffset, compressor.mPtrs.qMaxU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->flagsU + rowsOffset, compressor.mPtrs.flagsU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->padDiffU + rowsOffset, compressor.mPtrs.padDiffU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->timeDiffU + rowsOffset, compressor.mPtrs.timeDiffU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaPadU + rowsOffset, compressor.mPtrs.sigmaPadU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaTimeU + rowsOffset, compressor.mPtrs.sigmaTimeU + clusterOffsetInCache, nClusters, nLanes, iLane);
        rowsOffset += nClusters;
      }
    }
  }

  if (iBlock == 1) {
    uint32_t tracksPerWarp = (compressor.mMemory->nStoredTracks + nWarps - 1) / nWarps;
    uint32_t trackStart = tracksPerWarp * iWarp;
    uint32_t trackEnd = CAMath::Min(compressor.mMemory->nStoredTracks, trackStart + tracksPerWarp);
    if (trackStart >= compressor.mMemory->nStoredTracks) {
      trackStart = 0;
      trackEnd = 0;
    }

    uint32_t tracksOffset = calculateWarpOffsets(smem, compressor.mPtrs.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);

    for (uint32_t i = trackStart; i < trackEnd; i += nLanes) {
      uint32_t nTrackClusters = 0;
      uint32_t srcOffset = 0;

      if (i + iLane < trackEnd) {
        nTrackClusters = compressor.mPtrs.nTrackClusters[i + iLane];
        srcOffset = compressor.mAttachedClusterFirstIndex[i + iLane];
      }
      smem.unbuffered.sizes[iWarp][iLane] = nTrackClusters;
      smem.unbuffered.srcOffsets[iWarp][iLane] = srcOffset;

      uint32_t elems = (i + nLanes < trackEnd) ? nLanes : (trackEnd - i);

      for (uint32_t j = 0; j < elems; j++) {
        nTrackClusters = smem.unbuffered.sizes[iWarp][j];
        srcOffset = smem.unbuffered.srcOffsets[iWarp][j];
        uint32_t idx = i + j;
        compressorMemcpy(compressor.mOutput->qTotA + tracksOffset, compressor.mPtrs.qTotA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->qMaxA + tracksOffset, compressor.mPtrs.qMaxA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->flagsA + tracksOffset, compressor.mPtrs.flagsA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaPadA + tracksOffset, compressor.mPtrs.sigmaPadA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaTimeA + tracksOffset, compressor.mPtrs.sigmaTimeA + srcOffset, nTrackClusters, nLanes, iLane);

        // First index stored with track
        compressorMemcpy(compressor.mOutput->rowDiffA + tracksOffset - idx, compressor.mPtrs.rowDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sliceLegDiffA + tracksOffset - idx, compressor.mPtrs.sliceLegDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
        compressorMemcpy(compressor.mOutput->padResA + tracksOffset - idx, compressor.mPtrs.padResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
        compressorMemcpy(compressor.mOutput->timeResA + tracksOffset - idx, compressor.mPtrs.timeResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);

        tracksOffset += nTrackClusters;
      }
    }
  }
}

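// Buffered gather: each warp of each block is assigned a contiguous range of
// sector-rows (unattached clusters) and of stored tracks (attached clusters)
// and concatenates the corresponding array pieces into the output through its
// shared-memory buffer of type V.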
template <typename V>
GPUdii() void GPUTPCCompressionGatherKernels::gatherBuffered(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{

  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;

  int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  int32_t iWarp = iThread / GPUCA_WARP_SIZE;

  int32_t nGlobalWarps = nWarps * nBlocks;
  int32_t iGlobalWarp = nWarps * iBlock + iWarp;

  int32_t nLanes = GPUCA_WARP_SIZE;
  int32_t iLane = iThread % GPUCA_WARP_SIZE;

  auto& input = compressor.mPtrs;
  auto* output = compressor.mOutput;

  uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
  uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
  uint32_t rowStart = rowsPerWarp * iGlobalWarp;
  uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
  if (rowStart >= nRows) {
    rowStart = 0;
    rowEnd = 0;
  }
  rowsPerWarp = rowEnd - rowStart;

  uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);

  uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
  uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
  uint32_t trackStart = tracksPerWarp * iGlobalWarp;
  uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
  if (trackStart >= nStoredTracks) {
    trackStart = 0;
    trackEnd = 0;
  }
  tracksPerWarp = trackEnd - trackStart;

  uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);

  if (iBlock == 0) {
    compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
  }

  const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
  const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;

  auto* buf = smem.getBuffer<V>(iWarp);

  compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);

  const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
  const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;

  compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);

  // First index stored with track
  uint32_t tracksOffsetDiff = tracksOffset - trackStart;
  compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
}

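// Multi-block gather: block 0 copies the small per-track scalars, odd blocks
// gather the unattached-cluster arrays, even blocks (>= 2) gather the
// attached-cluster arrays, all through 128-bit buffered copies.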
GPUdii() void GPUTPCCompressionGatherKernels::gatherMulti(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;
  const auto& input = compressor.mPtrs;
  auto* output = compressor.mOutput;

  const int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  const int32_t iWarp = iThread / GPUCA_WARP_SIZE;
  const int32_t nLanes = GPUCA_WARP_SIZE;
  const int32_t iLane = iThread % GPUCA_WARP_SIZE;
  auto* buf = smem.getBuffer<Vec128>(iWarp);

  if (iBlock == 0) {
    compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
  } else if (iBlock & 1) {
    const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
    const uint32_t iGlobalWarp = nWarps * (iBlock - 1) / 2 + iWarp;

    const uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
    uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
    uint32_t rowStart = rowsPerWarp * iGlobalWarp;
    uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
    if (rowStart >= nRows) {
      rowStart = 0;
      rowEnd = 0;
    }
    rowsPerWarp = rowEnd - rowStart;

    const uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
    const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
    const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;

    compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  } else {
    const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
    const uint32_t iGlobalWarp = nWarps * (iBlock / 2 - 1) + iWarp;

    const uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
    uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
    uint32_t trackStart = tracksPerWarp * iGlobalWarp;
    uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
    if (trackStart >= nStoredTracks) {
      trackStart = 0;
      trackEnd = 0;
    }
    tracksPerWarp = trackEnd - trackStart;

    const uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
    const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
    const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;

    compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);

    // First index stored with track
    uint32_t tracksOffsetDiff = tracksOffset - trackStart;
    compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  }
}

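// Kernel entry points: the buffered32/64/128 variants differ only in the
// vector width used for the shared-memory staging buffer.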
template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered32>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherBuffered<Vec32>(nBlocks, nThreads, iBlock, iThread, smem, processors);
}

template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered64>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherBuffered<Vec64>(nBlocks, nThreads, iBlock, iThread, smem, processors);
}

template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered128>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherBuffered<Vec128>(nBlocks, nThreads, iBlock, iThread, smem, processors);
}

template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::multiBlock>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherMulti(nBlocks, nThreads, iBlock, iThread, smem, processors);
}