GPUTPCCompressionKernels.cxx
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
16#include "GPUConstantMem.h"
17#include "GPUO2DataTypes.h"
18#include "GPUParam.h"
19#include "GPUCommonAlgorithm.h"
22#include "GPUTPCCompressionKernels.inc"
23
24using namespace o2::gpu;
25using namespace o2::tpc;
26
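// step0attached: compress clusters that are attached to merged tracks using the track model.
// Clusters failing the rejection strategy are marked in mClusterStatus and dropped; the remaining
// attached clusters are marked as well (so step 1 skips them) and stored either in absolute
// coordinates (first cluster of a stored track) or as row/sector differences plus pad/time
// residuals with respect to the propagated track model.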
27template <>
28GPUdii() void GPUTPCCompressionKernels::Thread<GPUTPCCompressionKernels::step0attached>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& processors)
29{
30 const GPUTrackingInOutPointers& GPUrestrict() ioPtrs = processors.ioPtrs;
32 GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
33 const GPUParam& GPUrestrict() param = processors.param;
34
35 int32_t myTrack = 0;
36 for (uint32_t i = get_global_id(0); i < ioPtrs.nMergedTracks; i += get_global_size(0)) {
38 const GPUTPCGMMergedTrack& GPUrestrict() trk = ioPtrs.mergedTracks[i];
39 if (!trk.OK()) {
40 continue;
41 }
42 bool rejectTrk = GPUTPCClusterRejection::IsTrackRejected(trk, param);
43 uint32_t nClustersStored = 0;
44 CompressedClustersPtrs& GPUrestrict() c = compressor.mPtrs;
45 uint8_t lastRow = 0, lastSector = 0;
46 GPUTPCCompressionTrackModel track;
47 float zOffset = 0;
48 for (int32_t k = trk.NClusters() - 1; k >= 0; k--) {
49 const GPUTPCGMMergedTrackHit& GPUrestrict() hit = ioPtrs.mergedTrackHits[trk.FirstClusterRef() + k];
50 if (hit.state & GPUTPCGMMergedTrackHit::flagReject) {
51 continue;
52 }
53
54 int32_t hitId = hit.num;
55 int32_t attach = ioPtrs.mergedTrackHitAttachment[hitId];
56 if ((attach & gputpcgmmergertypes::attachTrackMask) != i) {
57 continue; // Main attachment to different track
58 }
59 bool rejectCluster = processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA && !(attach & gputpcgmmergertypes::attachProtect) && (rejectTrk || GPUTPCClusterRejection::GetIsRejected(attach));
60 if (rejectCluster) {
61 compressor.mClusterStatus[hitId] = 1; // Cluster rejected, do not store
62 continue;
63 } else if (processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA && rejectTrk) {
64 continue;
65 }
66
67 if (!(param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTrackModel)) {
68 continue; // No track model compression
69 }
70 const ClusterNative& GPUrestrict() orgCl = clusters -> clusters[hit.sector][hit.row][hit.num - clusters->clusterOffset[hit.sector][hit.row]];
71 constexpr GPUTPCGeometry geo;
72 float x = geo.Row2X(hit.row);
73 float y = track.LinearPad2Y(hit.sector, orgCl.getPad(), geo.PadWidth(hit.row), geo.NPads(hit.row));
74 float z = geo.LinearTime2Z(hit.sector, orgCl.getTime());
75 if (nClustersStored) {
76 if ((hit.sector < GPUCA_NSECTORS) ^ (lastSector < GPUCA_NSECTORS)) {
77 break;
78 }
79 if (track.Propagate(geo.Row2X(hit.row), param.SectorParam[hit.sector].Alpha)) {
80 break;
81 }
82 }
83
84 compressor.mClusterStatus[hitId] = 1; // Cluster compressed in track model, do not store as difference
85
86 int32_t cidx = trk.FirstClusterRef() + nClustersStored++;
87 if (nClustersStored == 1) {
88 uint8_t qpt = fabs(trk.GetParam().GetQPt()) < 20.f ? (trk.GetParam().GetQPt() * (127.f / 20.f) + 127.5f) : (trk.GetParam().GetQPt() > 0 ? 254 : 0);
89 zOffset = z;
90 track.Init(x, y, z - zOffset, param.SectorParam[hit.sector].Alpha, qpt, param);
91
92 myTrack = CAMath::AtomicAdd(&compressor.mMemory->nStoredTracks, 1u);
93 compressor.mAttachedClusterFirstIndex[myTrack] = trk.FirstClusterRef();
94 c.qPtA[myTrack] = qpt;
95 c.rowA[myTrack] = hit.row;
96 c.sliceA[myTrack] = hit.sector;
97 c.timeA[myTrack] = orgCl.getTimePacked();
98 c.padA[myTrack] = orgCl.padPacked;
99 } else {
100 uint32_t row = hit.row;
101 uint32_t sector = hit.sector;
102
103 if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionDifferences) {
104 if (lastRow > row) {
105 row += GPUCA_ROW_COUNT;
106 }
107 row -= lastRow;
108 if (lastSector > sector) {
109 sector += compressor.NSECTORS;
110 }
111 sector -= lastSector;
112 }
113 c.rowDiffA[cidx] = row;
114 c.sliceLegDiffA[cidx] = sector;
115 float pad = CAMath::Max(0.f, CAMath::Min((float)geo.NPads(GPUCA_ROW_COUNT - 1), track.LinearY2Pad(hit.sector, track.Y(), geo.PadWidth(hit.row), geo.NPads(hit.row))));
116 c.padResA[cidx] = orgCl.padPacked - orgCl.packPad(pad);
117 float time = CAMath::Max(0.f, geo.LinearZ2Time(hit.sector, track.Z() + zOffset));
118 c.timeResA[cidx] = (orgCl.getTimePacked() - orgCl.packTime(time)) & 0xFFFFFF;
119 }
120 uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
121 uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
122 if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTruncate) {
123 compressor.truncateSignificantBitsChargeMax(qmax, param);
124 compressor.truncateSignificantBitsCharge(qtot, param);
125 compressor.truncateSignificantBitsWidth(sigmapad, param);
126 compressor.truncateSignificantBitsWidth(sigmatime, param);
127 }
128 c.qTotA[cidx] = qtot;
129 c.qMaxA[cidx] = qmax;
130 c.sigmaPadA[cidx] = sigmapad;
131 c.sigmaTimeA[cidx] = sigmatime;
132 c.flagsA[cidx] = orgCl.getFlags();
133 if (k && track.Filter(y, z - zOffset, hit.row)) {
134 break;
135 }
136 lastRow = hit.row;
137 lastSector = hit.sector;
138 }
139 if (nClustersStored) {
140 CAMath::AtomicAdd(&compressor.mMemory->nStoredAttachedClusters, nClustersStored);
141 c.nTrackClusters[myTrack] = nClustersStored;
142 }
143 }
144}
145
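// Comparison functors used to sort unattached clusters within a row before difference encoding.
// SortZTimePad / SortZPadTime compare the coarse (packed value >> 3) coordinate first; functor 4
// imposes a fully deterministic ordering on time, pad and charge.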
146template <>
147GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>::operator()(uint32_t a, uint32_t b) const
148{
149 return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
150}
151
152template <>
153GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>::operator()(uint32_t a, uint32_t b) const
154{
155 return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
156}
157
158template <>
159GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>::operator()(uint32_t a, uint32_t b) const
160{
161 if (mClsPtr[a].getTimePacked() >> 3 == mClsPtr[b].getTimePacked() >> 3) {
162 return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
163 }
164 return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
165}
166
167template <>
168GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>::operator()(uint32_t a, uint32_t b) const
169{
170 if (mClsPtr[a].padPacked >> 3 == mClsPtr[b].padPacked >> 3) {
171 return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
172 }
173 return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
174}
175
176template <> // Deterministic comparison
177GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<4>::operator()(uint32_t a, uint32_t b) const
178{
179 if (mClsPtr[a].getTimePacked() != mClsPtr[b].getTimePacked()) {
180 return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
181 }
182 if (mClsPtr[a].padPacked != mClsPtr[b].padPacked) {
183 return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
184 }
185 return mClsPtr[a].qTot < mClsPtr[b].qTot;
186}
187
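// Returns true if the cluster at index idx must not be stored in the unattached stream: either it
// was already handled in step 0 (mClusterStatus set), or it is dropped by the rejection strategy
// (all unattached clusters for strategy B; rejected attachments and clusters of rejected tracks
// for strategy A).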
188GPUd() bool GPUTPCCompression::rejectCluster(int32_t idx, const GPUParam& GPUrestrict() param, const GPUTrackingInOutPointers& GPUrestrict() ioPtrs) const
189{
190 if (mClusterStatus[idx]) {
191 return true;
192 }
193 int32_t attach = ioPtrs.mergedTrackHitAttachment[idx];
194 bool unattached = attach == 0;
195
196 if (unattached) {
197 if (param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyB) {
198 return true;
199 }
200 } else if (param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA) {
201 if (GPUTPCClusterRejection::GetIsRejected(attach)) {
202 return true;
203 }
204 if (attach & gputpcgmmergertypes::attachProtect) { // protected attachments are always kept
205 return false;
206 }
207 int32_t id = attach & gputpcgmmergertypes::attachTrackMask;
208 auto& trk = ioPtrs.mergedTracks[id];
209 if (GPUTPCClusterRejection::IsTrackRejected(trk, param)) {
210 return true;
211 }
212 }
213 return false;
214}
215
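// step1unattached: store all clusters not handled in step 0. Per sector/row, the indices of the
// surviving clusters are collected in shared memory in chunks of GPUCA_TPC_COMP_CHUNK_SIZE,
// optionally sorted according to the configured sort order, and encoded as differences to the
// preceding cluster together with their charge, width and flags.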
216template <>
217GPUdii() void GPUTPCCompressionKernels::Thread<GPUTPCCompressionKernels::step1unattached>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
218{
219 const GPUTrackingInOutPointers& GPUrestrict() ioPtrs = processors.ioPtrs;
220 const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = ioPtrs.clustersNative;
221 GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
222 GPUParam& GPUrestrict() param = processors.param;
223 uint32_t* sortBuffer = smem.sortBuffer;
224 for (int32_t iSectorRow = iBlock; iSectorRow < GPUCA_NSECTORS * GPUCA_ROW_COUNT; iSectorRow += nBlocks) {
225 const uint32_t iSector = iSectorRow / GPUCA_ROW_COUNT;
226 const uint32_t iRow = iSectorRow % GPUCA_ROW_COUNT;
227 const uint32_t idOffset = clusters->clusterOffset[iSector][iRow];
228 const uint32_t idOffsetOut = clusters->clusterOffset[iSector][iRow] * compressor.mMaxClusterFactorBase1024 / 1024; // 32 bit enough for number of clusters per row * 1024
229 const uint32_t idOffsetOutMax = ((const uint32_t*)clusters->clusterOffset[iSector])[iRow + 1] * compressor.mMaxClusterFactorBase1024 / 1024; // Array out of bounds access is ok, since it goes to the correct nClustersTotal
230 if (iThread == nThreads - 1) {
231 smem.nCount = 0;
232 }
233 uint32_t totalCount = 0;
234 GPUbarrier();
235
236 CompressedClustersPtrs& GPUrestrict() c = compressor.mPtrs;
237
238 const uint32_t nn = CAMath::nextMultipleOf<GPUCA_GET_THREAD_COUNT(GPUCA_LB_GPUTPCCompressionKernels_step1unattached)>(clusters->nClusters[iSector][iRow]);
239 for (uint32_t i = iThread; i < nn + nThreads; i += nThreads) {
240 const int32_t idx = idOffset + i;
241 int32_t storeCluster = i < clusters->nClusters[iSector][iRow] && !compressor.rejectCluster(idx, param, ioPtrs);
242
243 GPUbarrier();
244 int32_t myIndex = work_group_scan_inclusive_add(storeCluster);
245 int32_t storeLater = -1;
246 if (storeCluster) {
247 if (smem.nCount + myIndex <= GPUCA_TPC_COMP_CHUNK_SIZE) {
248 sortBuffer[smem.nCount + myIndex - 1] = i;
249 } else {
250 storeLater = smem.nCount + myIndex - 1 - GPUCA_TPC_COMP_CHUNK_SIZE;
251 }
252 }
253 GPUbarrier();
254 if (iThread == nThreads - 1) {
255 smem.nCount += myIndex;
256 }
257 GPUbarrier();
258
259 if (smem.nCount < GPUCA_TPC_COMP_CHUNK_SIZE && i < nn) {
260 continue;
261 }
262
263 uint32_t count = CAMath::Min(smem.nCount, (uint32_t)GPUCA_TPC_COMP_CHUNK_SIZE);
264 if (idOffsetOut + totalCount + count > idOffsetOutMax) {
265 if (iThread == nThreads - 1) {
266 compressor.raiseError(GPUErrors::ERROR_COMPRESSION_ROW_HIT_OVERFLOW, iSector * 1000 + iRow, idOffsetOut + totalCount + count, idOffsetOutMax);
267 }
268 break;
269 }
270 if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionDifferences) {
271#ifdef GPUCA_GPUCODE
272 static_assert(GPUCA_GET_THREAD_COUNT(GPUCA_LB_GPUTPCCompressionKernels_step1unattached) * 2 <= GPUCA_TPC_COMP_CHUNK_SIZE);
273#endif
274#ifdef GPUCA_DETERMINISTIC_MODE // Not using GPUCA_DETERMINISTIC_CODE, which is enforced in TPC compression
275 CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
276#else // GPUCA_DETERMINISTIC_MODE
277 if (param.rec.tpc.compressionSortOrder == GPUSettings::SortZPadTime) {
278 CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
279 } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortZTimePad) {
280 CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>(clusters->clusters[iSector][iRow]));
281 } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortPad) {
282 CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>(clusters->clusters[iSector][iRow]));
283 } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortTime) {
284 CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>(clusters->clusters[iSector][iRow]));
285 }
286#endif // GPUCA_DETERMINISTIC_MODE
287 GPUbarrier();
288 }
289
290 for (uint32_t j = get_local_id(0); j < count; j += get_local_size(0)) {
291 int32_t outidx = idOffsetOut + totalCount + j;
292 const ClusterNative& GPUrestrict() orgCl = clusters -> clusters[iSector][iRow][sortBuffer[j]];
293
294 int32_t preId = j != 0 ? (int32_t)sortBuffer[j - 1] : (totalCount != 0 ? (int32_t)smem.lastIndex : -1);
295 GPUTPCCompression_EncodeUnattached(param.rec.tpc.compressionTypeMask, orgCl, c.timeDiffU[outidx], c.padDiffU[outidx], preId == -1 ? nullptr : &clusters->clusters[iSector][iRow][preId]);
296
297 uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
298 uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
299 if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTruncate) {
300 compressor.truncateSignificantBitsChargeMax(qmax, param);
301 compressor.truncateSignificantBitsCharge(qtot, param);
302 compressor.truncateSignificantBitsWidth(sigmapad, param);
303 compressor.truncateSignificantBitsWidth(sigmatime, param);
304 }
305 c.qTotU[outidx] = qtot;
306 c.qMaxU[outidx] = qmax;
307 c.sigmaPadU[outidx] = sigmapad;
308 c.sigmaTimeU[outidx] = sigmatime;
309 c.flagsU[outidx] = orgCl.getFlags();
310 }
311
312 GPUbarrier();
313 if (storeLater >= 0) {
314 sortBuffer[storeLater] = i;
315 }
316 totalCount += count;
317 if (iThread == nThreads - 1 && count) {
318 smem.lastIndex = sortBuffer[count - 1];
319 smem.nCount -= count;
320 }
321 }
322
323 if (iThread == nThreads - 1) {
324 c.nSliceRowClusters[iSector * GPUCA_ROW_COUNT + iRow] = totalCount;
325 CAMath::AtomicAdd(&compressor.mMemory->nStoredUnattachedClusters, totalCount);
326 }
327 GPUbarrier();
328 }
329}
330
331template <>
332GPUdi() GPUTPCCompressionGatherKernels::Vec32* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec32>(int32_t iWarp)
333{
334 return buf32[iWarp];
335}
336
337template <>
338GPUdi() GPUTPCCompressionGatherKernels::Vec64* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec64>(int32_t iWarp)
339{
340 return buf64[iWarp];
341}
342
343template <>
344GPUdi() GPUTPCCompressionGatherKernels::Vec128* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec128>(int32_t iWarp)
345{
346 return buf128[iWarp];
347}
348
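// Returns whether ptr may be accessed as type T: trivially true if the alignment of S already
// guarantees that of T, otherwise checked at runtime.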
349template <typename T, typename S>
350GPUdi() bool GPUTPCCompressionGatherKernels::isAlignedTo(const S* ptr)
351{
352 if constexpr (alignof(S) >= alignof(T)) {
353 static_cast<void>(ptr);
354 return true;
355 } else {
356 return reinterpret_cast<size_t>(ptr) % alignof(T) == 0;
357 }
358}
359
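// Type-specific compressorMemcpy overloads: pick the widest vector width (128/64/32/16 bit) for
// which every thread can copy at least one full vector, otherwise fall back to the element-wise
// compressorMemcpyBasic.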
360template <>
361GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint8_t>(uint8_t* GPUrestrict() dst, const uint8_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
362{
363 constexpr const int32_t vec128Elems = CpyVector<uint8_t, Vec128>::Size;
364 constexpr const int32_t vec64Elems = CpyVector<uint8_t, Vec64>::Size;
365 constexpr const int32_t vec32Elems = CpyVector<uint8_t, Vec32>::Size;
366 constexpr const int32_t vec16Elems = CpyVector<uint8_t, Vec16>::Size;
367
368 if (size >= uint32_t(nThreads * vec128Elems)) {
369 compressorMemcpyVectorised<uint8_t, Vec128>(dst, src, size, nThreads, iThread);
370 } else if (size >= uint32_t(nThreads * vec64Elems)) {
371 compressorMemcpyVectorised<uint8_t, Vec64>(dst, src, size, nThreads, iThread);
372 } else if (size >= uint32_t(nThreads * vec32Elems)) {
373 compressorMemcpyVectorised<uint8_t, Vec32>(dst, src, size, nThreads, iThread);
374 } else if (size >= uint32_t(nThreads * vec16Elems)) {
375 compressorMemcpyVectorised<uint8_t, Vec16>(dst, src, size, nThreads, iThread);
376 } else {
377 compressorMemcpyBasic(dst, src, size, nThreads, iThread);
378 }
379}
380
381template <>
382GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint16_t>(uint16_t* GPUrestrict() dst, const uint16_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
383{
384 constexpr const int32_t vec128Elems = CpyVector<uint16_t, Vec128>::Size;
385 constexpr const int32_t vec64Elems = CpyVector<uint16_t, Vec64>::Size;
386 constexpr const int32_t vec32Elems = CpyVector<uint16_t, Vec32>::Size;
387
388 if (size >= uint32_t(nThreads * vec128Elems)) {
389 compressorMemcpyVectorised<uint16_t, Vec128>(dst, src, size, nThreads, iThread);
390 } else if (size >= uint32_t(nThreads * vec64Elems)) {
391 compressorMemcpyVectorised<uint16_t, Vec64>(dst, src, size, nThreads, iThread);
392 } else if (size >= uint32_t(nThreads * vec32Elems)) {
393 compressorMemcpyVectorised<uint16_t, Vec32>(dst, src, size, nThreads, iThread);
394 } else {
395 compressorMemcpyBasic(dst, src, size, nThreads, iThread);
396 }
397}
398
399template <>
400GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint32_t>(uint32_t* GPUrestrict() dst, const uint32_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
401{
402 constexpr const int32_t vec128Elems = CpyVector<uint32_t, Vec128>::Size;
403 constexpr const int32_t vec64Elems = CpyVector<uint32_t, Vec64>::Size;
404
405 if (size >= uint32_t(nThreads * vec128Elems)) {
406 compressorMemcpyVectorised<uint32_t, Vec128>(dst, src, size, nThreads, iThread);
407 } else if (size >= uint32_t(nThreads * vec64Elems)) {
408 compressorMemcpyVectorised<uint32_t, Vec64>(dst, src, size, nThreads, iThread);
409 } else {
410 compressorMemcpyBasic(dst, src, size, nThreads, iThread);
411 }
412}
413
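// Vectorised copy of size elements of Scalar: an unaligned destination prefix is copied
// element-wise, the aligned bulk is copied as BaseVector chunks (gathered element-wise if the
// source is not aligned), and the remaining tail is copied element-wise.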
414template <typename Scalar, typename BaseVector>
415GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyVectorised(Scalar* dst, const Scalar* src, uint32_t size, int32_t nThreads, int32_t iThread)
416{
417 if (not isAlignedTo<BaseVector>(dst)) {
418 size_t dsti = reinterpret_cast<size_t>(dst);
419 int32_t offset = (alignof(BaseVector) - dsti % alignof(BaseVector)) / sizeof(Scalar);
420 compressorMemcpyBasic(dst, src, offset, nThreads, iThread);
421 src += offset;
422 dst += offset;
423 size -= offset;
424 }
425
426 BaseVector* GPUrestrict() dstAligned = reinterpret_cast<BaseVector*>(dst);
427
428 using CpyVec = CpyVector<Scalar, BaseVector>;
429 uint32_t sizeAligned = size / CpyVec::Size;
430
431 if (isAlignedTo<BaseVector>(src)) {
432 const BaseVector* GPUrestrict() srcAligned = reinterpret_cast<const BaseVector*>(src);
433 compressorMemcpyBasic(dstAligned, srcAligned, sizeAligned, nThreads, iThread);
434 } else {
435 for (uint32_t i = iThread; i < sizeAligned; i += nThreads) {
436 CpyVec buf;
437 for (uint32_t j = 0; j < CpyVec::Size; j++) {
438 buf.elems[j] = src[i * CpyVec::Size + j];
439 }
440 dstAligned[i] = buf.all;
441 }
442 }
443
444 int32_t leftovers = size % CpyVec::Size;
445 compressorMemcpyBasic(dst + size - leftovers, src + size - leftovers, leftovers, nThreads, iThread);
446}
447
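// Plain element-wise parallel copy: the range is split evenly over the blocks and strided over
// the threads of each block.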
448template <typename T>
449GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyBasic(T* GPUrestrict() dst, const T* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread, int32_t nBlocks, int32_t iBlock)
450{
451 uint32_t start = (size + nBlocks - 1) / nBlocks * iBlock + iThread;
452 uint32_t end = CAMath::Min(size, (size + nBlocks - 1) / nBlocks * (iBlock + 1));
453 for (uint32_t i = start; i < end; i += nThreads) {
454 dst[i] = src[i];
455 }
456}
457
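// Gathers nEntries variable-length source ranges (start offsets from srcOffsets, scaled by
// scaleBase1024/1024, lengths from nums, skipping the first diff elements of each range) into a
// contiguous destination, staging the data in the per-warp shared-memory buffer buf so that the
// writes to dst are performed in aligned, vector-sized chunks.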
458template <typename V, typename T, typename S>
459GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyBuffered(V* buf, T* GPUrestrict() dst, const T* GPUrestrict() src, const S* GPUrestrict() nums, const uint32_t* GPUrestrict() srcOffsets, uint32_t nEntries, int32_t nLanes, int32_t iLane, int32_t diff, size_t scaleBase1024)
460{
461 int32_t shmPos = 0;
462 uint32_t dstOffset = 0;
463 V* GPUrestrict() dstAligned = nullptr;
464
465 T* bufT = reinterpret_cast<T*>(buf);
466 constexpr const int32_t bufSize = GPUCA_WARP_SIZE;
467 constexpr const int32_t bufTSize = bufSize * sizeof(V) / sizeof(T);
468
469 for (uint32_t i = 0; i < nEntries; i++) {
470 uint32_t srcPos = 0;
471 uint32_t srcOffset = (srcOffsets[i] * scaleBase1024 / 1024) + diff;
472 uint32_t srcSize = nums[i] - diff;
473
474 if (dstAligned == nullptr) {
475 if (not isAlignedTo<V>(dst)) {
476 size_t dsti = reinterpret_cast<size_t>(dst);
477 uint32_t offset = (alignof(V) - dsti % alignof(V)) / sizeof(T);
478 offset = CAMath::Min<uint32_t>(offset, srcSize);
479 compressorMemcpyBasic(dst, src + srcOffset, offset, nLanes, iLane);
480 dst += offset;
481 srcPos += offset;
482 }
483 if (isAlignedTo<V>(dst)) {
484 dstAligned = reinterpret_cast<V*>(dst);
485 }
486 }
487 while (srcPos < srcSize) {
488 uint32_t shmElemsLeft = bufTSize - shmPos;
489 uint32_t srcElemsLeft = srcSize - srcPos;
490 uint32_t size = CAMath::Min(srcElemsLeft, shmElemsLeft);
491 compressorMemcpyBasic(bufT + shmPos, src + srcOffset + srcPos, size, nLanes, iLane);
492 srcPos += size;
493 shmPos += size;
494 GPUbarrier();
495
496 if (shmPos >= bufTSize) {
497 compressorMemcpyBasic(dstAligned + dstOffset, buf, bufSize, nLanes, iLane);
498 dstOffset += bufSize;
499 shmPos = 0;
500 GPUbarrier();
501 }
502 }
503 }
504
505 compressorMemcpyBasic(reinterpret_cast<T*>(dstAligned + dstOffset), bufT, shmPos, nLanes, iLane);
506 GPUbarrier();
507}
508
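// Computes the global output offset of each warp's [start, end) range: the counts preceding the
// block's range are reduced over all threads, and a workgroup-wide inclusive scan of the per-warp
// counts yields each warp's offset inside the block.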
509template <typename T>
510GPUdi() uint32_t GPUTPCCompressionGatherKernels::calculateWarpOffsets(GPUSharedMemory& smem, T* nums, uint32_t start, uint32_t end, int32_t nWarps, int32_t iWarp, int32_t nLanes, int32_t iLane)
511{
512 uint32_t blockOffset = 0;
513 int32_t iThread = nLanes * iWarp + iLane;
514 int32_t nThreads = nLanes * nWarps;
515 uint32_t blockStart = work_group_broadcast(start, 0);
516 for (uint32_t i = iThread; i < blockStart; i += nThreads) {
517 blockOffset += nums[i];
518 }
519 blockOffset = work_group_reduce_add(blockOffset);
520
521 uint32_t offset = 0;
522 for (uint32_t i = start + iLane; i < end; i += nLanes) {
523 offset += nums[i];
524 }
525 offset = work_group_scan_inclusive_add(offset);
526 if (iWarp > -1 && iLane == nLanes - 1) {
527 smem.warpOffset[iWarp] = offset;
528 }
529 GPUbarrier();
530 offset = (iWarp <= 0) ? 0 : smem.warpOffset[iWarp - 1];
531 GPUbarrier();
532
533 return offset + blockOffset;
534}
535
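// unbuffered gather: copy the compressed data into the flat output buffer with direct
// compressorMemcpy calls. Block 0 handles the track headers and the unattached cluster columns
// (rows distributed over its warps), block 1 handles the per-track attached cluster columns.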
536template <>
537GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::unbuffered>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
538{
539 GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
540 const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;
541
542 int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
543 int32_t iWarp = iThread / GPUCA_WARP_SIZE;
544
545 int32_t nLanes = GPUCA_WARP_SIZE;
546 int32_t iLane = iThread % GPUCA_WARP_SIZE;
547
548 if (iBlock == 0) {
549
550 uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
551 uint32_t rowsPerWarp = (nRows + nWarps - 1) / nWarps;
552 uint32_t rowStart = rowsPerWarp * iWarp;
553 uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
554 if (rowStart >= nRows) {
555 rowStart = 0;
556 rowEnd = 0;
557 }
558
559 uint32_t rowsOffset = calculateWarpOffsets(smem, compressor.mPtrs.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
560
561 compressorMemcpy(compressor.mOutput->nSliceRowClusters, compressor.mPtrs.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
562 compressorMemcpy(compressor.mOutput->nTrackClusters, compressor.mPtrs.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
563 compressorMemcpy(compressor.mOutput->qPtA, compressor.mPtrs.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
564 compressorMemcpy(compressor.mOutput->rowA, compressor.mPtrs.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
565 compressorMemcpy(compressor.mOutput->sliceA, compressor.mPtrs.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
566 compressorMemcpy(compressor.mOutput->timeA, compressor.mPtrs.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
567 compressorMemcpy(compressor.mOutput->padA, compressor.mPtrs.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
568
569 uint32_t sectorStart = rowStart / GPUCA_ROW_COUNT;
570 uint32_t sectorEnd = rowEnd / GPUCA_ROW_COUNT;
571
572 uint32_t sectorRowStart = rowStart % GPUCA_ROW_COUNT;
573 uint32_t sectorRowEnd = rowEnd % GPUCA_ROW_COUNT;
574
575 for (uint32_t i = sectorStart; i <= sectorEnd && i < compressor.NSECTORS; i++) {
576 for (uint32_t j = ((i == sectorStart) ? sectorRowStart : 0); j < ((i == sectorEnd) ? sectorRowEnd : GPUCA_ROW_COUNT); j++) {
577 uint32_t nClusters = compressor.mPtrs.nSliceRowClusters[i * GPUCA_ROW_COUNT + j];
578 uint32_t clusterOffsetInCache = clusters->clusterOffset[i][j] * compressor.mMaxClusterFactorBase1024 / 1024;
579 compressorMemcpy(compressor.mOutput->qTotU + rowsOffset, compressor.mPtrs.qTotU + clusterOffsetInCache, nClusters, nLanes, iLane);
580 compressorMemcpy(compressor.mOutput->qMaxU + rowsOffset, compressor.mPtrs.qMaxU + clusterOffsetInCache, nClusters, nLanes, iLane);
581 compressorMemcpy(compressor.mOutput->flagsU + rowsOffset, compressor.mPtrs.flagsU + clusterOffsetInCache, nClusters, nLanes, iLane);
582 compressorMemcpy(compressor.mOutput->padDiffU + rowsOffset, compressor.mPtrs.padDiffU + clusterOffsetInCache, nClusters, nLanes, iLane);
583 compressorMemcpy(compressor.mOutput->timeDiffU + rowsOffset, compressor.mPtrs.timeDiffU + clusterOffsetInCache, nClusters, nLanes, iLane);
584 compressorMemcpy(compressor.mOutput->sigmaPadU + rowsOffset, compressor.mPtrs.sigmaPadU + clusterOffsetInCache, nClusters, nLanes, iLane);
585 compressorMemcpy(compressor.mOutput->sigmaTimeU + rowsOffset, compressor.mPtrs.sigmaTimeU + clusterOffsetInCache, nClusters, nLanes, iLane);
586 rowsOffset += nClusters;
587 }
588 }
589 }
590
591 if (iBlock == 1) {
592 uint32_t tracksPerWarp = (compressor.mMemory->nStoredTracks + nWarps - 1) / nWarps;
593 uint32_t trackStart = tracksPerWarp * iWarp;
594 uint32_t trackEnd = CAMath::Min(compressor.mMemory->nStoredTracks, trackStart + tracksPerWarp);
595 if (trackStart >= compressor.mMemory->nStoredTracks) {
596 trackStart = 0;
597 trackEnd = 0;
598 }
599
600 uint32_t tracksOffset = calculateWarpOffsets(smem, compressor.mPtrs.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
601
602 for (uint32_t i = trackStart; i < trackEnd; i += nLanes) {
603 uint32_t nTrackClusters = 0;
604 uint32_t srcOffset = 0;
605
606 if (i + iLane < trackEnd) {
607 nTrackClusters = compressor.mPtrs.nTrackClusters[i + iLane];
608 srcOffset = compressor.mAttachedClusterFirstIndex[i + iLane];
609 }
610 smem.unbuffered.sizes[iWarp][iLane] = nTrackClusters;
611 smem.unbuffered.srcOffsets[iWarp][iLane] = srcOffset;
612
613 uint32_t elems = (i + nLanes < trackEnd) ? nLanes : (trackEnd - i);
614
615 for (uint32_t j = 0; j < elems; j++) {
616 nTrackClusters = smem.unbuffered.sizes[iWarp][j];
617 srcOffset = smem.unbuffered.srcOffsets[iWarp][j];
618 uint32_t idx = i + j;
619 compressorMemcpy(compressor.mOutput->qTotA + tracksOffset, compressor.mPtrs.qTotA + srcOffset, nTrackClusters, nLanes, iLane);
620 compressorMemcpy(compressor.mOutput->qMaxA + tracksOffset, compressor.mPtrs.qMaxA + srcOffset, nTrackClusters, nLanes, iLane);
621 compressorMemcpy(compressor.mOutput->flagsA + tracksOffset, compressor.mPtrs.flagsA + srcOffset, nTrackClusters, nLanes, iLane);
622 compressorMemcpy(compressor.mOutput->sigmaPadA + tracksOffset, compressor.mPtrs.sigmaPadA + srcOffset, nTrackClusters, nLanes, iLane);
623 compressorMemcpy(compressor.mOutput->sigmaTimeA + tracksOffset, compressor.mPtrs.sigmaTimeA + srcOffset, nTrackClusters, nLanes, iLane);
624
625 // First index stored with track
626 compressorMemcpy(compressor.mOutput->rowDiffA + tracksOffset - idx, compressor.mPtrs.rowDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
627 compressorMemcpy(compressor.mOutput->sliceLegDiffA + tracksOffset - idx, compressor.mPtrs.sliceLegDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
628 compressorMemcpy(compressor.mOutput->padResA + tracksOffset - idx, compressor.mPtrs.padResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
629 compressorMemcpy(compressor.mOutput->timeResA + tracksOffset - idx, compressor.mPtrs.timeResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
630
631 tracksOffset += nTrackClusters;
632 }
633 }
634 }
635}
636
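// Buffered gather: rows and tracks are distributed over the warps of all blocks, and each warp
// stages its copies through a shared-memory buffer of vector type V; block 0 additionally copies
// the track header arrays.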
637template <typename V>
638GPUdii() void GPUTPCCompressionGatherKernels::gatherBuffered(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
639{
640
641 GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
642 const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;
643
644 int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
645 int32_t iWarp = iThread / GPUCA_WARP_SIZE;
646
647 int32_t nGlobalWarps = nWarps * nBlocks;
648 int32_t iGlobalWarp = nWarps * iBlock + iWarp;
649
650 int32_t nLanes = GPUCA_WARP_SIZE;
651 int32_t iLane = iThread % GPUCA_WARP_SIZE;
652
653 auto& input = compressor.mPtrs;
654 auto* output = compressor.mOutput;
655
656 uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
657 uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
658 uint32_t rowStart = rowsPerWarp * iGlobalWarp;
659 uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
660 if (rowStart >= nRows) {
661 rowStart = 0;
662 rowEnd = 0;
663 }
664 rowsPerWarp = rowEnd - rowStart;
665
666 uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
667
668 uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
669 uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
670 uint32_t trackStart = tracksPerWarp * iGlobalWarp;
671 uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
672 if (trackStart >= nStoredTracks) {
673 trackStart = 0;
674 trackEnd = 0;
675 }
676 tracksPerWarp = trackEnd - trackStart;
677
678 uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
679
680 if (iBlock == 0) {
681 compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
682 compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
683 compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
684 compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
685 compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
686 compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
687 compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
688 }
689
690 const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
691 const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;
692
693 auto* buf = smem.getBuffer<V>(iWarp);
694
695 compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
696 compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
697 compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
698 compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
699 compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
700 compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
701 compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
702
703 const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
704 const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;
705
706 compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
707 compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
708 compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
709 compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
710 compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
711
712 // First index stored with track
713 uint32_t tracksOffsetDiff = tracksOffset - trackStart;
714 compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
715 compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
716 compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
717 compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
718}
719
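// Multi-block gather: block 0 copies the track header arrays, odd blocks copy the unattached
// cluster columns and the remaining even blocks copy the attached cluster columns, each warp
// staging its copies through the per-warp Vec128 shared-memory buffer.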
720GPUdii() void GPUTPCCompressionGatherKernels::gatherMulti(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
721{
722 GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
723 const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;
724 const auto& input = compressor.mPtrs;
725 auto* output = compressor.mOutput;
726
727 const int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
728 const int32_t iWarp = iThread / GPUCA_WARP_SIZE;
729 const int32_t nLanes = GPUCA_WARP_SIZE;
730 const int32_t iLane = iThread % GPUCA_WARP_SIZE;
731 auto* buf = smem.getBuffer<Vec128>(iWarp);
732
733 if (iBlock == 0) {
734 compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
735 compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
736 compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
737 compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
738 compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
739 compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
740 compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
741 } else if (iBlock & 1) {
742 const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
743 const uint32_t iGlobalWarp = nWarps * (iBlock - 1) / 2 + iWarp;
744
745 const uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
746 uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
747 uint32_t rowStart = rowsPerWarp * iGlobalWarp;
748 uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
749 if (rowStart >= nRows) {
750 rowStart = 0;
751 rowEnd = 0;
752 }
753 rowsPerWarp = rowEnd - rowStart;
754
755 const uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
756 const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
757 const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;
758
759 compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
760 compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
761 compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
762 compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
763 compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
764 compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
765 compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
766 } else {
767 const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
768 const uint32_t iGlobalWarp = nWarps * (iBlock / 2 - 1) + iWarp;
769
770 const uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
771 uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
772 uint32_t trackStart = tracksPerWarp * iGlobalWarp;
773 uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
774 if (trackStart >= nStoredTracks) {
775 trackStart = 0;
776 trackEnd = 0;
777 }
778 tracksPerWarp = trackEnd - trackStart;
779
780 const uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
781 const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
782 const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;
783
784 compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
785 compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
786 compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
787 compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
788 compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
789
790 // First index stored with track
791 uint32_t tracksOffsetDiff = tracksOffset - trackStart;
792 compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
793 compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
794 compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
795 compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
796 }
797}
798
799template <>
800GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered32>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
801{
802 gatherBuffered<Vec32>(nBlocks, nThreads, iBlock, iThread, smem, processors);
803}
804
805template <>
806GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered64>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
807{
808 gatherBuffered<Vec64>(nBlocks, nThreads, iBlock, iThread, smem, processors);
809}
810
811template <>
812GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered128>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
813{
814 gatherBuffered<Vec128>(nBlocks, nThreads, iBlock, iThread, smem, processors);
815}
816
817template <>
818GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::multiBlock>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
819{
820 gatherMulti(nBlocks, nThreads, iBlock, iThread, smem, processors);
821}