GPUTPCCompressionKernels.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

#include "GPUTPCCompressionKernels.h"
#include "GPUConstantMem.h"
#include "GPUO2DataTypes.h"
#include "GPUParam.h"
#include "GPUCommonAlgorithm.h"
#include "GPUTPCCompressionTrackModel.h"
#include "GPUTPCGeometry.h"
#include "GPUTPCClusterRejection.h"
#include "GPUTPCCompressionKernels.inc"

using namespace o2::gpu;
using namespace o2::tpc;

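// Step 0: compression of clusters attached to merged tracks. Each thread walks one merged
// track (grid-stride loop), feeds the accepted clusters through the compression track model,
// stores the first cluster in absolute coordinates together with the quantized q/pt that
// seeds the model, and every following cluster as row/sector differences and pad/time
// residuals with respect to the propagated model.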
template <>
GPUdii() void GPUTPCCompressionKernels::Thread<GPUTPCCompressionKernels::step0attached>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& processors)
{
  const GPUTrackingInOutPointers& GPUrestrict() ioPtrs = processors.ioPtrs;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = ioPtrs.clustersNative;
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const GPUParam& GPUrestrict() param = processors.param;

  uint8_t lastLeg = 0;
  int32_t myTrack = 0;
  for (uint32_t i = get_global_id(0); i < ioPtrs.nMergedTracks; i += get_global_size(0)) {
    GPUbarrierWarp();
    const GPUTPCGMMergedTrack& GPUrestrict() trk = ioPtrs.mergedTracks[i];
    if (!trk.OK()) {
      continue;
    }
    bool rejectTrk = CAMath::Abs(trk.GetParam().GetQPt() * processors.param.qptB5Scaler) > processors.param.rec.tpc.rejectQPtB5 || trk.MergedLooper();
    uint32_t nClustersStored = 0;
    CompressedClustersPtrs& GPUrestrict() c = compressor.mPtrs;
    uint8_t lastRow = 0, lastSector = 0;
    GPUTPCCompressionTrackModel track;
    float zOffset = 0;
    for (int32_t k = trk.NClusters() - 1; k >= 0; k--) {
      const GPUTPCGMMergedTrackHit& GPUrestrict() hit = ioPtrs.mergedTrackHits[trk.FirstClusterRef() + k];
      if (hit.state & GPUTPCGMMergedTrackHit::flagReject) {
        continue;
      }

      int32_t hitId = hit.num;
      int32_t attach = ioPtrs.mergedTrackHitAttachment[hitId];
      if ((attach & gputpcgmmergertypes::attachTrackMask) != i) {
        continue; // Main attachment to different track
      }
      bool rejectCluster = processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA && (rejectTrk || GPUTPCClusterRejection::GetIsRejected(attach));
      if (rejectCluster) {
        compressor.mClusterStatus[hitId] = 1; // Cluster rejected, do not store
        continue;
      }

      if (!(param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTrackModel)) {
        continue; // No track model compression
      }
      const ClusterNative& GPUrestrict() orgCl = clusters->clusters[hit.sector][hit.row][hit.num - clusters->clusterOffset[hit.sector][hit.row]];
      constexpr GPUTPCGeometry geo;
      float x = geo.Row2X(hit.row);
      float y = track.LinearPad2Y(hit.sector, orgCl.getPad(), geo.PadWidth(hit.row), geo.NPads(hit.row));
      float z = geo.LinearTime2Z(hit.sector, orgCl.getTime());
      if (nClustersStored) {
        if ((hit.sector < GPUCA_NSECTORS) ^ (lastSector < GPUCA_NSECTORS)) {
          break;
        }
        if (lastLeg != hit.leg && track.Mirror()) {
          break;
        }
        if (track.Propagate(geo.Row2X(hit.row), param.SectorParam[hit.sector].Alpha)) {
          break;
        }
      }

      compressor.mClusterStatus[hitId] = 1; // Cluster compressed in track model, do not store as difference

      int32_t cidx = trk.FirstClusterRef() + nClustersStored++;
      if (nClustersStored == 1) {
        uint8_t qpt = fabs(trk.GetParam().GetQPt()) < 20.f ? (trk.GetParam().GetQPt() * (127.f / 20.f) + 127.5f) : (trk.GetParam().GetQPt() > 0 ? 254 : 0);
        zOffset = z;
        track.Init(x, y, z - zOffset, param.SectorParam[hit.sector].Alpha, qpt, param);

        myTrack = CAMath::AtomicAdd(&compressor.mMemory->nStoredTracks, 1u);
        compressor.mAttachedClusterFirstIndex[myTrack] = trk.FirstClusterRef();
        lastLeg = hit.leg;
        c.qPtA[myTrack] = qpt;
        c.rowA[myTrack] = hit.row;
        c.sliceA[myTrack] = hit.sector;
        c.timeA[myTrack] = orgCl.getTimePacked();
        c.padA[myTrack] = orgCl.padPacked;
      } else {
        uint32_t row = hit.row;
        uint32_t sector = hit.sector;

        if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionDifferences) {
          if (lastRow > row) {
            row += GPUCA_ROW_COUNT;
          }
          row -= lastRow;
          if (lastSector > sector) {
            sector += compressor.NSECTORS;
          }
          sector -= lastSector;
        }
        c.rowDiffA[cidx] = row;
        c.sliceLegDiffA[cidx] = (hit.leg == lastLeg ? 0 : compressor.NSECTORS) + sector;
        float pad = CAMath::Max(0.f, CAMath::Min((float)geo.NPads(GPUCA_ROW_COUNT - 1), track.LinearY2Pad(hit.sector, track.Y(), geo.PadWidth(hit.row), geo.NPads(hit.row))));
        c.padResA[cidx] = orgCl.padPacked - orgCl.packPad(pad);
        float time = CAMath::Max(0.f, geo.LinearZ2Time(hit.sector, track.Z() + zOffset));
        c.timeResA[cidx] = (orgCl.getTimePacked() - orgCl.packTime(time)) & 0xFFFFFF;
        lastLeg = hit.leg;
      }
      uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
      uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
      if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTruncate) {
        compressor.truncateSignificantBitsChargeMax(qmax, param);
        compressor.truncateSignificantBitsCharge(qtot, param);
        compressor.truncateSignificantBitsWidth(sigmapad, param);
        compressor.truncateSignificantBitsWidth(sigmatime, param);
      }
      c.qTotA[cidx] = qtot;
      c.qMaxA[cidx] = qmax;
      c.sigmaPadA[cidx] = sigmapad;
      c.sigmaTimeA[cidx] = sigmatime;
      c.flagsA[cidx] = orgCl.getFlags();
      if (k && track.Filter(y, z - zOffset, hit.row)) {
        break;
      }
      lastRow = hit.row;
      lastSector = hit.sector;
    }
    if (nClustersStored) {
      CAMath::AtomicAdd(&compressor.mMemory->nStoredAttachedClusters, nClustersStored);
      c.nTrackClusters[myTrack] = nClustersStored;
    }
  }
}

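// Comparison functors for sorting unattached clusters within a pad row before difference
// encoding; the order is chosen at runtime via param.rec.tpc.compressionSortOrder
// (specialization 4 gives a fully deterministic tie-breaking order).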
template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>::operator()(uint32_t a, uint32_t b) const
{
  return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
}

template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>::operator()(uint32_t a, uint32_t b) const
{
  return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
}

template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>::operator()(uint32_t a, uint32_t b) const
{
  if (mClsPtr[a].getTimePacked() >> 3 == mClsPtr[b].getTimePacked() >> 3) {
    return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
  }
  return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
}

template <>
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>::operator()(uint32_t a, uint32_t b) const
{
  if (mClsPtr[a].padPacked >> 3 == mClsPtr[b].padPacked >> 3) {
    return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
  }
  return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
}

template <> // Deterministic comparison
GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<4>::operator()(uint32_t a, uint32_t b) const
{
  if (mClsPtr[a].getTimePacked() != mClsPtr[b].getTimePacked()) {
    return mClsPtr[a].getTimePacked() < mClsPtr[b].getTimePacked();
  }
  if (mClsPtr[a].padPacked != mClsPtr[b].padPacked) {
    return mClsPtr[a].padPacked < mClsPtr[b].padPacked;
  }
  return mClsPtr[a].qTot < mClsPtr[b].qTot;
}

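// Step 1: compression of the remaining clusters (unattached, or dropped from rejected tracks).
// Each block processes one sector/row, collects surviving cluster indices into a shared-memory
// buffer in chunks of GPUCA_TPC_COMP_CHUNK_SIZE, optionally sorts each chunk, and stores the
// time/pad differences together with the (optionally truncated) charges and widths.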
template <>
GPUdii() void GPUTPCCompressionKernels::Thread<GPUTPCCompressionKernels::step1unattached>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  const GPUTrackingInOutPointers& GPUrestrict() ioPtrs = processors.ioPtrs;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = ioPtrs.clustersNative;
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  GPUParam& GPUrestrict() param = processors.param;
  uint32_t* sortBuffer = smem.sortBuffer;
  for (int32_t iSectorRow = iBlock; iSectorRow < GPUCA_NSECTORS * GPUCA_ROW_COUNT; iSectorRow += nBlocks) {
    const uint32_t iSector = iSectorRow / GPUCA_ROW_COUNT;
    const uint32_t iRow = iSectorRow % GPUCA_ROW_COUNT;
    const uint32_t idOffset = clusters->clusterOffset[iSector][iRow];
    const uint32_t idOffsetOut = clusters->clusterOffset[iSector][iRow] * compressor.mMaxClusterFactorBase1024 / 1024; // 32 bit enough for number of clusters per row * 1024
    const uint32_t idOffsetOutMax = ((const uint32_t*)clusters->clusterOffset[iSector])[iRow + 1] * compressor.mMaxClusterFactorBase1024 / 1024; // Array out of bounds access is ok, since it goes to the correct nClustersTotal
    if (iThread == nThreads - 1) {
      smem.nCount = 0;
    }
    uint32_t totalCount = 0;
    GPUbarrier();

    CompressedClustersPtrs& GPUrestrict() c = compressor.mPtrs;

    const uint32_t nn = CAMath::nextMultipleOf<GPUCA_GET_THREAD_COUNT(GPUCA_LB_GPUTPCCompressionKernels_step1unattached)>(clusters->nClusters[iSector][iRow]);
    for (uint32_t i = iThread; i < nn + nThreads; i += nThreads) {
      const int32_t idx = idOffset + i;
      int32_t storeCluster = 0;
      do {
        if (i >= clusters->nClusters[iSector][iRow]) {
          break;
        }
        if (compressor.mClusterStatus[idx]) {
          break;
        }
        int32_t attach = ioPtrs.mergedTrackHitAttachment[idx];
        bool unattached = attach == 0;

        if (unattached) {
          if (processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyB) {
            break;
          }
        } else if (processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA) {
          if (GPUTPCClusterRejection::GetIsRejected(attach)) {
            break;
          }
          int32_t id = attach & gputpcgmmergertypes::attachTrackMask;
          auto& trk = ioPtrs.mergedTracks[id];
          if (CAMath::Abs(trk.GetParam().GetQPt() * processors.param.qptB5Scaler) > processors.param.rec.tpc.rejectQPtB5 || trk.MergedLooper()) {
            break;
          }
        }
        storeCluster = 1;
      } while (false);

      GPUbarrier();
      int32_t myIndex = work_group_scan_inclusive_add(storeCluster);
      int32_t storeLater = -1;
      if (storeCluster) {
        if (smem.nCount + myIndex <= GPUCA_TPC_COMP_CHUNK_SIZE) {
          sortBuffer[smem.nCount + myIndex - 1] = i;
        } else {
          storeLater = smem.nCount + myIndex - 1 - GPUCA_TPC_COMP_CHUNK_SIZE;
        }
      }
      GPUbarrier();
      if (iThread == nThreads - 1) {
        smem.nCount += myIndex;
      }
      GPUbarrier();

      if (smem.nCount < GPUCA_TPC_COMP_CHUNK_SIZE && i < nn) {
        continue;
      }

      uint32_t count = CAMath::Min(smem.nCount, (uint32_t)GPUCA_TPC_COMP_CHUNK_SIZE);
      if (idOffsetOut + totalCount + count > idOffsetOutMax) {
        if (iThread == nThreads - 1) {
          compressor.raiseError(GPUErrors::ERROR_COMPRESSION_ROW_HIT_OVERFLOW, iSector * 1000 + iRow, idOffsetOut + totalCount + count, idOffsetOutMax);
        }
        break;
      }
      if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionDifferences) {
#ifdef GPUCA_GPUCODE
        static_assert(GPUCA_GET_THREAD_COUNT(GPUCA_LB_GPUTPCCompressionKernels_step1unattached) * 2 <= GPUCA_TPC_COMP_CHUNK_SIZE);
#endif
#ifdef GPUCA_DETERMINISTIC_MODE // Not using GPUCA_DETERMINISTIC_CODE, which is enforced in TPC compression
        CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
#else // GPUCA_DETERMINISTIC_MODE
        if (param.rec.tpc.compressionSortOrder == GPUSettings::SortZPadTime) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZPadTime>(clusters->clusters[iSector][iRow]));
        } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortZTimePad) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortZTimePad>(clusters->clusters[iSector][iRow]));
        } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortPad) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortPad>(clusters->clusters[iSector][iRow]));
        } else if (param.rec.tpc.compressionSortOrder == GPUSettings::SortTime) {
          CAAlgo::sortInBlock(sortBuffer, sortBuffer + count, GPUTPCCompressionKernels_Compare<GPUSettings::SortTime>(clusters->clusters[iSector][iRow]));
        }
#endif // GPUCA_DETERMINISTIC_MODE
        GPUbarrier();
      }

      for (uint32_t j = get_local_id(0); j < count; j += get_local_size(0)) {
        int32_t outidx = idOffsetOut + totalCount + j;
        const ClusterNative& GPUrestrict() orgCl = clusters->clusters[iSector][iRow][sortBuffer[j]];

        int32_t preId = j != 0 ? (int32_t)sortBuffer[j - 1] : (totalCount != 0 ? (int32_t)smem.lastIndex : -1);
        GPUTPCCompression_EncodeUnattached(param.rec.tpc.compressionTypeMask, orgCl, c.timeDiffU[outidx], c.padDiffU[outidx], preId == -1 ? nullptr : &clusters->clusters[iSector][iRow][preId]);

        uint16_t qtot = orgCl.qTot, qmax = orgCl.qMax;
        uint8_t sigmapad = orgCl.sigmaPadPacked, sigmatime = orgCl.sigmaTimePacked;
        if (param.rec.tpc.compressionTypeMask & GPUSettings::CompressionTruncate) {
          compressor.truncateSignificantBitsChargeMax(qmax, param);
          compressor.truncateSignificantBitsCharge(qtot, param);
          compressor.truncateSignificantBitsWidth(sigmapad, param);
          compressor.truncateSignificantBitsWidth(sigmatime, param);
        }
        c.qTotU[outidx] = qtot;
        c.qMaxU[outidx] = qmax;
        c.sigmaPadU[outidx] = sigmapad;
        c.sigmaTimeU[outidx] = sigmatime;
        c.flagsU[outidx] = orgCl.getFlags();
      }

      GPUbarrier();
      if (storeLater >= 0) {
        sortBuffer[storeLater] = i;
      }
      totalCount += count;
      if (iThread == nThreads - 1 && count) {
        smem.lastIndex = sortBuffer[count - 1];
        smem.nCount -= count;
      }
    }

    if (iThread == nThreads - 1) {
      c.nSliceRowClusters[iSector * GPUCA_ROW_COUNT + iRow] = totalCount;
      CAMath::AtomicAdd(&compressor.mMemory->nStoredUnattachedClusters, totalCount);
    }
    GPUbarrier();
  }
}

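// Gather kernels: copy the compressed data, scattered over the temporary per-row and per-track
// buffers, into the contiguous CompressedClusters output. The getBuffer<>() specializations
// below return the per-warp shared-memory staging buffer of the requested vector width.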
template <>
GPUdi() GPUTPCCompressionGatherKernels::Vec32* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec32>(int32_t iWarp)
{
  return buf32[iWarp];
}

template <>
GPUdi() GPUTPCCompressionGatherKernels::Vec64* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec64>(int32_t iWarp)
{
  return buf64[iWarp];
}

template <>
GPUdi() GPUTPCCompressionGatherKernels::Vec128* GPUTPCCompressionGatherKernels::GPUSharedMemory::getBuffer<GPUTPCCompressionGatherKernels::Vec128>(int32_t iWarp)
{
  return buf128[iWarp];
}

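// Check whether a pointer of type S is sufficiently aligned to be accessed as type T
// (trivially true when S already has at least the alignment of T).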
template <typename T, typename S>
GPUdi() bool GPUTPCCompressionGatherKernels::isAlignedTo(const S* ptr)
{
  if constexpr (alignof(S) >= alignof(T)) {
    static_cast<void>(ptr);
    return true;
  } else {
    return reinterpret_cast<size_t>(ptr) % alignof(T) == 0;
  }
}

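// compressorMemcpy: per-type copy helpers that select the widest vectorised copy the element
// count allows (128-, 64-, 32- or 16-bit packed transfers) and fall back to the plain
// element-wise copy for small sizes.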
template <>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint8_t>(uint8_t* GPUrestrict() dst, const uint8_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  constexpr const int32_t vec128Elems = CpyVector<uint8_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint8_t, Vec64>::Size;
  constexpr const int32_t vec32Elems = CpyVector<uint8_t, Vec32>::Size;
  constexpr const int32_t vec16Elems = CpyVector<uint8_t, Vec16>::Size;

  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec64>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec32Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec32>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec16Elems)) {
    compressorMemcpyVectorised<uint8_t, Vec16>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
}

template <>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint16_t>(uint16_t* GPUrestrict() dst, const uint16_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  constexpr const int32_t vec128Elems = CpyVector<uint16_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint16_t, Vec64>::Size;
  constexpr const int32_t vec32Elems = CpyVector<uint16_t, Vec32>::Size;

  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec64>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec32Elems)) {
    compressorMemcpyVectorised<uint16_t, Vec32>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
}

template <>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpy<uint32_t>(uint32_t* GPUrestrict() dst, const uint32_t* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  constexpr const int32_t vec128Elems = CpyVector<uint32_t, Vec128>::Size;
  constexpr const int32_t vec64Elems = CpyVector<uint32_t, Vec64>::Size;

  if (size >= uint32_t(nThreads * vec128Elems)) {
    compressorMemcpyVectorised<uint32_t, Vec128>(dst, src, size, nThreads, iThread);
  } else if (size >= uint32_t(nThreads * vec64Elems)) {
    compressorMemcpyVectorised<uint32_t, Vec64>(dst, src, size, nThreads, iThread);
  } else {
    compressorMemcpyBasic(dst, src, size, nThreads, iThread);
  }
}

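// Vectorised copy: align the destination with an element-wise prologue, copy whole BaseVector
// words (gathering unaligned source elements through a register-local CpyVector when needed),
// and handle the remaining tail element-wise.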
template <typename Scalar, typename BaseVector>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyVectorised(Scalar* dst, const Scalar* src, uint32_t size, int32_t nThreads, int32_t iThread)
{
  if (not isAlignedTo<BaseVector>(dst)) {
    size_t dsti = reinterpret_cast<size_t>(dst);
    int32_t offset = (alignof(BaseVector) - dsti % alignof(BaseVector)) / sizeof(Scalar);
    compressorMemcpyBasic(dst, src, offset, nThreads, iThread);
    src += offset;
    dst += offset;
    size -= offset;
  }

  BaseVector* GPUrestrict() dstAligned = reinterpret_cast<BaseVector*>(dst);

  using CpyVec = CpyVector<Scalar, BaseVector>;
  uint32_t sizeAligned = size / CpyVec::Size;

  if (isAlignedTo<BaseVector>(src)) {
    const BaseVector* GPUrestrict() srcAligned = reinterpret_cast<const BaseVector*>(src);
    compressorMemcpyBasic(dstAligned, srcAligned, sizeAligned, nThreads, iThread);
  } else {
    for (uint32_t i = iThread; i < sizeAligned; i += nThreads) {
      CpyVec buf;
      for (uint32_t j = 0; j < CpyVec::Size; j++) {
        buf.elems[j] = src[i * CpyVec::Size + j];
      }
      dstAligned[i] = buf.all;
    }
  }

  int32_t leftovers = size % CpyVec::Size;
  compressorMemcpyBasic(dst + size - leftovers, src + size - leftovers, leftovers, nThreads, iThread);
}

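// Plain strided copy: the index range is split evenly over the blocks, and within a block the
// threads copy interleaved elements.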
template <typename T>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyBasic(T* GPUrestrict() dst, const T* GPUrestrict() src, uint32_t size, int32_t nThreads, int32_t iThread, int32_t nBlocks, int32_t iBlock)
{
  uint32_t start = (size + nBlocks - 1) / nBlocks * iBlock + iThread;
  uint32_t end = CAMath::Min(size, (size + nBlocks - 1) / nBlocks * (iBlock + 1));
  for (uint32_t i = start; i < end; i += nThreads) {
    dst[i] = src[i];
  }
}

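// Buffered copy: concatenate many small source chunks (one per row or track, addressed via
// srcOffsets/nums, with the first diff elements skipped) into one destination stream, staging
// the data in a per-warp buffer of GPUCA_WARP_SIZE vectors so the global-memory writes stay aligned.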
template <typename V, typename T, typename S>
GPUdi() void GPUTPCCompressionGatherKernels::compressorMemcpyBuffered(V* buf, T* GPUrestrict() dst, const T* GPUrestrict() src, const S* GPUrestrict() nums, const uint32_t* GPUrestrict() srcOffsets, uint32_t nEntries, int32_t nLanes, int32_t iLane, int32_t diff, size_t scaleBase1024)
{
  int32_t shmPos = 0;
  uint32_t dstOffset = 0;
  V* GPUrestrict() dstAligned = nullptr;

  T* bufT = reinterpret_cast<T*>(buf);
  constexpr const int32_t bufSize = GPUCA_WARP_SIZE;
  constexpr const int32_t bufTSize = bufSize * sizeof(V) / sizeof(T);

  for (uint32_t i = 0; i < nEntries; i++) {
    uint32_t srcPos = 0;
    uint32_t srcOffset = (srcOffsets[i] * scaleBase1024 / 1024) + diff;
    uint32_t srcSize = nums[i] - diff;

    if (dstAligned == nullptr) {
      if (not isAlignedTo<V>(dst)) {
        size_t dsti = reinterpret_cast<size_t>(dst);
        uint32_t offset = (alignof(V) - dsti % alignof(V)) / sizeof(T);
        offset = CAMath::Min<uint32_t>(offset, srcSize);
        compressorMemcpyBasic(dst, src + srcOffset, offset, nLanes, iLane);
        dst += offset;
        srcPos += offset;
      }
      if (isAlignedTo<V>(dst)) {
        dstAligned = reinterpret_cast<V*>(dst);
      }
    }
    while (srcPos < srcSize) {
      uint32_t shmElemsLeft = bufTSize - shmPos;
      uint32_t srcElemsLeft = srcSize - srcPos;
      uint32_t size = CAMath::Min(srcElemsLeft, shmElemsLeft);
      compressorMemcpyBasic(bufT + shmPos, src + srcOffset + srcPos, size, nLanes, iLane);
      srcPos += size;
      shmPos += size;
      GPUbarrierWarp();

      if (shmPos >= bufTSize) {
        compressorMemcpyBasic(dstAligned + dstOffset, buf, bufSize, nLanes, iLane);
        dstOffset += bufSize;
        shmPos = 0;
        GPUbarrierWarp();
      }
    }
  }

  compressorMemcpyBasic(reinterpret_cast<T*>(dstAligned + dstOffset), bufT, shmPos, nLanes, iLane);
  GPUbarrierWarp();
}

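// Compute the output offset of this warp's [start, end) range: the sum of all counts before the
// block plus an exclusive prefix sum over the per-warp counts inside the block.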
template <typename T>
GPUdi() uint32_t GPUTPCCompressionGatherKernels::calculateWarpOffsets(GPUSharedMemory& smem, T* nums, uint32_t start, uint32_t end, int32_t nWarps, int32_t iWarp, int32_t nLanes, int32_t iLane)
{
  uint32_t blockOffset = 0;
  int32_t iThread = nLanes * iWarp + iLane;
  int32_t nThreads = nLanes * nWarps;
  uint32_t blockStart = work_group_broadcast(start, 0);
  for (uint32_t i = iThread; i < blockStart; i += nThreads) {
    blockOffset += nums[i];
  }
  blockOffset = work_group_reduce_add(blockOffset);

  uint32_t offset = 0;
  for (uint32_t i = start + iLane; i < end; i += nLanes) {
    offset += nums[i];
  }
  offset = work_group_scan_inclusive_add(offset);
  if (iWarp > -1 && iLane == nLanes - 1) {
    smem.warpOffset[iWarp] = offset;
  }
  GPUbarrier();
  offset = (iWarp <= 0) ? 0 : smem.warpOffset[iWarp - 1];
  GPUbarrier();

  return offset + blockOffset;
}

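// Unbuffered gather: block 0 copies the per-row counters, the per-track headers, and all
// unattached-cluster columns; block 1 copies the attached-cluster columns track by track.
// Data is written directly to the output, without the shared-memory staging buffer.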
template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::unbuffered>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;

  int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  int32_t iWarp = iThread / GPUCA_WARP_SIZE;

  int32_t nLanes = GPUCA_WARP_SIZE;
  int32_t iLane = iThread % GPUCA_WARP_SIZE;

  if (iBlock == 0) {
    uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
    uint32_t rowsPerWarp = (nRows + nWarps - 1) / nWarps;
    uint32_t rowStart = rowsPerWarp * iWarp;
    uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
    if (rowStart >= nRows) {
      rowStart = 0;
      rowEnd = 0;
    }

    uint32_t rowsOffset = calculateWarpOffsets(smem, compressor.mPtrs.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);

    compressorMemcpy(compressor.mOutput->nSliceRowClusters, compressor.mPtrs.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->nTrackClusters, compressor.mPtrs.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->qPtA, compressor.mPtrs.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->rowA, compressor.mPtrs.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->sliceA, compressor.mPtrs.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->timeA, compressor.mPtrs.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpy(compressor.mOutput->padA, compressor.mPtrs.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);

    uint32_t sectorStart = rowStart / GPUCA_ROW_COUNT;
    uint32_t sectorEnd = rowEnd / GPUCA_ROW_COUNT;

    uint32_t sectorRowStart = rowStart % GPUCA_ROW_COUNT;
    uint32_t sectorRowEnd = rowEnd % GPUCA_ROW_COUNT;

    for (uint32_t i = sectorStart; i <= sectorEnd && i < compressor.NSECTORS; i++) {
      for (uint32_t j = ((i == sectorStart) ? sectorRowStart : 0); j < ((i == sectorEnd) ? sectorRowEnd : GPUCA_ROW_COUNT); j++) {
        uint32_t nClusters = compressor.mPtrs.nSliceRowClusters[i * GPUCA_ROW_COUNT + j];
        uint32_t clusterOffsetInCache = clusters->clusterOffset[i][j] * compressor.mMaxClusterFactorBase1024 / 1024;
        compressorMemcpy(compressor.mOutput->qTotU + rowsOffset, compressor.mPtrs.qTotU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->qMaxU + rowsOffset, compressor.mPtrs.qMaxU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->flagsU + rowsOffset, compressor.mPtrs.flagsU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->padDiffU + rowsOffset, compressor.mPtrs.padDiffU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->timeDiffU + rowsOffset, compressor.mPtrs.timeDiffU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaPadU + rowsOffset, compressor.mPtrs.sigmaPadU + clusterOffsetInCache, nClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaTimeU + rowsOffset, compressor.mPtrs.sigmaTimeU + clusterOffsetInCache, nClusters, nLanes, iLane);
        rowsOffset += nClusters;
      }
    }
  }

  if (iBlock == 1) {
    uint32_t tracksPerWarp = (compressor.mMemory->nStoredTracks + nWarps - 1) / nWarps;
    uint32_t trackStart = tracksPerWarp * iWarp;
    uint32_t trackEnd = CAMath::Min(compressor.mMemory->nStoredTracks, trackStart + tracksPerWarp);
    if (trackStart >= compressor.mMemory->nStoredTracks) {
      trackStart = 0;
      trackEnd = 0;
    }

    uint32_t tracksOffset = calculateWarpOffsets(smem, compressor.mPtrs.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);

    for (uint32_t i = trackStart; i < trackEnd; i += nLanes) {
      uint32_t nTrackClusters = 0;
      uint32_t srcOffset = 0;

      if (i + iLane < trackEnd) {
        nTrackClusters = compressor.mPtrs.nTrackClusters[i + iLane];
        srcOffset = compressor.mAttachedClusterFirstIndex[i + iLane];
      }
      smem.unbuffered.sizes[iWarp][iLane] = nTrackClusters;
      smem.unbuffered.srcOffsets[iWarp][iLane] = srcOffset;

      uint32_t elems = (i + nLanes < trackEnd) ? nLanes : (trackEnd - i);

      for (uint32_t j = 0; j < elems; j++) {
        nTrackClusters = smem.unbuffered.sizes[iWarp][j];
        srcOffset = smem.unbuffered.srcOffsets[iWarp][j];
        uint32_t idx = i + j;
        compressorMemcpy(compressor.mOutput->qTotA + tracksOffset, compressor.mPtrs.qTotA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->qMaxA + tracksOffset, compressor.mPtrs.qMaxA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->flagsA + tracksOffset, compressor.mPtrs.flagsA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaPadA + tracksOffset, compressor.mPtrs.sigmaPadA + srcOffset, nTrackClusters, nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sigmaTimeA + tracksOffset, compressor.mPtrs.sigmaTimeA + srcOffset, nTrackClusters, nLanes, iLane);

        // First index stored with track
        compressorMemcpy(compressor.mOutput->rowDiffA + tracksOffset - idx, compressor.mPtrs.rowDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
        compressorMemcpy(compressor.mOutput->sliceLegDiffA + tracksOffset - idx, compressor.mPtrs.sliceLegDiffA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
        compressorMemcpy(compressor.mOutput->padResA + tracksOffset - idx, compressor.mPtrs.padResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);
        compressorMemcpy(compressor.mOutput->timeResA + tracksOffset - idx, compressor.mPtrs.timeResA + srcOffset + 1, (nTrackClusters - 1), nLanes, iLane);

        tracksOffset += nTrackClusters;
      }
    }
  }
}

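// Buffered gather: same output layout as the unbuffered variant, but each warp owns a contiguous
// slice of rows and tracks and streams its copies through the shared-memory staging buffer
// (compressorMemcpyBuffered), templated on the staging vector width V.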
template <typename V>
GPUdii() void GPUTPCCompressionGatherKernels::gatherBuffered(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;

  int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  int32_t iWarp = iThread / GPUCA_WARP_SIZE;

  int32_t nGlobalWarps = nWarps * nBlocks;
  int32_t iGlobalWarp = nWarps * iBlock + iWarp;

  int32_t nLanes = GPUCA_WARP_SIZE;
  int32_t iLane = iThread % GPUCA_WARP_SIZE;

  auto& input = compressor.mPtrs;
  auto* output = compressor.mOutput;

  uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
  uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
  uint32_t rowStart = rowsPerWarp * iGlobalWarp;
  uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
  if (rowStart >= nRows) {
    rowStart = 0;
    rowEnd = 0;
  }
  rowsPerWarp = rowEnd - rowStart;

  uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);

  uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
  uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
  uint32_t trackStart = tracksPerWarp * iGlobalWarp;
  uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
  if (trackStart >= nStoredTracks) {
    trackStart = 0;
    trackEnd = 0;
  }
  tracksPerWarp = trackEnd - trackStart;

  uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);

  if (iBlock == 0) {
    compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
  }

  const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
  const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;

  auto* buf = smem.getBuffer<V>(iWarp);

  compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);

  const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
  const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;

  compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
  compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);

  // First index stored with track
  uint32_t tracksOffsetDiff = tracksOffset - trackStart;
  compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
}

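// Multi-block gather: block 0 copies the headers, odd blocks share the unattached-cluster
// columns, and the remaining even blocks share the attached-cluster columns, all staging
// through the 128-bit per-warp buffer.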
GPUdii() void GPUTPCCompressionGatherKernels::gatherMulti(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  GPUTPCCompression& GPUrestrict() compressor = processors.tpcCompressor;
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = processors.ioPtrs.clustersNative;
  const auto& input = compressor.mPtrs;
  auto* output = compressor.mOutput;

  const int32_t nWarps = nThreads / GPUCA_WARP_SIZE;
  const int32_t iWarp = iThread / GPUCA_WARP_SIZE;
  const int32_t nLanes = GPUCA_WARP_SIZE;
  const int32_t iLane = iThread % GPUCA_WARP_SIZE;
  auto* buf = smem.getBuffer<Vec128>(iWarp);

  if (iBlock == 0) {
    compressorMemcpyBasic(output->nSliceRowClusters, input.nSliceRowClusters, compressor.NSECTORS * GPUCA_ROW_COUNT, nThreads, iThread);
    compressorMemcpyBasic(output->nTrackClusters, input.nTrackClusters, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->qPtA, input.qPtA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->rowA, input.rowA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->sliceA, input.sliceA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->timeA, input.timeA, compressor.mMemory->nStoredTracks, nThreads, iThread);
    compressorMemcpyBasic(output->padA, input.padA, compressor.mMemory->nStoredTracks, nThreads, iThread);
  } else if (iBlock & 1) {
    const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
    const uint32_t iGlobalWarp = nWarps * (iBlock - 1) / 2 + iWarp;

    const uint32_t nRows = compressor.NSECTORS * GPUCA_ROW_COUNT;
    uint32_t rowsPerWarp = (nRows + nGlobalWarps - 1) / nGlobalWarps;
    uint32_t rowStart = rowsPerWarp * iGlobalWarp;
    uint32_t rowEnd = CAMath::Min(nRows, rowStart + rowsPerWarp);
    if (rowStart >= nRows) {
      rowStart = 0;
      rowEnd = 0;
    }
    rowsPerWarp = rowEnd - rowStart;

    const uint32_t rowsOffset = calculateWarpOffsets(smem, input.nSliceRowClusters, rowStart, rowEnd, nWarps, iWarp, nLanes, iLane);
    const uint32_t* clusterOffsets = &clusters->clusterOffset[0][0] + rowStart;
    const uint32_t* nSectorRowClusters = input.nSliceRowClusters + rowStart;

    compressorMemcpyBuffered(buf, output->qTotU + rowsOffset, input.qTotU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->qMaxU + rowsOffset, input.qMaxU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->flagsU + rowsOffset, input.flagsU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->padDiffU + rowsOffset, input.padDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->timeDiffU + rowsOffset, input.timeDiffU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->sigmaPadU + rowsOffset, input.sigmaPadU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
    compressorMemcpyBuffered(buf, output->sigmaTimeU + rowsOffset, input.sigmaTimeU, nSectorRowClusters, clusterOffsets, rowsPerWarp, nLanes, iLane, 0, compressor.mMaxClusterFactorBase1024);
  } else {
    const uint32_t nGlobalWarps = nWarps * (nBlocks - 1) / 2;
    const uint32_t iGlobalWarp = nWarps * (iBlock / 2 - 1) + iWarp;

    const uint32_t nStoredTracks = compressor.mMemory->nStoredTracks;
    uint32_t tracksPerWarp = (nStoredTracks + nGlobalWarps - 1) / nGlobalWarps;
    uint32_t trackStart = tracksPerWarp * iGlobalWarp;
    uint32_t trackEnd = CAMath::Min(nStoredTracks, trackStart + tracksPerWarp);
    if (trackStart >= nStoredTracks) {
      trackStart = 0;
      trackEnd = 0;
    }
    tracksPerWarp = trackEnd - trackStart;

    const uint32_t tracksOffset = calculateWarpOffsets(smem, input.nTrackClusters, trackStart, trackEnd, nWarps, iWarp, nLanes, iLane);
    const uint16_t* nTrackClustersPtr = input.nTrackClusters + trackStart;
    const uint32_t* aClsFstIdx = compressor.mAttachedClusterFirstIndex + trackStart;

    compressorMemcpyBuffered(buf, output->qTotA + tracksOffset, input.qTotA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->qMaxA + tracksOffset, input.qMaxA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->flagsA + tracksOffset, input.flagsA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->sigmaPadA + tracksOffset, input.sigmaPadA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);
    compressorMemcpyBuffered(buf, output->sigmaTimeA + tracksOffset, input.sigmaTimeA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 0);

    // First index stored with track
    uint32_t tracksOffsetDiff = tracksOffset - trackStart;
    compressorMemcpyBuffered(buf, output->rowDiffA + tracksOffsetDiff, input.rowDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->sliceLegDiffA + tracksOffsetDiff, input.sliceLegDiffA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->padResA + tracksOffsetDiff, input.padResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
    compressorMemcpyBuffered(buf, output->timeResA + tracksOffsetDiff, input.timeResA, nTrackClustersPtr, aClsFstIdx, tracksPerWarp, nLanes, iLane, 1);
  }
}

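// Kernel entry points: pick the staging-buffer width (32/64/128 bit) for the buffered
// single-kernel gather, or dispatch to the multi-block variant.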
template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered32>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherBuffered<Vec32>(nBlocks, nThreads, iBlock, iThread, smem, processors);
}

template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered64>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherBuffered<Vec64>(nBlocks, nThreads, iBlock, iThread, smem, processors);
}

template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::buffered128>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherBuffered<Vec128>(nBlocks, nThreads, iBlock, iThread, smem, processors);
}

template <>
GPUdii() void GPUTPCCompressionGatherKernels::Thread<GPUTPCCompressionGatherKernels::multiBlock>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors)
{
  gatherMulti(nBlocks, nThreads, iBlock, iThread, smem, processors);
}