RecoStep myStep = RecoStep::TPCCompression;
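// Judging by the kernel names, step0attached compresses clusters attached to tracks,
// while step1unattached handles the remaining clusters.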
runKernel<GPUTPCCompressionKernels, GPUTPCCompressionKernels::step0attached>(GetGridAuto(0));
runKernel<GPUTPCCompressionKernels, GPUTPCCompressionKernels::step1unattached>(GetGridAuto(0));
#ifdef GPUCA_TPC_GEOMETRY_O2
memset((void*)O, 0, sizeof(*O));
int32_t outputStream = 0;
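// Relocate every pointer in ptrs from the host-side flat output buffer into the
// device address space by adding the device/host base-address difference.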
for (uint32_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
  reinterpret_cast<char**>(&ptrs)[i] = reinterpret_cast<char**>(&ptrs)[i] + (reinterpret_cast<char*>(devicePtr) - reinterpret_cast<char*>(Compressor.mOutputFlat));
}
constexpr uint32_t nBlocksDefault = 2;
constexpr uint32_t nBlocksMulti = 1 + 2 * 200;
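// One gather-kernel variant is selected via GetProcessingSettings().tpcCompressionGatherModeKernel
// (the surrounding switch cases are elided in this excerpt); the variants differ in how they
// buffer the scattered compression output before writing it to the flat output.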
runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::unbuffered>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::unbuffered>(RecoStep::TPCCompression, 0, outputSize, false);
runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered32>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered32>(RecoStep::TPCCompression, 0, outputSize, false);
runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered64>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered64>(RecoStep::TPCCompression, 0, outputSize, false);
runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered128>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered128>(RecoStep::TPCCompression, 0, outputSize, false);
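// The multiBlock variant requires an odd block count of at least 3, enforced at compile time below.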
static_assert((nBlocksMulti & 1) && nBlocksMulti >= 3);
runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::multiBlock>(GetGridBlkStep(nBlocksMulti, outputStream, RecoStep::TPCCompression));
getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::multiBlock>(RecoStep::TPCCompression, 0, outputSize, false);
GPUError("Invalid compression kernel %d selected.", (int32_t)GetProcessingSettings().tpcCompressionGatherModeKernel);
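// Copy the flat compressed output to the host in up to ~30 chunks of 1024-byte-aligned size,
// presumably so the transfer can make progress concurrently with other work on the stream.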
const size_t blockSize = CAMath::nextMultipleOf<1024>(copySize / 30);
const uint32_t n = (copySize + blockSize - 1) / blockSize;
for (uint32_t i = 0; i < n; i++) {
  GPUMemCpy(myStep, hostFlatPtr + i * blockSize, deviceFlatPts + i * blockSize, CAMath::Min(blockSize, copySize - i * blockSize), outputStream, false);
}
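// Alternative path: a single transfer of the whole flat buffer.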
GPUMemCpy(myStep, hostFlatPtr, deviceFlatPts, copySize, outputStream, false);
int8_t direction = 0;
P = &CompressorShadow.mPtrs;
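// On the CPU gather path, the gather step is timed with a dedicated timer instead of a kernel timer.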
gatherTimer = &getTimer<GPUTPCCompressionKernels>("GPUTPCCompression_GatherOnCPU", 0);
gatherTimer->Start();
if (mPipelineFinalizationCtx == nullptr) {
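// TPC decompression follows. The cluster-filtering options named below appear to be
// implemented only for the old CPU decoding path, hence the fatal error otherwise.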
GPUFatal("tpcApplyCFCutsAtDecoding, tpcApplyClusterFilterOnCPU and tpcCutTimeBin currently require tpcUseOldCPUDecoding");
const bool runFiltering = needFullFiltering || runTimeBinCutFiltering;
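// Two allocators: allocatorFinal hands out the final output buffer, while allocatorTmp
// fills a temporary buffer that is used when the clusters must be filtered first.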
auto allocatorFinal = [this](size_t size) {
std::unique_ptr<ClusterNative[]> tmpBuffer;
auto allocatorTmp = [&tmpBuffer](size_t size) {
auto& decompressTimer = getTimer<TPCClusterDecompressor>("TPCDecompression", 0);
auto allocatorUse = runFiltering ? std::function<ClusterNative*(size_t)>{allocatorTmp} : std::function<ClusterNative*(size_t)>{allocatorFinal};
decompressTimer.Start();
GPUError("Error decompressing clusters");
decompressTimer.Stop();
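// Build the per-sector/per-row offset pointers of the ClusterNativeAccess index.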
mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
RecoStep myStep = RecoStep::TPCDecompression;
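// Consistency checks: decoding must use the same solenoid Bz and max time bin as encoding.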
throw std::runtime_error("Configured solenoid Bz does not match value used for track model encoding");
throw std::runtime_error("Configured max time bin does not match value used for track model encoding");
int32_t inputStream = 0;
inputGPU = cmprClsHost;
inputGPU = cmprClsHost;
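// Upload the compressed track-model arrays, splitting the tracks evenly over the streams;
// the last stream additionally takes the remainder of the integer division.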
for (int32_t iStream = 0; iStream < nStreams; iStream++) {
  uint32_t startTrack = cmprClsHost.nTracks / nStreams * iStream;
  uint32_t endTrack = cmprClsHost.nTracks / nStreams * (iStream + 1) + (iStream < nStreams - 1 ? 0 : cmprClsHost.nTracks % nStreams);
  uint32_t numTracks = endTrack - startTrack;
  uint32_t numClustersRed = numClusters - numTracks;
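  // Per-track properties at the first cluster: q/pT, row, sector (slice), time, and pad.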
  GPUMemCpy(myStep, inputGPUShadow.qPtA + startTrack, cmprClsHost.qPtA + startTrack, numTracks * sizeof(cmprClsHost.qPtA[0]), iStream, toGPU);
  GPUMemCpy(myStep, inputGPUShadow.rowA + startTrack, cmprClsHost.rowA + startTrack, numTracks * sizeof(cmprClsHost.rowA[0]), iStream, toGPU);
  GPUMemCpy(myStep, inputGPUShadow.sliceA + startTrack, cmprClsHost.sliceA + startTrack, numTracks * sizeof(cmprClsHost.sliceA[0]), iStream, toGPU);
  GPUMemCpy(myStep, inputGPUShadow.timeA + startTrack, cmprClsHost.timeA + startTrack, numTracks * sizeof(cmprClsHost.timeA[0]), iStream, toGPU);
  GPUMemCpy(myStep, inputGPUShadow.padA + startTrack, cmprClsHost.padA + startTrack, numTracks * sizeof(cmprClsHost.padA[0]), iStream, toGPU);
uint32_t decodedAttachedClusters = 0;
offset += unattachedOffset;
if (runTimeBinCutFiltering) {
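// Decode sector by sector: on GPU in batches of 6 sectors round-robined over the
// available streams, on CPU all NSECTORS in a single batch.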
uint32_t batchSize = doGPU ? 6 : NSECTORS;
for (uint32_t iSector = 0; iSector < NSECTORS; iSector = iSector + batchSize) {
  int32_t iStream = (iSector / batchSize) % mRec->NStreams();
  if (!runTimeBinCutFiltering) {
  if (runTimeBinCutFiltering) {
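// Time-bin-cut filtering: count the clusters surviving the cut, store them, then
// restore the per-sector/per-row cluster ordering.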
runKernel<GPUTPCDecompressionUtilKernels, GPUTPCDecompressionUtilKernels::countFilteredClusters>(GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression));
runKernel<GPUTPCDecompressionUtilKernels, GPUTPCDecompressionUtilKernels::storeFilteredClusters>(GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression));
runKernel<GPUTPCDecompressionUtilKernels, GPUTPCDecompressionUtilKernels::sortPerSectorRow>(GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression));