Project
Loading...
Searching...
No Matches
GPUChainTrackingCompression.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
15#include "GPUChainTracking.h"
17#include "GPULogging.h"
18#include "GPUO2DataTypes.h"
22#include "GPUDefParametersRuntime.h"
23#include "GPUConstantMem.h" // TODO: Try to get rid of as many GPUConstantMem includes as possible!
28#include "utils/strtag.h"
29
30#include <numeric>
31
32using namespace o2::gpu;
33using namespace o2::tpc;
34
36{
38 RecoStep myStep = RecoStep::TPCCompression;
39 bool doGPU = GetRecoStepsGPU() & RecoStep::TPCCompression;
40 int32_t gatherMode = mRec->GetProcessingSettings().tpcCompressionGatherMode == -1 ? mRec->getGPUParameters(doGPU).par_COMP_GATHER_MODE : mRec->GetProcessingSettings().tpcCompressionGatherMode;
42 GPUTPCCompression& CompressorShadow = doGPU ? processorsShadow()->tpcCompressor : Compressor;
43 const auto& threadContext = GetThreadContext();
44 if (mPipelineFinalizationCtx && GetProcessingSettings().doublePipelineClusterizer) {
46 }
47
48 if (gatherMode == 3) {
50 }
51 SetupGPUProcessor(&Compressor, true);
52 new (Compressor.mMemory) GPUTPCCompression::memory;
53 WriteToConstantMemory(myStep, (char*)&processors()->tpcCompressor - (char*)processors(), &CompressorShadow, sizeof(CompressorShadow), 0);
54 TransferMemoryResourcesToGPU(myStep, &Compressor, 0);
55 runKernel<GPUMemClean16>(GetGridAutoStep(0, RecoStep::TPCCompression), CompressorShadow.mClusterStatus, Compressor.mMaxClusters * sizeof(CompressorShadow.mClusterStatus[0]));
56 runKernel<GPUTPCCompressionKernels, GPUTPCCompressionKernels::step0attached>(GetGridAuto(0));
57 if (GetProcessingSettings().tpcWriteClustersAfterRejection) {
58 WriteReducedClusters();
59 }
60 runKernel<GPUTPCCompressionKernels, GPUTPCCompressionKernels::step1unattached>(GetGridAuto(0));
61 TransferMemoryResourcesToHost(myStep, &Compressor, 0);
62#ifdef GPUCA_TPC_GEOMETRY_O2
63 if (mPipelineFinalizationCtx && GetProcessingSettings().doublePipelineClusterizer) {
65 auto* foreignChain = (GPUChainTracking*)GetNextChainInQueue();
66 foreignChain->RunTPCClusterizer_prepare(false);
67 foreignChain->mCFContext->ptrClusterNativeSave = processorsShadow()->ioPtrs.clustersNative;
68 }
69#endif
72 memset((void*)O, 0, sizeof(*O));
73 O->nTracks = Compressor.mMemory->nStoredTracks;
78 O->nComppressionModes = param().rec.tpc.compressionTypeMask;
79 O->solenoidBz = param().bzkG;
82 Compressor.mOutputFlat->set(outputSize, *Compressor.mOutput);
83 char* hostFlatPtr = (char*)Compressor.mOutput->qTotU; // First array as allocated in GPUTPCCompression::SetPointersCompressedClusters
84 size_t copySize = 0;
85 if (gatherMode == 3) {
86 CompressorShadow.mOutputA = Compressor.mOutput;
87 copySize = AllocateRegisteredMemory(Compressor.mMemoryResOutputGPU); // We overwrite Compressor.mOutput with the allocated output pointers on the GPU
88 }
89 const o2::tpc::CompressedClustersPtrs* P = nullptr;
90 HighResTimer* gatherTimer = nullptr;
91 int32_t outputStream = 0;
92 if (GetProcessingSettings().doublePipeline) {
93 SynchronizeStream(OutputStream()); // Synchronize output copies running in parallel from memory that might be released, only the following async copy from stacked memory is safe after the chain finishes.
94 outputStream = OutputStream();
95 }
96 if (gatherMode >= 2) {
97 if (gatherMode == 2) {
98 void* devicePtr = mRec->getGPUPointer(Compressor.mOutputFlat);
99 if (devicePtr != Compressor.mOutputFlat) {
100 CompressedClustersPtrs& ptrs = *Compressor.mOutput; // We need to update the ptrs with the gpu-mapped version of the host address space
101 for (uint32_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
102 reinterpret_cast<char**>(&ptrs)[i] = reinterpret_cast<char**>(&ptrs)[i] + (reinterpret_cast<char*>(devicePtr) - reinterpret_cast<char*>(Compressor.mOutputFlat));
103 }
104 }
105 }
106 TransferMemoryResourcesToGPU(myStep, &Compressor, outputStream);
107 constexpr uint32_t nBlocksDefault = 2;
108 constexpr uint32_t nBlocksMulti = 1 + 2 * 200;
109 int32_t gatherModeKernel = mRec->GetProcessingSettings().tpcCompressionGatherModeKernel == -1 ? mRec->getGPUParameters(doGPU).par_COMP_GATHER_KERNEL : mRec->GetProcessingSettings().tpcCompressionGatherMode;
110 switch (gatherModeKernel) {
111 case 0:
112 runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::unbuffered>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
113 getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::unbuffered>(RecoStep::TPCCompression, 0, outputSize, false);
114 break;
115 case 1:
116 runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered32>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
117 getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered32>(RecoStep::TPCCompression, 0, outputSize, false);
118 break;
119 case 2:
120 runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered64>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
121 getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered64>(RecoStep::TPCCompression, 0, outputSize, false);
122 break;
123 case 3:
124 runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered128>(GetGridBlkStep(nBlocksDefault, outputStream, RecoStep::TPCCompression));
125 getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::buffered128>(RecoStep::TPCCompression, 0, outputSize, false);
126 break;
127 case 4:
128 static_assert((nBlocksMulti & 1) && nBlocksMulti >= 3);
129 runKernel<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::multiBlock>(GetGridBlkStep(nBlocksMulti, outputStream, RecoStep::TPCCompression));
130 getKernelTimer<GPUTPCCompressionGatherKernels, GPUTPCCompressionGatherKernels::multiBlock>(RecoStep::TPCCompression, 0, outputSize, false);
131 break;
132 default:
133 GPUError("Invalid compression kernel %d selected.", (int32_t)gatherModeKernel);
134 return 1;
135 }
136 if (gatherMode == 3) {
137 RecordMarker(&mEvents->stream[outputStream], outputStream);
138 char* deviceFlatPts = (char*)Compressor.mOutput->qTotU;
139 if (GetProcessingSettings().doublePipeline) {
140 const size_t blockSize = CAMath::nextMultipleOf<1024>(copySize / 30);
141 const uint32_t n = (copySize + blockSize - 1) / blockSize;
142 for (uint32_t i = 0; i < n; i++) {
143 GPUMemCpy(myStep, hostFlatPtr + i * blockSize, deviceFlatPts + i * blockSize, CAMath::Min(blockSize, copySize - i * blockSize), outputStream, false);
144 }
145 } else {
146 GPUMemCpy(myStep, hostFlatPtr, deviceFlatPts, copySize, outputStream, false);
147 }
148 }
149 } else {
150 int8_t direction = 0;
151 if (gatherMode == 0) {
152 P = &CompressorShadow.mPtrs;
153 } else if (gatherMode == 1) {
154 P = &Compressor.mPtrs;
155 direction = -1;
156 gatherTimer = &getTimer<GPUTPCCompressionKernels>("GPUTPCCompression_GatherOnCPU", 0);
157 gatherTimer->Start();
158 }
159 GPUMemCpyAlways(myStep, O->nSliceRowClusters, P->nSliceRowClusters, NSECTORS * GPUCA_ROW_COUNT * sizeof(O->nSliceRowClusters[0]), outputStream, direction);
160 GPUMemCpyAlways(myStep, O->nTrackClusters, P->nTrackClusters, O->nTracks * sizeof(O->nTrackClusters[0]), outputStream, direction);
161 SynchronizeStream(outputStream);
162 uint32_t offset = 0;
163 for (uint32_t i = 0; i < NSECTORS; i++) {
164 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
165 uint32_t srcOffset = mIOPtrs.clustersNative->clusterOffset[i][j] * Compressor.mMaxClusterFactorBase1024 / 1024;
166 GPUMemCpyAlways(myStep, O->qTotU + offset, P->qTotU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->qTotU[0]), outputStream, direction);
167 GPUMemCpyAlways(myStep, O->qMaxU + offset, P->qMaxU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->qMaxU[0]), outputStream, direction);
168 GPUMemCpyAlways(myStep, O->flagsU + offset, P->flagsU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->flagsU[0]), outputStream, direction);
169 GPUMemCpyAlways(myStep, O->padDiffU + offset, P->padDiffU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->padDiffU[0]), outputStream, direction);
170 GPUMemCpyAlways(myStep, O->timeDiffU + offset, P->timeDiffU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->timeDiffU[0]), outputStream, direction);
171 GPUMemCpyAlways(myStep, O->sigmaPadU + offset, P->sigmaPadU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->sigmaPadU[0]), outputStream, direction);
172 GPUMemCpyAlways(myStep, O->sigmaTimeU + offset, P->sigmaTimeU + srcOffset, O->nSliceRowClusters[i * GPUCA_ROW_COUNT + j] * sizeof(O->sigmaTimeU[0]), outputStream, direction);
174 }
175 }
176 offset = 0;
177 for (uint32_t i = 0; i < O->nTracks; i++) {
178 GPUMemCpyAlways(myStep, O->qTotA + offset, P->qTotA + Compressor.mAttachedClusterFirstIndex[i], O->nTrackClusters[i] * sizeof(O->qTotA[0]), outputStream, direction);
179 GPUMemCpyAlways(myStep, O->qMaxA + offset, P->qMaxA + Compressor.mAttachedClusterFirstIndex[i], O->nTrackClusters[i] * sizeof(O->qMaxA[0]), outputStream, direction);
180 GPUMemCpyAlways(myStep, O->flagsA + offset, P->flagsA + Compressor.mAttachedClusterFirstIndex[i], O->nTrackClusters[i] * sizeof(O->flagsA[0]), outputStream, direction);
181 GPUMemCpyAlways(myStep, O->sigmaPadA + offset, P->sigmaPadA + Compressor.mAttachedClusterFirstIndex[i], O->nTrackClusters[i] * sizeof(O->sigmaPadA[0]), outputStream, direction);
182 GPUMemCpyAlways(myStep, O->sigmaTimeA + offset, P->sigmaTimeA + Compressor.mAttachedClusterFirstIndex[i], O->nTrackClusters[i] * sizeof(O->sigmaTimeA[0]), outputStream, direction);
183
184 // First index stored with track
185 GPUMemCpyAlways(myStep, O->rowDiffA + offset - i, P->rowDiffA + Compressor.mAttachedClusterFirstIndex[i] + 1, (O->nTrackClusters[i] - 1) * sizeof(O->rowDiffA[0]), outputStream, direction);
186 GPUMemCpyAlways(myStep, O->sliceLegDiffA + offset - i, P->sliceLegDiffA + Compressor.mAttachedClusterFirstIndex[i] + 1, (O->nTrackClusters[i] - 1) * sizeof(O->sliceLegDiffA[0]), outputStream, direction);
187 GPUMemCpyAlways(myStep, O->padResA + offset - i, P->padResA + Compressor.mAttachedClusterFirstIndex[i] + 1, (O->nTrackClusters[i] - 1) * sizeof(O->padResA[0]), outputStream, direction);
188 GPUMemCpyAlways(myStep, O->timeResA + offset - i, P->timeResA + Compressor.mAttachedClusterFirstIndex[i] + 1, (O->nTrackClusters[i] - 1) * sizeof(O->timeResA[0]), outputStream, direction);
189 offset += O->nTrackClusters[i];
190 }
191 GPUMemCpyAlways(myStep, O->qPtA, P->qPtA, O->nTracks * sizeof(O->qPtA[0]), outputStream, direction);
192 GPUMemCpyAlways(myStep, O->rowA, P->rowA, O->nTracks * sizeof(O->rowA[0]), outputStream, direction);
193 GPUMemCpyAlways(myStep, O->sliceA, P->sliceA, O->nTracks * sizeof(O->sliceA[0]), outputStream, direction);
194 GPUMemCpyAlways(myStep, O->timeA, P->timeA, O->nTracks * sizeof(O->timeA[0]), outputStream, direction);
195 GPUMemCpyAlways(myStep, O->padA, P->padA, O->nTracks * sizeof(O->padA[0]), outputStream, direction);
196 }
197 if (gatherMode == 1) {
198 gatherTimer->Stop();
199 }
201 if (gatherMode == 3) {
204 }
205
206 if (mPipelineFinalizationCtx == nullptr) {
207 SynchronizeStream(outputStream);
208 } else {
210 }
211 mRec->PopNonPersistentMemory(RecoStep::TPCCompression, qStr2Tag("TPCCOMPR"));
212 if (GetProcessingSettings().deterministicGPUReconstruction) {
215 }
217 return 0;
218}
219
221{
222 const bool needFullFiltering = GetProcessingSettings().tpcApplyCFCutsAtDecoding || (GetProcessingSettings().tpcApplyClusterFilterOnCPU > 0);
223 const bool runTimeBinCutFiltering = param().tpcCutTimeBin > 0;
224 if (needFullFiltering && !GetProcessingSettings().tpcUseOldCPUDecoding) {
225 GPUFatal("tpcApplyCFCutsAtDecoding, tpcApplyClusterFilterOnCPU and tpcCutTimeBin currently require tpcUseOldCPUDecoding");
226 }
227
229 const bool useTemporaryBz = cmprClsHost.nTracks && cmprClsHost.solenoidBz != -1e6f && cmprClsHost.solenoidBz != param().bzkG && !GetProcessingSettings().doublePipeline;
230 std::unique_ptr<GPUParam> tmpParam;
231 int32_t inputStream = 0;
232
233 if (useTemporaryBz) {
234 tmpParam = std::make_unique<GPUParam>(param());
237 WriteConstantParams(inputStream);
238 }
239 if (GetProcessingSettings().tpcUseOldCPUDecoding) {
240 const bool runFiltering = needFullFiltering || runTimeBinCutFiltering;
241 const auto& threadContext = GetThreadContext();
243 auto allocatorFinal = [this](size_t size) {
244 this->mInputsHost->mNClusterNative = this->mInputsShadow->mNClusterNative = size;
245 this->AllocateRegisteredMemory(this->mInputsHost->mResourceClusterNativeOutput, this->mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]);
246 return this->mInputsHost->mPclusterNativeOutput;
247 };
248 std::unique_ptr<ClusterNative[]> tmpBuffer;
249 auto allocatorTmp = [&tmpBuffer](size_t size) {
250 return ((tmpBuffer = std::make_unique<ClusterNative[]>(size))).get();
251 };
252 auto& decompressTimer = getTimer<TPCClusterDecompressor>("TPCDecompression", 0);
253 auto allocatorUse = runFiltering ? std::function<ClusterNative*(size_t)>{allocatorTmp} : std::function<ClusterNative*(size_t)>{allocatorFinal};
254 decompressTimer.Start();
255 if (decomp.decompress(mIOPtrs.tpcCompressedClusters, *mClusterNativeAccess, allocatorUse, param(), GetProcessingSettings().deterministicGPUReconstruction)) {
256 GPUError("Error decompressing clusters");
257 return 1;
258 }
259 if (runFiltering) {
260 RunTPCClusterFilter(mClusterNativeAccess.get(), allocatorFinal, GetProcessingSettings().tpcApplyCFCutsAtDecoding);
261 }
262 decompressTimer.Stop();
264 if (mRec->IsGPU()) {
265 AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeBuffer);
266 processorsShadow()->ioPtrs.clustersNative = mInputsShadow->mPclusterNativeAccess;
267 WriteToConstantMemory(RecoStep::TPCDecompression, (char*)&processors()->ioPtrs - (char*)processors(), &processorsShadow()->ioPtrs, sizeof(processorsShadow()->ioPtrs), 0);
268 *mInputsHost->mPclusterNativeAccess = *mIOPtrs.clustersNative;
269 mInputsHost->mPclusterNativeAccess->clustersLinear = mInputsShadow->mPclusterNativeBuffer;
270 mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
271 GPUMemCpy(RecoStep::TPCDecompression, mInputsShadow->mPclusterNativeBuffer, mIOPtrs.clustersNative->clustersLinear, sizeof(mIOPtrs.clustersNative->clustersLinear[0]) * mIOPtrs.clustersNative->nClustersTotal, 0, true);
272 TransferMemoryResourceLinkToGPU(RecoStep::TPCDecompression, mInputsHost->mResourceClusterNativeAccess, 0);
274 }
275 } else {
277 RecoStep myStep = RecoStep::TPCDecompression;
278 bool doGPU = GetRecoStepsGPU() & RecoStep::TPCDecompression;
280 GPUTPCDecompression& DecompressorShadow = doGPU ? processorsShadow()->tpcDecompressor : Decompressor;
281 const auto& threadContext = GetThreadContext();
282 CompressedClusters& inputGPU = Decompressor.mInputGPU;
283 CompressedClusters& inputGPUShadow = DecompressorShadow.mInputGPU;
284
285 if (cmprClsHost.nTracks && cmprClsHost.solenoidBz != -1e6f && cmprClsHost.solenoidBz != param().bzkG) {
286 throw std::runtime_error("Configured solenoid Bz " + std::to_string(param().bzkG) + " does not match value used for track model encoding " + std::to_string(cmprClsHost.solenoidBz));
287 }
288 if (cmprClsHost.nTracks && cmprClsHost.maxTimeBin != -1e6 && cmprClsHost.maxTimeBin != param().continuousMaxTimeBin) {
289 throw std::runtime_error("Configured max time bin " + std::to_string(param().continuousMaxTimeBin) + " does not match value used for track model encoding " + std::to_string(cmprClsHost.maxTimeBin));
290 }
291
292 int32_t unattachedStream = mRec->NStreams() - 1;
293 inputGPU = cmprClsHost;
294 SetupGPUProcessor(&Decompressor, true);
295 WriteToConstantMemory(myStep, (char*)&processors()->tpcDecompressor - (char*)processors(), &DecompressorShadow, sizeof(DecompressorShadow), inputStream);
296 inputGPU = cmprClsHost;
297
298 bool toGPU = true;
299 runKernel<GPUMemClean16>({GetGridAutoStep(inputStream, RecoStep::TPCDecompression), krnlRunRangeNone, &mEvents->init}, DecompressorShadow.mNativeClustersIndex, NSECTORS * GPUCA_ROW_COUNT * sizeof(DecompressorShadow.mNativeClustersIndex[0]));
300 int32_t nStreams = doGPU ? mRec->NStreams() - 1 : 1;
301 if (cmprClsHost.nAttachedClusters != 0) {
302 std::exclusive_scan(cmprClsHost.nTrackClusters, cmprClsHost.nTrackClusters + cmprClsHost.nTracks, Decompressor.mAttachedClustersOffsets, 0u); // computing clusters offsets for first kernel
303 for (int32_t iStream = 0; iStream < nStreams; iStream++) {
304 uint32_t startTrack = cmprClsHost.nTracks / nStreams * iStream;
305 uint32_t endTrack = cmprClsHost.nTracks / nStreams * (iStream + 1) + (iStream < nStreams - 1 ? 0 : cmprClsHost.nTracks % nStreams); // index of last track (excluded from computation)
306 uint32_t numTracks = endTrack - startTrack;
307 uint32_t* offsets = Decompressor.mAttachedClustersOffsets;
308 uint32_t numClusters = (endTrack == cmprClsHost.nTracks ? offsets[endTrack - 1] + cmprClsHost.nTrackClusters[endTrack - 1] : offsets[endTrack]) - offsets[startTrack];
309 uint32_t numClustersRed = numClusters - numTracks;
310 GPUMemCpy(myStep, DecompressorShadow.mAttachedClustersOffsets + startTrack, Decompressor.mAttachedClustersOffsets + startTrack, numTracks * sizeof(Decompressor.mAttachedClustersOffsets[0]), iStream, toGPU);
311 GPUMemCpy(myStep, inputGPUShadow.nTrackClusters + startTrack, cmprClsHost.nTrackClusters + startTrack, numTracks * sizeof(cmprClsHost.nTrackClusters[0]), iStream, toGPU);
312 GPUMemCpy(myStep, inputGPUShadow.qTotA + offsets[startTrack], cmprClsHost.qTotA + offsets[startTrack], numClusters * sizeof(cmprClsHost.qTotA[0]), iStream, toGPU);
313 GPUMemCpy(myStep, inputGPUShadow.qMaxA + offsets[startTrack], cmprClsHost.qMaxA + offsets[startTrack], numClusters * sizeof(cmprClsHost.qMaxA[0]), iStream, toGPU);
314 GPUMemCpy(myStep, inputGPUShadow.flagsA + offsets[startTrack], cmprClsHost.flagsA + offsets[startTrack], numClusters * sizeof(cmprClsHost.flagsA[0]), iStream, toGPU);
315 GPUMemCpy(myStep, inputGPUShadow.rowDiffA + offsets[startTrack] - startTrack, cmprClsHost.rowDiffA + offsets[startTrack] - startTrack, numClustersRed * sizeof(cmprClsHost.rowDiffA[0]), iStream, toGPU);
316 GPUMemCpy(myStep, inputGPUShadow.sliceLegDiffA + offsets[startTrack] - startTrack, cmprClsHost.sliceLegDiffA + offsets[startTrack] - startTrack, numClustersRed * sizeof(cmprClsHost.sliceLegDiffA[0]), iStream, toGPU);
317 GPUMemCpy(myStep, inputGPUShadow.padResA + offsets[startTrack] - startTrack, cmprClsHost.padResA + offsets[startTrack] - startTrack, numClustersRed * sizeof(cmprClsHost.padResA[0]), iStream, toGPU);
318 GPUMemCpy(myStep, inputGPUShadow.timeResA + offsets[startTrack] - startTrack, cmprClsHost.timeResA + offsets[startTrack] - startTrack, numClustersRed * sizeof(cmprClsHost.timeResA[0]), iStream, toGPU);
319 GPUMemCpy(myStep, inputGPUShadow.sigmaPadA + offsets[startTrack], cmprClsHost.sigmaPadA + offsets[startTrack], numClusters * sizeof(cmprClsHost.sigmaPadA[0]), iStream, toGPU);
320 GPUMemCpy(myStep, inputGPUShadow.sigmaTimeA + offsets[startTrack], cmprClsHost.sigmaTimeA + offsets[startTrack], numClusters * sizeof(cmprClsHost.sigmaTimeA[0]), iStream, toGPU);
321 GPUMemCpy(myStep, inputGPUShadow.qPtA + startTrack, cmprClsHost.qPtA + startTrack, numTracks * sizeof(cmprClsHost.qPtA[0]), iStream, toGPU);
322 GPUMemCpy(myStep, inputGPUShadow.rowA + startTrack, cmprClsHost.rowA + startTrack, numTracks * sizeof(cmprClsHost.rowA[0]), iStream, toGPU);
323 GPUMemCpy(myStep, inputGPUShadow.sliceA + startTrack, cmprClsHost.sliceA + startTrack, numTracks * sizeof(cmprClsHost.sliceA[0]), iStream, toGPU);
324 GPUMemCpy(myStep, inputGPUShadow.timeA + startTrack, cmprClsHost.timeA + startTrack, numTracks * sizeof(cmprClsHost.timeA[0]), iStream, toGPU);
325 GPUMemCpy(myStep, inputGPUShadow.padA + startTrack, cmprClsHost.padA + startTrack, numTracks * sizeof(cmprClsHost.padA[0]), iStream, toGPU);
326 runKernel<GPUTPCDecompressionKernels, GPUTPCDecompressionKernels::step0attached>({GetGridAuto(iStream), krnlRunRangeNone, {&mEvents->stream[iStream], &mEvents->init}}, startTrack, endTrack);
327 }
328 }
329 GPUMemCpy(myStep, inputGPUShadow.nSliceRowClusters, cmprClsHost.nSliceRowClusters, NSECTORS * GPUCA_ROW_COUNT * sizeof(cmprClsHost.nSliceRowClusters[0]), unattachedStream, toGPU);
330 GPUMemCpy(myStep, inputGPUShadow.qTotU, cmprClsHost.qTotU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.qTotU[0]), unattachedStream, toGPU);
331 GPUMemCpy(myStep, inputGPUShadow.qMaxU, cmprClsHost.qMaxU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.qMaxU[0]), unattachedStream, toGPU);
332 GPUMemCpy(myStep, inputGPUShadow.flagsU, cmprClsHost.flagsU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.flagsU[0]), unattachedStream, toGPU);
333 GPUMemCpy(myStep, inputGPUShadow.padDiffU, cmprClsHost.padDiffU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.padDiffU[0]), unattachedStream, toGPU);
334 GPUMemCpy(myStep, inputGPUShadow.timeDiffU, cmprClsHost.timeDiffU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.timeDiffU[0]), unattachedStream, toGPU);
335 GPUMemCpy(myStep, inputGPUShadow.sigmaPadU, cmprClsHost.sigmaPadU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.sigmaPadU[0]), unattachedStream, toGPU);
336 GPUMemCpy(myStep, inputGPUShadow.sigmaTimeU, cmprClsHost.sigmaTimeU, cmprClsHost.nUnattachedClusters * sizeof(cmprClsHost.sigmaTimeU[0]), unattachedStream, toGPU);
337
338 TransferMemoryResourceLinkToHost(RecoStep::TPCDecompression, Decompressor.mResourceTmpIndexes, inputStream, nullptr, mEvents->stream, nStreams);
339 SynchronizeStream(inputStream);
340 uint32_t offset = 0;
341 uint32_t decodedAttachedClusters = 0;
342 for (uint32_t i = 0; i < NSECTORS; i++) {
343 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
344 uint32_t linearIndex = i * GPUCA_ROW_COUNT + j;
345 uint32_t unattachedOffset = (linearIndex >= cmprClsHost.nSliceRows) ? 0 : cmprClsHost.nSliceRowClusters[linearIndex];
346 (mClusterNativeAccess->nClusters)[i][j] = Decompressor.mNativeClustersIndex[linearIndex] + unattachedOffset;
347 Decompressor.mUnattachedClustersOffsets[linearIndex] = offset;
348 offset += unattachedOffset;
349 decodedAttachedClusters += Decompressor.mNativeClustersIndex[linearIndex];
350 }
351 }
352 TransferMemoryResourceLinkToGPU(RecoStep::TPCDecompression, Decompressor.mResourceTmpClustersOffsets, inputStream);
353 if (decodedAttachedClusters != cmprClsHost.nAttachedClusters) {
354 GPUWarning("%u / %u clusters failed track model decoding (%f %%)", cmprClsHost.nAttachedClusters - decodedAttachedClusters, cmprClsHost.nAttachedClusters, 100.f * (float)(cmprClsHost.nAttachedClusters - decodedAttachedClusters) / (float)cmprClsHost.nAttachedClusters);
355 }
356 if (runTimeBinCutFiltering) { // If filtering, allocate a temporary buffer and cluster native access in decompressor context
357 Decompressor.mNClusterNativeBeforeFiltering = DecompressorShadow.mNClusterNativeBeforeFiltering = decodedAttachedClusters + cmprClsHost.nUnattachedClusters;
360 mClusterNativeAccess->clustersLinear = DecompressorShadow.mNativeClustersBuffer;
361 mClusterNativeAccess->setOffsetPtrs();
363 WriteToConstantMemory(myStep, (char*)&processors()->tpcDecompressor - (char*)processors(), &DecompressorShadow, sizeof(DecompressorShadow), inputStream);
364 TransferMemoryResourceLinkToGPU(RecoStep::TPCDecompression, Decompressor.mResourceClusterNativeAccess, inputStream, &mEvents->single);
365 } else { // If not filtering, directly allocate the final buffers
366 mInputsHost->mNClusterNative = mInputsShadow->mNClusterNative = cmprClsHost.nAttachedClusters + cmprClsHost.nUnattachedClusters;
368 AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeBuffer);
369 DecompressorShadow.mNativeClustersBuffer = mInputsShadow->mPclusterNativeBuffer;
370 Decompressor.mNativeClustersBuffer = mInputsHost->mPclusterNativeOutput;
371 DecompressorShadow.mClusterNativeAccess = mInputsShadow->mPclusterNativeAccess;
372 Decompressor.mClusterNativeAccess = mInputsHost->mPclusterNativeAccess;
373 WriteToConstantMemory(myStep, (char*)&processors()->tpcDecompressor - (char*)processors(), &DecompressorShadow, sizeof(DecompressorShadow), inputStream);
374 if (doGPU) {
375 mClusterNativeAccess->clustersLinear = mInputsShadow->mPclusterNativeBuffer;
376 mClusterNativeAccess->setOffsetPtrs();
377 *mInputsHost->mPclusterNativeAccess = *mClusterNativeAccess;
378 processorsShadow()->ioPtrs.clustersNative = mInputsShadow->mPclusterNativeAccess;
379 WriteToConstantMemory(RecoStep::TPCDecompression, (char*)&processors()->ioPtrs - (char*)processors(), &processorsShadow()->ioPtrs, sizeof(processorsShadow()->ioPtrs), inputStream);
380 TransferMemoryResourceLinkToGPU(RecoStep::TPCDecompression, mInputsHost->mResourceClusterNativeAccess, inputStream, &mEvents->single);
381 }
383 mClusterNativeAccess->clustersLinear = mInputsHost->mPclusterNativeOutput;
384 mClusterNativeAccess->setOffsetPtrs();
385 *mInputsHost->mPclusterNativeAccess = *mClusterNativeAccess;
386 }
387
388 uint32_t batchSize = doGPU ? 6 : NSECTORS;
389 for (uint32_t iSector = 0; iSector < NSECTORS; iSector = iSector + batchSize) {
390 int32_t iStream = (iSector / batchSize) % mRec->NStreams();
391 runKernel<GPUTPCDecompressionKernels, GPUTPCDecompressionKernels::step1unattached>({GetGridAuto(iStream), krnlRunRangeNone, {nullptr, &mEvents->single}}, iSector, batchSize);
392 uint32_t copySize = std::accumulate(mClusterNativeAccess->nClustersSector + iSector, mClusterNativeAccess->nClustersSector + iSector + batchSize, 0u);
393 if (!runTimeBinCutFiltering) {
394 GPUMemCpy(RecoStep::TPCDecompression, mInputsHost->mPclusterNativeOutput + mClusterNativeAccess->clusterOffset[iSector][0], DecompressorShadow.mNativeClustersBuffer + mClusterNativeAccess->clusterOffset[iSector][0], sizeof(Decompressor.mNativeClustersBuffer[0]) * copySize, iStream, false);
395 }
396 }
398
399 if (runTimeBinCutFiltering) { // If filtering is applied, count how many clusters will remain after filtering and allocate final buffers accordingly
401 WriteToConstantMemory(myStep, (char*)&processors()->tpcDecompressor - (char*)processors(), &DecompressorShadow, sizeof(DecompressorShadow), unattachedStream);
402 runKernel<GPUMemClean16>({GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression), krnlRunRangeNone}, DecompressorShadow.mNClusterPerSectorRow, NSECTORS * GPUCA_ROW_COUNT * sizeof(DecompressorShadow.mNClusterPerSectorRow[0]));
403 runKernel<GPUTPCDecompressionUtilKernels, GPUTPCDecompressionUtilKernels::countFilteredClusters>(GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression));
404 TransferMemoryResourceLinkToHost(RecoStep::TPCDecompression, Decompressor.mResourceNClusterPerSectorRow, unattachedStream);
405 SynchronizeStream(unattachedStream);
406 uint32_t nClustersFinal = std::accumulate(Decompressor.mNClusterPerSectorRow, Decompressor.mNClusterPerSectorRow + inputGPU.nSliceRows, 0u);
407 mInputsHost->mNClusterNative = mInputsShadow->mNClusterNative = nClustersFinal;
409 AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeBuffer);
410 DecompressorShadow.mNativeClustersBuffer = mInputsShadow->mPclusterNativeBuffer;
411 Decompressor.mNativeClustersBuffer = mInputsHost->mPclusterNativeOutput;
412 WriteToConstantMemory(myStep, (char*)&processors()->tpcDecompressor - (char*)processors(), &DecompressorShadow, sizeof(DecompressorShadow), unattachedStream);
413 for (uint32_t i = 0; i < NSECTORS; i++) {
414 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
415 mClusterNativeAccess->nClusters[i][j] = Decompressor.mNClusterPerSectorRow[i * GPUCA_ROW_COUNT + j];
416 }
417 }
418 if (doGPU) {
419 mClusterNativeAccess->clustersLinear = mInputsShadow->mPclusterNativeBuffer;
420 mClusterNativeAccess->setOffsetPtrs();
421 *mInputsHost->mPclusterNativeAccess = *mClusterNativeAccess;
422 processorsShadow()->ioPtrs.clustersNative = mInputsShadow->mPclusterNativeAccess;
423 WriteToConstantMemory(RecoStep::TPCDecompression, (char*)&processors()->ioPtrs - (char*)processors(), &processorsShadow()->ioPtrs, sizeof(processorsShadow()->ioPtrs), unattachedStream);
424 TransferMemoryResourceLinkToGPU(RecoStep::TPCDecompression, mInputsHost->mResourceClusterNativeAccess, unattachedStream);
425 }
427 mClusterNativeAccess->clustersLinear = mInputsHost->mPclusterNativeOutput;
428 mClusterNativeAccess->setOffsetPtrs();
429 runKernel<GPUTPCDecompressionUtilKernels, GPUTPCDecompressionUtilKernels::storeFilteredClusters>(GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression));
430 GPUMemCpy(RecoStep::TPCDecompression, mInputsHost->mPclusterNativeOutput, DecompressorShadow.mNativeClustersBuffer, sizeof(Decompressor.mNativeClustersBuffer[0]) * nClustersFinal, unattachedStream, false);
431 SynchronizeStream(unattachedStream);
432 }
433 if (GetProcessingSettings().deterministicGPUReconstruction || GetProcessingSettings().debugLevel >= 4) {
434 runKernel<GPUTPCDecompressionUtilKernels, GPUTPCDecompressionUtilKernels::sortPerSectorRow>(GetGridAutoStep(unattachedStream, RecoStep::TPCDecompression));
436 if (doGPU) {
437 for (uint32_t i = 0; i < NSECTORS; i++) {
438 for (uint32_t j = 0; j < GPUCA_ROW_COUNT; j++) {
439 ClusterNative* begin = mInputsHost->mPclusterNativeOutput + decoded->clusterOffset[i][j];
440 ClusterNative* end = begin + decoded->nClusters[i][j];
441 std::sort(begin, end);
442 }
443 }
444 }
445 SynchronizeStream(unattachedStream);
446 }
447 mRec->PopNonPersistentMemory(RecoStep::TPCDecompression, qStr2Tag("TPCDCMPR"));
448 }
449 if (useTemporaryBz) {
451 param() = *tmpParam;
452 tmpParam.reset();
454 }
456 return 0;
457}
458
459void GPUChainTracking::WriteReducedClusters()
460{
// Writes the subset of native clusters that survive the compression rejection
// (Compressor.rejectCluster) to the clusterNative output, together with their MC labels
// when truth information is present.
// NOTE(review): this rendering elides several original lines - clOutput, labelOutput,
// labelContainer and tmpContainer are declared on elided lines, and at least one opening
// 'if (...) {' for the MC-label scope is missing, which is why braces look unbalanced below.
 462 mClusterNativeAccessReduced = std::make_unique<ClusterNativeAccess>();
 463 uint32_t nOutput = 0;
// First pass: count, per (sector,row), how many clusters are NOT rejected.
 464 for (uint32_t iSec = 0; iSec < GPUCA_NSECTORS; iSec++) {
 465 for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) {
 466 mClusterNativeAccessReduced->nClusters[iSec][iRow] = 0;
 467 for (uint32_t i = 0; i < mIOPtrs.clustersNative->nClusters[iSec][iRow]; i++) {
 468 mClusterNativeAccessReduced->nClusters[iSec][iRow] += !Compressor.rejectCluster(mIOPtrs.clustersNative->clusterOffset[iSec][iRow] + i, param(), mIOPtrs);
 469 }
 470 nOutput += mClusterNativeAccessReduced->nClusters[iSec][iRow];
 471 }
 472 }
 473
// clOutput (elided declaration) is the clusterNative output control; its allocator provides
// the destination buffer for the surviving clusters.
 475 if (!clOutput || !clOutput->allocator) {
 476 throw std::runtime_error("No output allocator for clusterNative available");
 477 }
 478 auto* clBuffer = (ClusterNative*)clOutput->allocator(nOutput * sizeof(ClusterNative));
 479 mClusterNativeAccessReduced->clustersLinear = clBuffer;
 480 mClusterNativeAccessReduced->setOffsetPtrs();
 481
// MC-label handling (scope opened on an elided line, presumably only when clustersMCTruth
// is present - confirm against the original source).
 482 std::pair<o2::dataformats::ConstMCLabelContainer*, o2::dataformats::ConstMCLabelContainerView*> labelBuffer;
 485 if (!labelOutput || !labelOutput->allocator) {
 486 throw std::runtime_error("No output allocator for clusterNative labels available");
 487 }
 489 labelBuffer = {&labelContainer->first, &labelContainer->second};
 490 }
 491
// Second pass: copy each surviving cluster (and its labels, when truth info exists) to the
// output, assigning compact consecutive indices.
 492 nOutput = 0;
 494 for (uint32_t i = 0; i < mIOPtrs.clustersNative->nClustersTotal; i++) {
 495 if (!Compressor.rejectCluster(i, param(), mIOPtrs)) {
 497 for (const auto& element : mIOPtrs.clustersNative->clustersMCTruth->getLabels(i)) {
 498 tmpContainer.addElement(nOutput, element);
 499 }
 500 }
 501 clBuffer[nOutput++] = mIOPtrs.clustersNative->clustersLinear[i];
 502 }
 503 }
// Flatten the collected labels into the output container and publish them in the reduced view.
 506 tmpContainer.flatten_to(*labelBuffer.first);
 507 *labelBuffer.second = *labelBuffer.first;
 508 mClusterNativeAccessReduced->clustersMCTruth = labelBuffer.second;
 509 }
 510}
A const (ready only) version of MCTruthContainer.
atype::type element
int32_t i
#define GPUCA_NSECTORS
#define GPUCA_ROW_COUNT
uint32_t j
Definition RawData.h:0
void Start()
Definition timer.cxx:64
void Stop()
Definition timer.cxx:76
void addElement(uint32_t dataindex, TruthElement const &element, bool noElement=false)
size_t flatten_to(ContainerType &container) const
static void DebugSortCompressedClusters(o2::tpc::CompressedClustersFlat *cls)
std::unique_ptr< o2::tpc::ClusterNativeAccess > mClusterNativeAccess
static void DumpClusters(std::ostream &out, const o2::tpc::ClusterNativeAccess *clusters)
std::unique_ptr< GPUTrackingInputProvider > mInputsHost
std::array< GPUOutputControl *, GPUTrackingOutputs::count()> mSubOutputControls
std::unique_ptr< std::ofstream > mDebugFile
GPUTrackingInOutPointers & mIOPtrs
std::unique_ptr< GPUTrackingInputProvider > mInputsShadow
std::unique_ptr< o2::tpc::ClusterNativeAccess > mClusterNativeAccessReduced
void RecordMarker(deviceEvent *ev, int32_t stream)
Definition GPUChain.h:108
void TransferMemoryResourceLinkToGPU(RecoStep step, int16_t res, int32_t stream=-1, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
Definition GPUChain.h:124
void GPUMemCpyAlways(RecoStep step, void *dst, const void *src, size_t size, int32_t stream, int32_t toGPU, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
Definition GPUChain.h:130
void GPUMemCpy(RecoStep step, void *dst, const void *src, size_t size, int32_t stream, int32_t toGPU, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
Definition GPUChain.h:129
bool DoDebugAndDump(RecoStep step, uint32_t mask, T &processor, S T::*func, Args &&... args)
Definition GPUChain.h:229
void SynchronizeGPU()
Definition GPUChain.h:110
GPUReconstruction::RecoStepField GetRecoStepsGPU() const
Definition GPUChain.h:72
void WriteToConstantMemory(RecoStep step, size_t offset, const void *src, size_t size, int32_t stream=-1, deviceEvent *ev=nullptr)
Definition GPUChain.h:128
krnlExec GetGridAuto(int32_t stream, GPUReconstruction::krnlDeviceType d=GPUReconstruction::krnlDeviceType::Auto, gpudatatypes::RecoStep st=gpudatatypes::RecoStep::NoRecoStep)
Definition GPUChain.cxx:42
GPUChain * GetNextChainInQueue()
Definition GPUChain.h:226
size_t AllocateRegisteredMemory(GPUProcessor *proc)
Definition GPUChain.h:218
virtual std::unique_ptr< GPUReconstructionProcessing::threadContext > GetThreadContext()
Definition GPUChain.h:109
GPUConstantMem * processors()
Definition GPUChain.h:84
static constexpr krnlRunRange krnlRunRangeNone
Definition GPUChain.h:41
bool DoDebugDump(uint32_t mask, std::function< void(Args &...)> func, Args &... args)
krnlExec GetGridAutoStep(int32_t stream, gpudatatypes::RecoStep st=gpudatatypes::RecoStep::NoRecoStep)
Definition GPUChain.cxx:47
GPUParam & param()
Definition GPUChain.h:87
void SetupGPUProcessor(T *proc, bool allocate)
Definition GPUChain.h:221
krnlExec GetGridBlkStep(uint32_t nBlocks, int32_t stream, gpudatatypes::RecoStep st=gpudatatypes::RecoStep::NoRecoStep)
Definition GPUChain.cxx:37
const GPUSettingsProcessing & GetProcessingSettings() const
Definition GPUChain.h:76
void SynchronizeStream(int32_t stream)
Definition GPUChain.h:89
GPUReconstructionCPU * mRec
Definition GPUChain.h:79
GPUConstantMem * processorsShadow()
Definition GPUChain.h:85
static constexpr int32_t NSECTORS
Definition GPUChain.h:58
void TransferMemoryResourceLinkToHost(RecoStep step, int16_t res, int32_t stream=-1, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
Definition GPUChain.h:125
void TransferMemoryResourcesToHost(RecoStep step, GPUProcessor *proc, int32_t stream=-1, bool all=false)
Definition GPUChain.h:123
void WriteConstantParams(int32_t stream=-1)
Definition GPUChain.h:127
void SynchronizeEventAndRelease(deviceEvent &ev, bool doGPU=true)
Definition GPUChain.h:92
void TransferMemoryResourcesToGPU(RecoStep step, GPUProcessor *proc, int32_t stream=-1, bool all=false)
Definition GPUChain.h:122
const GPUDefParameters & getGPUParameters(bool doGPU) const override
virtual void * getGPUPointer(void *ptr)
void PopNonPersistentMemory(RecoStep step, uint64_t tag, const GPUProcessor *proc=nullptr)
void PushNonPersistentMemory(uint64_t tag)
void BlockStackedMemory(GPUReconstruction *rec)
const GPUSettingsProcessing & GetProcessingSettings() const
const GPUSettingsGRP & GetGRPSettings() const
void DumpCompressedClusters(std::ostream &out)
o2::tpc::CompressedClusters * mOutput
o2::tpc::CompressedClusters * mOutputA
o2::tpc::CompressedClustersPtrs mPtrs
o2::tpc::CompressedClustersFlat * mOutputFlat
o2::tpc::CompressedClusters mInputGPU
o2::tpc::ClusterNative * mNativeClustersBuffer
o2::tpc::ClusterNativeAccess * mClusterNativeAccess
static int32_t decompress(const o2::tpc::CompressedClustersFlat *clustersCompressed, o2::tpc::ClusterNativeAccess &clustersNative, std::function< o2::tpc::ClusterNative *(size_t)> allocator, const GPUParam &param, bool deterministicRec)
GLdouble n
Definition glcorearb.h:1982
GLsizeiptr size
Definition glcorearb.h:659
GLuint GLsizei const GLuint const GLintptr * offsets
Definition glcorearb.h:2595
GLuint GLuint end
Definition glcorearb.h:469
GLintptr offset
Definition glcorearb.h:660
GLenum GLfloat param
Definition glcorearb.h:271
std::unique_ptr< const o2::dataformats::MCTruthContainer< MCLabel > > getLabels(framework::ProcessingContext &pc, std::string_view dataBind, EventType eventType=EventType::Standard)
Global TPC definitions and constants.
Definition SimTraits.h:168
Enum< T >::Iterator begin(Enum< T >)
Definition Defs.h:156
std::string to_string(gsl::span< T, Size > span)
Definition common.h:52
constexpr T qStr2Tag(const char(&str)[N])
Definition strtag.h:24
deviceEvent stream[GPUCA_MAX_STREAMS]
GPUTPCDecompression tpcDecompressor
GPUTrackingInOutPointers ioPtrs
GPUTPCCompression tpcCompressor
std::function< void *(size_t)> allocator
void UpdateBzOnly(float newSolenoidBz, bool assumeConstantBz=false)
Definition GPUParam.cxx:140
const o2::tpc::ClusterNativeAccess * clustersNative
const o2::tpc::CompressedClustersFlat * tpcCompressedClusters
const o2::tpc::ClusterNativeAccess * clustersNativeReduced
size_t getIndex(const GPUOutputControl &v)
GPUOutputControl compressedClusters
unsigned int nClusters[constants::MAXSECTOR][constants::MAXGLOBALPADROW]
const o2::dataformats::ConstMCTruthContainerView< o2::MCCompLabel > * clustersMCTruth
std::pair< ConstMCLabelContainer, ConstMCLabelContainerView > ConstMCLabelContainerViewWithBuffer
unsigned int clusterOffset[constants::MAXSECTOR][constants::MAXGLOBALPADROW]
const ClusterNative * clustersLinear
void set(size_t bufferSize, const CompressedClusters &v)