GPUChainTracking.cxx
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
17#include <fstream>
18#include <chrono>
19
20#include "GPUChainTracking.h"
22#include "GPUTPCClusterData.h"
24#include "GPUTPCGMMergedTrack.h"
26#include "GPUTPCTrack.h"
27#include "GPUTPCHitId.h"
28#include "TPCZSLinkMapping.h"
29#include "GPUTRDTrackletWord.h"
31#include "GPUTPCMCInfo.h"
32#include "GPUTRDTrack.h"
33#include "GPUTRDTracker.h"
34#include "AliHLTTPCRawCluster.h"
37#include "GPUQA.h"
38#include "GPULogging.h"
41#include "GPUNewCalibValues.h"
42#include "GPUTriggerOutputs.h"
43
45#include "GPUHostDataTypes.h"
47#include "GPUTrackingRefit.h"
48#include "CalibdEdxContainer.h"
49
50#include "TPCFastTransform.h"
52
53#include "utils/linux_helpers.h"
54#include "utils/strtag.h"
55using namespace o2::gpu;
56
57#include "GPUO2DataTypes.h"
58
59using namespace o2::tpc;
60using namespace o2::trd;
61
62GPUChainTracking::GPUChainTracking(GPUReconstruction* rec, uint32_t maxTPCHits, uint32_t maxTRDTracklets) : GPUChain(rec), mIOPtrs(processors()->ioPtrs), mInputsHost(new GPUTrackingInputProvider), mInputsShadow(new GPUTrackingInputProvider), mClusterNativeAccess(new ClusterNativeAccess), mTriggerBuffer(new GPUTriggerOutputs), mMaxTPCHits(maxTPCHits), mMaxTRDTracklets(maxTRDTracklets), mDebugFile(new std::ofstream)
63{
67}
68
70
71void GPUChainTracking::RegisterPermanentMemoryAndProcessors()
72{
73 if (mRec->IsGPU()) {
77 }
78
80 if (GetRecoSteps() & RecoStep::TPCSectorTracking) {
81 for (uint32_t i = 0; i < NSECTORS; i++) {
82 mRec->RegisterGPUProcessor(&processors()->tpcTrackers[i], GetRecoStepsGPU() & RecoStep::TPCSectorTracking);
83 }
84 }
85 if (GetRecoSteps() & RecoStep::TPCMerging) {
86 mRec->RegisterGPUProcessor(&processors()->tpcMerger, GetRecoStepsGPU() & RecoStep::TPCMerging);
87 }
88 if (GetRecoSteps() & RecoStep::TRDTracking) {
89 mRec->RegisterGPUProcessor(&processors()->trdTrackerGPU, GetRecoStepsGPU() & RecoStep::TRDTracking);
90 }
91 if (GetRecoSteps() & RecoStep::TRDTracking) {
92 mRec->RegisterGPUProcessor(&processors()->trdTrackerO2, GetRecoStepsGPU() & RecoStep::TRDTracking);
93 }
94 if (GetRecoSteps() & RecoStep::TPCConversion) {
95 mRec->RegisterGPUProcessor(&processors()->tpcConverter, GetRecoStepsGPU() & RecoStep::TPCConversion);
96 }
97 if (GetRecoSteps() & RecoStep::TPCCompression) {
98 mRec->RegisterGPUProcessor(&processors()->tpcCompressor, GetRecoStepsGPU() & RecoStep::TPCCompression);
99 }
100 if (GetRecoSteps() & RecoStep::TPCDecompression) {
101 mRec->RegisterGPUProcessor(&processors()->tpcDecompressor, GetRecoStepsGPU() & RecoStep::TPCDecompression);
102 }
103 if (GetRecoSteps() & RecoStep::TPCClusterFinding) {
104 for (uint32_t i = 0; i < NSECTORS; i++) {
105 mRec->RegisterGPUProcessor(&processors()->tpcClusterer[i], GetRecoStepsGPU() & RecoStep::TPCClusterFinding);
106#ifdef GPUCA_HAS_ONNX
107 mRec->RegisterGPUProcessor(&processors()->tpcNNClusterer[i], GetRecoStepsGPU() & RecoStep::TPCClusterFinding);
108#endif
109 }
110 }
111 if (GetRecoSteps() & RecoStep::Refit) {
112 mRec->RegisterGPUProcessor(&processors()->trackingRefit, GetRecoStepsGPU() & RecoStep::Refit);
113 }
114#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
115 mRec->RegisterGPUProcessor(&processors()->debugOutput, true);
116#endif
118}
119
120void GPUChainTracking::RegisterGPUProcessors()
121{
122 if (mRec->IsGPU()) {
124 }
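 // Initialize the shadow copy of the TRD tracker from the host instance; its device processor pointers are registered below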
125 memcpy((void*)&processorsShadow()->trdTrackerGPU, (const void*)&processors()->trdTrackerGPU, sizeof(processors()->trdTrackerGPU));
126 if (GetRecoStepsGPU() & RecoStep::TPCSectorTracking) {
127 for (uint32_t i = 0; i < NSECTORS; i++) {
128 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcTrackers[i], &processors()->tpcTrackers[i]);
129 }
130 }
131 if (GetRecoStepsGPU() & RecoStep::TPCMerging) {
132 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcMerger, &processors()->tpcMerger);
133 }
134 if (GetRecoStepsGPU() & RecoStep::TRDTracking) {
135 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->trdTrackerGPU, &processors()->trdTrackerGPU);
136 }
137
138 memcpy((void*)&processorsShadow()->trdTrackerO2, (const void*)&processors()->trdTrackerO2, sizeof(processors()->trdTrackerO2));
139 if (GetRecoStepsGPU() & RecoStep::TRDTracking) {
140 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->trdTrackerO2, &processors()->trdTrackerO2);
141 }
142 if (GetRecoStepsGPU() & RecoStep::TPCConversion) {
143 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcConverter, &processors()->tpcConverter);
144 }
145 if (GetRecoStepsGPU() & RecoStep::TPCCompression) {
146 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcCompressor, &processors()->tpcCompressor);
147 }
148 if (GetRecoStepsGPU() & RecoStep::TPCDecompression) {
149 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcDecompressor, &processors()->tpcDecompressor);
150 }
151 if (GetRecoStepsGPU() & RecoStep::TPCClusterFinding) {
152 for (uint32_t i = 0; i < NSECTORS; i++) {
153 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcClusterer[i], &processors()->tpcClusterer[i]);
154#ifdef GPUCA_HAS_ONNX
155 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcNNClusterer[i], &processors()->tpcNNClusterer[i]);
156#endif
157 }
158 }
159 if (GetRecoStepsGPU() & RecoStep::Refit) {
160 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->trackingRefit, &processors()->trackingRefit);
161 }
162#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
163 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->debugOutput, &processors()->debugOutput);
164#endif
165}
166
167void GPUChainTracking::MemorySize(size_t& gpuMem, size_t& pageLockedHostMem)
168{
169 gpuMem = GPUCA_MEMORY_SIZE;
170 pageLockedHostMem = GPUCA_HOST_MEMORY_SIZE;
171}
172
173bool GPUChainTracking::ValidateSteps()
174{
176 GPUError("Invalid Reconstruction Step Setting: dEdx requires TPC Merger to be active");
177 return false;
178 }
180 GPUError("Invalid GPU Reconstruction Step Setting: dEdx requires TPC Merger to be active");
181 return false;
182 }
183 if (!param().par.earlyTpcTransform) {
185 GPUError("Invalid Reconstruction Step Setting: Tracking without early transform requires TPC Conversion to be active");
186 return false;
187 }
188 }
190 GPUError("Invalid input, TPC Clusterizer needs TPC raw input");
191 return false;
192 }
194    GPUError("Invalid input / output / step, merger cannot read/store sector tracks and needs TPC conversion");
195 return false;
196 }
198 if ((GetRecoSteps() & GPUDataTypes::RecoStep::TPCMerging) && !tpcClustersAvail) {
199 GPUError("Invalid Inputs for track merging, TPC Clusters required");
200 return false;
201 }
202#ifndef GPUCA_TPC_GEOMETRY_O2
204    GPUError("Cannot run TPC GPU Cluster Finding with Run 2 Data");
205 return false;
206 }
207#endif
209 GPUError("Missing input for TPC Cluster conversion / sector tracking / compression / dEdx: TPC Clusters required");
210 return false;
211 }
213 GPUError("Input for TPC merger missing");
214 return false;
215 }
217 GPUError("Input for TPC compressor missing");
218 return false;
219 }
221 GPUError("Input for TRD Tracker missing");
222 return false;
223 }
225 GPUError("TPC Raw / TPC Clusters / TRD Tracklets cannot be output");
226 return false;
227 }
229 GPUError("No TPC Merged Track Output available");
230 return false;
231 }
233 GPUError("No TPC Compression Output available");
234 return false;
235 }
237 GPUError("No TRD Tracker Output available");
238 return false;
239 }
240 if ((GetRecoSteps() & GPUDataTypes::RecoStep::TPCdEdx) && (processors()->calibObjects.dEdxCalibContainer == nullptr)) {
241 GPUError("Cannot run dE/dx without dE/dx calibration container object");
242 return false;
243 }
244 if ((GetRecoSteps() & GPUDataTypes::RecoStep::TPCClusterFinding) && processors()->calibObjects.tpcPadGain == nullptr) {
245 GPUError("Cannot run gain calibration without calibration object");
246 return false;
247 }
248 if ((GetRecoSteps() & GPUDataTypes::RecoStep::TPCClusterFinding) && processors()->calibObjects.tpcZSLinkMapping == nullptr && mIOPtrs.tpcZS != nullptr) {
249 GPUError("Cannot run TPC ZS Decoder without mapping object. (tpczslinkmapping.dump missing?)");
250 return false;
251 }
252 return true;
253}
254
255bool GPUChainTracking::ValidateSettings()
256{
257 if ((param().rec.tpc.nWays & 1) == 0) {
258    GPUError("nWays setting must be an odd number!");
259 return false;
260 }
261 if (param().rec.tpc.mergerInterpolateErrors && param().rec.tpc.nWays == 1) {
262 GPUError("Cannot do error interpolation with NWays = 1!");
263 return false;
264 }
265 if (param().continuousMaxTimeBin > (int32_t)GPUSettings::TPC_MAX_TF_TIME_BIN) {
266 GPUError("configured max time bin exceeds 256 orbits");
267 return false;
268 }
269 if ((GetRecoStepsGPU() & RecoStep::TPCClusterFinding) && std::max(GetProcessingSettings().nTPCClustererLanes + 1, GetProcessingSettings().nTPCClustererLanes * 2) + (GetProcessingSettings().doublePipeline ? 1 : 0) > (int32_t)mRec->NStreams()) {
270 GPUError("NStreams (%d) must be > nTPCClustererLanes (%d)", mRec->NStreams(), (int32_t)GetProcessingSettings().nTPCClustererLanes);
271 return false;
272 }
273 if (GetProcessingSettings().noGPUMemoryRegistration && GetProcessingSettings().tpcCompressionGatherMode != 3) {
274 GPUError("noGPUMemoryRegistration only possible with gather mode 3");
275 return false;
276 }
277 if (GetProcessingSettings().doublePipeline) {
279 GPUError("Invalid outputs for double pipeline mode 0x%x", (uint32_t)GetRecoStepsOutputs());
280 return false;
281 }
286 GPUError("Must use external output for double pipeline mode");
287 return false;
288 }
289 if (GetProcessingSettings().tpcCompressionGatherMode == 1) {
290      GPUError("Double pipeline is incompatible with compression mode 1");
291 return false;
292 }
294 GPUError("Invalid reconstruction settings for double pipeline: Needs compression and cluster finding");
295 return false;
296 }
297 }
298  if ((GetRecoSteps() & GPUDataTypes::RecoStep::TPCCompression) && !(GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCCompression) && (GetProcessingSettings().tpcCompressionGatherMode == 1 || GetProcessingSettings().tpcCompressionGatherMode == 3)) {
299 GPUError("Invalid tpcCompressionGatherMode for compression on CPU");
300 return false;
301 }
302 if (GetProcessingSettings().tpcApplyClusterFilterOnCPU > 0 && (GetRecoStepsGPU() & GPUDataTypes::RecoStep::TPCClusterFinding || GetProcessingSettings().runMC)) {
303 GPUError("tpcApplyClusterFilterOnCPU cannot be used with GPU clusterization or with MC labels");
304 return false;
305 }
306 if (GetRecoSteps() & RecoStep::TRDTracking) {
307 if (GetProcessingSettings().trdTrackModelO2 && (GetProcessingSettings().createO2Output == 0 || param().rec.tpc.nWaysOuter == 0 || (GetMatLUT() == nullptr && !GetProcessingSettings().willProvideO2PropagatorLate))) {
308 GPUError("TRD tracking can only run on O2 TPC tracks if createO2Output is enabled (%d), nWaysOuter is set (%d), and matBudLUT is available (0x%p)", (int32_t)GetProcessingSettings().createO2Output, (int32_t)param().rec.tpc.nWaysOuter, (void*)GetMatLUT());
309 return false;
310 }
311 if ((GetRecoStepsGPU() & RecoStep::TRDTracking) && !GetProcessingSettings().trdTrackModelO2 && GetProcessingSettings().createO2Output > 1) {
312 GPUError("TRD tracking can only run on GPU TPC tracks if the createO2Output setting does not suppress them");
313 return false;
314 }
315 if ((((GetRecoStepsGPU() & RecoStep::TRDTracking) && GetProcessingSettings().trdTrackModelO2) || ((GetRecoStepsGPU() & RecoStep::Refit) && !param().rec.trackingRefitGPUModel)) && (!GetProcessingSettings().o2PropagatorUseGPUField || (GetMatLUT() == nullptr && !GetProcessingSettings().willProvideO2PropagatorLate))) {
316 GPUError("Cannot use TRD tracking or Refit on GPU without GPU polynomial field map (%d) or matlut table (%p)", (int32_t)GetProcessingSettings().o2PropagatorUseGPUField, (void*)GetMatLUT());
317 return false;
318 }
319 }
320 return true;
321}
322
323int32_t GPUChainTracking::Init()
324{
325 const auto& threadContext = GetThreadContext();
326 if (GetProcessingSettings().debugLevel >= 1) {
327 printf("Enabled Reconstruction Steps: 0x%x (on GPU: 0x%x)", (int32_t)GetRecoSteps().get(), (int32_t)GetRecoStepsGPU().get());
328 for (uint32_t i = 0; i < sizeof(GPUDataTypes::RECO_STEP_NAMES) / sizeof(GPUDataTypes::RECO_STEP_NAMES[0]); i++) {
329 if (GetRecoSteps().isSet(1u << i)) {
330 printf(" - %s", GPUDataTypes::RECO_STEP_NAMES[i]);
331 if (GetRecoStepsGPU().isSet(1u << i)) {
332 printf(" (G)");
333 }
334 }
335 }
336 printf("\n");
337 }
338 if (!ValidateSteps()) {
339 return 1;
340 }
341
342 for (uint32_t i = 0; i < mSubOutputControls.size(); i++) {
343 if (mSubOutputControls[i] == nullptr) {
345 }
346 }
347
348 if (!ValidateSettings()) {
349 return 1;
350 }
351
354 if (!qa) {
355 qa.reset(new GPUQA(this));
356 }
357 }
360 if (mEventDisplay == nullptr) {
361 throw std::runtime_error("Error loading event display");
362 }
363 }
364
367
368 if (mRec->IsGPU()) {
370 UpdateGPUCalibObjectsPtrs(-1); // First initialization, for users not using RunChain
372 WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->errorCodes - (char*)processors(), &processorsShadow()->errorCodes, sizeof(processorsShadow()->errorCodes), -1);
373 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes);
374 }
375
376 if (GetProcessingSettings().debugLevel >= 6) {
377 mDebugFile->open(mRec->IsGPU() ? "GPU.out" : "CPU.out");
378 }
379
380 return 0;
381}
382
383void GPUChainTracking::UpdateGPUCalibObjects(int32_t stream, const GPUCalibObjectsConst* ptrMask)
384{
385 if (processors()->calibObjects.fastTransform && (ptrMask == nullptr || ptrMask->fastTransform)) {
386 memcpy((void*)mFlatObjectsShadow.mCalibObjects.fastTransform, (const void*)processors()->calibObjects.fastTransform, sizeof(*processors()->calibObjects.fastTransform));
387 memcpy((void*)mFlatObjectsShadow.mTpcTransformBuffer, (const void*)processors()->calibObjects.fastTransform->getFlatBufferPtr(), processors()->calibObjects.fastTransform->getFlatBufferSize());
388 mFlatObjectsShadow.mCalibObjects.fastTransform->clearInternalBufferPtr();
391 }
392 if (processors()->calibObjects.fastTransformMShape && (ptrMask == nullptr || ptrMask->fastTransformMShape)) {
393 memcpy((void*)mFlatObjectsShadow.mCalibObjects.fastTransformMShape, (const void*)processors()->calibObjects.fastTransformMShape, sizeof(*processors()->calibObjects.fastTransformMShape));
394 memcpy((void*)mFlatObjectsShadow.mTpcTransformMShapeBuffer, (const void*)processors()->calibObjects.fastTransformMShape->getFlatBufferPtr(), processors()->calibObjects.fastTransformMShape->getFlatBufferSize());
395 mFlatObjectsShadow.mCalibObjects.fastTransformMShape->clearInternalBufferPtr();
398 }
399 if (processors()->calibObjects.fastTransformRef && (ptrMask == nullptr || ptrMask->fastTransformRef)) {
400 memcpy((void*)mFlatObjectsShadow.mCalibObjects.fastTransformRef, (const void*)processors()->calibObjects.fastTransformRef, sizeof(*processors()->calibObjects.fastTransformRef));
401 memcpy((void*)mFlatObjectsShadow.mTpcTransformRefBuffer, (const void*)processors()->calibObjects.fastTransformRef->getFlatBufferPtr(), processors()->calibObjects.fastTransformRef->getFlatBufferSize());
402 mFlatObjectsShadow.mCalibObjects.fastTransformRef->clearInternalBufferPtr();
405 }
406 if (processors()->calibObjects.fastTransformHelper && (ptrMask == nullptr || ptrMask->fastTransformHelper)) {
407 memcpy((void*)mFlatObjectsShadow.mCalibObjects.fastTransformHelper, (const void*)processors()->calibObjects.fastTransformHelper, sizeof(*processors()->calibObjects.fastTransformHelper));
411 }
412 if (processors()->calibObjects.dEdxCalibContainer && (ptrMask == nullptr || ptrMask->dEdxCalibContainer)) {
413 memcpy((void*)mFlatObjectsShadow.mCalibObjects.dEdxCalibContainer, (const void*)processors()->calibObjects.dEdxCalibContainer, sizeof(*processors()->calibObjects.dEdxCalibContainer));
414 memcpy((void*)mFlatObjectsShadow.mdEdxSplinesBuffer, (const void*)processors()->calibObjects.dEdxCalibContainer->getFlatBufferPtr(), processors()->calibObjects.dEdxCalibContainer->getFlatBufferSize());
415 mFlatObjectsShadow.mCalibObjects.dEdxCalibContainer->clearInternalBufferPtr();
418 }
419 if (processors()->calibObjects.matLUT && (ptrMask == nullptr || ptrMask->matLUT)) {
420 memcpy((void*)mFlatObjectsShadow.mCalibObjects.matLUT, (const void*)processors()->calibObjects.matLUT, sizeof(*processors()->calibObjects.matLUT));
421 memcpy((void*)mFlatObjectsShadow.mMatLUTBuffer, (const void*)processors()->calibObjects.matLUT->getFlatBufferPtr(), processors()->calibObjects.matLUT->getFlatBufferSize());
422 mFlatObjectsShadow.mCalibObjects.matLUT->clearInternalBufferPtr();
425 }
426 if (processors()->calibObjects.trdGeometry && (ptrMask == nullptr || ptrMask->trdGeometry)) {
427 memcpy((void*)mFlatObjectsShadow.mCalibObjects.trdGeometry, (const void*)processors()->calibObjects.trdGeometry, sizeof(*processors()->calibObjects.trdGeometry));
428 mFlatObjectsShadow.mCalibObjects.trdGeometry->clearInternalBufferPtr();
429 }
430 if (processors()->calibObjects.tpcPadGain && (ptrMask == nullptr || ptrMask->tpcPadGain)) {
431 memcpy((void*)mFlatObjectsShadow.mCalibObjects.tpcPadGain, (const void*)processors()->calibObjects.tpcPadGain, sizeof(*processors()->calibObjects.tpcPadGain));
432 }
433 if (processors()->calibObjects.tpcZSLinkMapping && (ptrMask == nullptr || ptrMask->tpcZSLinkMapping)) {
434 memcpy((void*)mFlatObjectsShadow.mCalibObjects.tpcZSLinkMapping, (const void*)processors()->calibObjects.tpcZSLinkMapping, sizeof(*processors()->calibObjects.tpcZSLinkMapping));
435 }
436 if (processors()->calibObjects.o2Propagator && (ptrMask == nullptr || ptrMask->o2Propagator)) {
437 memcpy((void*)mFlatObjectsShadow.mCalibObjects.o2Propagator, (const void*)processors()->calibObjects.o2Propagator, sizeof(*processors()->calibObjects.o2Propagator));
438 mFlatObjectsShadow.mCalibObjects.o2Propagator->setGPUField(&processorsDevice()->param.polynomialField);
440 }
442 memcpy((void*)&processorsShadow()->calibObjects, (void*)&mFlatObjectsDevice.mCalibObjects, sizeof(mFlatObjectsDevice.mCalibObjects));
443}
444
445void GPUChainTracking::UpdateGPUCalibObjectsPtrs(int32_t stream)
446{
447 WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->calibObjects - (char*)processors(), &mFlatObjectsDevice.mCalibObjects, sizeof(mFlatObjectsDevice.mCalibObjects), stream);
448}
449
450int32_t GPUChainTracking::PrepareEvent()
451{
455 }
456 if (mIOPtrs.tpcZS && param().rec.fwdTPCDigitsAsClusters) {
457    throw std::runtime_error("Forwarding zero-suppressed hits not supported");
458 }
460 return 0;
461}
462
464{
466 if (!qa) {
467 qa.reset(new GPUQA(this));
468 }
469 if (!GetQA()->IsInitialized()) {
470 return GetQA()->InitQA();
471 }
472 return 0;
473}
474
476{
477 if (GetProcessingSettings().runQA && GetQA()->IsInitialized() && !(mConfigQA && mConfigQA->shipToQC) && !mQAFromForeignChain) {
478 GetQA()->UpdateChain(this);
480 }
481 if (GetProcessingSettings().debugLevel >= 6) {
482 mDebugFile->close();
483 }
485 mCompressionStatistics->Finish();
486 }
487 return 0;
488}
489
491{
492 char* fastTransformBase = (char*)mem;
496 }
500 }
504 }
507 }
508 if ((char*)mem - fastTransformBase < mChainTracking->GetProcessingSettings().fastTransformObjectsMinMemorySize) {
509 mem = fastTransformBase + mChainTracking->GetProcessingSettings().fastTransformObjectsMinMemorySize; // TODO: Fixme and do proper dynamic allocation
510 }
513 }
516 }
517 char* dummyPtr;
521 } else if (mChainTracking->GetProcessingSettings().lateO2MatLutProvisioningSize) {
522 computePointerWithAlignment(mem, dummyPtr, mChainTracking->GetProcessingSettings().lateO2MatLutProvisioningSize);
523 }
527 }
530 }
533    mCalibObjects.o2Propagator = nullptr; // Always reserve memory for the o2::Propagator, since it may only be provided during run(), not during init().
534 }
536 mem = (char*)mem + mChainTracking->GetProcessingSettings().calibObjectsExtraMemorySize; // TODO: Fixme and do proper dynamic allocation
537 }
538 return mem;
539}
540
542{
543 std::memset((void*)&mIOPtrs, 0, sizeof(mIOPtrs));
545 new (&mIOMem) InOutMemory;
546}
547
549{
550 for (uint32_t i = 0; i < NSECTORS; i++) {
555 }
557  std::memset(mIOMem.clusterNativeAccess.get(), 0, sizeof(ClusterNativeAccess)); // ClusterNativeAccess has no constructor of its own
572}
573
574void GPUChainTracking::SetTPCFastTransform(std::unique_ptr<TPCFastTransform>&& tpcFastTransform, std::unique_ptr<CorrectionMapsHelper>&& tpcTransformHelper)
575{
576 mTPCFastTransformU = std::move(tpcFastTransform);
577 mTPCFastTransformHelperU = std::move(tpcTransformHelper);
580}
581
582void GPUChainTracking::SetMatLUT(std::unique_ptr<o2::base::MatLayerCylSet>&& lut)
583{
584 mMatLUTU = std::move(lut);
586}
587
588void GPUChainTracking::SetTRDGeometry(std::unique_ptr<o2::trd::GeometryFlat>&& geo)
589{
590 mTRDGeometryU = std::move(geo);
592}
593
594int32_t GPUChainTracking::DoQueuedUpdates(int32_t stream, bool updateSlave)
595{
596 int32_t retVal = 0;
597 std::unique_ptr<GPUSettingsGRP> grp;
598 const GPUSettingsProcessing* p = nullptr;
599 std::lock_guard lk(mMutexUpdateCalib);
601 if (mNewCalibValues->newSolenoidField || mNewCalibValues->newContinuousMaxTimeBin || mNewCalibValues->newTPCTimeBinCut) {
602 grp = std::make_unique<GPUSettingsGRP>(mRec->GetGRPSettings());
603 if (mNewCalibValues->newSolenoidField) {
604 grp->solenoidBzNominalGPU = mNewCalibValues->solenoidField;
605 }
606 if (mNewCalibValues->newContinuousMaxTimeBin) {
607 grp->grpContinuousMaxTimeBin = mNewCalibValues->continuousMaxTimeBin;
608 }
609 if (mNewCalibValues->newTPCTimeBinCut) {
610 grp->tpcCutTimeBin = mNewCalibValues->tpcTimeBinCut;
611 }
612 }
613 }
614 if (GetProcessingSettings().tpcDownscaledEdx != 0) {
616 }
617 if (grp || p) {
618 mRec->UpdateSettings(grp.get(), p);
619 retVal = 1;
620 }
622 if (mNewCalibObjects->o2Propagator && ((mNewCalibObjects->o2Propagator->getGPUField() != nullptr) ^ GetProcessingSettings().o2PropagatorUseGPUField)) {
623 GPUFatal("GPU magnetic field for propagator requested, but received an O2 propagator without GPU field");
624 }
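 // GPUCalibObjectsConst is a plain struct of pointers: copy over only the entries that are non-null in the update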
625 void* const* pSrc = (void* const*)mNewCalibObjects.get();
626 void** pDst = (void**)&processors()->calibObjects;
627 for (uint32_t i = 0; i < sizeof(processors()->calibObjects) / sizeof(void*); i++) {
628 if (pSrc[i]) {
629 pDst[i] = pSrc[i];
630 }
631 }
633 if (GetProcessingSettings().trdTrackModelO2) {
635 if (mRec->IsGPU()) {
636 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, processors()->trdTrackerO2.MemoryPermanent(), stream);
637 }
638 } else {
640 if (mRec->IsGPU()) {
641 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, processors()->trdTrackerGPU.MemoryPermanent(), stream);
642 }
643 }
644 }
645 if (mRec->IsGPU()) {
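 // Snapshot the flat-object pointers to detect whether reallocation moved them; if so, all calib objects are re-uploaded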
646 std::array<uint8_t, sizeof(GPUTrackingFlatObjects)> oldFlatPtrs, oldFlatPtrsDevice;
647 memcpy(oldFlatPtrs.data(), (void*)&mFlatObjectsShadow, oldFlatPtrs.size());
648 memcpy(oldFlatPtrsDevice.data(), (void*)&mFlatObjectsDevice, oldFlatPtrsDevice.size());
650 bool ptrsChanged = memcmp(oldFlatPtrs.data(), (void*)&mFlatObjectsShadow, oldFlatPtrs.size()) || memcmp(oldFlatPtrsDevice.data(), (void*)&mFlatObjectsDevice, oldFlatPtrsDevice.size());
651 if (ptrsChanged) {
652 GPUInfo("Updating all calib objects since pointers changed");
653 }
654 UpdateGPUCalibObjects(stream, ptrsChanged ? nullptr : mNewCalibObjects.get());
655 }
656 }
657
658 if ((mUpdateNewCalibObjects || (mRec->slavesExist() && updateSlave)) && mRec->IsGPU()) {
659 UpdateGPUCalibObjectsPtrs(stream); // Reinitialize
660 retVal = 1;
661 }
662 mNewCalibObjects.reset(nullptr);
663 mNewCalibValues.reset(nullptr);
665 return retVal;
666}
667
668int32_t GPUChainTracking::RunChain()
669{
670 if ((((GetRecoSteps() & RecoStep::TRDTracking) && !GetProcessingSettings().trdTrackModelO2 && !GetProcessingSettings().willProvideO2PropagatorLate) || ((GetRecoSteps() & RecoStep::Refit) && !param().rec.trackingRefitGPUModel)) && processors()->calibObjects.o2Propagator == nullptr) {
671 GPUFatal("Cannot run TRD tracking or refit with o2 track model without o2 propagator"); // This check must happen during run, since o2::Propagator cannot be available during init
672 }
673 if (GetProcessingSettings().autoAdjustHostThreads && !mRec->IsGPU()) {
675 }
676 const auto threadContext = GetThreadContext();
677 if (GetProcessingSettings().runCompressionStatistics && mCompressionStatistics == nullptr) {
679 }
680 const bool needQA = GPUQA::QAAvailable() && (GetProcessingSettings().runQA || (GetProcessingSettings().eventDisplay && (mIOPtrs.nMCInfosTPC || GetProcessingSettings().runMC)));
681 if (needQA && GetQA()->IsInitialized() == false) {
682 if (GetQA()->InitQA(GetProcessingSettings().runQA ? -GetProcessingSettings().runQA : -1)) {
683 return 1;
684 }
685 }
686 if (needQA) {
687 mFractionalQAEnabled = GetProcessingSettings().qcRunFraction == 100.f || (uint32_t)(rand() % 10000) < (uint32_t)(GetProcessingSettings().qcRunFraction * 100);
688 }
689 if (GetProcessingSettings().debugLevel >= 6) {
690 *mDebugFile << "\n\nProcessing event " << mRec->getNEventsProcessed() << std::endl;
691 }
693
694 mRec->getGeneralStepTimer(GeneralStep::Prepare).Start();
695 try {
697 } catch (const std::bad_alloc& e) {
698 GPUError("Memory Allocation Error");
699 return (1);
700 }
701 mRec->getGeneralStepTimer(GeneralStep::Prepare).Stop();
702
704
705 SynchronizeStream(0); // Synchronize all init copies that might be ongoing
706
708 if (runRecoStep(RecoStep::TPCDecompression, &GPUChainTracking::RunTPCDecompression)) {
709 return 1;
710 }
711 } else if (mIOPtrs.tpcPackedDigits || mIOPtrs.tpcZS) {
712 if (runRecoStep(RecoStep::TPCClusterFinding, &GPUChainTracking::RunTPCClusterizer, false)) {
713 return 1;
714 }
715 }
716
717 if (GetProcessingSettings().autoAdjustHostThreads && !mRec->IsGPU() && mIOPtrs.clustersNative) {
719 }
720
722 return 1;
723 }
724
725 mRec->PushNonPersistentMemory(qStr2Tag("TPCSLCD1")); // 1st stack level for TPC tracking sector data
727 if (runRecoStep(RecoStep::TPCSectorTracking, &GPUChainTracking::RunTPCTrackingSectors)) {
728 return 1;
729 }
730
731 if (runRecoStep(RecoStep::TPCMerging, &GPUChainTracking::RunTPCTrackingMerger, false)) {
732 return 1;
733 }
735 mRec->PopNonPersistentMemory(RecoStep::TPCSectorTracking, qStr2Tag("TPCSLCD1")); // Release 1st stack level, TPC sector data not needed after merger
737 }
738
740 if (GetProcessingSettings().doublePipeline) {
742 if (foreignChain && foreignChain->mIOPtrs.tpcZS) {
743 if (GetProcessingSettings().debugLevel >= 3) {
744 GPUInfo("Preempting tpcZS input of foreign chain");
745 }
746 mPipelineFinalizationCtx.reset(new GPUChainTrackingFinalContext);
747 mPipelineFinalizationCtx->rec = this->mRec;
748 foreignChain->mPipelineNotifyCtx = mPipelineFinalizationCtx.get();
749 }
750 }
751 if (runRecoStep(RecoStep::TPCCompression, &GPUChainTracking::RunTPCCompression)) {
752 return 1;
753 }
754 }
755
756 if (GetProcessingSettings().trdTrackModelO2 ? runRecoStep(RecoStep::TRDTracking, &GPUChainTracking::RunTRDTracking<GPUTRDTrackerKernels::o2Version>) : runRecoStep(RecoStep::TRDTracking, &GPUChainTracking::RunTRDTracking<GPUTRDTrackerKernels::gpuVersion>)) {
757 return 1;
758 }
759
760 if (runRecoStep(RecoStep::Refit, &GPUChainTracking::RunRefit)) {
761 return 1;
762 }
763
764 if (!GetProcessingSettings().doublePipeline) { // Synchronize with output copies running asynchronously
766 }
767
768 if (GetProcessingSettings().autoAdjustHostThreads && !mRec->IsGPU()) {
770 }
771
772 int32_t retVal = 0;
773 if (CheckErrorCodes(false, false, mRec->getErrorCodeOutput())) {
774 retVal = 3;
775 if (!GetProcessingSettings().ignoreNonFatalGPUErrors) {
776 return retVal;
777 }
778 }
779
780 if (GetProcessingSettings().doublePipeline) {
781 return retVal;
782 }
783 int32_t retVal2 = RunChainFinalize();
784 return retVal2 ? retVal2 : retVal;
785}
786
787int32_t GPUChainTracking::RunChainFinalize()
788{
789 if (mIOPtrs.clustersNative && (GetRecoSteps() & RecoStep::TPCCompression) && GetProcessingSettings().runCompressionStatistics) {
792 }
793
794 if (GetProcessingSettings().outputSanityCheck) {
795 SanityCheck();
796 }
797
798 const bool needQA = GPUQA::QAAvailable() && (GetProcessingSettings().runQA || (GetProcessingSettings().eventDisplay && mIOPtrs.nMCInfosTPC));
799 if (needQA && mFractionalQAEnabled) {
800 mRec->getGeneralStepTimer(GeneralStep::QA).Start();
801 GetQA()->UpdateChain(this);
802 GetQA()->RunQA(!GetProcessingSettings().runQA);
803 mRec->getGeneralStepTimer(GeneralStep::QA).Stop();
804 if (GetProcessingSettings().debugLevel == 0) {
805 GPUInfo("Total QA runtime: %d us", (int32_t)(mRec->getGeneralStepTimer(GeneralStep::QA).GetElapsedTime() * 1000000));
806 }
807 }
808
809 if (GetProcessingSettings().showOutputStat) {
811 }
812
814
815 // PrintMemoryRelations();
816
818 if (!mDisplayRunning) {
819 if (mEventDisplay->StartDisplay()) {
820 return (1);
821 }
822 mDisplayRunning = true;
823 } else {
824 mEventDisplay->ShowNextEvent();
825 }
826
827 if (GetProcessingSettings().eventDisplay->EnableSendKey()) {
828 while (kbhit()) {
829 getch();
830 }
831 GPUInfo("Press key for next event!");
832 }
833
834 int32_t iKey;
835 do {
836 Sleep(10);
837 if (GetProcessingSettings().eventDisplay->EnableSendKey()) {
838 iKey = kbhit() ? getch() : 0;
839 if (iKey == 27) {
840 GetProcessingSettings().eventDisplay->setDisplayControl(2);
841 } else if (iKey == 'n') {
842 break;
843 } else if (iKey) {
844 while (GetProcessingSettings().eventDisplay->getSendKey() != 0) {
845 Sleep(1);
846 }
847 GetProcessingSettings().eventDisplay->setSendKey(iKey);
848 }
849 }
850 } while (GetProcessingSettings().eventDisplay->getDisplayControl() == 0);
851 if (GetProcessingSettings().eventDisplay->getDisplayControl() == 2) {
852 mDisplayRunning = false;
853 GetProcessingSettings().eventDisplay->DisplayExit();
854 const_cast<GPUSettingsProcessing&>(GetProcessingSettings()).eventDisplay = nullptr; // TODO: fixme - eventDisplay should probably not be put into ProcessingSettings in the first place
855 return (2);
856 }
857 GetProcessingSettings().eventDisplay->setDisplayControl(0);
858 GPUInfo("Loading next event");
859
860 mEventDisplay->WaitForNextEvent();
861 }
862
863 return 0;
864}
865
866int32_t GPUChainTracking::FinalizePipelinedProcessing()
867{
868 if (mPipelineFinalizationCtx) {
869 {
870 std::unique_lock<std::mutex> lock(mPipelineFinalizationCtx->mutex);
871 auto* ctx = mPipelineFinalizationCtx.get();
872 mPipelineFinalizationCtx->cond.wait(lock, [ctx]() { return ctx->ready; });
873 }
874 mPipelineFinalizationCtx.reset();
875 }
876 return RunChainFinalize();
877}
878
879int32_t GPUChainTracking::CheckErrorCodes(bool cpuOnly, bool forceShowErrors, std::vector<std::array<uint32_t, 4>>* fillErrors)
880{
881 int32_t retVal = 0;
882 for (int32_t i = 0; i < 1 + (!cpuOnly && mRec->IsGPU()); i++) {
883 if (i) {
884 const auto& threadContext = GetThreadContext();
885 if (GetProcessingSettings().doublePipeline) {
886 TransferMemoryResourceLinkToHost(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes, 0);
888 } else {
889 TransferMemoryResourceLinkToHost(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes);
890 }
891 }
892 if (processors()->errorCodes.hasError()) {
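 // When alarm throttling is active, go quiet after 10 reported errors and re-enable reporting after 10 minutes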
893 static int32_t errorsShown = 0;
894 static bool quiet = false;
895 static std::chrono::time_point<std::chrono::steady_clock> silenceFrom;
896 if (!quiet && errorsShown++ >= 10 && GetProcessingSettings().throttleAlarms && !forceShowErrors) {
897 silenceFrom = std::chrono::steady_clock::now();
898 quiet = true;
899 } else if (quiet) {
900 auto currentTime = std::chrono::steady_clock::now();
901 std::chrono::duration<double> elapsed_seconds = currentTime - silenceFrom;
902 if (elapsed_seconds.count() > 60 * 10) {
903 quiet = false;
904 errorsShown = 1;
905 }
906 }
907 retVal = 1;
908 if (GetProcessingSettings().throttleAlarms && !forceShowErrors) {
909 GPUWarning("GPUReconstruction suffered from an error in the %s part", i ? "GPU" : "CPU");
910 } else {
911 GPUError("GPUReconstruction suffered from an error in the %s part", i ? "GPU" : "CPU");
912 }
913 if (!quiet) {
914 processors()->errorCodes.printErrors(GetProcessingSettings().throttleAlarms && !forceShowErrors);
915 }
916 if (fillErrors) {
917 uint32_t nErrors = processors()->errorCodes.getNErrors();
918 const uint32_t* pErrors = processors()->errorCodes.getErrorPtr();
919 for (uint32_t j = 0; j < nErrors; j++) {
920 fillErrors->emplace_back(std::array<uint32_t, 4>{pErrors[4 * j], pErrors[4 * j + 1], pErrors[4 * j + 2], pErrors[4 * j + 3]});
921 }
922 }
923 }
924 }
925 ClearErrorCodes(cpuOnly);
926 return retVal;
927}
928
929void GPUChainTracking::ClearErrorCodes(bool cpuOnly)
930{
932 if (mRec->IsGPU() && !cpuOnly) {
933 const auto& threadContext = GetThreadContext();
934 WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->errorCodes - (char*)processors(), &processorsShadow()->errorCodes, sizeof(processorsShadow()->errorCodes), 0);
935 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes, 0);
936 }
937}
938
939void GPUChainTracking::SetUpdateCalibObjects(const GPUCalibObjectsConst& obj, const GPUNewCalibValues& vals)
940{
941 std::lock_guard lk(mMutexUpdateCalib);
942 if (mNewCalibObjects) {
943 void* const* pSrc = (void* const*)&obj;
944 void** pDst = (void**)mNewCalibObjects.get();
945 for (uint32_t i = 0; i < sizeof(*mNewCalibObjects) / sizeof(void*); i++) {
946 if (pSrc[i]) {
947 pDst[i] = pSrc[i];
948 }
949 }
950 } else {
952 }
953 if (mNewCalibValues) {
954 mNewCalibValues->updateFrom(&vals);
955 } else {
956 mNewCalibValues.reset(new GPUNewCalibValues(vals));
957 }
959}
960
961const o2::base::Propagator* GPUChainTracking::GetDeviceO2Propagator()
962{
963 return (mRec->IsGPU() ? processorsShadow() : processors())->calibObjects.o2Propagator;
964}
965
966void GPUChainTracking::SetO2Propagator(const o2::base::Propagator* prop)
967{
969 if ((prop->getGPUField() != nullptr) ^ GetProcessingSettings().o2PropagatorUseGPUField) {
970 GPUFatal("GPU magnetic field for propagator requested, but received an O2 propagator without GPU field");
971 }
972}