Project
Loading...
Searching...
No Matches
GPUChainTracking.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
17#include <fstream>
18#include <chrono>
19
20#include "GPUChainTracking.h"
21#include "GPUChainTrackingGetters.inc"
22#include "GPUReconstructionIO.h"
24#include "GPUTPCGMMergedTrack.h"
26#include "GPUTPCTrack.h"
27#include "GPUTPCHitId.h"
28#include "GPUTPCClusterData.h"
29#include "TPCZSLinkMapping.h"
30#include "GPUTRDTrackletWord.h"
32#include "GPUTPCMCInfo.h"
33#include "GPUTRDTrack.h"
34#include "GPUTRDTracker.h"
35#include "AliHLTTPCRawCluster.h"
37#include "GPUTRDRecoParam.h"
39#include "GPUQA.h"
40#include "GPULogging.h"
43#include "GPUNewCalibValues.h"
44#include "GPUTriggerOutputs.h"
45#include "GPUDefParametersRuntime.h"
46
48#include "GPUHostDataTypes.h"
50#include "GPUTrackingRefit.h"
51#include "CalibdEdxContainer.h"
52
53#include "TPCFastTransformPOD.h"
54
55#include "utils/linux_helpers.h"
56#include "utils/strtag.h"
57using namespace o2::gpu;
58
59#include "GPUO2DataTypes.h"
60
61using namespace o2::tpc;
62using namespace o2::trd;
63
64GPUChainTracking::GPUChainTracking(GPUReconstruction* rec, uint32_t maxTPCHits, uint32_t maxTRDTracklets) : GPUChain(rec), mIOPtrs(processors()->ioPtrs), mInputsHost(new GPUTrackingInputProvider), mInputsShadow(new GPUTrackingInputProvider), mClusterNativeAccess(new ClusterNativeAccess), mTriggerBuffer(new GPUTriggerOutputs), mMaxTPCHits(maxTPCHits), mMaxTRDTracklets(maxTRDTracklets), mDebugFile(new std::ofstream)
65{
69}
70
72
74{
75 if (mRec->IsGPU()) {
79 }
80
82 if (GetRecoSteps() & RecoStep::TPCSectorTracking) {
83 for (uint32_t i = 0; i < NSECTORS; i++) {
84 mRec->RegisterGPUProcessor(&processors()->tpcTrackers[i], GetRecoStepsGPU() & RecoStep::TPCSectorTracking);
85 }
86 }
87 if (GetRecoSteps() & RecoStep::TPCMerging) {
88 mRec->RegisterGPUProcessor(&processors()->tpcMerger, GetRecoStepsGPU() & RecoStep::TPCMerging);
89 }
90 if (GetRecoSteps() & RecoStep::TRDTracking) {
91 mRec->RegisterGPUProcessor(&processors()->trdTrackerGPU, GetRecoStepsGPU() & RecoStep::TRDTracking);
92 }
93 if (GetRecoSteps() & RecoStep::TRDTracking) {
94 mRec->RegisterGPUProcessor(&processors()->trdTrackerO2, GetRecoStepsGPU() & RecoStep::TRDTracking);
95 }
96 if (GetRecoSteps() & RecoStep::TPCCompression) {
97 mRec->RegisterGPUProcessor(&processors()->tpcCompressor, GetRecoStepsGPU() & RecoStep::TPCCompression);
98 }
99 if (GetRecoSteps() & RecoStep::TPCDecompression) {
100 mRec->RegisterGPUProcessor(&processors()->tpcDecompressor, GetRecoStepsGPU() & RecoStep::TPCDecompression);
101 }
102 if (GetRecoSteps() & RecoStep::TPCClusterFinding) {
103 for (uint32_t i = 0; i < NSECTORS; i++) {
104 mRec->RegisterGPUProcessor(&processors()->tpcClusterer[i], GetRecoStepsGPU() & RecoStep::TPCClusterFinding);
105#ifdef GPUCA_HAS_ONNX
106 mRec->RegisterGPUProcessor(&processors()->tpcNNClusterer[i], GetRecoStepsGPU() & RecoStep::TPCClusterFinding);
107#endif
108 }
109 }
110 if (GetRecoSteps() & RecoStep::Refit) {
111 mRec->RegisterGPUProcessor(&processors()->trackingRefit, GetRecoStepsGPU() & RecoStep::Refit);
112 }
113#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
114 mRec->RegisterGPUProcessor(&processors()->debugOutput, true);
115#endif
117}
118
120{
121 if (mRec->IsGPU()) {
123 }
124 memcpy((void*)&processorsShadow()->trdTrackerGPU, (const void*)&processors()->trdTrackerGPU, sizeof(processors()->trdTrackerGPU));
125 if (GetRecoStepsGPU() & RecoStep::TPCSectorTracking) {
126 for (uint32_t i = 0; i < NSECTORS; i++) {
127 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcTrackers[i], &processors()->tpcTrackers[i]);
128 }
129 }
130 if (GetRecoStepsGPU() & RecoStep::TPCMerging) {
131 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcMerger, &processors()->tpcMerger);
132 }
133 if (GetRecoStepsGPU() & RecoStep::TRDTracking) {
134 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->trdTrackerGPU, &processors()->trdTrackerGPU);
135 }
136
137 memcpy((void*)&processorsShadow()->trdTrackerO2, (const void*)&processors()->trdTrackerO2, sizeof(processors()->trdTrackerO2));
138 if (GetRecoStepsGPU() & RecoStep::TRDTracking) {
139 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->trdTrackerO2, &processors()->trdTrackerO2);
140 }
141 if (GetRecoStepsGPU() & RecoStep::TPCCompression) {
142 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcCompressor, &processors()->tpcCompressor);
143 }
144 if (GetRecoStepsGPU() & RecoStep::TPCDecompression) {
145 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcDecompressor, &processors()->tpcDecompressor);
146 }
147 if (GetRecoStepsGPU() & RecoStep::TPCClusterFinding) {
148 for (uint32_t i = 0; i < NSECTORS; i++) {
149 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcClusterer[i], &processors()->tpcClusterer[i]);
150#ifdef GPUCA_HAS_ONNX
151 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->tpcNNClusterer[i], &processors()->tpcNNClusterer[i]);
152#endif
153 }
154 }
155 if (GetRecoStepsGPU() & RecoStep::Refit) {
156 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->trackingRefit, &processors()->trackingRefit);
157 }
158#ifdef GPUCA_KERNEL_DEBUGGER_OUTPUT
159 mRec->RegisterGPUDeviceProcessor(&processorsShadow()->debugOutput, &processors()->debugOutput);
160#endif
161}
162
163void GPUChainTracking::MemorySize(size_t& gpuMem, size_t& pageLockedHostMem)
164{
165 gpuMem = constants::GPU_DEFAULT_MEMORY_SIZE;
166 pageLockedHostMem = constants::GPU_DEFAULT_HOST_MEMORY_SIZE;
167}
168
170{
172 GPUError("Invalid Reconstruction Step Setting: dEdx requires TPC Merger to be active");
173 return false;
174 }
176 GPUError("Invalid GPU Reconstruction Step Setting: dEdx requires TPC Merger to be active");
177 return false;
178 }
180 GPUError("Invalid Reconstruction Step Setting: Tracking requires TPC Conversion to be active");
181 return false;
182 }
184 GPUError("Invalid input, TPC Clusterizer needs TPC raw input");
185 return false;
186 }
188 GPUError("Invalid input / output / step, merger cannot read/store sectors tracks and needs TPC conversion");
189 return false;
190 }
192 if ((GetRecoSteps() & gpudatatypes::RecoStep::TPCMerging) && !tpcClustersAvail) {
193 GPUError("Invalid Inputs for track merging, TPC Clusters required");
194 return false;
195 }
196#ifdef GPUCA_RUN2
198 GPUError("Can not run TPC GPU Cluster Finding with Run 2 Data");
199 return false;
200 }
201#endif
203 GPUError("Missing input for TPC Cluster conversion / sector tracking / compression / dEdx: TPC Clusters required");
204 return false;
205 }
207 GPUError("Input for TPC merger missing");
208 return false;
209 }
211 GPUError("Input for TPC compressor missing");
212 return false;
213 }
215 GPUError("Input for TRD Tracker missing");
216 return false;
217 }
218 if ((GetRecoSteps() & gpudatatypes::RecoStep::TRDTracking) && (processors()->calibObjects.trdRecoParam == nullptr)) {
219 GPUError("TRD Reco Parameters are missing");
220 return false;
221 }
223 GPUError("TPC Raw / TPC Clusters / TRD Tracklets cannot be output");
224 return false;
225 }
227 GPUError("No TPC Merged Track Output available");
228 return false;
229 }
231 GPUError("No TPC Compression Output available");
232 return false;
233 }
235 GPUError("No TRD Tracker Output available");
236 return false;
237 }
238 if ((GetRecoSteps() & gpudatatypes::RecoStep::TPCdEdx) && (processors()->calibObjects.dEdxCalibContainer == nullptr)) {
239 GPUError("Cannot run dE/dx without dE/dx calibration container object");
240 return false;
241 }
242 if ((GetRecoSteps() & gpudatatypes::RecoStep::TPCClusterFinding) && processors()->calibObjects.tpcPadGain == nullptr) {
243 GPUError("Cannot run gain calibration without calibration object");
244 return false;
245 }
246 if ((GetRecoSteps() & gpudatatypes::RecoStep::TPCClusterFinding) && processors()->calibObjects.tpcZSLinkMapping == nullptr && mIOPtrs.tpcZS != nullptr) {
247 GPUError("Cannot run TPC ZS Decoder without mapping object. (tpczslinkmapping.dump missing?)");
248 return false;
249 }
250 return true;
251}
252
254{
255 int32_t gatherMode = mRec->GetProcessingSettings().tpcCompressionGatherMode == -1 ? mRec->getGPUParameters(mRec->GetRecoStepsGPU() & gpudatatypes::RecoStep::TPCCompression).par_COMP_GATHER_MODE : mRec->GetProcessingSettings().tpcCompressionGatherMode;
256 if ((param().rec.tpc.nWays & 1) == 0) {
257 GPUError("nWay setting musst be odd number!");
258 return false;
259 }
260 if (param().rec.tpc.mergerInterpolateErrors && param().rec.tpc.nWays < 3) {
261 GPUError("Cannot do error interpolation with NWays < 3!");
262 return false;
263 }
264 if (param().continuousMaxTimeBin > (int32_t)GPUSettings::TPC_MAX_TF_TIME_BIN) {
265 GPUError("configured max time bin exceeds 256 orbits");
266 return false;
267 }
268 if ((GetRecoStepsGPU() & RecoStep::TPCClusterFinding) && std::max(GetProcessingSettings().nTPCClustererLanes + 1, GetProcessingSettings().nTPCClustererLanes * 2) + (GetProcessingSettings().doublePipeline ? 1 : 0) > (int32_t)mRec->NStreams()) {
269 GPUError("NStreams of %d insufficient for %d nTPCClustererLanes", mRec->NStreams(), (int32_t)GetProcessingSettings().nTPCClustererLanes);
270 return false;
271 }
272 if ((mRec->GetRecoStepsGPU() & gpudatatypes::RecoStep::TPCCompression) && GetProcessingSettings().noGPUMemoryRegistration && gatherMode != 3) {
273 GPUError("noGPUMemoryRegistration only possible with gather mode 3 (set to %d / %d)", mRec->GetProcessingSettings().tpcCompressionGatherMode, gatherMode);
274 return false;
275 }
276 if (mRec->IsGPU() && (GetProcessingSettings().clusterizerZSSanityCheck || GetProcessingSettings().mergerSanityCheck)) {
277 GPUError("Clusterizer and merger Sanity checks only supported when not running on GPU");
278 return false;
279 }
280 if (GetProcessingSettings().tpcWriteClustersAfterRejection && (mRec->IsGPU() || param().rec.tpc.compressionTypeMask || !(GetRecoSteps() & gpudatatypes::RecoStep::TPCCompression))) {
281 GPUError("tpcWriteClustersAfterRejection requires compressionTypeMask = 0, no GPU usage, and compression enabled");
282 return false;
283 }
284 if (GetProcessingSettings().doublePipeline) {
285 if (GetProcessingSettings().tpcFreeAllocatedMemoryAfterProcessing) {
286 GPUError("Cannot use double pipeline with tpcFreeAllocatedMemoryAfterProcessing");
287 return false;
288 }
290 GPUError("Invalid outputs for double pipeline mode 0x%x", (uint32_t)GetRecoStepsOutputs());
291 return false;
292 }
297 GPUError("Must use external output for double pipeline mode");
298 return false;
299 }
300 if (gatherMode == 1) {
301 GPUError("Double pipeline incompatible to compression mode 1");
302 return false;
303 }
305 GPUError("Invalid reconstruction settings for double pipeline: Needs compression and cluster finding");
306 return false;
307 }
308 }
// NOTE(review): this condition tests the same flag against itself ("X && !X")
// and is therefore always false — the "compression on CPU" error branch below is
// dead code. The first operand most likely should be GetRecoSteps() (compression
// enabled at all) while only the second tests GetRecoStepsGPU() (compression NOT
// on GPU). Verify against the upstream repository and fix.
309 if ((GetRecoStepsGPU() & gpudatatypes::RecoStep::TPCCompression) && !(GetRecoStepsGPU() & gpudatatypes::RecoStep::TPCCompression) && (gatherMode == 1 || gatherMode == 3)) {
310 GPUError("Invalid tpcCompressionGatherMode for compression on CPU");
311 return false;
312 }
313 if (GetProcessingSettings().tpcApplyClusterFilterOnCPU > 0 && (GetRecoStepsGPU() & gpudatatypes::RecoStep::TPCClusterFinding || GetProcessingSettings().runMC)) {
314 GPUError("tpcApplyClusterFilterOnCPU cannot be used with GPU clusterization or with MC labels");
315 return false;
316 }
317 if (GetRecoSteps() & RecoStep::TRDTracking) {
318 if (GetProcessingSettings().trdTrackModelO2 && (GetProcessingSettings().createO2Output == 0 || (GetMatLUT() == nullptr && !GetProcessingSettings().willProvideO2PropagatorLate))) {
319 GPUError("TRD tracking can only run on O2 TPC tracks if createO2Output is enabled (%d), and matBudLUT is available (0x%p)", (int32_t)GetProcessingSettings().createO2Output, (void*)GetMatLUT());
320 return false;
321 }
322 if ((GetRecoStepsGPU() & RecoStep::TRDTracking) && !GetProcessingSettings().trdTrackModelO2 && GetProcessingSettings().createO2Output > 1) {
323 GPUError("TRD tracking can only run on GPU TPC tracks if the createO2Output setting does not suppress them");
324 return false;
325 }
326 if ((((GetRecoStepsGPU() & RecoStep::TRDTracking) && GetProcessingSettings().trdTrackModelO2) || ((GetRecoStepsGPU() & RecoStep::Refit) && !param().rec.trackingRefitGPUModel)) && (!GetProcessingSettings().o2PropagatorUseGPUField || (GetMatLUT() == nullptr && !GetProcessingSettings().willProvideO2PropagatorLate))) {
327 GPUError("Cannot use TRD tracking or Refit on GPU without GPU polynomial field map (%d) or matlut table (%p)", (int32_t)GetProcessingSettings().o2PropagatorUseGPUField, (void*)GetMatLUT());
328 return false;
329 }
330 }
331 return true;
332}
333
335{
336 const auto& threadContext = GetThreadContext();
337 if (GetProcessingSettings().debugLevel >= 1) {
338 printf("Enabled Reconstruction Steps: 0x%x (on GPU: 0x%x)", (int32_t)GetRecoSteps().get(), (int32_t)GetRecoStepsGPU().get());
339 for (uint32_t i = 0; i < sizeof(gpudatatypes::RECO_STEP_NAMES) / sizeof(gpudatatypes::RECO_STEP_NAMES[0]); i++) {
340 if (GetRecoSteps().isSet(1u << i)) {
341 printf(" - %s", gpudatatypes::RECO_STEP_NAMES[i]);
342 if (GetRecoStepsGPU().isSet(1u << i)) {
343 printf(" (G)");
344 }
345 }
346 }
347 printf("\n");
348 }
349 if (!ValidateSteps()) {
350 return 1;
351 }
352
353 for (uint32_t i = 0; i < mSubOutputControls.size(); i++) {
354 if (mSubOutputControls[i] == nullptr) {
356 }
357 }
358
359 if (!ValidateSettings()) {
360 return 1;
361 }
362
365 if (!qa) {
366 qa.reset(new GPUQA(this));
367 }
368 }
371 if (mEventDisplay == nullptr) {
372 throw std::runtime_error("Error loading event display");
373 }
374 }
375
378
379 if (mRec->IsGPU()) {
381 UpdateGPUCalibObjectsPtrs(-1); // First initialization, for users not using RunChain
383 WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->errorCodes - (char*)processors(), &processorsShadow()->errorCodes, sizeof(processorsShadow()->errorCodes), -1);
384 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes);
385 }
386
387 if (GetProcessingSettings().debugLevel >= 6) {
388 std::string filename = std::string(mRec->IsGPU() ? "GPU" : "CPU") + (mRec->slaveId() != -1 ? (std::string("_slave") + std::to_string(mRec->slaveId())) : std::string(mRec->slavesExist() ? "_master" : "")) + GetProcessingSettings().debugLogSuffix + ".out";
389 mDebugFile->open(filename.c_str());
390 if (GetProcessingSettings().debugFileHexFloat >= 1 || (GetProcessingSettings().debugFileHexFloat == -1 && GetProcessingSettings().deterministicGPUReconstruction)) {
391 *mDebugFile << std::hexfloat;
392 }
393 }
394
395 return 0;
396}
397
399{
400 // TODO: Is this correct?
401 if (processors()->calibObjects.fastTransform && (ptrMask == nullptr || ptrMask->fastTransform)) {
402 memcpy((void*)mFlatObjectsShadow.mCalibObjects.fastTransform, (const void*)processors()->calibObjects.fastTransform, processors()->calibObjects.fastTransform->size());
403 }
404
405 if (processors()->calibObjects.dEdxCalibContainer && (ptrMask == nullptr || ptrMask->dEdxCalibContainer)) {
406 memcpy((void*)mFlatObjectsShadow.mCalibObjects.dEdxCalibContainer, (const void*)processors()->calibObjects.dEdxCalibContainer, sizeof(*processors()->calibObjects.dEdxCalibContainer));
407 memcpy((void*)mFlatObjectsShadow.mdEdxSplinesBuffer, (const void*)processors()->calibObjects.dEdxCalibContainer->getFlatBufferPtr(), processors()->calibObjects.dEdxCalibContainer->getFlatBufferSize());
408 mFlatObjectsShadow.mCalibObjects.dEdxCalibContainer->clearInternalBufferPtr();
411 }
412 if (processors()->calibObjects.matLUT && (ptrMask == nullptr || ptrMask->matLUT)) {
413 memcpy((void*)mFlatObjectsShadow.mCalibObjects.matLUT, (const void*)processors()->calibObjects.matLUT, sizeof(*processors()->calibObjects.matLUT));
414 memcpy((void*)mFlatObjectsShadow.mMatLUTBuffer, (const void*)processors()->calibObjects.matLUT->getFlatBufferPtr(), processors()->calibObjects.matLUT->getFlatBufferSize());
415 mFlatObjectsShadow.mCalibObjects.matLUT->clearInternalBufferPtr();
418 }
419 if (processors()->calibObjects.trdGeometry && (ptrMask == nullptr || ptrMask->trdGeometry)) {
420 memcpy((void*)mFlatObjectsShadow.mCalibObjects.trdGeometry, (const void*)processors()->calibObjects.trdGeometry, sizeof(*processors()->calibObjects.trdGeometry));
421 mFlatObjectsShadow.mCalibObjects.trdGeometry->clearInternalBufferPtr();
422 }
423 if (processors()->calibObjects.trdRecoParam && (ptrMask == nullptr || ptrMask->trdRecoParam)) {
424 memcpy((void*)mFlatObjectsShadow.mCalibObjects.trdRecoParam, (const void*)processors()->calibObjects.trdRecoParam, sizeof(*processors()->calibObjects.trdRecoParam));
425 }
426 if (processors()->calibObjects.tpcPadGain && (ptrMask == nullptr || ptrMask->tpcPadGain)) {
427 memcpy((void*)mFlatObjectsShadow.mCalibObjects.tpcPadGain, (const void*)processors()->calibObjects.tpcPadGain, sizeof(*processors()->calibObjects.tpcPadGain));
428 }
429 if (processors()->calibObjects.tpcZSLinkMapping && (ptrMask == nullptr || ptrMask->tpcZSLinkMapping)) {
430 memcpy((void*)mFlatObjectsShadow.mCalibObjects.tpcZSLinkMapping, (const void*)processors()->calibObjects.tpcZSLinkMapping, sizeof(*processors()->calibObjects.tpcZSLinkMapping));
431 }
432 if (processors()->calibObjects.o2Propagator && (ptrMask == nullptr || ptrMask->o2Propagator)) {
433 memcpy((void*)mFlatObjectsShadow.mCalibObjects.o2Propagator, (const void*)processors()->calibObjects.o2Propagator, sizeof(*processors()->calibObjects.o2Propagator));
434 mFlatObjectsShadow.mCalibObjects.o2Propagator->setGPUField(&processorsDevice()->param.polynomialField);
436 }
438 memcpy((void*)&processorsShadow()->calibObjects, (void*)&mFlatObjectsDevice.mCalibObjects, sizeof(mFlatObjectsDevice.mCalibObjects));
439}
440
442{
443 WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->calibObjects - (char*)processors(), &mFlatObjectsDevice.mCalibObjects, sizeof(mFlatObjectsDevice.mCalibObjects), stream);
444}
445
447{
451 }
452 if (mIOPtrs.tpcZS && param().rec.fwdTPCDigitsAsClusters) {
453 throw std::runtime_error("Forwading zero-suppressed hits not supported");
454 }
456 return 0;
457}
458
460{
462 if (!qa) {
463 qa.reset(new GPUQA(this));
464 }
465 if (!GetQA()->IsInitialized()) {
467 }
468 return 0;
469}
470
472{
473 if (GetProcessingSettings().runQA && GetQA()->IsInitialized() && !(mConfigQA && mConfigQA->shipToQC) && !mQAFromForeignChain) {
474 GetQA()->UpdateChain(this);
476 }
477 if (GetProcessingSettings().debugLevel >= 6) {
478 mDebugFile->close();
479 }
481 mCompressionStatistics->Finish();
482 }
483 return 0;
484}
485
487{
488 char* fastTransformBase = (char*)mem;
490 // TODO: Is this correct?!
491 char* podBuf = nullptr;
493 mCalibObjects.fastTransform = reinterpret_cast<TPCFastTransformPOD*>(podBuf);
494 }
495 if ((char*)mem - fastTransformBase < mChainTracking->GetProcessingSettings().fastTransformObjectsMinMemorySize) {
496 mem = fastTransformBase + mChainTracking->GetProcessingSettings().fastTransformObjectsMinMemorySize; // TODO: Fixme and do proper dynamic allocation
497 }
500 }
503 }
504 char* dummyPtr;
508 } else if (mChainTracking->GetProcessingSettings().lateO2MatLutProvisioningSize) {
509 computePointerWithAlignment(mem, dummyPtr, mChainTracking->GetProcessingSettings().lateO2MatLutProvisioningSize);
510 }
514 }
517 }
520 }
523 mCalibObjects.o2Propagator = nullptr; // Always reserve memory for o2::Propagator, since it may be propagatred only during run() not during init().
524 }
526 mem = (char*)mem + mChainTracking->GetProcessingSettings().calibObjectsExtraMemorySize; // TODO: Fixme and do proper dynamic allocation
527 }
528 return mem;
529}
530
532{
533 std::memset((void*)&mIOPtrs, 0, sizeof(mIOPtrs));
535 new (&mIOMem) InOutMemory;
536 mClusterNativeAccessReduced.reset(nullptr);
537 if (mClusterNativeAccess.get()) {
538 memset((void*)mClusterNativeAccess.get(), 0, sizeof(*mClusterNativeAccess));
539 }
540}
541
543{
544 for (uint32_t i = 0; i < NSECTORS; i++) {
549 }
551 std::memset(mIOMem.clusterNativeAccess.get(), 0, sizeof(ClusterNativeAccess)); // ClusterNativeAccess has no its own constructor
565}
566
572
/// Takes ownership of the material-budget LUT (moves it into mMatLUTU).
/// NOTE(review): the doc generator elided original line 576 here — presumably it
/// publishes the raw pointer into the calib-object table; confirm in the repository.
573void GPUChainTracking::SetMatLUT(std::unique_ptr<o2::base::MatLayerCylSet>&& lut)
574{
575 mMatLUTU = std::move(lut);
577}
578
/// Takes ownership of the flat TRD geometry (moves it into mTRDGeometryU).
/// NOTE(review): the doc generator elided original line 582 here — presumably it
/// publishes the raw pointer into the calib-object table; confirm in the repository.
579void GPUChainTracking::SetTRDGeometry(std::unique_ptr<o2::trd::GeometryFlat>&& geo)
580{
581 mTRDGeometryU = std::move(geo);
583}
584
/// Takes ownership of the TRD reconstruction parameters (moves them into mTRDRecoParamU).
/// NOTE(review): the doc generator elided original line 588 here — presumably it
/// publishes the raw pointer into the calib-object table; confirm in the repository.
585void GPUChainTracking::SetTRDRecoParam(std::unique_ptr<GPUTRDRecoParam>&& par)
586{
587 mTRDRecoParamU = std::move(par);
589}
590
591int32_t GPUChainTracking::DoQueuedUpdates(int32_t stream, bool updateSlave)
592{
593 int32_t retVal = 0;
594 std::unique_ptr<GPUSettingsGRP> grp;
595 const GPUSettingsProcessing* p = nullptr;
596 std::lock_guard lk(mMutexUpdateCalib);
598 if (mNewCalibValues->newSolenoidField || mNewCalibValues->newContinuousMaxTimeBin || mNewCalibValues->newTPCTimeBinCut) {
599 grp = std::make_unique<GPUSettingsGRP>(mRec->GetGRPSettings());
600 if (mNewCalibValues->newSolenoidField) {
601 grp->solenoidBzNominalGPU = mNewCalibValues->solenoidField;
602 }
603 if (mNewCalibValues->newContinuousMaxTimeBin) {
604 grp->grpContinuousMaxTimeBin = mNewCalibValues->continuousMaxTimeBin;
605 }
606 if (mNewCalibValues->newTPCTimeBinCut) {
607 grp->tpcCutTimeBin = mNewCalibValues->tpcTimeBinCut;
608 }
609 }
610 }
611 if (GetProcessingSettings().tpcDownscaledEdx != 0) {
613 }
614 if (grp || p) {
615 mRec->UpdateSettings(grp.get(), p);
616 retVal = 1;
617 }
619 if (mNewCalibObjects->o2Propagator && ((mNewCalibObjects->o2Propagator->getGPUField() != nullptr) ^ GetProcessingSettings().o2PropagatorUseGPUField)) {
620 GPUFatal("GPU magnetic field for propagator requested, but received an O2 propagator without GPU field");
621 }
622 void* const* pSrc = (void* const*)mNewCalibObjects.get();
623 void** pDst = (void**)&processors()->calibObjects;
624 for (uint32_t i = 0; i < sizeof(processors()->calibObjects) / sizeof(void*); i++) {
625 if (pSrc[i]) {
626 pDst[i] = pSrc[i];
627 }
628 }
630 if (GetProcessingSettings().trdTrackModelO2) {
632 if (mRec->IsGPU()) {
633 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, processors()->trdTrackerO2.MemoryPermanent(), stream);
634 }
635 } else {
637 if (mRec->IsGPU()) {
638 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, processors()->trdTrackerGPU.MemoryPermanent(), stream);
639 }
640 }
641 }
642 if (mRec->IsGPU()) {
643 std::array<uint8_t, sizeof(GPUTrackingFlatObjects)> oldFlatPtrs, oldFlatPtrsDevice;
644 memcpy(oldFlatPtrs.data(), (void*)&mFlatObjectsShadow, oldFlatPtrs.size());
645 memcpy(oldFlatPtrsDevice.data(), (void*)&mFlatObjectsDevice, oldFlatPtrsDevice.size());
647 bool ptrsChanged = memcmp(oldFlatPtrs.data(), (void*)&mFlatObjectsShadow, oldFlatPtrs.size()) || memcmp(oldFlatPtrsDevice.data(), (void*)&mFlatObjectsDevice, oldFlatPtrsDevice.size());
648 if (ptrsChanged) {
649 GPUInfo("Updating all calib objects since pointers changed");
650 }
651 UpdateGPUCalibObjects(stream, ptrsChanged ? nullptr : mNewCalibObjects.get());
652 }
653 }
654
655 if ((mUpdateNewCalibObjects || (mRec->slavesExist() && updateSlave)) && mRec->IsGPU()) {
656 UpdateGPUCalibObjectsPtrs(stream); // Reinitialize
657 retVal = 1;
658 }
659 mNewCalibObjects.reset(nullptr);
660 mNewCalibValues.reset(nullptr);
662 return retVal;
663}
664
666{
667 if ((((GetRecoSteps() & RecoStep::TRDTracking) && !GetProcessingSettings().trdTrackModelO2 && !GetProcessingSettings().willProvideO2PropagatorLate) || ((GetRecoSteps() & RecoStep::Refit) && !param().rec.trackingRefitGPUModel)) && processors()->calibObjects.o2Propagator == nullptr) {
668 GPUFatal("Cannot run TRD tracking or refit with o2 track model without o2 propagator"); // This check must happen during run, since o2::Propagator cannot be available during init
669 }
670 if (GetProcessingSettings().autoAdjustHostThreads && !mRec->IsGPU()) {
672 }
673 const auto threadContext = GetThreadContext();
674 if (GetProcessingSettings().runCompressionStatistics && mCompressionStatistics == nullptr) {
676 }
677 const bool needQA = GPUQA::QAAvailable() && (GetProcessingSettings().runQA || (GetProcessingSettings().eventDisplay && (mIOPtrs.nMCInfosTPC || GetProcessingSettings().runMC)));
678 if (needQA && GetQA()->IsInitialized() == false) {
680 return 1;
681 }
682 }
683 if (needQA) {
684 mFractionalQAEnabled = GetProcessingSettings().qcRunFraction == 100.f || (uint32_t)(rand() % 10000) < (uint32_t)(GetProcessingSettings().qcRunFraction * 100);
685 }
686 if (GetProcessingSettings().debugLevel >= 6) {
687 *mDebugFile << "\n\nProcessing event " << mRec->getNEventsProcessed() << std::endl;
688 }
690
691 mRec->getGeneralStepTimer(GeneralStep::Prepare).Start();
692 try {
694 } catch (const std::bad_alloc& e) {
695 GPUError("Memory Allocation Error");
696 return (1);
697 }
698 mRec->getGeneralStepTimer(GeneralStep::Prepare).Stop();
699
701
702 SynchronizeStream(0); // Synchronize all init copies that might be ongoing
703
704 if (GetProcessingSettings().debugOnFailure) {
705 mRec->setDebugDumpCallback([this]() { DoDebugRawDump(); });
706 }
707
709 if (runRecoStep(RecoStep::TPCDecompression, &GPUChainTracking::RunTPCDecompression)) {
710 return 1;
711 }
712 } else if (mIOPtrs.tpcPackedDigits || mIOPtrs.tpcZS) {
713 if (runRecoStep(RecoStep::TPCClusterFinding, &GPUChainTracking::RunTPCClusterizer, false)) {
714 return 1;
715 }
716 }
717
718 if (GetProcessingSettings().autoAdjustHostThreads && !mRec->IsGPU() && mIOPtrs.clustersNative) {
720 }
721
723 return 1;
724 }
725
726 mRec->PushNonPersistentMemory(qStr2Tag("TPCSLCD1")); // 1st stack level for TPC tracking sector data
728 if (runRecoStep(RecoStep::TPCSectorTracking, &GPUChainTracking::RunTPCTrackingSectors)) {
729 return 1;
730 }
731
732 if (runRecoStep(RecoStep::TPCMerging, &GPUChainTracking::RunTPCTrackingMerger, false)) {
733 return 1;
734 }
736 mRec->PopNonPersistentMemory(RecoStep::TPCSectorTracking, qStr2Tag("TPCSLCD1")); // Release 1st stack level, TPC sector data not needed after merger
738 }
739
741 if (GetProcessingSettings().doublePipeline) {
743 if (foreignChain && foreignChain->mIOPtrs.tpcZS) {
744 if (GetProcessingSettings().debugLevel >= 3) {
745 GPUInfo("Preempting tpcZS input of foreign chain");
746 }
747 mPipelineFinalizationCtx.reset(new GPUChainTrackingFinalContext);
748 mPipelineFinalizationCtx->rec = this->mRec;
749 foreignChain->mPipelineNotifyCtx = mPipelineFinalizationCtx.get();
750 }
751 }
752 if (runRecoStep(RecoStep::TPCCompression, &GPUChainTracking::RunTPCCompression)) {
753 return 1;
754 }
755 }
756
757 if (runRecoStep(RecoStep::TRDTracking, &GPUChainTracking::RunTRDTracking)) {
758 return 1;
759 }
760
761 if (runRecoStep(RecoStep::Refit, &GPUChainTracking::RunRefit)) {
762 return 1;
763 }
764
765 if (!GetProcessingSettings().doublePipeline) { // Synchronize with output copies running asynchronously
766 SynchronizeStream(OutputStream());
767 }
768
769 if (GetProcessingSettings().autoAdjustHostThreads && !mRec->IsGPU()) {
771 }
772
773 int32_t retVal = 0;
774 if (CheckErrorCodes(false, false, mRec->getErrorCodeOutput())) { // TODO: Eventually, we should use GPUReconstruction::CheckErrorCodes
775 retVal = 3;
776 if (!GetProcessingSettings().ignoreNonFatalGPUErrors) {
777 return retVal;
778 }
779 }
780
781 if (GetProcessingSettings().doublePipeline) {
782 return retVal;
783 }
784 int32_t retVal2 = RunChainFinalize();
785 return retVal2 ? retVal2 : retVal;
786}
787
788int32_t GPUChainTracking::RunChainFinalize()
789{
790 if (mIOPtrs.clustersNative && (GetRecoSteps() & RecoStep::TPCCompression) && GetProcessingSettings().runCompressionStatistics) {
793 }
794
795 if (GetProcessingSettings().outputSanityCheck) {
796 OutputSanityCheck();
797 }
798
799 const bool needQA = GPUQA::QAAvailable() && (GetProcessingSettings().runQA || (GetProcessingSettings().eventDisplay && mIOPtrs.nMCInfosTPC));
800 if (needQA && mFractionalQAEnabled) {
801 mRec->getGeneralStepTimer(GeneralStep::QA).Start();
802 GetQA()->UpdateChain(this);
803 GetQA()->RunQA(!GetProcessingSettings().runQA);
804 mRec->getGeneralStepTimer(GeneralStep::QA).Stop();
805 if (GetProcessingSettings().debugLevel == 0) {
806 GPUInfo("Total QA runtime: %d us", (int32_t)(mRec->getGeneralStepTimer(GeneralStep::QA).GetElapsedTime() * 1000000));
807 }
808 }
809
810 if (GetProcessingSettings().showOutputStat) {
812 }
813
815
816 // PrintMemoryRelations();
817
819 if (!mDisplayRunning) {
820 GPUInfo("Starting Event Display...");
821 if (mEventDisplay->StartDisplay()) {
822 GPUError("Error starting Event Display");
823 return (1);
824 }
825 mDisplayRunning = true;
826 } else {
827 mEventDisplay->ShowNextEvent();
828 }
829
830 mEventDisplay->WaitTillEventShown();
831
832 if (GetProcessingSettings().eventDisplay->EnableSendKey()) {
833 while (kbhit()) {
834 getch();
835 }
836 GPUInfo("Press key for next event!");
837 }
838
839 int32_t iKey;
840 do {
841 usleep(10000);
842 if (GetProcessingSettings().eventDisplay->EnableSendKey()) {
843 iKey = kbhit() ? getch() : 0;
844 if (iKey == 27) {
845 GetProcessingSettings().eventDisplay->setDisplayControl(2);
846 } else if (iKey == 'n') {
847 break;
848 } else if (iKey) {
849 while (GetProcessingSettings().eventDisplay->getSendKey() != 0) {
850 usleep(1000);
851 }
852 GetProcessingSettings().eventDisplay->setSendKey(iKey);
853 }
854 }
855 } while (GetProcessingSettings().eventDisplay->getDisplayControl() == 0);
856 if (GetProcessingSettings().eventDisplay->getDisplayControl() == 2) {
857 mDisplayRunning = false;
858 GetProcessingSettings().eventDisplay->DisplayExit();
859 const_cast<GPUSettingsProcessing&>(GetProcessingSettings()).eventDisplay = nullptr; // TODO: fixme - eventDisplay should probably not be put into ProcessingSettings in the first place
860 return (2);
861 }
862 GetProcessingSettings().eventDisplay->setDisplayControl(0);
863 GPUInfo("Loading next event...");
864
865 mEventDisplay->BlockTillNextEvent();
866 }
867
868 return 0;
869}
870
872{
873 if (mPipelineFinalizationCtx) {
874 {
875 std::unique_lock<std::mutex> lock(mPipelineFinalizationCtx->mutex);
876 auto* ctx = mPipelineFinalizationCtx.get();
877 mPipelineFinalizationCtx->cond.wait(lock, [ctx]() { return ctx->ready; });
878 }
879 mPipelineFinalizationCtx.reset();
880 }
881 return RunChainFinalize();
882}
883
884int32_t GPUChainTracking::CheckErrorCodes(bool cpuOnly, bool forceShowErrors, std::vector<std::array<uint32_t, 4>>* fillErrors)
885{
886 int32_t retVal = 0;
887 bool hasDebugError = false;
888 for (int32_t i = 0; i < 1 + (!cpuOnly && mRec->IsGPU()); i++) {
889 if (i) {
890 const auto& threadContext = GetThreadContext();
891 if (GetProcessingSettings().doublePipeline) {
892 TransferMemoryResourceLinkToHost(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes, 0);
894 } else {
895 TransferMemoryResourceLinkToHost(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes);
896 }
897 }
898 if (processors()->errorCodes.hasError()) {
899 static int32_t errorsShown = 0;
900 static bool quiet = false;
901 static std::chrono::time_point<std::chrono::steady_clock> silenceFrom;
902 if (!quiet && errorsShown++ >= 10 && GetProcessingSettings().throttleAlarms && !forceShowErrors) {
903 silenceFrom = std::chrono::steady_clock::now();
904 quiet = true;
905 } else if (quiet) {
906 auto currentTime = std::chrono::steady_clock::now();
907 std::chrono::duration<double> elapsed_seconds = currentTime - silenceFrom;
908 if (elapsed_seconds.count() > 60 * 10) {
909 quiet = false;
910 errorsShown = 1;
911 }
912 }
913 retVal = 1;
914 if (GetProcessingSettings().throttleAlarms && !forceShowErrors) {
915 GPUWarning("GPUReconstruction suffered from an error in the %s part", i ? "GPU" : "CPU");
916 } else {
917 GPUError("GPUReconstruction suffered from an error in the %s part", i ? "GPU" : "CPU");
918 }
919 if (!quiet) {
920 processors()->errorCodes.printErrors(GetProcessingSettings().throttleAlarms && !forceShowErrors);
921 }
922 if (fillErrors) {
923 uint32_t nErrors = processors()->errorCodes.getNErrors();
924 const uint32_t* pErrors = processors()->errorCodes.getErrorPtr();
925 for (uint32_t j = 0; j < nErrors; j++) {
926 fillErrors->emplace_back(std::array<uint32_t, 4>{pErrors[4 * j], pErrors[4 * j + 1], pErrors[4 * j + 2], pErrors[4 * j + 3]});
927 }
928 }
929 if ((GetProcessingSettings().debugOnFailure & 1) || (GetProcessingSettings().debugOnFailure & 4)) {
930 if (GetProcessingSettings().debugOnFailureErrorMask == (uint64_t)-1) {
931 hasDebugError = true;
932 } else {
933 uint32_t nErrors = processors()->errorCodes.getNErrors();
934 const uint32_t* pErrors = processors()->errorCodes.getErrorPtr();
935 for (uint32_t j = 0; j < nErrors; j++) {
936 if (GetProcessingSettings().debugOnFailureErrorMask & (1 << pErrors[4 * j])) {
937 hasDebugError = true;
938 break;
939 }
940 }
941 }
942 }
943 }
944 }
945 ClearErrorCodes(cpuOnly);
946 if (hasDebugError) {
948 }
949 return retVal;
950}
951
953{
955 if (mRec->IsGPU() && !cpuOnly) {
956 const auto& threadContext = GetThreadContext();
957 WriteToConstantMemory(RecoStep::NoRecoStep, (char*)&processors()->errorCodes - (char*)processors(), &processorsShadow()->errorCodes, sizeof(processorsShadow()->errorCodes), 0);
958 TransferMemoryResourceLinkToGPU(RecoStep::NoRecoStep, mInputsHost->mResourceErrorCodes, 0);
959 }
960}
961
963{
964 std::lock_guard lk(mMutexUpdateCalib);
965 if (mNewCalibObjects) {
966 void* const* pSrc = (void* const*)&obj;
967 void** pDst = (void**)mNewCalibObjects.get();
968 for (uint32_t i = 0; i < sizeof(*mNewCalibObjects) / sizeof(void*); i++) {
969 if (pSrc[i]) {
970 pDst[i] = pSrc[i];
971 }
972 }
973 } else {
975 }
976 if (mNewCalibValues) {
977 mNewCalibValues->updateFrom(&vals);
978 } else {
979 mNewCalibValues.reset(new GPUNewCalibValues(vals));
980 }
982}
983
985{
986 return (mRec->IsGPU() ? processorsShadow() : processors())->calibObjects.o2Propagator;
987}
988
990{
992 if ((prop->getGPUField() != nullptr) ^ GetProcessingSettings().o2PropagatorUseGPUField) {
993 GPUFatal("GPU magnetic field for propagator requested, but received an O2 propagator without GPU field");
994 }
995}
996
997void GPUChainTracking::ApplySyncSettings(GPUSettingsProcessing& proc, GPUSettingsRec& rec, gpudatatypes::RecoStepField& steps, bool syncMode, int32_t dEdxMode)
998{
999 if (syncMode) {
1000 rec.useMatLUT = false;
1001 }
1002 if (proc.rtc.optSpecialCode == -1) {
1003 proc.rtc.optSpecialCode = syncMode;
1004 }
1005 if (dEdxMode != -2) {
1006 steps.setBits(gpudatatypes::RecoStep::TPCdEdx, dEdxMode == -1 ? !syncMode : (dEdxMode > 0));
1007 }
1008}
Definition of container class for dE/dx corrections.
int32_t i
int32_t retVal
Online TRD tracker based on extrapolated TPC tracks.
Used for storing the MC labels for the TRD tracklets.
TRD Tracklet word for GPU tracker - 32bit tracklet info + half chamber ID + index.
Definition of a container to keep Monte Carlo truth external to simulation objects.
uint32_t j
Definition RawData.h:0
uint32_t c
Definition RawData.h:2
POD correction map.
void Start()
Definition timer.cxx:64
double GetElapsedTime()
Definition timer.cxx:115
void Stop()
Definition timer.cxx:76
bitfield & setBits(const bitfield v, bool w)
Definition bitfield.h:45
bool isSet(const bitfield &v) const
Definition bitfield.h:66
void SetMatLUT(std::unique_ptr< o2::base::MatLayerCylSet > &&lut)
static void ApplySyncSettings(GPUSettingsProcessing &proc, GPUSettingsRec &rec, gpudatatypes::RecoStepField &steps, bool syncMode, int32_t dEdxMode=-2)
std::unique_ptr< o2::base::MatLayerCylSet > mMatLUTU
const o2::base::Propagator * GetDeviceO2Propagator()
std::unique_ptr< o2::tpc::ClusterNativeAccess > mClusterNativeAccess
std::unique_ptr< GPUTRDRecoParam > mTRDRecoParamU
void SetTRDGeometry(std::unique_ptr< o2::trd::GeometryFlat > &&geo)
GPUChainTracking * mQAFromForeignChain
void SetO2Propagator(const o2::base::Propagator *prop)
std::unique_ptr< GPUQA > mQA
GPUTrackingFlatObjects mFlatObjectsDevice
int32_t RunTPCClusterizer(bool synchronizeOutput=true)
void UpdateGPUCalibObjectsPtrs(int32_t stream)
int32_t RunTPCTrackingMerger(bool synchronizeOutput=true)
std::unique_ptr< GPUTrackingInputProvider > mInputsHost
std::unique_ptr< GPUTPCClusterStatistics > mCompressionStatistics
const o2::base::MatLayerCylSet * GetMatLUT() const
void SetTRDRecoParam(std::unique_ptr< GPUTRDRecoParam > &&par)
std::array< GPUOutputControl *, GPUTrackingOutputs::count()> mSubOutputControls
std::unique_ptr< std::ofstream > mDebugFile
void SetTPCFastTransform(aligned_unique_buffer_ptr< TPCFastTransformPOD > &&tpcFastTransform)
void SetUpdateCalibObjects(const GPUCalibObjectsConst &obj, const GPUNewCalibValues &vals)
std::unique_ptr< GPUCalibObjectsConst > mNewCalibObjects
std::unique_ptr< o2::trd::GeometryFlat > mTRDGeometryU
GPUTrackingFlatObjects mFlatObjectsShadow
void RegisterPermanentMemoryAndProcessors() override
void RegisterGPUProcessors() override
void ClearErrorCodes(bool cpuOnly=false)
const GPUQA * GetQA() const
void MemorySize(size_t &gpuMem, size_t &pageLockedHostMem) override
GPUChainTracking(GPUReconstruction *rec, uint32_t maxTPCHits=constants::GPU_MEM_MAX_TPC_CLUSTERS, uint32_t maxTRDTracklets=constants::GPU_MEM_MAX_TRD_TRACKLETS)
int32_t DoQueuedUpdates(int32_t stream, bool updateSlave=true)
std::unique_ptr< GPUNewCalibValues > mNewCalibValues
int32_t CheckErrorCodes(bool cpuOnly=false, bool forceShowErrors=false, std::vector< std::array< uint32_t, 4 > > *fillErrors=nullptr) override
int32_t PrepareEvent() override
void UpdateGPUCalibObjects(int32_t stream, const GPUCalibObjectsConst *ptrMask=nullptr)
GPUTrackingInOutPointers & mIOPtrs
struct o2::gpu::GPUChainTracking::InOutMemory mIOMem
const o2::tpc::CalibdEdxContainer * GetdEdxCalibContainer() const
std::unique_ptr< GPUTrackingInputProvider > mInputsShadow
const GPUSettingsQA * mConfigQA
int32_t FinalizePipelinedProcessing() override
std::unique_ptr< GPUDisplayInterface > mEventDisplay
std::unique_ptr< o2::tpc::ClusterNativeAccess > mClusterNativeAccessReduced
aligned_unique_buffer_ptr< TPCFastTransformPOD > mTPCFastTransformU
void TransferMemoryResourceLinkToGPU(RecoStep step, int16_t res, int32_t stream=-1, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
Definition GPUChain.h:124
GPUConstantMem * processorsDevice()
Definition GPUChain.h:86
GPUReconstruction::RecoStepField GetRecoStepsGPU() const
Definition GPUChain.h:72
GPUReconstruction::RecoStepField GetRecoSteps() const
Definition GPUChain.h:71
void WriteToConstantMemory(RecoStep step, size_t offset, const void *src, size_t size, int32_t stream=-1, deviceEvent *ev=nullptr)
Definition GPUChain.h:128
GPUChain * GetNextChainInQueue()
Definition GPUChain.h:236
GPUReconstruction::InOutTypeField GetRecoStepsOutputs() const
Definition GPUChain.h:74
virtual std::unique_ptr< GPUReconstructionProcessing::threadContext > GetThreadContext()
Definition GPUChain.h:109
GPUConstantMem * processors()
Definition GPUChain.h:84
GPUParam & param()
Definition GPUChain.h:87
const GPUSettingsProcessing & GetProcessingSettings() const
Definition GPUChain.h:76
void SynchronizeStream(int32_t stream)
Definition GPUChain.h:89
GPUReconstructionCPU * mRec
Definition GPUChain.h:79
GPUConstantMem * processorsShadow()
Definition GPUChain.h:85
GPUReconstruction::InOutTypeField GetRecoStepsInputs() const
Definition GPUChain.h:73
static constexpr int32_t NSECTORS
Definition GPUChain.h:58
void TransferMemoryResourceLinkToHost(RecoStep step, int16_t res, int32_t stream=-1, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
Definition GPUChain.h:125
void AllocateIOMemoryHelper(uint32_t n, const T *&ptr, std::unique_ptr< T[]> &u)
Definition GPUChain.h:140
int32_t runRecoStep(RecoStep step, S T::*func, Args... args)
Definition GPUChain.h:298
GPUReconstruction * rec()
Definition GPUChain.h:66
static GPUDisplayInterface * getDisplay(GPUDisplayFrontendInterface *frontend, GPUChainTracking *chain, GPUQA *qa, const GPUParam *param=nullptr, const GPUCalibObjectsConst *calib=nullptr, const GPUSettingsDisplay *config=nullptr, const GPUSettingsProcessing *proc=nullptr)
const uint32_t * getErrorPtr() const
Definition GPUErrors.cxx:96
void setMemory(GPUglobalref() uint32_t *m)
Definition GPUErrors.h:37
bool printErrors(bool silent=false, uint64_t mask=0)
Definition GPUErrors.cxx:61
uint32_t getNErrors() const
Definition GPUErrors.cxx:91
static void computePointerWithAlignment(T *&basePtr, S *&objPtr, size_t nEntries=1)
void InitGPUProcessor(GPUReconstruction *rec, ProcessorType type=PROCESSOR_TYPE_CPU, GPUProcessor *slaveProcessor=nullptr)
static bool QAAvailable()
Definition GPUQA.h:57
int32_t DrawQAHistograms()
Definition GPUQA.h:47
void UpdateChain(GPUChainTracking *chain)
Definition GPUQA.h:59
static bool IsInitialized()
Definition GPUQA.h:58
int32_t InitQA(int32_t tasks=0)
Definition GPUQA.h:45
void RunQA(bool matchOnly=false)
Definition GPUQA.h:46
HighResTimer & getGeneralStepTimer(GeneralStep step)
const GPUDefParameters & getGPUParameters(bool doGPU) const override
std::vector< std::array< uint32_t, 4 > > * getErrorCodeOutput()
RecoStepField GetRecoStepsGPU() const
void RegisterGPUDeviceProcessor(GPUProcessor *proc, GPUProcessor *slaveProcessor)
void RegisterGPUProcessor(T *proc, bool deviceSlave)
void setDebugDumpCallback(std::function< void()> &&callback=std::function< void()>(nullptr))
void ResetRegisteredMemoryPointers(GPUProcessor *proc)
int16_t RegisterMemoryAllocation(T *proc, void *(T::*setPtr)(void *), int32_t type, const char *name="", const GPUMemoryReuse &re=GPUMemoryReuse())
void PopNonPersistentMemory(RecoStep step, uint64_t tag, const GPUProcessor *proc=nullptr)
void UpdateSettings(const GPUSettingsGRP *g, const GPUSettingsProcessing *p=nullptr, const GPUSettingsRecDynamic *d=nullptr)
void PushNonPersistentMemory(uint64_t tag)
GPUMemorySizeScalers * MemoryScalers()
const GPUSettingsProcessing & GetProcessingSettings() const
const GPUSettingsGRP & GetGRPSettings() const
GPUOutputControl & OutputControl()
static constexpr const uint32_t TPC_MAX_TF_TIME_BIN
Definition GPUSettings.h:47
GLenum GLfloat param
Definition glcorearb.h:271
GLuint GLuint stream
Definition glcorearb.h:1806
GPUCalibObjectsTemplate< ConstPtr > GPUCalibObjectsConst
Global TPC definitions and constants.
Definition SimTraits.h:168
std::string to_string(gsl::span< T, Size > span)
Definition common.h:52
std::string filename()
std::unique_ptr< GPUDisplayFrontendInterface > eventDisplay
GPUReconstruction * rec
constexpr T qStr2Tag(const char(&str)[N])
Definition strtag.h:24
S< o2::trd::GeometryFlat >::type * trdGeometry
S< o2::tpc::CalibdEdxContainer >::type * dEdxCalibContainer
S< TPCZSLinkMapping >::type * tpcZSLinkMapping
S< TPCFastTransformPOD >::type * fastTransform
S< GPUTRDRecoParam >::type * trdRecoParam
S< TPCPadGainCalib >::type * tpcPadGain
S< o2::base::PropagatorImpl< float > >::type * o2Propagator
S< o2::base::MatLayerCylSet >::type * matLUT
std::unique_ptr< GPUTPCMCInfo[]> mcInfosTPC
std::unique_ptr< GPUTRDTrackletWord[]> trdTracklets
std::unique_ptr< GPUTPCMCInfoCol[]> mcInfosTPCCol
std::unique_ptr< GPUTPCGMMergedTrackHit[]> mergedTrackHits
std::unique_ptr< int32_t[]> trdTrackletIdxFirst
std::unique_ptr< GPUTPCGMMergedTrack[]> mergedTracks
std::unique_ptr< AliHLTTPCClusterMCLabel[]> mcLabelsTPC
std::unique_ptr< GPUTPCClusterData[]> clusterData[NSECTORS]
std::unique_ptr< o2::tpc::ClusterNative[]> clustersNative
std::unique_ptr< float[]> trdTriggerTimes
std::unique_ptr< GPUTPCTrack[]> sectorTracks[NSECTORS]
std::unique_ptr< GPUTRDSpacePoint[]> trdSpacePoints
std::unique_ptr< uint8_t[]> trdTrigRecMask
std::unique_ptr< o2::tpc::ClusterNativeAccess > clusterNativeAccess
std::unique_ptr< AliHLTTPCRawCluster[]> rawClusters[NSECTORS]
std::unique_ptr< GPUTPCHitId[]> sectorClusters[NSECTORS]
std::unique_ptr< GPUTRDTrackGPU[]> trdTracks
GPUTRDTrackerGPU trdTrackerGPU
GPUCalibObjectsConst calibObjects
const GPUTPCHitId * sectorClusters[NSECTORS]
const o2::tpc::ClusterNativeAccess * clustersNative
const o2::tpc::CompressedClustersFlat * tpcCompressedClusters
const AliHLTTPCClusterMCLabel * mcLabelsTPC
const GPUTRDSpacePoint * trdSpacePoints
const GPUTPCTrack * sectorTracks[NSECTORS]
const GPUTRDTrackGPU * trdTracks
const GPUTRDTrackletWord * trdTracklets
const GPUTrackingInOutZS * tpcZS
const AliHLTTPCRawCluster * rawClusters[NSECTORS]
const GPUTPCClusterData * clusterData[NSECTORS]
const GPUTPCGMMergedTrackHit * mergedTrackHits
const GPUTrackingInOutDigits * tpcPackedDigits
const GPUTPCMCInfoCol * mcInfosTPCCol
const GPUTPCGMMergedTrack * mergedTracks
size_t getIndex(const GPUOutputControl &v)
GPUOutputControl sharedClusterMap
GPUOutputControl compressedClusters