#ifdef GPUCA_TPC_GEOMETRY_O2
std::pair<uint32_t, uint32_t> GPUChainTracking::TPCClusterizerDecodeZSCountUpdate(uint32_t iSector, const CfFragment& fragment)
{
  // ...
  uint16_t posInEndpoint = 0;
  uint16_t pagesEndpoint = 0;
  // ...
  for (uint32_t l = pageFirst; l < pageLast; l++) {
    uint16_t pageDigits = mCFContext->fragmentData[fragment.index].pageDigits[iSector][j][posInEndpoint++];
    // ...
  }
  if (pagesEndpoint != mCFContext->fragmentData[fragment.index].pageDigits[iSector][j].size()) {
    // ...
    GPUError("TPC raw page count mismatch in TPCClusterizerDecodeZSCountUpdate: expected %d / buffered %lu", pagesEndpoint, mCFContext->fragmentData[fragment.index].pageDigits[iSector][j].size());
    // ...
    GPUFatal("TPC raw page count mismatch in TPCClusterizerDecodeZSCountUpdate: expected %d / buffered %lu", pagesEndpoint, mCFContext->fragmentData[fragment.index].pageDigits[iSector][j].size());
  }
  // ...
  TPCClusterizerEnsureZSOffsets(iSector, fragment);
  // ...
}
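
// Consistency check for the per-endpoint bookkeeping: re-walks the raw pages selected
// for this fragment and verifies that the buffered page and ADC-sample counts match
// what the decoder will actually see.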
void GPUChainTracking::TPCClusterizerEnsureZSOffsets(uint32_t iSector, const CfFragment& fragment)
{
  // ...
  uint32_t pagesEndpoint = 0;
  const uint32_t nAdcsExpected = data.nDigits[iSector][endpoint];
  const uint32_t nPagesExpected = data.nPages[iSector][endpoint];
  // ...
  uint32_t nAdcDecoded = 0;
  // ...
  const uint32_t pageFirst = (i == data.minMaxCN[iSector][endpoint].zsPtrFirst) ? data.minMaxCN[iSector][endpoint].zsPageFirst : 0;
  // ...
  for (uint32_t j = pageFirst; j < pageLast; j++) {
    // ...
    const uint16_t nSamplesInPage = decHdr->nADCsamples;
    // ...
    nAdcDecoded += nSamplesInPage;
    // ...
  }
  if (pagesEndpoint != nPagesExpected) {
    GPUFatal("Sector %d, Endpoint %d, Fragment %d: TPC raw page count mismatch: expected %d / buffered %u", iSector, endpoint, fragment.index, pagesEndpoint, nPagesExpected);
  }
  if (nAdcDecoded != nAdcsExpected) {
    GPUFatal("Sector %d, Endpoint %d, Fragment %d: TPC ADC count mismatch: expected %u, buffered %u", iSector, endpoint, fragment.index, nAdcsExpected, nAdcDecoded);
  }
  // ...
  nAdcs += nAdcsExpected;
  // ...
}

struct TPCCFDecodeScanTmp {
  int32_t zsPtrFirst, zsPageFirst, zsPtrLast, zsPageLast, hasData, pageCounter;
};
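// Per-fragment running state for the single-pass page scan below: the first/last raw
// page range assigned to the fragment, whether any page matched yet (hasData), and
// the global page counter at the fragment's most recent hit (pageCounter, -1 = none).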

std::pair<uint32_t, uint32_t> GPUChainTracking::TPCClusterizerDecodeZSCount(uint32_t iSector, const CfFragment& fragment)
{
  // ...
  uint32_t nDigits = 0;
  // ...
  memset(endpointAdcSamples, 0, sizeof(endpointAdcSamples));
  // ...
  std::vector<std::pair<CfFragment, TPCCFDecodeScanTmp>> fragments;
  // ...
  fragments.emplace_back(std::pair<CfFragment, TPCCFDecodeScanTmp>{fragment, {0, 0, 0, 0, 0, -1}});
  // ...
  fragments.emplace_back(std::pair<CfFragment, TPCCFDecodeScanTmp>{fragments.back().first.next(), {0, 0, 0, 0, 0, -1}});
  // ...
  std::vector<bool> fragmentExtends(mCFContext->nFragments, false);
  // ...
  uint32_t firstPossibleFragment = 0;
  uint32_t pageCounter = 0;
  uint32_t emptyPages = 0;
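  // The scan visits every ZS page once and assigns it to all time fragments it
  // overlaps. firstPossibleFragment tracks the lowest fragment that can still overlap
  // the current time bin; emptyPages counts intervening pages that carried no digits.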
  // ...
  static bool errorShown = false;
  if (!errorShown) {
    GPUAlarm("Trigger handling only possible with TPC Dense Link Based data, received version %d, disabling", mCFContext->zsVersion);
    // ...
  }
  // ...
  GPUError("Received TPC ZS 8kb page of mixed versions, expected %d, received %d (linkid %d, feeCRU %d, feeEndpoint %d, feelinkid %d)", mCFContext->zsVersion, (int32_t)hdr->version, (int32_t)o2::raw::RDHUtils::getLinkID(*rdh), (int32_t)rdh_utils::getCRU(*rdh), (int32_t)rdh_utils::getEndPoint(*rdh), (int32_t)rdh_utils::getLink(*rdh));
  constexpr size_t bufferSize = 3 * std::max(sizeof(*rdh), sizeof(*hdr)) + 1;
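  // Each dumped byte is rendered as two hex digits plus a trailing space (3 chars,
  // written by snprintf with size 4 including the NUL), so the buffer needs 3 bytes
  // per input byte plus one final terminator.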
  // ...
  for (size_t i = 0; i < sizeof(*rdh); i++) {
    snprintf(dumpBuffer + 3 * i, 4, "%02X ", (int32_t)((uint8_t*)rdh)[i]);
  }
  GPUAlarm("RDH of page: %s", dumpBuffer);
  for (size_t i = 0; i < sizeof(*hdr); i++) {
    snprintf(dumpBuffer + 3 * i, 4, "%02X ", (int32_t)((uint8_t*)hdr)[i]);
  }
  GPUAlarm("Metainfo of page: %s", dumpBuffer);
  // ...
  GPUFatal("Cannot process with invalid TPC ZS data, exiting");
  // ...
  if (hdr2->flags & TPCZSHDRV2::ZSFlags::TriggerWordPresent) {
    // ...
    tmp.orbit = o2::raw::RDHUtils::getHeartBeatOrbit(*rdh);
    // ...
  }
  // ...
  nDigits += hdr->nADCsamples;
  endpointAdcSamples[j] += hdr->nADCsamples;
  // ...
  uint32_t maxTimeBin = timeBin + hdr->nTimeBinSpan;
  if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
    // ...
    if (hdr2->flags & TPCZSHDRV2::ZSFlags::nTimeBinSpanBit8) {
      // ...
    }
  }
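  // In the dense link-based format the time-bin span can exceed 8 bits: the
  // nTimeBinSpanBit8 flag carries bit 8 of the span, presumably extending maxTimeBin
  // by a further 256 time bins in the elided branch.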
  bool extendsInNextPage = false;
  if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
    // ...
    extendsInNextPage = o2::raw::RDHUtils::getHeartBeatOrbit(*nextrdh) == o2::raw::RDHUtils::getHeartBeatOrbit(*rdh) && o2::raw::RDHUtils::getMemorySize(*nextrdh) > sizeof(o2::header::RAWDataHeader);
    // ...
  }
  // ...
  while (firstPossibleFragment && (uint32_t)fragments[firstPossibleFragment - 1].first.last() > timeBin) {
    firstPossibleFragment--;
  }
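  // A new page may start at an earlier time bin than the previous one; walk the
  // fragment window back so that every fragment whose range still covers this time
  // bin is considered again.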
  auto handleExtends = [&](uint32_t ff) {
    if (fragmentExtends[ff]) {
      // ...
      fragments[ff].second.zsPageLast++;
      // ...
      mCFContext->fragmentData[ff].pageDigits[iSector][j].emplace_back(0);
      // ...
      fragmentExtends[ff] = false;
    }
  };
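  // A ZS payload can continue into the following raw page (see extendsInNextPage
  // above); such a continuation page is appended to the fragment's page list with a
  // digit count of 0, since its samples are counted with the page that opened the
  // payload.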
  if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
    for (uint32_t ff = 0; ff < firstPossibleFragment; ff++) {
      // ...
    }
  }
  for (uint32_t f = firstPossibleFragment; f < mCFContext->nFragments; f++) {
    if (timeBin < (uint32_t)fragments[f].first.last() && (uint32_t)fragments[f].first.first() <= maxTimeBin) {
      if (!fragments[f].second.hasData) {
        fragments[f].second.hasData = 1;
        fragments[f].second.zsPtrFirst = k;
        fragments[f].second.zsPageFirst = l;
      } else {
        if (pageCounter > (uint32_t)fragments[f].second.pageCounter + 1) {
          mCFContext->fragmentData[f].nPages[iSector][j] += emptyPages + pageCounter - fragments[f].second.pageCounter - 1;
          for (uint32_t k2 = fragments[f].second.zsPtrLast - 1; k2 <= k; k2++) {
            for (uint32_t l2 = ((int32_t)k2 == fragments[f].second.zsPtrLast - 1) ? fragments[f].second.zsPageLast : 0; l2 < (k2 < k ? mIOPtrs.tpcZS->sector[iSector].nZSPtr[j][k2] : l); l2++) {
              // ...
              mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(0);
              // ...
              const TPCZSHDR* const hdrTmp = (const TPCZSHDR*)(rdh_utils::getLink(o2::raw::RDHUtils::getFEEID(*rdhTmp)) == rdh_utils::DLBZSLinkID ? (pageTmp + o2::raw::RDHUtils::getMemorySize(*rdhTmp) - sizeof(TPCZSHDRV2)) : (pageTmp + sizeof(o2::header::RAWDataHeader)));
              // ...
            }
          }
        } else if (emptyPages) {
          mCFContext->fragmentData[f].nPages[iSector][j] += emptyPages;
          // ...
          for (uint32_t m = 0; m < emptyPages; m++) {
            mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(0);
          }
        }
        // ...
      }
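      // Backfill: pages skipped since this fragment's last assigned page, as well as
      // runs of empty pages, are appended with a digit count of 0 so that the
      // fragment's page list stays contiguous for the decoder.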
      fragments[f].second.zsPtrLast = k + 1;
      fragments[f].second.zsPageLast = l + 1;
      fragments[f].second.pageCounter = pageCounter;
      // ...
      mCFContext->fragmentData[f].nDigits[iSector][j] += hdr->nADCsamples;
      // ...
      mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(hdr->nADCsamples);
      // ...
      fragmentExtends[f] = extendsInNextPage;
    } else {
      if (timeBin < (uint32_t)fragments[f].first.last()) {
        if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
          for (uint32_t ff = f + 1; ff < mCFContext->nFragments; ff++) {
            // ...
          }
        }
        // ...
      } else {
        firstPossibleFragment = f + 1;
      }
    }
  }
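  // Fragments are ordered in time: a page that ends before fragment f begins cannot
  // match any later fragment either, while a fragment that ends before the current
  // time bin is skipped for all subsequent pages by advancing firstPossibleFragment.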
  // ...
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPtrLast = fragments[f].second.zsPtrLast;
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPtrFirst = fragments[f].second.zsPtrFirst;
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPageLast = fragments[f].second.zsPageLast;
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPageFirst = fragments[f].second.zsPageFirst;
  // ...
  if (endpointAdcSamples[i] > mCFContext->nDigitsEndpointMax[iSector]) {
    mCFContext->nDigitsEndpointMax[iSector] = endpointAdcSamples[i];
  }
  // ...
  uint32_t nDigitsFragmentMax = 0;
  // ...
  uint32_t pagesInFragment = 0;
  uint32_t digitsInFragment = 0;
  // ...
  pagesInFragment += mCFContext->fragmentData[i].nPages[iSector][j];
  digitsInFragment += mCFContext->fragmentData[i].nDigits[iSector][j];
  // ...
  nDigitsFragmentMax = std::max(nDigitsFragmentMax, digitsInFragment);
  // ...
  return {nDigits, nDigitsFragmentMax};
}
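
// RunTPCClusterizer_compactPeaks (excerpted below): stream compaction of peak
// candidates via a multi-level parallel scan: scanStart/scanUp build partial sums
// level by level, scanTop finishes the final level, and compactDigits gathers the
// surviving entries into the output buffer.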
// ...
  const uint32_t iSector = clusterer.mISector;
  // ...
  std::vector<size_t> counts;
  // ...
  if (nSteps > clusterer.mNBufs) {
    GPUError("Clusterer buffers exceeded (%u > %u)", nSteps, (int32_t)clusterer.mNBufs);
    // ...
  }
  size_t tmpCount = count;
  // ...
  for (uint32_t i = 1; i < nSteps; i++) {
    counts.push_back(tmpCount);
    // ...
    runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanStart>({GetGrid(tmpCount, clusterer.mScanWorkGroupSize, lane), {iSector}}, i, stage);
    // ...
    runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanUp>({GetGrid(tmpCount, clusterer.mScanWorkGroupSize, lane), {iSector}}, i, tmpCount);
    // ...
  }
  // ...
  runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanTop>({GetGrid(tmpCount, clusterer.mScanWorkGroupSize, lane), {iSector}}, nSteps, tmpCount);
  // ...
  for (uint32_t i = nSteps - 1; i > 1; i--) {
    tmpCount = counts[i - 1];
    // ...
  }
  // ...
  runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::compactDigits>({GetGrid(count, clusterer.mScanWorkGroupSize, lane), {iSector}}, 1, stage, in, out);
  // ...
  for (size_t i = 0; i < nIn; i++) {
    // ...
  }
  // ...
}

std::pair<uint32_t, uint32_t> GPUChainTracking::RunTPCClusterizer_transferZS(int32_t iSector, const CfFragment& fragment, int32_t lane)
{
  // ...
  const auto& retVal = TPCClusterizerDecodeZSCountUpdate(iSector, fragment);
  // ...
  uint32_t nPagesSector = 0;
  // ...
  nPagesSector += nPages;
  // ...
}

int32_t GPUChainTracking::RunTPCClusterizer_prepare(bool restorePointers)
{
  // ...
  if (restorePointers) {
    for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
      // ...
    }
    // ...
  }
  // ...
  mCFContext->tpcMaxTimeBin = maxAllowedTimebin;
  // ...
  uint32_t nDigitsFragmentMax[NSECTORS];
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
    GPUError("Data has invalid RDH version %d, %d required", o2::raw::RDHUtils::getVersion(rdh), o2::raw::RDHUtils::getVersion<o2::header::RAWDataHeader>());
    // ...
    const auto& x = TPCClusterizerDecodeZSCount(iSector, fragmentMax);
    nDigitsFragmentMax[iSector] = x.first;
    // ...
  }
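  // Size the buffers from the largest fragment observed: below a threshold of 40M
  // digits per sector, pad the allocation (up to twice the observed maximum, but no
  // further than halfway to the threshold), presumably to leave headroom for
  // fluctuations between fragments.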
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    uint32_t nDigitsBase = nDigitsFragmentMax[iSector];
    uint32_t threshold = 40000000;
    uint32_t nDigitsScaled = nDigitsBase > threshold ? nDigitsBase : std::min((threshold + nDigitsBase) / 2, 2 * nDigitsBase);
    // ...
  }
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
  if (mCFContext->tpcMaxTimeBin > maxAllowedTimebin) {
    GPUError("Input data has invalid time bin %u > %d", mCFContext->tpcMaxTimeBin, maxAllowedTimebin);
    // ...
    mCFContext->tpcMaxTimeBin = maxAllowedTimebin;
  }
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
}

int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput)
{
  // ...
  if (param().rec.fwdTPCDigitsAsClusters) {
    // ...
  }
#ifdef GPUCA_TPC_GEOMETRY_O2
  // ...
  if (RunTPCClusterizer_prepare(mPipelineNotifyCtx && GetProcessingSettings().doublePipelineClusterizer)) {
    // ...
  }
  // ...
  float tpcHitLowOccupancyScalingFactor = 1.f;
  // ...
  if (nHitsBase < threshold) {
    // ...
    tpcHitLowOccupancyScalingFactor = std::min(3.5f, (float)threshold / nHitsBase);
  }
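  // For low-occupancy data the per-hit scaling factor is inflated (capped at 3.5),
  // evidently so that buffers derived from the hit count do not end up too small for
  // sparse timeframes.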
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
  RunTPCClusterizer_prepare(true);
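  // In the double-pipeline case the second prepare call runs with restorePointers =
  // true, reusing the counts gathered by the first call instead of re-scanning the
  // input (see the restorePointers branch in RunTPCClusterizer_prepare above).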
  // ...
  size_t nClsTotal = 0;
  // ...
  std::unique_ptr<ClusterNative[]> tmpNativeClusterBuffer;
  // ...
  bool buildNativeGPU = doGPU && NeedTPCClustersOnGPU();
  // ...
  if (buildNativeGPU) {
    // ...
  }
  // ...
  if (mWaitForFinalInputs) {
    GPUFatal("Cannot use waitForFinalInput callback without delayed output");
  }
  // ...
  tmpNativeClusters = mInputsHost->mPclusterNativeOutput;
  // ...
  tmpNativeClusterBuffer = std::make_unique<ClusterNative[]>(mInputsHost->mNClusterNative);
  tmpNativeClusters = tmpNativeClusterBuffer.get();
  // ...
  if (propagateMCLabels) {
    // ...
  }
  // ...
  int8_t transferRunning[NSECTORS] = {0};
  // ...
  auto notifyForeignChainFinished = [this]() {
    if (mPipelineNotifyCtx) {
      // ...
      std::lock_guard<std::mutex> lock(mPipelineNotifyCtx->mutex);
      mPipelineNotifyCtx->ready = true;
      // ...
      mPipelineNotifyCtx->cond.notify_one();
    }
  };
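  // Wakes the other pipeline chain: sets the ready flag under the shared mutex and
  // notifies the condition variable so the foreign chain waiting on it can proceed.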
  // ...
  bool synchronizeCalibUpdate = false;
  // ...
  for (CfFragment fragment = mCFContext->fragmentFirst; !fragment.isEnd(); fragment = fragment.next()) {
    // ...
    GPUInfo("Processing time bins [%d, %d) for sectors %d to %d", fragment.start, fragment.last(), iSectorBase, iSectorBase + GetProcessingSettings().nTPCClustererLanes - 1);
    // ...
    if (doGPU && fragment.index != 0) {
      SynchronizeStream(lane);
    }
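    // Fragments are processed in time order, nTPCClustererLanes sectors at a time;
    // before a lane is reused for the next fragment, its GPU stream must have
    // finished the previous fragment's work.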
    uint32_t iSector = iSectorBase + lane;
    // ...
    bool setDigitsOnHost = (not doGPU && not mIOPtrs.tpcZS) || propagateMCLabels;
    // ...
    size_t numDigits = inDigits->nTPCDigits[iSector];
    if (setDigitsOnGPU) {
      GPUMemCpy(RecoStep::TPCClusterFinding, clustererShadow.mPdigits, inDigits->tpcDigits[iSector], sizeof(clustererShadow.mPdigits[0]) * numDigits, lane, true);
    }
    if (setDigitsOnHost) {
      // ...
    }
    // ...
    using ChargeMapType = decltype(*clustererShadow.mPchargeMap);
    using PeakMapType = decltype(*clustererShadow.mPpeakMap);
    // ...
    if (fragment.index == 0) {
      // ...
    }
    // ...
    if (propagateMCLabels && fragment.index == 0) {
      // ...
      GPUFatal("MC label container missing, sector %d", iSector);
      // ...
    }
    // ...
    runKernel<GPUTPCCFChargeMapFiller, GPUTPCCFChargeMapFiller::findFragmentStart>({GetGrid(1, lane), {iSector}}, mIOPtrs.tpcZS == nullptr);
    // ...
    } else if (propagateMCLabels) {
      // ...
    }
    // ...
    GPUFatal("Data with invalid TPC ZS mode (%d) received", mCFContext->zsVersion);
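    // One decode kernel per ZS format generation: GPUTPCCFDecodeZS for the row-based
    // formats, GPUTPCCFDecodeZSLink for link-based ZS, and GPUTPCCFDecodeZSDenseLink
    // for dense link-based ZS; the GPUFatal above catches any unknown zsVersion.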
    // ...
    runKernel<GPUTPCCFDecodeZS>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF);
    // ...
    runKernel<GPUTPCCFDecodeZSLink>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF);
    // ...
    runKernel<GPUTPCCFDecodeZSDenseLink>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF);
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    int32_t nextSector = iSector;
    // ...
    if (nextSector < NSECTORS && mIOPtrs.tpcZS && mCFContext->nPagesSector[nextSector] && mCFContext->zsVersion != -1 && !mCFContext->abandonTimeframe) {
      // ...
    }
    if (propagateMCLabels) {
      // ...
    }
    // ...
    bool checkForNoisyPads = (rec()->GetParam().rec.tpc.maxTimeBinAboveThresholdIn1000Bin > 0) || (rec()->GetParam().rec.tpc.maxConsecTimeBinAboveThreshold > 0);
    checkForNoisyPads &= (rec()->GetParam().rec.tpc.noisyPadsQuickCheck ? fragment.index == 0 : true);
    // ...
    if (checkForNoisyPads) {
      // ...
      runKernel<GPUTPCCFCheckPadBaseline>({GetGridBlk(nBlocks, lane), {iSector}});
      // ...
    }
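    // Optional noisy-pad detection: GPUTPCCFCheckPadBaseline flags pads with too many
    // time bins above threshold (see the two rec.tpc limits above); with
    // noisyPadsQuickCheck only the first fragment is inspected.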
    RunTPCClusterizer_compactPeaks(clusterer, clustererShadow, 0, doGPU, lane);
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    runKernel<GPUTPCCFNoiseSuppression, GPUTPCCFNoiseSuppression::noiseSuppression>({GetGrid(clusterer.mPmemory->counters.nPeaks, lane), {iSector}});
    // ...
    RunTPCClusterizer_compactPeaks(clusterer, clustererShadow, 1, doGPU, lane);
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    if (fragment.index == 0) {
      // ...
      if (transferRunning[lane] == 1) {
        // ...
        transferRunning[lane] = 2;
      }
    }
    // ...
    if (doGPU && propagateMCLabels) {
      // ...
    }
    // ...
    laneHasData[lane] = true;
    // ...
    size_t nClsFirst = nClsTotal;
    bool anyLaneHasData = false;
    for (int32_t lane = 0; lane < maxLane; lane++) {
      uint32_t iSector = iSectorBase + lane;
      // ...
      if (laneHasData[lane]) {
        anyLaneHasData = true;
        // ...
        clusterer.raiseError(GPUErrors::ERROR_CF_GLOBAL_CLUSTER_OVERFLOW, iSector * 1000 + j, nClsTotal + clusterer.mPclusterInRow[j], mInputsHost->mNClusterNative);
        // ...
        if (buildNativeGPU) {
          // ...
        } else if (buildNativeHost) {
          // ...
        }
        // ...
      }
      if (transferRunning[lane]) {
        // ...
        transferRunning[lane] = 1;
      }
      // ...
      if (not propagateMCLabels || not laneHasData[lane]) {
        assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
        // ...
      }
      // ...
      assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
    }
    if (propagateMCLabels) {
      for (int32_t lane = 0; lane < maxLane; lane++) {
        // ...
      }
    }
    // ...
    if (buildNativeHost && buildNativeGPU && anyLaneHasData) {
      // ...
      mOutputQueue.emplace_back(outputQueueEntry{(void*)((char*)&tmpNativeClusters[nClsFirst] - (char*)&tmpNativeClusters[0]), &mInputsShadow->mPclusterNativeBuffer[nClsFirst], (nClsTotal - nClsFirst) * sizeof(tmpNativeClusters[0]), RecoStep::TPCClusterFinding});
      // ...
      GPUMemCpy(RecoStep::TPCClusterFinding, (void*)&tmpNativeClusters[nClsFirst], (const void*)&mInputsShadow->mPclusterNativeBuffer[nClsFirst], (nClsTotal - nClsFirst) * sizeof(tmpNativeClusters[0]), mRec->NStreams() - 1, false);
    }
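    // With the double pipeline the copy is deferred: the queue entry evidently stores
    // the byte offset into the output buffer (the pointer difference cast to void*)
    // rather than an absolute address, since the host buffer may still move before
    // the queued copy runs; otherwise the clusters are copied back immediately on the
    // last stream.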
    if (mWaitForFinalInputs && iSectorBase >= 21 && (int32_t)iSectorBase < 21 + GetProcessingSettings().nTPCClustererLanes) {
      notifyForeignChainFinished();
    }
    if (mWaitForFinalInputs && iSectorBase >= 30 && (int32_t)iSectorBase < 30 + GetProcessingSettings().nTPCClustererLanes) {
      mWaitForFinalInputs();
    }
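    // The hard-coded sector bases (21 and 30) stagger the foreign-chain notification
    // and the wait for final inputs partway through the sector loop, presumably to
    // overlap them with the remaining cluster-finding work.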
  }
  // ...
  if (transferRunning[i]) {
    // ...
  }
  // ...
  if (triggerOutput && triggerOutput->allocator) {
    // ...
  }
  // ...
  if (propagateMCLabels) {
    // ...
    std::pair<ConstMCLabelContainer*, ConstMCLabelContainerView*> buffer;
    // ...
    throw std::runtime_error("Cluster MC Label buffer missing");
    // ...
    buffer = {&container->first, &container->second};
    // ...
    assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
    assert(propagateMCLabels ? mcLinearLabels.data.size() >= nClsTotal : true);
    // ...
    mcLabelsConstView = buffer.second;
    // ...
  }
  // ...
  tmpNativeClusters = mInputsHost->mPclusterNativeOutput;
  // ...
  if (buildNativeHost) {
    // ...
    auto allocator = [this, &tmpNativeClusters](size_t size) {
      // ...
      return (tmpNativeClusters = this->mInputsHost->mPclusterNativeOutput);
    };
    RunTPCClusterFilter(tmpNativeAccess, allocator, false);
  }
  if (!mWaitForFinalInputs) {
    notifyForeignChainFinished();
  }
  // ...
  if (buildNativeGPU) {
    // ...
  }
  // ...
  mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
  // ...
  if (doGPU && synchronizeOutput) {
    // ...
  }
  if (doGPU && synchronizeCalibUpdate) {
    // ...
  }
  // ...
  if (buildNativeGPU) {
    GPUMemCpy(RecoStep::TPCClusterFinding, (void*)mInputsShadow->mPclusterNativeBuffer, (const void*)tmpNativeClusters, nClsTotal * sizeof(tmpNativeClusters[0]), -1, true);
  }
  if (mPipelineNotifyCtx) {
    // ...
    mPipelineNotifyCtx = nullptr;
  }