#include "GPUDefParametersRuntime.h"

#ifdef GPUCA_TPC_GEOMETRY_O2
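// TPCClusterizerDecodeZSCountUpdate: refreshes the per-fragment ZS page/digit
// bookkeeping of one sector and cross-checks the page counts buffered during
// the initial scan (TPCClusterizerDecodeZSCount) against the current fragment.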
std::pair<uint32_t, uint32_t> GPUChainTracking::TPCClusterizerDecodeZSCountUpdate(uint32_t iSector, const CfFragment& fragment)
{
  // ...
  uint16_t posInEndpoint = 0;
  uint16_t pagesEndpoint = 0;
  // ...
  for (uint32_t l = pageFirst; l < pageLast; l++) {
    uint16_t pageDigits = mCFContext->fragmentData[fragment.index].pageDigits[iSector][j][posInEndpoint++];
    // ...
  }
  // ...
  if (pagesEndpoint != mCFContext->fragmentData[fragment.index].pageDigits[iSector][j].size()) {
    GPUError("TPC raw page count mismatch in TPCClusterizerDecodeZSCountUpdate: expected %d / buffered %lu", pagesEndpoint, mCFContext->fragmentData[fragment.index].pageDigits[iSector][j].size());
    // ...
    GPUFatal("TPC raw page count mismatch in TPCClusterizerDecodeZSCountUpdate: expected %d / buffered %lu", pagesEndpoint, mCFContext->fragmentData[fragment.index].pageDigits[iSector][j].size());
  }
  // ...
  TPCClusterizerEnsureZSOffsets(iSector, fragment);
  // ...
}
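// TPCClusterizerEnsureZSOffsets: walks the raw pages of every endpoint and
// verifies that the page and ADC-sample counts decoded from the headers match
// the counts buffered by the scan; any mismatch is fatal, since the computed
// buffer offsets would be invalid.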
void GPUChainTracking::TPCClusterizerEnsureZSOffsets(uint32_t iSector, const CfFragment& fragment)
{
  // ...
  uint32_t pagesEndpoint = 0;
  const uint32_t nAdcsExpected = data.nDigits[iSector][endpoint];
  const uint32_t nPagesExpected = data.nPages[iSector][endpoint];
  // ...
  uint32_t nAdcDecoded = 0;
  // ...
  const uint32_t pageFirst = (i == data.minMaxCN[iSector][endpoint].zsPtrFirst) ? data.minMaxCN[iSector][endpoint].zsPageFirst : 0;
  // ...
  for (uint32_t j = pageFirst; j < pageLast; j++) {
    // ...
    const uint16_t nSamplesInPage = decHdr->nADCsamples;
    // ...
    nAdcDecoded += nSamplesInPage;
    // ...
  }
  // ...
  if (pagesEndpoint != nPagesExpected) {
    GPUFatal("Sector %d, Endpoint %d, Fragment %d: TPC raw page count mismatch: expected %d / buffered %u", iSector, endpoint, fragment.index, pagesEndpoint, nPagesExpected);
  }
  if (nAdcDecoded != nAdcsExpected) {
    GPUFatal("Sector %d, Endpoint %d, Fragment %d: TPC ADC count mismatch: expected %u, buffered %u", iSector, endpoint, fragment.index, nAdcsExpected, nAdcDecoded);
  }
  // ...
  nAdcs += nAdcsExpected;
  // ...
}
struct TPCCFDecodeScanTmp {
  int32_t zsPtrFirst, zsPageFirst, zsPtrLast, zsPageLast, hasData, pageCounter;
};
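// Scratch state kept per candidate fragment while scanning the raw pages in
// TPCClusterizerDecodeZSCount below: first/last ZS pointer and page of the
// fragment, whether the fragment has received data yet, and the page counter
// of the last page attributed to it (used to detect skipped/empty pages).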
std::pair<uint32_t, uint32_t> GPUChainTracking::TPCClusterizerDecodeZSCount(uint32_t iSector, const CfFragment& fragment)
{
  // ...
  uint32_t nDigits = 0;
  // ...
  memset(endpointAdcSamples, 0, sizeof(endpointAdcSamples));
  // ...
  std::vector<std::pair<CfFragment, TPCCFDecodeScanTmp>> fragments;
  // ...
  fragments.emplace_back(std::pair<CfFragment, TPCCFDecodeScanTmp>{fragment, {0, 0, 0, 0, 0, -1}});
  // ...
  fragments.emplace_back(std::pair<CfFragment, TPCCFDecodeScanTmp>{fragments.back().first.next(), {0, 0, 0, 0, 0, -1}});
  // ...
  std::vector<bool> fragmentExtends(mCFContext->nFragments, false);
  // ...
  uint32_t firstPossibleFragment = 0;
  uint32_t pageCounter = 0;
  uint32_t emptyPages = 0;
  // ...
  static bool errorShown = false;
  if (errorShown == false) {
    GPUAlarm("Trigger handling only possible with TPC Dense Link Based data, received version %d, disabling", mCFContext->zsVersion);
  }
  // ...
  GPUError("Received TPC ZS 8kb page of mixed versions, expected %d, received %d (linkid %d, feeCRU %d, feeEndpoint %d, feelinkid %d)", mCFContext->zsVersion, (int32_t)hdr->version, (int32_t)o2::raw::RDHUtils::getLinkID(*rdh), (int32_t)rdh_utils::getCRU(*rdh), (int32_t)rdh_utils::getEndPoint(*rdh), (int32_t)rdh_utils::getLink(*rdh));
  constexpr size_t bufferSize = 3 * std::max(sizeof(*rdh), sizeof(*hdr)) + 1;
  // ...
  for (size_t i = 0; i < sizeof(*rdh); i++) {
    snprintf(dumpBuffer + 3 * i, 4, "%02X ", (int32_t)((uint8_t*)rdh)[i]);
  }
  GPUAlarm("RDH of page: %s", dumpBuffer);
  for (size_t i = 0; i < sizeof(*hdr); i++) {
    snprintf(dumpBuffer + 3 * i, 4, "%02X ", (int32_t)((uint8_t*)hdr)[i]);
  }
  GPUAlarm("Metainfo of page: %s", dumpBuffer);
  // ...
  GPUFatal("Cannot process with invalid TPC ZS data, exiting");
  // ...
  if (hdr2->flags & TPCZSHDRV2::ZSFlags::TriggerWordPresent) {
    // ...
    tmp.orbit = o2::raw::RDHUtils::getHeartBeatOrbit(*rdh);
    // ...
  }
  // ...
  nDigits += hdr->nADCsamples;
  endpointAdcSamples[j] += hdr->nADCsamples;
  // ...
  uint32_t maxTimeBin = timeBin + hdr->nTimeBinSpan;
  if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
    // ...
    if (hdr2->flags & TPCZSHDRV2::ZSFlags::nTimeBinSpanBit8) {
      // ...
    }
  }
  // ...
  bool extendsInNextPage = false;
  if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
    // ...
    extendsInNextPage = o2::raw::RDHUtils::getHeartBeatOrbit(*nextrdh) == o2::raw::RDHUtils::getHeartBeatOrbit(*rdh) && o2::raw::RDHUtils::getMemorySize(*nextrdh) > sizeof(o2::header::RAWDataHeader);
  }
  // ...
  while (firstPossibleFragment && (uint32_t)fragments[firstPossibleFragment - 1].first.last() > timeBin) {
    firstPossibleFragment--;
  }
  // ...
  auto handleExtends = [&](uint32_t ff) {
    if (fragmentExtends[ff]) {
      // ...
      fragments[ff].second.zsPageLast++;
      // ...
      mCFContext->fragmentData[ff].pageDigits[iSector][j].emplace_back(0);
      // ...
      fragmentExtends[ff] = false;
    }
  };
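  // handleExtends: in the dense link-based format a data block may spill into
  // the following raw page; when that page is reached, the fragment's page
  // range is extended by one and a zero digit-count placeholder is recorded
  // for it.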
  // ...
  if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
    for (uint32_t ff = 0; ff < firstPossibleFragment; ff++) {
      // ...
    }
  }
  // ...
  for (uint32_t f = firstPossibleFragment; f < mCFContext->nFragments; f++) {
    if (timeBin < (uint32_t)fragments[f].first.last() && (uint32_t)fragments[f].first.first() <= maxTimeBin) {
      if (!fragments[f].second.hasData) {
        fragments[f].second.hasData = 1;
        fragments[f].second.zsPtrFirst = k;
        fragments[f].second.zsPageFirst = l;
      } else {
        if (pageCounter > (uint32_t)fragments[f].second.pageCounter + 1) {
          mCFContext->fragmentData[f].nPages[iSector][j] += emptyPages + pageCounter - fragments[f].second.pageCounter - 1;
          // ...
          for (uint32_t k2 = fragments[f].second.zsPtrLast - 1; k2 <= k; k2++) {
            for (uint32_t l2 = ((int32_t)k2 == fragments[f].second.zsPtrLast - 1) ? fragments[f].second.zsPageLast : 0; l2 < (k2 < k ? mIOPtrs.tpcZS->sector[iSector].nZSPtr[j][k2] : l); l2++) {
              // ...
              mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(0);
              // ...
              const TPCZSHDR* const hdrTmp = (const TPCZSHDR*)(rdh_utils::getLink(o2::raw::RDHUtils::getFEEID(*rdhTmp)) == rdh_utils::DLBZSLinkID ? (pageTmp + o2::raw::RDHUtils::getMemorySize(*rdhTmp) - sizeof(TPCZSHDRV2)) : (pageTmp + sizeof(o2::header::RAWDataHeader)));
              // ...
            }
          }
        } else if (emptyPages) {
          mCFContext->fragmentData[f].nPages[iSector][j] += emptyPages;
          // ...
          for (uint32_t m = 0; m < emptyPages; m++) {
            mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(0);
          }
        }
      }
      fragments[f].second.zsPtrLast = k + 1;
      fragments[f].second.zsPageLast = l + 1;
      fragments[f].second.pageCounter = pageCounter;
      // ...
      mCFContext->fragmentData[f].nDigits[iSector][j] += hdr->nADCsamples;
      // ...
      mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(hdr->nADCsamples);
      // ...
      fragmentExtends[f] = extendsInNextPage;
    } else {
      if (timeBin < (uint32_t)fragments[f].first.last()) {
        if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
          for (uint32_t ff = f + 1; ff < mCFContext->nFragments; ff++) {
            // ...
          }
        }
        // ...
      }
      firstPossibleFragment = f + 1;
    }
  }
  // ...
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPtrLast = fragments[f].second.zsPtrLast;
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPtrFirst = fragments[f].second.zsPtrFirst;
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPageLast = fragments[f].second.zsPageLast;
  mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPageFirst = fragments[f].second.zsPageFirst;
  // ...
  if (endpointAdcSamples[i] > mCFContext->nDigitsEndpointMax[iSector]) {
    mCFContext->nDigitsEndpointMax[iSector] = endpointAdcSamples[i];
  }
  // ...
  uint32_t nDigitsFragmentMax = 0;
  // ...
  uint32_t pagesInFragment = 0;
  uint32_t digitsInFragment = 0;
  // ...
  pagesInFragment += mCFContext->fragmentData[i].nPages[iSector][j];
  digitsInFragment += mCFContext->fragmentData[i].nDigits[iSector][j];
  // ...
  nDigitsFragmentMax = std::max(nDigitsFragmentMax, digitsInFragment);
  // ...
  return {nDigits, nDigitsFragmentMax};
}
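// The returned pair feeds the buffer sizing in RunTPCClusterizer_prepare:
// first is the total number of ADC samples (digits) decoded for the sector,
// second the largest digit count of any single time fragment.

// The following excerpt is from RunTPCClusterizer_compactPeaks(clusterer,
// clustererShadow, stage, doGPU, lane): a multi-level parallel prefix sum
// (stream compaction) over the peak candidates.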
  const uint32_t iSector = clusterer.mISector;
  // ...
  std::vector<size_t> counts;
  // ...
  if (nSteps > clusterer.mNBufs) {
    GPUError("Clusterer buffers exceeded (%u > %u)", nSteps, (int32_t)clusterer.mNBufs);
    // ...
  }
  // ...
  size_t tmpCount = count;
  // ...
  for (uint32_t i = 1; i < nSteps; i++) {
    counts.push_back(tmpCount);
    // ...
    runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanStart>({GetGrid(tmpCount, scanWorkgroupSize, lane), {iSector}}, i, stage);
    // ...
    runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanUp>({GetGrid(tmpCount, scanWorkgroupSize, lane), {iSector}}, i, tmpCount);
    // ...
    tmpCount = (tmpCount + scanWorkgroupSize - 1) / scanWorkgroupSize;
  }
  // ...
  runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanTop>({GetGrid(tmpCount, scanWorkgroupSize, lane), {iSector}}, nSteps, tmpCount);
  // ...
  for (uint32_t i = nSteps - 1; i > 1; i--) {
    tmpCount = counts[i - 1];
    runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanDown>({GetGrid(tmpCount - scanWorkgroupSize, scanWorkgroupSize, lane), {iSector}}, i, scanWorkgroupSize, tmpCount);
  }
  // ...
  runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::compactDigits>({GetGrid(count, scanWorkgroupSize, lane), {iSector}}, 1, stage, in, out);
  // ...
  for (size_t i = 0; i < nIn; i++) {
    // ...
  }
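// Illustrative sketch (not from the source): each scanUp level above shrinks
// the item count by a ceil-division by scanWorkgroupSize, so the number of
// levels the buffers must accommodate (what getNSteps() provides) behaves
// like this helper:
static uint32_t scanLevels(size_t items, uint32_t scanWorkgroupSize)
{
  uint32_t levels = 1;
  while (items > scanWorkgroupSize) {
    items = (items + scanWorkgroupSize - 1) / scanWorkgroupSize; // one partial-sum level
    levels++;
  }
  return levels; // e.g. 1000000 items at workgroup size 512: 1000000 -> 1954 -> 4, i.e. 3 levels
}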
std::pair<uint32_t, uint32_t> GPUChainTracking::RunTPCClusterizer_transferZS(int32_t iSector, const CfFragment& fragment, int32_t lane)
{
  // ...
  const auto& retVal = TPCClusterizerDecodeZSCountUpdate(iSector, fragment);
  // ...
  uint32_t nPagesSector = 0;
  // ...
  nPagesSector += nPages;
  // ...
}
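// RunTPCClusterizer_prepare: per-timeframe setup. It scans the ZS raw input of
// every sector via TPCClusterizerDecodeZSCount, derives buffer sizes from the
// resulting digit counts, and clamps the maximum time bin; when called with
// restorePointers == true it only restores the buffer pointers for the second
// round of the double-pipeline clusterizer.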
int32_t GPUChainTracking::RunTPCClusterizer_prepare(bool restorePointers)
{
  // ...
  if (restorePointers) {
    for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
      // ...
    }
    // ...
  }
  // ...
  mCFContext->tpcMaxTimeBin = maxAllowedTimebin;
  // ...
  uint32_t nDigitsFragmentMax[NSECTORS];
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
    GPUError("Data has invalid RDH version %d, %d required\n", o2::raw::RDHUtils::getVersion(rdh), o2::raw::RDHUtils::getVersion<o2::header::RAWDataHeader>());
    // ...
    const auto& x = TPCClusterizerDecodeZSCount(iSector, fragmentMax);
    nDigitsFragmentMax[iSector] = x.first;
    // ...
  }
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    uint32_t nDigitsBase = nDigitsFragmentMax[iSector];
    uint32_t threshold = 40000000;
    uint32_t nDigitsScaled = nDigitsBase > threshold ? nDigitsBase : std::min((threshold + nDigitsBase) / 2, 2 * nDigitsBase);
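    // Worked example of the scaling above: with nDigitsBase = 10M (below the
    // 40M threshold), nDigitsScaled = min((40M + 10M) / 2, 2 * 10M)
    //                              = min(25M, 20M) = 20M;
    // above the threshold the raw count is used unchanged.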
    // ...
  }
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
  if (mCFContext->tpcMaxTimeBin > maxAllowedTimebin) {
    GPUError("Input data has invalid time bin %u > %d", mCFContext->tpcMaxTimeBin, maxAllowedTimebin);
    // ...
    mCFContext->tpcMaxTimeBin = maxAllowedTimebin;
  }
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
}

// ...
if (param().rec.fwdTPCDigitsAsClusters) {
  // ...
}
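// From here on: the main entry point RunTPCClusterizer (declared as
// int32_t RunTPCClusterizer(bool synchronizeOutput = true)). The check above
// short-circuits clusterization when digits are to be forwarded directly as
// clusters (cf. ForwardTPCDigits()).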
#ifdef GPUCA_TPC_GEOMETRY_O2
  // ...
  if (RunTPCClusterizer_prepare(mPipelineNotifyCtx && GetProcessingSettings().doublePipelineClusterizer)) {
    // ...
  }
  // ...
  float tpcHitLowOccupancyScalingFactor = 1.f;
  // ...
  if (nHitsBase < threshold) {
    // ...
    tpcHitLowOccupancyScalingFactor = std::min(3.5f, (float)threshold / nHitsBase);
  }
  // ...
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
    // ...
  }
  // ...
  RunTPCClusterizer_prepare(true);
  // ...
  int32_t deviceId = -1;
  // ...
  nnApplications[lane].init(nn_settings);
  if (nnApplications[lane].mModelsUsed[0]) {
    SetONNXGPUStream(*(nnApplications[lane].mModelClass).getSessionOptions(), lane, &deviceId);
    (nnApplications[lane].mModelClass).setDeviceId(deviceId);
    if (nnApplications[lane].mModelClass.getIntraOpNumThreads() > maxThreads) {
      // ...
    }
    (nnApplications[lane].mModelClass).initEnvironment();
    // ...
  }
  if (nnApplications[lane].mModelsUsed[1]) {
    SetONNXGPUStream(*(nnApplications[lane].mModelReg1).getSessionOptions(), lane, &deviceId);
    (nnApplications[lane].mModelReg1).setDeviceId(deviceId);
    if (nnApplications[lane].mModelReg1.getIntraOpNumThreads() > maxThreads) {
      // ...
    }
    (nnApplications[lane].mModelReg1).initEnvironment();
    // ...
    (nnApplications[lane].mModelReg1).initSession();
  }
  if (nnApplications[lane].mModelsUsed[2]) {
    SetONNXGPUStream(*(nnApplications[lane].mModelReg2).getSessionOptions(), lane, &deviceId);
    (nnApplications[lane].mModelReg2).setDeviceId(deviceId);
    if (nnApplications[lane].mModelReg2.getIntraOpNumThreads() > maxThreads) {
      // ...
    }
    (nnApplications[lane].mModelReg2).initEnvironment();
    // ...
    (nnApplications[lane].mModelReg2).initSession();
  }
  if (nn_settings.nnClusterizerVerbosity < 3) {
    LOG(info) << "(ORT) Allocated ONNX stream for lane " << lane << " and device " << deviceId;
  }
  // ...
  int32_t lane = sector % numLanes;
  // ...
  clustererNNShadow.mISector = sector;
  // ...
  LOG(info) << "Size of nnApplications[lane]: " << sizeof(nnApplications[0]) << " bytes";
  // ...
  size_t nClsTotal = 0;
  // ...
  std::unique_ptr<ClusterNative[]> tmpNativeClusterBuffer;
  // ...
  bool buildNativeGPU = doGPU && NeedTPCClustersOnGPU();
  // ...
  if (buildNativeGPU) {
    // ...
  }
  if (mWaitForFinalInputs) {
    GPUFatal("Cannot use waitForFinalInput callback without delayed output");
  }
  // ...
  tmpNativeClusters = mInputsHost->mPclusterNativeOutput;
  // ...
  tmpNativeClusterBuffer = std::make_unique<ClusterNative[]>(mInputsHost->mNClusterNative);
  tmpNativeClusters = tmpNativeClusterBuffer.get();
  // ...
  if (propagateMCLabels) {
    // ...
  }
  // ...
  int8_t transferRunning[NSECTORS] = {0};
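  // With the double-pipeline clusterizer, a foreign chain may wait on
  // mPipelineNotifyCtx; the lambda below wakes it through the shared
  // mutex / condition variable.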
  auto notifyForeignChainFinished = [this]() {
    if (mPipelineNotifyCtx) {
      // ...
      std::lock_guard<std::mutex> lock(mPipelineNotifyCtx->mutex);
      mPipelineNotifyCtx->ready = true;
      // ...
      mPipelineNotifyCtx->cond.notify_one();
    }
  };
  // ...
  bool synchronizeCalibUpdate = false;
  // ...
  for (CfFragment fragment = mCFContext->fragmentFirst; !fragment.isEnd(); fragment = fragment.next()) {
    // ...
    GPUInfo("Processing time bins [%d, %d) for sectors %d to %d", fragment.start, fragment.last(), iSectorBase, iSectorBase + GetProcessingSettings().nTPCClustererLanes - 1);
    // ...
    if (doGPU && fragment.index != 0) {
      SynchronizeStream(lane);
    }
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    bool setDigitsOnHost = (not doGPU && not mIOPtrs.tpcZS) || propagateMCLabels;
    // ...
    size_t numDigits = inDigits->nTPCDigits[iSector];
    if (setDigitsOnGPU) {
      GPUMemCpy(RecoStep::TPCClusterFinding, clustererShadow.mPdigits, inDigits->tpcDigits[iSector], sizeof(clustererShadow.mPdigits[0]) * numDigits, lane, true);
    }
    if (setDigitsOnHost) {
      // ...
    }
    // ...
    using ChargeMapType = decltype(*clustererShadow.mPchargeMap);
    using PeakMapType = decltype(*clustererShadow.mPpeakMap);
    // ...
    if (fragment.index == 0) {
      // ...
    }
    // ...
    if (propagateMCLabels && fragment.index == 0) {
      // ...
      GPUFatal("MC label container missing, sector %d", iSector);
      // ...
    }
    // ...
    runKernel<GPUTPCCFChargeMapFiller, GPUTPCCFChargeMapFiller::findFragmentStart>({GetGrid(1, lane), {iSector}}, mIOPtrs.tpcZS == nullptr);
    // ...
    } else if (propagateMCLabels) {
      // ...
    }
    // ...
    GPUFatal("Data with invalid TPC ZS mode (%d) received", mCFContext->zsVersion);
    // ...
    runKernel<GPUTPCCFDecodeZS>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF);
    // ...
    runKernel<GPUTPCCFDecodeZSLink>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF);
    // ...
    runKernel<GPUTPCCFDecodeZSDenseLink>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF);
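    // One decode kernel per ZS format version: GPUTPCCFDecodeZS for the
    // row-based formats (ZSVersionRowBased10BitADC / 12BitADC),
    // GPUTPCCFDecodeZSLink for ZSVersionLinkBasedWithMeta, and
    // GPUTPCCFDecodeZSDenseLink for ZSVersionDenseLinkBased.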
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    int32_t nextSector = iSector;
    // ...
    if (nextSector < NSECTORS && mIOPtrs.tpcZS && mCFContext->nPagesSector[nextSector] && mCFContext->zsVersion != -1 && !mCFContext->abandonTimeframe) {
      // ...
    }
    // ...
    if (propagateMCLabels) {
      // ...
    }
    // ...
    bool checkForNoisyPads = (rec()->GetParam().rec.tpc.maxTimeBinAboveThresholdIn1000Bin > 0) || (rec()->GetParam().rec.tpc.maxConsecTimeBinAboveThreshold > 0);
    checkForNoisyPads &= (rec()->GetParam().rec.tpc.noisyPadsQuickCheck ? fragment.index == 0 : true);
    // ...
    if (checkForNoisyPads) {
      // ...
      runKernel<GPUTPCCFCheckPadBaseline>({GetGridBlk(nBlocks, lane), {iSector}});
      // ...
    }
    // ...
    RunTPCClusterizer_compactPeaks(clusterer, clustererShadow, 0, doGPU, lane);
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    runKernel<GPUTPCCFNoiseSuppression, GPUTPCCFNoiseSuppression::noiseSuppression>({GetGrid(clusterer.mPmemory->counters.nPeaks, lane), {iSector}});
    // ...
    RunTPCClusterizer_compactPeaks(clusterer, clustererShadow, 1, doGPU, lane);
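    // Stream compaction runs twice per fragment: stage 0 compacts the peak
    // candidates found on the charge map, stage 1 the peaks surviving the
    // noise-suppression kernel above (cf. mPpeakPositions and
    // mPfilteredPeakPositions in GPUTPCClusterFinder).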
    // ...
    uint32_t iSector = iSectorBase + lane;
    // ...
    if (fragment.index == 0) {
      // ...
      if (transferRunning[lane] == 1) {
        // ...
        transferRunning[lane] = 2;
      }
      // ...
    }
    // ...
    int withMC = (doGPU && propagateMCLabels);
    // ...
    GPUFatal("Project not compiled with neural network clusterization. Aborting.");
    // ...
    if (doGPU && propagateMCLabels) {
      // ...
    }
    // ...
    laneHasData[lane] = true;
    // ...
    size_t nClsFirst = nClsTotal;
    bool anyLaneHasData = false;
    for (int32_t lane = 0; lane < maxLane; lane++) {
      uint32_t iSector = iSectorBase + lane;
      // ...
      if (laneHasData[lane]) {
        anyLaneHasData = true;
        // ...
        clusterer.raiseError(GPUErrors::ERROR_CF_GLOBAL_CLUSTER_OVERFLOW, iSector * 1000 + j, nClsTotal + clusterer.mPclusterInRow[j], mInputsHost->mNClusterNative);
        // ...
        if (buildNativeGPU) {
          // ...
        } else if (buildNativeHost) {
          // ...
        }
        // ...
        if (transferRunning[lane]) {
          // ...
        }
        transferRunning[lane] = 1;
        // ...
      }
      if (not propagateMCLabels || not laneHasData[lane]) {
        // ...
        assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
        // ...
      }
      // ...
      assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
    }
    if (propagateMCLabels) {
      for (int32_t lane = 0; lane < maxLane; lane++) {
        // ...
      }
    }
    // ...
    if (buildNativeHost && buildNativeGPU && anyLaneHasData) {
      // ...
      mOutputQueue.emplace_back(outputQueueEntry{(void*)((char*)&tmpNativeClusters[nClsFirst] - (char*)&tmpNativeClusters[0]), &mInputsShadow->mPclusterNativeBuffer[nClsFirst], (nClsTotal - nClsFirst) * sizeof(tmpNativeClusters[0]), RecoStep::TPCClusterFinding});
      // ...
      GPUMemCpy(RecoStep::TPCClusterFinding, (void*)&tmpNativeClusters[nClsFirst], (const void*)&mInputsShadow->mPclusterNativeBuffer[nClsFirst], (nClsTotal - nClsFirst) * sizeof(tmpNativeClusters[0]), mRec->NStreams() - 1, false);
    }
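    // Both statements above move this fragment's clusters from the GPU buffer
    // into the host output: either deferred via mOutputQueue (note the first
    // field stores the byte offset into tmpNativeClusters, not a raw pointer)
    // or as an immediate GPUMemCpy on the last stream.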
    // ...
    if (mWaitForFinalInputs && iSectorBase >= 21 && (int32_t)iSectorBase < 21 + GetProcessingSettings().nTPCClustererLanes) {
      notifyForeignChainFinished();
    }
    if (mWaitForFinalInputs && iSectorBase >= 30 && (int32_t)iSectorBase < 30 + GetProcessingSettings().nTPCClustererLanes) {
      mWaitForFinalInputs();
    }
  }
  // ...
  if (transferRunning[i]) {
    // ...
  }
  // ...
  if (triggerOutput && triggerOutput->allocator) {
    // ...
  }
  // ...
  if (propagateMCLabels) {
    // ...
    std::pair<ConstMCLabelContainer*, ConstMCLabelContainerView*> buffer;
    // ...
    throw std::runtime_error("Cluster MC Label buffer missing");
    // ...
    buffer = {&container->first, &container->second};
    // ...
    assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
    assert(propagateMCLabels ? mcLinearLabels.data.size() >= nClsTotal : true);
    // ...
    mcLabelsConstView = buffer.second;
    // ...
  }
  // ...
  tmpNativeClusters = mInputsHost->mPclusterNativeOutput;
  // ...
  if (buildNativeHost) {
    // ...
    auto allocator = [this, &tmpNativeClusters](size_t size) {
      // ...
      return (tmpNativeClusters = this->mInputsHost->mPclusterNativeOutput);
    };
    RunTPCClusterFilter(tmpNativeAccess, allocator, false);
    // ...
  }
  // ...
  if (!mWaitForFinalInputs) {
    notifyForeignChainFinished();
  }
  // ...
  if (buildNativeGPU) {
    // ...
  }
  // ...
  mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
  // ...
  if (doGPU && synchronizeOutput) {
    // ...
  }
  if (doGPU && synchronizeCalibUpdate) {
    // ...
  }
  // ...
  if (buildNativeGPU) {
    GPUMemCpy(RecoStep::TPCClusterFinding, (void*)mInputsShadow->mPclusterNativeBuffer, (const void*)tmpNativeClusters, nClsTotal * sizeof(tmpNativeClusters[0]), -1, true);
  }
  // ...
  if (mPipelineNotifyCtx) {
    // ...
    mPipelineNotifyCtx = nullptr;
  }
  // ...