27#include "GPUDefParametersRuntime.h"
68#ifdef GPUCA_TPC_GEOMETRY_O2
69std::pair<uint32_t, uint32_t> GPUChainTracking::TPCClusterizerDecodeZSCountUpdate(uint32_t iSector,
const CfFragment& fragment)
79 uint16_t posInEndpoint = 0;
80 uint16_t pagesEndpoint = 0;
84 for (uint32_t l = pageFirst; l < pageLast; l++) {
85 uint16_t pageDigits =
mCFContext->fragmentData[fragment.
index].pageDigits[iSector][
j][posInEndpoint++];
93 if (pagesEndpoint !=
mCFContext->fragmentData[fragment.
index].pageDigits[iSector][
j].size()) {
95 GPUError(
"TPC raw page count mismatch in TPCClusterizerDecodeZSCountUpdate: expected %d / buffered %lu", pagesEndpoint,
mCFContext->fragmentData[fragment.
index].pageDigits[iSector][
j].size());
98 GPUFatal(
"TPC raw page count mismatch in TPCClusterizerDecodeZSCountUpdate: expected %d / buffered %lu", pagesEndpoint,
mCFContext->fragmentData[fragment.
index].pageDigits[iSector][
j].size());
111 TPCClusterizerEnsureZSOffsets(iSector, fragment);
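// The page-count check above compares the pages this update pass walks per endpoint against the
// pageDigits entries buffered for the fragment during the initial scan; a mismatch indicates
// inconsistent raw input. TPCClusterizerEnsureZSOffsets then cross-checks the decoded page and ADC
// counts per endpoint against the expectations stored for this fragment.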
void GPUChainTracking::TPCClusterizerEnsureZSOffsets(uint32_t iSector, const CfFragment& fragment)
uint32_t pagesEndpoint = 0;
const uint32_t nAdcsExpected = data.nDigits[iSector][endpoint];
const uint32_t nPagesExpected = data.nPages[iSector][endpoint];
uint32_t nAdcDecoded = 0;
const uint32_t pageFirst = (i == data.minMaxCN[iSector][endpoint].zsPtrFirst) ? data.minMaxCN[iSector][endpoint].zsPageFirst : 0;
for (uint32_t j = pageFirst; j < pageLast; j++) {
const uint16_t nSamplesInPage = decHdr->nADCsamples;
nAdcDecoded += nSamplesInPage;
if (pagesEndpoint != nPagesExpected) {
GPUFatal("Sector %d, Endpoint %d, Fragment %d: TPC raw page count mismatch: expected %d / buffered %u", iSector, endpoint, fragment.index, pagesEndpoint, nPagesExpected);
if (nAdcDecoded != nAdcsExpected) {
GPUFatal("Sector %d, Endpoint %d, Fragment %d: TPC ADC count mismatch: expected %u, buffered %u", iSector, endpoint, fragment.index, nAdcsExpected, nAdcDecoded);
nAdcs += nAdcsExpected;
struct TPCCFDecodeScanTmp {
int32_t zsPtrFirst, zsPageFirst, zsPtrLast, zsPageLast, hasData, pageCounter;
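// Per-fragment scan state kept while the ZS pages are walked once: the first/last ZS pointer and page
// assigned to the fragment so far, whether the fragment has seen any data yet, and the page counter at
// the most recent page attributed to it.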
std::pair<uint32_t, uint32_t> GPUChainTracking::TPCClusterizerDecodeZSCount(uint32_t iSector, const CfFragment& fragment)
uint32_t nDigits = 0;
memset(endpointAdcSamples, 0, sizeof(endpointAdcSamples));
std::vector<std::pair<CfFragment, TPCCFDecodeScanTmp>> fragments;
fragments.emplace_back(std::pair<CfFragment, TPCCFDecodeScanTmp>{fragment, {0, 0, 0, 0, 0, -1}});
fragments.emplace_back(std::pair<CfFragment, TPCCFDecodeScanTmp>{fragments.back().first.next(), {0, 0, 0, 0, 0, -1}});
std::vector<bool> fragmentExtends(mCFContext->nFragments, false);
uint32_t firstPossibleFragment = 0;
uint32_t pageCounter = 0;
uint32_t emptyPages = 0;
static bool errorShown = false;
if (errorShown == false) {
GPUAlarm("Trigger handling only possible with TPC Dense Link Based data, received version %d, disabling", mCFContext->zsVersion);
GPUError("Received TPC ZS 8kb page of mixed versions, expected %d, received %d (linkid %d, feeCRU %d, feeEndpoint %d, feelinkid %d)", mCFContext->zsVersion, (int32_t)hdr->version, (int32_t)o2::raw::RDHUtils::getLinkID(*rdh), (int32_t)rdh_utils::getCRU(*rdh), (int32_t)rdh_utils::getEndPoint(*rdh), (int32_t)rdh_utils::getLink(*rdh));
constexpr size_t bufferSize = 3 * std::max(sizeof(*rdh), sizeof(*hdr)) + 1;
for (size_t i = 0; i < sizeof(*rdh); i++) {
snprintf(dumpBuffer + 3 * i, 4, "%02X ", (int32_t)((uint8_t*)rdh)[i]);
GPUAlarm("RDH of page: %s", dumpBuffer);
for (size_t i = 0; i < sizeof(*hdr); i++) {
snprintf(dumpBuffer + 3 * i, 4, "%02X ", (int32_t)((uint8_t*)hdr)[i]);
GPUAlarm("Metainfo of page: %s", dumpBuffer);
GPUFatal("Cannot process with invalid TPC ZS data, exiting");
if (hdr2->flags & TPCZSHDRV2::ZSFlags::TriggerWordPresent) {
tmp.orbit = o2::raw::RDHUtils::getHeartBeatOrbit(*rdh);
nDigits += hdr->nADCsamples;
endpointAdcSamples[j] += hdr->nADCsamples;
uint32_t maxTimeBin = timeBin + hdr->nTimeBinSpan;
if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
if (hdr2->flags & TPCZSHDRV2::ZSFlags::nTimeBinSpanBit8) {
bool extendsInNextPage = false;
if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
extendsInNextPage = o2::raw::RDHUtils::getHeartBeatOrbit(*nextrdh) == o2::raw::RDHUtils::getHeartBeatOrbit(*rdh) && o2::raw::RDHUtils::getMemorySize(*nextrdh) > sizeof(o2::header::RAWDataHeader);
while (firstPossibleFragment && (uint32_t)fragments[firstPossibleFragment - 1].first.last() > timeBin) {
firstPossibleFragment--;
auto handleExtends = [&](uint32_t ff) {
if (fragmentExtends[ff]) {
fragments[ff].second.zsPageLast++;
mCFContext->fragmentData[ff].pageDigits[iSector][j].emplace_back(0);
fragmentExtends[ff] = false;
if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
for (uint32_t ff = 0; ff < firstPossibleFragment; ff++) {
for (uint32_t f = firstPossibleFragment; f < mCFContext->nFragments; f++) {
if (timeBin < (uint32_t)fragments[f].first.last() && (uint32_t)fragments[f].first.first() <= maxTimeBin) {
if (!fragments[f].second.hasData) {
fragments[f].second.hasData = 1;
fragments[f].second.zsPtrFirst = k;
fragments[f].second.zsPageFirst = l;
if (pageCounter > (uint32_t)fragments[f].second.pageCounter + 1) {
mCFContext->fragmentData[f].nPages[iSector][j] += emptyPages + pageCounter - fragments[f].second.pageCounter - 1;
for (uint32_t k2 = fragments[f].second.zsPtrLast - 1; k2 <= k; k2++) {
for (uint32_t l2 = ((int32_t)k2 == fragments[f].second.zsPtrLast - 1) ? fragments[f].second.zsPageLast : 0; l2 < (k2 < k ? mIOPtrs.tpcZS->sector[iSector].nZSPtr[j][k2] : l); l2++) {
mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(0);
const TPCZSHDR* const hdrTmp = (const TPCZSHDR*)(rdh_utils::getLink(o2::raw::RDHUtils::getFEEID(*rdhTmp)) == rdh_utils::DLBZSLinkID ? (pageTmp + o2::raw::RDHUtils::getMemorySize(*rdhTmp) - sizeof(TPCZSHDRV2)) : (pageTmp + sizeof(o2::header::RAWDataHeader)));
} else if (emptyPages) {
mCFContext->fragmentData[f].nPages[iSector][j] += emptyPages;
for (uint32_t m = 0; m < emptyPages; m++) {
mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(0);
fragments[f].second.zsPtrLast = k + 1;
fragments[f].second.zsPageLast = l + 1;
fragments[f].second.pageCounter = pageCounter;
mCFContext->fragmentData[f].nDigits[iSector][j] += hdr->nADCsamples;
mCFContext->fragmentData[f].pageDigits[iSector][j].emplace_back(hdr->nADCsamples);
fragmentExtends[f] = extendsInNextPage;
if (timeBin < (uint32_t)fragments[f].first.last()) {
if (mCFContext->zsVersion >= ZSVersion::ZSVersionDenseLinkBased) {
for (uint32_t ff = f + 1; ff < mCFContext->nFragments; ff++) {
firstPossibleFragment = f + 1;
mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPtrLast = fragments[f].second.zsPtrLast;
mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPtrFirst = fragments[f].second.zsPtrFirst;
mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPageLast = fragments[f].second.zsPageLast;
mCFContext->fragmentData[f].minMaxCN[iSector][j].zsPageFirst = fragments[f].second.zsPageFirst;
if (endpointAdcSamples[i] > mCFContext->nDigitsEndpointMax[iSector]) {
mCFContext->nDigitsEndpointMax[iSector] = endpointAdcSamples[i];
uint32_t nDigitsFragmentMax = 0;
uint32_t pagesInFragment = 0;
uint32_t digitsInFragment = 0;
pagesInFragment += mCFContext->fragmentData[i].nPages[iSector][j];
digitsInFragment += mCFContext->fragmentData[i].nDigits[iSector][j];
nDigitsFragmentMax = std::max(nDigitsFragmentMax, digitsInFragment);
return {nDigits, nDigitsFragmentMax};
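// Returned pair: the total number of ADC samples seen for the sector and the largest per-fragment
// digit count; both feed the buffer-size estimates in RunTPCClusterizer_prepare.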
const uint32_t iSector = clusterer.mISector;
std::vector<size_t> counts;
if (nSteps > clusterer.mNBufs) {
GPUError("Clusterer buffers exceeded (%u > %u)", nSteps, (int32_t)clusterer.mNBufs);
size_t tmpCount = count;
for (uint32_t i = 1; i < nSteps; i++) {
counts.push_back(tmpCount);
runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanStart>({GetGrid(tmpCount, scanWorkgroupSize, lane), {iSector}}, i, stage);
runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanUp>({GetGrid(tmpCount, scanWorkgroupSize, lane), {iSector}}, i, tmpCount);
tmpCount = (tmpCount + scanWorkgroupSize - 1) / scanWorkgroupSize;
runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanTop>({GetGrid(tmpCount, scanWorkgroupSize, lane), {iSector}}, nSteps, tmpCount);
for (uint32_t i = nSteps - 1; i > 1; i--) {
tmpCount = counts[i - 1];
runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::scanDown>({GetGrid(tmpCount - scanWorkgroupSize, scanWorkgroupSize, lane), {iSector}}, i, scanWorkgroupSize, tmpCount);
runKernel<GPUTPCCFStreamCompaction, GPUTPCCFStreamCompaction::compactDigits>({GetGrid(count, scanWorkgroupSize, lane), {iSector}}, 1, stage, in, out);
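// The scanStart/scanUp/scanTop/scanDown kernels above form a hierarchical prefix scan: each level
// shrinks the problem by scanWorkgroupSize, the top level is scanned in one go, and the partial sums
// are propagated back down before compactDigits gathers the surviving elements to their scanned
// offsets. A minimal host-side sketch of the same compaction-by-scan idea (illustrative only; all
// names below are local to this sketch, not part of the GPU code):
#include <cstdint>
#include <vector>

template <typename T, typename Pred>
static std::vector<T> compactByScan(const std::vector<T>& in, Pred keep)
{
  std::vector<uint32_t> offset(in.size());
  uint32_t sum = 0;
  for (size_t i = 0; i < in.size(); i++) { // exclusive scan of the keep flags
    offset[i] = sum;
    sum += keep(in[i]) ? 1 : 0;
  }
  std::vector<T> out(sum);
  for (size_t i = 0; i < in.size(); i++) { // gather survivors to their scanned positions
    if (keep(in[i])) {
      out[offset[i]] = in[i];
    }
  }
  return out;
}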
for (size_t i = 0; i < nIn; i++) {
std::pair<uint32_t, uint32_t> GPUChainTracking::RunTPCClusterizer_transferZS(int32_t iSector, const CfFragment& fragment, int32_t lane)
const auto& retVal = TPCClusterizerDecodeZSCountUpdate(iSector, fragment);
uint32_t nPagesSector = 0;
nPagesSector += nPages;
int32_t GPUChainTracking::RunTPCClusterizer_prepare(bool restorePointers)
if (restorePointers) {
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
mCFContext->tpcMaxTimeBin = maxAllowedTimebin;
uint32_t nDigitsFragmentMax[NSECTORS];
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
GPUError("Data has invalid RDH version %d, %d required\n", o2::raw::RDHUtils::getVersion(rdh), o2::raw::RDHUtils::getVersion<o2::header::RAWDataHeader>());
const auto& x = TPCClusterizerDecodeZSCount(iSector, fragmentMax);
nDigitsFragmentMax[iSector] = x.first;
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
uint32_t nDigitsBase = nDigitsFragmentMax[iSector];
uint32_t threshold = 40000000;
uint32_t nDigitsScaled = nDigitsBase > threshold ? nDigitsBase : std::min((threshold + nDigitsBase) / 2, 2 * nDigitsBase);
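// Sizing heuristic above, illustrated with threshold = 40000000: small inputs get headroom but are
// capped at twice their size, inputs near the threshold converge towards the midpoint, and anything
// above the threshold is used as-is.
//   nDigitsBase = 10e6 -> min((40e6 + 10e6) / 2, 2 * 10e6) = 20e6
//   nDigitsBase = 36e6 -> min(38e6, 72e6)                  = 38e6
//   nDigitsBase = 50e6 -> 50e6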
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
if (mCFContext->tpcMaxTimeBin > maxAllowedTimebin) {
GPUError("Input data has invalid time bin %u > %d", mCFContext->tpcMaxTimeBin, maxAllowedTimebin);
mCFContext->tpcMaxTimeBin = maxAllowedTimebin;
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
if (param().rec.fwdTPCDigitsAsClusters) {
#ifdef GPUCA_TPC_GEOMETRY_O2
if (RunTPCClusterizer_prepare(mPipelineNotifyCtx && GetProcessingSettings().doublePipelineClusterizer)) {
float tpcHitLowOccupancyScalingFactor = 1.f;
if (nHitsBase < threshold) {
tpcHitLowOccupancyScalingFactor = std::min(3.5f, (float)threshold / nHitsBase);
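// For low-occupancy data (nHitsBase below the threshold) the scaling factor grows inversely with the
// hit count but is clamped at 3.5, e.g. threshold = 4 * nHitsBase gives 3.5 (clamped),
// threshold = 2 * nHitsBase gives 2.0.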
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
RunTPCClusterizer_prepare(true);
if (nn_settings.applyNNclusterizer) {
int32_t deviceId = -1;
nnTimers[0] = &getTimer<GPUTPCNNClusterizer, 0>("GPUTPCNNClusterizer_ONNXClassification_0_", 0);
nnTimers[1] = &getTimer<GPUTPCNNClusterizer, 1>("GPUTPCNNClusterizer_ONNXRegression_1_", 1);
nnTimers[2] = &getTimer<GPUTPCNNClusterizer, 2>("GPUTPCNNClusterizer_ONNXRegression2_2_", 2);
nnTimers[3] = &getTimer<GPUTPCNNClusterizer, 3>("GPUTPCNNClusterizer_ONNXClassification_0_", 3);
nnTimers[4] = &getTimer<GPUTPCNNClusterizer, 4>("GPUTPCNNClusterizer_ONNXRegression_1_", 4);
nnTimers[5] = &getTimer<GPUTPCNNClusterizer, 5>("GPUTPCNNClusterizer_ONNXRegression2_2_", 5);
nnTimers[6] = &getTimer<GPUTPCNNClusterizer, 6>("GPUTPCNNClusterizer_ONNXClassification_0_", 6);
nnTimers[7] = &getTimer<GPUTPCNNClusterizer, 7>("GPUTPCNNClusterizer_ONNXRegression_1_", 7);
nnTimers[8] = &getTimer<GPUTPCNNClusterizer, 8>("GPUTPCNNClusterizer_ONNXRegression2_2_", 8);
nnTimers[9] = &getTimer<GPUTPCNNClusterizer, 9>("GPUTPCNNClusterizer_ONNXClassification_0_", 9);
nnTimers[10] = &getTimer<GPUTPCNNClusterizer, 10>("GPUTPCNNClusterizer_ONNXRegression_1_", 10);
nnTimers[11] = &getTimer<GPUTPCNNClusterizer, 11>("GPUTPCNNClusterizer_ONNXRegression2_2_", 11);
if (nnApplications[lane].mModelsUsed[0]) {
SetONNXGPUStream(*(nnApplications[lane].mModelClass).getSessionOptions(), lane, &deviceId);
(nnApplications[lane].mModelClass).setDeviceId(deviceId);
if (nnApplications[lane].mModelClass.getIntraOpNumThreads() > maxThreads) {
(nnApplications[lane].mModelClass).initEnvironment();
if (!nn_settings.nnLoadFromCCDB) {
if (nnApplications[lane].mModelsUsed[1]) {
SetONNXGPUStream(*(nnApplications[lane].mModelReg1).getSessionOptions(), lane, &deviceId);
(nnApplications[lane].mModelReg1).setDeviceId(deviceId);
if (nnApplications[lane].mModelReg1.getIntraOpNumThreads() > maxThreads) {
(nnApplications[lane].mModelReg1).initEnvironment();
if (!nn_settings.nnLoadFromCCDB) {
(nnApplications[lane].mModelReg1).initSession();
if (nnApplications[lane].mModelsUsed[2]) {
SetONNXGPUStream(*(nnApplications[lane].mModelReg2).getSessionOptions(), lane, &deviceId);
(nnApplications[lane].mModelReg2).setDeviceId(deviceId);
if (nnApplications[lane].mModelReg2.getIntraOpNumThreads() > maxThreads) {
(nnApplications[lane].mModelReg2).initEnvironment();
if (!nn_settings.nnLoadFromCCDB) {
(nnApplications[lane].mModelReg2).initSession();
if (nn_settings.nnClusterizerVerbosity > 0) {
LOG(info) << "(ORT) Allocated ONNX stream for lane " << lane << " and device " << deviceId;
for (int32_t sector = 0; sector < NSECTORS; sector++) {
int32_t lane = sector % numLanes;
nnApplications[lane].initClusterizer(nn_settings, clustererNN, maxFragmentLen, maxAllowedTimebin);
clustererNNShadow.mISector = sector;
nnApplications[lane].initClusterizer(nn_settings, clustererNNShadow, maxFragmentLen, maxAllowedTimebin);
if (nn_settings.nnClusterizerVerbosity > 2) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Processor initialized. Sector " << sector << ", lane " << lane << ", max clusters " << clustererNN.mNnClusterizerTotalClusters << " (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 2) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Memory registered for memoryId " << clustererNN.mMemoryId << " (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 2) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Writing to constant memory...";
if (nn_settings.nnClusterizerVerbosity > 2) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Writing to constant memory done";
size_t nClsTotal = 0;
std::unique_ptr<ClusterNative[]> tmpNativeClusterBuffer;
const bool buildNativeGPU = doGPU && NeedTPCClustersOnGPU();
if (buildNativeGPU) {
GPUFatal("ERROR, mWaitForFinalInputs cannot be called with nTPCClustererLanes > 6");
if (mWaitForFinalInputs) {
GPUFatal("Cannot use waitForFinalInput callback without delayed output");
tmpNativeClusters = mInputsHost->mPclusterNativeOutput;
tmpNativeClusterBuffer = std::make_unique<ClusterNative[]>(mInputsHost->mNClusterNative);
tmpNativeClusters = tmpNativeClusterBuffer.get();
if (propagateMCLabels) {
int8_t transferRunning[NSECTORS] = {0};
auto notifyForeignChainFinished = [this]() {
if (mPipelineNotifyCtx) {
std::lock_guard<std::mutex> lock(mPipelineNotifyCtx->mutex);
mPipelineNotifyCtx->ready = true;
mPipelineNotifyCtx->cond.notify_one();
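// Classic condition-variable handshake: the ready flag is set while holding the context mutex and the
// waiting (foreign) chain is woken with notify_one(), so a concurrent wait on the same condition
// variable cannot miss the notification.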
bool synchronizeCalibUpdate = false;
for (CfFragment fragment = mCFContext->fragmentFirst; !fragment.isEnd(); fragment = fragment.next()) {
GPUInfo("Processing time bins [%d, %d) for sectors %d to %d", fragment.start, fragment.last(), iSectorBase, iSectorBase + GetProcessingSettings().nTPCClustererLanes - 1);
if (doGPU && fragment.index != 0) {
SynchronizeStream(lane);
uint32_t iSector = iSectorBase + lane;
bool setDigitsOnHost = (not doGPU && not mIOPtrs.tpcZS) || propagateMCLabels;
size_t numDigits = inDigits->nTPCDigits[iSector];
if (setDigitsOnGPU) {
GPUMemCpy(RecoStep::TPCClusterFinding, clustererShadow.mPdigits, inDigits->tpcDigits[iSector], sizeof(clustererShadow.mPdigits[0]) * numDigits, lane, true);
if (setDigitsOnHost) {
using ChargeMapType = decltype(*clustererShadow.mPchargeMap);
using PeakMapType = decltype(*clustererShadow.mPpeakMap);
if (fragment.index == 0) {
if (propagateMCLabels && fragment.index == 0) {
GPUFatal("MC label container missing, sector %d", iSector);
runKernel<GPUTPCCFChargeMapFiller, GPUTPCCFChargeMapFiller::findFragmentStart>({GetGrid(1, lane), {iSector}}, mIOPtrs.tpcZS == nullptr);
} else if (propagateMCLabels) {
GPUFatal("Data with invalid TPC ZS mode (%d) received", mCFContext->zsVersion);
runKernel<GPUTPCCFDecodeZS>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF, tpcTimeBinCut);
runKernel<GPUTPCCFDecodeZSLink>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF, tpcTimeBinCut);
runKernel<GPUTPCCFDecodeZSDenseLink>({GetGridBlk(nBlocks, lane), {iSector}}, firstHBF, tpcTimeBinCut);
uint32_t iSector = iSectorBase + lane;
int32_t nextSector = iSector;
if (nextSector < NSECTORS && mIOPtrs.tpcZS && mCFContext->nPagesSector[nextSector] && mCFContext->zsVersion != -1 && !mCFContext->abandonTimeframe) {
if (propagateMCLabels) {
bool checkForNoisyPads = (rec()->GetParam().rec.tpc.maxTimeBinAboveThresholdIn1000Bin > 0) || (rec()->GetParam().rec.tpc.maxConsecTimeBinAboveThreshold > 0);
checkForNoisyPads &= (rec()->GetParam().rec.tpc.noisyPadsQuickCheck ? fragment.index == 0 : true);
if (checkForNoisyPads) {
runKernel<GPUTPCCFCheckPadBaseline>({GetGridBlk(nBlocks, lane), {iSector}});
getKernelTimer<GPUTPCCFCheckPadBaseline>(RecoStep::TPCClusterFinding, iSector, TPC_PADS_IN_SECTOR * fragment.lengthWithoutOverlap() * sizeof(PackedCharge), false);
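// The size handed to the kernel timer charges one PackedCharge per pad and per time bin of the
// fragment (without the overlap region), so the timer's data-volume accounting reflects the
// charge-map traffic of the baseline check rather than the kernel launch size.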
RunTPCClusterizer_compactPeaks(clusterer, clustererShadow, 0, doGPU, lane);
uint32_t iSector = iSectorBase + lane;
runKernel<GPUTPCCFNoiseSuppression, GPUTPCCFNoiseSuppression::noiseSuppression>({GetGrid(clusterer.mPmemory->counters.nPeaks, lane), {iSector}});
RunTPCClusterizer_compactPeaks(clusterer, clustererShadow, 1, doGPU, lane);
uint32_t iSector = iSectorBase + lane;
if (fragment.index == 0) {
if (transferRunning[lane] == 1) {
transferRunning[lane] = 2;
#ifdef GPUCA_HAS_ONNX
if (nn_settings.nnClusterizerApplyCfDeconvolution) {
if (nn_settings.nnClusterizerVerbosity > 2) {
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Start. Loop=" << batch << ". (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Done filling data. Loop=" << batch << ". (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Done setting deconvolution flags. Loop=" << batch << ". (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Done with NN classification inference. Loop=" << batch << ". (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Done with NN regression inference. Loop=" << batch << ". (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Done publishing. Loop=" << batch << ". (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
if (!nn_settings.nnClusterizerApplyCfDeconvolution) {
if (nn_settings.nnClusterizerVerbosity > 3) {
LOG(info) << "(NNCLUS, GPUChainTrackingClusterizer, this=" << this << ") Done with CF regression. (clustererNN=" << &clustererNN << ", clustererNNShadow=" << &clustererNNShadow << ")";
GPUFatal("Project not compiled with neural network clusterization. Aborting.");
if (doGPU && propagateMCLabels) {
laneHasData[lane] = true;
size_t nClsFirst = nClsTotal;
bool anyLaneHasData = false;
for (int32_t lane = 0; lane < maxLane; lane++) {
uint32_t iSector = iSectorBase + lane;
if (laneHasData[lane]) {
anyLaneHasData = true;
clusterer.raiseError(GPUErrors::ERROR_CF_GLOBAL_CLUSTER_OVERFLOW, iSector * 1000 + j, nClsTotal + clusterer.mPclusterInRow[j], mInputsHost->mNClusterNative);
if (buildNativeGPU) {
} else if (buildNativeHost) {
if (transferRunning[lane]) {
transferRunning[lane] = 1;
if (not propagateMCLabels || not laneHasData[lane]) {
assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
if (propagateMCLabels) {
for (int32_t lane = 0; lane < maxLane; lane++) {
if (buildNativeHost && buildNativeGPU && anyLaneHasData) {
mOutputQueue.emplace_back(outputQueueEntry{(void*)((char*)&tmpNativeClusters[nClsFirst] - (char*)&tmpNativeClusters[0]), &mInputsShadow->mPclusterNativeBuffer[nClsFirst], (nClsTotal - nClsFirst) * sizeof(tmpNativeClusters[0]), RecoStep::TPCClusterFinding});
GPUMemCpy(RecoStep::TPCClusterFinding, (void*)&tmpNativeClusters[nClsFirst], (const void*)&mInputsShadow->mPclusterNativeBuffer[nClsFirst], (nClsTotal - nClsFirst) * sizeof(tmpNativeClusters[0]), mRec->NStreams() - 1, false);
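// When both host and GPU native buffers are built, the freshly produced cluster range
// [nClsFirst, nClsTotal) is either queued in mOutputQueue (storing the byte offset into the host
// buffer together with the matching GPU source pointer) for deferred transfer, or copied right away
// with an asynchronous GPUMemCpy on the last stream.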
if (mWaitForFinalInputs && iSectorBase >= 21 && (int32_t)iSectorBase < 21 + GetProcessingSettings().nTPCClustererLanes) {
notifyForeignChainFinished();
if (mWaitForFinalInputs && iSectorBase >= 30 && (int32_t)iSectorBase < 30 + GetProcessingSettings().nTPCClustererLanes) {
mWaitForFinalInputs();
#ifdef GPUCA_HAS_ONNX
LOG(info) << "(ORT) Environment releasing...";
if (transferRunning[i]) {
if (triggerOutput && triggerOutput->allocator) {
GPUInfo("Event has %zu TPC Clusters", nClsTotal);
if (propagateMCLabels) {
std::pair<ConstMCLabelContainer*, ConstMCLabelContainerView*> buffer;
if (!GetProcessingSettings().tpcWriteClustersAfterRejection && !sortClusters && labelOutputControl && labelOutputControl->useExternal()) {
if (!labelOutputControl->allocator) {
throw std::runtime_error("Cluster MC Label buffer missing");
buffer = {&container->first, &container->second};
assert(propagateMCLabels ? mcLinearLabels.header.size() == nClsTotal : true);
assert(propagateMCLabels ? mcLinearLabels.data.size() >= nClsTotal : true);
mcLabelsConstView = buffer.second;
tmpNativeClusters = mInputsHost->mPclusterNativeOutput;
if (buildNativeHost) {
auto allocator = [this, &tmpNativeClusters](size_t size) {
return (tmpNativeClusters = this->mInputsHost->mPclusterNativeOutput);
RunTPCClusterFilter(tmpNativeAccess, allocator, false);
if (!mWaitForFinalInputs) {
notifyForeignChainFinished();
if (buildNativeGPU) {
mInputsHost->mPclusterNativeAccess->setOffsetPtrs();
if (doGPU && synchronizeOutput) {
if (doGPU && synchronizeCalibUpdate) {
SortClusters(buildNativeGPU, propagateMCLabels, tmpNativeAccess, tmpNativeClusters);
if (mPipelineNotifyCtx) {
mPipelineNotifyCtx = nullptr;
if (propagateMCLabels) {
std::iota(clsOrder.begin(), clsOrder.end(), 0);
std::vector<ClusterNative> tmpClusters;
return clusters[a] < clusters[b];
tmpClusters.resize(clusterAccess->nClusters[i][j]);
for (uint32_t k = 0; k < tmpClusters.size(); k++) {
tmpClusters.clear();
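// When MC labels are propagated, clusters cannot simply be sorted in place: the label container is
// addressed by cluster position, so the code builds an index permutation (std::iota plus a sort with
// the cluster comparison) and gathers clusters and labels through it. A minimal self-contained sketch
// of that index-sort-and-gather pattern (illustrative only; names local to this sketch):
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Sort `values` while reporting the permutation in `order`, so that any parallel
// array (e.g. per-cluster labels) can be gathered through `order` afterwards.
template <typename T>
static void sortWithOrder(std::vector<T>& values, std::vector<uint32_t>& order)
{
  order.resize(values.size());
  std::iota(order.begin(), order.end(), 0); // identity permutation
  std::sort(order.begin(), order.end(), [&](uint32_t a, uint32_t b) { return values[a] < values[b]; });
  std::vector<T> tmp(values.size());
  for (size_t k = 0; k < order.size(); k++) { // gather into sorted order
    tmp[k] = values[order[k]];
  }
  values.swap(tmp);
}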
std::pair<o2::dataformats::ConstMCLabelContainer*, o2::dataformats::ConstMCLabelContainerView*> labelBuffer;
std::unique_ptr<ConstMCLabelContainerView> tmpUniqueContainerView;
std::unique_ptr<ConstMCLabelContainer> tmpUniqueContainerBuffer;
if (labelOutput && labelOutput->allocator) {
labelBuffer = {&labelContainer->first, &labelContainer->second};
for (const auto& element : clusterAccess->clustersMCTruth->getLabels(clsOrder[i])) {
*labelBuffer.second = *labelBuffer.first;
if (buildNativeGPU) {