const uint32_t nTracks = merger.NMergedTracks();
const GPUdEdxInfo* tracksdEdx = merger.MergedTracksdEdx();
constexpr uint8_t flagsReject = getFlagsReject();
const uint32_t flagsRequired = getFlagsRequired(merger.Param().rec);
bool cutOnTrackdEdx = merger.Param().par.dodEdx && merger.Param().dodEdxEnabled && merger.Param().rec.tpc.minTrackdEdxMax2Tot > 0.f;
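// Track selection: skip tracks that failed the fit or were flagged as merged loopers.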
if (!tracks[i].OK()) {
if (tracks[i].MergedLooper()) {
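// Count the clusters that pass the state/attachment filters; only these are exported, and the tally (nCl) feeds the cuts below.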
for (uint32_t j = 0; j < tracks[i].NClusters(); j++) {
  if ((trackClusters[tracks[i].FirstClusterRef() + j].state & flagsReject) || (merger.ClusterAttachment()[trackClusters[tracks[i].FirstClusterRef() + j].num] & flagsRequired) != flagsRequired) {
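// Drop tracks with fewer surviving clusters than the configured minimum (-1 disables the cut).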
if (merger.Param().rec.tpc.minNClustersFinalTrack != -1 && nCl < (uint32_t)merger.Param().rec.tpc.minNClustersFinalTrack) {
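// Optional dE/dx quality cut: reject tracks whose dEdxMax is below the absolute threshold or below the configured fraction of dEdxTot, except for steep tracks (|dz/ds| > 0.03) that have dEdxMax == 0.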
if (cutOnTrackdEdx && (tracksdEdx[i].dEdxMaxTPC < merger.Param().rec.tpc.minTrackdEdxMax || tracksdEdx[i].dEdxMaxTPC < tracksdEdx[i].dEdxTotTPC * merger.Param().rec.tpc.minTrackdEdxMax2Tot) && !(tracksdEdx[i].dEdxMaxTPC == 0 && CAMath::Abs(tracks[i].GetParam().GetDzDs()) > 0.03f)) {
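// Track accepted: atomically reserve an output slot and cluster-reference space (nCl words for the cluster indices plus (nCl + 1) / 2 words for the packed 8-bit sector and row arrays), and queue the track for sorting by time offset.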
uint32_t myId = CAMath::AtomicAdd(&merger.Memory()->nO2Tracks, 1u);
tmpData[i] = {nCl, CAMath::AtomicAdd(&merger.Memory()->nO2ClusRefs, nCl + (nCl + 1) / 2)};
trackSort[myId] = {i, tracks[i].CSide() ? tracks[i].GetParam().GetTOffset() : -tracks[i].GetParam().GetTOffset()};
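// Output stage: convert the accepted merger tracks into TrackTPC objects in the order established by trackSort.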
constexpr float MinDelta = 0.1f;
GPUdEdxInfo* tracksdEdx = merger.MergedTracksdEdx();
GPUdEdxInfo* tracksdEdxAlt = merger.MergedTracksdEdxAlt();
const int32_t nTracks = merger.NOutputTracksTPCO2();
constexpr uint8_t flagsReject = getFlagsReject();
const uint32_t flagsRequired = getFlagsRequired(merger.Param().rec);
TrackTPC* outputTracks = merger.OutputTracksTPCO2();
uint32_t* clusRefs = merger.OutputClusRefsTPCO2();
const auto& param = merger.Param();
float const SNPThresh = 0.999990f;
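// Per-track conversion: i is the merger track index looked up through the sorted order.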
const int32_t i = trackSort[iTmp].x;
const auto& track = tracks[i];
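// Catch sin(phi) values at the physical +/-1 limit before filling the output parametrization.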
auto snpIn = track.GetParam().GetSinPhi();
if (snpIn > SNPThresh) {
} else if (snpIn < -SNPThresh) {
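// Fill the o2 track from the GPU track parameters: x and alpha, the five parameters, and the 15 covariance entries.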
oTrack.set(track.GetParam().GetX(), track.GetAlpha(),
           {track.GetParam().GetY(), track.GetParam().GetZ(), snpIn, track.GetParam().GetDzDs(), track.GetParam().GetQPt()},
           {track.GetParam().GetCov(0),
            track.GetParam().GetCov(1), track.GetParam().GetCov(2),
            track.GetParam().GetCov(3), track.GetParam().GetCov(4), track.GetParam().GetCov(5),
            track.GetParam().GetCov(6), track.GetParam().GetCov(7), track.GetParam().GetCov(8), track.GetParam().GetCov(9),
            track.GetParam().GetCov(10), track.GetParam().GetCov(11), track.GetParam().GetCov(12), track.GetParam().GetCov(13), track.GetParam().GetCov(14)});
oTrack.setChi2(track.GetParam().GetChi2());
auto& outerPar = track.OuterParam();
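// Attach the dE/dx information; the alternative dE/dx comes either from tracksdEdxAlt or falls back to the default tracksdEdx.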
if (param.dodEdxEnabled) {
  oTrack.setdEdx(tracksdEdx[i]);
  oTrack.setdEdxAlt(tracksdEdxAlt[i]);
  oTrack.setdEdxAlt(tracksdEdx[i]);
auto snpOut = outerPar.P[2];
if (snpOut > SNPThresh) {
} else if (snpOut < -SNPThresh) {
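// Export the track parameters at the outer point with the same sin(phi) handling and their 15 covariance entries.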
  outerPar.X, outerPar.alpha,
  {outerPar.P[0], outerPar.P[1], snpOut, outerPar.P[3], outerPar.P[4]},
  {outerPar.C[0], outerPar.C[1], outerPar.C[2], outerPar.C[3], outerPar.C[4], outerPar.C[5],
   outerPar.C[6], outerPar.C[7], outerPar.C[8], outerPar.C[9], outerPar.C[10], outerPar.C[11],
   outerPar.C[12], outerPar.C[13], outerPar.C[14]}));
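// Optional PID: determine the most probable particle species from dE/dx within the configured energy ranges and set it on both the inner and outer parametrization.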
if (param.par.dodEdx && param.dodEdxEnabled && param.rec.tpc.enablePID) {
  auto pid = pidResponse.getMostProbablePID(oTrack, param.rec.tpc.PID_EKrangeMin, param.rec.tpc.PID_EKrangeMax, param.rec.tpc.PID_EPrangeMin, param.rec.tpc.PID_EPrangeMax, param.rec.tpc.PID_EDrangeMin, param.rec.tpc.PID_EDrangeMax, param.rec.tpc.PID_ETrangeMin, merger.Param().rec.tpc.PID_ETrangeMax, merger.Param().rec.tpc.PID_useNsigma, merger.Param().rec.tpc.PID_sigma);
  auto pidRemap = merger.Param().rec.tpc.PID_remap[pid];
  oTrack.setPID(pid, true);
  oTrack.getParamOut().setPID(pid, true);
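// Cluster references: nOutCl 32-bit cluster-in-row indices, followed by nOutCl 8-bit sector and nOutCl 8-bit row entries, all written into the block reserved in clusRefs.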
uint32_t nOutCl = tmpData[i].x;
uint32_t clBuff = tmpData[i].y;
oTrack.setClusterRef(clBuff, nOutCl);
uint32_t* clIndArr = &clusRefs[clBuff];
uint8_t* sectorIndexArr = reinterpret_cast<uint8_t*>(clIndArr + nOutCl);
uint8_t* rowIndexArr = sectorIndexArr + nOutCl;
uint32_t nOutCl2 = 0;
float t1 = 0, t2 = 0;
int32_t sector1 = 0, sector2 = 0;
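// Copy the accepted clusters into the reference arrays, recording the time and sector of the first (t1, sector1) and last (t2, sector2) exported cluster.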
for (uint32_t j = 0; j < track.NClusters(); j++) {
  if ((trackClusters[track.FirstClusterRef() + j].state & flagsReject) || (merger.ClusterAttachment()[trackClusters[track.FirstClusterRef() + j].num] & flagsRequired) != flagsRequired) {
  int32_t clusterIdGlobal = trackClusters[track.FirstClusterRef() + j].num;
  int32_t sector = trackClusters[track.FirstClusterRef() + j].sector;
  int32_t globalRow = trackClusters[track.FirstClusterRef() + j].row;
  int32_t clusterIdInRow = clusterIdGlobal - clusters->clusterOffset[sector][globalRow];
  clIndArr[nOutCl2] = clusterIdInRow;
  sectorIndexArr[nOutCl2] = sector;
  rowIndexArr[nOutCl2] = globalRow;
  t1 = clusters->clustersLinear[clusterIdGlobal].getTime();
  if (++nOutCl2 == nOutCl) {
    t2 = clusters->clustersLinear[clusterIdGlobal].getTime();
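// If the track was merged from segments, take the reference time and sector from the first cluster of its first segment instead.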
if (track.PrevSegment() >= 0) {
  const GPUTPCGMMergedTrack* chkTrk = track.GetFirstSegment(tracks, merger.Param().rec.enableCyclicGraphWorkarounds);
  const auto& firstPrevCluster = trackClusters[chkTrk->FirstClusterRef()];
  t1 = clusters->clustersLinear[firstPrevCluster.num].getTime();
  sector1 = firstPrevCluster.sector;
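// In continuous tracking mode, derive the track time: time0 comes from the fitted time offset; for tracks with clusters on both TPC sides, the allowed time shift is limited to half the time difference between the two clusters at the side crossing.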
float time0 = 0.f, tFwd = 0.f, tBwd = 0.f;
if (merger.Param().par.continuousTracking) {
  time0 = track.GetParam().GetTOffset();
  bool lastSide = trackClusters[track.FirstClusterRef()].sector < MAXSECTOR / 2;
  for (uint32_t iCl = 1; iCl < track.NClusters(); iCl++) {
    auto& cacl1 = trackClusters[track.FirstClusterRef() + iCl];
    if (lastSide ^ (cacl1.sector < MAXSECTOR / 2)) {
      auto& cl1 = clusters->clustersLinear[cacl1.num];
      auto& cl2 = clusters->clustersLinear[trackClusters[track.FirstClusterRef() + iCl - 1].num];
      delta = CAMath::Abs(cl1.getTime() - cl2.getTime()) * 0.5f;
      if (delta < MinDelta) {
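// Constrain the time estimate: tmin/tmax bracket the cluster times, limited by the maximum drift time of the relevant sector; if time0 is incompatible with that window, a time shift delta is computed and converted into a corresponding z shift of the output track.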
const float tmin = CAMath::Min(t1, t2);
const float maxDriftTime = merger.GetConstantMem()->calibObjects.fastTransformHelper->getCorrMap()->getMaxDriftTime(t1 > t2 ? sector1 : sector2);
const float clusterT0 = merger.GetConstantMem()->calibObjects.fastTransformHelper->getCorrMap()->getT0();
const float tmax = CAMath::Min(tmin + maxDriftTime, CAMath::Max(t1, t2));
if (time0 + maxDriftTime < tmax) {
  delta = tmax - time0 - maxDriftTime;
if (tmin - clusterT0 < time0 + delta) {
  delta = tmin - clusterT0 - time0;
const float deltaZ = merger.GetConstantMem()->calibObjects.fastTransformHelper->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(sector2, delta);
oTrack.setZ(oTrack.getZ() + deltaZ);
tFwd = tmin - clusterT0 - time0;
tBwd = time0 - tmax + maxDriftTime;
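// Store the time estimate with its forward/backward uncertainties and flag which TPC sides contributed clusters, then write the finished track.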
oTrack.setTime0(time0);
oTrack.setDeltaTBwd(tBwd);
oTrack.setDeltaTFwd(tFwd);
oTrack.setHasCSideClusters();
oTrack.setHasASideClusters();
} else if (track.CSide()) {
  oTrack.setHasCSideClusters();
  oTrack.setHasASideClusters();
outputTracks[iTmp] = oTrack;