// NOTE(review): fragment of the GPU TPC merger output "prepare" pass. The
// enclosing function signature and several interior statements are NOT visible
// in this chunk (the embedded original line numbers are non-contiguous), so
// the code below is documented as-is without reconstructing the elided parts.
41 const uint32_t nTracks = merger.NOutputTracks();
43 const GPUdEdxInfo* tracksdEdx = merger.OutputTracksdEdx();
45 constexpr uint8_t flagsReject = getFlagsReject();
46 const uint32_t flagsRequired = getFlagsRequired(merger.Param().rec);
// dE/dx-based rejection is active only when dE/dx was computed for this event
// (dodEdx && dodEdxDownscaled) and a positive max-over-tot threshold is set.
47 bool cutOnTrackdEdx = merger.Param().par.dodEdx && merger.Param().dodEdxDownscaled && merger.Param().rec.tpc.minTrackdEdxMax2Tot > 0.f;
// Skip tracks that are not flagged OK (branch body elided in this chunk).
52 if (!tracks[
i].OK()) {
// Count this track's clusters that survive the per-cluster cuts below (loop
// body partially elided; presumably increments the local nCl counter used
// further down — TODO confirm against the full source).
56 for (uint32_t
j = 0;
j < tracks[
i].NClusters();
j++) {
// Reject a cluster if its state carries any reject flag, or if its global
// attachment word lacks any of the required attachment flags.
57 if ((trackClusters[tracks[
i].FirstClusterRef() +
j].
state & flagsReject) || (merger.ClusterAttachment()[trackClusters[tracks[
i].FirstClusterRef() +
j].
num] & flagsRequired) != flagsRequired) {
// Optionally drop clusters that are not on the same track leg as the track's
// last cluster (i.e. secondary legs of looping tracks).
60 if (merger.Param().rec.tpc.dropSecondaryLegsInOutput && trackClusters[tracks[
i].FirstClusterRef() +
j].
leg != trackClusters[tracks[
i].FirstClusterRef() + tracks[
i].NClusters() - 1].
leg) {
// Enforce the configured minimum number of clusters for a final track
// (-1 disables the cut).
71 if (merger.Param().rec.tpc.minNClustersFinalTrack != -1 && nCl < (uint32_t)merger.Param().rec.tpc.minNClustersFinalTrack) {
// dE/dx quality cut: reject when dEdxMax is below the absolute threshold or
// below minTrackdEdxMax2Tot * dEdxTot — except for tracks with
// dEdxMaxTPC == 0 and |dz/ds| > 0.03, which are exempted.
74 if (cutOnTrackdEdx && (tracksdEdx[
i].dEdxMaxTPC < merger.Param().rec.tpc.minTrackdEdxMax || tracksdEdx[
i].
dEdxMaxTPC < tracksdEdx[
i].
dEdxTotTPC * merger.Param().rec.tpc.minTrackdEdxMax2Tot) && !(tracksdEdx[
i].dEdxMaxTPC == 0 && CAMath::Abs(tracks[
i].GetParam().GetDzDs()) > 0.03f)) {
// Atomically reserve one output-track slot and the cluster-reference space:
// nCl 32-bit cluster indices plus 2*nCl bytes (sector + row per cluster)
// packed into (nCl + 1) / 2 extra 32-bit words.
77 uint32_t myId = CAMath::AtomicAdd(&merger.Memory()->nO2Tracks, 1u);
78 tmpData[
i] = {nCl, CAMath::AtomicAdd(&merger.Memory()->nO2ClusRefs, nCl + (nCl + 1) / 2)};
// Sort key for the output order: the track's time/Z offset, sign-flipped
// unless the early TPC transform is used or the track is on the C side.
79 trackSort[myId] = {
i, (merger.Param().par.earlyTpcTransform || tracks[
i].CSide()) ? tracks[
i].GetParam().GetTZOffset() : -tracks[
i].GetParam().GetTZOffset()};
// NOTE(review): fragment of the GPU TPC merger "output" pass that converts the
// merged GPU tracks into O2 TPC output tracks (TrackTPC). The enclosing
// function signature and several interior statements are elided in this chunk
// (non-contiguous embedded line numbers); the code is documented as-is.
99 constexpr float MinDelta = 0.1f;
101 GPUdEdxInfo* tracksdEdx = merger.OutputTracksdEdx();
102 GPUdEdxInfo* tracksdEdxAlt = merger.OutputTracksdEdxAlt();
103 const int32_t nTracks = merger.NOutputTracksTPCO2();
105 constexpr uint8_t flagsReject = getFlagsReject();
106 const uint32_t flagsRequired = getFlagsRequired(merger.Param().rec);
107 TrackTPC* outputTracks = merger.OutputTracksTPCO2();
108 uint32_t* clusRefs = merger.OutputClusRefsTPCO2();
// Clamp threshold keeping sin(phi) strictly inside the physical range.
112 float const SNPThresh = 0.999990f;
// Input track index for this output slot, from the sort order built in the
// prepare pass.
116 const int32_t
i = trackSort[iTmp].
x;
117 auto snpIn = tracks[
i].GetParam().GetSinPhi();
// Clamp sinPhi into [-SNPThresh, SNPThresh] (assignment bodies elided here).
118 if (snpIn > SNPThresh) {
120 }
else if (snpIn < -SNPThresh) {
// Fill the output track with the refitted parameters and the full 15-element
// covariance matrix.
123 oTrack.set(tracks[
i].GetParam().GetX(), tracks[
i].GetAlpha(),
124 {tracks[
i].GetParam().GetY(), tracks[
i].GetParam().GetZ(), snpIn, tracks[
i].GetParam().GetDzDs(), tracks[
i].GetParam().GetQPt()},
125 {tracks[
i].GetParam().GetCov(0),
126 tracks[
i].GetParam().GetCov(1), tracks[
i].GetParam().GetCov(2),
127 tracks[
i].GetParam().GetCov(3), tracks[
i].GetParam().GetCov(4), tracks[
i].GetParam().GetCov(5),
128 tracks[
i].GetParam().GetCov(6), tracks[
i].GetParam().GetCov(7), tracks[
i].GetParam().GetCov(8), tracks[
i].GetParam().GetCov(9),
129 tracks[
i].GetParam().GetCov(10), tracks[
i].GetParam().GetCov(11), tracks[
i].GetParam().GetCov(12), tracks[
i].GetParam().GetCov(13), tracks[
i].GetParam().GetCov(14)});
131 oTrack.setChi2(tracks[
i].GetParam().GetChi2());
132 auto& outerPar = tracks[
i].OuterParam();
// Attach dE/dx info (main and alternative) when dE/dx was computed.
133 if (merger.Param().par.dodEdx && merger.Param().dodEdxDownscaled) {
134 oTrack.setdEdx(tracksdEdx[
i]);
135 oTrack.setdEdxAlt(tracksdEdxAlt[
i]);
// Same sinPhi clamping for the outer parameters (assignment bodies elided).
138 auto snpOut = outerPar.P[2];
139 if (snpOut > SNPThresh) {
141 }
else if (snpOut < -SNPThresh) {
// Outer track parameters + covariance; the call that opens this argument list
// is elided in this chunk (presumably setting the outer param — TODO confirm).
145 outerPar.X, outerPar.alpha,
146 {outerPar.P[0], outerPar.P[1], snpOut, outerPar.P[3], outerPar.P[4]},
147 {outerPar.C[0], outerPar.C[1], outerPar.C[2], outerPar.C[3], outerPar.C[4], outerPar.C[5],
148 outerPar.C[6], outerPar.C[7], outerPar.C[8], outerPar.C[9], outerPar.C[10], outerPar.C[11],
149 outerPar.C[12], outerPar.C[13], outerPar.C[14]}));
// Optional PID estimation from dE/dx; the PID is stored on both the inner and
// outer parameters (use of pidRemap is elided in this chunk).
151 if (merger.Param().par.dodEdx && merger.Param().dodEdxDownscaled && merger.Param().rec.tpc.enablePID) {
153 auto pid = pidResponse.getMostProbablePID(oTrack, merger.Param().rec.tpc.PID_EKrangeMin, merger.Param().rec.tpc.PID_EKrangeMax, merger.Param().rec.tpc.PID_EPrangeMin, merger.Param().rec.tpc.PID_EPrangeMax, merger.Param().rec.tpc.PID_EDrangeMin, merger.Param().rec.tpc.PID_EDrangeMax, merger.Param().rec.tpc.PID_ETrangeMin, merger.Param().rec.tpc.PID_ETrangeMax, merger.Param().rec.tpc.PID_useNsigma, merger.Param().rec.tpc.PID_sigma);
154 auto pidRemap = merger.Param().rec.tpc.PID_remap[
pid];
158 oTrack.setPID(
pid,
true);
159 oTrack.getParamOut().setPID(
pid,
true);
// Cluster-reference block reserved in the prepare pass: nOutCl 32-bit cluster
// indices, followed by nOutCl sector bytes and nOutCl row bytes.
162 uint32_t nOutCl = tmpData[
i].
x;
163 uint32_t clBuff = tmpData[
i].
y;
164 oTrack.setClusterRef(clBuff, nOutCl);
165 uint32_t* clIndArr = &clusRefs[clBuff];
166 uint8_t* sectorIndexArr =
reinterpret_cast<uint8_t*
>(clIndArr + nOutCl);
167 uint8_t* rowIndexArr = sectorIndexArr + nOutCl;
168 uint32_t nOutCl2 = 0;
// t1/t2: times of the first and last accepted clusters; sector1/sector2 their
// sectors (the assignments of sector1/sector2 are elided in this chunk).
170 float t1 = 0, t2 = 0;
171 int32_t sector1 = 0, sector2 = 0;
// Second pass over the track's clusters applying the same cuts as the prepare
// pass, writing per-cluster index/sector/row into the reference block.
173 for (uint32_t
j = 0;
j < tracks[
i].NClusters();
j++) {
174 if ((trackClusters[tracks[
i].FirstClusterRef() +
j].
state & flagsReject) || (merger.ClusterAttachment()[trackClusters[tracks[
i].FirstClusterRef() +
j].
num] & flagsRequired) != flagsRequired) {
177 if (merger.Param().rec.tpc.dropSecondaryLegsInOutput && trackClusters[tracks[
i].FirstClusterRef() +
j].
leg != trackClusters[tracks[
i].FirstClusterRef() + tracks[
i].NClusters() - 1].
leg) {
// Convert the global cluster id into a per-(sector,row) index via the
// cluster offset table.
180 int32_t clusterIdGlobal = trackClusters[tracks[
i].FirstClusterRef() +
j].
num;
181 int32_t sector = trackClusters[tracks[
i].FirstClusterRef() +
j].
sector;
182 int32_t globalRow = trackClusters[tracks[
i].FirstClusterRef() +
j].
row;
183 int32_t clusterIdInRow = clusterIdGlobal -
clusters->clusterOffset[sector][globalRow];
184 clIndArr[nOutCl2] = clusterIdInRow;
185 sectorIndexArr[nOutCl2] = sector;
186 rowIndexArr[nOutCl2] = globalRow;
// Record the time of the first accepted cluster (the guard selecting the
// first iteration is elided in this chunk)...
188 t1 =
clusters->clustersLinear[clusterIdGlobal].getTime();
// ...and of the last one, once all nOutCl accepted clusters are stored.
191 if (++nOutCl2 == nOutCl) {
192 t2 =
clusters->clustersLinear[clusterIdGlobal].getTime();
// In continuous (triggerless) mode, derive the allowed time window of the
// track from its cluster times and the maximum drift time.
198 float time0 = 0.f, tFwd = 0.f, tBwd = 0.f;
199 if (merger.Param().par.continuousTracking) {
200 time0 = tracks[
i].GetParam().GetTZOffset();
// Detect tracks with clusters on both TPC sides (side given by
// sector < MAXSECTOR / 2).
202 bool lastSide = trackClusters[tracks[
i].FirstClusterRef()].
sector <
MAXSECTOR / 2;
204 for (uint32_t iCl = 1; iCl < tracks[
i].NClusters(); iCl++) {
205 auto& cacl1 = trackClusters[tracks[
i].FirstClusterRef() + iCl];
206 if (lastSide ^ (cacl1.sector <
MAXSECTOR / 2)) {
// Side change between adjacent clusters: use half their time difference as
// the time uncertainty, bounded below by MinDelta (branch body elided).
207 auto& cl1 =
clusters->clustersLinear[cacl1.num];
208 auto& cl2 =
clusters->clustersLinear[trackClusters[tracks[
i].FirstClusterRef() + iCl - 1].
num];
209 delta = CAMath::Abs(cl1.getTime() - cl2.getTime()) * 0.5f;
210 if (delta < MinDelta) {
// Constrain time0 and the forward/backward time uncertainties by the
// earliest/latest cluster time, the cluster T0 and the maximum drift time.
219 const float tmin = CAMath::Min(
t1, t2);
220 const float maxDriftTime = merger.GetConstantMem()->calibObjects.fastTransformHelper->getCorrMap()->getMaxDriftTime(
t1 > t2 ? sector1 : sector2);
221 const float clusterT0 = merger.GetConstantMem()->calibObjects.fastTransformHelper->getCorrMap()->getT0();
222 const float tmax = CAMath::Min(tmin + maxDriftTime, CAMath::Max(
t1, t2));
224 if (time0 + maxDriftTime < tmax) {
225 delta = tmax - time0 - maxDriftTime;
227 if (tmin - clusterT0 < time0 + delta) {
228 delta = tmin - clusterT0 - time0;
// Shift the track Z by the spatial equivalent of the applied time shift.
232 const float deltaZ = merger.GetConstantMem()->calibObjects.fastTransformHelper->getCorrMap()->convDeltaTimeToDeltaZinTimeFrame(sector2, delta);
233 oTrack.setZ(oTrack.getZ() + deltaZ);
235 tFwd = tmin - clusterT0 - time0;
236 tBwd = time0 - tmax + maxDriftTime;
242 oTrack.setTime0(time0);
243 oTrack.setDeltaTBwd(tBwd);
244 oTrack.setDeltaTFwd(tFwd);
// Both side flags are set here (presumably for tracks with clusters on both
// sides — the enclosing condition is elided); otherwise the side follows the
// track's CSide flag.
246 oTrack.setHasCSideClusters();
247 oTrack.setHasASideClusters();
248 }
else if (tracks[
i].CSide()) {
249 oTrack.setHasCSideClusters();
251 oTrack.setHasASideClusters();
// Store the finished track in the output array at its sorted position.
253 outputTracks[iTmp] = oTrack;