GPUTPCGMO2Output.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file GPUTPCGMO2Output.cxx
/// \author David Rohr

#include "GPUTPCDef.h"
#include "GPUTPCGMO2Output.h"
#include "GPUCommonAlgorithm.h"
#include "GPUTPCGMMerger.h"
#include "GPUO2DataTypes.h"
#include "TPCFastTransformPOD.h"
#include "GPUGetConstexpr.h"

#ifndef GPUCA_GPUCODE
#include "GPUQAHelper.h"
#endif

using namespace o2::gpu;
using namespace o2::tpc;
using namespace o2::tpc::constants;

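// Clusters carrying any of these state flags are excluded from the O2 output.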
GPUdi() static constexpr uint8_t getFlagsReject() { return GPUTPCGMMergedTrackHit::flagReject | GPUTPCGMMergedTrackHit::flagHighIncl; }

namespace o2::gpu::internal
{

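// Comparator for sorting the output tracks by decreasing sort key (tmpSort.y);
// GPUCA_DETERMINISTIC_CODE(det, indet) selects the first variant in deterministic
// builds, which breaks ties by the track index (tmpSort.x).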
struct GPUTPCGMO2OutputSort_comp {
  GPUd() bool operator()(const GPUTPCGMMerger::tmpSort& a, const GPUTPCGMMerger::tmpSort& b)
  {
    return GPUCA_DETERMINISTIC_CODE(a.y != b.y ? a.y > b.y : a.x > b.x, a.y > b.y);
  }
};

} // namespace o2::gpu::internal

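// prepare: select the merged tracks to be written to the O2 output, count their
// accepted clusters, and reserve per-track space in the output buffers.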
template <>
GPUdii() void GPUTPCGMO2Output::Thread<GPUTPCGMO2Output::prepare>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() merger)
{
  const GPUTPCGMMergedTrack* tracks = merger.MergedTracks();
  const uint32_t nTracks = merger.NMergedTracks();
  const GPUTPCGMMergedTrackHit* trackClusters = merger.Clusters();
  const GPUdEdxInfo* tracksdEdx = merger.MergedTracksdEdx();

  constexpr uint8_t flagsReject = getFlagsReject();
  bool cutOnTrackdEdx = merger.Param().par.dodEdx && merger.Param().dodEdxEnabled && merger.Param().rec.tpc.minTrackdEdxMax2Tot > 0.f;

  GPUTPCGMMerger::tmpSort* GPUrestrict() trackSort = merger.TrackSortO2();
  uint2* GPUrestrict() tmpData = merger.ClusRefTmp();
  for (uint32_t i = get_global_id(0); i < nTracks; i += get_global_size(0)) {
    if (!tracks[i].OK()) {
      continue;
    }
    if (tracks[i].MergedLooper()) {
      continue;
    }

    uint32_t nCl = 0;
    for (uint32_t j = 0; j < tracks[i].NClusters(); j++) {
      if ((trackClusters[tracks[i].FirstClusterRef() + j].state & flagsReject)) {
        continue;
      }
      nCl++;
    }
    if (nCl == 0) {
      continue;
    }
    if (nCl + 2 < merger.Param().tpcMinHitsB5(tracks[i].GetParam().GetQPt() * merger.Param().qptB5Scaler)) { // Give 2 hits tolerance in the primary leg, compared to the full fit of the looper
      continue;
    }
    if (merger.Param().rec.tpc.minNClustersFinalTrack != -1 && nCl < (uint32_t)merger.Param().rec.tpc.minNClustersFinalTrack) {
      continue;
    }
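    // Optional dE/dx quality cut: reject tracks whose Qmax-based dE/dx is below
    // the absolute threshold or below the given fraction of the Qtot-based value,
    // except when dE/dx is unset (0) for a sufficiently inclined track.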
    if (cutOnTrackdEdx && (tracksdEdx[i].dEdxMaxTPC < merger.Param().rec.tpc.minTrackdEdxMax || tracksdEdx[i].dEdxMaxTPC < tracksdEdx[i].dEdxTotTPC * merger.Param().rec.tpc.minTrackdEdxMax2Tot) && !(tracksdEdx[i].dEdxMaxTPC == 0 && CAMath::Abs(tracks[i].GetParam().GetDzDs()) > 0.03f)) {
      continue;
    }
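    // Reserve an output slot and space in the cluster-reference buffer: each
    // cluster needs one uint32_t index plus one sector byte and one row byte,
    // i.e. nCl + (nCl + 1) / 2 uint32_t words in total.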
    uint32_t myId = CAMath::AtomicAdd(&merger.Memory()->nO2Tracks, 1u);
    tmpData[i] = {nCl, CAMath::AtomicAdd(&merger.Memory()->nO2ClusRefs, nCl + (nCl + 1) / 2)};
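    // Sort key: the track time offset, sign-flipped for A-side tracks; the
    // comparator above orders these keys in descending order.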
    trackSort[myId] = {i, tracks[i].CSide() ? tracks[i].GetParam().GetTOffset() : -tracks[i].GetParam().GetTOffset()};
  }
}

template <>
GPUdii() void GPUTPCGMO2Output::Thread<GPUTPCGMO2Output::sort>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() merger)
{
#ifndef GPUCA_SPECIALIZE_THRUST_SORTS
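  // Fallback path when the specialized Thrust sort is not compiled in: a single
  // thread runs the generic device-side sort over all selected tracks.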
  if (iThread == 0 && iBlock == 0) {
    GPUTPCGMMerger::tmpSort* GPUrestrict() trackSort = merger.TrackSortO2();
    GPUCommonAlgorithm::sortDeviceDynamic(trackSort, trackSort + merger.Memory()->nO2Tracks, internal::GPUTPCGMO2OutputSort_comp());
  }
#endif
}

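// output: convert each selected track to an o2::tpc::TrackTPC, fill its cluster
// references, and determine its time bracket and side(s).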
template <>
GPUdii() void GPUTPCGMO2Output::Thread<GPUTPCGMO2Output::output>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() merger)
{
  constexpr float MinDelta = 0.1f;
  const GPUTPCGMMergedTrack* tracks = merger.MergedTracks();
  GPUdEdxInfo* tracksdEdx = merger.MergedTracksdEdx();
  GPUdEdxInfo* tracksdEdxAlt = merger.MergedTracksdEdxAlt();
  const int32_t nTracks = merger.NOutputTracksTPCO2();
  const GPUTPCGMMergedTrackHit* trackClusters = merger.Clusters();
  constexpr uint8_t flagsReject = getFlagsReject();
  TrackTPC* outputTracks = merger.OutputTracksTPCO2();
  uint32_t* clusRefs = merger.OutputClusRefsTPCO2();
  const auto& param = merger.Param();

  GPUTPCGMMerger::tmpSort* GPUrestrict() trackSort = merger.TrackSortO2();
  uint2* GPUrestrict() tmpData = merger.ClusRefTmp();
  float const SNPThresh = 0.999990f;

  for (int32_t iTmp = get_global_id(0); iTmp < nTracks; iTmp += get_global_size(0)) {
    TrackTPC oTrack;
    const int32_t i = trackSort[iTmp].x;
    const auto& track = tracks[i];
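    // Clamp sin(phi) to just below 1 to keep the track parametrization valid.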
    auto snpIn = track.GetParam().GetSinPhi();
    if (snpIn > SNPThresh) {
      snpIn = SNPThresh;
    } else if (snpIn < -SNPThresh) {
      snpIn = -SNPThresh;
    }
    oTrack.set(track.GetParam().GetX(), track.GetAlpha(),
               {track.GetParam().GetY(), track.GetParam().GetZ(), snpIn, track.GetParam().GetDzDs(), track.GetParam().GetQPt()},
               {track.GetParam().GetCov(0),
                track.GetParam().GetCov(1), track.GetParam().GetCov(2),
                track.GetParam().GetCov(3), track.GetParam().GetCov(4), track.GetParam().GetCov(5),
                track.GetParam().GetCov(6), track.GetParam().GetCov(7), track.GetParam().GetCov(8), track.GetParam().GetCov(9),
                track.GetParam().GetCov(10), track.GetParam().GetCov(11), track.GetParam().GetCov(12), track.GetParam().GetCov(13), track.GetParam().GetCov(14)});

    oTrack.setChi2(track.GetParam().GetChi2());
    auto& outerPar = track.OuterParam();
    if (param.par.dodEdx) {
      if (param.dodEdxEnabled) {
        oTrack.setdEdx(tracksdEdx[i]);
        if GPUCA_RTC_CONSTEXPR (GPUCA_GET_CONSTEXPR(param.rec.tpc, dEdxClusterRejectionFlagMask) != GPUCA_GET_CONSTEXPR(param.rec.tpc, dEdxClusterRejectionFlagMaskAlt)) {
          oTrack.setdEdxAlt(tracksdEdxAlt[i]);
        } else {
          oTrack.setdEdxAlt(tracksdEdx[i]);
        }
      }
    }

    auto snpOut = outerPar.P[2];
    if (snpOut > SNPThresh) {
      snpOut = SNPThresh;
    } else if (snpOut < -SNPThresh) {
      snpOut = -SNPThresh;
    }
    oTrack.setOuterParam(o2::track::TrackParCov(
      outerPar.X, outerPar.alpha,
      {outerPar.P[0], outerPar.P[1], snpOut, outerPar.P[3], outerPar.P[4]},
      {outerPar.C[0], outerPar.C[1], outerPar.C[2], outerPar.C[3], outerPar.C[4], outerPar.C[5],
       outerPar.C[6], outerPar.C[7], outerPar.C[8], outerPar.C[9], outerPar.C[10], outerPar.C[11],
       outerPar.C[12], outerPar.C[13], outerPar.C[14]}));

    if (param.par.dodEdx && param.dodEdxEnabled && param.rec.tpc.enablePID) {
      PIDResponse pidResponse{};
      auto pid = pidResponse.getMostProbablePID(oTrack, param.rec.tpc.PID_EKrangeMin, param.rec.tpc.PID_EKrangeMax, param.rec.tpc.PID_EPrangeMin, param.rec.tpc.PID_EPrangeMax, param.rec.tpc.PID_EDrangeMin, param.rec.tpc.PID_EDrangeMax, param.rec.tpc.PID_ETrangeMin, merger.Param().rec.tpc.PID_ETrangeMax, merger.Param().rec.tpc.PID_useNsigma, merger.Param().rec.tpc.PID_sigma);
      auto pidRemap = merger.Param().rec.tpc.PID_remap[pid];
      if (pidRemap >= 0) {
        pid = pidRemap;
      }
      oTrack.setPID(pid, true);
      oTrack.getParamOut().setPID(pid, true);
    }

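    // Unpack the cluster-reference block reserved in the prepare step: nOutCl
    // uint32_t in-row cluster indices, followed by nOutCl sector bytes and
    // nOutCl row bytes packed into the same uint32_t buffer.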
    uint32_t nOutCl = tmpData[i].x;
    uint32_t clBuff = tmpData[i].y;
    oTrack.setClusterRef(clBuff, nOutCl);
    uint32_t* clIndArr = &clusRefs[clBuff];
    uint8_t* sectorIndexArr = reinterpret_cast<uint8_t*>(clIndArr + nOutCl);
    uint8_t* rowIndexArr = sectorIndexArr + nOutCl;

    uint32_t nOutCl2 = 0;
    float t1 = 0, t2 = 0;
    int32_t sector1 = 0, sector2 = 0;
    const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = merger.GetConstantMem()->ioPtrs.clustersNative;
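    // Store the surviving clusters and remember time and sector of the first and
    // last accepted cluster for the time-bracket estimate below.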
    for (uint32_t j = 0; j < track.NClusters(); j++) {
      if ((trackClusters[track.FirstClusterRef() + j].state & flagsReject)) {
        continue;
      }
      int32_t clusterIdGlobal = trackClusters[track.FirstClusterRef() + j].num;
      int32_t sector = trackClusters[track.FirstClusterRef() + j].sector;
      int32_t globalRow = trackClusters[track.FirstClusterRef() + j].row;
      int32_t clusterIdInRow = clusterIdGlobal - clusters->clusterOffset[sector][globalRow];
      clIndArr[nOutCl2] = clusterIdInRow;
      sectorIndexArr[nOutCl2] = sector;
      rowIndexArr[nOutCl2] = globalRow;
      if (nOutCl2 == 0) {
        t1 = clusters->clustersLinear[clusterIdGlobal].getTime();
        sector1 = sector;
      }
      if (++nOutCl2 == nOutCl) {
        t2 = clusters->clustersLinear[clusterIdGlobal].getTime();
        sector2 = sector;
      }
    }

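    // For tracks merged from multiple segments, take the reference time and
    // sector from the first cluster of the first segment instead.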
    if (track.PrevSegment() >= 0) {
      const GPUTPCGMMergedTrack* chkTrk = track.GetFirstSegment(tracks, merger.Param().rec.enableCyclicGraphWorkarounds);
      const auto& firstPrevCluster = trackClusters[chkTrk->FirstClusterRef()];
      t1 = clusters->clustersLinear[firstPrevCluster.num].getTime();
      sector1 = firstPrevCluster.sector;
    }

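    // A track is treated as crossing the central electrode (CCE) if it was
    // flagged as such and its reference clusters lie on opposite TPC sides.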
    bool cce = track.CCE() && ((sector1 < MAXSECTOR / 2) ^ (sector2 < MAXSECTOR / 2));
    float time0 = 0.f, tFwd = 0.f, tBwd = 0.f;
    if (merger.Param().par.continuousTracking) {
      time0 = track.GetParam().GetTOffset();
      if (cce) {
        bool lastSide = trackClusters[track.FirstClusterRef()].sector < MAXSECTOR / 2;
        float delta = 0.f;
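        // Find the first pair of consecutive clusters on opposite sides; half of
        // their time difference (at least MinDelta) bounds the time uncertainty.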
        for (uint32_t iCl = 1; iCl < track.NClusters(); iCl++) {
          auto& cacl1 = trackClusters[track.FirstClusterRef() + iCl];
          if (lastSide ^ (cacl1.sector < MAXSECTOR / 2)) {
            auto& cl1 = clusters->clustersLinear[cacl1.num];
            auto& cl2 = clusters->clustersLinear[trackClusters[track.FirstClusterRef() + iCl - 1].num];
            delta = CAMath::Abs(cl1.getTime() - cl2.getTime()) * 0.5f;
            break;
          }
        }
        if (delta < MinDelta) {
          delta = MinDelta;
        }
        tFwd = tBwd = delta;
      } else {
        // estimate max/min time increments which still keep the track within the physical limits of the TPC
        const float tmin = CAMath::Min(t1, t2);
        const float maxDriftTime = merger.GetConstantMem()->calibObjects.fastTransform->getMaxDriftTime(t1 > t2 ? sector1 : sector2);
        const float clusterT0 = merger.GetConstantMem()->calibObjects.fastTransform->getT0();
        const float tmax = CAMath::Min(tmin + maxDriftTime, CAMath::Max(t1, t2));
        float delta = 0.f;
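        // Shift time0 if needed so that all clusters fit within one maximum
        // drift time, and propagate the shift to the track z position.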
        if (time0 + maxDriftTime < tmax) {
          delta = tmax - time0 - maxDriftTime;
        }
        if (tmin - clusterT0 < time0 + delta) {
          delta = tmin - clusterT0 - time0;
        }
        if (delta != 0.f) {
          time0 += delta;
          const float deltaZ = merger.GetConstantMem()->calibObjects.fastTransform->convDeltaTimeToDeltaZinTimeFrame(sector2, delta);
          oTrack.setZ(oTrack.getZ() + deltaZ);
        }
        tFwd = tmin - clusterT0 - time0;
        tBwd = time0 - tmax + maxDriftTime;
      }
    }
    if (tBwd < 0.f) {
      tBwd = 0.f;
    }
    oTrack.setTime0(time0);
    oTrack.setDeltaTBwd(tBwd);
    oTrack.setDeltaTFwd(tFwd);
    if (cce) {
      oTrack.setHasCSideClusters();
      oTrack.setHasASideClusters();
    } else if (track.CSide()) {
      oTrack.setHasCSideClusters();
    } else {
      oTrack.setHasASideClusters();
    }
    outputTracks[iTmp] = oTrack;
  }
}

template <>
GPUdii() void GPUTPCGMO2Output::Thread<GPUTPCGMO2Output::mc>(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() merger)
{
#ifndef GPUCA_GPUCODE
  const o2::tpc::ClusterNativeAccess* GPUrestrict() clusters = merger.GetConstantMem()->ioPtrs.clustersNative;
  if (clusters == nullptr || clusters->clustersMCTruth == nullptr) {
    return;
  }
  if (merger.OutputTracksTPCO2MC() == nullptr) {
    return;
  }

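  // Derive the MC label of each output track from the labels of its attached
  // clusters via GPUTPCTrkLbl (0.1f is the labeler's tolerance parameter).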
  auto labelAssigner = GPUTPCTrkLbl(clusters->clustersMCTruth, 0.1f);
  uint32_t* clusRefs = merger.OutputClusRefsTPCO2();
  for (uint32_t i = get_global_id(0); i < merger.NOutputTracksTPCO2(); i += get_global_size(0)) {
    labelAssigner.reset();
    const auto& trk = merger.OutputTracksTPCO2()[i];
    for (int32_t j = 0; j < trk.getNClusters(); j++) {
      uint8_t sectorIndex, rowIndex;
      uint32_t clusterIndex;
      trk.getClusterReference(clusRefs, j, sectorIndex, rowIndex, clusterIndex);
      uint32_t clusterIdGlobal = clusters->clusterOffset[sectorIndex][rowIndex] + clusterIndex;
      labelAssigner.addLabel(clusterIdGlobal);
    }
    merger.OutputTracksTPCO2MC()[i] = labelAssigner.computeLabel();
  }
#endif
}