Project
Loading...
Searching...
No Matches
GPUReconstructionTimeframe.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
16#include "GPUReconstruction.h"
17#include "GPUChainTracking.h"
18#include "GPUChainTrackingGetters.inc"
20#include "GPUQA.h"
22#include "GPUTPCMCInfo.h"
23#include "GPUTPCClusterData.h"
24#include "AliHLTTPCRawCluster.h"
25#include "TPCFastTransform.h"
27#include "GPUO2DataTypes.h"
28
29#include <cstdio>
30#include <exception>
31#include <memory>
32#include <cstring>
33
34#include "utils/qconfig.h"
35
using namespace o2::gpu;

namespace o2::gpu
{
// Global standalone-benchmark settings; defined in the standalone tool's main TU.
extern GPUSettingsStandalone configStandalone;
} // namespace o2::gpu
// File-local shorthand for the timeframe-simulation settings subtree used throughout this file.
static auto& config = configStandalone.TF;
43
// Construct the timeframe builder.
// chain:   tracking chain whose IO pointers/memory are filled by event reading.
// read:    callback loading event i into the chain's IO buffers.
// nEvents: number of events available on disk for mixing.
// mRndGen2 is seeded from mRndGen1 so in-TF and out-of-TF sampling streams are decoupled.
// Throws std::exception on inconsistent configuration.
GPUReconstructionTimeframe::GPUReconstructionTimeframe(GPUChainTracking* chain, int32_t (*read)(int32_t), int32_t nEvents) : mChain(chain), mReadEvent(read), mNEventsInDirectory(nEvents), mDisUniReal(0.f, 1.f), mRndGen1(configStandalone.seed), mRndGen2(mDisUniInt(mRndGen1))
{
  // Bunches per orbit, with and without the abort gap at the end of the orbit.
  mMaxBunchesFull = TIME_ORBIT / config.bunchSpacing;
  mMaxBunches = (TIME_ORBIT - config.abortGapTime) / config.bunchSpacing;

  if (config.overlayRaw && chain->GetTPCTransformHelper() == nullptr) {
    // Raw-cluster overlay shifts cluster times, which needs the drift-velocity from the transform.
    GPUInfo("Overlay Raw Events requires TPC Fast Transform");
    throw std::exception();
  }
  if (config.bunchSim) {
    if (config.bunchCount * config.bunchTrainCount > mMaxBunches) {
      GPUInfo("Invalid timeframe settings: too many colliding bunches requested!");
      throw std::exception();
    }
    // Spread the trains evenly over the (abort-gap-free) orbit.
    mTrainDist = mMaxBunches / config.bunchTrainCount;
    // Per-filled-bunch collision probability chosen so the average rate over the full orbit
    // matches the requested interaction rate (bunchSpacing is in ns, hence the 1e9).
    mCollisionProbability = (float)config.interactionRate * (float)(mMaxBunchesFull * config.bunchSpacing / 1e9f) / (float)(config.bunchCount * config.bunchTrainCount);
    GPUInfo("Timeframe settings: %d trains of %d bunches, bunch spacing: %d, train spacing: %dx%d, filled bunches %d / %d (%d), collision probability %f, mixing %d events", config.bunchTrainCount, config.bunchCount, config.bunchSpacing, mTrainDist, config.bunchSpacing,
            config.bunchCount * config.bunchTrainCount, mMaxBunches, mMaxBunchesFull, mCollisionProbability, mNEventsInDirectory);
  }

  mEventStride = configStandalone.seed;
  mSimBunchNoRepeatEvent = configStandalone.StartEvent;
  mEventUsed.resize(mNEventsInDirectory);
  if (config.noEventRepeat == 2) {
    // Mode 2 never resets the used-flags during simulation, so clear them once here.
    memset(mEventUsed.data(), 0, mNEventsInDirectory * sizeof(mEventUsed[0]));
  }
}
71
72int32_t GPUReconstructionTimeframe::ReadEventShifted(int32_t iEvent, float shiftZ, float minZ, float maxZ, bool silent)
73{
74 mReadEvent(iEvent);
75 if (config.overlayRaw) {
76 float shiftTTotal = (((double)config.timeFrameLen - DRIFT_TIME) * ((double)TPCZ / (double)DRIFT_TIME) - shiftZ) / mChain->GetTPCTransformHelper()->getCorrMap()->getVDrift();
77 for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
78 for (uint32_t j = 0; j < mChain->mIOPtrs.nRawClusters[iSector]; j++) {
79 auto& tmp = mChain->mIOMem.rawClusters[iSector][j];
80 tmp.fTime += shiftTTotal;
81 }
82 }
83 }
84 if (shiftZ != 0.f) {
85 for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
86 for (uint32_t j = 0; j < mChain->mIOPtrs.nClusterData[iSector]; j++) {
87 auto& tmp = mChain->mIOMem.clusterData[iSector][j];
88 tmp.z += iSector < NSECTORS / 2 ? shiftZ : -shiftZ;
89 }
90 }
91 for (uint32_t i = 0; i < mChain->mIOPtrs.nMCInfosTPC; i++) {
92 auto& tmp = mChain->mIOMem.mcInfosTPC[i];
93 tmp.z += i < NSECTORS / 2 ? shiftZ : -shiftZ;
94 }
95 }
96
97 // Remove clusters outside boundaries
98 uint32_t nClusters = 0;
99 uint32_t removed = 0;
100 if (minZ > -1e6 || maxZ > -1e6) {
101 uint32_t currentClusterTotal = 0;
102 for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
103 uint32_t currentClusterSector = 0;
104 bool doRaw = config.overlayRaw && mChain->mIOPtrs.nClusterData[iSector] == mChain->mIOPtrs.nRawClusters[iSector];
105 for (uint32_t i = 0; i < mChain->mIOPtrs.nClusterData[iSector]; i++) {
106 float sign = iSector < NSECTORS / 2 ? 1 : -1;
107 if (sign * mChain->mIOMem.clusterData[iSector][i].z >= minZ && sign * mChain->mIOMem.clusterData[iSector][i].z <= maxZ) {
108 if (currentClusterSector != i) {
109 mChain->mIOMem.clusterData[iSector][currentClusterSector] = mChain->mIOMem.clusterData[iSector][i];
110 if (doRaw) {
111 mChain->mIOMem.rawClusters[iSector][currentClusterSector] = mChain->mIOMem.rawClusters[iSector][i];
112 }
113 }
114 if (mChain->mIOPtrs.nMCLabelsTPC > currentClusterTotal && nClusters != currentClusterTotal) {
115 mChain->mIOMem.mcLabelsTPC[nClusters] = mChain->mIOMem.mcLabelsTPC[currentClusterTotal];
116 }
117 // GPUInfo("Keeping Cluster ID %d (ID in sector %d) Z=%f (sector %d) --> %d (sector %d)", currentClusterTotal, i, mChain->mIOMem.clusterData[iSector][i].fZ, iSector, nClusters, currentClusterSector);
118 currentClusterSector++;
119 nClusters++;
120 } else {
121 // GPUInfo("Removing Cluster ID %d (ID in sector %d) Z=%f (sector %d)", currentClusterTotal, i, mChain->mIOMem.clusterData[iSector][i].fZ, iSector);
122 removed++;
123 }
124 currentClusterTotal++;
125 }
126 mChain->mIOPtrs.nClusterData[iSector] = currentClusterSector;
127 if (doRaw) {
128 mChain->mIOPtrs.nRawClusters[iSector] = currentClusterSector;
129 }
130 }
131 if (mChain->mIOPtrs.nMCLabelsTPC) {
133 }
134 } else {
135 for (uint32_t i = 0; i < NSECTORS; i++) {
136 nClusters += mChain->mIOPtrs.nClusterData[i];
137 }
138 }
139
140 if (!silent) {
141 GPUInfo("Read %u Clusters with %d MC labels and %d MC tracks", nClusters, (int32_t)mChain->mIOPtrs.nMCLabelsTPC, (int32_t)mChain->mIOPtrs.nMCInfosTPC);
142 if (minZ > -1e6 || maxZ > 1e6) {
143 GPUInfo("\tRemoved %u / %u clusters", removed, nClusters + removed);
144 }
145 }
146
147 mShiftedEvents.emplace_back(mChain->mIOPtrs, std::move(mChain->mIOMem), mChain->mIOPtrs.clustersNative ? *mChain->mIOPtrs.clustersNative : o2::tpc::ClusterNativeAccess());
148 return nClusters;
149}
150
152{
153 mChain->ClearIOPointers();
154 for (uint32_t i = 0; i < mShiftedEvents.size(); i++) {
155 auto& ptr = std::get<0>(mShiftedEvents[i]);
156 for (uint32_t j = 0; j < NSECTORS; j++) {
157 mChain->mIOPtrs.nClusterData[j] += ptr.nClusterData[j];
158 if (config.overlayRaw) {
159 mChain->mIOPtrs.nRawClusters[j] += ptr.nRawClusters[j];
160 }
161 }
162 mChain->mIOPtrs.nMCLabelsTPC += ptr.nMCLabelsTPC;
163 mChain->mIOPtrs.nMCInfosTPC += ptr.nMCInfosTPC;
164 mChain->mIOPtrs.nMCInfosTPCCol += ptr.nMCInfosTPCCol;
165 SetDisplayInformation(i);
166 }
167 uint32_t nClustersTotal = 0;
168 uint32_t nClustersTotalRaw = 0;
169 uint32_t nClustersSectorOffset[NSECTORS] = {0};
170 for (uint32_t i = 0; i < NSECTORS; i++) {
171 nClustersSectorOffset[i] = nClustersTotal;
172 nClustersTotal += mChain->mIOPtrs.nClusterData[i];
173 nClustersTotalRaw += mChain->mIOPtrs.nRawClusters[i];
174 }
175 if (nClustersTotalRaw && nClustersTotalRaw != nClustersTotal) {
176 GPUError("Inconsitency between raw clusters and cluster data");
177 throw std::exception();
178 }
179 if (mChain->mIOPtrs.nMCLabelsTPC && nClustersTotal != mChain->mIOPtrs.nMCLabelsTPC) {
180 GPUError("Inconsitency between TPC clusters and MC labels");
181 throw std::exception();
182 }
183 mChain->AllocateIOMemory();
184 mChain->mIOPtrs.clustersNative = nullptr;
185
186 uint32_t nTrackOffset = 0;
187 uint32_t nColOffset = 0;
188 uint32_t nClustersEventOffset[NSECTORS] = {0};
189 for (uint32_t i = 0; i < mShiftedEvents.size(); i++) {
190 auto& ptr = std::get<0>(mShiftedEvents[i]);
191 uint32_t inEventOffset = 0;
192 for (uint32_t j = 0; j < NSECTORS; j++) {
193 memcpy((void*)&mChain->mIOMem.clusterData[j][nClustersEventOffset[j]], (void*)ptr.clusterData[j], ptr.nClusterData[j] * sizeof(ptr.clusterData[j][0]));
194 if (nClustersTotalRaw) {
195 memcpy((void*)&mChain->mIOMem.rawClusters[j][nClustersEventOffset[j]], (void*)ptr.rawClusters[j], ptr.nRawClusters[j] * sizeof(ptr.rawClusters[j][0]));
196 }
197 if (mChain->mIOPtrs.nMCLabelsTPC) {
198 memcpy((void*)&mChain->mIOMem.mcLabelsTPC[nClustersSectorOffset[j] + nClustersEventOffset[j]], (void*)&ptr.mcLabelsTPC[inEventOffset], ptr.nClusterData[j] * sizeof(ptr.mcLabelsTPC[0]));
199 }
200 for (uint32_t k = 0; k < ptr.nClusterData[j]; k++) {
201 mChain->mIOMem.clusterData[j][nClustersEventOffset[j] + k].id = nClustersSectorOffset[j] + nClustersEventOffset[j] + k;
202 if (mChain->mIOPtrs.nMCLabelsTPC) {
203 for (int32_t l = 0; l < 3; l++) {
204 auto& label = mChain->mIOMem.mcLabelsTPC[nClustersSectorOffset[j] + nClustersEventOffset[j] + k].fClusterID[l];
205 if (label.fMCID >= 0) {
206 label.fMCID += nTrackOffset;
207 }
208 }
209 }
210 }
211
212 nClustersEventOffset[j] += ptr.nClusterData[j];
213 inEventOffset += ptr.nClusterData[j];
214 }
215
216 memcpy((void*)&mChain->mIOMem.mcInfosTPC[nTrackOffset], (void*)ptr.mcInfosTPC, ptr.nMCInfosTPC * sizeof(ptr.mcInfosTPC[0]));
217 for (uint32_t j = 0; j < ptr.nMCInfosTPCCol; j++) {
218 mChain->mIOMem.mcInfosTPCCol[nColOffset + j] = ptr.mcInfosTPCCol[j];
219 mChain->mIOMem.mcInfosTPCCol[nColOffset + j].first += nTrackOffset;
220 }
221 nTrackOffset += ptr.nMCInfosTPC;
222 nColOffset += ptr.nMCInfosTPCCol;
223 }
224
225 GPUInfo("Merged %d events, %u clusters total", (int32_t)mShiftedEvents.size(), nClustersTotal);
226
227 mShiftedEvents.clear();
228}
229
231{
232 if (config.nTotalEventsInTF && mNTotalCollisions >= config.nTotalEventsInTF) {
233 return (2);
234 }
235
236 int64_t nBunch = -DRIFT_TIME / config.bunchSpacing;
237 int64_t lastBunch = config.timeFrameLen / config.bunchSpacing;
238 int64_t lastTFBunch = lastBunch - DRIFT_TIME / config.bunchSpacing;
239 int32_t nCollisions = 0, nBorderCollisions = 0, nTrainCollissions = 0, nMultipleCollisions = 0, nTrainMultipleCollisions = 0;
240 int32_t nTrain = 0;
241 int32_t mcMin = -1, mcMax = -1;
242 uint32_t nTotalClusters = 0;
243 while (nBunch < lastBunch) {
244 for (int32_t iTrain = 0; iTrain < config.bunchTrainCount && nBunch < lastBunch; iTrain++) {
245 int32_t nCollisionsInTrain = 0;
246 for (int32_t iBunch = 0; iBunch < config.bunchCount && nBunch < lastBunch; iBunch++) {
247 const bool inTF = nBunch >= 0 && nBunch < lastTFBunch && (config.nTotalEventsInTF == 0 || nCollisions < mNTotalCollisions + config.nTotalEventsInTF);
248 if (mcMin == -1 && inTF) {
249 mcMin = mChain->mIOPtrs.nMCInfosTPC;
250 }
251 if (mcMax == -1 && nBunch >= 0 && !inTF) {
252 mcMax = mChain->mIOPtrs.nMCInfosTPC;
253 }
254 int32_t nInBunchPileUp = 0;
255 double randVal = mDisUniReal(inTF ? mRndGen2 : mRndGen1);
256 double p = exp(-mCollisionProbability);
257 double p2 = p;
258 while (randVal > p) {
259 if (config.noBorder && (nBunch < 0 || nBunch >= lastTFBunch)) {
260 break;
261 }
262 if (nCollisionsInTrain >= mNEventsInDirectory) {
263 GPUError("Error: insuffient events for mixing!");
264 return (1);
265 }
266 if (nCollisionsInTrain == 0 && config.noEventRepeat == 0) {
267 memset(mEventUsed.data(), 0, mNEventsInDirectory * sizeof(mEventUsed[0]));
268 }
269 if (inTF) {
270 nCollisions++;
271 } else {
272 nBorderCollisions++;
273 }
274 int32_t useEvent;
275 if (config.noEventRepeat == 1) {
276 useEvent = mSimBunchNoRepeatEvent;
277 } else {
278 while (mEventUsed[useEvent = (inTF && config.eventStride ? (mEventStride += config.eventStride) : mDisUniInt(inTF ? mRndGen2 : mRndGen1)) % mNEventsInDirectory]) {
279 ;
280 }
281 }
282 if (config.noEventRepeat) {
283 mSimBunchNoRepeatEvent++;
284 }
285 mEventUsed[useEvent] = 1;
286 double shift = (double)nBunch * (double)config.bunchSpacing * (double)TPCZ / (double)DRIFT_TIME;
287 int32_t nClusters = ReadEventShifted(useEvent, shift, 0, (double)config.timeFrameLen * (double)TPCZ / (double)DRIFT_TIME, true);
288 if (nClusters < 0) {
289 GPUError("Unexpected error");
290 return (1);
291 }
292 nTotalClusters += nClusters;
293 printf("Placing event %4d+%d (ID %4d) at z %7.3f (time %'dns) %s(collisions %4d, bunch %6ld, train %3d) (%'10d clusters, %'10d MC labels, %'10d track MC info)\n", nCollisions, nBorderCollisions, useEvent, shift, (int32_t)(nBunch * config.bunchSpacing), inTF ? " inside" : "outside",
294 nCollisions, nBunch, nTrain, nClusters, mChain->mIOPtrs.nMCLabelsTPC, mChain->mIOPtrs.nMCInfosTPC);
295 nInBunchPileUp++;
296 nCollisionsInTrain++;
297 p2 *= mCollisionProbability / nInBunchPileUp;
298 p += p2;
299 if (config.noEventRepeat && mSimBunchNoRepeatEvent >= mNEventsInDirectory) {
300 nBunch = lastBunch;
301 }
302 }
303 if (nInBunchPileUp > 1) {
304 nMultipleCollisions++;
305 }
306 nBunch++;
307 }
308 nBunch += mTrainDist - config.bunchCount;
309 if (nCollisionsInTrain) {
310 nTrainCollissions++;
311 }
312 if (nCollisionsInTrain > 1) {
313 nTrainMultipleCollisions++;
314 }
315 nTrain++;
316 }
317 nBunch += mMaxBunchesFull - mTrainDist * config.bunchTrainCount;
318 }
319 mNTotalCollisions += nCollisions;
320 GPUInfo("Timeframe statistics: collisions: %d+%d in %d trains (inside / outside), average rate %f (pile up: in bunch %d, in train %d)", nCollisions, nBorderCollisions, nTrainCollissions, (float)nCollisions / (float)(config.timeFrameLen - DRIFT_TIME) * 1e9, nMultipleCollisions,
321 nTrainMultipleCollisions);
323 GPUInfo("\tTotal clusters: %u, MC Labels %d, MC Infos %d", nTotalClusters, (int32_t)mChain->mIOPtrs.nMCLabelsTPC, (int32_t)mChain->mIOPtrs.nMCInfosTPC);
324
325 if (!config.noBorder && mChain->GetQA()) {
326 mChain->GetQA()->SetMCTrackRange(mcMin, mcMax);
327 }
328 return (0);
329}
330
332{
333 for (int32_t iEventInTimeframe = 0; iEventInTimeframe < config.nMerge; iEventInTimeframe++) {
334 float shift;
335 if (config.shiftFirstEvent || iEventInTimeframe) {
336 if (config.randomizeDistance) {
337 shift = mDisUniReal(mRndGen2);
338 if (config.shiftFirstEvent) {
339 shift = (iEventInTimeframe + shift) * config.averageDistance;
340 } else {
341 if (iEventInTimeframe == 0) {
342 shift = 0;
343 } else {
344 shift = (iEventInTimeframe - 0.5f + shift) * config.averageDistance;
345 }
346 }
347 } else {
348 if (config.shiftFirstEvent) {
349 shift = config.averageDistance * (iEventInTimeframe + 0.5f);
350 } else {
351 shift = config.averageDistance * (iEventInTimeframe);
352 }
353 }
354 } else {
355 shift = 0.f;
356 }
357
358 if (ReadEventShifted(iEvent * config.nMerge + iEventInTimeframe, shift) < 0) {
359 return (1);
360 }
361 }
363 return (0);
364}
365
366void GPUReconstructionTimeframe::SetDisplayInformation(int32_t iCol)
367{
368 if (mChain->GetEventDisplay()) {
369 for (uint32_t sl = 0; sl < NSECTORS; sl++) {
370 mChain->GetEventDisplay()->SetCollisionFirstCluster(iCol, sl, mChain->mIOPtrs.nClusterData[sl]);
371 }
372 mChain->GetEventDisplay()->SetCollisionFirstCluster(iCol, NSECTORS, mChain->mIOPtrs.nMCInfosTPC);
373 }
374}
Helper class to access correction maps.
uint64_t exp(uint64_t base, uint8_t exp) noexcept
int32_t i
GPUChain * chain
constexpr int p2()
uint32_t j
Definition RawData.h:0
Definition of TPCFastTransform class.
TBranch * ptr
int nClusters
const CorrectionMapsHelper * GetTPCTransformHelper() const
const GPUQA * GetQA() const
GPUDisplayInterface * GetEventDisplay()
GPUTrackingInOutPointers & mIOPtrs
struct o2::gpu::GPUChainTracking::InOutMemory mIOMem
virtual void SetCollisionFirstCluster(uint32_t collision, int32_t sector, int32_t cluster)=0
void SetMCTrackRange(int32_t min, int32_t max)
Definition GPUQA.h:47
int32_t ReadEventShifted(int32_t i, float shiftZ, float minZ=-1e6, float maxZ=-1e6, bool silent=false)
GPUReconstructionTimeframe(GPUChainTracking *rec, int32_t(*read)(int32_t), int32_t nEvents)
GLuint GLsizei const GLchar * label
Definition glcorearb.h:2519
GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat maxZ
Definition glcorearb.h:2910
GLfloat GLfloat minZ
Definition glcorearb.h:2910
GPUSettingsStandalone configStandalone
Definition genEvents.cxx:48
std::unique_ptr< GPUTPCMCInfo[]> mcInfosTPC
std::unique_ptr< GPUTPCMCInfoCol[]> mcInfosTPCCol
std::unique_ptr< AliHLTTPCClusterMCLabel[]> mcLabelsTPC
std::unique_ptr< GPUTPCClusterData[]> clusterData[NSECTORS]
std::unique_ptr< AliHLTTPCRawCluster[]> rawClusters[NSECTORS]
const o2::tpc::ClusterNativeAccess * clustersNative
const int nEvents
Definition test_Fifo.cxx:27