Project
Loading...
Searching...
No Matches
GPUReconstructionTimeframe.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
14
16#include "GPUReconstruction.h"
17#include "GPUChainTracking.h"
18#include "GPUChainTrackingGetters.inc"
20#include "GPUQA.h"
22#include "GPUTPCMCInfo.h"
23#include "GPUTPCClusterData.h"
24#include "AliHLTTPCRawCluster.h"
25#include "TPCFastTransform.h"
27#include "GPUO2DataTypes.h"
28#include "GPUSettings.h"
29
30#include <cstdio>
31#include <exception>
32#include <memory>
33#include <cstring>
34
35using namespace o2::gpu;
36
// configStandalone is defined elsewhere in the standalone build (see
// genEvents.cxx); only the declaration is needed in this translation unit.
37namespace o2::gpu
38{
39extern GPUSettingsStandalone configStandalone;
40}
// File-local shorthand for the timeframe (TF) section of the standalone settings.
41static auto& config = configStandalone.TF;
42
43GPUReconstructionTimeframe::GPUReconstructionTimeframe(GPUChainTracking* chain, int32_t (*read)(int32_t), int32_t nEvents) : mChain(chain), mReadEvent(read), mNEventsInDirectory(nEvents), mDisUniReal(0.f, 1.f), mRndGen1(configStandalone.seed), mRndGen2(mDisUniInt(mRndGen1))
44{
45 mMaxBunchesFull = TIME_ORBIT / config.bunchSpacing;
46 mMaxBunches = (TIME_ORBIT - config.abortGapTime) / config.bunchSpacing;
47
48 if (config.overlayRaw && chain->GetTPCTransformHelper() == nullptr) {
49 GPUInfo("Overlay Raw Events requires TPC Fast Transform");
50 throw std::exception();
51 }
52 if (config.bunchSim) {
53 if (config.bunchCount * config.bunchTrainCount > mMaxBunches) {
54 GPUInfo("Invalid timeframe settings: too many colliding bunches requested!");
55 throw std::exception();
56 }
57 mTrainDist = mMaxBunches / config.bunchTrainCount;
58 mCollisionProbability = (float)config.interactionRate * (float)(mMaxBunchesFull * config.bunchSpacing / 1e9f) / (float)(config.bunchCount * config.bunchTrainCount);
59 GPUInfo("Timeframe settings: %d trains of %d bunches, bunch spacing: %d, train spacing: %dx%d, filled bunches %d / %d (%d), collision probability %f, mixing %d events", config.bunchTrainCount, config.bunchCount, config.bunchSpacing, mTrainDist, config.bunchSpacing,
60 config.bunchCount * config.bunchTrainCount, mMaxBunches, mMaxBunchesFull, mCollisionProbability, mNEventsInDirectory);
61 }
62
63 mEventStride = configStandalone.seed;
64 mSimBunchNoRepeatEvent = configStandalone.StartEvent;
65 mEventUsed.resize(mNEventsInDirectory);
66 if (config.noEventRepeat == 2) {
67 memset(mEventUsed.data(), 0, mNEventsInDirectory * sizeof(mEventUsed[0]));
68 }
69}
70
// Reads event iEvent through the mReadEvent callback into mChain's IO buffers,
// shifts all TPC clusters / MC infos by shiftZ in z (positive for sectors on
// the A side, iSector < NSECTORS / 2, negative on the C side) and, when raw
// clusters are overlaid, applies the matching drift-time shift. Clusters whose
// side-corrected z lies outside [minZ, maxZ] are compacted away (the defaults
// of -1e6 disable the filtering). The event is then moved into mShiftedEvents
// for later merging. Returns the number of clusters kept.
71int32_t GPUReconstructionTimeframe::ReadEventShifted(int32_t iEvent, float shiftZ, float minZ, float maxZ, bool silent)
72{
73 mReadEvent(iEvent);
74 if (config.overlayRaw) {
// Translate the remaining drift distance into a time shift via the drift
// velocity from the TPC fast transform (the constructor guarantees the
// transform helper exists when overlayRaw is set).
75 float shiftTTotal = (((double)config.timeFrameLen - DRIFT_TIME) * ((double)TPCZ / (double)DRIFT_TIME) - shiftZ) / mChain->GetTPCTransformHelper()->getCorrMap()->getVDrift();
76 for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
77 for (uint32_t j = 0; j < mChain->mIOPtrs.nRawClusters[iSector]; j++) {
78 auto& tmp = mChain->mIOMem.rawClusters[iSector][j];
79 tmp.fTime += shiftTTotal;
80 }
81 }
82 }
83 if (shiftZ != 0.f) {
84 for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
85 for (uint32_t j = 0; j < mChain->mIOPtrs.nClusterData[iSector]; j++) {
86 auto& tmp = mChain->mIOMem.clusterData[iSector][j];
87 tmp.z += iSector < NSECTORS / 2 ? shiftZ : -shiftZ;
88 }
89 }
90 for (uint32_t i = 0; i < mChain->mIOPtrs.nMCInfosTPC; i++) {
91 auto& tmp = mChain->mIOMem.mcInfosTPC[i];
// NOTE(review): this condition uses the MC-info index i, not a sector number,
// unlike the per-sector shift above — confirm this is intended.
92 tmp.z += i < NSECTORS / 2 ? shiftZ : -shiftZ;
93 }
94 }
95
96 // Remove clusters outside boundaries
// In-place compaction: surviving clusters are moved down within each sector,
// and cluster data, raw clusters and (globally indexed) MC labels are kept in
// lock-step.
97 uint32_t nClusters = 0;
98 uint32_t removed = 0;
99 if (minZ > -1e6 || maxZ > -1e6) {
100 uint32_t currentClusterTotal = 0;
101 for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
102 uint32_t currentClusterSector = 0;
// Raw clusters can only be compacted alongside cluster data if counts match 1:1.
103 bool doRaw = config.overlayRaw && mChain->mIOPtrs.nClusterData[iSector] == mChain->mIOPtrs.nRawClusters[iSector];
104 for (uint32_t i = 0; i < mChain->mIOPtrs.nClusterData[iSector]; i++) {
// Undo the side-dependent sign so the cut is symmetric for A and C side.
105 float sign = iSector < NSECTORS / 2 ? 1 : -1;
106 if (sign * mChain->mIOMem.clusterData[iSector][i].z >= minZ && sign * mChain->mIOMem.clusterData[iSector][i].z <= maxZ) {
107 if (currentClusterSector != i) {
108 mChain->mIOMem.clusterData[iSector][currentClusterSector] = mChain->mIOMem.clusterData[iSector][i];
109 if (doRaw) {
110 mChain->mIOMem.rawClusters[iSector][currentClusterSector] = mChain->mIOMem.rawClusters[iSector][i];
111 }
112 }
// MC labels are stored flat across sectors, hence the global indices here.
113 if (mChain->mIOPtrs.nMCLabelsTPC > currentClusterTotal && nClusters != currentClusterTotal) {
114 mChain->mIOMem.mcLabelsTPC[nClusters] = mChain->mIOMem.mcLabelsTPC[currentClusterTotal];
115 }
116 // GPUInfo("Keeping Cluster ID %d (ID in sector %d) Z=%f (sector %d) --> %d (sector %d)", currentClusterTotal, i, mChain->mIOMem.clusterData[iSector][i].fZ, iSector, nClusters, currentClusterSector);
117 currentClusterSector++;
118 nClusters++;
119 } else {
120 // GPUInfo("Removing Cluster ID %d (ID in sector %d) Z=%f (sector %d)", currentClusterTotal, i, mChain->mIOMem.clusterData[iSector][i].fZ, iSector);
121 removed++;
122 }
123 currentClusterTotal++;
124 }
125 mChain->mIOPtrs.nClusterData[iSector] = currentClusterSector;
126 if (doRaw) {
127 mChain->mIOPtrs.nRawClusters[iSector] = currentClusterSector;
128 }
129 }
130 if (mChain->mIOPtrs.nMCLabelsTPC) {
// NOTE(review): original line 131 (the body of this if) is missing from this
// generated listing — presumably it shrinks nMCLabelsTPC to the kept count;
// verify against the original source file.
132 }
133 } else {
134 for (uint32_t i = 0; i < NSECTORS; i++) {
135 nClusters += mChain->mIOPtrs.nClusterData[i];
136 }
137 }
138
139 if (!silent) {
140 GPUInfo("Read %u Clusters with %d MC labels and %d MC tracks", nClusters, (int32_t)mChain->mIOPtrs.nMCLabelsTPC, (int32_t)mChain->mIOPtrs.nMCInfosTPC);
// NOTE(review): `maxZ > 1e6` differs from the filter condition above
// (`maxZ > -1e6`); with the default maxZ = -1e6 this suppresses the message
// even when filtering ran — verify which comparison is intended.
141 if (minZ > -1e6 || maxZ > 1e6) {
142 GPUInfo("\tRemoved %u / %u clusters", removed, nClusters + removed);
143 }
144 }
145
// Move the loaded event (IO pointers, owned memory, native clusters if any)
// into the pending list; mChain->mIOMem is left in a moved-from state.
146 mShiftedEvents.emplace_back(mChain->mIOPtrs, std::move(mChain->mIOMem), mChain->mIOPtrs.clustersNative ? *mChain->mIOPtrs.clustersNative : o2::tpc::ClusterNativeAccess());
147 return nClusters;
148}
149
// NOTE(review): the signature line (original line 150) is missing from this
// generated listing. Judging by the body (no return statements, operates on
// mShiftedEvents), this is the routine that merges all previously read shifted
// events into a single set of IO pointers/buffers on mChain.
151{
152 mChain->ClearIOPointers();
// First pass: accumulate per-sector cluster counts and MC object counts of all
// shifted events so AllocateIOMemory() below reserves enough space, and inform
// the event display of each collision's starting cluster.
153 for (uint32_t i = 0; i < mShiftedEvents.size(); i++) {
154 auto& ptr = std::get<0>(mShiftedEvents[i]);
155 for (uint32_t j = 0; j < NSECTORS; j++) {
156 mChain->mIOPtrs.nClusterData[j] += ptr.nClusterData[j];
157 if (config.overlayRaw) {
158 mChain->mIOPtrs.nRawClusters[j] += ptr.nRawClusters[j];
159 }
160 }
161 mChain->mIOPtrs.nMCLabelsTPC += ptr.nMCLabelsTPC;
162 mChain->mIOPtrs.nMCInfosTPC += ptr.nMCInfosTPC;
163 mChain->mIOPtrs.nMCInfosTPCCol += ptr.nMCInfosTPCCol;
164 SetDisplayInformation(i);
165 }
// Per-sector offsets into the flat (sector-major) merged cluster numbering.
166 uint32_t nClustersTotal = 0;
167 uint32_t nClustersTotalRaw = 0;
168 uint32_t nClustersSectorOffset[NSECTORS] = {0};
169 for (uint32_t i = 0; i < NSECTORS; i++) {
170 nClustersSectorOffset[i] = nClustersTotal;
171 nClustersTotal += mChain->mIOPtrs.nClusterData[i];
172 nClustersTotalRaw += mChain->mIOPtrs.nRawClusters[i];
173 }
// Sanity checks: raw clusters (if present) and MC labels must match the
// cluster data 1:1.
174 if (nClustersTotalRaw && nClustersTotalRaw != nClustersTotal) {
175 GPUError("Inconsitency between raw clusters and cluster data");
176 throw std::exception();
177 }
178 if (mChain->mIOPtrs.nMCLabelsTPC && nClustersTotal != mChain->mIOPtrs.nMCLabelsTPC) {
179 GPUError("Inconsitency between TPC clusters and MC labels");
180 throw std::exception();
181 }
182 mChain->AllocateIOMemory();
183 mChain->mIOPtrs.clustersNative = nullptr;
184
// Second pass: copy each event's clusters / labels / MC infos into the merged
// buffers, assigning globally unique cluster ids and offsetting MC track ids
// and collision entries so they stay unique across events.
185 uint32_t nTrackOffset = 0;
186 uint32_t nColOffset = 0;
187 uint32_t nClustersEventOffset[NSECTORS] = {0};
188 for (uint32_t i = 0; i < mShiftedEvents.size(); i++) {
189 auto& ptr = std::get<0>(mShiftedEvents[i]);
190 uint32_t inEventOffset = 0;
191 for (uint32_t j = 0; j < NSECTORS; j++) {
192 memcpy((void*)&mChain->mIOMem.clusterData[j][nClustersEventOffset[j]], (void*)ptr.clusterData[j], ptr.nClusterData[j] * sizeof(ptr.clusterData[j][0]));
193 if (nClustersTotalRaw) {
194 memcpy((void*)&mChain->mIOMem.rawClusters[j][nClustersEventOffset[j]], (void*)ptr.rawClusters[j], ptr.nRawClusters[j] * sizeof(ptr.rawClusters[j][0]));
195 }
196 if (mChain->mIOPtrs.nMCLabelsTPC) {
// Labels are stored flat across sectors: sector offset + within-sector offset.
197 memcpy((void*)&mChain->mIOMem.mcLabelsTPC[nClustersSectorOffset[j] + nClustersEventOffset[j]], (void*)&ptr.mcLabelsTPC[inEventOffset], ptr.nClusterData[j] * sizeof(ptr.mcLabelsTPC[0]));
198 }
199 for (uint32_t k = 0; k < ptr.nClusterData[j]; k++) {
200 mChain->mIOMem.clusterData[j][nClustersEventOffset[j] + k].id = nClustersSectorOffset[j] + nClustersEventOffset[j] + k;
201 if (mChain->mIOPtrs.nMCLabelsTPC) {
// Each label carries 3 MC references; rebase only valid (>= 0) track ids —
// negative ids are left untouched (presumably unused slots — verify).
202 for (int32_t l = 0; l < 3; l++) {
203 auto& label = mChain->mIOMem.mcLabelsTPC[nClustersSectorOffset[j] + nClustersEventOffset[j] + k].fClusterID[l];
204 if (label.fMCID >= 0) {
205 label.fMCID += nTrackOffset;
206 }
207 }
208 }
209 }
210
211 nClustersEventOffset[j] += ptr.nClusterData[j];
212 inEventOffset += ptr.nClusterData[j];
213 }
214
215 memcpy((void*)&mChain->mIOMem.mcInfosTPC[nTrackOffset], (void*)ptr.mcInfosTPC, ptr.nMCInfosTPC * sizeof(ptr.mcInfosTPC[0]));
216 for (uint32_t j = 0; j < ptr.nMCInfosTPCCol; j++) {
217 mChain->mIOMem.mcInfosTPCCol[nColOffset + j] = ptr.mcInfosTPCCol[j];
// Rebase the collision's first-track index into the merged track numbering.
218 mChain->mIOMem.mcInfosTPCCol[nColOffset + j].first += nTrackOffset;
219 }
220 nTrackOffset += ptr.nMCInfosTPC;
221 nColOffset += ptr.nMCInfosTPCCol;
222 }
223
224 GPUInfo("Merged %d events, %u clusters total", (int32_t)mShiftedEvents.size(), nClustersTotal);
225
226 mShiftedEvents.clear();
227}
228
// NOTE(review): the signature line (original line 229) is missing from this
// generated listing. Return codes visible in the body: 0 = success, 1 = error
// (insufficient events in directory / read failure), 2 = the configured total
// collision budget for the timeframe run is already exhausted.
230{
231 if (config.nTotalEventsInTF && mNTotalCollisions >= config.nTotalEventsInTF) {
232 return (2);
233 }
234
// Simulate bunch crossings starting one drift time BEFORE the timeframe
// (clusters of earlier collisions still drift into it) up to its end.
235 int64_t nBunch = -DRIFT_TIME / config.bunchSpacing;
236 int64_t lastBunch = config.timeFrameLen / config.bunchSpacing;
237 int64_t lastTFBunch = lastBunch - DRIFT_TIME / config.bunchSpacing;
238 int32_t nCollisions = 0, nBorderCollisions = 0, nTrainCollissions = 0, nMultipleCollisions = 0, nTrainMultipleCollisions = 0;
239 int32_t nTrain = 0;
240 int32_t mcMin = -1, mcMax = -1;
241 uint32_t nTotalClusters = 0;
242 while (nBunch < lastBunch) {
243 for (int32_t iTrain = 0; iTrain < config.bunchTrainCount && nBunch < lastBunch; iTrain++) {
244 int32_t nCollisionsInTrain = 0;
245 for (int32_t iBunch = 0; iBunch < config.bunchCount && nBunch < lastBunch; iBunch++) {
// A bunch counts as "inside" the TF when its collisions fully drift within it
// and the optional total-event budget is not yet exceeded.
246 const bool inTF = nBunch >= 0 && nBunch < lastTFBunch && (config.nTotalEventsInTF == 0 || nCollisions < mNTotalCollisions + config.nTotalEventsInTF);
// Record the MC track index range covered by in-TF collisions for the QA below.
247 if (mcMin == -1 && inTF) {
248 mcMin = mChain->mIOPtrs.nMCInfosTPC;
249 }
250 if (mcMax == -1 && nBunch >= 0 && !inTF) {
251 mcMax = mChain->mIOPtrs.nMCInfosTPC;
252 }
253 int32_t nInBunchPileUp = 0;
// Draw the number of collisions in this bunch from a Poisson distribution by
// inverse-CDF sampling: p accumulates the CDF starting at P(0) = e^-lambda,
// and the loop runs while randVal still exceeds it. In-TF and border bunches
// use separate RNG streams for reproducibility.
254 double randVal = mDisUniReal(inTF ? mRndGen2 : mRndGen1);
255 double p = exp(-mCollisionProbability);
256 double p2 = p;
257 while (randVal > p) {
258 if (config.noBorder && (nBunch < 0 || nBunch >= lastTFBunch)) {
259 break;
260 }
261 if (nCollisionsInTrain >= mNEventsInDirectory) {
262 GPUError("Error: insuffient events for mixing!");
263 return (1);
264 }
// Reset the used-event bookkeeping at the start of each train when repeats
// are allowed again.
265 if (nCollisionsInTrain == 0 && config.noEventRepeat == 0) {
266 memset(mEventUsed.data(), 0, mNEventsInDirectory * sizeof(mEventUsed[0]));
267 }
268 if (inTF) {
269 nCollisions++;
270 } else {
271 nBorderCollisions++;
272 }
273 int32_t useEvent;
// Select the next event: strictly sequential when noEventRepeat == 1,
// otherwise a strided or random pick among events not yet used.
274 if (config.noEventRepeat == 1) {
275 useEvent = mSimBunchNoRepeatEvent;
276 } else {
277 while (mEventUsed[useEvent = (inTF && config.eventStride ? (mEventStride += config.eventStride) : mDisUniInt(inTF ? mRndGen2 : mRndGen1)) % mNEventsInDirectory]) {
278 ;
279 }
280 }
281 if (config.noEventRepeat) {
282 mSimBunchNoRepeatEvent++;
283 }
284 mEventUsed[useEvent] = 1;
// Convert the bunch time to a z displacement via the drift velocity
// (TPCZ per DRIFT_TIME).
285 double shift = (double)nBunch * (double)config.bunchSpacing * (double)TPCZ / (double)DRIFT_TIME;
286 int32_t nClusters = ReadEventShifted(useEvent, shift, 0, (double)config.timeFrameLen * (double)TPCZ / (double)DRIFT_TIME, true);
287 if (nClusters < 0) {
288 GPUError("Unexpected error");
289 return (1);
290 }
291 nTotalClusters += nClusters;
292 printf("Placing event %4d+%d (ID %4d) at z %7.3f (time %'dns) %s(collisions %4d, bunch %6ld, train %3d) (%'10d clusters, %'10d MC labels, %'10d track MC info)\n", nCollisions, nBorderCollisions, useEvent, shift, (int32_t)(nBunch * config.bunchSpacing), inTF ? " inside" : "outside",
293 nCollisions, nBunch, nTrain, nClusters, mChain->mIOPtrs.nMCLabelsTPC, mChain->mIOPtrs.nMCInfosTPC);
294 nInBunchPileUp++;
295 nCollisionsInTrain++;
// Next Poisson term: P(k) = P(k-1) * lambda / k, accumulated into the CDF.
296 p2 *= mCollisionProbability / nInBunchPileUp;
297 p += p2;
// When sequential events are exhausted, terminate the whole simulation loop.
298 if (config.noEventRepeat && mSimBunchNoRepeatEvent >= mNEventsInDirectory) {
299 nBunch = lastBunch;
300 }
301 }
302 if (nInBunchPileUp > 1) {
303 nMultipleCollisions++;
304 }
305 nBunch++;
306 }
// Skip the empty bunches between consecutive trains.
307 nBunch += mTrainDist - config.bunchCount;
308 if (nCollisionsInTrain) {
309 nTrainCollissions++;
310 }
311 if (nCollisionsInTrain > 1) {
312 nTrainMultipleCollisions++;
313 }
314 nTrain++;
315 }
// Skip the abort gap at the end of each orbit.
316 nBunch += mMaxBunchesFull - mTrainDist * config.bunchTrainCount;
317 }
318 mNTotalCollisions += nCollisions;
319 GPUInfo("Timeframe statistics: collisions: %d+%d in %d trains (inside / outside), average rate %f (pile up: in bunch %d, in train %d)", nCollisions, nBorderCollisions, nTrainCollissions, (float)nCollisions / (float)(config.timeFrameLen - DRIFT_TIME) * 1e9, nMultipleCollisions,
320 nTrainMultipleCollisions);
// NOTE(review): original line 321 is absent from this generated listing
// (likely a blank or continuation line).
322 GPUInfo("\tTotal clusters: %u, MC Labels %d, MC Infos %d", nTotalClusters, (int32_t)mChain->mIOPtrs.nMCLabelsTPC, (int32_t)mChain->mIOPtrs.nMCInfosTPC);
323
// Restrict QA to the MC tracks of in-TF collisions (border collisions excluded).
324 if (!config.noBorder && mChain->GetQA()) {
325 mChain->GetQA()->SetMCTrackRange(mcMin, mcMax);
326 }
327 return (0);
328}
329
// NOTE(review): the signature line (original line 330) is missing from this
// generated listing; the body reads config.nMerge consecutive events starting
// at iEvent * config.nMerge and overlays them with a configurable z spacing.
// Returns 0 on success, 1 if reading an event failed.
331{
332 for (int32_t iEventInTimeframe = 0; iEventInTimeframe < config.nMerge; iEventInTimeframe++) {
333 float shift;
// Compute this event's z displacement: optionally randomized around
// averageDistance; the first event is displaced only when shiftFirstEvent is set.
334 if (config.shiftFirstEvent || iEventInTimeframe) {
335 if (config.randomizeDistance) {
336 shift = mDisUniReal(mRndGen2);
337 if (config.shiftFirstEvent) {
338 shift = (iEventInTimeframe + shift) * config.averageDistance;
339 } else {
340 if (iEventInTimeframe == 0) {
341 shift = 0;
342 } else {
343 shift = (iEventInTimeframe - 0.5f + shift) * config.averageDistance;
344 }
345 }
346 } else {
347 if (config.shiftFirstEvent) {
348 shift = config.averageDistance * (iEventInTimeframe + 0.5f);
349 } else {
350 shift = config.averageDistance * (iEventInTimeframe);
351 }
352 }
353 } else {
354 shift = 0.f;
355 }
356
// minZ/maxZ are left at their defaults here, so no boundary filtering is applied.
357 if (ReadEventShifted(iEvent * config.nMerge + iEventInTimeframe, shift) < 0) {
358 return (1);
359 }
360 }
362 return (0);
363}
364
365void GPUReconstructionTimeframe::SetDisplayInformation(int32_t iCol)
366{
367 if (mChain->GetEventDisplay()) {
368 for (uint32_t sl = 0; sl < NSECTORS; sl++) {
369 mChain->GetEventDisplay()->SetCollisionFirstCluster(iCol, sl, mChain->mIOPtrs.nClusterData[sl]);
370 }
371 mChain->GetEventDisplay()->SetCollisionFirstCluster(iCol, NSECTORS, mChain->mIOPtrs.nMCInfosTPC);
372 }
373}
Helper class to access correction maps.
uint64_t exp(uint64_t base, uint8_t exp) noexcept
int32_t i
GPUChain * chain
constexpr int p2()
uint32_t j
Definition RawData.h:0
Definition of TPCFastTransform class.
TBranch * ptr
int nClusters
const CorrectionMapsHelper * GetTPCTransformHelper() const
const GPUQA * GetQA() const
GPUDisplayInterface * GetEventDisplay()
GPUTrackingInOutPointers & mIOPtrs
struct o2::gpu::GPUChainTracking::InOutMemory mIOMem
virtual void SetCollisionFirstCluster(uint32_t collision, int32_t sector, int32_t cluster)=0
void SetMCTrackRange(int32_t min, int32_t max)
Definition GPUQA.h:47
int32_t ReadEventShifted(int32_t i, float shiftZ, float minZ=-1e6, float maxZ=-1e6, bool silent=false)
GPUReconstructionTimeframe(GPUChainTracking *rec, int32_t(*read)(int32_t), int32_t nEvents)
GLuint GLsizei const GLchar * label
Definition glcorearb.h:2519
GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat maxZ
Definition glcorearb.h:2910
GLfloat GLfloat minZ
Definition glcorearb.h:2910
GPUSettingsStandalone configStandalone
Definition genEvents.cxx:47
std::unique_ptr< GPUTPCMCInfo[]> mcInfosTPC
std::unique_ptr< GPUTPCMCInfoCol[]> mcInfosTPCCol
std::unique_ptr< AliHLTTPCClusterMCLabel[]> mcLabelsTPC
std::unique_ptr< GPUTPCClusterData[]> clusterData[NSECTORS]
std::unique_ptr< AliHLTTPCRawCluster[]> rawClusters[NSECTORS]
const o2::tpc::ClusterNativeAccess * clustersNative
const int nEvents
Definition test_Fifo.cxx:27