GPUReconstructionTimeframe.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

#include "GPUReconstructionTimeframe.h"
#include "GPUReconstruction.h"
#include "GPUChainTracking.h"
#include "GPUQA.h"
#include "GPUTPCMCInfo.h"
#include "GPUTPCClusterData.h"
#include "AliHLTTPCRawCluster.h"
#include "TPCFastTransform.h"
#include "GPUO2DataTypes.h"

#include <cstdio>
#include <exception>
#include <memory>
#include <cstring>

#include "utils/qconfig.h"

using namespace o2::gpu;

namespace o2::gpu
{
extern GPUSettingsStandalone configStandalone;
}
static auto& config = configStandalone.TF;

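// Set up the simulated LHC bunch structure from the standalone timeframe configuration
// and validate that the requested number of colliding bunches fits into one orbit.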
GPUReconstructionTimeframe::GPUReconstructionTimeframe(GPUChainTracking* chain, int32_t (*read)(int32_t), int32_t nEvents) : mChain(chain), mReadEvent(read), mNEventsInDirectory(nEvents), mDisUniReal(0.f, 1.f), mRndGen1(configStandalone.seed), mRndGen2(mDisUniInt(mRndGen1))
{
  mMaxBunchesFull = TIME_ORBIT / config.bunchSpacing;
  mMaxBunches = (TIME_ORBIT - config.abortGapTime) / config.bunchSpacing;

  if (config.overlayRaw && chain->GetTPCTransformHelper() == nullptr) {
    GPUInfo("Overlay Raw Events requires TPC Fast Transform");
    throw std::exception();
  }
  if (config.bunchSim) {
    if (config.bunchCount * config.bunchTrainCount > mMaxBunches) {
      GPUInfo("Invalid timeframe settings: too many colliding bunches requested!");
      throw std::exception();
    }
    mTrainDist = mMaxBunches / config.bunchTrainCount;
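    // Mean number of collisions per filled bunch crossing: interaction rate [Hz] times the
    // orbit duration [s] (bunchSpacing is given in ns), divided by the number of filled bunches.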
    mCollisionProbability = (float)config.interactionRate * (float)(mMaxBunchesFull * config.bunchSpacing / 1e9f) / (float)(config.bunchCount * config.bunchTrainCount);
    GPUInfo("Timeframe settings: %d trains of %d bunches, bunch spacing: %d, train spacing: %dx%d, filled bunches %d / %d (%d), collision probability %f, mixing %d events", config.bunchTrainCount, config.bunchCount, config.bunchSpacing, mTrainDist, config.bunchSpacing,
            config.bunchCount * config.bunchTrainCount, mMaxBunches, mMaxBunchesFull, mCollisionProbability, mNEventsInDirectory);
  }

  mEventStride = configStandalone.seed;
  mSimBunchNoRepeatEvent = configStandalone.StartEvent;
  mEventUsed.resize(mNEventsInDirectory);
  if (config.noEventRepeat == 2) {
    memset(mEventUsed.data(), 0, mNEventsInDirectory * sizeof(mEventUsed[0]));
  }
}

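// Read one event from disk and shift it in Z (and, for raw clusters, in time) so it can be
// overlaid at a given position inside the time frame; clusters outside the optional
// [minZ, maxZ] window are dropped and the shifted event is stored in mShiftedEvents for merging.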
int32_t GPUReconstructionTimeframe::ReadEventShifted(int32_t iEvent, float shiftZ, float minZ, float maxZ, bool silent)
{
  mReadEvent(iEvent);
  if (config.overlayRaw) {
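    // Convert the Z shift of this collision into an equivalent shift of the raw-cluster time
    // stamps, using the drift velocity from the TPC fast transform.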
    float shiftTTotal = (((double)config.timeFrameLen - DRIFT_TIME) * ((double)TPCZ / (double)DRIFT_TIME) - shiftZ) / mChain->GetTPCTransformHelper()->getCorrMap()->getVDrift();
    for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
      for (uint32_t j = 0; j < mChain->mIOPtrs.nRawClusters[iSector]; j++) {
        auto& tmp = mChain->mIOMem.rawClusters[iSector][j];
        tmp.fTime += shiftTTotal;
      }
    }
  }
  if (shiftZ != 0.f) {
    for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
      for (uint32_t j = 0; j < mChain->mIOPtrs.nClusterData[iSector]; j++) {
        auto& tmp = mChain->mIOMem.clusterData[iSector][j];
        tmp.z += iSector < NSECTORS / 2 ? shiftZ : -shiftZ;
      }
    }
    for (uint32_t i = 0; i < mChain->mIOPtrs.nMCInfosTPC; i++) {
      auto& tmp = mChain->mIOMem.mcInfosTPC[i];
      tmp.z += i < NSECTORS / 2 ? shiftZ : -shiftZ;
    }
  }

  // Remove clusters outside boundaries
  uint32_t nClusters = 0;
  uint32_t removed = 0;
  if (minZ > -1e6 || maxZ > -1e6) {
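    // Compact the cluster arrays in place: clusters inside the (side-dependent) Z window are
    // kept and moved to the front of each sector, together with their raw clusters and MC labels.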
    uint32_t currentClusterTotal = 0;
    for (uint32_t iSector = 0; iSector < NSECTORS; iSector++) {
      uint32_t currentClusterSector = 0;
      bool doRaw = config.overlayRaw && mChain->mIOPtrs.nClusterData[iSector] == mChain->mIOPtrs.nRawClusters[iSector];
      for (uint32_t i = 0; i < mChain->mIOPtrs.nClusterData[iSector]; i++) {
        float sign = iSector < NSECTORS / 2 ? 1 : -1;
        if (sign * mChain->mIOMem.clusterData[iSector][i].z >= minZ && sign * mChain->mIOMem.clusterData[iSector][i].z <= maxZ) {
          if (currentClusterSector != i) {
            mChain->mIOMem.clusterData[iSector][currentClusterSector] = mChain->mIOMem.clusterData[iSector][i];
            if (doRaw) {
              mChain->mIOMem.rawClusters[iSector][currentClusterSector] = mChain->mIOMem.rawClusters[iSector][i];
            }
          }
          if (mChain->mIOPtrs.nMCLabelsTPC > currentClusterTotal && nClusters != currentClusterTotal) {
            mChain->mIOMem.mcLabelsTPC[nClusters] = mChain->mIOMem.mcLabelsTPC[currentClusterTotal];
          }
          // GPUInfo("Keeping Cluster ID %d (ID in sector %d) Z=%f (sector %d) --> %d (sector %d)", currentClusterTotal, i, mChain->mIOMem.clusterData[iSector][i].fZ, iSector, nClusters, currentClusterSector);
          currentClusterSector++;
          nClusters++;
        } else {
          // GPUInfo("Removing Cluster ID %d (ID in sector %d) Z=%f (sector %d)", currentClusterTotal, i, mChain->mIOMem.clusterData[iSector][i].fZ, iSector);
          removed++;
        }
        currentClusterTotal++;
      }
      mChain->mIOPtrs.nClusterData[iSector] = currentClusterSector;
      if (doRaw) {
        mChain->mIOPtrs.nRawClusters[iSector] = currentClusterSector;
      }
    }
    if (mChain->mIOPtrs.nMCLabelsTPC) {
      mChain->mIOPtrs.nMCLabelsTPC = nClusters;
    }
  } else {
    for (uint32_t i = 0; i < NSECTORS; i++) {
      nClusters += mChain->mIOPtrs.nClusterData[i];
    }
  }

  if (!silent) {
    GPUInfo("Read %u Clusters with %d MC labels and %d MC tracks", nClusters, (int32_t)mChain->mIOPtrs.nMCLabelsTPC, (int32_t)mChain->mIOPtrs.nMCInfosTPC);
    if (minZ > -1e6 || maxZ > -1e6) {
      GPUInfo("\tRemoved %u / %u clusters", removed, nClusters + removed);
    }
  }

  mShiftedEvents.emplace_back(mChain->mIOPtrs, std::move(mChain->mIOMem), mChain->mIOPtrs.clustersNative ? *mChain->mIOPtrs.clustersNative : o2::tpc::ClusterNativeAccess());
  return nClusters;
}

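// Merge all events previously read via ReadEventShifted into a single set of IO buffers:
// first sum up the per-sector cluster and MC counts, then copy the data and re-assign
// global cluster IDs, MC track IDs, and collision indices.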
void GPUReconstructionTimeframe::MergeShiftedEvents()
{
  mChain->ClearIOPointers();
  for (uint32_t i = 0; i < mShiftedEvents.size(); i++) {
    auto& ptr = std::get<0>(mShiftedEvents[i]);
    for (uint32_t j = 0; j < NSECTORS; j++) {
      mChain->mIOPtrs.nClusterData[j] += ptr.nClusterData[j];
      if (config.overlayRaw) {
        mChain->mIOPtrs.nRawClusters[j] += ptr.nRawClusters[j];
      }
    }
    mChain->mIOPtrs.nMCLabelsTPC += ptr.nMCLabelsTPC;
    mChain->mIOPtrs.nMCInfosTPC += ptr.nMCInfosTPC;
    mChain->mIOPtrs.nMCInfosTPCCol += ptr.nMCInfosTPCCol;
    SetDisplayInformation(i);
  }
  uint32_t nClustersTotal = 0;
  uint32_t nClustersTotalRaw = 0;
  uint32_t nClustersSectorOffset[NSECTORS] = {0};
  for (uint32_t i = 0; i < NSECTORS; i++) {
    nClustersSectorOffset[i] = nClustersTotal;
    nClustersTotal += mChain->mIOPtrs.nClusterData[i];
    nClustersTotalRaw += mChain->mIOPtrs.nRawClusters[i];
  }
  if (nClustersTotalRaw && nClustersTotalRaw != nClustersTotal) {
    GPUError("Inconsistency between raw clusters and cluster data");
    throw std::exception();
  }
  if (mChain->mIOPtrs.nMCLabelsTPC && nClustersTotal != mChain->mIOPtrs.nMCLabelsTPC) {
    GPUError("Inconsistency between TPC clusters and MC labels");
    throw std::exception();
  }
  mChain->AllocateIOMemory();
  mChain->mIOPtrs.clustersNative = nullptr;

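  // Second pass: copy the per-event data into the merged buffers, assigning new global cluster
  // IDs and offsetting MC track IDs and per-collision MC info by the tracks of preceding events.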
  uint32_t nTrackOffset = 0;
  uint32_t nColOffset = 0;
  uint32_t nClustersEventOffset[NSECTORS] = {0};
  for (uint32_t i = 0; i < mShiftedEvents.size(); i++) {
    auto& ptr = std::get<0>(mShiftedEvents[i]);
    uint32_t inEventOffset = 0;
    for (uint32_t j = 0; j < NSECTORS; j++) {
      memcpy((void*)&mChain->mIOMem.clusterData[j][nClustersEventOffset[j]], (void*)ptr.clusterData[j], ptr.nClusterData[j] * sizeof(ptr.clusterData[j][0]));
      if (nClustersTotalRaw) {
        memcpy((void*)&mChain->mIOMem.rawClusters[j][nClustersEventOffset[j]], (void*)ptr.rawClusters[j], ptr.nRawClusters[j] * sizeof(ptr.rawClusters[j][0]));
      }
      if (mChain->mIOPtrs.nMCLabelsTPC) {
        memcpy((void*)&mChain->mIOMem.mcLabelsTPC[nClustersSectorOffset[j] + nClustersEventOffset[j]], (void*)&ptr.mcLabelsTPC[inEventOffset], ptr.nClusterData[j] * sizeof(ptr.mcLabelsTPC[0]));
      }
      for (uint32_t k = 0; k < ptr.nClusterData[j]; k++) {
        mChain->mIOMem.clusterData[j][nClustersEventOffset[j] + k].id = nClustersSectorOffset[j] + nClustersEventOffset[j] + k;
        if (mChain->mIOPtrs.nMCLabelsTPC) {
          for (int32_t l = 0; l < 3; l++) {
            auto& label = mChain->mIOMem.mcLabelsTPC[nClustersSectorOffset[j] + nClustersEventOffset[j] + k].fClusterID[l];
            if (label.fMCID >= 0) {
              label.fMCID += nTrackOffset;
            }
          }
        }
      }

      nClustersEventOffset[j] += ptr.nClusterData[j];
      inEventOffset += ptr.nClusterData[j];
    }

    memcpy((void*)&mChain->mIOMem.mcInfosTPC[nTrackOffset], (void*)ptr.mcInfosTPC, ptr.nMCInfosTPC * sizeof(ptr.mcInfosTPC[0]));
    for (uint32_t j = 0; j < ptr.nMCInfosTPCCol; j++) {
      mChain->mIOMem.mcInfosTPCCol[nColOffset + j] = ptr.mcInfosTPCCol[j];
      mChain->mIOMem.mcInfosTPCCol[nColOffset + j].first += nTrackOffset;
    }
    nTrackOffset += ptr.nMCInfosTPC;
    nColOffset += ptr.nMCInfosTPCCol;
  }

  GPUInfo("Merged %d events, %u clusters total", (int32_t)mShiftedEvents.size(), nClustersTotal);

  mShiftedEvents.clear();
}

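// Build a full simulated time frame: walk over the LHC bunch trains, draw the number of
// collisions in every filled bunch crossing from a Poisson distribution, and overlay events
// read from the input directory at the corresponding time / Z positions.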
int32_t GPUReconstructionTimeframe::LoadCreateTimeFrame(int32_t iEvent)
{
  if (config.nTotalEventsInTF && mNTotalCollisions >= config.nTotalEventsInTF) {
    return (2);
  }

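  // Start one drift time before the time frame and simulate until its end, so that pile-up
  // from collisions just outside the frame, whose drift tails reach into it, is also generated.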
  int64_t nBunch = -DRIFT_TIME / config.bunchSpacing;
  int64_t lastBunch = config.timeFrameLen / config.bunchSpacing;
  int64_t lastTFBunch = lastBunch - DRIFT_TIME / config.bunchSpacing;
  int32_t nCollisions = 0, nBorderCollisions = 0, nTrainCollisions = 0, nMultipleCollisions = 0, nTrainMultipleCollisions = 0;
  int32_t nTrain = 0;
  int32_t mcMin = -1, mcMax = -1;
  uint32_t nTotalClusters = 0;
  while (nBunch < lastBunch) {
    for (int32_t iTrain = 0; iTrain < config.bunchTrainCount && nBunch < lastBunch; iTrain++) {
      int32_t nCollisionsInTrain = 0;
      for (int32_t iBunch = 0; iBunch < config.bunchCount && nBunch < lastBunch; iBunch++) {
        const bool inTF = nBunch >= 0 && nBunch < lastTFBunch && (config.nTotalEventsInTF == 0 || nCollisions < mNTotalCollisions + config.nTotalEventsInTF);
        if (mcMin == -1 && inTF) {
          mcMin = mChain->mIOPtrs.nMCInfosTPC;
        }
        if (mcMax == -1 && nBunch >= 0 && !inTF) {
          mcMax = mChain->mIOPtrs.nMCInfosTPC;
        }
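        // Draw the number of collisions in this bunch crossing from a Poisson distribution with
        // mean mCollisionProbability: p accumulates the Poisson CDF term by term (p2 holds the
        // current term e^-lambda * lambda^k / k!) until it exceeds the uniform random number.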
        int32_t nInBunchPileUp = 0;
        double randVal = mDisUniReal(inTF ? mRndGen2 : mRndGen1);
        double p = exp(-mCollisionProbability);
        double p2 = p;
        while (randVal > p) {
          if (config.noBorder && (nBunch < 0 || nBunch >= lastTFBunch)) {
            break;
          }
          if (nCollisionsInTrain >= mNEventsInDirectory) {
            GPUError("Error: insufficient events for mixing!");
            return (1);
          }
          if (nCollisionsInTrain == 0 && config.noEventRepeat == 0) {
            memset(mEventUsed.data(), 0, mNEventsInDirectory * sizeof(mEventUsed[0]));
          }
          if (inTF) {
            nCollisions++;
          } else {
            nBorderCollisions++;
          }
          int32_t useEvent;
          if (config.noEventRepeat == 1) {
            useEvent = mSimBunchNoRepeatEvent;
          } else {
            while (mEventUsed[useEvent = (inTF && config.eventStride ? (mEventStride += config.eventStride) : mDisUniInt(inTF ? mRndGen2 : mRndGen1)) % mNEventsInDirectory]) {
              ;
            }
          }
          if (config.noEventRepeat) {
            mSimBunchNoRepeatEvent++;
          }
          mEventUsed[useEvent] = 1;
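          // Place the chosen event at the Z position equivalent to this bunch crossing's time:
          // the time (in ns) is mapped to Z via one full drift length (TPCZ over DRIFT_TIME).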
          double shift = (double)nBunch * (double)config.bunchSpacing * (double)TPCZ / (double)DRIFT_TIME;
          int32_t nClusters = ReadEventShifted(useEvent, shift, 0, (double)config.timeFrameLen * (double)TPCZ / (double)DRIFT_TIME, true);
          if (nClusters < 0) {
            GPUError("Unexpected error");
            return (1);
          }
          nTotalClusters += nClusters;
          printf("Placing event %4d+%d (ID %4d) at z %7.3f (time %'dns) %s(collisions %4d, bunch %6ld, train %3d) (%'10d clusters, %'10d MC labels, %'10d track MC info)\n", nCollisions, nBorderCollisions, useEvent, shift, (int32_t)(nBunch * config.bunchSpacing), inTF ? " inside" : "outside",
                 nCollisions, nBunch, nTrain, nClusters, mChain->mIOPtrs.nMCLabelsTPC, mChain->mIOPtrs.nMCInfosTPC);
          nInBunchPileUp++;
          nCollisionsInTrain++;
          p2 *= mCollisionProbability / nInBunchPileUp;
          p += p2;
          if (config.noEventRepeat && mSimBunchNoRepeatEvent >= mNEventsInDirectory) {
            nBunch = lastBunch;
          }
        }
        if (nInBunchPileUp > 1) {
          nMultipleCollisions++;
        }
        nBunch++;
      }
      nBunch += mTrainDist - config.bunchCount;
      if (nCollisionsInTrain) {
        nTrainCollisions++;
      }
      if (nCollisionsInTrain > 1) {
        nTrainMultipleCollisions++;
      }
      nTrain++;
    }
    nBunch += mMaxBunchesFull - mTrainDist * config.bunchTrainCount;
  }
  mNTotalCollisions += nCollisions;
  GPUInfo("Timeframe statistics: collisions: %d+%d in %d trains (inside / outside), average rate %f (pile up: in bunch %d, in train %d)", nCollisions, nBorderCollisions, nTrainCollisions, (float)nCollisions / (float)(config.timeFrameLen - DRIFT_TIME) * 1e9, nMultipleCollisions,
          nTrainMultipleCollisions);
  GPUInfo("\tTotal clusters: %u, MC Labels %d, MC Infos %d", nTotalClusters, (int32_t)mChain->mIOPtrs.nMCLabelsTPC, (int32_t)mChain->mIOPtrs.nMCInfosTPC);

  if (!config.noBorder && mChain->GetQA()) {
    mChain->GetQA()->SetMCTrackRange(mcMin, mcMax);
  }
  return (0);
}

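// Read config.nMerge consecutive events from the input directory and place them at (optionally
// randomized) Z distances from each other, to be merged into one combined event.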
int32_t GPUReconstructionTimeframe::LoadMergedEvents(int32_t iEvent)
{
  for (int32_t iEventInTimeframe = 0; iEventInTimeframe < config.nMerge; iEventInTimeframe++) {
    float shift;
    if (config.shiftFirstEvent || iEventInTimeframe) {
      if (config.randomizeDistance) {
        shift = mDisUniReal(mRndGen2);
        if (config.shiftFirstEvent) {
          shift = (iEventInTimeframe + shift) * config.averageDistance;
        } else {
          if (iEventInTimeframe == 0) {
            shift = 0;
          } else {
            shift = (iEventInTimeframe - 0.5f + shift) * config.averageDistance;
          }
        }
      } else {
        if (config.shiftFirstEvent) {
          shift = config.averageDistance * (iEventInTimeframe + 0.5f);
        } else {
          shift = config.averageDistance * (iEventInTimeframe);
        }
      }
    } else {
      shift = 0.f;
    }

    if (ReadEventShifted(iEvent * config.nMerge + iEventInTimeframe, shift) < 0) {
      return (1);
    }
  }
  return (0);
}

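// Report the per-sector cluster boundaries and the MC info count of collision iCol to the event
// display, so that individual collisions can be distinguished when drawing a merged time frame.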
void GPUReconstructionTimeframe::SetDisplayInformation(int32_t iCol)
{
  if (mChain->GetEventDisplay()) {
    for (uint32_t sl = 0; sl < NSECTORS; sl++) {
      mChain->GetEventDisplay()->SetCollisionFirstCluster(iCol, sl, mChain->mIOPtrs.nClusterData[sl]);
    }
    mChain->GetEventDisplay()->SetCollisionFirstCluster(iCol, NSECTORS, mChain->mIOPtrs.nMCInfosTPC);
  }
}