GPUReconstructionCPU.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file GPUReconstructionCPU.cxx
/// \author David Rohr

18#include "GPUChain.h"
19#include "GPUDefParametersRuntime.h"
20#include "GPUTPCGMMergedTrack.h"
22#include "GPUTRDTrackletWord.h"
24#include "GPUTPCMCInfo.h"
25#include "GPUTRDTrack.h"
26#include "GPUTRDTracker.h"
27#include "AliHLTTPCRawCluster.h"
29#include "GPUMemoryResource.h"
30#include "GPUConstantMem.h"
31#include "GPULogging.h"
33#include "GPUReconstructionProcessingKernels.inc"

#include <atomic>
#include <ctime>
#include <random>

#ifndef _WIN32
#include <unistd.h>
#endif

using namespace o2::gpu;

constexpr GPUReconstructionCPU::krnlRunRange GPUReconstructionCPU::krnlRunRangeNone;
constexpr GPUReconstructionCPU::krnlEvent GPUReconstructionCPU::krnlEventNone;

GPUReconstruction* GPUReconstruction::GPUReconstruction_Create_CPU(const GPUSettingsDeviceBackend& cfg) { return new GPUReconstructionCPU(cfg); }

GPUReconstructionCPU::~GPUReconstructionCPU()
{
  Exit(); // Needs to be identical to GPU backend behavior in order to avoid calling abstract methods later in the destructor
}

template <class T, int32_t I, typename... Args>
inline void GPUReconstructionCPU::runKernelBackend(const krnlSetupTime& _xyz, const Args&... args)
{
  auto& x = _xyz.x;
  auto& y = _xyz.y;
  if (x.device == krnlDeviceType::Device) {
    throw std::runtime_error("Cannot run device kernel on host");
  }
  if (x.nThreads != 1) {
    throw std::runtime_error("Cannot run device kernel on host with nThreads != 1");
  }
  int32_t nThreads = getNKernelHostThreads(false);
  if (nThreads > 1) {
    if (GetProcessingSettings().debugLevel >= 5) {
      GPUInfo("Running %d Threads", mThreading->activeThreads->max_concurrency());
    }
    tbb::this_task_arena::isolate([&] {
      mThreading->activeThreads->execute([&] {
        tbb::parallel_for(tbb::blocked_range<uint32_t>(0, x.nBlocks, 1), [&](const tbb::blocked_range<uint32_t>& r) {
          typename T::GPUSharedMemory smem;
          for (uint32_t iB = r.begin(); iB < r.end(); iB++) {
            T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.index], args...);
          }
        });
      });
    });
  } else {
    for (uint32_t iB = 0; iB < x.nBlocks; iB++) {
      typename T::GPUSharedMemory smem;
      T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.index], args...);
    }
  }
}
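
// On the host backend, each GPU "block" becomes one iteration of a TBB range and
// every kernel runs with a single thread per block, which is why nThreads must be 1
// above. A minimal sketch of the kernel interface this dispatch assumes; the class
// and member names here are illustrative, not the real O2 kernel declarations:
#if 0
#include <cstdint>

struct ExampleKernel {
  struct GPUSharedMemory { // per-block scratch; default-constructed once per block on the host
  };
  struct processorType {
    uint32_t nItems;
    float* items;
  };
  template <int32_t I>
  static void Thread(uint32_t nBlocks, uint32_t nThreads, uint32_t iBlock, uint32_t iThread,
                     GPUSharedMemory& smem, processorType& proc)
  {
    for (uint32_t i = iBlock; i < proc.nItems; i += nBlocks) { // block-strided work split
      proc.items[i] *= 2.f;
    }
  }
};
#endif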

template <>
inline void GPUReconstructionCPU::runKernelBackend<GPUMemClean16, 0>(const krnlSetupTime& _xyz, void* const& ptr, uint64_t const& size)
{
  int32_t nThreads = std::max<int32_t>(1, std::min<int32_t>(size / (16 * 1024 * 1024), getNKernelHostThreads(true)));
  if (nThreads > 1) {
    tbb::parallel_for(0, nThreads, [&](int iThread) {
      size_t threadSize = size / nThreads;
      if (threadSize % 4096) {
        threadSize += 4096 - threadSize % 4096;
      }
      size_t offset = threadSize * iThread;
      size_t mySize = std::min<size_t>(threadSize, size - offset);
      if (mySize) {
        memset((char*)ptr + offset, 0, mySize);
      } // clang-format off
    }, tbb::static_partitioner()); // clang-format on
  } else {
    memset(ptr, 0, size);
  }
}
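
// The specialization above replaces a trivial GPU memset kernel on the host: it
// spawns one worker per ~16 MiB, rounds each chunk up to a 4 KiB page boundary,
// and clamps the last chunk to the buffer end. A self-contained sketch of the same
// pattern, assuming only TBB and the standard library (names and thresholds here
// are illustrative):
#if 0
#include <tbb/tbb.h>
#include <algorithm>
#include <cstring>

void parallelClear(void* ptr, size_t size, int32_t maxThreads)
{
  // one worker per ~16 MiB, at least one
  int32_t nThreads = std::max<int32_t>(1, std::min<int32_t>(size / (16 * 1024 * 1024), maxThreads));
  tbb::parallel_for(0, nThreads, [&](int32_t iThread) {
    size_t chunk = size / nThreads;
    chunk += (4096 - chunk % 4096) % 4096; // round up to whole pages
    size_t offset = chunk * iThread;
    if (offset < size) {
      memset((char*)ptr + offset, 0, std::min<size_t>(chunk, size - offset));
    } // clang-format off
  }, tbb::static_partitioner()); // clang-format on
}
#endif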

template <class S, int32_t I>
GPUReconstructionProcessing::krnlProperties GPUReconstructionCPU::getKernelProperties(int gpu)
{
  if (gpu == -1) {
    gpu = IsGPU();
  }
  const auto num = GetKernelNum<S, I>();
  const auto* p = gpu ? mParDevice : mParCPU;
  GPUReconstructionProcessing::krnlProperties ret = {p->par_LB_maxThreads[num], p->par_LB_minBlocks[num], p->par_LB_forceBlocks[num]};
  if (ret.nThreads == 0) {
    ret.nThreads = gpu ? mThreadCount : 1u;
  }
  if (ret.minBlocks == 0) {
    ret.minBlocks = 1;
  }
  return ret;
}

#define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward, x_types, ...) \
  template GPUReconstructionProcessing::krnlProperties GPUReconstructionCPU::getKernelProperties<GPUCA_M_KRNL_TEMPLATE(x_class)>(int gpu);
#include "GPUReconstructionKernelList.h"
#undef GPUCA_KRNL
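
// GPUCA_KRNL together with GPUReconstructionKernelList.h is an X-macro: the list
// header expands one GPUCA_KRNL(...) entry per kernel, and each consumer defines
// the macro to emit what it needs (here, explicit template instantiations of
// getKernelProperties) before including the list. A compilable sketch of the idiom
// with an inline list and illustrative names:
#if 0
struct TrackletConstructor {};
struct TrackletSelector {};

template <class T>
void runKernel() {}

// in O2 this list lives in its own header that is included once per consumer
#define EXAMPLE_KERNEL_LIST(F) \
  F(TrackletConstructor)       \
  F(TrackletSelector)

// expand the list into explicit instantiations, then drop the generator macro
#define EXAMPLE_KRNL(name) template void runKernel<name>();
EXAMPLE_KERNEL_LIST(EXAMPLE_KRNL)
#undef EXAMPLE_KRNL
#endif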

size_t GPUReconstructionCPU::TransferMemoryInternal(GPUMemoryResource* res, int32_t stream, deviceEvent* ev, deviceEvent* evList, int32_t nEvents, bool toGPU, const void* src, void* dst) { return 0; }
size_t GPUReconstructionCPU::GPUMemCpy(void* dst, const void* src, size_t size, int32_t stream, int32_t toGPU, deviceEvent* ev, deviceEvent* evList, int32_t nEvents) { return 0; }
size_t GPUReconstructionCPU::GPUMemCpyAlways(bool onGpu, void* dst, const void* src, size_t size, int32_t stream, int32_t toGPU, deviceEvent* ev, deviceEvent* evList, int32_t nEvents)
{
  memcpy(dst, src, size);
  return 0;
}
size_t GPUReconstructionCPU::WriteToConstantMemory(size_t offset, const void* src, size_t size, int32_t stream, deviceEvent* ev) { return 0; }
int32_t GPUReconstructionCPU::GPUDebug(const char* state, int32_t stream, bool force) { return 0; }
size_t GPUReconstructionCPU::TransferMemoryResourcesHelper(GPUProcessor* proc, int32_t stream, bool all, bool toGPU)
{
  int32_t inc = toGPU ? GPUMemoryResource::MEMORY_INPUT_FLAG : GPUMemoryResource::MEMORY_OUTPUT_FLAG;
  int32_t exc = toGPU ? GPUMemoryResource::MEMORY_OUTPUT_FLAG : GPUMemoryResource::MEMORY_INPUT_FLAG;
  size_t n = 0;
  for (uint32_t i = 0; i < mMemoryResources.size(); i++) {
    GPUMemoryResource& res = mMemoryResources[i];
    if (res.mPtr == nullptr) {
      continue;
    }
    if (proc && res.mProcessor != proc) {
      continue;
    }
    if (!(res.mType & GPUMemoryResource::MEMORY_GPU)) {
      continue;
    }
    if (!GetProcessingSettings().keepAllMemory && !all && (res.mType & exc) && !(res.mType & inc)) {
      continue;
    }
    if (toGPU) {
      n += TransferMemoryResourceToGPU(&mMemoryResources[i], stream);
    } else {
      n += TransferMemoryResourceToHost(&mMemoryResources[i], stream);
    }
  }
  return n;
}
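
// The direction of the transfer decides which resources are skipped: buffers flagged
// output-only are not copied towards the GPU and input-only buffers are not copied
// back, unless keepAllMemory or the all flag forces them. A compact sketch of that
// include/exclude bitmask test (flag values illustrative):
#if 0
#include <cstdint>

enum MemFlags : int32_t { MEM_INPUT = 1, MEM_OUTPUT = 2 };

bool shouldTransfer(int32_t type, bool toGPU, bool force)
{
  int32_t inc = toGPU ? MEM_INPUT : MEM_OUTPUT;  // wanted in this direction
  int32_t exc = toGPU ? MEM_OUTPUT : MEM_INPUT;  // skip if only this one is set
  return force || !((type & exc) && !(type & inc));
}
#endif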

int32_t GPUReconstructionCPU::GetThread()
{
// Get Thread ID
#if defined(__APPLE__)
  return (0); // syscall is deprecated on MacOS..., only needed for GPU support which we don't do on Mac anyway
#elif defined(_WIN32)
  return ((int32_t)(size_t)GetCurrentThread());
#else
  return ((int32_t)syscall(SYS_gettid));
#endif
}
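
// The Linux branch uses the raw gettid syscall because it predates the glibc
// wrapper; since glibc 2.30 the same kernel thread id is available directly. A
// sketch of the modern form (requires _GNU_SOURCE, which g++ defines by default):
#if 0
#include <unistd.h>
#include <cstdint>

int32_t currentThreadId()
{
  return (int32_t)gettid(); // kernel TID, unique per thread, unlike pthread_self()
}
#endif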

int32_t GPUReconstructionCPU::InitDevice()
{
  mThreading->activeThreads = std::make_unique<tbb::task_arena>(mActiveHostKernelThreads);
  if (GetProcessingSettings().memoryAllocationStrategy == GPUMemoryResource::ALLOCATION_GLOBAL) {
    if (mMaster == nullptr) {
      if (mDeviceMemorySize > mHostMemorySize) {
        mHostMemorySize = mDeviceMemorySize;
      }
      mHostMemoryBase = operator new(mHostMemorySize, std::align_val_t(GPUCA_BUFFER_ALIGNMENT));
    }
    mHostMemoryPermanent = mHostMemoryBase;
    ClearAllocatedMemory();
  }
  if (GetProcessingSettings().inKernelParallel) {
    mBlockCount = getNKernelHostThreads(true);
  }
  mProcShadow.mProcessorsProc = processors();
  return 0;
}
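
// operator new(size, std::align_val_t) is the C++17 aligned-allocation overload used
// above for the global host memory pool; the matching aligned operator delete in
// ExitDevice() below must pass the same alignment. A minimal sketch of the pairing
// (the 64-byte alignment is illustrative, the file uses GPUCA_BUFFER_ALIGNMENT):
#if 0
#include <new>
#include <cstddef>

void alignedPoolExample()
{
  constexpr std::align_val_t kAlign{64};
  void* buf = operator new(1 << 20, kAlign); // 1 MiB, 64-byte aligned
  // ... carve allocations out of buf ...
  operator delete(buf, kAlign); // alignment must match the allocating call
}
#endif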

int32_t GPUReconstructionCPU::ExitDevice()
{
  if (GetProcessingSettings().memoryAllocationStrategy == GPUMemoryResource::ALLOCATION_GLOBAL) {
    if (mMaster == nullptr) {
      operator delete(mHostMemoryBase, std::align_val_t(GPUCA_BUFFER_ALIGNMENT));
    }
    mHostMemoryPool = mHostMemoryBase = mHostMemoryPermanent = nullptr;
    mHostMemorySize = 0;
  }
  return 0;
}

int32_t GPUReconstructionCPU::RunChains()
{
  mMemoryScalers->temporaryFactor = 1.;
  if (GetProcessingSettings().memoryScalingFuzz) {
    static std::mt19937 rng;
    static std::uniform_int_distribution<uint64_t> dist(0, 1000000);
    uint64_t fuzzFactor = GetProcessingSettings().memoryScalingFuzz == 1 ? dist(rng) : GetProcessingSettings().memoryScalingFuzz;
    GPUInfo("Fuzzing memory scaling factor with %lu", fuzzFactor);
    mMemoryScalers->fuzzScalingFactor(fuzzFactor);
  }

  mStatNEvents++;

  if (GetProcessingSettings().debugLevel >= 3 || GetProcessingSettings().allocDebugLevel) {
    GPUInfo("Allocated memory when starting processing %34s", "");
    PrintMemoryOverview();
  }
  mTimerTotal.Start();
  const std::clock_t cpuTimerStart = std::clock();
  if (GetProcessingSettings().doublePipeline) {
    int32_t retVal = EnqueuePipeline();
    if (retVal) {
      return retVal;
    }
  } else {
    if (mSlaves.size() || mMaster) {
      WriteConstantParams(); // Reinitialize // TODO: Get this in sync with GPUChainTracking::DoQueuedUpdates, and consider the doublePipeline
    }
    for (uint32_t i = 0; i < mChains.size(); i++) {
      int32_t retVal = mChains[i]->RunChain();
      if (retVal) {
        return retVal;
      }
    }
    if (GetProcessingSettings().tpcFreeAllocatedMemoryAfterProcessing) {
      ClearAllocatedMemory(true);
    }
  }
  mTimerTotal.Stop();
  mStatCPUTime += (double)(std::clock() - cpuTimerStart) / CLOCKS_PER_SEC;
  if (GetProcessingSettings().debugLevel >= 3 || GetProcessingSettings().allocDebugLevel) {
    GPUInfo("Allocated memory when ending processing %36s", "");
    PrintMemoryOverview();
  }

  mStatWallTime = (mTimerTotal.GetElapsedTime() * 1000000. / mStatNEvents);
  std::string nEventReport;
  if (GetProcessingSettings().debugLevel >= 0 && mStatNEvents > 1) {
    nEventReport += " (average of " + std::to_string(mStatNEvents) + " runs)";
  }
  double kernelTotal = 0;
  std::vector<double> kernelStepTimes(GPUDataTypes::N_RECO_STEPS, 0.);

  if (GetProcessingSettings().debugLevel >= 1) {
    for (uint32_t i = 0; i < mTimers.size(); i++) {
      double time = 0;
      if (mTimers[i] == nullptr) {
        continue;
      }
      for (int32_t j = 0; j < mTimers[i]->num; j++) {
        HighResTimer& timer = mTimers[i]->timer[j];
        time += timer.GetElapsedTime();
        if (GetProcessingSettings().resetTimers) {
          timer.Reset();
        }
      }

      uint32_t type = mTimers[i]->type;
      if (type == 0) {
        kernelTotal += time;
        int32_t stepNum = getRecoStepNum(mTimers[i]->step);
        kernelStepTimes[stepNum] += time;
      }
      char bandwidth[256] = "";
      if (mTimers[i]->memSize && mStatNEvents && time != 0.) {
        snprintf(bandwidth, 256, " (%8.3f GB/s - %'14zu bytes - %'14zu per call)", mTimers[i]->memSize / time * 1e-9, mTimers[i]->memSize / mStatNEvents, mTimers[i]->memSize / mStatNEvents / mTimers[i]->count);
      }
      printf("Execution Time: Task (%c %8ux): %50s Time: %'10.0f us%s\n", type == 0 ? 'K' : 'C', mTimers[i]->count, mTimers[i]->name.c_str(), time * 1000000 / mStatNEvents, bandwidth);
      if (GetProcessingSettings().resetTimers) {
        mTimers[i]->count = 0;
        mTimers[i]->memSize = 0;
      }
    }
  }
  if (GetProcessingSettings().recoTaskTiming) {
    for (int32_t i = 0; i < GPUDataTypes::N_RECO_STEPS; i++) {
      if (kernelStepTimes[i] != 0. || mTimersRecoSteps[i].timerTotal.GetElapsedTime() != 0.) {
        printf("Execution Time: Step : %11s %38s Time: %'10.0f us %64s ( Total Time : %'14.0f us, CPU Time : %'14.0f us, %'7.2fx )\n", "Tasks",
               GPUDataTypes::RECO_STEP_NAMES[i], kernelStepTimes[i] * 1000000 / mStatNEvents, "", mTimersRecoSteps[i].timerTotal.GetElapsedTime() * 1000000 / mStatNEvents, mTimersRecoSteps[i].timerCPU * 1000000 / mStatNEvents, mTimersRecoSteps[i].timerCPU / mTimersRecoSteps[i].timerTotal.GetElapsedTime());
      }
      if (mTimersRecoSteps[i].bytesToGPU) {
        printf("Execution Time: Step (D %8ux): %11s %38s Time: %'10.0f us (%8.3f GB/s - %'14zu bytes - %'14zu per call)\n", mTimersRecoSteps[i].countToGPU, "DMA to GPU", GPUDataTypes::RECO_STEP_NAMES[i], mTimersRecoSteps[i].timerToGPU.GetElapsedTime() * 1000000 / mStatNEvents,
               mTimersRecoSteps[i].bytesToGPU / mTimersRecoSteps[i].timerToGPU.GetElapsedTime() * 1e-9, mTimersRecoSteps[i].bytesToGPU / mStatNEvents, mTimersRecoSteps[i].bytesToGPU / mTimersRecoSteps[i].countToGPU);
      }
      if (mTimersRecoSteps[i].bytesToHost) {
        printf("Execution Time: Step (D %8ux): %11s %38s Time: %'10.0f us (%8.3f GB/s - %'14zu bytes - %'14zu per call)\n", mTimersRecoSteps[i].countToHost, "DMA to Host", GPUDataTypes::RECO_STEP_NAMES[i], mTimersRecoSteps[i].timerToHost.GetElapsedTime() * 1000000 / mStatNEvents,
               mTimersRecoSteps[i].bytesToHost / mTimersRecoSteps[i].timerToHost.GetElapsedTime() * 1e-9, mTimersRecoSteps[i].bytesToHost / mStatNEvents, mTimersRecoSteps[i].bytesToHost / mTimersRecoSteps[i].countToHost);
      }
      if (GetProcessingSettings().resetTimers) {
        mTimersRecoSteps[i].bytesToGPU = mTimersRecoSteps[i].bytesToHost = 0;
        mTimersRecoSteps[i].timerToGPU.Reset();
        mTimersRecoSteps[i].timerToHost.Reset();
        mTimersRecoSteps[i].timerTotal.Reset();
        mTimersRecoSteps[i].timerCPU = 0;
        mTimersRecoSteps[i].countToGPU = 0;
        mTimersRecoSteps[i].countToHost = 0;
      }
    }
    for (int32_t i = 0; i < GPUDataTypes::N_GENERAL_STEPS; i++) {
      if (mTimersGeneralSteps[i].GetElapsedTime() != 0.) {
        printf("Execution Time: General Step : %50s Time: %'10.0f us\n", GPUDataTypes::GENERAL_STEP_NAMES[i], mTimersGeneralSteps[i].GetElapsedTime() * 1000000 / mStatNEvents);
      }
    }
    if (GetProcessingSettings().debugLevel >= 1) {
      mStatKernelTime = kernelTotal * 1000000 / mStatNEvents;
      printf("Execution Time: Total : %50s Time: %'10.0f us%s\n", "Total Kernel", mStatKernelTime, nEventReport.c_str());
    }
    printf("Execution Time: Total : %50s Time: %'10.0f us ( CPU Time : %'10.0f us, %7.2fx ) %s\n", "Total Wall", mStatWallTime, mStatCPUTime * 1000000 / mStatNEvents, mStatCPUTime / mTimerTotal.GetElapsedTime(), nEventReport.c_str());
  } else if (GetProcessingSettings().debugLevel >= 0) {
    GPUInfo("Total Wall Time: %10.0f us%s", mStatWallTime, nEventReport.c_str());
  }
  if (GetProcessingSettings().resetTimers) {
    mStatNEvents = 0;
    mStatCPUTime = 0;
    mTimerTotal.Reset();
  }

  return 0;
}
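
// RunChains() reports wall time from a high-resolution timer and CPU time from
// std::clock(), which accumulates over all threads; their ratio is the average
// core utilization printed as "%7.2fx" above. A self-contained sketch of that
// measurement using only the standard library:
#if 0
#include <chrono>
#include <cstdio>
#include <ctime>

void timedSection()
{
  const auto wallStart = std::chrono::steady_clock::now();
  const std::clock_t cpuStart = std::clock(); // CPU time, summed over all threads
  // ... multi-threaded workload ...
  const double cpu = (double)(std::clock() - cpuStart) / CLOCKS_PER_SEC;
  const double wall = std::chrono::duration<double>(std::chrono::steady_clock::now() - wallStart).count();
  printf("wall %.3f s, cpu %.3f s, %.2fx parallelism\n", wall, cpu, cpu / wall);
}
#endif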

void GPUReconstructionCPU::ResetDeviceProcessorTypes()
{
  for (uint32_t i = 0; i < mProcessors.size(); i++) {
    if (mProcessors[i].proc->mGPUProcessorType != GPUProcessor::PROCESSOR_TYPE_DEVICE && mProcessors[i].proc->mLinkedProcessor) {
      mProcessors[i].proc->mLinkedProcessor->InitGPUProcessor(this, GPUProcessor::PROCESSOR_TYPE_DEVICE);
    }
  }
}

void GPUReconstructionCPU::UpdateParamOccupancyMap(const uint32_t* mapHost, const uint32_t* mapGPU, uint32_t occupancyTotal, int32_t stream)
{
  param().occupancyMap = mapHost;
  param().occupancyTotal = occupancyTotal;
  if (IsGPU()) {
    if (!((size_t)&param().occupancyTotal - (size_t)&param().occupancyMap == sizeof(param().occupancyMap) && sizeof(param().occupancyMap) == sizeof(size_t) && sizeof(param().occupancyTotal) < sizeof(size_t))) {
      throw std::runtime_error("occupancy data not consecutive in GPUParam");
    }
    const auto holdContext = GetThreadContext();
    size_t tmp[2] = {(size_t)mapGPU, 0};
    memcpy(&tmp[1], &occupancyTotal, sizeof(occupancyTotal));
    WriteToConstantMemory((char*)&processors()->param.occupancyMap - (char*)processors(), &tmp, sizeof(param().occupancyMap) + sizeof(param().occupancyTotal), stream);
  }
}
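
// The runtime layout check above guarantees that the map pointer and the total fit
// into two consecutive machine words, so both can be pushed to GPU constant memory
// with a single WriteToConstantMemory() call. A sketch of the same packing trick
// with a compile-time check (struct and function names illustrative):
#if 0
#include <cstddef>
#include <cstdint>
#include <cstring>

struct ParamsLayout {
  const uint32_t* occupancyMap;
  uint32_t occupancyTotal; // must directly follow the pointer for the single copy
};

static_assert(offsetof(ParamsLayout, occupancyTotal) == sizeof(void*), "members must be consecutive");

void packForUpload(const uint32_t* gpuMap, uint32_t total, void* dst)
{
  size_t tmp[2] = {(size_t)gpuMap, 0};
  memcpy(&tmp[1], &total, sizeof(total)); // value goes into the low bytes of the second word
  memcpy(dst, tmp, sizeof(void*) + sizeof(uint32_t)); // copy exactly what the struct holds
}
#endif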