GPUReconstructionCPU.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file GPUReconstructionCPU.cxx
/// \author David Rohr

#include "GPUReconstructionCPU.h"
#include "GPUChain.h"
#include "GPUTPCClusterData.h"
#include "GPUTPCSectorOutput.h"
#include "GPUTPCGMMergedTrack.h"
#include "GPUTRDTrackletWord.h"
#include "GPUTPCMCInfo.h"
#include "GPUTRDTrack.h"
#include "GPUTRDTracker.h"
#include "AliHLTTPCRawCluster.h"
#include "GPUMemoryResource.h"
#include "GPUConstantMem.h"
#include <atomic>
#include <ctime>

#define GPUCA_LOGGING_PRINTF
#include "GPULogging.h"

#ifndef _WIN32
#include <unistd.h>
#endif

using namespace o2::gpu;

GPUReconstruction* GPUReconstructionCPU::GPUReconstruction_Create_CPU(const GPUSettingsDeviceBackend& cfg) { return new GPUReconstructionCPU(cfg); }

GPUReconstructionCPU::~GPUReconstructionCPU()
{
  Exit(); // Needs to be identical to GPU backend behavior in order to avoid calling abstract methods later in the destructor
}

// Run a kernel's per-thread code on the host: loop over the requested blocks,
// optionally distributing the blocks over the active TBB host threads.
template <class T, int32_t I, typename... Args>
inline int32_t GPUReconstructionCPUBackend::runKernelBackendInternal(const krnlSetupTime& _xyz, const Args&... args)
{
  auto& x = _xyz.x;
  auto& y = _xyz.y;
  if (x.device == krnlDeviceType::Device) {
    throw std::runtime_error("Cannot run device kernel on host");
  }
  if (x.nThreads != 1) {
    throw std::runtime_error("Cannot run device kernel on host with nThreads != 1");
  }
  uint32_t num = y.num == 0 || y.num == -1 ? 1 : y.num;
  for (uint32_t k = 0; k < num; k++) {
    int32_t nThreads = getNKernelHostThreads(false);
    if (nThreads > 1) {
      if (mProcessingSettings.debugLevel >= 5) {
        printf("Running %d Threads\n", nThreads);
      }
      tbb::this_task_arena::isolate([&] {
        mThreading->activeThreads->execute([&] {
          tbb::parallel_for(tbb::blocked_range<uint32_t>(0, x.nBlocks, 1), [&](const tbb::blocked_range<uint32_t>& r) {
            typename T::GPUSharedMemory smem;
            for (uint32_t iB = r.begin(); iB < r.end(); iB++) {
              T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.start + k], args...);
            }
          });
        });
      });
    } else {
      for (uint32_t iB = 0; iB < x.nBlocks; iB++) {
        typename T::GPUSharedMemory smem;
        T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.start + k], args...);
      }
    }
  }
  return 0;
}

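// Illustrative sketch, not part of GPUReconstructionCPU.cxx: a minimal stand-in showing the
// interface contract runKernelBackendInternal() relies on. A kernel class exposes a nested
// GPUSharedMemory type and a static Thread<I>() entry point; the CPU backend creates one
// shared-memory instance per block and always calls Thread<I>() with nThreads == 1. All names
// below (example_sketch, ExampleKernel, ExampleProcessor, exampleRunOnHost) are hypothetical
// and only mirror the pattern of the real kernels listed in GPUReconstructionKernelList.h.
namespace example_sketch
{
struct ExampleProcessor {
  uint32_t processedBlocks = 0;
};

class ExampleKernel
{
 public:
  struct GPUSharedMemory { // per-block scratch space; lives on the host stack in the CPU backend
    uint32_t scratch = 0;
  };
  template <int32_t I>
  static void Thread(uint32_t nBlocks, uint32_t nThreads, uint32_t iBlock, uint32_t iThread, GPUSharedMemory& smem, ExampleProcessor& proc)
  {
    (void)nBlocks;
    (void)nThreads;
    (void)iThread; // single-threaded per block on the CPU backend
    smem.scratch += iBlock;
    proc.processedBlocks++;
  }
};

// Host-side loop mirroring the single-threaded branch of runKernelBackendInternal() above.
inline void exampleRunOnHost(uint32_t nBlocks, ExampleProcessor& proc)
{
  for (uint32_t iB = 0; iB < nBlocks; iB++) {
    ExampleKernel::GPUSharedMemory smem;
    ExampleKernel::Thread<0>(nBlocks, 1, iB, 0, smem, proc);
  }
}
} // namespace example_sketch
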
// Specialization for the GPUMemClean16 kernel: clear a buffer with memset, splitting large
// buffers (>16 MB per thread) over several host threads.
template <>
inline int32_t GPUReconstructionCPUBackend::runKernelBackendInternal<GPUMemClean16, 0>(const krnlSetupTime& _xyz, void* const& ptr, uint64_t const& size)
{
  int32_t nnThreads = std::max<int32_t>(1, std::min<int32_t>(size / (16 * 1024 * 1024), getNKernelHostThreads(true)));
  if (nnThreads > 1) {
    tbb::parallel_for(0, nnThreads, [&](int iThread) {
      size_t threadSize = size / nnThreads;
      if (threadSize % 4096) {
        threadSize += 4096 - threadSize % 4096;
      }
      size_t offset = threadSize * iThread;
      size_t mySize = std::min<size_t>(threadSize, size - offset);
      if (mySize) {
        memset((char*)ptr + offset, 0, mySize);
      } // clang-format off
    }, tbb::static_partitioner()); // clang-format on
  } else {
    memset(ptr, 0, size);
  }
  return 0;
}

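// Illustrative sketch, not part of this file: the chunking rule used by the GPUMemClean16
// specialization above, factored into a small helper. Each worker thread i clears the range
// [i * chunk, i * chunk + length), where chunk is the per-thread share rounded up to a
// 4096-byte boundary and the last thread's length is clamped to the end of the buffer.
// The names example_sketch and exampleClearRange are hypothetical.
namespace example_sketch
{
struct ClearRange {
  size_t offset;
  size_t length;
};

inline ClearRange exampleClearRange(size_t totalSize, int32_t nThreads, int32_t iThread)
{
  size_t chunk = totalSize / nThreads;
  if (chunk % 4096) {
    chunk += 4096 - chunk % 4096; // round the per-thread share up to a page boundary
  }
  size_t offset = chunk * iThread;
  size_t length = offset < totalSize ? std::min<size_t>(chunk, totalSize - offset) : 0;
  return {offset, length};
}
} // namespace example_sketch
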
template <class T, int32_t I, typename... Args>
int32_t GPUReconstructionCPUBackend::runKernelBackend(const krnlSetupArgs<T, I, Args...>& args)
{
  return std::apply([this, &args](auto&... vals) { return runKernelBackendInternal<T, I, Args...>(args.s, vals...); }, args.v);
}

template <class T, int32_t I>
krnlProperties GPUReconstructionCPUBackend::getKernelPropertiesBackend()
{
  return krnlProperties{1, 1};
}

#define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward, x_types)                                                                                                           \
  template int32_t GPUReconstructionCPUBackend::runKernelBackend<GPUCA_M_KRNL_TEMPLATE(x_class)>(const krnlSetupArgs<GPUCA_M_KRNL_TEMPLATE(x_class) GPUCA_M_STRIP(x_types)>& args); \
  template krnlProperties GPUReconstructionCPUBackend::getKernelPropertiesBackend<GPUCA_M_KRNL_TEMPLATE(x_class)>();
#include "GPUReconstructionKernelList.h"
#undef GPUCA_KRNL

// The CPU backend has no separate device memory: GPU transfer and debug-sync calls are no-ops,
// and GPUMemCpyAlways falls back to a plain memcpy.
size_t GPUReconstructionCPU::TransferMemoryInternal(GPUMemoryResource* res, int32_t stream, deviceEvent* ev, deviceEvent* evList, int32_t nEvents, bool toGPU, const void* src, void* dst) { return 0; }
size_t GPUReconstructionCPU::GPUMemCpy(void* dst, const void* src, size_t size, int32_t stream, int32_t toGPU, deviceEvent* ev, deviceEvent* evList, int32_t nEvents) { return 0; }
size_t GPUReconstructionCPU::GPUMemCpyAlways(bool onGpu, void* dst, const void* src, size_t size, int32_t stream, int32_t toGPU, deviceEvent* ev, deviceEvent* evList, int32_t nEvents)
{
  memcpy(dst, src, size);
  return 0;
}
size_t GPUReconstructionCPU::WriteToConstantMemory(size_t offset, const void* src, size_t size, int32_t stream, deviceEvent* ev) { return 0; }
int32_t GPUReconstructionCPU::GPUDebug(const char* state, int32_t stream, bool force) { return 0; }
size_t GPUReconstructionCPU::TransferMemoryResourcesHelper(GPUProcessor* proc, int32_t stream, bool all, bool toGPU)
{
  int32_t inc = toGPU ? GPUMemoryResource::MEMORY_INPUT : GPUMemoryResource::MEMORY_OUTPUT;
  int32_t exc = toGPU ? GPUMemoryResource::MEMORY_OUTPUT : GPUMemoryResource::MEMORY_INPUT;
  size_t n = 0;
  for (uint32_t i = 0; i < mMemoryResources.size(); i++) {
    GPUMemoryResource& res = mMemoryResources[i];
    if (res.mPtr == nullptr) {
      continue;
    }
    if (proc && res.mProcessor != proc) {
      continue;
    }
    if (!(res.mType & GPUMemoryResource::MEMORY_GPU) || (res.mType & GPUMemoryResource::MEMORY_CUSTOM_TRANSFER)) {
      continue;
    }
    if (!mProcessingSettings.keepAllMemory && !all && (res.mType & exc) && !(res.mType & inc)) {
      continue;
    }
    if (toGPU) {
      n += TransferMemoryResourceToGPU(&res, stream);
    } else {
      n += TransferMemoryResourceToHost(&res, stream);
    }
  }
  return n;
}

int32_t GPUReconstructionCPU::GetThread()
{
// Get Thread ID
#if defined(__APPLE__)
  return (0); // syscall is deprecated on MacOS..., only needed for GPU support which we don't do on Mac anyway
#elif defined(_WIN32)
  return ((int32_t)(size_t)GetCurrentThread());
#else
  return ((int32_t)syscall(SYS_gettid));
#endif
}

int32_t GPUReconstructionCPU::InitDevice()
{
  mActiveHostKernelThreads = mMaxHostThreads;
  mThreading->activeThreads = std::make_unique<tbb::task_arena>(mActiveHostKernelThreads);
  if (mProcessingSettings.memoryAllocationStrategy == GPUMemoryResource::ALLOCATION_GLOBAL) {
    if (mMaster == nullptr) {
      if (mDeviceMemorySize > mHostMemorySize) {
        mHostMemorySize = mDeviceMemorySize;
      }
      mHostMemoryBase = operator new(mHostMemorySize GPUCA_OPERATOR_NEW_ALIGNMENT);
    }
    mHostMemoryPermanent = mHostMemoryBase;
    ClearAllocatedMemory();
  }
  if (mProcessingSettings.inKernelParallel) {
    mBlockCount = mMaxHostThreads;
  }
  return 0;
}

int32_t GPUReconstructionCPU::ExitDevice()
{
  if (mProcessingSettings.memoryAllocationStrategy == GPUMemoryResource::ALLOCATION_GLOBAL) {
    if (mMaster == nullptr) {
      operator delete(mHostMemoryBase GPUCA_OPERATOR_NEW_ALIGNMENT);
    }
    mHostMemoryPool = mHostMemoryBase = mHostMemoryPoolEnd = mHostMemoryPermanent = nullptr;
    mHostMemorySize = 0;
  }
  return 0;
}

// Run all registered chains for one event / time frame and print the collected kernel, step,
// and DMA timing statistics depending on the debug settings.
int32_t GPUReconstructionCPU::RunChains()
{
  mMemoryScalers->temporaryFactor = 1.;
  mStatNEvents++;

  mTimerTotal.Start();
  const std::clock_t cpuTimerStart = std::clock();
  if (mProcessingSettings.doublePipeline) {
    int32_t retVal = EnqueuePipeline();
    if (retVal) {
      return retVal;
    }
  } else {
    if (mSlaves.size() || mMaster) {
      WriteConstantParams(); // Reinitialize // TODO: Get this in sync with GPUChainTracking::DoQueuedUpdates, and consider the doublePipeline
    }
    for (uint32_t i = 0; i < mChains.size(); i++) {
      int32_t retVal = mChains[i]->RunChain();
      if (retVal) {
        return retVal;
      }
    }
  }
  mTimerTotal.Stop();
  mStatCPUTime += (double)(std::clock() - cpuTimerStart) / CLOCKS_PER_SEC;

  mStatWallTime = (mTimerTotal.GetElapsedTime() * 1000000. / mStatNEvents);
  std::string nEventReport;
  if (GetProcessingSettings().debugLevel >= 0 && mStatNEvents > 1) {
    nEventReport += " (average of " + std::to_string(mStatNEvents) + " runs)";
  }
  double kernelTotal = 0;
  std::vector<double> kernelStepTimes(GPUDataTypes::N_RECO_STEPS, 0.);

  if (GetProcessingSettings().debugLevel >= 1) {
    for (uint32_t i = 0; i < mTimers.size(); i++) {
      double time = 0;
      if (mTimers[i] == nullptr) {
        continue;
      }
      for (int32_t j = 0; j < mTimers[i]->num; j++) {
        HighResTimer& timer = mTimers[i]->timer[j];
        time += timer.GetElapsedTime();
        if (mProcessingSettings.resetTimers) {
          timer.Reset();
        }
      }

      uint32_t type = mTimers[i]->type;
      if (type == 0) {
        kernelTotal += time;
        int32_t stepNum = getRecoStepNum(mTimers[i]->step);
        kernelStepTimes[stepNum] += time;
      }
      char bandwidth[256] = "";
      if (mTimers[i]->memSize && mStatNEvents && time != 0.) {
        snprintf(bandwidth, 256, " (%8.3f GB/s - %'14zu bytes - %'14zu per call)", mTimers[i]->memSize / time * 1e-9, mTimers[i]->memSize / mStatNEvents, mTimers[i]->memSize / mStatNEvents / mTimers[i]->count);
      }
      printf("Execution Time: Task (%c %8ux): %50s Time: %'10.0f us%s\n", type == 0 ? 'K' : 'C', mTimers[i]->count, mTimers[i]->name.c_str(), time * 1000000 / mStatNEvents, bandwidth);
      if (mProcessingSettings.resetTimers) {
        mTimers[i]->count = 0;
        mTimers[i]->memSize = 0;
      }
    }
  }
  if (GetProcessingSettings().recoTaskTiming) {
    for (int32_t i = 0; i < GPUDataTypes::N_RECO_STEPS; i++) {
      if (kernelStepTimes[i] != 0. || mTimersRecoSteps[i].timerTotal.GetElapsedTime() != 0.) {
        printf("Execution Time: Step : %11s %38s Time: %'10.0f us %64s ( Total Time : %'14.0f us, CPU Time : %'14.0f us, %'7.2fx )\n", "Tasks",
               GPUDataTypes::RECO_STEP_NAMES[i], kernelStepTimes[i] * 1000000 / mStatNEvents, "", mTimersRecoSteps[i].timerTotal.GetElapsedTime() * 1000000 / mStatNEvents, mTimersRecoSteps[i].timerCPU * 1000000 / mStatNEvents, mTimersRecoSteps[i].timerCPU / mTimersRecoSteps[i].timerTotal.GetElapsedTime());
      }
      if (mTimersRecoSteps[i].bytesToGPU) {
        printf("Execution Time: Step (D %8ux): %11s %38s Time: %'10.0f us (%8.3f GB/s - %'14zu bytes - %'14zu per call)\n", mTimersRecoSteps[i].countToGPU, "DMA to GPU", GPUDataTypes::RECO_STEP_NAMES[i], mTimersRecoSteps[i].timerToGPU.GetElapsedTime() * 1000000 / mStatNEvents,
               mTimersRecoSteps[i].bytesToGPU / mTimersRecoSteps[i].timerToGPU.GetElapsedTime() * 1e-9, mTimersRecoSteps[i].bytesToGPU / mStatNEvents, mTimersRecoSteps[i].bytesToGPU / mTimersRecoSteps[i].countToGPU);
      }
      if (mTimersRecoSteps[i].bytesToHost) {
        printf("Execution Time: Step (D %8ux): %11s %38s Time: %'10.0f us (%8.3f GB/s - %'14zu bytes - %'14zu per call)\n", mTimersRecoSteps[i].countToHost, "DMA to Host", GPUDataTypes::RECO_STEP_NAMES[i], mTimersRecoSteps[i].timerToHost.GetElapsedTime() * 1000000 / mStatNEvents,
               mTimersRecoSteps[i].bytesToHost / mTimersRecoSteps[i].timerToHost.GetElapsedTime() * 1e-9, mTimersRecoSteps[i].bytesToHost / mStatNEvents, mTimersRecoSteps[i].bytesToHost / mTimersRecoSteps[i].countToHost);
      }
      if (mProcessingSettings.resetTimers) {
        mTimersRecoSteps[i].bytesToGPU = mTimersRecoSteps[i].bytesToHost = 0;
        mTimersRecoSteps[i].timerToGPU.Reset();
        mTimersRecoSteps[i].timerToHost.Reset();
        mTimersRecoSteps[i].timerTotal.Reset();
        mTimersRecoSteps[i].timerCPU = 0;
        mTimersRecoSteps[i].countToGPU = 0;
        mTimersRecoSteps[i].countToHost = 0;
      }
    }
    for (int32_t i = 0; i < GPUDataTypes::N_GENERAL_STEPS; i++) {
      if (mTimersGeneralSteps[i].GetElapsedTime() != 0.) {
        printf("Execution Time: General Step : %50s Time: %'10.0f us\n", GPUDataTypes::GENERAL_STEP_NAMES[i], mTimersGeneralSteps[i].GetElapsedTime() * 1000000 / mStatNEvents);
      }
    }
    if (GetProcessingSettings().debugLevel >= 1) {
      mStatKernelTime = kernelTotal * 1000000 / mStatNEvents;
      printf("Execution Time: Total : %50s Time: %'10.0f us%s\n", "Total Kernel", mStatKernelTime, nEventReport.c_str());
    }
    printf("Execution Time: Total : %50s Time: %'10.0f us ( CPU Time : %'10.0f us, %7.2fx ) %s\n", "Total Wall", mStatWallTime, mStatCPUTime * 1000000 / mStatNEvents, mStatCPUTime / mTimerTotal.GetElapsedTime(), nEventReport.c_str());
  } else if (GetProcessingSettings().debugLevel >= 0) {
    GPUInfo("Total Wall Time: %10.0f us%s", mStatWallTime, nEventReport.c_str());
  }
  if (mProcessingSettings.resetTimers) {
    mStatNEvents = 0;
    mStatCPUTime = 0;
    mTimerTotal.Reset();
  }

  return 0;
}

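// Illustrative sketch, not part of this file: the bandwidth figures printed by RunChains()
// above, isolated into a helper. memSize is the cumulative number of bytes moved over all runs
// and timeSeconds the cumulative wall time, so memSize / time * 1e-9 gives the GB/s column and
// memSize / nEvents the per-event byte count. The names example_sketch and exampleBandwidth
// are hypothetical.
namespace example_sketch
{
struct Bandwidth {
  double gbPerSecond;
  size_t bytesPerEvent;
};

inline Bandwidth exampleBandwidth(size_t memSize, double timeSeconds, size_t nEvents)
{
  Bandwidth b{0., 0};
  if (timeSeconds > 0.) {
    b.gbPerSecond = (double)memSize / timeSeconds * 1e-9;
  }
  if (nEvents) {
    b.bytesPerEvent = memSize / nEvents;
  }
  return b;
}
} // namespace example_sketch
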
void GPUReconstructionCPU::ResetDeviceProcessorTypes()
{
  for (uint32_t i = 0; i < mProcessors.size(); i++) {
    if (mProcessors[i].proc->mGPUProcessorType != GPUProcessor::PROCESSOR_TYPE_DEVICE && mProcessors[i].proc->mLinkedProcessor) {
      mProcessors[i].proc->mLinkedProcessor->InitGPUProcessor(this, GPUProcessor::PROCESSOR_TYPE_DEVICE);
    }
  }
}

void GPUReconstructionCPU::UpdateParamOccupancyMap(const uint32_t* mapHost, const uint32_t* mapGPU, uint32_t occupancyTotal, int32_t stream)
{
  param().occupancyMap = mapHost;
  param().occupancyTotal = occupancyTotal;
  if (IsGPU()) {
    if (!((size_t)&param().occupancyTotal - (size_t)&param().occupancyMap == sizeof(param().occupancyMap) && sizeof(param().occupancyMap) == sizeof(size_t) && sizeof(param().occupancyTotal) < sizeof(size_t))) {
      throw std::runtime_error("occupancy data not consecutive in GPUParam");
    }
    const auto threadContext = GetThreadContext();
    size_t tmp[2] = {(size_t)mapGPU, 0};
    memcpy(&tmp[1], &occupancyTotal, sizeof(occupancyTotal));
    WriteToConstantMemory((char*)&processors()->param.occupancyMap - (char*)processors(), &tmp, sizeof(param().occupancyMap) + sizeof(param().occupancyTotal), stream);
  }
}
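
// Illustrative sketch, not part of this file: the memory-layout assumption that
// UpdateParamOccupancyMap() checks at runtime, expressed against a stand-in struct. The pointer
// to the occupancy map and the total occupancy must sit back to back so that both fit into the
// size_t tmp[2] staging buffer and can be written with a single WriteToConstantMemory() call.
// ExampleParamLayout is a hypothetical mock, not the real GPUParam; offsetof comes from <cstddef>.
namespace example_sketch
{
struct ExampleParamLayout {
  const uint32_t* occupancyMap;
  uint32_t occupancyTotal;
};
// occupancyTotal must directly follow the occupancyMap pointer, exactly one pointer-size apart.
static_assert(offsetof(ExampleParamLayout, occupancyTotal) - offsetof(ExampleParamLayout, occupancyMap) == sizeof(const uint32_t*),
              "occupancy fields must be consecutive");
} // namespace example_sketch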