#include "GPUDefParametersRuntime.h"
// ...
#include "GPUReconstructionProcessingKernels.inc"
template <class T, int32_t I, typename... Args>
void GPUReconstructionCPU::runKernelBackend(const krnlSetupTime& _xyz, const Args&... args)
{
  // ... (x: launch geometry, y: run range, both taken from _xyz)
  throw std::runtime_error("Cannot run device kernel on host");
  // ...
  if (x.nThreads != 1) {
    throw std::runtime_error("Cannot run device kernel on host with nThreads != 1");
  }
  // ...
  printf("Running %d Threads\n", mThreading->activeThreads->max_concurrency());
  // ...
  // Multi-threaded path: GPU blocks are mapped onto a TBB parallel_for over block indices.
  tbb::this_task_arena::isolate([&] {
    // ...
    tbb::parallel_for(tbb::blocked_range<uint32_t>(0, x.nBlocks, 1), [&](const tbb::blocked_range<uint32_t>& r) {
      typename T::GPUSharedMemory smem;
      for (uint32_t iB = r.begin(); iB < r.end(); iB++) {
        T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.index], args...);
      }
    });
    // ...
  });
  // ...
  // Single-threaded fallback: iterate over all blocks sequentially.
  for (uint32_t iB = 0; iB < x.nBlocks; iB++) {
    typename T::GPUSharedMemory smem;
    T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.index], args...);
  }
  // ...
}
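// Illustration (not part of the original file): a minimal, self-contained sketch of the pattern
// above -- emulating a GPU grid on the host by mapping block indices onto tbb::parallel_for
// while keeping exactly one thread per block. HypotheticalKernel and runBlocksOnHost are
// invented names for this sketch, not O2 API.
#include <cstdint>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>

struct HypotheticalKernel {
  struct GPUSharedMemory {
    int32_t scratch[64]; // per-block "shared memory" lives on the host stack
  };
  // Mirrors the Thread<I>(nBlocks, nThreads, iBlock, iThread, smem, ...) calling convention.
  static void Thread(uint32_t nBlocks, uint32_t nThreads, uint32_t iBlock, uint32_t iThread, GPUSharedMemory& smem, int32_t* out)
  {
    (void)nThreads;
    (void)iThread;
    smem.scratch[0] = static_cast<int32_t>(iBlock); // trivial use of the per-block scratch area
    out[iBlock] = smem.scratch[0] * static_cast<int32_t>(nBlocks);
  }
};

// Each TBB range chunk gets its own shared-memory instance, matching the loop above.
inline void runBlocksOnHost(uint32_t nBlocks, int32_t* out)
{
  tbb::parallel_for(tbb::blocked_range<uint32_t>(0, nBlocks, 1), [&](const tbb::blocked_range<uint32_t>& r) {
    HypotheticalKernel::GPUSharedMemory smem;
    for (uint32_t iB = r.begin(); iB < r.end(); iB++) {
      HypotheticalKernel::Thread(nBlocks, 1, iB, 0, smem, out);
    }
  });
}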
template <>
inline void GPUReconstructionCPU::runKernelBackend<GPUMemClean16, 0>(const krnlSetupTime& _xyz, void* const& ptr, uint64_t const& size)
{
  // ...
  tbb::parallel_for(0, nThreads, [&](int iThread) {
    size_t threadSize = size / nThreads;
    if (threadSize % 4096) {
      threadSize += 4096 - threadSize % 4096; // round each thread's chunk up to a 4 KiB boundary
    }
    size_t offset = threadSize * iThread;
    size_t mySize = std::min<size_t>(threadSize, size - offset);
    // ... (elided: clear the [offset, offset + mySize) chunk of ptr)
  }, tbb::static_partitioner());
  // ...
}
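// Illustration (not part of the original file): a self-contained sketch of the chunked,
// 4 KiB-aligned parallel clear used by the specialization above. parallelClear is an invented
// helper name; the real code routes this through the GPUMemClean16 kernel backend.
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <tbb/parallel_for.h>
#include <tbb/partitioner.h>

inline void parallelClear(void* ptr, size_t size, int nThreads)
{
  // Small buffers or a single worker: a plain memset is simpler.
  if (nThreads <= 1 || size < static_cast<size_t>(nThreads) * 4096) {
    memset(ptr, 0, size);
    return;
  }
  tbb::parallel_for(0, nThreads, [&](int iThread) {
    size_t threadSize = (size + nThreads - 1) / nThreads; // ceil division so the chunks cover the whole buffer
    if (threadSize % 4096) {
      threadSize += 4096 - threadSize % 4096; // keep per-thread chunk boundaries page-aligned
    }
    const size_t offset = threadSize * static_cast<size_t>(iThread);
    if (offset >= size) {
      return; // trailing threads may have nothing left to clear
    }
    const size_t mySize = std::min<size_t>(threadSize, size - offset);
    memset(static_cast<char*>(ptr) + offset, 0, mySize);
  }, tbb::static_partitioner());
}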
template <class S, int32_t I>
GPUReconstructionProcessing::krnlProperties GPUReconstructionCPU::getKernelProperties(int gpu)
{
  const auto num = GetKernelNum<S, I>();
  // ...
}
#define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward, x_types, ...) \
  template GPUReconstructionProcessing::krnlProperties GPUReconstructionCPU::getKernelProperties<GPUCA_M_KRNL_TEMPLATE(x_class)>(int gpu);
#include "GPUReconstructionKernelList.h"
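// Illustration (not part of the original file): the X-macro pattern used above. A list header
// expands one macro call per kernel; redefining that macro right before including the list
// stamps out one explicit template instantiation per entry. MY_KRNL, KernelA and KernelB are
// invented for this sketch (the list is expanded inline instead of via a real header).
#include <cstdio>

struct KernelA { static const char* name() { return "KernelA"; } };
struct KernelB { static const char* name() { return "KernelB"; } };

template <class T>
void describeKernel()
{
  printf("%s\n", T::name());
}

// In the real code this block lives in a separate list header that is #included after
// (re)defining the macro, analogous to GPUReconstructionKernelList.h above.
#define MY_KRNL(x_class) template void describeKernel<x_class>();
MY_KRNL(KernelA)
MY_KRNL(KernelB)
#undef MY_KRNL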
size_t GPUReconstructionCPU::TransferMemoryResourcesHelper(GPUProcessor* proc, int32_t stream, bool all, bool toGPU)
{
  // ... (res iterates over the registered memory resources)
  if (res.mPtr == nullptr) {
    // ...
  }
  // ...
  if (proc && res.mProcessor != proc) {
    // ...
  }
  // ...
}
// Platform-specific integer thread id:
#if defined(__APPLE__)
  // ...
#elif defined(_WIN32)
  return ((int32_t)(size_t)GetCurrentThread());
#else
  return ((int32_t)syscall(SYS_gettid));
#endif
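// Illustration (not part of the original file): a self-contained variant of the platform switch
// above that returns an integer thread id. The Linux branch mirrors syscall(SYS_gettid); the
// portable fallback hashing std::thread::id is an assumption of this sketch, not the O2 code.
#include <cstdint>
#if defined(__linux__)
#include <sys/syscall.h>
#include <unistd.h>
#else
#include <functional>
#include <thread>
#endif

inline int32_t currentThreadId()
{
#if defined(__linux__)
  return static_cast<int32_t>(syscall(SYS_gettid));
#else
  return static_cast<int32_t>(std::hash<std::thread::id>{}(std::this_thread::get_id()));
#endif
}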
  // Processing loop over the chains, followed by timing / memory statistics output:
  printf("Allocated memory when starting processing %34s", "");
  // ...
  const std::clock_t cpuTimerStart = std::clock();
  // ...
  for (uint32_t i = 0; i < mChains.size(); i++) {
    // ...
  }
  // ...
  mStatCPUTime += (double)(std::clock() - cpuTimerStart) / CLOCKS_PER_SEC;
  // ...
  printf("Allocated memory when ending processing %36s", "");
  // ...
  std::string nEventReport;
  // ...
  double kernelTotal = 0;
  // ...
  for (uint32_t i = 0; i < mTimers.size(); i++) {
    // ...
    kernelStepTimes[stepNum] += time;
    // ...
    char bandwidth[256] = "";
    // ...
    printf("Execution Time: Task (%c %8ux): %50s Time: %'10.0f us%s\n", type == 0 ? 'K' : 'C', mTimers[i]->count, mTimers[i]->name.c_str(), time * 1000000 / mStatNEvents, bandwidth);
    // ...
  }
  // ... (inside a loop over the reconstruction steps)
  if (kernelStepTimes[i] != 0. || mTimersRecoSteps[i].timerTotal.GetElapsedTime() != 0.) {
    printf("Execution Time: Step : %11s %38s Time: %'10.0f us %64s ( Total Time : %'14.0f us, CPU Time : %'14.0f us, %'7.2fx )\n",
           "Tasks" /* remaining arguments elided */);
    // ...
  }
  // ...
  printf("Execution Time: Total : %50s Time: %'10.0f us%s\n", "Total Kernel", mStatKernelTime, nEventReport.c_str());
  // ...
  GPUInfo("Total Wall Time: %10.0f us%s", mStatWallTime, nEventReport.c_str());
  // ...
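// Illustration (not part of the original file): a minimal sketch of the CPU-time vs. wall-time
// bookkeeping seen above -- std::clock() accumulates CPU time across all worker threads, while a
// steady_clock difference gives wall time, so their ratio approximates the effective parallelism
// (the "%.2fx" factor in the printout). StepTimer and its members are invented names.
#include <chrono>
#include <cstdio>
#include <ctime>

struct StepTimer {
  double cpuTime = 0.;  // seconds of CPU time, summed over threads
  double wallTime = 0.; // seconds of elapsed wall time

  template <class F>
  void run(F&& work)
  {
    const std::clock_t c0 = std::clock();
    const auto w0 = std::chrono::steady_clock::now();
    work();
    cpuTime += (double)(std::clock() - c0) / CLOCKS_PER_SEC;
    wallTime += std::chrono::duration<double>(std::chrono::steady_clock::now() - w0).count();
  }

  void report(const char* name) const
  {
    printf("%s: wall %.0f us, cpu %.0f us, %.2fx\n", name, wallTime * 1e6, cpuTime * 1e6, wallTime > 0. ? cpuTime / wallTime : 0.);
  }
};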
void GPUReconstructionCPU::UpdateParamOccupancyMap(const uint32_t* mapHost, const uint32_t* mapGPU, uint32_t occupancyTotal, int32_t stream)
{
  // ...
  // occupancyMap (a pointer) and occupancyTotal must occupy consecutive words in GPUParam,
  // so that both can be updated together below.
  if (!((size_t)&param().occupancyTotal - (size_t)&param().occupancyMap == sizeof(param().occupancyMap) &&
        sizeof(param().occupancyMap) == sizeof(size_t) && sizeof(param().occupancyTotal) < sizeof(size_t))) {
    throw std::runtime_error("occupancy data not consecutive in GPUParam");
  }
  // ...
  size_t tmp[2] = {(size_t)mapGPU, 0};
  memcpy(&tmp[1], &occupancyTotal, sizeof(occupancyTotal)); // pack pointer + total into two consecutive words
  // ...
}
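// Illustration (not part of the original file): why the two occupancy fields must be consecutive.
// A pointer-sized word and a 32-bit total are packed into one small staging buffer so that a
// single copy updates both fields of the parameter block at once. HypotheticalParams and
// packOccupancyUpdate are invented names; the real backend issues a constant-memory write
// instead of the final memcpy.
#include <cstdint>
#include <cstring>
#include <stdexcept>

struct HypotheticalParams {
  const uint32_t* occupancyMap = nullptr; // one machine word
  uint32_t occupancyTotal = 0;            // fits into the following word
};

inline void packOccupancyUpdate(HypotheticalParams& p, const uint32_t* mapGPU, uint32_t occupancyTotal)
{
  static_assert(sizeof(p.occupancyMap) == sizeof(size_t), "map pointer must be one machine word");
  if ((char*)&p.occupancyTotal - (char*)&p.occupancyMap != sizeof(p.occupancyMap)) {
    throw std::runtime_error("occupancy data not consecutive");
  }
  size_t tmp[2] = {(size_t)mapGPU, 0};
  memcpy(&tmp[1], &occupancyTotal, sizeof(occupancyTotal)); // pack the 32-bit total into the second word
  // One contiguous copy updates pointer and total together (stand-in for the constant-memory write).
  memcpy(&p.occupancyMap, tmp, sizeof(p.occupancyMap) + sizeof(p.occupancyTotal));
}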
// Declarations of the symbols referenced above, as given in the corresponding GPU reconstruction headers:
static constexpr const char* const GENERAL_STEP_NAMES[];
static constexpr const char* const RECO_STEP_NAMES[];
static constexpr int32_t N_RECO_STEPS;
static constexpr int32_t N_GENERAL_STEPS;
ProcessorType mGPUProcessorType;
~GPUReconstructionCPU() override;
virtual size_t GPUMemCpy(void* dst, const void* src, size_t size, int32_t stream, int32_t toGPU, deviceEvent* ev = nullptr, deviceEvent* evList = nullptr, int32_t nEvents = 1);
void runKernelBackend(const krnlSetupTime& _xyz, const Args&... args);
virtual size_t GPUMemCpyAlways(bool onGpu, void* dst, const void* src, size_t size, int32_t stream, int32_t toGPU, deviceEvent* ev = nullptr, deviceEvent* evList = nullptr, int32_t nEvents = 1);
static constexpr krnlRunRange krnlRunRangeNone;
size_t TransferMemoryResourceToHost(GPUMemoryResource* res, int32_t stream = -1, deviceEvent* ev = nullptr, deviceEvent* evList = nullptr, int32_t nEvents = 1);
int32_t InitDevice() override;
void UpdateParamOccupancyMap(const uint32_t* mapHost, const uint32_t* mapGPU, uint32_t occupancyTotal, int32_t stream = -1);
size_t TransferMemoryResourceToGPU(GPUMemoryResource* res, int32_t stream = -1, deviceEvent* ev = nullptr, deviceEvent* evList = nullptr, int32_t nEvents = 1);
int32_t RunChains() override;
GPUProcessorProcessors mProcShadow;
krnlProperties getKernelProperties(int gpu = -1);
void ResetDeviceProcessorTypes();
int32_t ExitDevice() override;
virtual int32_t GPUDebug(const char* state = "UNKNOWN", int32_t stream = -1, bool force = false);
static constexpr krnlEvent krnlEventNone;
size_t WriteToConstantMemory(size_t offset, const void* src, size_t size, int32_t stream = -1, deviceEvent* ev = nullptr) override;
virtual size_t TransferMemoryInternal(GPUMemoryResource* res, int32_t stream, deviceEvent* ev, deviceEvent* evList, int32_t nEvents, bool toGPU, const void* src, void* dst);
RecoStepTimerMeta mTimersRecoSteps[GPUDataTypes::N_RECO_STEPS];
int32_t mActiveHostKernelThreads;
std::vector<std::unique_ptr<timerMeta>> mTimers;
GPUDefParameters* mParCPU;
HighResTimer mTimersGeneralSteps[GPUDataTypes::N_GENERAL_STEPS];
int32_t getNKernelHostThreads(bool splitCores);
virtual std::unique_ptr<threadContext> GetThreadContext() override;
GPUDefParameters* mParDevice;
std::vector<std::unique_ptr<GPUChain>> mChains;
GPUReconstruction* mMaster;
std::unique_ptr<GPUMemorySizeScalers> mMemoryScalers;
GPUConstantMem* processors();
uint32_t mNEventsProcessed;
std::vector<GPUReconstruction*> mSlaves;
std::vector<GPUMemoryResource> mMemoryResources;
std::vector<ProcessorData> mProcessors;
void WriteConstantParams();
static GPUReconstruction* GPUReconstruction_Create_CPU(const GPUSettingsDeviceBackend& cfg);
void ClearAllocatedMemory(bool clearOutputs = true);
void PrintMemoryOverview();
int32_t getRecoStepNum(RecoStep step, bool validCheck = true);
const GPUSettingsProcessing& GetProcessingSettings() const;
int32_t EnqueuePipeline(bool terminate = false);
std::shared_ptr<GPUReconstructionThreading> mThreading;
void* mHostMemoryPoolEnd;
void* mHostMemoryPermanent;
GPUConstantMem* mProcessorsProc;
const uint32_t* occupancyMap;