19#include "GPUDefParametersRuntime.h"
34#include "GPUReconstructionProcessingKernels.inc"
55template <
class T, int32_t I,
typename... Args>
61 throw std::runtime_error(
"Cannot run device kernel on host");
63 if (
x.nThreads != 1) {
64 throw std::runtime_error(
"Cannot run device kernel on host with nThreads != 1");
69 printf(
"Running %d Threads\n",
mThreading->activeThreads->max_concurrency());
71 tbb::this_task_arena::isolate([&] {
73 tbb::parallel_for(tbb::blocked_range<uint32_t>(0,
x.nBlocks, 1), [&](
const tbb::blocked_range<uint32_t>&
r) {
74 typename T::GPUSharedMemory smem;
75 for (uint32_t iB = r.begin(); iB < r.end(); iB++) {
76 T::template Thread<I>(x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[y.index], args...);
82 for (uint32_t iB = 0; iB <
x.nBlocks; iB++) {
83 typename T::GPUSharedMemory smem;
84 T::template Thread<I>(
x.nBlocks, 1, iB, 0, smem, T::Processor(*mHostConstantMem)[
y.index], args...);
90inline void GPUReconstructionCPU::runKernelBackend<GPUMemClean16, 0>(
const krnlSetupTime& _xyz,
void*
const&
ptr, uint64_t
const&
size)
94 tbb::parallel_for(0, nThreads, [&](
int iThread) {
95 size_t threadSize =
size / nThreads;
96 if (threadSize % 4096) {
97 threadSize += 4096 - threadSize % 4096;
99 size_t offset = threadSize * iThread;
100 size_t mySize = std::min<size_t>(threadSize,
size -
offset);
104 }, tbb::static_partitioner());
110template <
class S,
int32_t I>
116 const auto num = GetKernelNum<S, I>();
128#define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward, x_types, ...) \
129 template GPUReconstructionProcessing::krnlProperties GPUReconstructionCPU::getKernelProperties<GPUCA_M_KRNL_TEMPLATE(x_class)>(int gpu);
130#include "GPUReconstructionKernelList.h"
142size_t GPUReconstructionCPU::TransferMemoryResourcesHelper(
GPUProcessor* proc, int32_t
stream,
bool all,
bool toGPU)
149 if (
res.mPtr ==
nullptr) {
152 if (proc &&
res.mProcessor != proc) {
173#if defined(__APPLE__)
176 return ((int32_t)(
size_t)GetCurrentThread());
178 return ((int32_t)syscall(SYS_gettid));
219 static std::mt19937 rng;
220 static std::uniform_int_distribution<uint64_t> dist(0, 1000000);
222 GPUInfo(
"Fuzzing memory scaling factor with %lu", fuzzFactor);
230 printf(
"Allocated memory when starting processing %34s",
"");
234 const std::clock_t cpuTimerStart = std::clock();
244 for (uint32_t
i = 0;
i <
mChains.size();
i++) {
255 mStatCPUTime += (double)(std::clock() - cpuTimerStart) / CLOCKS_PER_SEC;
257 printf(
"Allocated memory when ending processing %36s",
"");
262 std::string nEventReport;
266 double kernelTotal = 0;
270 for (uint32_t
i = 0;
i <
mTimers.size();
i++) {
287 kernelStepTimes[stepNum] +=
time;
289 char bandwidth[256] =
"";
293 printf(
"Execution Time: Task (%c %8ux): %50s Time: %'10.0f us%s\n",
type == 0 ?
'K' :
'C',
mTimers[
i]->count,
mTimers[
i]->name.c_str(),
time * 1000000 /
mStatNEvents, bandwidth);
302 if (kernelStepTimes[
i] != 0. ||
mTimersRecoSteps[
i].timerTotal.GetElapsedTime() != 0.) {
303 printf(
"Execution Time: Step : %11s %38s Time: %'10.0f us %64s ( Total Time : %'14.0f us, CPU Time : %'14.0f us, %'7.2fx )\n",
"Tasks",
331 printf(
"Execution Time: Total : %50s Time: %'10.0f us%s\n",
"Total Kernel",
mStatKernelTime, nEventReport.c_str());
335 GPUInfo(
"Total Wall Time: %10.0f us%s",
mStatWallTime, nEventReport.c_str());
360 if (!((
size_t)&
param().occupancyTotal - (
size_t)&
param().occupancyMap ==
sizeof(
param().occupancyMap) &&
sizeof(
param().occupancyMap) ==
sizeof(
size_t) &&
sizeof(
param().occupancyTotal) <
sizeof(
size_t))) {
361 throw std::runtime_error(
"occupancy data not consecutive in GPUParam");
364 size_t tmp[2] = {(size_t)mapGPU, 0};
365 memcpy(&tmp[1], &occupancyTotal,
sizeof(occupancyTotal));
#define GPUCA_BUFFER_ALIGNMENT
Online TRD tracker based on extrapolated TPC tracks.
Used for storing the MC labels for the TRD tracklets.
TRD tracklet word for the GPU tracker — 32-bit tracklet info + half-chamber ID + index.
static constexpr const char *const GENERAL_STEP_NAMES[]
static constexpr const char *const RECO_STEP_NAMES[]
static constexpr int32_t N_RECO_STEPS
static constexpr int32_t N_GENERAL_STEPS
ProcessorType mGPUProcessorType
~GPUReconstructionCPU() override
virtual size_t GPUMemCpy(void *dst, const void *src, size_t size, int32_t stream, int32_t toGPU, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
void runKernelBackend(const krnlSetupTime &_xyz, const Args &... args)
virtual size_t GPUMemCpyAlways(bool onGpu, void *dst, const void *src, size_t size, int32_t stream, int32_t toGPU, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
static constexpr krnlRunRange krnlRunRangeNone
size_t TransferMemoryResourceToHost(GPUMemoryResource *res, int32_t stream=-1, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
int32_t InitDevice() override
void UpdateParamOccupancyMap(const uint32_t *mapHost, const uint32_t *mapGPU, uint32_t occupancyTotal, int32_t stream=-1)
size_t TransferMemoryResourceToGPU(GPUMemoryResource *res, int32_t stream=-1, deviceEvent *ev=nullptr, deviceEvent *evList=nullptr, int32_t nEvents=1)
int32_t RunChains() override
GPUProcessorProcessors mProcShadow
krnlProperties getKernelProperties(int gpu=-1)
void ResetDeviceProcessorTypes()
int32_t ExitDevice() override
virtual int32_t GPUDebug(const char *state="UNKNOWN", int32_t stream=-1, bool force=false)
static constexpr krnlEvent krnlEventNone
size_t WriteToConstantMemory(size_t offset, const void *src, size_t size, int32_t stream=-1, deviceEvent *ev=nullptr) override
virtual size_t TransferMemoryInternal(GPUMemoryResource *res, int32_t stream, deviceEvent *ev, deviceEvent *evList, int32_t nEvents, bool toGPU, const void *src, void *dst)
RecoStepTimerMeta mTimersRecoSteps[GPUDataTypes::N_RECO_STEPS]
int32_t mActiveHostKernelThreads
std::vector< std::unique_ptr< timerMeta > > mTimers
GPUDefParameters * mParCPU
HighResTimer mTimersGeneralSteps[GPUDataTypes::N_GENERAL_STEPS]
int32_t getNKernelHostThreads(bool splitCores)
virtual std::unique_ptr< threadContext > GetThreadContext() override
GPUDefParameters * mParDevice
std::vector< std::unique_ptr< GPUChain > > mChains
GPUReconstruction * mMaster
std::unique_ptr< GPUMemorySizeScalers > mMemoryScalers
GPUConstantMem * processors()
uint32_t mNEventsProcessed
std::vector< GPUReconstruction * > mSlaves
std::vector< GPUMemoryResource > mMemoryResources
std::vector< ProcessorData > mProcessors
void WriteConstantParams()
static GPUReconstruction * GPUReconstruction_Create_CPU(const GPUSettingsDeviceBackend &cfg)
void ClearAllocatedMemory(bool clearOutputs=true)
void PrintMemoryOverview()
int32_t getRecoStepNum(RecoStep step, bool validCheck=true)
const GPUSettingsProcessing & GetProcessingSettings() const
int32_t EnqueuePipeline(bool terminate=false)
std::shared_ptr< GPUReconstructionThreading > mThreading
void * mHostMemoryPoolEnd
void * mHostMemoryPermanent
GLint GLint GLsizei GLint GLenum GLenum type
std::string to_string(gsl::span< T, Size > span)
GPUConstantMem * mProcessorsProc
const uint32_t * occupancyMap