// NOTE(review): fragmentary extraction — leading "59 "/"61 "/… are original file line
// numbers fused into the text; closing braces of the if-blocks are not visible here.
// Two processing contexts when double-pipeline mode is enabled, otherwise one.
59 mNContexts = mConfig->configProcessing.doublePipeline ? 2 : 1;
// Raw TPC input in the workflow implies the clusterer must run.
61 if (mConfig->configWorkflow.inputs.isSet(GPUDataTypes::InOutType::TPCRaw)) {
62 mConfig->configGRP.needsClusterer = 1;
// Compressed-cluster input implies the compressed-cluster decoder must run.
64 if (mConfig->configWorkflow.inputs.isSet(GPUDataTypes::InOutType::TPCCompressedClusters)) {
65 mConfig->configGRP.doCompClusterDecode = 1;
// Loop over all contexts; the body presumably constructs mCtx[i].mRec (the
// construction call itself is in lines missing from this view — confirm in full file).
67 for (uint32_t
i = 0;
i < mNContexts;
i++) {
// NOTE(review): looks like context 0's reconstruction instance is used as the
// device-backend master for secondary contexts — the guarding branch is not
// visible here, confirm against the full file.
69 mConfig->configDeviceBackend.master = mCtx[0].mRec.get();
// Master pointer reset afterwards (which branch this belongs to is not visible).
72 mConfig->configDeviceBackend.master =
nullptr;
// Instance creation failed — report the error (the return path is not visible).
73 if (mCtx[
i].mRec ==
nullptr) {
74 GPUError(
"Error obtaining instance of GPUReconstruction");
// Second pass: attach a GPUChainTracking chain to every context and wire up
// its configuration. Fragmentary view — several guards/braces are missing.
80 for (uint32_t
i = 0;
i < mNContexts;
i++) {
// Create the tracking chain with the configured TPC hit / TRD tracklet limits.
81 mCtx[
i].mChain = mCtx[
i].mRec->AddChain<
GPUChainTracking>(mConfig->configInterface.maxTPCHits, mConfig->configInterface.maxTRDTracklets);
// Presumably only for secondary contexts (i != 0): share QA with context 0's
// chain — the guarding condition is not visible here, confirm in full file.
83 mCtx[
i].mChain->SetQAFromForeignChain(mCtx[0].mChain);
// Hand the chain pointers to the display / QA configuration blocks.
85 mCtx[
i].mChain->mConfigDisplay = &mConfig->configDisplay;
86 mCtx[
i].mChain->mConfigQA = &mConfig->configQA;
// Propagate GRP / reconstruction / processing / workflow settings to the backend.
87 mCtx[
i].mRec->SetSettings(&mConfig->configGRP, &mConfig->configReconstruction, &mConfig->configProcessing, &mConfig->configWorkflow);
88 mCtx[
i].mChain->SetCalibObjects(mConfig->configCalib);
// ITS-tracking-specific setup for the first context only (body not visible).
90 if (
i == 0 && mConfig->configWorkflow.steps.isSet(GPUDataTypes::RecoStep::ITSTracking)) {
// When the caller supplies external output buffers, register one sub-output
// control per output region so results land in caller-owned memory.
95 if (mConfig->configInterface.outputToExternalBuffers) {
96 for (uint32_t
j = 0;
j < mCtx[
i].mOutputRegions->count();
j++) {
97 mCtx[
i].mChain->SetSubOutputControl(
j, &mCtx[
i].mOutputRegions->asArray()[
j]);
// Install a poison allocator as the common output control: any request for a
// common output buffer is a logic error in external-buffer mode and throws.
// NOTE(review): the declaration of `dummy` is in lines missing from this view.
100 dummy.
set([](
size_t size) ->
void* {
throw std::runtime_error(
"invalid output memory request, no common output buffer set");
return nullptr; });
101 mCtx[
i].mRec->SetOutputControl(dummy);
// Initialize each context's reconstruction backend.
104 for (uint32_t
i = 0;
i < mNContexts;
i++) {
// Only context 0's Init() failure is checked here; the error-handling body is
// in lines missing from this view — confirm what the full file does on failure.
105 if (
i == 0 && mCtx[
i].mRec->Init()) {
// Double the memory scaler factor; the surrounding condition (original lines
// 106-110) is not visible — presumably a retry/headroom path, confirm.
111 mCtx[
i].mRec->MemoryScalers()->factor *= 2;
// In double-pipeline mode, start a dedicated worker thread that services the
// pipeline on context 0. NOTE(review): the lambda captures `this` — the thread
// must be joined/stopped before this object is destroyed; the teardown is not
// visible in this view.
114 if (mConfig->configProcessing.doublePipeline) {
115 mInternals->pipelineThread.reset(
new std::thread([
this]() { mCtx[0].mRec->RunPipelineWorker(); }));
// Event-dump path (interior of a separate function; its signature and the
// declarations of `data`, `fname`, `nEvent` are outside this view).
// Reset IO pointers, then copy the caller's pointers in for dumping.
140 mCtx[0].mChain->ClearIOPointers();
141 mCtx[0].mChain->mIOPtrs = *
data;
// Write the event data to "event.<nEvent>.dump"; fname is presumably a
// char[1024] buffer given the snprintf bound — confirm in full file.
143 snprintf(fname, 1024,
"event.%d.dump", nEvent);
144 mCtx[0].mChain->DumpData(fname);
// With MC processing enabled, also dump the MC data via the QA object.
147 if (mConfig->configProcessing.runMC) {
148 mCtx[0].mChain->ForceInitQA();
149 snprintf(fname, 1024,
"mc.%d.dump", nEvent);
150 mCtx[0].mChain->GetQA()->UpdateChain(mCtx[0].mChain);
151 mCtx[0].mChain->GetQA()->DumpO2MCData(fname);
// Tracking-run path (interior of a separate function; `iThread`, `data`,
// `outputs`, `inputUpdateCallback`, `setOutputs` are parameters/locals declared
// outside this view). Many original lines are missing between statements.
// Guard: reject a thread index with no corresponding context (body not visible).
165 if (mNContexts <= iThread) {
// Hand the caller's input pointers to this thread's chain.
169 mCtx[iThread].mChain->mIOPtrs = *
data;
// In external-buffer mode, reset every output region before the run.
172 if (mConfig->configInterface.outputToExternalBuffers) {
173 for (uint32_t
i = 0;
i < mCtx[iThread].mOutputRegions->count();
i++) {
179 mCtx[iThread].mOutputRegions->asArray()[
i].reset();
// Deferred-input callback: when invoked, pulls updated input/output pointers
// from the caller's callback and re-seeds the chain's IO pointers.
// NOTE(review): captures locals by reference — safe only if the lambda does not
// outlive this function call; confirm against the full file.
185 auto inputWaitCallback = [
this, iThread, inputUpdateCallback, &
data, &outputs, &setOutputs]() {
188 if (inputUpdateCallback->
callback) {
189 inputUpdateCallback->
callback(updatedData, updatedOutputs);
190 mCtx[iThread].mChain->mIOPtrs = *updatedData;
191 outputs = updatedOutputs;
// Register (or clear) the final-input callback on the chain.
200 if (inputUpdateCallback) {
201 mCtx[iThread].mChain->SetFinalInputCallback(inputWaitCallback);
203 mCtx[iThread].mChain->SetFinalInputCallback(
nullptr);
// Presumably output setup for the no-deferred-input case (body not visible).
205 if (!inputUpdateCallback || !inputUpdateCallback->
callback) {
// Run the reconstruction chains; retVal's error handling is not visible here.
209 int32_t
retVal = mCtx[iThread].mRec->RunChains();
// If QA ran for this timeframe and shipping to QC is enabled, expose the QA
// histogram collections (1D, 2D, 1D-double, graphs) through the outputs struct.
213 if (mConfig->configQA.shipToQC && mCtx[iThread].mChain->QARanForTF()) {
214 outputs->
qa.
hist1 = &mCtx[iThread].mChain->GetQA()->getHistograms1D();
215 outputs->
qa.
hist2 = &mCtx[iThread].mChain->GetQA()->getHistograms2D();
216 outputs->
qa.
hist3 = &mCtx[iThread].mChain->GetQA()->getHistograms1Dd();
217 outputs->
qa.
hist4 = &mCtx[iThread].mChain->GetQA()->getGraphs();
// Copy the (possibly rewritten) IO pointers back to the caller.
220 *
data = mCtx[iThread].mChain->mIOPtrs;
// NOTE(review): member-declaration fragments of the per-context struct; the
// enclosing struct/class and the trailing semicolons were lost in extraction.
// Owning pointer to this context's reconstruction backend.
std::unique_ptr< GPUReconstruction > mRec
// Owning pointer to the per-context output regions (used in external-buffer mode).
std::unique_ptr< GPUTrackingOutputs > mOutputRegions
// Non-owning pointer to the tracking chain; presumably owned by mRec via
// AddChain — confirm GPUReconstruction's ownership model in its header.
GPUChainTracking * mChain