OrtInterface.cxx
// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
// All rights not expressly granted are reserved.
//
// This software is distributed under the terms of the GNU General Public
// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.

/// \file OrtInterface.cxx
/// \brief A header library for loading ONNX models and inferencing them on CPU and GPU

#include "ML/OrtInterface.h"
#include "Framework/Logger.h" // provides the LOG(...) macros used below (assumed include; the original line was lost in extraction)

// ONNX includes
#include <onnxruntime_cxx_api.h>

#include <sstream>
#include <algorithm> // std::transform

namespace o2
{

namespace ml
{

struct OrtModel::OrtVariables { // The actual implementation is hidden in the .cxx file
  // ORT runtime objects
  Ort::RunOptions runOptions;
  std::shared_ptr<Ort::Env> env = nullptr;
  std::shared_ptr<Ort::Session> session = nullptr;
  Ort::SessionOptions sessionOptions;
  Ort::AllocatorWithDefaultOptions allocator;
  Ort::MemoryInfo memoryInfo = Ort::MemoryInfo("Cpu", OrtAllocatorType::OrtDeviceAllocator, 0, OrtMemType::OrtMemTypeDefault);
  std::unique_ptr<Ort::IoBinding> ioBinding = nullptr;
};

// General purpose
void OrtModel::initOptions(std::unordered_map<std::string, std::string> optionsMap)
{
  mPImplOrt = new OrtVariables();

  // Load from options map
  if (!optionsMap.contains("model-path")) {
    LOG(fatal) << "(ORT) Missing \"model-path\" entry in options map!";
  }

  if (!optionsMap["model-path"].empty()) {
    mModelPath = optionsMap["model-path"];
    mDeviceType = (optionsMap.contains("device-type") ? optionsMap["device-type"] : "CPU");
    mDeviceId = (optionsMap.contains("device-id") ? std::stoi(optionsMap["device-id"]) : -1);
    mAllocateDeviceMemory = (optionsMap.contains("allocate-device-memory") ? std::stoi(optionsMap["allocate-device-memory"]) : 0);
    mIntraOpNumThreads = (optionsMap.contains("intra-op-num-threads") ? std::stoi(optionsMap["intra-op-num-threads"]) : 0);
    mInterOpNumThreads = (optionsMap.contains("inter-op-num-threads") ? std::stoi(optionsMap["inter-op-num-threads"]) : 0);
    mLoggingLevel = (optionsMap.contains("logging-level") ? std::stoi(optionsMap["logging-level"]) : 0);
    mEnableProfiling = (optionsMap.contains("enable-profiling") ? std::stoi(optionsMap["enable-profiling"]) : 0);
    mEnableOptimizations = (optionsMap.contains("enable-optimizations") ? std::stoi(optionsMap["enable-optimizations"]) : 0);
    mEnvName = (optionsMap.contains("onnx-environment-name") ? optionsMap["onnx-environment-name"] : "onnx_model_inference");

    if (mDeviceType == "CPU") {
      (mPImplOrt->sessionOptions).SetIntraOpNumThreads(mIntraOpNumThreads);
      (mPImplOrt->sessionOptions).SetInterOpNumThreads(mInterOpNumThreads);
      if (mIntraOpNumThreads > 1 || mInterOpNumThreads > 1) {
        (mPImplOrt->sessionOptions).SetExecutionMode(ExecutionMode::ORT_PARALLEL);
      } else if (mIntraOpNumThreads == 1) {
        (mPImplOrt->sessionOptions).SetExecutionMode(ExecutionMode::ORT_SEQUENTIAL);
      }
      if (mLoggingLevel < 2) {
        LOG(info) << "(ORT) CPU execution provider set with " << mIntraOpNumThreads << " (mIntraOpNumThreads) and " << mInterOpNumThreads << " (mInterOpNumThreads) threads";
      }
    }

    // OrtROCMProviderOptions rocm_options{};
    // (mPImplOrt->sessionOptions).AppendExecutionProvider_ROCM(rocm_options);

    (mPImplOrt->sessionOptions).DisableMemPattern();
    (mPImplOrt->sessionOptions).DisableCpuMemArena();

    if (mEnableProfiling) {
      if (optionsMap.contains("profiling-output-path")) {
        (mPImplOrt->sessionOptions).EnableProfiling((optionsMap["profiling-output-path"] + "/ORT_LOG_").c_str());
      } else {
        LOG(warning) << "(ORT) If profiling is enabled, optionsMap[\"profiling-output-path\"] should be set. Disabling profiling for now.";
        (mPImplOrt->sessionOptions).DisableProfiling();
      }
    } else {
      (mPImplOrt->sessionOptions).DisableProfiling();
    }

    (mPImplOrt->sessionOptions).SetGraphOptimizationLevel(GraphOptimizationLevel(mEnableOptimizations));
    (mPImplOrt->sessionOptions).SetLogSeverityLevel(OrtLoggingLevel(mLoggingLevel));

    mInitialized = true;
  } else {
    LOG(fatal) << "(ORT) Model path cannot be empty!";
  }
}
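
// A minimal configuration sketch (illustrative, not part of the original source):
// only keys that initOptions() above actually parses are shown, and the model
// path is a hypothetical placeholder.
// \code
//   std::unordered_map<std::string, std::string> opts{
//     {"model-path", "/path/to/model.onnx"}, // hypothetical path
//     {"device-type", "CPU"},
//     {"intra-op-num-threads", "4"},
//     {"logging-level", "1"}};
//   o2::ml::OrtModel model;
//   model.initOptions(opts);
// \endcode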

void OrtModel::initEnvironment()
{
  mPImplOrt->env = std::make_shared<Ort::Env>(
    OrtLoggingLevel(mLoggingLevel),
    (mEnvName.empty() ? "ORT" : mEnvName.c_str()),
    // Integrate ORT logging into Fairlogger
    [](void* param, OrtLoggingLevel severity, const char* category, const char* logid, const char* code_location, const char* message) {
      if (severity == ORT_LOGGING_LEVEL_VERBOSE) {
        LOG(debug) << "(ORT) [" << logid << "|" << category << "|" << code_location << "]: " << message;
      } else if (severity == ORT_LOGGING_LEVEL_INFO) {
        LOG(info) << "(ORT) [" << logid << "|" << category << "|" << code_location << "]: " << message;
      } else if (severity == ORT_LOGGING_LEVEL_WARNING) {
        LOG(warning) << "(ORT) [" << logid << "|" << category << "|" << code_location << "]: " << message;
      } else if (severity == ORT_LOGGING_LEVEL_ERROR) {
        LOG(error) << "(ORT) [" << logid << "|" << category << "|" << code_location << "]: " << message;
      } else if (severity == ORT_LOGGING_LEVEL_FATAL) {
        LOG(fatal) << "(ORT) [" << logid << "|" << category << "|" << code_location << "]: " << message;
      } else {
        LOG(info) << "(ORT) [" << logid << "|" << category << "|" << code_location << "]: " << message;
      }
    },
    (void*)3);
  (mPImplOrt->env)->DisableTelemetryEvents(); // Disable telemetry events
}

void OrtModel::initSession()
{
  if (mAllocateDeviceMemory) {
    memoryOnDevice(mDeviceId);
  }
  mPImplOrt->session = std::make_shared<Ort::Session>(*mPImplOrt->env, mModelPath.c_str(), mPImplOrt->sessionOptions);
  mPImplOrt->ioBinding = std::make_unique<Ort::IoBinding>(*mPImplOrt->session);

  setIO();

  if (mLoggingLevel < 2) {
    LOG(info) << "(ORT) Model loaded successfully! (inputs: " << printShape(mInputShapes, mInputNames) << ", outputs: " << printShape(mOutputShapes, mOutputNames) << ")";
  }
}
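
// Hedged end-to-end sketch of the intended call order: options first, then the
// environment, then the session (initSession() also derives the I/O shapes via setIO()).
// \code
//   o2::ml::OrtModel model;
//   model.initOptions(opts);  // opts as in the sketch after initOptions()
//   model.initEnvironment();  // creates the Ort::Env with logging routed into Fairlogger
//   model.initSession();      // loads the model and binds I/O
// \endcode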

void OrtModel::memoryOnDevice(int32_t deviceIndex)
{
  if (deviceIndex >= 0) {
    (mPImplOrt->runOptions).AddConfigEntry("disable_synchronize_execution_providers", "1");
    (mPImplOrt->sessionOptions).AddConfigEntry("session.use_device_allocator_for_initializers", "1"); // See kOrtSessionOptionsUseDeviceAllocatorForInitializers, https://github.com/microsoft/onnxruntime/blob/main/include/onnxruntime/core/session/onnxruntime_session_options_config_keys.h
    (mPImplOrt->sessionOptions).AddConfigEntry("session.use_env_allocators", "1");           // This should enable using the volatile memory allocation defined in O2/GPU/GPUTracking/TPCClusterFinder/GPUTPCNNClusterizerHost.cxx; not working yet: ONNX still assigns new memory at init time
    (mPImplOrt->sessionOptions).AddConfigEntry("session_options.enable_cpu_mem_arena", "0"); // Same intent as the entry above; not working yet: ONNX still assigns new memory at init time
    // Arena memory shrinkage comes at a performance cost
    // (mPImplOrt->runOptions).AddConfigEntry("memory.enable_memory_arena_shrinkage", ("gpu:" + std::to_string(deviceIndex)).c_str()); // See kOrtRunOptionsConfigEnableMemoryArenaShrinkage, https://github.com/microsoft/onnxruntime/blob/90c263f471bbce724e77d8e62831d3a9fa838b2f/include/onnxruntime/core/session/onnxruntime_run_options_config_keys.h#L27

    std::string dev_mem_str = "";
    if (mDeviceType == "ROCM") {
      dev_mem_str = "Hip";
    }
    if (mDeviceType == "CUDA") {
      dev_mem_str = "Cuda";
    }
    mPImplOrt->memoryInfo = Ort::MemoryInfo(dev_mem_str.c_str(), OrtAllocatorType::OrtDeviceAllocator, deviceIndex, OrtMemType::OrtMemTypeDefault);
    if (mLoggingLevel < 2) {
      LOG(info) << "(ORT) Memory info set to on-device memory for device type " << mDeviceType << " with ID " << deviceIndex << " and mPImplOrt pointer " << mPImplOrt;
    }
  }
}
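
// For orientation, the Ort::MemoryInfo constructed above is equivalent to the
// following stand-alone sketch for a hypothetical CUDA device 0 ("Hip" would be
// the allocator name for ROCm):
// \code
//   Ort::MemoryInfo gpuInfo("Cuda", OrtAllocatorType::OrtDeviceAllocator, 0, OrtMemType::OrtMemTypeDefault);
// \endcode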

void OrtModel::resetSession()
{
  mPImplOrt->session = std::make_shared<Ort::Session>(*(mPImplOrt->env), mModelPath.c_str(), mPImplOrt->sessionOptions);
}

// Getters
Ort::SessionOptions* OrtModel::getSessionOptions()
{
  return &mPImplOrt->sessionOptions;
}

Ort::MemoryInfo* OrtModel::getMemoryInfo()
{
  return &mPImplOrt->memoryInfo;
}

Ort::Env* OrtModel::getEnv()
{
  return (mPImplOrt->env).get();
}
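
// These getters allow tuning the ORT objects before the session exists, e.g.
// appending the ROCm execution provider that initOptions() only sketches in a
// comment. Illustrative only; the provider must be present in the ORT build,
// and initSession() must not have been called yet.
// \code
//   OrtROCMProviderOptions rocmOptions{};
//   model.getSessionOptions()->AppendExecutionProvider_ROCM(rocmOptions);
//   model.initSession();
// \endcode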

template <class I, class O>
std::vector<O> OrtModel::v2v(std::vector<I>& input, bool clearInput)
{
  if constexpr (std::is_same_v<I, O>) {
    return input;
  } else {
    std::vector<O> output(input.size());
    std::transform(std::begin(input), std::end(input), std::begin(output), [](I f) { return O(f); });
    if (clearInput) {
      input.clear();
    }
    return output;
  }
}
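
// Usage sketch for the converter above, e.g. widening half precision to float;
// with clearInput=true the source vector is emptied after the copy. Note that
// v2v is defined in this .cxx, so callers outside this translation unit would
// need an explicit instantiation.
// \code
//   std::vector<OrtDataType::Float16_t> half(16);
//   std::vector<float> full = model.v2v<OrtDataType::Float16_t, float>(half, true);
// \endcode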

void OrtModel::setIO()
{
  for (size_t i = 0; i < (mPImplOrt->session)->GetInputCount(); ++i) {
    mInputNames.push_back((mPImplOrt->session)->GetInputNameAllocated(i, mPImplOrt->allocator).get());
  }
  for (size_t i = 0; i < (mPImplOrt->session)->GetInputCount(); ++i) {
    mInputShapes.emplace_back((mPImplOrt->session)->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
  }
  for (size_t i = 0; i < (mPImplOrt->session)->GetOutputCount(); ++i) {
    mOutputNames.push_back((mPImplOrt->session)->GetOutputNameAllocated(i, mPImplOrt->allocator).get());
  }
  for (size_t i = 0; i < (mPImplOrt->session)->GetOutputCount(); ++i) {
    mOutputShapes.emplace_back((mPImplOrt->session)->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
  }

  mInputNamesChar.resize(mInputNames.size(), nullptr);
  std::transform(std::begin(mInputNames), std::end(mInputNames), std::begin(mInputNamesChar),
                 [&](const std::string& str) { return str.c_str(); });
  mOutputNamesChar.resize(mOutputNames.size(), nullptr);
  std::transform(std::begin(mOutputNames), std::end(mOutputNames), std::begin(mOutputNamesChar),
                 [&](const std::string& str) { return str.c_str(); });

  mInputShapesCopy = mInputShapes;
  mOutputShapesCopy = mOutputShapes;
  mInputSizePerNode.resize(mInputShapes.size(), 1);
  mOutputSizePerNode.resize(mOutputShapes.size(), 1);
  mInputsTotal = 1;
  for (size_t i = 0; i < mInputShapes.size(); ++i) {
    if (mInputShapes[i].size() > 0) {
      for (size_t j = 1; j < mInputShapes[i].size(); ++j) {
        if (mInputShapes[i][j] > 0) {
          mInputsTotal *= mInputShapes[i][j];
          mInputSizePerNode[i] *= mInputShapes[i][j];
        }
      }
    }
  }
  mOutputsTotal = 1;
  for (size_t i = 0; i < mOutputShapes.size(); ++i) {
    if (mOutputShapes[i].size() > 0) {
      for (size_t j = 1; j < mOutputShapes[i].size(); ++j) {
        if (mOutputShapes[i][j] > 0) {
          mOutputsTotal *= mOutputShapes[i][j];
          mOutputSizePerNode[i] *= mOutputShapes[i][j];
        }
      }
    }
  }
}
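
// Worked example of the bookkeeping above, assuming one input node of shape
// (-1, 7) and one output node of shape (-1, 5): the dynamic batch dimension
// (index 0) is skipped, giving mInputSizePerNode = {7}, mOutputSizePerNode = {5},
// mInputsTotal = 7 and mOutputsTotal = 5.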

void OrtModel::setEnv(Ort::Env* env)
{
  mPImplOrt->env = std::shared_ptr<Ort::Env>(env); // Note: takes ownership of the raw pointer; the caller must not delete it
}

// Inference
template <class I, class O>
std::vector<O> OrtModel::inference(std::vector<I>& input)
{
  std::vector<int64_t> inputShape = mInputShapes[0];
  inputShape[0] = input.size();
  for (size_t i = 1; i < mInputShapes[0].size(); ++i) {
    inputShape[0] /= mInputShapes[0][i]; // derive the batch size from the flat input length
  }
  std::vector<Ort::Value> inputTensor;
  if constexpr (std::is_same_v<I, OrtDataType::Float16_t>) {
    inputTensor.emplace_back(Ort::Value::CreateTensor<Ort::Float16_t>(mPImplOrt->memoryInfo, reinterpret_cast<Ort::Float16_t*>(input.data()), input.size(), inputShape.data(), inputShape.size()));
  } else {
    inputTensor.emplace_back(Ort::Value::CreateTensor<I>(mPImplOrt->memoryInfo, input.data(), input.size(), inputShape.data(), inputShape.size()));
  }
  // input.clear();
  auto outputTensors = (mPImplOrt->session)->Run(mPImplOrt->runOptions, mInputNamesChar.data(), inputTensor.data(), inputTensor.size(), mOutputNamesChar.data(), mOutputNamesChar.size());
  O* outputValues = outputTensors[0].template GetTensorMutableData<O>();
  std::vector<O> outputValuesVec{outputValues, outputValues + inputShape[0] * mOutputShapes[0][1]};
  outputTensors.clear();
  return outputValuesVec;
}

template std::vector<float> OrtModel::inference<float, float>(std::vector<float>&);
template std::vector<float> OrtModel::inference<OrtDataType::Float16_t, float>(std::vector<OrtDataType::Float16_t>&);
template std::vector<OrtDataType::Float16_t> OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(std::vector<OrtDataType::Float16_t>&);
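
// Usage sketch (assumed shapes): for an input node of shape (-1, 7) and an
// output node of shape (-1, 5), a flat vector of batch * 7 values yields a
// flat vector of batch * 5 values.
// \code
//   const size_t batch = 32;
//   std::vector<float> in(batch * 7, 0.f);
//   std::vector<float> out = model.inference<float, float>(in); // out.size() == batch * 5
// \endcode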

template <class I, class O>
void OrtModel::inference(I* input, int64_t input_size, O* output)
{
  // std::vector<std::string> providers = Ort::GetAvailableProviders();
  // for (const auto& provider : providers) {
  //   LOG(info) << "Available Execution Provider: " << provider;
  // }
  std::vector<int64_t> inputShape{input_size, (int64_t)mInputShapes[0][1]};
  Ort::Value inputTensor = Ort::Value(nullptr);
  if constexpr (std::is_same_v<I, OrtDataType::Float16_t>) {
    inputTensor = Ort::Value::CreateTensor<Ort::Float16_t>(mPImplOrt->memoryInfo, reinterpret_cast<Ort::Float16_t*>(input), input_size * mInputShapes[0][1], inputShape.data(), inputShape.size());
  } else {
    inputTensor = Ort::Value::CreateTensor<I>(mPImplOrt->memoryInfo, input, input_size * mInputShapes[0][1], inputShape.data(), inputShape.size());
  }
  (mPImplOrt->ioBinding)->BindInput(mInputNames[0].c_str(), inputTensor);

  std::vector<int64_t> outputShape{input_size, mOutputShapes[0][1]};
  Ort::Value outputTensor = Ort::Value(nullptr);
  if constexpr (std::is_same_v<O, OrtDataType::Float16_t>) {
    outputTensor = Ort::Value::CreateTensor<Ort::Float16_t>(mPImplOrt->memoryInfo, reinterpret_cast<Ort::Float16_t*>(output), input_size * mOutputShapes[0][1], outputShape.data(), outputShape.size());
  } else {
    outputTensor = Ort::Value::CreateTensor<O>(mPImplOrt->memoryInfo, output, input_size * mOutputShapes[0][1], outputShape.data(), outputShape.size());
  }
  (mPImplOrt->ioBinding)->BindOutput(mOutputNames[0].c_str(), outputTensor);

  (mPImplOrt->session)->Run(mPImplOrt->runOptions, *mPImplOrt->ioBinding);
}

template void OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(OrtDataType::Float16_t*, int64_t, OrtDataType::Float16_t*);
template void OrtModel::inference<OrtDataType::Float16_t, float>(OrtDataType::Float16_t*, int64_t, float*);
template void OrtModel::inference<float, OrtDataType::Float16_t>(float*, int64_t, OrtDataType::Float16_t*);
template void OrtModel::inference<float, float>(float*, int64_t, float*);
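
// Usage sketch for the pre-allocated variant above: the caller owns both
// buffers and passes the batch size explicitly (shapes again assumed to be
// (-1, 7) and (-1, 5)).
// \code
//   const int64_t batch = 32;
//   std::vector<float> in(batch * 7, 0.f), out(batch * 5);
//   model.inference<float, float>(in.data(), batch, out.data());
// \endcode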

template <class I, class O>
void OrtModel::inference(I** input, int64_t input_size, O* output)
{
  std::vector<Ort::Value> inputTensors(mInputShapesCopy.size());

  for (size_t i = 0; i < mInputShapesCopy.size(); ++i) {

    mInputShapesCopy[i][0] = input_size; // batch-size

    if constexpr (std::is_same_v<I, OrtDataType::Float16_t>) {
      inputTensors[i] = Ort::Value::CreateTensor<Ort::Float16_t>(
        mPImplOrt->memoryInfo,
        reinterpret_cast<Ort::Float16_t*>(input[i]),
        mInputSizePerNode[i] * input_size,
        mInputShapesCopy[i].data(),
        mInputShapesCopy[i].size());
    } else {
      inputTensors[i] = Ort::Value::CreateTensor<I>(
        mPImplOrt->memoryInfo,
        input[i],
        mInputSizePerNode[i] * input_size,
        mInputShapesCopy[i].data(),
        mInputShapesCopy[i].size());
    }
  }

  mOutputShapesCopy[0][0] = input_size; // batch-size; only the first output shape is set, since a single output node is assumed below

  Ort::Value outputTensor = Ort::Value(nullptr);
  if constexpr (std::is_same_v<O, OrtDataType::Float16_t>) {
    outputTensor = Ort::Value::CreateTensor<Ort::Float16_t>(
      mPImplOrt->memoryInfo,
      reinterpret_cast<Ort::Float16_t*>(output),
      mOutputSizePerNode[0] * input_size, // assumes that there is only one output node
      mOutputShapesCopy[0].data(),
      mOutputShapesCopy[0].size());
  } else {
    outputTensor = Ort::Value::CreateTensor<O>(
      mPImplOrt->memoryInfo,
      output,
      mOutputSizePerNode[0] * input_size, // assumes that there is only one output node
      mOutputShapesCopy[0].data(),
      mOutputShapesCopy[0].size());
  }

  // === Run inference ===
  mPImplOrt->session->Run(
    mPImplOrt->runOptions,
    mInputNamesChar.data(),
    inputTensors.data(),
    mInputNamesChar.size(),
    mOutputNamesChar.data(),
    &outputTensor,
    mOutputNamesChar.size());
}

template void OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(OrtDataType::Float16_t**, int64_t, OrtDataType::Float16_t*);
template void OrtModel::inference<OrtDataType::Float16_t, float>(OrtDataType::Float16_t**, int64_t, float*);
template void OrtModel::inference<float, OrtDataType::Float16_t>(float**, int64_t, OrtDataType::Float16_t*);
template void OrtModel::inference<float, float>(float**, int64_t, float*);
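
// Usage sketch for the multi-input variant above: one raw pointer per input
// node and a single output buffer, since the code assumes exactly one output
// node. Two hypothetical input nodes of shapes (-1, 7) and (-1, 3) are used.
// \code
//   const int64_t batch = 32;
//   std::vector<float> nodeA(batch * 7, 0.f), nodeB(batch * 3, 0.f), out(batch * 5);
//   float* inputs[] = {nodeA.data(), nodeB.data()};
//   model.inference<float, float>(inputs, batch, out.data());
// \endcode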

template <class I, class O>
std::vector<O> OrtModel::inference(std::vector<std::vector<I>>& inputs)
{
  std::vector<Ort::Value> input_tensors;

  for (size_t i = 0; i < inputs.size(); ++i) {

    mInputShapesCopy[i][0] = inputs[i].size() / mInputSizePerNode[i]; // batch-size

    if constexpr (std::is_same_v<I, OrtDataType::Float16_t>) {
      input_tensors.emplace_back(
        Ort::Value::CreateTensor<Ort::Float16_t>(
          mPImplOrt->memoryInfo,
          reinterpret_cast<Ort::Float16_t*>(inputs[i].data()),
          mInputSizePerNode[i] * mInputShapesCopy[i][0],
          mInputShapesCopy[i].data(),
          mInputShapesCopy[i].size()));
    } else {
      input_tensors.emplace_back(
        Ort::Value::CreateTensor<I>(
          mPImplOrt->memoryInfo,
          inputs[i].data(),
          mInputSizePerNode[i] * mInputShapesCopy[i][0],
          mInputShapesCopy[i].data(),
          mInputShapesCopy[i].size()));
    }
  }

  int32_t totalOutputSize = mOutputsTotal * mInputShapesCopy[0][0];

  // === Run inference ===
  auto output_tensors = mPImplOrt->session->Run(
    mPImplOrt->runOptions,
    mInputNamesChar.data(),
    input_tensors.data(),
    input_tensors.size(),
    mOutputNamesChar.data(),
    mOutputNamesChar.size());

  // === Extract output values ===
  O* output_data = output_tensors[0].template GetTensorMutableData<O>();
  std::vector<O> output_vec(output_data, output_data + totalOutputSize);
  output_tensors.clear();
  return output_vec;
}

template std::vector<float> OrtModel::inference<float, float>(std::vector<std::vector<float>>&);
template std::vector<OrtDataType::Float16_t> OrtModel::inference<OrtDataType::Float16_t, OrtDataType::Float16_t>(std::vector<std::vector<OrtDataType::Float16_t>>&);
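
// Usage sketch for the vector-of-vectors variant above: one flat vector per
// input node; each node's batch size is derived from its vector length, so all
// nodes must describe the same batch (hypothetical shapes as before).
// \code
//   const size_t batch = 32;
//   std::vector<std::vector<float>> ins{std::vector<float>(batch * 7, 0.f),
//                                       std::vector<float>(batch * 3, 0.f)};
//   std::vector<float> out = model.inference<float, float>(ins);
// \endcode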

// Release session
void OrtModel::release(bool profilingEnabled)
{
  // if (profilingEnabled) {
  //   mPImplOrt->session->EndProfiling();
  // }
  LOG(info) << "(ORT) Size of mPImplOrt: " << sizeof(*mPImplOrt) << " bytes";
}

// private
std::string OrtModel::printShape(const std::vector<int64_t>& v)
{
  std::stringstream ss("");
  if (v.empty()) { // guard against the v.size() - 1 underflow below
    return ss.str();
  }
  for (size_t i = 0; i < v.size() - 1; i++) {
    ss << v[i] << "x";
  }
  ss << v[v.size() - 1];
  return ss.str();
}

std::string OrtModel::printShape(const std::vector<std::vector<int64_t>>& v, std::vector<std::string>& n)
{
  std::stringstream ss("");
  for (size_t i = 0; i < v.size(); i++) {
    ss << n[i] << " -> (" << printShape(v[i]) << "); ";
  }
  return ss.str();
}
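
// Formatting example: printShape({2, 3, 4}) returns "2x3x4", and the named
// overload renders e.g. an input "in" of shape (-1, 7) as "in -> (-1x7); ".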

} // namespace ml

} // namespace o2