A header library for loading ONNX models and running inference on them on CPU and GPU.
#include "ML/OrtInterface.h"#include "ML/3rdparty/GPUORTFloat16.h"#include <onnxruntime_cxx_api.h>#include <sstream>Go to the source code of this file.
Classes | |
| struct | o2::ml::OrtModel::OrtVariables |
Namespaces | |
| namespace | o2 |
| a couple of static helper functions to create timestamp values for CCDB queries or override obsolete objects | |
| namespace | o2::ml |
Functions | |
| template std::vector< float > | o2::ml::OrtModel::inference< OrtDataType::Float16_t, float > (std::vector< OrtDataType::Float16_t > &) |
| template std::vector< OrtDataType::Float16_t > | o2::ml::OrtModel::inference< OrtDataType::Float16_t, OrtDataType::Float16_t > (std::vector< OrtDataType::Float16_t > &) |
| template void | o2::ml::OrtModel::inference< OrtDataType::Float16_t, OrtDataType::Float16_t > (OrtDataType::Float16_t *, int64_t, OrtDataType::Float16_t *) |
| template void | o2::ml::OrtModel::inference< OrtDataType::Float16_t, float > (OrtDataType::Float16_t *, int64_t, float *) |
| template void | o2::ml::OrtModel::inference< float, OrtDataType::Float16_t > (float *, int64_t, OrtDataType::Float16_t *) |
| template void | o2::ml::OrtModel::inference< OrtDataType::Float16_t, OrtDataType::Float16_t > (OrtDataType::Float16_t **, int64_t, OrtDataType::Float16_t *) |
| template void | o2::ml::OrtModel::inference< OrtDataType::Float16_t, float > (OrtDataType::Float16_t **, int64_t, float *) |
| template void | o2::ml::OrtModel::inference< float, OrtDataType::Float16_t > (float **, int64_t, OrtDataType::Float16_t *) |
| template std::vector< OrtDataType::Float16_t > | o2::ml::OrtModel::inference< OrtDataType::Float16_t, OrtDataType::Float16_t > (std::vector< std::vector< OrtDataType::Float16_t > > &) |
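The instantiations above cover vector-based and raw-pointer overloads of OrtModel::inference for float and OrtDataType::Float16_t data. The sketch below shows the vector-based Float16-to-float call; it assumes the OrtModel instance has already been configured and loaded through the setup API declared in ML/OrtInterface.h (not part of this listing), and the helper name runFloat16Inference is hypothetical.

#include "ML/OrtInterface.h"
#include "ML/3rdparty/GPUORTFloat16.h"

#include <vector>

// Hypothetical helper: forwards to the <Float16_t, float> instantiation listed
// above, i.e. Float16 input values in, float output values out.
std::vector<float> runFloat16Inference(o2::ml::OrtModel& model,
                                       std::vector<OrtDataType::Float16_t>& input)
{
  // The explicit template arguments select
  // inference<OrtDataType::Float16_t, float>(std::vector<OrtDataType::Float16_t>&).
  return model.inference<OrtDataType::Float16_t, float>(input);
}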
Detailed Description
A header library for loading ONNX models and running inference on them on CPU and GPU.
Definition in file OrtInterface.cxx.
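For completeness, a sketch of one pointer-based overload, which writes results into a caller-provided buffer instead of returning a std::vector. The helper name, the batch size numEntries, and the output buffer sizing are assumptions; the required output length depends on the loaded model and is not documented on this page.

#include "ML/OrtInterface.h"
#include "ML/3rdparty/GPUORTFloat16.h"

#include <cstdint>

// Hypothetical helper: uses the (float*, int64_t, OrtDataType::Float16_t*)
// instantiation listed above. The caller must size `output` to match the
// model's output tensor (an assumption, not documented on this page).
void runPointerInference(o2::ml::OrtModel& model,
                         float* input, int64_t numEntries,
                         OrtDataType::Float16_t* output)
{
  model.inference<float, OrtDataType::Float16_t>(input, numEntries, output);
}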