Project
Loading...
Searching...
No Matches
FastSimulations.cxx
Go to the documentation of this file.
1// Copyright 2019-2020 CERN and copyright holders of ALICE O2.
2// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders.
3// All rights not expressly granted are reserved.
4//
5// This software is distributed under the terms of the GNU General Public
6// License v3 (GPL Version 3), copied verbatim in the file "COPYING".
7//
8// In applying this license CERN does not waive the privileges and immunities
9// granted to it by virtue of its status as an Intergovernmental Organization
10// or submit itself to any jurisdiction.
11
16
17#include "FastSimulations.h"
18
19#include "Utils.h"
20
21#include <fstream>
22
23using namespace o2::zdc::fastsim;
24
25//-------------------------------------------------NeuralFastSimulation------------------------------------------------------
26
27NeuralFastSimulation::NeuralFastSimulation(const std::string& modelPath,
28 OrtAllocatorType allocatorType,
29 OrtMemType memoryType,
30 int64_t batchSize) : mModelPath(modelPath), mSession(nullptr), mMemoryInfo(Ort::MemoryInfo::CreateCpu(allocatorType, memoryType)), mBatchSize(batchSize)
31{
32}
33
35{
36 // create the session object
37 Ort::SessionOptions options;
38 options.SetIntraOpNumThreads(1); // one thread since we might have multiple workers in parallel anyway
39 mSession = new Ort::Session(mEnv, mModelPath.c_str(), options);
41}
42
{
  // Batch size with which dynamic (-1) input tensor dimensions are replaced
  // in setInputOutputData().
  return mBatchSize;
}
47
49{
50 for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
51 mInputNames.push_back(mSession->GetInputNameAllocated(i, mAllocator).get());
52 }
53 for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
54 mInputShapes.emplace_back(mSession->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
55 }
56 for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
57 mOutputNames.push_back(mSession->GetOutputNameAllocated(i, mAllocator).get());
58 }
59
60 // Prevent negative values from being passed as tensor shape
61 // Which is no problem in python implementation of ONNX where -1 means that
62 // shape has to be figured out by library. In C++ this is illegal
63 for (auto& shape : mInputShapes) {
64 for (auto& elem : shape) {
65 if (elem < 0) {
66 elem = mBatchSize;
67 }
68 }
69 }
70}
71
72void NeuralFastSimulation::setTensors(std::vector<std::vector<float>>& input)
73{
74 for (size_t i = 0; i < mInputShapes.size(); ++i) {
75 mInputTensors.emplace_back(Ort::Value::CreateTensor<float>(
76 mMemoryInfo, input[i].data(), input[i].size(), mInputShapes[i].data(), mInputShapes[i].size()));
77 }
78}
79
80//---------------------------------------------------------Conditional-------------------------------------------------------
81ConditionalModelSimulation::ConditionalModelSimulation(const std::string& modelPath, const int64_t batchSize) : NeuralFastSimulation(modelPath, OrtDeviceAllocator, OrtMemTypeCPU, batchSize)
82{
84}
85
86bool ConditionalModelSimulation::setInput(std::vector<std::vector<float>>& input)
87{
88 // Checks if number of inputs matches
89 if (mSession->GetInputCount() != input.size()) {
90 return false;
91 }
92 setTensors(input);
93 return true;
94}
95
{
  // Run simulation (single event) with default run options.
  // Ort::Session::Run expects raw const char* arrays for the layer names,
  // so build temporary views over the stored std::string names.
  std::vector<const char*> tmpInputs;
  std::vector<const char*> tmpOutputs;
  for (unsigned int i = 0; i < mInputNames.size(); i++) {
    tmpInputs.emplace_back(mInputNames[i].c_str());
  }
  for (unsigned int i = 0; i < mOutputNames.size(); i++) {
    tmpOutputs.emplace_back(mOutputNames[i].c_str());
  }
  mModelOutput = mSession->Run(Ort::RunOptions{nullptr},
                               tmpInputs.data(),
                               mInputTensors.data(),
                               mInputTensors.size(),
                               tmpOutputs.data(),
                               mOutputNames.size());
  // Input tensors are single-use: clear them so the next event starts fresh.
  mInputTensors.clear();
}
115
// Accessor for the output tensors produced by the most recent run().
const std::vector<Ort::Value>& ConditionalModelSimulation::getResult()
{
  return mModelOutput;
}
120
121//------------------------------------------------------BatchHandler---------------------------------------------------------
122
// Stores the batch size; instances are only created through getInstance().
BatchHandler::BatchHandler(size_t batchSize) : mBatchSize(batchSize) {}
124
{
  // Meyers singleton: the function-local static is initialized exactly once,
  // thread-safely, on first use (C++11 magic static).
  // NOTE(review): batchSize is only honored on the first call; subsequent
  // calls return the existing instance regardless of the argument.
  static o2::zdc::fastsim::BatchHandler instance(batchSize);
  return instance;
}
130
131std::optional<std::vector<std::vector<float>>> BatchHandler::getBatch(const std::vector<float>& input)
132{
133 std::scoped_lock guard(mMutex);
134 mBatch.emplace_back(input);
135 if (mBatch.size() == mBatchSize) {
136 auto value = std::optional(std::move(mBatch));
137 mBatch.clear();
138 return value;
139 } else {
140 return std::nullopt;
141 }
142}
143
144//---------------------------------------------------------Utils-------------------------------------------------------------
145std::optional<std::pair<std::vector<float>, std::vector<float>>> o2::zdc::fastsim::loadScales(const std::string& path)
146{
147 std::fstream file(path, std::fstream::in);
148 if (!file.is_open()) {
149 return std::nullopt;
150 }
151
152 auto means = parse_block(file, "#means");
153 if (means.empty()) {
154 return std::nullopt;
155 }
156
157 auto scales = parse_block(file, "#scales");
158 if (scales.empty()) {
159 return std::nullopt;
160 }
161
162 if (means.size() != scales.size()) {
163 return std::nullopt;
164 }
165 return std::make_pair(std::move(means), std::move(scales));
166}
int32_t i
Meyers Singleton thread safe singleton. Responsible for collecting particle data for batch processing...
std::optional< std::vector< std::vector< float > > > getBatch(const std::vector< float > &input)
static BatchHandler & getInstance(size_t batchSize)
BatchHandler(const BatchHandler &)=delete
bool setInput(std::vector< std::vector< float > > &input) override
Implements setInput.
ConditionalModelSimulation(const std::string &modelPath, int64_t batchSize)
const std::vector< Ort::Value > & getResult() override
Returns single model output as const&. Returned vector is of size 1.
Abstract class providing interface for various specialized implementations.
std::vector< Ort::Value > mInputTensors
Container for input tensors.
std::vector< std::string > mOutputNames
std::string mModelPath
model path (where to find the ONNX model)
std::vector< std::vector< int64_t > > mInputShapes
std::vector< std::string > mInputNames
Input/Output names and input shape.
void setInputOutputData()
Sets models metadata (input/output layers names, inputs shape) in onnx session.
NeuralFastSimulation(const std::string &modelPath, OrtAllocatorType allocatorType, OrtMemType memoryType, int64_t batchSize)
void setTensors(std::vector< std::vector< float > > &input)
Converts flattend input data to Ort::Value. Tensor shapes are taken from loaded model metadata.
Ort::AllocatorWithDefaultOptions mAllocator
void initRunSession()
(late) init session and provide mechanism to customize ONNX session with external options
GLsizeiptr size
Definition glcorearb.h:659
GLsizei const GLfloat * value
Definition glcorearb.h:819
GLboolean * data
Definition glcorearb.h:298
GLsizei const GLchar *const * path
Definition glcorearb.h:3591
std::vector< float > parse_block(std::istream &input, const std::string &option)
Parses .txt file containing scales for model. Function will search for given marker (option) and read...
Definition Utils.cxx:58
std::optional< std::pair< std::vector< float >, std::vector< float > > > loadScales(const std::string &path)
loads and parse model scales from file at path