10 m_Model = tflite::FlatBufferModel::BuildFromFile(m_Params.
m_ModelPath.c_str());
12 m_TfLiteInterpreter = std::make_unique<Interpreter>();
13 tflite::ops::builtin::BuiltinOpResolver resolver;
15 tflite::InterpreterBuilder builder(*m_Model, resolver);
16 builder(&m_TfLiteInterpreter);
17 m_TfLiteInterpreter->AllocateTensors();
19 int status = kTfLiteError;
27 std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
31 status = m_TfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
32 if (status != kTfLiteOk)
34 LogAndThrow(
"Could not register ArmNN TfLite Delegate to TfLiteInterpreter");
39 std::cout <<
"Running on TfLite without ArmNN delegate\n";
44 for(
unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
50 int input = m_TfLiteInterpreter->inputs()[inputIndex];
52 TfLiteIntArray* inputDims = m_TfLiteInterpreter->tensor(input)->dims;
54 unsigned int inputSize = 1;
55 for (
unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
57 inputSize *= inputDims->data[dim];
60 const auto& inputName = m_TfLiteInterpreter->tensor(input)->name;
61 const auto& dataType = m_TfLiteInterpreter->tensor(input)->type;
67 auto inputData = m_TfLiteInterpreter->typed_tensor<
float>(input);
68 PopulateTensorWithData<float>(inputData, inputSize, dataFile, inputName);
73 auto inputData = m_TfLiteInterpreter->typed_tensor<int32_t>(input);
74 PopulateTensorWithData<int32_t>(inputData, inputSize, dataFile, inputName);
79 auto inputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(input);
80 PopulateTensorWithData<uint8_t>(inputData, inputSize, dataFile, inputName);
85 auto inputData = m_TfLiteInterpreter->typed_tensor<int16_t>(input);
86 PopulateTensorWithData<int16_t>(inputData, inputSize, dataFile, inputName);
91 auto inputData = m_TfLiteInterpreter->typed_tensor<int8_t>(input);
92 PopulateTensorWithData<int8_t>(inputData, inputSize, dataFile, inputName);
106 std::vector<const void*> results;
112 status = m_TfLiteInterpreter->Invoke();
120 for (
unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
122 auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
123 TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
125 FILE* outputTensorFile = stdout;
129 if (outputTensorFile == NULL)
132 "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
136 ARMNN_LOG(info) <<
"Writing output " << outputIndex <<
"' of iteration: " << x+1 <<
" to file: '" 141 for (
unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
143 outputSize *= outputDims->data[dim];
146 std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name <<
": ";
147 results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);
149 switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
154 auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<
float>(tfLiteDelegateOutputId);
156 for (
int i = 0; i < outputSize; ++i)
158 fprintf(outputTensorFile,
"%f ", tfLiteDelegateOutputData[i]);
164 auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
165 for (
int i = 0; i < outputSize; ++i)
167 fprintf(outputTensorFile,
"%d ", tfLiteDelegateOutputData[i]);
173 auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
174 for (
int i = 0; i < outputSize; ++i)
176 fprintf(outputTensorFile,
"%u ", tfLiteDelegateOutputData[i]);
182 auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
183 for (
int i = 0; i < outputSize; ++i)
185 fprintf(outputTensorFile,
"%d ", tfLiteDelegateOutputData[i]);
195 std::cout << std::endl;
206 for (
unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
208 auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
210 switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
214 result = ComputeRMSE<float>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
215 otherOutput[outputIndex],
216 m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
222 result = ComputeRMSE<int32_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
223 otherOutput[outputIndex],
224 m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
229 result = ComputeRMSE<uint8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
230 otherOutput[outputIndex],
231 m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
236 result = ComputeRMSE<int8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
237 otherOutput[outputIndex],
238 m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
246 std::cout <<
"RMSE of " 247 << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name
248 <<
": " << result << std::endl;
std::chrono::duration< double, std::milli > GetTimeDuration(std::chrono::high_resolution_clock::time_point start_time)
void LogAndThrow(std::string eMsg)
#define ARMNN_LOG(severity)
std::chrono::high_resolution_clock::time_point GetTimeNow()
TfLiteExecutor m_TfLiteExecutor
std::vector< std::string > m_OutputTensorFiles
void SetExternalProfilingParams(const arm::pipe::ProfilingOptions &externalProfilingParams)
TfLiteExecutor(const ExecuteNetworkParams &m_Params)
bool m_GenerateTensorData
Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter.
void CompareAndPrintResult(std::vector< const void *> otherOutput) override
Compare the output with the result of another IExecutor.
std::vector< std::string > m_InputNames
std::vector< const void * > Execute() override
Execute the given network.
std::vector< std::string > m_InputTensorDataFilePaths
TfLiteDelegate * TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional instance.
void TfLiteArmnnDelegateDelete(TfLiteDelegate *tfLiteDelegate)
bool CheckInferenceTimeThreshold(const std::chrono::duration< double, std::milli > &duration, const double &thresholdTime)
Given a measured duration and a threshold time, tell the user whether we succeeded or not.
const arm::pipe::ProfilingOptions & GetExternalProfilingParams() const