ArmNN 22.08
TfLiteExecutor Class Reference

#include <TfliteExecutor.hpp>

Inheritance diagram for TfLiteExecutor: TfLiteExecutor inherits from IExecutor.

Public Member Functions

 TfLiteExecutor (const ExecuteNetworkParams &m_Params)
 
std::vector< const void * > Execute () override
 Execute the given network. More...
 
void PrintNetworkInfo () override
 Print available information about the network. More...
 
void CompareAndPrintResult (std::vector< const void *> otherOutput) override
 Compare the output with the result of another IExecutor. More...
 
- Public Member Functions inherited from IExecutor
virtual ~IExecutor ()
 

Detailed Description

Definition at line 21 of file TfliteExecutor.hpp.
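
A minimal usage sketch (not taken from the ArmNN sources) may help show how this class is typically driven through the IExecutor interface. It assumes an ExecuteNetworkParams instance populated elsewhere (for example from the ExecuteNetwork command-line options) and a second, hypothetical IExecutor used as the comparison reference; error handling is omitted.

 #include <TfliteExecutor.hpp>

 #include <vector>

 // Sketch only: 'params' and 'referenceExecutor' are assumed to be set up by the caller.
 void RunAndCompare(const ExecuteNetworkParams& params, IExecutor& referenceExecutor)
 {
     // The constructor loads the model, optionally registers the ArmNN delegate
     // and fills the input tensors (see the constructor documentation below).
     TfLiteExecutor executor(params);

     executor.PrintNetworkInfo();

     // Run the configured number of iterations and collect type-erased output buffers.
     std::vector<const void*> outputs = executor.Execute();

     // Ask the reference executor to report the RMSE against these outputs.
     referenceExecutor.CompareAndPrintResult(outputs);
 }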

Constructor & Destructor Documentation

◆ TfLiteExecutor()

TfLiteExecutor ( const ExecuteNetworkParams & m_Params)

Definition at line 8 of file TfliteExecutor.cpp.

References ExecuteNetworkParams::ArmNNTfLiteDelegate, DelegateOptions::GetExternalProfilingParams(), LogAndThrow(), ExecuteNetworkParams::m_GenerateTensorData, ExecuteNetworkParams::m_InputNames, ExecuteNetworkParams::m_InputTensorDataFilePaths, ExecuteNetworkParams::m_ModelPath, ExecuteNetworkParams::m_TfLiteExecutor, DelegateOptions::SetExternalProfilingParams(), armnnDelegate::TfLiteArmnnDelegateCreate(), and armnnDelegate::TfLiteArmnnDelegateDelete().

TfLiteExecutor::TfLiteExecutor(const ExecuteNetworkParams& params)
    : m_Params(params)
{
    m_Model = tflite::FlatBufferModel::BuildFromFile(m_Params.m_ModelPath.c_str());

    m_TfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*m_Model, resolver);
    builder(&m_TfLiteInterpreter);
    m_TfLiteInterpreter->AllocateTensors();

    int status = kTfLiteError;
    if (m_Params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Armnn Delegate
        // Populate a DelegateOptions from the ExecuteNetworkParams.
        armnnDelegate::DelegateOptions delegateOptions = m_Params.ToDelegateOptions();
        delegateOptions.SetExternalProfilingParams(delegateOptions.GetExternalProfilingParams());

        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register armnn_delegate to TfLiteInterpreter
        status = m_TfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status != kTfLiteOk)
        {
            LogAndThrow("Could not register ArmNN TfLite Delegate to TfLiteInterpreter");
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    const size_t numInputs = m_Params.m_InputNames.size();

    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        // Either generate the input data or read it from the file supplied for this input.
        armnn::Optional<std::string> dataFile = m_Params.m_GenerateTensorData
            ? armnn::EmptyOptional()
            : armnn::MakeOptional<std::string>(m_Params.m_InputTensorDataFilePaths[inputIndex]);

        int input = m_TfLiteInterpreter->inputs()[inputIndex];

        TfLiteIntArray* inputDims = m_TfLiteInterpreter->tensor(input)->dims;

        unsigned int inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        const auto& inputName = m_TfLiteInterpreter->tensor(input)->name;
        const auto& dataType = m_TfLiteInterpreter->tensor(input)->type;

        switch (dataType)
        {
            case kTfLiteFloat32:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<float>(input);
                PopulateTensorWithData<float>(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteInt32:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<int32_t>(input);
                PopulateTensorWithData<int32_t>(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteUInt8:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(input);
                PopulateTensorWithData<uint8_t>(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteInt16:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<int16_t>(input);
                PopulateTensorWithData<int16_t>(inputData, inputSize, dataFile, inputName);
                break;
            }
            case kTfLiteInt8:
            {
                auto inputData = m_TfLiteInterpreter->typed_tensor<int8_t>(input);
                PopulateTensorWithData<int8_t>(inputData, inputSize, dataFile, inputName);
                break;
            }
            default:
            {
                LogAndThrow("Unsupported input tensor data type");
            }
        }
    }
}
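
The constructor derives its DelegateOptions from ExecuteNetworkParams::ToDelegateOptions(). Outside of ExecuteNetwork the same registration pattern can be written against the delegate API directly; the sketch below is an assumption-laden illustration (fixed CpuAcc backend, caller-supplied model path, illustrative function name) rather than ArmNN sample code.

 #include <armnn_delegate.hpp>

 #include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
 #include <tensorflow/lite/model.h>

 #include <memory>
 #include <stdexcept>
 #include <string>
 #include <utility>

 std::unique_ptr<tflite::Interpreter> BuildInterpreterWithArmnnDelegate(const std::string& modelPath)
 {
     auto model = tflite::FlatBufferModel::BuildFromFile(modelPath.c_str());

     std::unique_ptr<tflite::Interpreter> interpreter;
     tflite::ops::builtin::BuiltinOpResolver resolver;
     tflite::InterpreterBuilder(*model, resolver)(&interpreter);
     interpreter->AllocateTensors();

     // Assumption: run delegated nodes on the Neon (CpuAcc) backend.
     armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuAcc);

     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
         armnnTfLiteDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);

     // Hand the delegate to the interpreter; supported subgraphs now run through ArmNN.
     if (interpreter->ModifyGraphWithDelegate(std::move(armnnTfLiteDelegate)) != kTfLiteOk)
     {
         throw std::runtime_error("Could not register the ArmNN TfLite delegate");
     }
     return interpreter;
 }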

Member Function Documentation

◆ CompareAndPrintResult()

void CompareAndPrintResult ( std::vector< const void *>  otherOutput)
overridevirtual

Compare the output with the result of another IExecutor.

Implements IExecutor.

Definition at line 204 of file TfliteExecutor.cpp.

{
    for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
    {
        auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
        float result = 0;
        switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
        {
            case kTfLiteFloat32:
            {
                result = ComputeRMSE<float>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                            otherOutput[outputIndex],
                                            m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            case kTfLiteInt32:
            {
                result = ComputeRMSE<int32_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                              otherOutput[outputIndex],
                                              m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            case kTfLiteUInt8:
            {
                result = ComputeRMSE<uint8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                              otherOutput[outputIndex],
                                              m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            case kTfLiteInt8:
            {
                result = ComputeRMSE<int8_t>(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation,
                                             otherOutput[outputIndex],
                                             m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->bytes);
                break;
            }
            default:
            {
            }
        }

        std::cout << "RMSE of "
                  << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name
                  << ": " << result << std::endl;
    }
}
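
CompareAndPrintResult() leans on a ComputeRMSE<T> helper that is not reproduced in this reference. Judging from the call sites above, it takes two type-erased buffers plus a size in bytes and computes the root-mean-square error over sizeInBytes / sizeof(T) elements. The sketch below is an illustrative assumption of such a helper, not the actual ExecuteNetwork implementation.

 #include <cmath>
 #include <cstddef>

 // Illustrative only: signature inferred from the calls above.
 template <typename T>
 float ComputeRMSE(const void* expected, const void* actual, size_t sizeInBytes)
 {
     const T* expectedData = static_cast<const T*>(expected);
     const T* actualData   = static_cast<const T*>(actual);
     const size_t count    = sizeInBytes / sizeof(T);

     double squaredErrorSum = 0.0;
     for (size_t i = 0; i < count; ++i)
     {
         const double diff = static_cast<double>(expectedData[i]) - static_cast<double>(actualData[i]);
         squaredErrorSum += diff * diff;
     }
     return count == 0 ? 0.0f : static_cast<float>(std::sqrt(squaredErrorSum / static_cast<double>(count)));
 }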

◆ Execute()

std::vector< const void * > Execute ( )
overridevirtual

Execute the given network.

Returns
std::vector<const void*> A type-erased vector of the outputs that can be compared with the output of another IExecutor

Implements IExecutor.

Definition at line 103 of file TfliteExecutor.cpp.

References ARMNN_LOG, CheckInferenceTimeThreshold(), armnn::GetTimeDuration(), armnn::GetTimeNow(), LogAndThrow(), ExecuteNetworkParams::m_DontPrintOutputs, ExecuteNetworkParams::m_Iterations, ExecuteNetworkParams::m_OutputTensorFiles, ExecuteNetworkParams::m_ReuseBuffers, and ExecuteNetworkParams::m_ThresholdTime.

{
    int status = 0;
    std::vector<const void*> results;
    for (size_t x = 0; x < m_Params.m_Iterations; x++)
    {
        // Start timer to record inference time in milliseconds.
        const auto start_time = armnn::GetTimeNow();
        // Run the inference
        status = m_TfLiteInterpreter->Invoke();
        const auto duration = armnn::GetTimeDuration(start_time);

        if (m_Params.m_DontPrintOutputs || m_Params.m_ReuseBuffers)
        {
            break;
        }
        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < m_TfLiteInterpreter->outputs().size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = m_TfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;
            // If we've been asked to write to a file then set a file output stream. Otherwise use stdout.
            FILE* outputTensorFile = stdout;
            if (!m_Params.m_OutputTensorFiles.empty())
            {
                outputTensorFile = fopen(m_Params.m_OutputTensorFiles[outputIndex].c_str(), "w");
                if (outputTensorFile == NULL)
                {
                    LogAndThrow("Specified output tensor file, \"" + m_Params.m_OutputTensorFiles[outputIndex] +
                                "\", cannot be created. Defaulting to stdout. Error was: " + std::strerror(errno));
                }
                else
                {
                    ARMNN_LOG(info) << "Writing output " << outputIndex << "' of iteration: " << x+1 << " to file: '"
                                    << m_Params.m_OutputTensorFiles[outputIndex] << "'";
                }
            }
            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->name << ": ";
            results.push_back(m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->allocation);

            switch (m_TfLiteInterpreter->tensor(tfLiteDelegateOutputId)->type)
            {
                case kTfLiteFloat32:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%f ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                case kTfLiteInt32:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                case kTfLiteUInt8:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%u ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                case kTfLiteInt8:
                {
                    auto tfLiteDelegateOutputData = m_TfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                    for (int i = 0; i < outputSize; ++i)
                    {
                        fprintf(outputTensorFile, "%d ", tfLiteDelegateOutputData[i]);
                    }
                    break;
                }
                default:
                {
                    LogAndThrow("Unsupported output type");
                }
            }

            std::cout << std::endl;
        }
        CheckInferenceTimeThreshold(duration, m_Params.m_ThresholdTime);
    }

    std::cout << status;
    return results;
}
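
Each iteration is timed with armnn::GetTimeNow() and armnn::GetTimeDuration(), and the resulting duration is passed to CheckInferenceTimeThreshold() together with ExecuteNetworkParams::m_ThresholdTime. The sketch below shows how such a check can be built on those two timer utilities; the function name, the header path, and the "zero disables the check" convention are assumptions, not the ExecuteNetwork implementation.

 #include <armnn/utility/Timer.hpp>   // assumed path for the Timer.hpp referenced above

 #include <tensorflow/lite/interpreter.h>

 #include <chrono>
 #include <iostream>

 // Returns true when the inference finished within 'thresholdTimeMs' (0 = no threshold).
 bool InferenceWithinThreshold(tflite::Interpreter& interpreter, double thresholdTimeMs)
 {
     const auto startTime = armnn::GetTimeNow();
     interpreter.Invoke();
     const std::chrono::duration<double, std::milli> duration = armnn::GetTimeDuration(startTime);

     std::cout << "Inference time: " << duration.count() << " ms\n";
     if (thresholdTimeMs > 0.0 && duration.count() > thresholdTimeMs)
     {
         std::cout << "Inference exceeded the threshold of " << thresholdTimeMs << " ms\n";
         return false;
     }
     return true;
 }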

◆ PrintNetworkInfo()

void PrintNetworkInfo ( )
inlineoverridevirtual

Print available information about the network.

Implements IExecutor.

Definition at line 27 of file TfliteExecutor.hpp.

{};

The documentation for this class was generated from the following files:
TfliteExecutor.hpp
TfliteExecutor.cpp