ExecuteNetwork.cpp (ArmNN 21.05)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
#if defined(ARMNN_TFLITE_DELEGATE)
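// Runs the model through the stock TfLite interpreter, optionally with the Arm NN
// delegate registered. Each input tensor is filled from file (or with generated data)
// and every output tensor is printed for each of params.m_Iterations runs.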
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

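    // Decide whether to hand the graph to the Arm NN delegate or run it on the
    // reference TfLite kernels, based on the executor selected on the command line.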
    int status = 0;
    if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Armnn Delegate
        armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register armnn_delegate to TfLiteInterpreter
        status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
            return EXIT_FAILURE;
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

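    // Input data comes from the first entry of m_InputTensorDataFilePaths unless tensor
    // data is being generated, in which case the Optional is left empty.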
    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

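    // Fill every input tensor of the interpreter, converting the data to the element
    // type named in m_InputTypes (float, int, qsymms8 or qasymm8).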
    for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }

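    // Invoke the interpreter m_Iterations times and dump each output tensor to stdout.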
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        status = tfLiteInterpreter->Invoke();

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%f ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%u ", tfLiteDelegateOutputData[i]);
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type: "
                                    "\"" << params.m_OutputTypes[outputIndex] <<
                                    "\". Output type can be specified with the -z argument.";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
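// Parser-based path: builds an InferenceModel<TParser, TDataType>, prepares one set of
// input/output containers per simultaneous iteration, then runs the network either
// synchronously (m_Iterations times) or asynchronously when m_Concurrent is set.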
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer =
        mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;

    std::vector<std::vector<TContainer>> inputs;
    std::vector<std::vector<TContainer>> outputs;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath = params.m_MLGOTuningFilePath;
        inferenceModelParams.m_AsyncEnabled = params.m_Concurrent;

        for(const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for(const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();

        armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
            armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
            armnn::EmptyOptional();

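        // Prepare one full set of input containers per simultaneous iteration, reading
        // each tensor from its own data file unless the data is being generated.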
        for(unsigned int j = 0; j < params.m_SimultaneousIterations ; ++j)
        {
            std::vector<TContainer> inputDataContainers;
            for(unsigned int i = 0; i < numInputs; ++i)
            {
                armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                    armnn::EmptyOptional() :
                    armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[(j * numInputs) + i]);

                unsigned int numElements = model.GetInputSize(i);
                if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
                {
                    // If the user has provided a tensor shape for the current input,
                    // override numElements
                    numElements = params.m_InputTensorShapes[i]->GetNumElements();
                }

                TContainer tensorData;
                PopulateTensorWithData(tensorData,
                                       numElements,
                                       params.m_InputTypes[i],
                                       qParams,
                                       dataFile);

                inputDataContainers.push_back(tensorData);
            }
            inputs.push_back(inputDataContainers);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();

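        // Pre-allocate an output container of the right element type and size for every
        // output binding, again one set per simultaneous iteration.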
        for (unsigned int j = 0; j < params.m_SimultaneousIterations; ++j)
        {
            std::vector<TContainer> outputDataContainers;
            for (unsigned int i = 0; i < numOutputs; ++i)
            {
                if (params.m_OutputTypes[i].compare("float") == 0)
                {
                    outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
                }
                else if (params.m_OutputTypes[i].compare("int") == 0)
                {
                    outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
                }
                else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
                {
                    outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
                }
                else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
                {
                    outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
                }
                else
                {
                    ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                    return EXIT_FAILURE;
                }
            }
            outputs.push_back(outputDataContainers);
        }

        if (!params.m_Concurrent)
        {
            // Synchronous Execution
            for (size_t x = 0; x < params.m_Iterations; x++)
            {
                // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
                auto inference_duration = model.Run(inputs[0], outputs[0]);

                if (params.m_GenerateTensorData)
                {
                    ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
                }

                // Print output tensors
                const auto& infosOut = model.GetOutputBindingInfos();
                for (size_t i = 0; i < numOutputs; i++)
                {
                    const armnn::TensorInfo& infoOut = infosOut[i].second;
                    auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                    TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                          infoOut,
                                          outputTensorFile,
                                          params.m_DequantizeOutput);
                    mapbox::util::apply_visitor(printer, outputs[0][i]);
                }

                ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                                << std::fixed << inference_duration.count() << " ms\n";

                // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
                if (params.m_ThresholdTime != 0.0)
                {
                    ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                    << std::fixed << params.m_ThresholdTime << " ms";
                    auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                    ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                    << std::fixed << thresholdMinusInference << " ms" << "\n";

                    if (thresholdMinusInference < 0)
                    {
                        std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                        ARMNN_LOG(fatal) << errorMessage;
                    }
                }
            }
        }
        else
        {
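            // Asynchronous path: one working memory handle and one std::async task per
            // simultaneous iteration; results are collected from the futures below.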
            try
            {
                ARMNN_LOG(info) << "Asynchronous Execution... \n";
                std::vector<std::future<std::tuple<armnn::profiling::ProfilingGuid,
                    std::chrono::duration<double, std::milli>>>> inferenceResults;
                inferenceResults.reserve(params.m_SimultaneousIterations);

                // Create WorkingMemHandles for each inference
                std::vector<std::unique_ptr<armnn::experimental::IWorkingMemHandle>> workingMemHandles;
                workingMemHandles.reserve(params.m_SimultaneousIterations);
                for (unsigned int i = 0; i < params.m_SimultaneousIterations; ++i)
                {
                    workingMemHandles.push_back(model.CreateWorkingMemHandle());
                }

                // Run each inference in its own thread
                for (unsigned int i = 0; i < params.m_SimultaneousIterations; ++i)
                {
                    armnn::experimental::IWorkingMemHandle& workingMemHandleRef = *workingMemHandles[i].get();
                    inferenceResults.push_back(std::async(
                        std::launch::async, [&model, &workingMemHandleRef, &inputs, &outputs, i]() {
                            return model.RunAsync(workingMemHandleRef, inputs[i], outputs[i]);
                        }
                    ));
                }

                // Check the results
                for (unsigned int j = 0; j < inferenceResults.size(); ++j)
                {
                    // Get the results
                    auto inferenceResult = inferenceResults[j].get();
                    auto inference_duration = std::get<1>(inferenceResult);
                    auto inferenceID = std::get<0>(inferenceResult);

                    if (params.m_GenerateTensorData)
                    {
                        ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
                    }

                    // Print output tensors
                    const auto& infosOut = model.GetOutputBindingInfos();
                    for (size_t i = 0; i < numOutputs; i++)
                    {
                        const armnn::TensorInfo& infoOut = infosOut[i].second;
                        auto outputTensorFile = params.m_OutputTensorFiles.empty()
                                                ? ""
                                                : params.m_OutputTensorFiles[(j * numOutputs) + i];

                        TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                              infoOut,
                                              outputTensorFile,
                                              params.m_DequantizeOutput);
                        mapbox::util::apply_visitor(printer, outputs[j][i]);
                    }

                    ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                                    << std::fixed << inference_duration.count() << " ms\n";

                    // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
                    if (params.m_ThresholdTime != 0.0)
                    {
                        ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                        << std::fixed << params.m_ThresholdTime << " ms";
                        auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                        ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                        << std::fixed << thresholdMinusInference << " ms" << "\n";

                        if (thresholdMinusInference < 0)
                        {
                            ARMNN_LOG(fatal) << "Elapsed inference time is greater than provided threshold time. \n";
                        }
                    }
                    ARMNN_LOG(info) << "Asynchronous Execution is finished for Inference ID: " << inferenceID << " \n";
                }
            }
            catch (const armnn::Exception& e)
            {
                ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
                return EXIT_FAILURE;
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from command line
    ProgramOptions ProgramOptions(argc, argv);

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if(modelFormat.find("tflite") != std::string::npos)
    {
        if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
        {
        #if defined(ARMNN_TF_LITE_PARSER)
            return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
            return EXIT_FAILURE;
        #endif
        }
        else if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
                 ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
        {
        #if defined(ARMNN_TF_LITE_DELEGATE)
            return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}