ArmNN 20.08
InferenceModel.hpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
8 #include <armnn/ArmNN.hpp>
9 #include <armnn/Logging.hpp>
10 #include <armnn/utility/Timer.hpp>
11 #include <armnn/BackendRegistry.hpp>
12 #include <armnn/utility/Assert.hpp>
13 
14 #if defined(ARMNN_SERIALIZER)
15 #include "armnnDeserializer/IDeserializer.hpp"
16 #endif
17 #if defined(ARMNN_TF_LITE_PARSER)
18 #include "armnnTfLiteParser/ITfLiteParser.hpp"
19 #endif
20 #if defined(ARMNN_ONNX_PARSER)
21 #include "armnnOnnxParser/IOnnxParser.hpp"
22 #endif
23 
24 #include <Filesystem.hpp>
25 #include <HeapProfiling.hpp>
26 #include <TensorIOUtils.hpp>
27 
29 #include <boost/exception/exception.hpp>
30 #include <boost/exception/diagnostic_information.hpp>
31 #include <boost/format.hpp>
32 #include <boost/program_options.hpp>
33 #include <boost/variant.hpp>
34 
35 #include <algorithm>
36 #include <iterator>
37 #include <fstream>
38 #include <map>
39 #include <string>
40 #include <vector>
41 #include <type_traits>
42 
43 namespace
44 {
45 
46 inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
47  armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
48 {
49  if (backendIds.empty())
50  {
51  return false;
52  }
53 
54  armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();
55 
56  bool allValid = true;
57  for (const auto& backendId : backendIds)
58  {
59  if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
60  {
61  allValid = false;
62  if (invalidBackendIds)
63  {
64  if (!invalidBackendIds.value().empty())
65  {
66  invalidBackendIds.value() += ", ";
67  }
68  invalidBackendIds.value() += backendId;
69  }
70  }
71  }
72  return allValid;
73 }
74 
75 } // anonymous namespace
76 
77 namespace InferenceModelInternal
78 {
79 using BindingPointInfo = armnn::BindingPointInfo;
80 
81 using QuantizationParams = std::pair<float,int32_t>;
82 
83 struct Params
84 {
85  std::string m_ModelPath;
86  std::vector<std::string> m_InputBindings;
87  std::vector<armnn::TensorShape> m_InputShapes;
88  std::vector<std::string> m_OutputBindings;
89  std::vector<armnn::BackendId> m_ComputeDevices;
90  std::string m_DynamicBackendsPath;
91  size_t m_SubgraphId;
92  bool m_IsModelBinary;
93  bool m_VisualizePostOptimizationModel;
94  bool m_EnableFp16TurboMode;
95  bool m_EnableBf16TurboMode;
96  bool m_PrintIntermediateLayers;
97  bool m_ParseUnsupported;
98  bool m_InferOutputShape;
99 
100  Params()
101  : m_ComputeDevices{}
102  , m_SubgraphId(0)
103  , m_IsModelBinary(true)
104  , m_VisualizePostOptimizationModel(false)
105  , m_EnableFp16TurboMode(false)
106  , m_EnableBf16TurboMode(false)
107  , m_PrintIntermediateLayers(false)
108  , m_ParseUnsupported(false)
109  , m_InferOutputShape(false)
110  {}
111 };
112 
113 } // namespace InferenceModelInternal
114 
115 template <typename IParser>
116 struct CreateNetworkImpl
117 {
118 public:
119  using Params = InferenceModelInternal::Params;
120 
121  static armnn::INetworkPtr Create(const Params& params,
122  std::vector<armnn::BindingPointInfo>& inputBindings,
123  std::vector<armnn::BindingPointInfo>& outputBindings)
124  {
125  const std::string& modelPath = params.m_ModelPath;
126 
127  // Create a network from a file on disk
128  auto parser(IParser::Create());
129 
130  std::map<std::string, armnn::TensorShape> inputShapes;
131  if (!params.m_InputShapes.empty())
132  {
133  const size_t numInputShapes = params.m_InputShapes.size();
134  const size_t numInputBindings = params.m_InputBindings.size();
135  if (numInputShapes < numInputBindings)
136  {
137  throw armnn::Exception(boost::str(boost::format(
138  "Not every input has its tensor shape specified: expected=%1%, got=%2%")
139  % numInputBindings % numInputShapes));
140  }
141 
142  for (size_t i = 0; i < numInputShapes; i++)
143  {
144  inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
145  }
146  }
147 
148  std::vector<std::string> requestedOutputs = params.m_OutputBindings;
149  armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
150 
151  {
152  ARMNN_SCOPED_HEAP_PROFILING("Parsing");
153  // Handle text and binary input differently by calling the corresponding parser function
154  network = (params.m_IsModelBinary ?
155  parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
156  parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
157  }
158 
159  for (const std::string& inputLayerName : params.m_InputBindings)
160  {
161  inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
162  }
163 
164  for (const std::string& outputLayerName : params.m_OutputBindings)
165  {
166  outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
167  }
168 
169  return network;
170  }
171 };
172 
173 #if defined(ARMNN_SERIALIZER)
174 template <>
175 struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
176 {
177 public:
178  using IParser = armnnDeserializer::IDeserializer;
179  using Params = InferenceModelInternal::Params;
180 
181  static armnn::INetworkPtr Create(const Params& params,
182  std::vector<armnn::BindingPointInfo>& inputBindings,
183  std::vector<armnn::BindingPointInfo>& outputBindings)
184  {
185  auto parser(IParser::Create());
186  ARMNN_ASSERT(parser);
187 
188  armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
189 
190  {
191  ARMNN_SCOPED_HEAP_PROFILING("Parsing");
192 
193  std::error_code errorCode;
194  fs::path pathToFile(params.m_ModelPath);
195  if (!fs::exists(pathToFile, errorCode))
196  {
197  throw armnn::FileNotFoundException(boost::str(
198  boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
199  params.m_ModelPath %
200  errorCode %
201  CHECK_LOCATION().AsString()));
202  }
203  std::ifstream file(params.m_ModelPath, std::ios::binary);
204 
205  network = parser->CreateNetworkFromBinary(file);
206  }
207 
208  unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);
209 
210  for (const std::string& inputLayerName : params.m_InputBindings)
211  {
212  armnnDeserializer::BindingPointInfo inputBinding =
213  parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
214  inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
215  }
216 
217  for (const std::string& outputLayerName : params.m_OutputBindings)
218  {
219  armnnDeserializer::BindingPointInfo outputBinding =
220  parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
221  outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
222  }
223 
224  return network;
225  }
226 };
227 #endif
228 
229 #if defined(ARMNN_TF_LITE_PARSER)
230 template <>
231 struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
232 {
233 public:
234  using IParser = armnnTfLiteParser::ITfLiteParser;
235  using Params = InferenceModelInternal::Params;
236 
237  static armnn::INetworkPtr Create(const Params& params,
238  std::vector<armnn::BindingPointInfo>& inputBindings,
239  std::vector<armnn::BindingPointInfo>& outputBindings)
240  {
241  const std::string& modelPath = params.m_ModelPath;
242 
243  // Create a network from a file on disk
244  IParser::TfLiteParserOptions options;
245  options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
246  options.m_InferAndValidate = params.m_InferOutputShape;
247  auto parser(IParser::Create(options));
248 
249  armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
250 
251  {
252  ARMNN_SCOPED_HEAP_PROFILING("Parsing");
253  network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
254  }
255 
256  for (const std::string& inputLayerName : params.m_InputBindings)
257  {
258  armnn::BindingPointInfo inputBinding =
259  parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
260  inputBindings.push_back(inputBinding);
261  }
262 
263  for (const std::string& outputLayerName : params.m_OutputBindings)
264  {
265  armnn::BindingPointInfo outputBinding =
266  parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
267  outputBindings.push_back(outputBinding);
268  }
269 
270  return network;
271  }
272 };
273 #endif
274 
275 #if defined(ARMNN_ONNX_PARSER)
276 template <>
277 struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
278 {
279 public:
280  using IParser = armnnOnnxParser::IOnnxParser;
281  using Params = InferenceModelInternal::Params;
282  using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
283 
284  static armnn::INetworkPtr Create(const Params& params,
285  std::vector<BindingPointInfo>& inputBindings,
286  std::vector<BindingPointInfo>& outputBindings)
287  {
288  const std::string& modelPath = params.m_ModelPath;
289 
290  // Create a network from a file on disk
291  auto parser(IParser::Create());
292 
293  armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
294 
295  {
296  ARMNN_SCOPED_HEAP_PROFILING("Parsing");
297  network = (params.m_IsModelBinary ?
298  parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
299  parser->CreateNetworkFromTextFile(modelPath.c_str()));
300  }
301 
302  for (const std::string& inputLayerName : params.m_InputBindings)
303  {
304  BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
305  inputBindings.push_back(inputBinding);
306  }
307 
308  for (const std::string& outputLayerName : params.m_OutputBindings)
309  {
310  BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
311  outputBindings.push_back(outputBinding);
312  }
313 
314  return network;
315  }
316 };
317 #endif
318 
319 
320 
321 template <typename IParser, typename TDataType>
322 class InferenceModel
323 {
324 public:
325  using DataType = TDataType;
326  using Params = InferenceModelInternal::Params;
327  using QuantizationParams = InferenceModelInternal::QuantizationParams;
328  using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
329 
330  struct CommandLineOptions
331  {
332  std::string m_ModelDir;
333  std::vector<std::string> m_ComputeDevices;
334  std::string m_DynamicBackendsPath;
335  bool m_VisualizePostOptimizationModel;
336  bool m_EnableFp16TurboMode;
337  bool m_EnableBf16TurboMode;
338  std::string m_Labels;
339 
340  std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
341  {
342  std::vector<armnn::BackendId> backendIds;
343  std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
344  return backendIds;
345  }
346  };
347 
348  static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
349  {
350  namespace po = boost::program_options;
351 
352  const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };
353 
354  const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
355  + armnn::BackendRegistryInstance().GetBackendIdsAsString();
356 
357  desc.add_options()
358  ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
359  "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
360  ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
361  default_value(defaultComputes, armnn::stringUtils::StringConcat(defaultComputes, ", "))->
362  multitoken(), backendsMessage.c_str())
363  ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
364  "Path where to load any available dynamic backend from. "
365  "If left empty (the default), dynamic backends will not be used.")
366  ("labels,l", po::value<std::string>(&options.m_Labels),
367  "Text file containing one image filename - correct label pair per line, "
368  "used to test the accuracy of the network.")
369  ("visualize-optimized-model,v",
370  po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
371  "Produce a dot file useful for visualizing the graph post optimization."
372  "The file will have the same name as the model with the .dot extension.")
373  ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
374  "If this option is enabled FP32 layers, weights and biases will be converted "
375  "to FP16 where the backend supports it.")
376  ("bf16-turbo-mode", po::value<bool>(&options.m_EnableBf16TurboMode)->default_value(false),
377  "If this option is enabled FP32 layers, weights and biases will be converted "
378  "to BF16 where the backend supports it.");
379  }
380 
381  InferenceModel(const Params& params,
382  bool enableProfiling,
383  const std::string& dynamicBackendsPath,
384  const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
385  : m_EnableProfiling(enableProfiling)
386  , m_DynamicBackendsPath(dynamicBackendsPath)
387  {
388  if (runtime)
389  {
390  m_Runtime = runtime;
391  }
392  else
393  {
394  armnn::IRuntime::CreationOptions options;
395  options.m_EnableGpuProfiling = m_EnableProfiling;
396  options.m_DynamicBackendsPath = m_DynamicBackendsPath;
397  m_Runtime = std::move(armnn::IRuntime::Create(options));
398  }
399 
400  std::string invalidBackends;
401  if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
402  {
403  throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
404  }
405 
406  const auto parsing_start_time = armnn::GetTimeNow();
407  armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
408 
409  ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
410  << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";
411 
412  armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
413  {
414  ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
415 
416  armnn::OptimizerOptions options;
417  options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
418  options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
419  options.m_Debug = params.m_PrintIntermediateLayers;
420 
421  const auto optimization_start_time = armnn::GetTimeNow();
422  optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
423 
424  ARMNN_LOG(info) << "Optimization time: " << std::setprecision(2)
425  << std::fixed << armnn::GetTimeDuration(optimization_start_time).count() << " ms\n";
426 
427  if (!optNet)
428  {
429  throw armnn::Exception("Optimize returned nullptr");
430  }
431  }
432 
433  if (params.m_VisualizePostOptimizationModel)
434  {
435  fs::path filename = params.m_ModelPath;
436  filename.replace_extension("dot");
437  std::fstream file(filename.c_str(), std::ios_base::out);
438  optNet->SerializeToDot(file);
439  }
440 
441  armnn::Status ret;
442  {
443  ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
444  ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
445  }
446 
447  if (ret == armnn::Status::Failure)
448  {
449  throw armnn::Exception("IRuntime::LoadNetwork failed");
450  }
451  }
452 
453  void CheckInputIndexIsValid(unsigned int inputIndex) const
454  {
455  if (m_InputBindings.size() < inputIndex + 1)
456  {
457  throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
458  }
459  }
460 
461  void CheckOutputIndexIsValid(unsigned int outputIndex) const
462  {
463  if (m_OutputBindings.size() < outputIndex + 1)
464  {
465  throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
466  }
467  }
468 
469  unsigned int GetInputSize(unsigned int inputIndex = 0u) const
470  {
471  CheckInputIndexIsValid(inputIndex);
472  return m_InputBindings[inputIndex].second.GetNumElements();
473  }
474 
475  unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
476  {
477  CheckOutputIndexIsValid(outputIndex);
478  return m_OutputBindings[outputIndex].second.GetNumElements();
479  }
480 
481  std::chrono::duration<double, std::milli> Run(
482  const std::vector<TContainer>& inputContainers,
483  std::vector<TContainer>& outputContainers)
484  {
485  for (unsigned int i = 0; i < outputContainers.size(); ++i)
486  {
487  const unsigned int expectedOutputDataSize = GetOutputSize(i);
488 
489  boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
490  {
491  const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
492  if (actualOutputDataSize < expectedOutputDataSize)
493  {
494  unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
495  throw armnn::Exception(
496  boost::str(boost::format("Not enough data for output #%1%: expected "
497  "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
498  }
499  },
500  outputContainers[i]);
501  }
502 
503  std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
504  if (profiler)
505  {
506  profiler->EnableProfiling(m_EnableProfiling);
507  }
508 
509  // Start timer to record inference time in EnqueueWorkload (in milliseconds)
510  const auto start_time = armnn::GetTimeNow();
511 
512  armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
513  MakeInputTensors(inputContainers),
514  MakeOutputTensors(outputContainers));
515 
516  const auto duration = armnn::GetTimeDuration(start_time);
517 
518  // if profiling is enabled print out the results
519  if (profiler && profiler->IsProfilingEnabled())
520  {
521  profiler->Print(std::cout);
522  }
523 
524  if (ret == armnn::Status::Failure)
525  {
526  throw armnn::Exception("IRuntime::EnqueueWorkload failed");
527  }
528  else
529  {
530  return duration;
531  }
532  }
533 
534  const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
535  {
536  CheckInputIndexIsValid(inputIndex);
537  return m_InputBindings[inputIndex];
538  }
539 
540  const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
541  {
542  return m_InputBindings;
543  }
544 
545  const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
546  {
547  CheckOutputIndexIsValid(outputIndex);
548  return m_OutputBindings[outputIndex];
549  }
550 
551  const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
552  {
553  return m_OutputBindings;
554  }
555 
556  QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
557  {
558  CheckOutputIndexIsValid(outputIndex);
559  return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
560  m_OutputBindings[outputIndex].second.GetQuantizationOffset());
561  }
562 
563  QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
564  {
565  CheckInputIndexIsValid(inputIndex);
566  return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
567  m_InputBindings[inputIndex].second.GetQuantizationOffset());
568  }
569 
570  std::vector<QuantizationParams> GetAllQuantizationParams() const
571  {
572  std::vector<QuantizationParams> quantizationParams;
573  for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
574  {
575  quantizationParams.push_back(GetQuantizationParams(i));
576  }
577  return quantizationParams;
578  }
579 
580 private:
581  armnn::NetworkId m_NetworkIdentifier;
582  std::shared_ptr<armnn::IRuntime> m_Runtime;
583 
584  std::vector<armnn::BindingPointInfo> m_InputBindings;
585  std::vector<armnn::BindingPointInfo> m_OutputBindings;
586  bool m_EnableProfiling;
587  std::string m_DynamicBackendsPath;
588 
589  template<typename TContainer>
590  armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
591  {
592  return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
593  }
594 
595  template<typename TContainer>
596  armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
597  {
598  return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
599  }
600 };
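
Typical usage (a minimal sketch, not part of the header above): the model path and binding-point names below are placeholders, and the example assumes ArmNN was built with ARMNN_TF_LITE_PARSER so that armnnTfLiteParser::ITfLiteParser is available.

// Minimal driver sketch for InferenceModel; file and layer names are hypothetical.
#include "InferenceModel.hpp"

int main()
{
    using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;

    InferenceModelInternal::Params params;
    params.m_ModelPath      = "model.tflite";          // hypothetical model file
    params.m_InputBindings  = { "input" };             // hypothetical input layer name
    params.m_OutputBindings = { "output" };            // hypothetical output layer name
    params.m_ComputeDevices = { "CpuAcc", "CpuRef" };

    // enableProfiling = false, no extra dynamic backend path, default runtime.
    Model model(params, false, "");

    // One TContainer per binding, sized from the loaded network.
    std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputSize()) };
    std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };

    auto inferenceTime = model.Run(inputs, outputs);
    ARMNN_LOG(info) << "Inference took " << inferenceTime.count() << " ms";

    return 0;
}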