ArmNN 20.05
InferenceModel< IParser, TDataType > Class Template Reference

#include <InferenceModel.hpp>

Classes

struct  CommandLineOptions
 

Public Types

using DataType = TDataType
 
using Params = InferenceModelInternal::Params
 
using QuantizationParams = InferenceModelInternal::QuantizationParams
 
using TContainer = boost::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char > >
 

Public Member Functions

 InferenceModel (const Params &params, bool enableProfiling, const std::string &dynamicBackendsPath, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
 
void CheckInputIndexIsValid (unsigned int inputIndex) const
 
void CheckOutputIndexIsValid (unsigned int outputIndex) const
 
unsigned int GetInputSize (unsigned int inputIndex=0u) const
 
unsigned int GetOutputSize (unsigned int outputIndex=0u) const
 
std::chrono::duration< double, std::milli > Run (const std::vector< TContainer > &inputContainers, std::vector< TContainer > &outputContainers)
 
const armnn::BindingPointInfo & GetInputBindingInfo (unsigned int inputIndex=0u) const
 
const std::vector< armnn::BindingPointInfo > & GetInputBindingInfos () const
 
const armnn::BindingPointInfo & GetOutputBindingInfo (unsigned int outputIndex=0u) const
 
const std::vector< armnn::BindingPointInfo > & GetOutputBindingInfos () const
 
QuantizationParams GetQuantizationParams (unsigned int outputIndex=0u) const
 
QuantizationParams GetInputQuantizationParams (unsigned int inputIndex=0u) const
 
std::vector< QuantizationParams > GetAllQuantizationParams () const
 

Static Public Member Functions

static void AddCommandLineOptions (boost::program_options::options_description &desc, CommandLineOptions &options)
 

Detailed Description

template<typename IParser, typename TDataType>
class InferenceModel< IParser, TDataType >

Definition at line 319 of file InferenceModel.hpp.
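
A minimal usage sketch (illustrative only: the parser type, model path and binding names are placeholders, not prescribed by this header). A Params structure describing the model is filled in first and then passed to the constructor, which parses, optimizes and loads the network:

  using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;

  Model::Params params;
  params.m_ModelPath      = "model.tflite";          // placeholder path
  params.m_InputBindings  = { "input" };             // placeholder binding names
  params.m_OutputBindings = { "output" };
  params.m_ComputeDevices = { "CpuAcc", "CpuRef" };  // backend preference order

  // Creates an IRuntime internally, then parses, optimizes and loads the network.
  Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");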

Member Typedef Documentation

◆ DataType

using DataType = TDataType

Definition at line 322 of file InferenceModel.hpp.

◆ Params

using Params = InferenceModelInternal::Params

Definition at line 323 of file InferenceModel.hpp.

◆ QuantizationParams

using QuantizationParams = InferenceModelInternal::QuantizationParams

Definition at line 324 of file InferenceModel.hpp.

◆ TContainer

using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char> >

Definition at line 325 of file InferenceModel.hpp.

Constructor & Destructor Documentation

◆ InferenceModel()

InferenceModel ( const Params &  params,
bool  enableProfiling,
const std::string &  dynamicBackendsPath,
const std::shared_ptr< armnn::IRuntime > &  runtime = nullptr 
)
inline

Definition at line 378 of file InferenceModel.hpp.

References ARMNN_LOG, ARMNN_SCOPED_HEAP_PROFILING, CreateNetworkImpl< IParser >::Create(), IRuntime::Create(), armnn::Failure, armnn::GetTimeDuration(), armnn::GetTimeNow(), Params::m_ComputeDevices, OptimizerOptions::m_Debug, IRuntime::CreationOptions::m_DynamicBackendsPath, Params::m_DynamicBackendsPath, Params::m_EnableBf16TurboMode, Params::m_EnableFp16TurboMode, IRuntime::CreationOptions::m_EnableGpuProfiling, m_EnableProfiling, Params::m_InputBindings, Params::m_ModelPath, Params::m_OutputBindings, Params::m_PrintIntermediateLayers, OptimizerOptions::m_ReduceFp32ToBf16, OptimizerOptions::m_ReduceFp32ToFp16, Params::m_VisualizePostOptimizationModel, armnn::Optimize(), and options.

382  : m_EnableProfiling(enableProfiling)
383  , m_DynamicBackendsPath(dynamicBackendsPath)
384  {
385  if (runtime)
386  {
387  m_Runtime = runtime;
388  }
389  else
390  {
391  armnn::IRuntime::CreationOptions options;
392  options.m_EnableGpuProfiling = m_EnableProfiling;
393  options.m_DynamicBackendsPath = m_DynamicBackendsPath;
394  m_Runtime = std::move(armnn::IRuntime::Create(options));
395  }
396 
397  std::string invalidBackends;
398  if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
399  {
400  throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
401  }
402 
403  const auto parsing_start_time = armnn::GetTimeNow();
404  armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
405 
406  ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
407  << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";
408 
409  armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
410  {
411  ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
412 
413  armnn::OptimizerOptions options;
414  options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
415  options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
416  options.m_Debug = params.m_PrintIntermediateLayers;
417 
418  const auto optimization_start_time = armnn::GetTimeNow();
419  optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
420 
421  ARMNN_LOG(info) << "Optimization time: " << std::setprecision(2)
422  << std::fixed << armnn::GetTimeDuration(optimization_start_time).count() << " ms\n";
423 
424  if (!optNet)
425  {
426  throw armnn::Exception("Optimize returned nullptr");
427  }
428  }
429 
430  if (params.m_VisualizePostOptimizationModel)
431  {
432  boost::filesystem::path filename = params.m_ModelPath;
433  filename.replace_extension("dot");
434  std::fstream file(filename.c_str(), std::ios_base::out);
435  optNet->SerializeToDot(file);
436  }
437 
438  armnn::Status ret;
439  {
440  ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
441  ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
442  }
443 
444  if (ret == armnn::Status::Failure)
445  {
446  throw armnn::Exception("IRuntime::LoadNetwork failed");
447  }
448  }
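
The optional runtime argument lets several models share a single armnn::IRuntime instead of each constructing its own. A brief sketch under that assumption (Model, paramsA and paramsB are hypothetical, as in the earlier example):

  // One runtime, shared by both models.
  armnn::IRuntime::CreationOptions runtimeOptions;
  std::shared_ptr<armnn::IRuntime> runtime = armnn::IRuntime::Create(runtimeOptions);

  Model modelA(paramsA, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"", runtime);
  Model modelB(paramsB, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"", runtime);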

Member Function Documentation

◆ AddCommandLineOptions()

static void AddCommandLineOptions ( boost::program_options::options_description &  desc,
CommandLineOptions options 
)
inlinestatic

Definition at line 345 of file InferenceModel.hpp.

References armnn::BackendRegistryInstance(), BackendRegistry::GetBackendIdsAsString(), InferenceModel< IParser, TDataType >::CommandLineOptions::m_ComputeDevices, InferenceModel< IParser, TDataType >::CommandLineOptions::m_DynamicBackendsPath, InferenceModel< IParser, TDataType >::CommandLineOptions::m_EnableBf16TurboMode, InferenceModel< IParser, TDataType >::CommandLineOptions::m_EnableFp16TurboMode, InferenceModel< IParser, TDataType >::CommandLineOptions::m_Labels, InferenceModel< IParser, TDataType >::CommandLineOptions::m_ModelDir, InferenceModel< IParser, TDataType >::CommandLineOptions::m_VisualizePostOptimizationModel, and armnn::stringUtils::StringConcat().

Referenced by ClassifierTestCaseProvider< TDatabase, InferenceModel >::AddCommandLineOptions().

346  {
347  namespace po = boost::program_options;
348 
349  const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };
350 
351  const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
352  + armnn::BackendRegistryInstance().GetBackendIdsAsString();
353 
354  desc.add_options()
355  ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
356  "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
357  ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
358  default_value(defaultComputes, armnn::stringUtils::StringConcat(defaultComputes, ", "))->
359  multitoken(), backendsMessage.c_str())
360  ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
361  "Path where to load any available dynamic backend from. "
362  "If left empty (the default), dynamic backends will not be used.")
363  ("labels,l", po::value<std::string>(&options.m_Labels),
364  "Text file containing one image filename - correct label pair per line, "
365  "used to test the accuracy of the network.")
366  ("visualize-optimized-model,v",
367  po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
368  "Produce a dot file useful for visualizing the graph post optimization."
369  "The file will have the same name as the model with the .dot extension.")
370  ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
371  "If this option is enabled FP32 layers, weights and biases will be converted "
372  "to FP16 where the backend supports it.")
373  ("bf16-turbo-mode", po::value<bool>(&options.m_EnableBf16TurboMode)->default_value(false),
374  "If this option is enabled FP32 layers, weights and biases will be converted "
375  "to BF16 where the backend supports it.");
376  }

◆ CheckInputIndexIsValid()

void CheckInputIndexIsValid ( unsigned int  inputIndex) const
inline

Definition at line 450 of file InferenceModel.hpp.

References Params::m_InputBindings.

451  {
452  if (m_InputBindings.size() < inputIndex + 1)
453  {
454  throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
455  }
456  }

◆ CheckOutputIndexIsValid()

void CheckOutputIndexIsValid ( unsigned int  outputIndex) const
inline

Definition at line 458 of file InferenceModel.hpp.

References Params::m_OutputBindings.

459  {
460  if (m_OutputBindings.size() < outputIndex + 1)
461  {
462  throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
463  }
464  }

◆ GetAllQuantizationParams()

std::vector<QuantizationParams> GetAllQuantizationParams ( ) const
inline

Definition at line 567 of file InferenceModel.hpp.

References Params::m_DynamicBackendsPath, m_EnableProfiling, Params::m_InputBindings, Params::m_OutputBindings, armnnUtils::MakeInputTensors(), MakeInputTensors(), armnnUtils::MakeOutputTensors(), and MakeOutputTensors().

568  {
569  std::vector<QuantizationParams> quantizationParams;
570  for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
571  {
572  quantizationParams.push_back(GetQuantizationParams(i));
573  }
574  return quantizationParams;
575  }

◆ GetInputBindingInfo()

const armnn::BindingPointInfo& GetInputBindingInfo ( unsigned int  inputIndex = 0u) const
inline

Definition at line 531 of file InferenceModel.hpp.

References Params::m_InputBindings.

Referenced by main().

532  {
533  CheckInputIndexIsValid(inputIndex);
534  return m_InputBindings[inputIndex];
535  }
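armnn::BindingPointInfo pairs a layer binding id with an armnn::TensorInfo, so the returned value can be used to inspect an input's shape and element count before preparing data (sketch; the model object is hypothetical):

  const armnn::BindingPointInfo& bindingInfo = model.GetInputBindingInfo(0);
  const armnn::TensorInfo& inputInfo = bindingInfo.second;

  std::cout << "Input 0: " << inputInfo.GetNumElements() << " elements, "
            << inputInfo.GetNumDimensions() << " dimensions" << std::endl;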

◆ GetInputBindingInfos()

const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos ( ) const
inline

Definition at line 537 of file InferenceModel.hpp.

References Params::m_InputBindings.

538  {
539  return m_InputBindings;
540  }

◆ GetInputQuantizationParams()

QuantizationParams GetInputQuantizationParams ( unsigned int  inputIndex = 0u) const
inline

Definition at line 560 of file InferenceModel.hpp.

References Params::m_InputBindings.

Referenced by MainImpl().

561  {
562  CheckInputIndexIsValid(inputIndex);
563  return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
564  m_InputBindings[inputIndex].second.GetQuantizationOffset());
565  }
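For quantized networks the returned (scale, offset) pair can be used to quantize float input data into the 8-bit representation the input tensor expects. A sketch (floatData and model are illustrative; requires <cmath> and <algorithm>):

  const auto inQParams = model.GetInputQuantizationParams(0);
  const float scale  = inQParams.first;
  const int   offset = inQParams.second;

  std::vector<unsigned char> quantizedInput;
  quantizedInput.reserve(floatData.size());
  for (float value : floatData)
  {
      const int q = static_cast<int>(std::round(value / scale)) + offset;
      quantizedInput.push_back(static_cast<unsigned char>(std::min(255, std::max(0, q))));
  }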

◆ GetInputSize()

unsigned int GetInputSize ( unsigned int  inputIndex = 0u) const
inline

Definition at line 466 of file InferenceModel.hpp.

References Params::m_InputBindings.

Referenced by MainImpl().

467  {
468  CheckInputIndexIsValid(inputIndex);
469  return m_InputBindings[inputIndex].second.GetNumElements();
470  }

◆ GetOutputBindingInfo()

const armnn::BindingPointInfo& GetOutputBindingInfo ( unsigned int  outputIndex = 0u) const
inline

Definition at line 542 of file InferenceModel.hpp.

References Params::m_OutputBindings.

543  {
544  CheckOutputIndexIsValid(outputIndex);
545  return m_OutputBindings[outputIndex];
546  }

◆ GetOutputBindingInfos()

const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos ( ) const
inline

Definition at line 548 of file InferenceModel.hpp.

References Params::m_OutputBindings.

Referenced by MainImpl().

549  {
550  return m_OutputBindings;
551  }

◆ GetOutputSize()

unsigned int GetOutputSize ( unsigned int  outputIndex = 0u) const
inline

Definition at line 472 of file InferenceModel.hpp.

References Params::m_OutputBindings.

Referenced by main(), and MainImpl().

473  {
474  CheckOutputIndexIsValid(outputIndex);
475  return m_OutputBindings[outputIndex].second.GetNumElements();
476  }

◆ GetQuantizationParams()

QuantizationParams GetQuantizationParams ( unsigned int  outputIndex = 0u) const
inline

Definition at line 553 of file InferenceModel.hpp.

References Params::m_OutputBindings.

554  {
555  CheckOutputIndexIsValid(outputIndex);
556  return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
557  m_OutputBindings[outputIndex].second.GetQuantizationOffset());
558  }
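QuantizationParams is a (scale, offset) pair, so a quantized 8-bit output can be mapped back to float values along these lines (a sketch, assuming output 0 was produced into a std::vector<unsigned char> named quantizedOutput):

  const auto outQParams = model.GetQuantizationParams(0);
  const float scale  = outQParams.first;
  const int   offset = outQParams.second;

  std::vector<float> dequantized;
  dequantized.reserve(quantizedOutput.size());
  for (unsigned char q : quantizedOutput)
  {
      dequantized.push_back(scale * (static_cast<int>(q) - offset));
  }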

◆ Run()

std::chrono::duration<double, std::milli> Run ( const std::vector< TContainer > &  inputContainers,
std::vector< TContainer > &  outputContainers 
)
inline

Definition at line 478 of file InferenceModel.hpp.

References armnn::Failure, armnn::GetTimeDuration(), armnn::GetTimeNow(), m_EnableProfiling, MakeInputTensors(), MakeOutputTensors(), and armnn::numeric_cast().

Referenced by MainImpl().

481  {
482  for (unsigned int i = 0; i < outputContainers.size(); ++i)
483  {
484  const unsigned int expectedOutputDataSize = GetOutputSize(i);
485 
486  boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
487  {
488  const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
489  if (actualOutputDataSize < expectedOutputDataSize)
490  {
491  unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
492  throw armnn::Exception(
493  boost::str(boost::format("Not enough data for output #%1%: expected "
494  "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
495  }
496  },
497  outputContainers[i]);
498  }
499 
500  std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
501  if (profiler)
502  {
503  profiler->EnableProfiling(m_EnableProfiling);
504  }
505 
506  // Start timer to record inference time in EnqueueWorkload (in milliseconds)
507  const auto start_time = armnn::GetTimeNow();
508 
509  armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
510  MakeInputTensors(inputContainers),
511  MakeOutputTensors(outputContainers));
512 
513  const auto duration = armnn::GetTimeDuration(start_time);
514 
515  // if profiling is enabled print out the results
516  if (profiler && profiler->IsProfilingEnabled())
517  {
518  profiler->Print(std::cout);
519  }
520 
521  if (ret == armnn::Status::Failure)
522  {
523  throw armnn::Exception("IRuntime::EnqueueWorkload failed");
524  }
525  else
526  {
527  return duration;
528  }
529  }
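Each element of the input and output vectors is a TContainer (a boost::variant over float, int and unsigned char vectors), and every output container must already hold at least GetOutputSize(i) elements when Run() is called, as the size check above shows. A sketch reusing the hypothetical model object from the earlier example:

  std::vector<Model::TContainer> inputs;
  inputs.push_back(std::vector<float>(model.GetInputSize(0)));   // fill with real data before running

  std::vector<Model::TContainer> outputs;
  outputs.push_back(std::vector<float>(model.GetOutputSize(0))); // pre-sized to the expected element count

  const auto inferenceTime = model.Run(inputs, outputs);
  std::cout << "Inference took " << inferenceTime.count() << " ms" << std::endl;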

The documentation for this class was generated from the following file:

InferenceModel.hpp