From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 20.02/class_inference_model.xhtml | 760 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 760 insertions(+)
 create mode 100644 20.02/class_inference_model.xhtml

(limited to '20.02/class_inference_model.xhtml')

diff --git a/20.02/class_inference_model.xhtml b/20.02/class_inference_model.xhtml
new file mode 100644
index 0000000000..3c3e2481e9
--- /dev/null
+++ b/20.02/class_inference_model.xhtml
@@ -0,0 +1,760 @@
ArmNN 20.02
InferenceModel< IParser, TDataType > Class Template Reference
#include <InferenceModel.hpp>

Classes

  struct  CommandLineOptions

Public Types

  using DataType = TDataType
  using Params = InferenceModelInternal::Params
  using QuantizationParams = InferenceModelInternal::QuantizationParams
  using TContainer = boost::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char > >

Public Member Functions

  InferenceModel (const Params &params, bool enableProfiling, const std::string &dynamicBackendsPath, const std::shared_ptr< armnn::IRuntime > &runtime=nullptr)
  void CheckInputIndexIsValid (unsigned int inputIndex) const
  void CheckOutputIndexIsValid (unsigned int outputIndex) const
  unsigned int GetInputSize (unsigned int inputIndex=0u) const
  unsigned int GetOutputSize (unsigned int outputIndex=0u) const
  std::chrono::duration< double, std::milli > Run (const std::vector< TContainer > &inputContainers, std::vector< TContainer > &outputContainers)
  const armnn::BindingPointInfo & GetInputBindingInfo (unsigned int inputIndex=0u) const
  const std::vector< armnn::BindingPointInfo > & GetInputBindingInfos () const
  const armnn::BindingPointInfo & GetOutputBindingInfo (unsigned int outputIndex=0u) const
  const std::vector< armnn::BindingPointInfo > & GetOutputBindingInfos () const
  QuantizationParams GetQuantizationParams (unsigned int outputIndex=0u) const
  QuantizationParams GetInputQuantizationParams (unsigned int inputIndex=0u) const
  std::vector< QuantizationParams > GetAllQuantizationParams () const

Static Public Member Functions

  static void AddCommandLineOptions (boost::program_options::options_description &desc, CommandLineOptions &options)

Detailed Description

template<typename IParser, typename TDataType>
class InferenceModel< IParser, TDataType >

Definition at line 316 of file InferenceModel.hpp.
Member Typedef Documentation

+ +

◆ DataType

+ +
+
+ + + + +
using DataType = TDataType
+
+ +

Definition at line 319 of file InferenceModel.hpp.

+ +
+

◆ Params

using Params = InferenceModelInternal::Params

Definition at line 320 of file InferenceModel.hpp.

◆ QuantizationParams

using QuantizationParams = InferenceModelInternal::QuantizationParams

Definition at line 321 of file InferenceModel.hpp.

◆ TContainer

using TContainer = boost::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char > >

Definition at line 322 of file InferenceModel.hpp.
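
TContainer lets a caller hand input and output buffers of differing element types (float, int or quantized uint8) through a single interface. A minimal sketch of how such containers might be filled and inspected; the tensor sizes used here are arbitrary examples, not taken from this page:

#include <boost/variant.hpp>
#include <iostream>
#include <vector>

using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

int main()
{
    // A float input buffer and a quantized (uint8) output buffer; sizes are arbitrary examples.
    TContainer input  = std::vector<float>(224 * 224 * 3, 0.0f);
    TContainer output = std::vector<unsigned char>(1001, 0);

    // boost::apply_visitor dispatches on whichever vector type is actually stored.
    const auto numInputElements = boost::apply_visitor(
        [](const auto& v) { return static_cast<unsigned int>(v.size()); }, input);
    std::cout << "input elements: " << numInputElements << "\n";
    return 0;
}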

Constructor & Destructor Documentation

+ +

◆ InferenceModel()

+ +
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
InferenceModel (const Paramsparams,
bool enableProfiling,
const std::string & dynamicBackendsPath,
const std::shared_ptr< armnn::IRuntime > & runtime = nullptr 
)
+
+inline
+
+ +

Definition at line 371 of file InferenceModel.hpp.

+ +

References ARMNN_SCOPED_HEAP_PROFILING, IRuntime::Create(), CreateNetworkImpl< IParser >::Create(), armnn::Failure, Params::m_ComputeDevices, OptimizerOptions::m_Debug, IRuntime::CreationOptions::m_DynamicBackendsPath, Params::m_DynamicBackendsPath, Params::m_EnableFp16TurboMode, IRuntime::CreationOptions::m_EnableGpuProfiling, m_EnableProfiling, Params::m_InputBindings, Params::m_ModelPath, Params::m_OutputBindings, Params::m_PrintIntermediateLayers, OptimizerOptions::m_ReduceFp32ToFp16, Params::m_VisualizePostOptimizationModel, armnn::Optimize(), and options.

+
375  : m_EnableProfiling(enableProfiling)
376  , m_DynamicBackendsPath(dynamicBackendsPath)
377  {
378  if (runtime)
379  {
380  m_Runtime = runtime;
381  }
382  else
383  {
386  options.m_DynamicBackendsPath = m_DynamicBackendsPath;
387  m_Runtime = std::move(armnn::IRuntime::Create(options));
388  }
389 
390  std::string invalidBackends;
391  if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
392  {
393  throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
394  }
395 
396  armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
397 
399  {
400  ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
401 
403  options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
404  options.m_Debug = params.m_PrintIntermediateLayers;
405 
406  optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
407  if (!optNet)
408  {
409  throw armnn::Exception("Optimize returned nullptr");
410  }
411  }
412 
413  if (params.m_VisualizePostOptimizationModel)
414  {
415  boost::filesystem::path filename = params.m_ModelPath;
416  filename.replace_extension("dot");
417  std::fstream file(filename.c_str(), std::ios_base::out);
418  optNet->SerializeToDot(file);
419  }
420 
421  armnn::Status ret;
422  {
423  ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
424  ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
425  }
426 
427  if (ret == armnn::Status::Failure)
428  {
429  throw armnn::Exception("IRuntime::LoadNetwork failed");
430  }
431  }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:32
+
options m_EnableProfiling
+ + + +
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:890
+ +
#define ARMNN_SCOPED_HEAP_PROFILING(TAG)
+
Status
enumeration
Definition: Types.hpp:26
+
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:566
+ + +
std::string m_DynamicBackendsPath
Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive Only a...
Definition: IRuntime.hpp:58
+
bool m_EnableGpuProfiling
Setting this flag will allow the user to obtain GPU profiling information from the runtime...
Definition: IRuntime.hpp:54
+
static armnn::INetworkPtr Create(const Params &params, std::vector< armnn::BindingPointInfo > &inputBindings, std::vector< armnn::BindingPointInfo > &outputBindings)
+ +
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
+
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:101
+
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
+
+
+
+
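
Taken together, the constructor parses the model, optimizes it and loads it onto the selected backends. A construction sketch, not verbatim ArmNN usage: the parser type, model path and binding names below are illustrative assumptions, and InferenceModel.hpp is assumed to be on the include path (it lives under the tests/ directory of the ArmNN source tree).

#include "InferenceModel.hpp"                    // assumed include path
#include <armnnTfLiteParser/ITfLiteParser.hpp>   // any supported IParser would do

int main()
{
    using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;

    Model::Params params;
    params.m_ModelPath      = "mobilenet_v1.tflite";      // hypothetical model file
    params.m_InputBindings  = { "input" };                // hypothetical tensor names
    params.m_OutputBindings = { "output" };
    params.m_ComputeDevices = { armnn::Compute::CpuRef }; // reference backend, typically always built

    // enableProfiling = false, empty dynamicBackendsPath; with the runtime argument left at
    // its nullptr default the model creates its own armnn::IRuntime.
    Model model(params, false, "");
    return 0;
}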

Member Function Documentation


◆ AddCommandLineOptions()

static void AddCommandLineOptions (boost::program_options::options_description & desc,
                                   CommandLineOptions & options)
inlinestatic

Definition at line 341 of file InferenceModel.hpp.

References armnn::BackendRegistryInstance(), BackendRegistry::GetBackendIdsAsString(), InferenceModel< IParser, TDataType >::CommandLineOptions::m_ComputeDevices, InferenceModel< IParser, TDataType >::CommandLineOptions::m_DynamicBackendsPath, InferenceModel< IParser, TDataType >::CommandLineOptions::m_EnableFp16TurboMode, InferenceModel< IParser, TDataType >::CommandLineOptions::m_Labels, InferenceModel< IParser, TDataType >::CommandLineOptions::m_ModelDir, and InferenceModel< IParser, TDataType >::CommandLineOptions::m_VisualizePostOptimizationModel.

Referenced by ClassifierTestCaseProvider< TDatabase, InferenceModel >::AddCommandLineOptions().

    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
                default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
                multitoken(), backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
                "Path where to load any available dynamic backend from. "
                "If left empty (the default), dynamic backends will not be used.")
            ("labels,l", po::value<std::string>(&options.m_Labels),
                "Text file containing one image filename - correct label pair per line, "
                "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
                po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
                "Produce a dot file useful for visualizing the graph post optimization."
                "The file will have the same name as the model with the .dot extention.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
                "If this option is enabled FP32 layers, weights and biases will be converted "
                "to FP16 where the backend supports it.");
    }
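
The registered options populate a CommandLineOptions instance once boost::program_options has parsed argv. A hedged wiring sketch; the include path and parser type are assumptions, as above:

#include "InferenceModel.hpp"                    // assumed include path
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <boost/program_options.hpp>
#include <iostream>

int main(int argc, char* argv[])
{
    namespace po = boost::program_options;
    using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;

    po::options_description desc("Options");
    Model::CommandLineOptions cmdOptions;
    Model::AddCommandLineOptions(desc, cmdOptions);  // registers --model-dir, --compute, ...

    po::variables_map vm;
    try
    {
        po::store(po::parse_command_line(argc, argv, desc), vm);
        po::notify(vm);                              // throws if required options are missing
    }
    catch (const po::error& e)
    {
        std::cerr << e.what() << "\n" << desc << "\n";
        return 1;
    }

    std::cout << "Model dir: " << cmdOptions.m_ModelDir << "\n";
    return 0;
}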

◆ CheckInputIndexIsValid()

void CheckInputIndexIsValid (unsigned int inputIndex) const
inline

Definition at line 433 of file InferenceModel.hpp.

References Params::m_InputBindings.

    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

◆ CheckOutputIndexIsValid()

void CheckOutputIndexIsValid (unsigned int outputIndex) const
inline

Definition at line 441 of file InferenceModel.hpp.

References Params::m_OutputBindings.

    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

◆ GetAllQuantizationParams()

std::vector<QuantizationParams> GetAllQuantizationParams () const
inline

Definition at line 550 of file InferenceModel.hpp.

References Params::m_DynamicBackendsPath, m_EnableProfiling, Params::m_InputBindings, Params::m_OutputBindings, armnnUtils::MakeInputTensors(), and armnnUtils::MakeOutputTensors().

    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }

◆ GetInputBindingInfo()

const armnn::BindingPointInfo& GetInputBindingInfo (unsigned int inputIndex = 0u) const
inline

Definition at line 514 of file InferenceModel.hpp.

References Params::m_InputBindings.

Referenced by main().

    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }
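
armnn::BindingPointInfo pairs a layer binding id with the armnn::TensorInfo that the size and quantization getters below read from. A small inspection sketch; the helper name and the assumption of an already-constructed model are mine:

#include "InferenceModel.hpp"   // assumed include path
#include <iostream>

// 'model' is any constructed InferenceModel<IParser, TDataType> (see the constructor sketch above).
template <typename TModel>
void PrintInputBinding(const TModel& model, unsigned int inputIndex = 0u)
{
    // armnn::BindingPointInfo is a (layer binding id, armnn::TensorInfo) pair;
    // the TensorInfo half is what GetInputSize() and the quantization getters use.
    const armnn::BindingPointInfo& binding = model.GetInputBindingInfo(inputIndex);
    const armnn::TensorInfo& info = binding.second;

    std::cout << "input " << inputIndex << ": "
              << info.GetNumElements() << " elements, "
              << info.GetNumDimensions() << " dimensions, "
              << "quantization scale " << info.GetQuantizationScale() << "\n";
}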

◆ GetInputBindingInfos()

const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos () const
inline

Definition at line 520 of file InferenceModel.hpp.

References Params::m_InputBindings.

    {
        return m_InputBindings;
    }

◆ GetInputQuantizationParams()

QuantizationParams GetInputQuantizationParams (unsigned int inputIndex = 0u) const
inline

Definition at line 543 of file InferenceModel.hpp.

References Params::m_InputBindings.

Referenced by MainImpl().

    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }
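
For quantized networks, the returned (scale, offset) pair is what a caller needs to convert float data into the uint8 values expected at the input. A hedged helper sketch; the function is illustrative, not part of this class, and assumes ArmNN's usual affine scheme real = scale * (quantized - offset):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical helper: quantize float values with the (scale, offset) pair returned by
// GetInputQuantizationParams(); q = round(v / scale) + offset, clamped to [0, 255].
std::vector<unsigned char> QuantizeToUint8(const std::vector<float>& values,
                                           std::pair<float, int32_t> quantizationParams)
{
    const float scale    = quantizationParams.first;
    const int32_t offset = quantizationParams.second;

    std::vector<unsigned char> quantized;
    quantized.reserve(values.size());
    for (float v : values)
    {
        const int32_t q = static_cast<int32_t>(std::round(v / scale)) + offset;
        quantized.push_back(static_cast<unsigned char>(std::min<int32_t>(255, std::max<int32_t>(0, q))));
    }
    return quantized;
}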

◆ GetInputSize()

unsigned int GetInputSize (unsigned int inputIndex = 0u) const
inline

Definition at line 449 of file InferenceModel.hpp.

References Params::m_InputBindings.

Referenced by MainImpl().

    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex].second.GetNumElements();
    }

◆ GetOutputBindingInfo()

const armnn::BindingPointInfo& GetOutputBindingInfo (unsigned int outputIndex = 0u) const
inline

Definition at line 525 of file InferenceModel.hpp.

References Params::m_OutputBindings.

    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

◆ GetOutputBindingInfos()

const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos () const
inline

Definition at line 531 of file InferenceModel.hpp.

References Params::m_OutputBindings.

Referenced by MainImpl().

    {
        return m_OutputBindings;
    }

◆ GetOutputSize()

unsigned int GetOutputSize (unsigned int outputIndex = 0u) const
inline

Definition at line 455 of file InferenceModel.hpp.

References Params::m_OutputBindings.

Referenced by main(), and MainImpl().

    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }
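
Run() requires each output container to already hold at least GetOutputSize(i) elements, so buffers are normally sized from these getters up front. A sketch, assuming a constructed model as in the earlier example:

#include "InferenceModel.hpp"   // assumed include path
#include <vector>

// Pre-size one input and one output buffer from the model's reported tensor sizes,
// so a later call to Run() does not throw "Not enough data for output".
template <typename TModel>
void PrepareBuffers(const TModel& model,
                    std::vector<typename TModel::TContainer>& inputs,
                    std::vector<typename TModel::TContainer>& outputs)
{
    inputs.clear();
    outputs.clear();
    inputs.push_back(std::vector<float>(model.GetInputSize(0), 0.0f));
    outputs.push_back(std::vector<float>(model.GetOutputSize(0), 0.0f));
}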

◆ GetQuantizationParams()

QuantizationParams GetQuantizationParams (unsigned int outputIndex = 0u) const
inline

Definition at line 536 of file InferenceModel.hpp.

References Params::m_OutputBindings.

    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }
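
This is the output-side counterpart of GetInputQuantizationParams(): the (scale, offset) pair turns quantized uint8 results back into floats. A hedged helper sketch (the function name is mine, not part of this API):

#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical helper: dequantize uint8 network output using the (scale, offset) pair
// returned by GetQuantizationParams(); real = scale * (quantized - offset).
std::vector<float> DequantizeOutput(const std::vector<unsigned char>& quantized,
                                    std::pair<float, int32_t> quantizationParams)
{
    const float scale    = quantizationParams.first;
    const int32_t offset = quantizationParams.second;

    std::vector<float> dequantized;
    dequantized.reserve(quantized.size());
    for (unsigned char q : quantized)
    {
        dequantized.push_back(scale * (static_cast<int32_t>(q) - offset));
    }
    return dequantized;
}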

◆ Run()

std::chrono::duration<double, std::milli> Run (const std::vector< TContainer > & inputContainers,
                                               std::vector< TContainer > & outputContainers)
inline

Definition at line 461 of file InferenceModel.hpp.

References armnn::Failure, m_EnableProfiling, armnnUtils::MakeInputTensors(), armnnUtils::MakeOutputTensors(), and armnn::numeric_cast().

Referenced by MainImpl().

    {
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = GetCurrentTime();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto end_time = GetCurrentTime();

        // if profiling is enabled print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }
        else
        {
            return std::chrono::duration<double, std::milli>(end_time - start_time);
        }
    }
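
Putting the pieces together, a hedged end-to-end sketch of a single inference call; as before, the parser type, model path and binding names are illustrative assumptions:

#include "InferenceModel.hpp"                    // assumed include path
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <iostream>
#include <vector>

int main()
{
    using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;

    Model::Params params;
    params.m_ModelPath      = "mobilenet_v1.tflite";  // hypothetical model file
    params.m_InputBindings  = { "input" };            // hypothetical tensor names
    params.m_OutputBindings = { "output" };
    params.m_ComputeDevices = { armnn::Compute::CpuRef };

    Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");

    // Buffers sized from the network's own tensor shapes; Run() checks the output size.
    std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputSize(0), 0.0f) };
    std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize(0), 0.0f) };

    const auto duration = model.Run(inputs, outputs);
    std::cout << "Inference took " << duration.count() << " ms\n";
    return 0;
}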

The documentation for this class was generated from the following file:

    InferenceModel.hpp

--
cgit v1.2.1