ArmNN 22.08
ExecuteNetworkParams Struct Reference

Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter.

#include <ExecuteNetworkParams.hpp>

Public Types

enum TfLiteExecutor { ArmNNTfLiteParser, ArmNNTfLiteDelegate, TfliteInterpreter }
 

Public Member Functions

void ValidateParams ()
 

Public Attributes

bool m_AllowExpandedDims
 
std::string m_CachedNetworkFilePath
 
std::vector< armnn::BackendId > m_ComputeDevices
 
bool m_Concurrent
 
bool m_DequantizeOutput
 
std::string m_DynamicBackendsPath
 
bool m_EnableBf16TurboMode
 
bool m_EnableFastMath = false
 
bool m_EnableFp16TurboMode
 
bool m_EnableLayerDetails = false
 
bool m_EnableProfiling
 
bool m_GenerateTensorData
 
bool m_InferOutputShape = false
 
bool m_EnableDelegate = false
 
bool m_IsModelBinary
 
std::vector< std::string > m_InputNames
 
std::vector< std::string > m_InputTensorDataFilePaths
 
std::vector< armnn::TensorShape > m_InputTensorShapes
 
size_t m_Iterations
 
std::string m_ModelPath
 
unsigned int m_NumberOfThreads
 
bool m_OutputDetailsToStdOut
 
bool m_OutputDetailsOnlyToStdOut
 
std::vector< std::string > m_OutputNames
 
std::vector< std::string > m_OutputTensorFiles
 
bool m_ParseUnsupported = false
 
bool m_PrintIntermediate
 
bool m_DontPrintOutputs
 
bool m_QuantizeInput
 
bool m_SaveCachedNetwork
 
size_t m_SubgraphId
 
double m_ThresholdTime
 
int m_TuningLevel
 
std::string m_TuningPath
 
std::string m_MLGOTuningFilePath
 
TfLiteExecutor m_TfLiteExecutor
 
size_t m_ThreadPoolSize
 
bool m_ImportInputsIfAligned
 
bool m_ReuseBuffers
 
std::string m_ComparisonFile
 
std::vector< armnn::BackendId > m_ComparisonComputeDevices
 
bool m_CompareWithTflite
 

Detailed Description

Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter.

Definition at line 17 of file ExecuteNetworkParams.hpp.
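
ExecuteNetwork normally fills this struct from its command-line options via ProgramOptions. The sketch below is an illustration only (the file names and values are hypothetical, not taken from the ArmNN sources) of populating a few of the fields directly:

    #include "ExecuteNetworkParams.hpp"

    ExecuteNetworkParams MakeParams()
    {
        ExecuteNetworkParams params;
        params.m_ModelPath                = "model.tflite";         // hypothetical model file
        params.m_ComputeDevices           = { "CpuAcc", "CpuRef" }; // preferred backends, in order
        params.m_InputNames               = { "input" };
        params.m_InputTensorDataFilePaths = { "input.bin" };        // hypothetical input data file
        params.m_Iterations               = 1;
        params.m_ThresholdTime            = 0.0;                    // must not be negative
        return params;
    }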

Member Enumeration Documentation

◆ TfLiteExecutor

enum class TfLiteExecutor
Enumerator
ArmNNTfLiteParser 
ArmNNTfLiteDelegate 
TfliteInterpreter 

Definition at line 19 of file ExecuteNetworkParams.hpp.

enum class TfLiteExecutor
{
    ArmNNTfLiteParser,
    ArmNNTfLiteDelegate,
    TfliteInterpreter
};
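
Since this is a scoped enum, callers qualify the enumerators with the struct name. A minimal sketch of branching on the requested executor (the helper function and the printable names are hypothetical, not part of ExecuteNetwork):

    #include "ExecuteNetworkParams.hpp"

    // Hypothetical helper: maps the requested executor to a printable name.
    const char* ExecutorName(ExecuteNetworkParams::TfLiteExecutor executor)
    {
        using TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor;
        switch (executor)
        {
            case TfLiteExecutor::ArmNNTfLiteParser:   return "ArmNN TfLite parser";
            case TfLiteExecutor::ArmNNTfLiteDelegate: return "ArmNN TfLite delegate";
            case TfLiteExecutor::TfliteInterpreter:   return "TfLite interpreter";
        }
        return "unknown";
    }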

Member Function Documentation

◆ ValidateParams()

void ValidateParams()

Definition at line 49 of file ExecuteNetworkParams.cpp.

References ARMNN_LOG, CheckClTuningParameter(), CheckRequestedBackendsAreValid(), armnn::DetailsWithEvents, armnn::InferAndValidate, m_AllowExpandedDims, m_CachedNetworkFilePath, m_ComputeDevices, OptimizerOptions::m_Debug, m_DynamicBackendsPath, m_EnableBf16TurboMode, m_EnableFastMath, m_EnableFp16TurboMode, m_EnableProfiling, m_GenerateTensorData, m_InferOutputShape, m_InputNames, m_InputTensorDataFilePaths, m_InputTensorShapes, m_MLGOTuningFilePath, OptimizerOptions::m_ModelOptions, m_NumberOfThreads, m_PrintIntermediate, OptimizerOptions::m_ProfilingEnabled, OptimizerOptions::m_ReduceFp32ToBf16, OptimizerOptions::m_ReduceFp32ToFp16, m_SaveCachedNetwork, OptimizerOptions::m_shapeInferenceMethod, m_ThresholdTime, m_TuningLevel, m_TuningPath, DelegateOptions::SetDynamicBackendsPath(), DelegateOptions::SetGpuProfilingState(), DelegateOptions::SetInternalProfilingParams(), DelegateOptions::SetOptimizerOptions(), armnn::ValidateOnly, and ValidatePaths().

Referenced by ProgramOptions::ValidateExecuteNetworkParams().

void ExecuteNetworkParams::ValidateParams()
{
    if (m_DynamicBackendsPath == "")
    {
        // Check compute devices are valid unless they are dynamically loaded at runtime
        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                             << invalidBackends;
        }
    }

    CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
    {
        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
                                              "enabled at the same time.");
    }

    // Check input tensor shapes
    if ((m_InputTensorShapes.size() != 0) &&
        (m_InputTensorShapes.size() != m_InputNames.size()))
    {
        throw armnn::InvalidArgumentException("input-name and input-tensor-shape must have "
                                              "the same amount of elements. ");
    }

    if (m_InputTensorDataFilePaths.size() != 0)
    {
        if (!ValidatePaths(m_InputTensorDataFilePaths, true))
        {
            throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
        }

        if (m_InputTensorDataFilePaths.size() < m_InputNames.size())
        {
            throw armnn::InvalidArgumentException(
                fmt::format("According to the number of input names the user provided the network has {} "
                            "inputs. But only {} input-tensor-data file paths were provided. Each input of the "
                            "model is expected to be stored in it's own file.",
                            m_InputNames.size(),
                            m_InputTensorDataFilePaths.size()));
        }
    }

    // Check that threshold time is not less than zero
    if (m_ThresholdTime < 0)
    {
        throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }

    if (m_AllowExpandedDims && m_InferOutputShape)
    {
        throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
    }
}
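
ValidateParams() reports most problems by throwing armnn::InvalidArgumentException (invalid backend IDs are only logged at fatal severity). A usage sketch, assuming a populated params object; the wrapper function is hypothetical, not part of ExecuteNetwork:

    #include <cstdlib>
    #include <iostream>
    #include <armnn/Exceptions.hpp>
    #include "ExecuteNetworkParams.hpp"

    // Hypothetical wrapper: turns a validation failure into a process exit code.
    int ValidateOrFail(ExecuteNetworkParams& params)
    {
        try
        {
            params.ValidateParams();
        }
        catch (const armnn::InvalidArgumentException& e)
        {
            std::cerr << "Invalid ExecuteNetwork options: " << e.what() << "\n";
            return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
    }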

Member Data Documentation

◆ m_AllowExpandedDims

bool m_AllowExpandedDims

Definition at line 26 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_CachedNetworkFilePath

std::string m_CachedNetworkFilePath

Definition at line 27 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_CompareWithTflite

bool m_CompareWithTflite

Definition at line 67 of file ExecuteNetworkParams.hpp.

Referenced by main(), and ProgramOptions::ProgramOptions().

◆ m_ComparisonComputeDevices

std::vector<armnn::BackendId> m_ComparisonComputeDevices

Definition at line 66 of file ExecuteNetworkParams.hpp.

Referenced by main(), and ProgramOptions::ParseOptions().

◆ m_ComparisonFile

std::string m_ComparisonFile

Definition at line 65 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions().

◆ m_ComputeDevices

std::vector<armnn::BackendId> m_ComputeDevices

Definition at line 28 of file ExecuteNetworkParams.hpp.

Referenced by main(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_Concurrent

bool m_Concurrent

◆ m_DequantizeOutput

bool m_DequantizeOutput

Definition at line 30 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions().

◆ m_DontPrintOutputs

bool m_DontPrintOutputs

◆ m_DynamicBackendsPath

std::string m_DynamicBackendsPath

◆ m_EnableBf16TurboMode

bool m_EnableBf16TurboMode

Definition at line 32 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_EnableDelegate

bool m_EnableDelegate = false

Definition at line 39 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ParseOptions().

◆ m_EnableFastMath

bool m_EnableFastMath = false

Definition at line 33 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_EnableFp16TurboMode

bool m_EnableFp16TurboMode

Definition at line 34 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_EnableLayerDetails

bool m_EnableLayerDetails = false

Definition at line 35 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions().

◆ m_EnableProfiling

bool m_EnableProfiling

◆ m_GenerateTensorData

bool m_GenerateTensorData

◆ m_ImportInputsIfAligned

bool m_ImportInputsIfAligned

◆ m_InferOutputShape

bool m_InferOutputShape = false

◆ m_InputNames

std::vector<std::string> m_InputNames

◆ m_InputTensorDataFilePaths

std::vector<std::string> m_InputTensorDataFilePaths

◆ m_InputTensorShapes

std::vector<armnn::TensorShape> m_InputTensorShapes

◆ m_IsModelBinary

bool m_IsModelBinary

Definition at line 40 of file ExecuteNetworkParams.hpp.

Referenced by ArmNNExecutor::CompareAndPrintResult().

◆ m_Iterations

size_t m_Iterations

◆ m_MLGOTuningFilePath

std::string m_MLGOTuningFilePath

◆ m_ModelPath

std::string m_ModelPath

◆ m_NumberOfThreads

unsigned int m_NumberOfThreads

Definition at line 46 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_OutputDetailsOnlyToStdOut

bool m_OutputDetailsOnlyToStdOut

◆ m_OutputDetailsToStdOut

bool m_OutputDetailsToStdOut

◆ m_OutputNames

std::vector<std::string> m_OutputNames

Definition at line 49 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ParseOptions().

◆ m_OutputTensorFiles

std::vector<std::string> m_OutputTensorFiles

◆ m_ParseUnsupported

bool m_ParseUnsupported = false

◆ m_PrintIntermediate

bool m_PrintIntermediate

Definition at line 52 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_QuantizeInput

bool m_QuantizeInput

Definition at line 54 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions().

◆ m_ReuseBuffers

bool m_ReuseBuffers

◆ m_SaveCachedNetwork

bool m_SaveCachedNetwork

Definition at line 55 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions(), and ValidateParams().

◆ m_SubgraphId

size_t m_SubgraphId

Definition at line 56 of file ExecuteNetworkParams.hpp.

Referenced by ProgramOptions::ProgramOptions().

◆ m_TfLiteExecutor

TfLiteExecutor m_TfLiteExecutor

◆ m_ThreadPoolSize

size_t m_ThreadPoolSize

◆ m_ThresholdTime

double m_ThresholdTime

◆ m_TuningLevel

int m_TuningLevel

◆ m_TuningPath

std::string m_TuningPath

The documentation for this struct was generated from the following files:

ExecuteNetworkParams.hpp
ExecuteNetworkParams.cpp