ArmNN 22.02
ExecuteNetworkParams Struct Reference

Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter. More...

#include <ExecuteNetworkParams.hpp>

Public Types

enum  TfLiteExecutor { ArmNNTfLiteParser, ArmNNTfLiteDelegate, TfliteInterpreter }
 
using TensorShapePtr = std::unique_ptr< armnn::TensorShape >
 

Public Member Functions

void ValidateParams ()
 

Public Attributes

std::string m_CachedNetworkFilePath
 
std::vector< armnn::BackendId > m_ComputeDevices
 
bool m_Concurrent
 
bool m_DequantizeOutput
 
std::string m_DynamicBackendsPath
 
bool m_EnableBf16TurboMode
 
bool m_EnableFastMath = false
 
bool m_EnableFp16TurboMode
 
bool m_EnableLayerDetails = false
 
bool m_EnableProfiling
 
bool m_GenerateTensorData
 
bool m_InferOutputShape = false
 
bool m_EnableDelegate = false
 
std::vector< std::string > m_InputNames
 
std::vector< std::string > m_InputTensorDataFilePaths
 
std::vector< TensorShapePtr > m_InputTensorShapes
 
std::vector< std::string > m_InputTypes
 
bool m_IsModelBinary
 
size_t m_Iterations
 
std::string m_ModelFormat
 
std::string m_ModelPath
 
unsigned int m_NumberOfThreads
 
bool m_OutputDetailsToStdOut
 
bool m_OutputDetailsOnlyToStdOut
 
std::vector< std::string > m_OutputNames
 
std::vector< std::string > m_OutputTensorFiles
 
std::vector< std::string > m_OutputTypes
 
bool m_ParseUnsupported = false
 
bool m_PrintIntermediate
 
bool m_DontPrintOutputs
 
bool m_QuantizeInput
 
bool m_SaveCachedNetwork
 
size_t m_SimultaneousIterations
 
size_t m_SubgraphId
 
double m_ThresholdTime
 
int m_TuningLevel
 
std::string m_TuningPath
 
std::string m_MLGOTuningFilePath
 
TfLiteExecutor m_TfLiteExecutor
 
size_t m_ThreadPoolSize
 

Detailed Description

Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter.

Definition at line 17 of file ExecuteNetworkParams.hpp.
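
The struct is filled in by ExecuteNetwork's option parser (ProgramOptions) and then checked with ValidateParams(), which throws armnn::InvalidArgumentException on any inconsistent combination. Below is a minimal, illustrative driver (not part of ArmNN) showing the struct being populated by hand; the model path and the other values are made up, and in real use ProgramOptions::ParseOptions() fills every field from the command line.

    #include "ExecuteNetworkParams.hpp"

    int main()
    {
        ExecuteNetworkParams params{};               // value-init zeroes members without in-class defaults
        params.m_ModelPath      = "model.tflite";    // hypothetical model file
        params.m_ModelFormat    = "tflite-binary";
        params.m_ComputeDevices = { "CpuAcc", "CpuRef" };
        params.m_InputNames     = { "input" };
        params.m_OutputNames    = { "output" };
        params.m_Iterations     = 1;

        // Throws armnn::InvalidArgumentException on any inconsistent combination,
        // e.g. enabling both Bf16 and Fp16 turbo mode.
        params.ValidateParams();
    }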

Member Typedef Documentation

◆ TensorShapePtr

using TensorShapePtr = std::unique_ptr<armnn::TensorShape>

Definition at line 19 of file ExecuteNetworkParams.hpp.
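
Input shapes supplied on the command line end up in m_InputTensorShapes as owned pointers, one per entry in m_InputNames. A short sketch (the 1x224x224x3 shape is illustrative only):

    #include "ExecuteNetworkParams.hpp"
    #include <armnn/Tensor.hpp>
    #include <memory>

    void OverrideInputShape(ExecuteNetworkParams& params)
    {
        // One TensorShapePtr per entry in m_InputNames.
        params.m_InputTensorShapes.push_back(
            std::make_unique<armnn::TensorShape>(armnn::TensorShape({ 1, 224, 224, 3 })));
    }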

Member Enumeration Documentation

◆ TfLiteExecutor

enum ExecuteNetworkParams::TfLiteExecutor [strong]

Enumerator:
    ArmNNTfLiteParser
    ArmNNTfLiteDelegate
    TfliteInterpreter

Definition at line 21 of file ExecuteNetworkParams.hpp.

enum class TfLiteExecutor
{
    ArmNNTfLiteParser,
    ArmNNTfLiteDelegate,
    TfliteInterpreter
};
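
The enum selects which runtime actually executes a TfLite model. A hypothetical helper (not in ArmNN) showing the three choices:

    #include "ExecuteNetworkParams.hpp"

    // Map the executor choice to a human-readable label.
    const char* ExecutorName(ExecuteNetworkParams::TfLiteExecutor executor)
    {
        switch (executor)
        {
            case ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser:   return "Arm NN TfLite parser";
            case ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate: return "Arm NN TfLite delegate";
            case ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter:   return "TfLite interpreter";
        }
        return "unknown";
    }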

Member Function Documentation

◆ ValidateParams()

void ValidateParams ( )

Definition at line 111 of file ExecuteNetworkParams.cpp.

References ARMNN_LOG, CheckClTuningParameter(), CheckModelFormat(), armnn::DetailsWithEvents, armnn::InferAndValidate, IsModelBinary(), m_CachedNetworkFilePath, m_ComputeDevices, OptimizerOptions::m_Debug, m_DynamicBackendsPath, m_EnableBf16TurboMode, m_EnableFastMath, m_EnableFp16TurboMode, m_EnableLayerDetails, m_EnableProfiling, m_GenerateTensorData, m_InferOutputShape, m_InputNames, m_InputTensorDataFilePaths, m_InputTensorShapes, m_InputTypes, m_IsModelBinary, m_Iterations, m_MLGOTuningFilePath, m_ModelFormat, OptimizerOptions::m_ModelOptions, m_ModelPath, m_NumberOfThreads, m_OutputNames, m_OutputTensorFiles, m_OutputTypes, m_PrintIntermediate, OptimizerOptions::m_ProfilingEnabled, OptimizerOptions::m_ReduceFp32ToBf16, OptimizerOptions::m_ReduceFp32ToFp16, m_SaveCachedNetwork, OptimizerOptions::m_shapeInferenceMethod, m_ThresholdTime, m_TuningLevel, m_TuningPath, DelegateOptions::SetDynamicBackendsPath(), DelegateOptions::SetGpuProfilingState(), DelegateOptions::SetInternalProfilingParams(), DelegateOptions::SetOptimizerOptions(), DelegateOptions::SetSerializeToDot(), armnn::ValidateOnly, and ValidatePaths().

Referenced by ProgramOptions::ValidateExecuteNetworkParams().

void ExecuteNetworkParams::ValidateParams()
{
    if (m_DynamicBackendsPath == "")
    {
        // Check compute devices are valid unless they are dynamically loaded at runtime
        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                             << invalidBackends;
        }
    }

    CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
    {
        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
                                              "enabled at the same time.");
    }

    m_IsModelBinary = IsModelBinary(m_ModelFormat);

    CheckModelFormat(m_ModelFormat);

    // Check input tensor shapes
    if ((m_InputTensorShapes.size() != 0) &&
        (m_InputTensorShapes.size() != m_InputNames.size()))
    {
        throw armnn::InvalidArgumentException("input-name and input-tensor-shape must have "
                                              "the same amount of elements. ");
    }

    if (m_InputTensorDataFilePaths.size() != 0)
    {
        if (!ValidatePaths(m_InputTensorDataFilePaths, true))
        {
            throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
        }

        if (m_InputTensorDataFilePaths.size() < m_InputNames.size())
        {
            throw armnn::InvalidArgumentException(
                fmt::format("According to the number of input names the user provided the network has {} "
                            "inputs. But only {} input-tensor-data file paths were provided. Each input of the "
                            "model is expected to be stored in it's own file.",
                            m_InputNames.size(),
                            m_InputTensorDataFilePaths.size()));
        }
        else if (m_InputTensorDataFilePaths.size() % m_InputNames.size() != 0)
        {
            throw armnn::InvalidArgumentException(
                fmt::format("According to the number of input names the user provided the network has {} "
                            "inputs. The user specified {} input-tensor-data file paths which is not "
                            "divisible by the number of inputs.",
                            m_InputNames.size(),
                            m_InputTensorDataFilePaths.size()));
        }
    }

    if (m_InputTypes.size() == 0)
    {
        // Defaults the value of all inputs to "float"
        m_InputTypes.assign(m_InputNames.size(), "float");
    }
    else if ((m_InputTypes.size() != 0) &&
             (m_InputTypes.size() != m_InputNames.size()))
    {
        throw armnn::InvalidArgumentException("input-name and input-type must have the same amount of elements.");
    }

    // Make sure that the number of input files given is divisible by the number of inputs of the model
    if (!(m_InputTensorDataFilePaths.size() % m_InputNames.size() == 0))
    {
        throw armnn::InvalidArgumentException(
            fmt::format("The number of input-tensor-data files ({0}) is not divisible by the "
                        "number of inputs ({1} according to the number of input names).",
                        m_InputTensorDataFilePaths.size(),
                        m_InputNames.size()));
    }

    if (m_OutputTypes.size() == 0)
    {
        // Defaults the value of all outputs to "float"
        m_OutputTypes.assign(m_OutputNames.size(), "float");
    }
    else if ((m_OutputTypes.size() != 0) &&
             (m_OutputTypes.size() != m_OutputNames.size()))
    {
        throw armnn::InvalidArgumentException("output-name and output-type must have the same amount of elements.");
    }

    // Make sure that the number of output files given is equal to the number of outputs of the model
    // or equal to the number of outputs of the model multiplied with the number of iterations
    if (!m_OutputTensorFiles.empty())
    {
        if ((m_OutputTensorFiles.size() != m_OutputNames.size()) &&
            (m_OutputTensorFiles.size() != m_OutputNames.size() * m_Iterations))
        {
            std::stringstream errmsg;
            auto numOutputs = m_OutputNames.size();
            throw armnn::InvalidArgumentException(
                fmt::format("The user provided {0} output-tensor files. The only allowed number of output-tensor "
                            "files is the number of outputs of the network ({1} according to the number of "
                            "output names) or the number of outputs multiplied with the number of times the "
                            "network should be executed (NumOutputs * NumIterations = {1} * {2} = {3}).",
                            m_OutputTensorFiles.size(),
                            numOutputs,
                            m_Iterations,
                            numOutputs * m_Iterations));
        }
    }

    // Check that threshold time is not less than zero
    if (m_ThresholdTime < 0)
    {
        throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }
}
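
To make the input-file rules above concrete, a small worked example (file names are made up; ValidatePaths additionally requires that each path exists on disk):

    ExecuteNetworkParams params{};
    params.m_InputNames = { "in0", "in1" };                       // 2 inputs

    // 6 files: 6 >= 2 and 6 % 2 == 0, so both count checks pass
    // (three data sets, one per execution of the network).
    params.m_InputTensorDataFilePaths = { "a.bin", "b.bin", "c.bin",
                                          "d.bin", "e.bin", "f.bin" };

    // 1 file would fail the "fewer paths than inputs" check;
    // 5 files would fail the divisibility check (5 % 2 != 0).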

Member Data Documentation

◆ m_CachedNetworkFilePath

std::string m_CachedNetworkFilePath

◆ m_ComputeDevices

std::vector<armnn::BackendId> m_ComputeDevices

Definition at line 29 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_Concurrent

bool m_Concurrent

◆ m_DequantizeOutput

bool m_DequantizeOutput

Definition at line 31 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_DontPrintOutputs

bool m_DontPrintOutputs

◆ m_DynamicBackendsPath

std::string m_DynamicBackendsPath

Definition at line 32 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_EnableBf16TurboMode

bool m_EnableBf16TurboMode

◆ m_EnableDelegate

bool m_EnableDelegate = false

◆ m_EnableFastMath

bool m_EnableFastMath = false

◆ m_EnableFp16TurboMode

bool m_EnableFp16TurboMode

◆ m_EnableLayerDetails

bool m_EnableLayerDetails = false

◆ m_EnableProfiling

bool m_EnableProfiling

◆ m_GenerateTensorData

bool m_GenerateTensorData

◆ m_InferOutputShape

bool m_InferOutputShape = false

◆ m_InputNames

std::vector<std::string> m_InputNames

◆ m_InputTensorDataFilePaths

std::vector<std::string> m_InputTensorDataFilePaths

◆ m_InputTensorShapes

std::vector<TensorShapePtr> m_InputTensorShapes

◆ m_InputTypes

std::vector<std::string> m_InputTypes

◆ m_IsModelBinary

bool m_IsModelBinary

Definition at line 45 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ValidateParams().

◆ m_Iterations

size_t m_Iterations

◆ m_MLGOTuningFilePath

std::string m_MLGOTuningFilePath

◆ m_ModelFormat

std::string m_ModelFormat

Definition at line 47 of file ExecuteNetworkParams.hpp.

Referenced by main(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_ModelPath

std::string m_ModelPath

◆ m_NumberOfThreads

unsigned int m_NumberOfThreads

◆ m_OutputDetailsOnlyToStdOut

bool m_OutputDetailsOnlyToStdOut

Definition at line 51 of file ExecuteNetworkParams.hpp.

Referenced by main(), MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_OutputDetailsToStdOut

bool m_OutputDetailsToStdOut

Definition at line 50 of file ExecuteNetworkParams.hpp.

Referenced by main(), MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_OutputNames

std::vector<std::string> m_OutputNames

◆ m_OutputTensorFiles

std::vector<std::string> m_OutputTensorFiles

◆ m_OutputTypes

std::vector<std::string> m_OutputTypes

◆ m_ParseUnsupported

bool m_ParseUnsupported = false

Definition at line 55 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_PrintIntermediate

bool m_PrintIntermediate

◆ m_QuantizeInput

bool m_QuantizeInput

Definition at line 58 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_SaveCachedNetwork

bool m_SaveCachedNetwork

◆ m_SimultaneousIterations

size_t m_SimultaneousIterations

◆ m_SubgraphId

size_t m_SubgraphId

Definition at line 61 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_TfLiteExecutor

TfLiteExecutor m_TfLiteExecutor

◆ m_ThreadPoolSize

size_t m_ThreadPoolSize

◆ m_ThresholdTime

double m_ThresholdTime

◆ m_TuningLevel

int m_TuningLevel

◆ m_TuningPath

std::string m_TuningPath

The documentation for this struct was generated from the following files:

ExecuteNetworkParams.hpp
ExecuteNetworkParams.cpp