ArmNN
 21.05
ExecuteNetworkParams Struct Reference

Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter. More...

#include <ExecuteNetworkParams.hpp>

Public Types

enum  TfLiteExecutor { ArmNNTfLiteParser, ArmNNTfLiteDelegate, TfliteInterpreter }
 
using TensorShapePtr = std::unique_ptr< armnn::TensorShape >
 

Public Member Functions

void ValidateParams ()
 

Public Attributes

std::string m_CachedNetworkFilePath
 
std::vector< armnn::BackendId > m_ComputeDevices
 
bool m_Concurrent
 
bool m_DequantizeOutput
 
std::string m_DynamicBackendsPath
 
bool m_EnableBf16TurboMode
 
bool m_EnableFastMath = false
 
bool m_EnableFp16TurboMode
 
bool m_EnableLayerDetails = false
 
bool m_EnableProfiling
 
bool m_GenerateTensorData
 
bool m_InferOutputShape = false
 
bool m_EnableDelegate = false
 
std::vector< std::string > m_InputNames
 
std::vector< std::string > m_InputTensorDataFilePaths
 
std::vector< TensorShapePtr > m_InputTensorShapes
 
std::vector< std::string > m_InputTypes
 
bool m_IsModelBinary
 
size_t m_Iterations
 
std::string m_ModelFormat
 
std::string m_ModelPath
 
unsigned int m_NumberOfThreads
 
std::vector< std::string > m_OutputNames
 
std::vector< std::string > m_OutputTensorFiles
 
std::vector< std::string > m_OutputTypes
 
bool m_ParseUnsupported = false
 
bool m_PrintIntermediate
 
bool m_QuantizeInput
 
bool m_SaveCachedNetwork
 
size_t m_SimultaneousIterations
 
size_t m_SubgraphId
 
double m_ThresholdTime
 
int m_TuningLevel
 
std::string m_TuningPath
 
std::string m_MLGOTuningFilePath
 
TfLiteExecutor m_TfLiteExecutor
 

Detailed Description

Holds all parameters necessary to execute a network. Check ExecuteNetworkProgramOptions.cpp for a description of each parameter.

Definition at line 13 of file ExecuteNetworkParams.hpp.

Member Typedef Documentation

◆ TensorShapePtr

using TensorShapePtr = std::unique_ptr<armnn::TensorShape>

Definition at line 15 of file ExecuteNetworkParams.hpp.

Member Enumeration Documentation

◆ TfLiteExecutor

enum TfLiteExecutor
strong
Enumerator
ArmNNTfLiteParser 
ArmNNTfLiteDelegate 
TfliteInterpreter 

Definition at line 17 of file ExecuteNetworkParams.hpp.

18  {
19  ArmNNTfLiteParser,
20  ArmNNTfLiteDelegate,
21  TfliteInterpreter
22  };

Member Function Documentation

◆ ValidateParams()

void ValidateParams ( )

Definition at line 111 of file ExecuteNetworkParams.cpp.

References ARMNN_LOG, CheckClTuningParameter(), CheckModelFormat(), IsModelBinary(), m_ComputeDevices, m_Concurrent, m_DynamicBackendsPath, m_EnableBf16TurboMode, m_EnableFp16TurboMode, m_GenerateTensorData, m_InputNames, m_InputTensorDataFilePaths, m_InputTensorShapes, m_InputTypes, m_IsModelBinary, m_ModelFormat, m_OutputNames, m_OutputTensorFiles, m_OutputTypes, m_SimultaneousIterations, m_ThresholdTime, m_TuningLevel, m_TuningPath, and ValidatePaths().

Referenced by ProgramOptions::ValidateExecuteNetworkParams().

112 {
113  // Set to true if it is preferred to throw an exception rather than use ARMNN_LOG
114  bool throwExc = false;
115 
116  try
117  {
118  if (m_DynamicBackendsPath == "")
119  {
120  // Check compute devices are valid unless they are dynamically loaded at runtime
121  std::string invalidBackends;
122  if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
123  {
124  ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
125  << invalidBackends;
126  }
127  }
128 
130 
132  {
133  ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
134  }
135 
137 
139 
140  // Check number of simultaneous iterations
141  if ((m_SimultaneousIterations < 1))
142  {
143  ARMNN_LOG(fatal) << "simultaneous-iterations cannot be less than 1. ";
144  }
145 
146  // Check input tensor shapes
147  if ((m_InputTensorShapes.size() != 0) &&
148  (m_InputTensorShapes.size() != m_InputNames.size()))
149  {
150  ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements. ";
151  }
152 
153  if (m_InputTensorDataFilePaths.size() != 0)
154  {
156  {
157  ARMNN_LOG(fatal) << "One or more input data file paths are not valid. ";
158  }
159 
160  if (!m_Concurrent && m_InputTensorDataFilePaths.size() != m_InputNames.size())
161  {
162  ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements. ";
163  }
164 
166  {
167  ARMNN_LOG(fatal) << "There is not enough input data for " << m_SimultaneousIterations << " execution.";
168  }
170  {
171  ARMNN_LOG(fatal) << "There is more input data for " << m_SimultaneousIterations << " execution.";
172  }
173  }
174 
175  if ((m_OutputTensorFiles.size() != 0) &&
176  (m_OutputTensorFiles.size() != m_OutputNames.size()))
177  {
178  ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements. ";
179  }
180 
181  if ((m_OutputTensorFiles.size() != 0)
183  {
184  ARMNN_LOG(fatal) << "There is not enough output data for " << m_SimultaneousIterations << " execution.";
185  }
186 
187  if (m_InputTypes.size() == 0)
188  {
189  //Defaults the value of all inputs to "float"
190  m_InputTypes.assign(m_InputNames.size(), "float");
191  }
192  else if ((m_InputTypes.size() != 0) &&
193  (m_InputTypes.size() != m_InputNames.size()))
194  {
195  ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
196  }
197 
198  if (m_OutputTypes.size() == 0)
199  {
200  //Defaults the value of all outputs to "float"
201  m_OutputTypes.assign(m_OutputNames.size(), "float");
202  }
203  else if ((m_OutputTypes.size() != 0) &&
204  (m_OutputTypes.size() != m_OutputNames.size()))
205  {
206  ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
207  }
208 
209  // Check that threshold time is not less than zero
210  if (m_ThresholdTime < 0)
211  {
212  ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
213  }
214  }
215  catch (std::string& exc)
216  {
217  if (throwExc)
218  {
220  }
221  else
222  {
223  std::cout << exc;
224  exit(EXIT_FAILURE);
225  }
226  }
227  // Check turbo modes
228 
229  // Warn if ExecuteNetwork will generate dummy input data
231  {
232  ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
233  }
234 }
std::vector< std::string > m_InputTypes
std::vector< TensorShapePtr > m_InputTensorShapes
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
std::vector< std::string > m_OutputNames
std::vector< std::string > m_OutputTensorFiles
void CheckClTuningParameter(const int &tuningLevel, const std::string &tuningPath, const std::vector< armnn::BackendId > computeDevices)
std::vector< armnn::BackendId > m_ComputeDevices
std::vector< std::string > m_OutputTypes
std::vector< std::string > m_InputNames
void CheckModelFormat(const std::string &modelFormat)
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies if a given vector of strings are valid paths.
std::vector< std::string > m_InputTensorDataFilePaths
bool IsModelBinary(const std::string &modelFormat)

Member Data Documentation

◆ m_CachedNetworkFilePath

std::string m_CachedNetworkFilePath

Definition at line 24 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_ComputeDevices

std::vector<armnn::BackendId> m_ComputeDevices

Definition at line 25 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_Concurrent

bool m_Concurrent

◆ m_DequantizeOutput

bool m_DequantizeOutput

Definition at line 27 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_DynamicBackendsPath

std::string m_DynamicBackendsPath

Definition at line 28 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_EnableBf16TurboMode

bool m_EnableBf16TurboMode

◆ m_EnableDelegate

bool m_EnableDelegate = false

◆ m_EnableFastMath

bool m_EnableFastMath = false

Definition at line 30 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_EnableFp16TurboMode

bool m_EnableFp16TurboMode

◆ m_EnableLayerDetails

bool m_EnableLayerDetails = false

Definition at line 32 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_EnableProfiling

bool m_EnableProfiling

◆ m_GenerateTensorData

bool m_GenerateTensorData

Definition at line 34 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_InferOutputShape

bool m_InferOutputShape = false

Definition at line 35 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_InputNames

std::vector<std::string> m_InputNames

Definition at line 37 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_InputTensorDataFilePaths

std::vector<std::string> m_InputTensorDataFilePaths

Definition at line 38 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_InputTensorShapes

std::vector<TensorShapePtr> m_InputTensorShapes

Definition at line 39 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_InputTypes

std::vector<std::string> m_InputTypes

Definition at line 40 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_IsModelBinary

bool m_IsModelBinary

Definition at line 41 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ValidateParams().

◆ m_Iterations

size_t m_Iterations

Definition at line 42 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_MLGOTuningFilePath

std::string m_MLGOTuningFilePath

◆ m_ModelFormat

std::string m_ModelFormat

Definition at line 43 of file ExecuteNetworkParams.hpp.

Referenced by main(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_ModelPath

std::string m_ModelPath

Definition at line 44 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_NumberOfThreads

unsigned int m_NumberOfThreads

Definition at line 45 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_OutputNames

std::vector<std::string> m_OutputNames

Definition at line 46 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_OutputTensorFiles

std::vector<std::string> m_OutputTensorFiles

Definition at line 47 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_OutputTypes

std::vector<std::string> m_OutputTypes

Definition at line 48 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), ProgramOptions::ParseOptions(), and ValidateParams().

◆ m_ParseUnsupported

bool m_ParseUnsupported = false

Definition at line 49 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_PrintIntermediate

bool m_PrintIntermediate

Definition at line 50 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_QuantizeInput

bool m_QuantizeInput

Definition at line 51 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_SaveCachedNetwork

bool m_SaveCachedNetwork

Definition at line 52 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_SimultaneousIterations

size_t m_SimultaneousIterations

◆ m_SubgraphId

size_t m_SubgraphId

Definition at line 54 of file ExecuteNetworkParams.hpp.

Referenced by MainImpl(), and ProgramOptions::ProgramOptions().

◆ m_TfLiteExecutor

TfLiteExecutor m_TfLiteExecutor

Definition at line 59 of file ExecuteNetworkParams.hpp.

Referenced by main(), and ProgramOptions::ParseOptions().

◆ m_ThresholdTime

double m_ThresholdTime

◆ m_TuningLevel

int m_TuningLevel

◆ m_TuningPath

std::string m_TuningPath

The documentation for this struct was generated from the following files: