ArmNN 23.02
OptimizerOptions Struct Reference

ArmNN performs an optimization on each model/network before it gets loaded for execution.

#include <INetwork.hpp>

Public Member Functions

 OptimizerOptions ()
 
 OptimizerOptions (bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled, ModelOptions modelOptions={}, bool exportEnabled=false, bool debugToFile=false)
 
 OptimizerOptions (bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16=false, ShapeInferenceMethod shapeInferenceMethod=armnn::ShapeInferenceMethod::ValidateOnly, bool importEnabled=false, ModelOptions modelOptions={}, bool exportEnabled=false, bool debugToFile=false, bool allowExpandedDims=false)
 
const std::string ToString () const
 

Public Attributes

bool m_ReduceFp32ToFp16
 Reduces all Fp32 operators in the model to Fp16 for faster processing.

bool m_Debug
 Add debug data for easier troubleshooting.

bool m_DebugToFile
 Pass debug data to separate output files for easier troubleshooting.

bool m_ReduceFp32ToBf16
 Note: This feature has been replaced by enabling Fast Math in compute library backend options.

ShapeInferenceMethod m_shapeInferenceMethod
 Infer output size when not available.

bool m_ImportEnabled
 Enable Import.
 
ModelOptions m_ModelOptions
 Holds backend-specific model options.
 
bool m_ProfilingEnabled
 Enable profiling dump of the optimizer phase.

bool m_ExportEnabled
 Enable Export.

bool m_AllowExpandedDims
 When calculating tensor sizes, dimensions of size == 1 will be ignored.
 

Detailed Description

ArmNN performs an optimization on each model/network before it gets loaded for execution.

OptimizerOptions provides a set of features that allows the user to customize this optimization on a per model basis.

Examples
CustomMemoryAllocatorSample.cpp.

Definition at line 137 of file INetwork.hpp.
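
Typical usage is to construct an OptimizerOptions, adjust the relevant members, and pass it to armnn::Optimize(). A minimal sketch, assuming an already-built INetwork ("network") and an IRuntime ("runtime"); those names are illustrative and not part of this header:

    #include <armnn/ArmNN.hpp>

    // "network" (INetworkPtr) and "runtime" (IRuntimePtr) are assumed to exist.
    armnn::OptimizerOptions options;
    options.m_ReduceFp32ToFp16 = true;   // run Fp32 operators in Fp16 where supported
    options.m_ProfilingEnabled = true;   // dump profiling data for the optimizer phase

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network,
                        {armnn::Compute::CpuAcc, armnn::Compute::CpuRef},
                        runtime->GetDeviceSpec(),
                        options);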

Constructor & Destructor Documentation

◆ OptimizerOptions() [1/3]

OptimizerOptions ( )
inline

Definition at line 139 of file INetwork.hpp.

    : m_ReduceFp32ToFp16(false)
    , m_Debug(false)
    , m_DebugToFile(false)
    , m_ReduceFp32ToBf16(false)
    , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
    , m_ImportEnabled(false)
    , m_ModelOptions()
    , m_ProfilingEnabled(false)
    , m_ExportEnabled(false)
    , m_AllowExpandedDims(false)
    {}

References armnn::ValidateOnly.

◆ OptimizerOptions() [2/3]

OptimizerOptions ( bool  reduceFp32ToFp16,
bool  debug,
bool  reduceFp32ToBf16,
bool  importEnabled,
ModelOptions  modelOptions = {},
bool  exportEnabled = false,
bool  debugToFile = false 
)
inline

Definition at line 152 of file INetwork.hpp.

    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
                     ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
        , m_Debug(debug)
        , m_DebugToFile(debugToFile)
        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
        , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
        , m_ImportEnabled(importEnabled)
        , m_ModelOptions(modelOptions)
        , m_ProfilingEnabled(false)
        , m_ExportEnabled(exportEnabled)
        , m_AllowExpandedDims(false)
    {
    }

◆ OptimizerOptions() [3/3]

OptimizerOptions ( bool  reduceFp32ToFp16,
bool  debug,
bool  reduceFp32ToBf16 = false,
ShapeInferenceMethod  shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
bool  importEnabled = false,
ModelOptions  modelOptions = {},
bool  exportEnabled = false,
bool  debugToFile = false,
bool  allowExpandedDims = false 
)
inline

Definition at line 167 of file INetwork.hpp.

    OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
                     ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                     bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
                     bool debugToFile = false, bool allowExpandedDims = false)
        : m_ReduceFp32ToFp16(reduceFp32ToFp16)
        , m_Debug(debug)
        , m_DebugToFile(debugToFile)
        , m_ReduceFp32ToBf16(reduceFp32ToBf16)
        , m_shapeInferenceMethod(shapeInferenceMethod)
        , m_ImportEnabled(importEnabled)
        , m_ModelOptions(modelOptions)
        , m_ProfilingEnabled(false)
        , m_ExportEnabled(exportEnabled)
        , m_AllowExpandedDims(allowExpandedDims)
    {
    }
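
This overload lets several options be set in one expression. A small sketch; the values are illustrative:

    // Enable Fp16 reduction and debug data in one call; the remaining
    // parameters keep their defaults.
    armnn::OptimizerOptions options(/*reduceFp32ToFp16=*/true,
                                    /*debug=*/true,
                                    /*reduceFp32ToBf16=*/false,
                                    armnn::ShapeInferenceMethod::InferAndValidate);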

Member Function Documentation

◆ ToString()

const std::string ToString ( ) const
inline

Definition at line 184 of file INetwork.hpp.

    {
        std::stringstream stream;
        stream << "OptimizerOptions: \n";
        stream << "\tReduceFp32ToFp16: " << m_ReduceFp32ToFp16 << "\n";
        stream << "\tReduceFp32ToBf16: " << m_ReduceFp32ToBf16 << "\n";
        stream << "\tDebug: " << m_Debug << "\n";
        stream << "\tDebug to file: " << m_DebugToFile << "\n";
        stream << "\tShapeInferenceMethod: " <<
            (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
        stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
        stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
        stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
        stream << "\tAllowExpandedDims: " << m_AllowExpandedDims << "\n";

        stream << "\tModelOptions: \n";
        for (auto optionsGroup : m_ModelOptions)
        {
            for (size_t i = 0; i < optionsGroup.GetOptionCount(); i++)
            {
                const armnn::BackendOptions::BackendOption option = optionsGroup.GetOption(i);
                stream << "\t\tBackend: " << optionsGroup.GetBackendId() << "\n"
                       << "\t\t\tOption: " << option.GetName() << "\n"
                       << "\t\t\tValue: " << std::string(option.GetValue().ToString()) << "\n";
            }
        }

        return stream.str();
    }

References BackendOptions::BackendOption::GetName(), BackendOptions::BackendOption::GetValue(), OptimizerOptions::m_AllowExpandedDims, OptimizerOptions::m_Debug, OptimizerOptions::m_DebugToFile, OptimizerOptions::m_ExportEnabled, OptimizerOptions::m_ImportEnabled, OptimizerOptions::m_ModelOptions, OptimizerOptions::m_ProfilingEnabled, OptimizerOptions::m_ReduceFp32ToBf16, OptimizerOptions::m_ReduceFp32ToFp16, OptimizerOptions::m_shapeInferenceMethod, BackendOptions::Var::ToString(), and armnn::ValidateOnly.

Referenced by armnn::Optimize().
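
This makes it easy to log the active configuration before optimizing. A minimal sketch:

    #include <iostream>

    // Print the current optimizer configuration.
    armnn::OptimizerOptions options(/*reduceFp32ToFp16=*/true, /*debug=*/false);
    std::cout << options.ToString();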

Member Data Documentation

◆ m_AllowExpandedDims

bool m_AllowExpandedDims

When calculating tensor sizes, dimensions of size == 1 will be ignored.

Definition at line 247 of file INetwork.hpp.

Referenced by OptimizerOptions::ToString().

◆ m_Debug

bool m_Debug

Add debug data for easier troubleshooting.

Definition at line 222 of file INetwork.hpp.

Referenced by armnn::Optimize(), and OptimizerOptions::ToString().

◆ m_DebugToFile

bool m_DebugToFile

Pass debug data to separate output files for easier troubleshooting.

Definition at line 225 of file INetwork.hpp.

Referenced by armnn::Optimize(), and OptimizerOptions::ToString().

◆ m_ExportEnabled

bool m_ExportEnabled

Enable Export.

Definition at line 244 of file INetwork.hpp.

Referenced by armnn::Optimize(), and OptimizerOptions::ToString().

◆ m_ImportEnabled

bool m_ImportEnabled

Enable Import.

Examples
CustomMemoryAllocatorSample.cpp.

Definition at line 235 of file INetwork.hpp.

Referenced by armnn::Optimize(), and OptimizerOptions::ToString().
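
Import is typically paired with export and with a matching memory source when the network is loaded; see CustomMemoryAllocatorSample.cpp for the full flow. A minimal sketch of enabling both flags:

    // Allow the runtime to import input buffers and export output buffers
    // rather than copying them. The memory supplied at inference time must
    // meet the backend's alignment requirements, and the MemorySource chosen
    // when loading the network must match these flags.
    armnn::OptimizerOptions options;
    options.m_ImportEnabled = true;
    options.m_ExportEnabled = true;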

◆ m_ModelOptions

ModelOptions m_ModelOptions

Holds backend-specific model options.

Definition at line 238 of file INetwork.hpp.

Referenced by OptimizerOptions::ToString().
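
Model options are grouped per backend as BackendOptions. A minimal sketch; "FastMathEnabled" is the compute-library Fast Math switch referred to under m_ReduceFp32ToBf16, and option names are defined by each backend:

    // Attach per-backend options to the optimizer options.
    armnn::OptimizerOptions options;
    options.m_ModelOptions.push_back(
        armnn::BackendOptions("CpuAcc", {{"FastMathEnabled", true}}));
    options.m_ModelOptions.push_back(
        armnn::BackendOptions("GpuAcc", {{"FastMathEnabled", true}}));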

◆ m_ProfilingEnabled

bool m_ProfilingEnabled

Enable profiling dump of the optimizer phase.

Definition at line 241 of file INetwork.hpp.

Referenced by armnn::Optimize(), ArmnnDriverImpl::PrepareArmnnModel(), ArmnnDriverImpl::PrepareArmnnModelFromCache(), and OptimizerOptions::ToString().

◆ m_ReduceFp32ToBf16

bool m_ReduceFp32ToBf16

Note: This feature has been replaced by enabling Fast Math in compute library backend options.

This is currently a placeholder option.

Definition at line 229 of file INetwork.hpp.

Referenced by armnn::Optimize(), and OptimizerOptions::ToString().

◆ m_ReduceFp32ToFp16

bool m_ReduceFp32ToFp16

Reduces all Fp32 operators in the model to Fp16 for faster processing.

Note: This feature works best if all operators of the model are in Fp32. ArmNN will insert conversion layers around operators that were not in Fp32 to begin with, or that are not supported in Fp16. The overhead of these conversions can lead to slower overall performance if too many conversions are required.

Definition at line 219 of file INetwork.hpp.

Referenced by armnn::Optimize(), ArmnnDriverImpl::PrepareArmnnModel(), ArmnnDriverImpl::PrepareArmnnModelFromCache(), and OptimizerOptions::ToString().

◆ m_shapeInferenceMethod

ShapeInferenceMethod m_shapeInferenceMethod

Infer output size when not available.

Definition at line 232 of file INetwork.hpp.

Referenced by armnn::Optimize(), and OptimizerOptions::ToString().
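
For networks whose output shapes are not fully specified, InferAndValidate lets the optimizer derive them. A minimal sketch:

    // Infer missing output shapes during optimization instead of only
    // validating the shapes that are already present.
    armnn::OptimizerOptions options;
    options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;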


The documentation for this struct was generated from the following file:
INetwork.hpp