ArmNN 22.05
Params Struct Reference

#include <InferenceModel.hpp>

Public Member Functions

 Params ()
 

Public Attributes

std::string m_ModelPath
 
std::vector< std::string > m_InputBindings
 
std::vector< armnn::TensorShape > m_InputShapes
 
std::vector< std::string > m_OutputBindings
 
std::vector< armnn::BackendId > m_ComputeDevices
 
std::string m_DynamicBackendsPath
 
size_t m_SubgraphId
 
bool m_AllowExpandedDims
 
bool m_IsModelBinary
 
bool m_VisualizePostOptimizationModel
 
bool m_EnableFp16TurboMode
 
bool m_EnableBf16TurboMode
 
bool m_PrintIntermediateLayers
 
bool m_ParseUnsupported
 
bool m_InferOutputShape
 
bool m_EnableFastMath
 
bool m_SaveCachedNetwork
 
bool m_OutputDetailsToStdOut
 
bool m_OutputDetailsOnlyToStdOut
 
std::string m_CachedNetworkFilePath
 
unsigned int m_NumberOfThreads
 
std::string m_MLGOTuningFilePath
 
bool m_AsyncEnabled
 
size_t m_ThreadPoolSize
 
bool m_ImportInputsIfAligned
 

Detailed Description

Definition at line 89 of file InferenceModel.hpp.
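Params groups everything needed to build and run an InferenceModel. As a rough illustration, the sketch below fills in the most commonly used fields for a binary (e.g. TFLite) model; the model path, binding names, and tensor shape are hypothetical placeholders, not values mandated by the API.

#include "InferenceModel.hpp"

// Illustrative only: path, binding names, and shape are placeholders.
Params params;
params.m_ModelPath      = "model.tflite";   // hypothetical model file
params.m_IsModelBinary  = true;             // binary flatbuffer, not a text format
params.m_InputBindings  = { "input" };      // hypothetical input tensor name
params.m_InputShapes    = { armnn::TensorShape({ 1, 224, 224, 3 }) };
params.m_OutputBindings = { "output" };     // hypothetical output tensor name
params.m_ComputeDevices = { armnn::Compute::CpuAcc,
                            armnn::Compute::CpuRef }; // fall back to reference backend

All remaining fields keep the defaults set by the Params() constructor documented below.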

Constructor & Destructor Documentation

◆ Params()

Params ( )
inline

Definition at line 118 of file InferenceModel.hpp.

References Params::m_AllowExpandedDims, Params::m_AsyncEnabled, Params::m_CachedNetworkFilePath, Params::m_EnableBf16TurboMode, Params::m_EnableFastMath, Params::m_EnableFp16TurboMode, Params::m_ImportInputsIfAligned, Params::m_InferOutputShape, Params::m_IsModelBinary, Params::m_MLGOTuningFilePath, Params::m_NumberOfThreads, Params::m_OutputDetailsOnlyToStdOut, Params::m_OutputDetailsToStdOut, Params::m_ParseUnsupported, Params::m_PrintIntermediateLayers, Params::m_SaveCachedNetwork, Params::m_SubgraphId, Params::m_ThreadPoolSize, and Params::m_VisualizePostOptimizationModel.

Params()
    : m_ComputeDevices{}
    , m_SubgraphId(0)
    , m_AllowExpandedDims(false)
    , m_IsModelBinary(true)
    , m_VisualizePostOptimizationModel(false)
    , m_EnableFp16TurboMode(false)
    , m_EnableBf16TurboMode(false)
    , m_PrintIntermediateLayers(false)
    , m_ParseUnsupported(false)
    , m_InferOutputShape(false)
    , m_EnableFastMath(false)
    , m_SaveCachedNetwork(false)
    , m_OutputDetailsToStdOut(false)
    , m_OutputDetailsOnlyToStdOut(false)
    , m_CachedNetworkFilePath("")
    , m_NumberOfThreads(0)
    , m_MLGOTuningFilePath("")
    , m_AsyncEnabled(false)
    , m_ThreadPoolSize(0)
    , m_ImportInputsIfAligned(false)
{}

Member Data Documentation

◆ m_AllowExpandedDims

bool m_AllowExpandedDims

◆ m_AsyncEnabled

bool m_AsyncEnabled

◆ m_CachedNetworkFilePath

std::string m_CachedNetworkFilePath

◆ m_ComputeDevices

std::vector<armnn::BackendId> m_ComputeDevices
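
Each entry is an armnn::BackendId, which can be built from the armnn::Compute enum or from a backend name string, so the following illustrative assignment is equivalent to listing the enum values:

params.m_ComputeDevices = { "GpuAcc", "CpuAcc", "CpuRef" };

The order expresses preference: during optimization, each layer is assigned to the first listed backend that supports it.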

◆ m_DynamicBackendsPath

std::string m_DynamicBackendsPath

◆ m_EnableBf16TurboMode

bool m_EnableBf16TurboMode

◆ m_EnableFastMath

bool m_EnableFastMath

◆ m_EnableFp16TurboMode

bool m_EnableFp16TurboMode

◆ m_ImportInputsIfAligned

bool m_ImportInputsIfAligned

◆ m_InferOutputShape

bool m_InferOutputShape

◆ m_InputBindings

std::vector<std::string> m_InputBindings

◆ m_InputShapes

std::vector<armnn::TensorShape> m_InputShapes

Definition at line 93 of file InferenceModel.hpp.

Referenced by CreateNetworkImpl< IParser >::Create(), and MainImpl().
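
Those call sites pair each entry of m_InputShapes with the binding name at the same index in m_InputBindings. The following is a hypothetical reconstruction of that pairing, not the verbatim library code:

// Sketch: zip binding names with shape overrides, as a parser overload
// taking explicit input shapes would expect. An empty m_InputShapes
// means the shapes are taken from the model instead.
std::map<std::string, armnn::TensorShape> inputShapes;
if (!params.m_InputShapes.empty())
{
    const size_t numBindings = params.m_InputBindings.size();
    if (params.m_InputShapes.size() < numBindings)
    {
        throw armnn::Exception("Fewer input shapes than input binding names");
    }
    for (size_t i = 0; i < numBindings; ++i)
    {
        inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
    }
}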

◆ m_IsModelBinary

bool m_IsModelBinary

◆ m_MLGOTuningFilePath

std::string m_MLGOTuningFilePath

◆ m_ModelPath

std::string m_ModelPath

◆ m_NumberOfThreads

unsigned int m_NumberOfThreads

◆ m_OutputBindings

std::vector<std::string> m_OutputBindings

◆ m_OutputDetailsOnlyToStdOut

bool m_OutputDetailsOnlyToStdOut

◆ m_OutputDetailsToStdOut

bool m_OutputDetailsToStdOut

◆ m_ParseUnsupported

bool m_ParseUnsupported

◆ m_PrintIntermediateLayers

bool m_PrintIntermediateLayers

◆ m_SaveCachedNetwork

bool m_SaveCachedNetwork

◆ m_SubgraphId

size_t m_SubgraphId

◆ m_ThreadPoolSize

size_t m_ThreadPoolSize

◆ m_VisualizePostOptimizationModel

bool m_VisualizePostOptimizationModel

The documentation for this struct was generated from the following file:

InferenceModel.hpp