From f86be93b7492b381370cae7bf71eca8572a0cbae Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Tue, 24 Aug 2021 16:27:15 +0100
Subject: IVGCVSW-5924 Update 21.08 Doxygen Documents

* Also updated latest symlink.

Signed-off-by: Matthew Sloyan
Change-Id: If9b4e0e52464abdf797b9eb858ae19bcc64c2aea
---
 ...truct_inference_model_internal_1_1_params.xhtml | 622 +++++++++++++++++++++
 1 file changed, 622 insertions(+)
 create mode 100644 21.08/struct_inference_model_internal_1_1_params.xhtml

diff --git a/21.08/struct_inference_model_internal_1_1_params.xhtml b/21.08/struct_inference_model_internal_1_1_params.xhtml
new file mode 100644
index 0000000000..d024983f1b
--- /dev/null
+++ b/21.08/struct_inference_model_internal_1_1_params.xhtml
@@ -0,0 +1,622 @@

ArmNN: Params Struct Reference
Params Struct Reference
#include <InferenceModel.hpp>

Public Member Functions

 Params ()
Public Attributes

 std::string m_ModelPath
 std::vector< std::string > m_InputBindings
 std::vector< armnn::TensorShape > m_InputShapes
 std::vector< std::string > m_OutputBindings
 std::vector< armnn::BackendId > m_ComputeDevices
 std::string m_DynamicBackendsPath
 size_t m_SubgraphId
 bool m_IsModelBinary
 bool m_VisualizePostOptimizationModel
 bool m_EnableFp16TurboMode
 bool m_EnableBf16TurboMode
 bool m_PrintIntermediateLayers
 bool m_ParseUnsupported
 bool m_InferOutputShape
 bool m_EnableFastMath
 bool m_SaveCachedNetwork
 bool m_OutputDetailsToStdOut
 std::string m_CachedNetworkFilePath
 unsigned int m_NumberOfThreads
 std::string m_MLGOTuningFilePath
 bool m_AsyncEnabled
 size_t m_ThreadPoolSize
Detailed Description
Definition at line 85 of file InferenceModel.hpp.
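
The source provides no prose description here, so the following is a minimal usage sketch rather than code from the ArmNN sources: it assumes the qualified name InferenceModelInternal::Params implied by this page's filename, and the model path and binding names are hypothetical placeholders. Only the members are taken from this page.

// Sketch only: populate the fields documented on this page before
// constructing an InferenceModel. Values marked hypothetical are
// illustrative, not taken from the ArmNN sources.
#include "InferenceModel.hpp"

InferenceModelInternal::Params MakeExampleParams()
{
    InferenceModelInternal::Params params;        // defaults from Params()
    params.m_ModelPath      = "model.tflite";     // hypothetical model file
    params.m_IsModelBinary  = true;               // matches the default
    params.m_InputBindings  = { "input" };        // hypothetical tensor name
    params.m_OutputBindings = { "output" };       // hypothetical tensor name
    params.m_ComputeDevices = { armnn::Compute::CpuAcc,
                                armnn::Compute::CpuRef };  // preference order
    return params;
}

Listing CpuRef last keeps the reference backend as a fallback, the usual convention in ArmNN's sample applications.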


Constructor & Destructor Documentation


◆ Params()

Params ()   [inline]

Definition at line 111 of file InferenceModel.hpp.


References Params::m_AsyncEnabled, Params::m_CachedNetworkFilePath, Params::m_EnableBf16TurboMode, Params::m_EnableFastMath, Params::m_EnableFp16TurboMode, Params::m_InferOutputShape, Params::m_IsModelBinary, Params::m_MLGOTuningFilePath, Params::m_NumberOfThreads, Params::m_OutputDetailsToStdOut, Params::m_ParseUnsupported, Params::m_PrintIntermediateLayers, Params::m_SaveCachedNetwork, Params::m_SubgraphId, Params::m_ThreadPoolSize, and Params::m_VisualizePostOptimizationModel.

112      : m_ComputeDevices{}
113      , m_SubgraphId(0)
114      , m_IsModelBinary(true)
115      , m_VisualizePostOptimizationModel(false)
116      , m_EnableFp16TurboMode(false)
117      , m_EnableBf16TurboMode(false)
118      , m_PrintIntermediateLayers(false)
119      , m_ParseUnsupported(false)
120      , m_InferOutputShape(false)
121      , m_EnableFastMath(false)
122      , m_SaveCachedNetwork(false)
123      , m_OutputDetailsToStdOut(false)
124      , m_CachedNetworkFilePath("")
125      , m_NumberOfThreads(0)
126      , m_MLGOTuningFilePath("")
127      , m_AsyncEnabled(false)
128      , m_ThreadPoolSize(0)
129  {}
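
To make the defaults above concrete, here is a hedged self-check, again assuming the InferenceModelInternal::Params spelling; every asserted value comes from the initializer list shown above.

// Sketch: asserts the defaults established by the inline Params()
// constructor documented above.
#include <cassert>
#include "InferenceModel.hpp"

int main()
{
    InferenceModelInternal::Params params;
    assert(params.m_IsModelBinary);           // initialised to true
    assert(params.m_SubgraphId == 0);
    assert(params.m_NumberOfThreads == 0);
    assert(!params.m_AsyncEnabled);
    assert(params.m_ThreadPoolSize == 0);
    assert(params.m_ComputeDevices.empty());  // no backend preselected
    return 0;
}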

Member Data Documentation

◆ m_AsyncEnabled

bool m_AsyncEnabled

◆ m_CachedNetworkFilePath

std::string m_CachedNetworkFilePath

◆ m_ComputeDevices

std::vector<armnn::BackendId> m_ComputeDevices

Definition at line 91 of file InferenceModel.hpp.

Referenced by InferenceModel< IParser, TDataType >::InferenceModel(), main(), and MainImpl().

◆ m_DynamicBackendsPath

std::string m_DynamicBackendsPath

◆ m_EnableBf16TurboMode

bool m_EnableBf16TurboMode

◆ m_EnableFastMath

bool m_EnableFastMath

◆ m_EnableFp16TurboMode

bool m_EnableFp16TurboMode

◆ m_InferOutputShape

bool m_InferOutputShape

◆ m_InputBindings

std::vector<std::string> m_InputBindings

◆ m_InputShapes

std::vector<armnn::TensorShape> m_InputShapes

Definition at line 89 of file InferenceModel.hpp.

Referenced by CreateNetworkImpl< IParser >::Create(), and MainImpl().

◆ m_IsModelBinary

bool m_IsModelBinary

Definition at line 94 of file InferenceModel.hpp.

Referenced by CreateNetworkImpl< IParser >::Create(), main(), MainImpl(), and Params::Params().

◆ m_MLGOTuningFilePath

std::string m_MLGOTuningFilePath

◆ m_ModelPath

std::string m_ModelPath

◆ m_NumberOfThreads

unsigned int m_NumberOfThreads

◆ m_OutputBindings

std::vector<std::string> m_OutputBindings

◆ m_OutputDetailsToStdOut

bool m_OutputDetailsToStdOut

◆ m_ParseUnsupported

bool m_ParseUnsupported

Definition at line 99 of file InferenceModel.hpp.

Referenced by CreateNetworkImpl< IParser >::Create(), MainImpl(), and Params::Params().

◆ m_PrintIntermediateLayers

bool m_PrintIntermediateLayers

◆ m_SaveCachedNetwork

bool m_SaveCachedNetwork

◆ m_SubgraphId

size_t m_SubgraphId

Definition at line 93 of file InferenceModel.hpp.

Referenced by CreateNetworkImpl< IParser >::Create(), MainImpl(), and Params::Params().

◆ m_ThreadPoolSize

size_t m_ThreadPoolSize

◆ m_VisualizePostOptimizationModel

bool m_VisualizePostOptimizationModel
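
Because CreateNetworkImpl< IParser >::Create() consumes m_InputShapes together with m_InputBindings, a natural reading is one shape per input binding. A hedged sketch follows; the binding name and dimensions are hypothetical, and the InferenceModelInternal::Params spelling is assumed as above.

// Sketch: pair each m_InputBindings entry with a TensorShape in
// m_InputShapes; "data" and the NCHW dimensions are hypothetical.
#include "InferenceModel.hpp"

void SetExampleInputs(InferenceModelInternal::Params& params)
{
    params.m_InputBindings = { "data" };
    params.m_InputShapes   = { armnn::TensorShape({ 1, 3, 224, 224 }) };
}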
The documentation for this struct was generated from the following file:

InferenceModel.hpp