aboutsummaryrefslogtreecommitdiff
path: root/include/armnn/INetwork.hpp
diff options
context:
space:
mode:
author: Keith Davis <keith.davis@arm.com> 2022-10-14 15:50:33 +0100
committer: KeithARM <keith.davis@arm.com> 2022-10-19 10:33:40 +0000
commit: 15f9c68adef324cd0158cea3d021c0f6bef5eecf (patch)
tree: 1cd48b345d182fd19efdc40a32e2540befd8f925 /include/armnn/INetwork.hpp
parent: 7bbf56598010041ea46c3fa9d32604db777ee26e (diff)
download: armnn-15f9c68adef324cd0158cea3d021c0f6bef5eecf.tar.gz
MLCE-545 INT8 TFLite model execution abnormal
* Add functionality to print output tensors to file in tempdir
* UnitTests

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Idfb4c186544187db1fecdfca11c662540f645439
Diffstat (limited to 'include/armnn/INetwork.hpp')
-rw-r--r--  include/armnn/INetwork.hpp | 11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 0289a90e71..687f2c3e81 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -129,6 +129,7 @@ struct OptimizerOptions
OptimizerOptions()
: m_ReduceFp32ToFp16(false)
, m_Debug(false)
+ , m_DebugToFile(false)
, m_ReduceFp32ToBf16(false)
, m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
, m_ImportEnabled(false)
@@ -139,9 +140,10 @@ struct OptimizerOptions
{}
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
- ModelOptions modelOptions = {}, bool exportEnabled = false)
+ ModelOptions modelOptions = {}, bool exportEnabled = false, bool debugToFile = false)
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
+ , m_DebugToFile(debugToFile)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
, m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
, m_ImportEnabled(importEnabled)
@@ -159,9 +161,10 @@ struct OptimizerOptions
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false,
- bool allowExpandedDims = false)
+ bool debugToFile = false, bool allowExpandedDims = false)
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
+ , m_DebugToFile(debugToFile)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
, m_shapeInferenceMethod(shapeInferenceMethod)
, m_ImportEnabled(importEnabled)
@@ -183,6 +186,7 @@ struct OptimizerOptions
stream << "\tReduceFp32ToFp16: " << m_ReduceFp32ToFp16 << "\n";
stream << "\tReduceFp32ToBf16: " << m_ReduceFp32ToBf16 << "\n";
stream << "\tDebug: " << m_Debug << "\n";
+ stream << "\tDebug to file: " << m_DebugToFile << "\n";
stream << "\tShapeInferenceMethod: " <<
(m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
@@ -215,6 +219,9 @@ struct OptimizerOptions
// Add debug data for easier troubleshooting
bool m_Debug;
+ // Pass debug data to separate output files for easier troubleshooting
+ bool m_DebugToFile;
+
/// Reduces all Fp32 operators in the model to Bf16 for faster processing.
/// @Note This feature works best if all operators of the model are in Fp32. ArmNN will add conversion layers
/// between layers that weren't in Fp32 in the first place or if the operator is not supported in Bf16.