about summary refs log tree commit diff
path: root/tests
diff options
context:
space:
mode:
author   Keith Davis <keith.davis@arm.com> 2022-10-14 15:50:33 +0100
committer KeithARM <keith.davis@arm.com> 2022-10-19 10:33:40 +0000
commit   15f9c68adef324cd0158cea3d021c0f6bef5eecf (patch)
tree     1cd48b345d182fd19efdc40a32e2540befd8f925 /tests
parent   7bbf56598010041ea46c3fa9d32604db777ee26e (diff)
download armnn-15f9c68adef324cd0158cea3d021c0f6bef5eecf.tar.gz
MLCE-545 INT8 TFLite model execution abnormal
* Add functionality to print output tensors to file in tempdir
* UnitTests

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Idfb4c186544187db1fecdfca11c662540f645439
Diffstat (limited to 'tests')
-rw-r--r--tests/ExecuteNetwork/ArmNNExecutor.cpp1
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkParams.cpp1
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkParams.hpp1
-rw-r--r--tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp5
-rw-r--r--tests/InferenceModel.hpp3
5 files changed, 11 insertions, 0 deletions
diff --git a/tests/ExecuteNetwork/ArmNNExecutor.cpp b/tests/ExecuteNetwork/ArmNNExecutor.cpp
index 797c09a1b2..330a239763 100644
--- a/tests/ExecuteNetwork/ArmNNExecutor.cpp
+++ b/tests/ExecuteNetwork/ArmNNExecutor.cpp
@@ -510,6 +510,7 @@ armnn::IOptimizedNetworkPtr ArmNNExecutor::OptimizeNetwork(armnn::INetwork* netw
options.m_ReduceFp32ToFp16 = m_Params.m_EnableFp16TurboMode;
options.m_ReduceFp32ToBf16 = m_Params.m_EnableBf16TurboMode;
options.m_Debug = m_Params.m_PrintIntermediate;
+ options.m_DebugToFile = m_Params.m_PrintIntermediateOutputsToFile;
options.m_shapeInferenceMethod = m_Params.m_InferOutputShape ?
armnn::ShapeInferenceMethod::InferAndValidate :
armnn::ShapeInferenceMethod::ValidateOnly;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index 17c08717e4..155a4c4a8b 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -126,6 +126,7 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
options.m_ReduceFp32ToBf16 = m_EnableBf16TurboMode;
options.m_Debug = m_PrintIntermediate;
+ options.m_DebugToFile = m_PrintIntermediateOutputsToFile;
options.m_ProfilingEnabled = m_EnableProfiling;
delegateOptions.SetInternalProfilingParams(m_EnableProfiling, armnn::ProfilingDetailsMethod::DetailsWithEvents);
options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index e60e3b8877..020dbdcced 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -50,6 +50,7 @@ struct ExecuteNetworkParams
std::vector<std::string> m_OutputTensorFiles;
bool m_ParseUnsupported = false;
bool m_PrintIntermediate;
+ bool m_PrintIntermediateOutputsToFile;
bool m_DontPrintOutputs;
bool m_QuantizeInput;
bool m_SaveCachedNetwork;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index de7bc051c7..5f19a1498c 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -289,6 +289,11 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
->implicit_value("true"))
+ ("F,print-intermediate-layers-to-file",
+ "If this option is enabled, the output of every graph layer will be printed within separate files.",
+ cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediateOutputsToFile)->default_value("false")
+ ->implicit_value("true"))
+
("parse-unsupported",
"Add unsupported operators as stand-in layers (where supported by parser)",
cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 268f60301c..d837fc1fcf 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -68,6 +68,7 @@ struct Params
bool m_EnableFp16TurboMode;
bool m_EnableBf16TurboMode;
bool m_PrintIntermediateLayers;
+ bool m_PrintIntermediateLayersToFile;
bool m_ParseUnsupported;
bool m_InferOutputShape;
bool m_EnableFastMath;
@@ -91,6 +92,7 @@ struct Params
, m_EnableFp16TurboMode(false)
, m_EnableBf16TurboMode(false)
, m_PrintIntermediateLayers(false)
+ , m_PrintIntermediateLayersToFile(false)
, m_ParseUnsupported(false)
, m_InferOutputShape(false)
, m_EnableFastMath(false)
@@ -452,6 +454,7 @@ public:
options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
options.m_Debug = params.m_PrintIntermediateLayers;
+ options.m_DebugToFile = params.m_PrintIntermediateLayersToFile;
options.m_shapeInferenceMethod = params.m_InferOutputShape ?
armnn::ShapeInferenceMethod::InferAndValidate : armnn::ShapeInferenceMethod::ValidateOnly;
options.m_ProfilingEnabled = m_EnableProfiling;