From 03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721 Mon Sep 17 00:00:00 2001 From: Colm Donelan Date: Mon, 30 May 2022 15:20:36 +0100 Subject: IVGCVSW-6873 Import inputs but don't export outputs fails. Only one bool is used to indicate whether inputs should be imported. However, it's possible for the user to want to import inputs but not export outputs. In addition it's possible for a user to enable import during optimize but then pass a memory source that does not require import. * Add m_ExportEnabled to INetwork.hpp. * Modify Network::dNetwork to consider both m_ImportEnabled and m_ExportEnabled. * Add ValidateSourcesMatchOptimizedNetwork to LoadedNetwork to validate import options between optimize and network load. * Update the TfLite delegate to consider the exportEnabled flag in the optimizer. !armnn-internal-tests:425350 Signed-off-by: Colm Donelan Change-Id: I776eab81595898e43f91ab40306962eae61329f4 --- include/armnn/INetwork.hpp | 11 +++++++++-- include/armnn/Version.hpp | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/armnn') diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 89b4776d39..475367ece5 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -144,10 +144,11 @@ struct OptimizerOptions , m_ImportEnabled(false) , m_ModelOptions() , m_ProfilingEnabled(false) + , m_ExportEnabled(false) {} OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled, - ModelOptions modelOptions = {}) + ModelOptions modelOptions = {}, bool exportEnabled = false) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) @@ -155,6 +156,7 @@ struct OptimizerOptions , m_ImportEnabled(importEnabled) , m_ModelOptions(modelOptions) , m_ProfilingEnabled(false) + , m_ExportEnabled(exportEnabled) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -164,7 +166,7 @@ struct OptimizerOptions OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool 
reduceFp32ToBf16 = false, ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly, - bool importEnabled = false, ModelOptions modelOptions = {}) + bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) @@ -172,6 +174,7 @@ struct OptimizerOptions , m_ImportEnabled(importEnabled) , m_ModelOptions(modelOptions) , m_ProfilingEnabled(false) + , m_ExportEnabled(exportEnabled) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -189,6 +192,7 @@ struct OptimizerOptions stream << "\tShapeInferenceMethod: " << (m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n"; stream << "\tImportEnabled: " << m_ImportEnabled << "\n"; + stream << "\tExportEnabled: " << m_ExportEnabled << "\n"; stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n"; stream << "\tModelOptions: \n"; @@ -234,6 +238,9 @@ struct OptimizerOptions // Enable profiling dump of the optimizer phase bool m_ProfilingEnabled; + + // Enable Export + bool m_ExportEnabled; }; class IWorkloadFactory; diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp index d41c4ec8af..7951eacf1d 100644 --- a/include/armnn/Version.hpp +++ b/include/armnn/Version.hpp @@ -10,7 +10,7 @@ #define STRINGIFY_MACRO(s) #s // ArmNN version components -#define ARMNN_MAJOR_VERSION 29 +#define ARMNN_MAJOR_VERSION 30 #define ARMNN_MINOR_VERSION 0 #define ARMNN_PATCH_VERSION 0 -- cgit v1.2.1