path: root/delegate
author    Mike Kelly <mike.kelly@arm.com>    2022-05-16 23:10:42 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>    2022-05-18 14:19:19 +0000
commit    80512b0e1e98cb6782f5526fc8308ae6a571cd59 (patch)
tree      bad2f314defadd4b340343d99b6e157b46622039 /delegate
parent    d0420cb62b71f9c015d2b0e889011899051e56c0 (diff)
download  armnn-80512b0e1e98cb6782f5526fc8308ae6a571cd59.tar.gz
IVGCVSW-6929 Support for models with implicit expanded dimensions

 * Added allow-expanded-dims to the TFLite parser and the ArmNN delegate.
 * If true, ArmNN will disregard dimensions with a size of 1 when validating
   tensor shapes. Tensor sizes must still match.
 * This allows us to support models where tensors have expanded dimensions
   (i.e. extra dimensions with a size of 1).
 * Fixed a bug in Network where it assumed that only the first option could be
   ShapeInferenceMethod.
 * Fixed a bug where m_ShapeInferenceMethod was lost when copying or moving Graphs.
 * Changed the Delegate to pass "infer-output-shape", "allow-expanded-dims" and
   other BackendOptions through to the Network during construction.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ibe7c5ae6597796fc9164cb07bd372bd7f8f8cacf
Diffstat (limited to 'delegate')
-rw-r--r--delegate/include/DelegateOptions.hpp11
-rw-r--r--delegate/src/DelegateOptions.cpp18
-rw-r--r--delegate/src/armnn_delegate.cpp2
3 files changed, 30 insertions, 1 deletion
diff --git a/delegate/include/DelegateOptions.hpp b/delegate/include/DelegateOptions.hpp
index 7f7eaa5bb9..d789ea7285 100644
--- a/delegate/include/DelegateOptions.hpp
+++ b/delegate/include/DelegateOptions.hpp
@@ -163,6 +163,17 @@ public:
* Possible values: [filenameString] \n
* Description: Serialize the optimized network to the file specified in "dot" format.
*
+ * Option key: "infer-output-shape" \n
+ * Possible values: ["true"/"false"] \n
+ * Description: Infers output tensor shape from input tensor shape and validates where applicable.
+ *
+ * Option key: "allow-expanded-dims" \n
+ * Possible values: ["true"/"false"] \n
+ * Description: If true, ArmNN will disregard dimensions with a size of 1 when validating tensor shapes,
+ * but tensor sizes must still match. \n
+ * This is an Experimental parameter that is incompatible with "infer-output-shape". \n
+ * This parameter may be removed in a later update.
+ *
* @param[in] option_keys Delegate option names
* @param[in] options_values Delegate option values
* @param[in] num_options Number of delegate options
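
For context, a minimal sketch of how the new keys could be passed to the delegate through the key/value constructor documented above. The option keys and values come from this patch; the "backends" key, the nullptr error callback, and the delegate-creation step are assumed surrounding setup for illustration only, not part of this change.

    #include <DelegateOptions.hpp>
    #include <armnn_delegate.hpp>

    #include <vector>

    int main()
    {
        // "allow-expanded-dims" is experimental and documented as incompatible
        // with "infer-output-shape", so only one of the two is enabled here.
        std::vector<const char*> keys   = { "backends", "allow-expanded-dims" };
        std::vector<const char*> values = { "CpuAcc",   "true" };

        // Assumed constructor shape: (options_keys, options_values, num_options,
        // report_error); the error callback is omitted (nullptr) for brevity.
        armnnDelegate::DelegateOptions delegateOptions(keys.data(), values.data(),
                                                       keys.size(), nullptr);

        // The delegate would then be created from these options and registered
        // with the TfLite interpreter, e.g. via
        // armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions).
        return 0;
    }
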
diff --git a/delegate/src/DelegateOptions.cpp b/delegate/src/DelegateOptions.cpp
index 9413a4689a..f3e13c90c6 100644
--- a/delegate/src/DelegateOptions.cpp
+++ b/delegate/src/DelegateOptions.cpp
@@ -156,6 +156,24 @@ DelegateOptions::DelegateOptions(char const* const* options_keys,
{
optimizerOptions.m_Debug = armnn::stringUtils::StringToBool(options_values[i]);
}
+ // Infer output-shape
+ else if (std::string(options_keys[i]) == std::string("infer-output-shape"))
+ {
+ armnn::BackendOptions backendOption("ShapeInferenceMethod",
+ {
+ { "InferAndValidate", armnn::stringUtils::StringToBool(options_values[i]) }
+ });
+ optimizerOptions.m_ModelOptions.push_back(backendOption);
+ }
+ // Allow expanded dims
+ else if (std::string(options_keys[i]) == std::string("allow-expanded-dims"))
+ {
+ armnn::BackendOptions backendOption("AllowExpandedDims",
+ {
+ { "AllowExpandedDims", armnn::stringUtils::StringToBool(options_values[i]) }
+ });
+ optimizerOptions.m_ModelOptions.push_back(backendOption);
+ }
// Process memory-import
else if (std::string(options_keys[i]) == std::string("memory-import"))
{
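
The parsing added above maps the string options onto armnn::BackendOptions. A minimal sketch of the programmatic equivalent, mirroring the option names used in the diff (the OptimizerOptions instance and the eventual armnn::Optimize call are assumed context):

    #include <armnn/BackendOptions.hpp>
    #include <armnn/INetwork.hpp>

    // Equivalent of passing "allow-expanded-dims=true" to the delegate.
    armnn::OptimizerOptions optimizerOptions;
    armnn::BackendOptions allowExpandedDims("AllowExpandedDims",
                                            { { "AllowExpandedDims", true } });
    optimizerOptions.m_ModelOptions.push_back(allowExpandedDims);

    // "infer-output-shape=true" maps to ShapeInferenceMethod in the same way.
    // Only one of the two should be enabled; they are documented as incompatible.
    armnn::BackendOptions shapeInference("ShapeInferenceMethod",
                                         { { "InferAndValidate", true } });
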
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 4d71f26b09..6e1a91f9e4 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -308,7 +308,7 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
DelegateData delegateData(delegate->m_Options.GetBackends());
// Build ArmNN Network
- armnn::NetworkOptions networkOptions = {};
+ armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().m_ModelOptions;
armnn::NetworkId networkId;
delegateData.m_Network = armnn::INetwork::Create(networkOptions);
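
This is the change the commit message describes as passing BackendOptions through to the Network during construction: previously the NetworkOptions were always empty, so options such as ShapeInferenceMethod and AllowExpandedDims never reached the Network when it was created. A minimal sketch of the effect in isolation, with optimizerOptions standing in for delegate->m_Options.GetOptimizerOptions():

    // The delegate's model options are now forwarded to the network at
    // construction time rather than only at optimization time.
    armnn::NetworkOptions networkOptions = optimizerOptions.m_ModelOptions;
    armnn::INetworkPtr network = armnn::INetwork::Create(networkOptions);
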