author     Mike Kelly <mike.kelly@arm.com>     2022-05-16 23:10:42 +0100
committer  Ryan OShea <ryan.oshea3@arm.com>    2022-05-19 11:06:34 +0100
commit     21fe06fad6760a0d453f2de9c8dd790983ae940c (patch)
tree       bad2f314defadd4b340343d99b6e157b46622039 /tests
parent     b5e03cc39cdabc49bf117c119073f60e9d36a474 (diff)
download   armnn-21fe06fad6760a0d453f2de9c8dd790983ae940c.tar.gz
IVGCVSW-6929 Support for models with implicit expanded dimensions

 * Added allow-expanded-dims to TFLite parser and ArmNN delegate
 * If true ArmNN will disregard dimensions with a size of 1 when validating
   tensor shapes. Tensor sizes must still match.
 * This allows us to support models where tensors have expanded dimensions
   (i.e. extra dimensions with a size of 1).
 * Fixed bug in Network where it assumed that only the first option could be
   ShapeInferenceMethod.
 * Fixed bug where m_ShapeInferenceMethod was lost when copying or moving Graphs.
 * Changed Delegate to pass "infer-output-shape", "allow-expanded-dims" and other
   BackendOptions through to the Network during construction.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ibe7c5ae6597796fc9164cb07bd372bd7f8f8cacf
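The new behaviour is carried by a BackendOption named "AllowExpandedDims" placed in OptimizerOptions::m_ModelOptions, which the changes below forward to the network. A minimal sketch of enabling it when building and optimizing a network directly, assuming the public ArmNN and armnnTfLiteParser APIs; the model path and the "CpuAcc" backend are placeholders, not part of this patch:

// Minimal sketch, not taken from this patch: assumes the public ArmNN API
// (Optimize, IRuntime); "model.tflite" and "CpuAcc" are placeholders.
// With AllowExpandedDims set, a tensor of shape [1, 224, 224, 3] may validate
// against [224, 224, 3]: size-1 dimensions are disregarded, but the total
// number of elements must still match.
#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <vector>

int main()
{
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

    armnn::IRuntime::CreationOptions runtimeOptions;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(runtimeOptions);

    // Request the relaxed shape validation via a model-level BackendOption.
    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ModelOptions.push_back(
        armnn::BackendOptions("AllowExpandedDims", {{"AllowExpandedDims", true}}));

    std::vector<armnn::BackendId> backends = { "CpuAcc" };
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, backends, runtime->GetDeviceSpec(), optimizerOptions);

    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}

In ExecuteNetwork the same flag is also handed to the TfLite parser (see InferenceModel.hpp below), so a complete setup would likely set it in both places.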
Diffstat (limited to 'tests')
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetwork.cpp               |  1
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.cpp         | 21
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkParams.hpp         |  1
-rw-r--r--  tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp |  7
-rw-r--r--  tests/InferenceModel.hpp                              |  3
5 files changed, 33 insertions, 0 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index ddabf3c11f..f0a3d0821e 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -389,6 +389,7 @@ int MainImpl(const ExecuteNetworkParams& params,
// Creates an InferenceModel, which will parse the model and load it into an IRuntime.
typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
inferenceModelParams.m_ModelPath = params.m_ModelPath;
+ inferenceModelParams.m_AllowExpandedDims = params.m_AllowExpandedDims;
inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
index b3d18cdfd1..cc75bb4323 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
@@ -232,6 +232,11 @@ void ExecuteNetworkParams::ValidateParams()
{
ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
}
+
+ if (m_AllowExpandedDims && m_InferOutputShape)
+ {
+ throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
+ }
}
#if defined(ARMNN_TFLITE_DELEGATE)
@@ -277,6 +282,22 @@ armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
options.m_ModelOptions.push_back(gpuAcc);
options.m_ModelOptions.push_back(cpuAcc);
+ if (m_InferOutputShape)
+ {
+ armnn::BackendOptions networkOption("ShapeInferenceMethod",
+ {
+ {"InferAndValidate", true}
+ });
+ options.m_ModelOptions.push_back(networkOption);
+ }
+ if (m_AllowExpandedDims)
+ {
+ armnn::BackendOptions networkOption("AllowExpandedDims",
+ {
+ {"AllowExpandedDims", true}
+ });
+ options.m_ModelOptions.push_back(networkOption);
+ }
delegateOptions.SetOptimizerOptions(options);
// If v,visualize-optimized-model is enabled then construct a file name for the dot file.
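Applications that use the ArmNN TfLite delegate directly can supply the same model options through armnnDelegate::DelegateOptions, which is what ToDelegateOptions() builds above. A sketch along the lines of the delegate quick-start pattern; the header paths, the "CpuAcc" backend and the caller-owned tflite::Interpreter are assumptions, not taken from this patch:

// Sketch only: assumes the ArmNN delegate public header (armnn_delegate.hpp) and a
// tflite::Interpreter built elsewhere; "CpuAcc" is a placeholder backend.
#include <armnn_delegate.hpp>
#include <armnn/ArmNN.hpp>
#include <tensorflow/lite/interpreter.h>
#include <memory>
#include <vector>

// The returned delegate must stay alive for as long as the interpreter uses it.
using ArmnnDelegatePtr =
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>;

ArmnnDelegatePtr AttachArmnnDelegate(tflite::Interpreter& interpreter)
{
    std::vector<armnn::BackendId> backends = { "CpuAcc" };
    armnnDelegate::DelegateOptions delegateOptions(backends);

    // Forwarded to the Network during construction, as in ToDelegateOptions() above.
    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ModelOptions.push_back(
        armnn::BackendOptions("AllowExpandedDims", {{"AllowExpandedDims", true}}));
    delegateOptions.SetOptimizerOptions(optimizerOptions);

    ArmnnDelegatePtr armnnDelegatePtr(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                      armnnDelegate::TfLiteArmnnDelegateDelete);
    interpreter.ModifyGraphWithDelegate(armnnDelegatePtr.get());
    return armnnDelegatePtr;
}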
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
index 04a073311d..5ef2b6ea7c 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
@@ -25,6 +25,7 @@ struct ExecuteNetworkParams
TfliteInterpreter
};
+ bool m_AllowExpandedDims;
std::string m_CachedNetworkFilePath;
std::vector<armnn::BackendId> m_ComputeDevices;
bool m_Concurrent;
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
index c84c79ea78..ad35092c1d 100644
--- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
@@ -228,6 +228,13 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
"parser)",
cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
+ ("allow-expanded-dims",
+ "If true will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
+ "still match. This is an Experimental parameter that is incompatible with infer-output-shape. "
+ "This parameter may be removed in a later update. ",
+ cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
+ ->implicit_value("true"))
+
("iterations",
"Number of iterations to run the network for, default is set to 1. "
"If you wish to run the model with different input data for every execution you can do so by "
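The new switch defaults to false and, per the ValidateParams() change above, cannot be combined with infer-output-shape. A hypothetical invocation, assuming the existing ExecuteNetwork option names for the model path and the compute backend keep their usual spelling (those two flags are not part of this patch):

ExecuteNetwork --model-path model.tflite --compute CpuAcc --allow-expanded-dims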
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index e2a1a97568..93716e1a6f 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -95,6 +95,7 @@ struct Params
std::vector<armnn::BackendId> m_ComputeDevices;
std::string m_DynamicBackendsPath;
size_t m_SubgraphId;
+ bool m_AllowExpandedDims;
bool m_IsModelBinary;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
@@ -117,6 +118,7 @@ struct Params
Params()
: m_ComputeDevices{}
, m_SubgraphId(0)
+ , m_AllowExpandedDims(false)
, m_IsModelBinary(true)
, m_VisualizePostOptimizationModel(false)
, m_EnableFp16TurboMode(false)
@@ -268,6 +270,7 @@ public:
// Create a network from a file on disk
IParser::TfLiteParserOptions options;
+ options.m_AllowExpandedDims = params.m_AllowExpandedDims;
options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
options.m_InferAndValidate = params.m_InferOutputShape;
auto parser(IParser::Create(options));
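The stand-alone TfLite parser exposes the same switch through TfLiteParserOptions, exactly as the InferenceModel change above sets it. A minimal sketch, with the model path as a placeholder:

// Sketch only: assumes the public armnnTfLiteParser API; "model.tflite" is a placeholder.
#include <armnnTfLiteParser/ITfLiteParser.hpp>

armnn::INetworkPtr ParseWithExpandedDims()
{
    armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
    options.m_AllowExpandedDims = true;   // disregard size-1 dims when validating shapes
    options.m_InferAndValidate  = false;  // incompatible with allow-expanded-dims

    auto parser = armnnTfLiteParser::ITfLiteParser::Create(options);
    return parser->CreateNetworkFromBinaryFile("model.tflite");
}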