commit    a0f8b15d4ddb5075f380003ff31b271d389d3b66 (patch)
author    James Conroy <james.conroy@arm.com>  2022-06-21 11:31:47 +0000
committer James Conroy <james.conroy@arm.com>  2022-06-21 11:31:47 +0000
tree      5a4f351ee688bd760449c82c455b0e324b11f88d
parent    03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721 (diff)
Revert "IVGCVSW-6873 Import inputs but don't export outputs fails."
This reverts commit 03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721.

Reason for revert: Caused failures in tests located in internal repo.

Change-Id: If35cb0ede349b270e4e7827324382e09455d8cfa
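For context, a minimal sketch of the API surface this revert restores: OptimizerOptions goes back to a single m_ImportEnabled flag (the m_ExportEnabled member is removed), and the per-direction memory sources are supplied only through INetworkProperties at load time. The header locations and the asyncEnabled parameter name are assumptions for illustration, not part of this patch.

```cpp
#include <armnn/INetwork.hpp>  // OptimizerOptions (struct shown in the hunks below)
#include <armnn/IRuntime.hpp>  // INetworkProperties, MemorySource; location assumed

// Optimize-time options keep a single import flag after the revert.
armnn::OptimizerOptions MakeOptimizerOptions()
{
    armnn::OptimizerOptions optOptions;
    optOptions.m_ImportEnabled = true;  // m_ExportEnabled no longer exists
    return optOptions;
}

// Input and output directions remain a load-time concern.
armnn::INetworkProperties MakeNetworkProperties()
{
    return armnn::INetworkProperties(/*asyncEnabled=*/false,
                                     armnn::MemorySource::Malloc,   // m_InputSource
                                     armnn::MemorySource::Malloc);  // m_OutputSource
}
```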
-rw-r--r--  delegate/include/Version.hpp                                |   2
-rw-r--r--  delegate/src/armnn_delegate.cpp                             |  14
-rw-r--r--  delegate/src/test/DelegateOptionsTest.cpp                   |   2
-rw-r--r--  include/armnn/INetwork.hpp                                  |  11
-rw-r--r--  include/armnn/Version.hpp                                   |   2
-rw-r--r--  include/armnnOnnxParser/Version.hpp                         |   2
-rw-r--r--  include/armnnTfLiteParser/Version.hpp                       |   2
-rw-r--r--  python/pyarmnn/README.md                                    |  14
-rw-r--r--  python/pyarmnn/examples/image_classification/README.md     |   2
-rw-r--r--  python/pyarmnn/examples/object_detection/README.md         |   2
-rw-r--r--  python/pyarmnn/examples/speech_recognition/README.md       |   2
-rw-r--r--  python/pyarmnn/src/pyarmnn/_version.py                      |   4
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i     |   7
-rw-r--r--  python/pyarmnn/test/test_modeloption.py                     |   9
-rw-r--r--  python/pyarmnn/test/test_runtime.py                         |   4
-rw-r--r--  python/pyarmnn/test/test_setup.py                           |   8
-rw-r--r--  python/pyarmnn/test/test_version.py                         |   4
-rw-r--r--  samples/ObjectDetection/Readme.md                           |   4
-rw-r--r--  src/armnn/LoadedNetwork.cpp                                 |  96
-rw-r--r--  src/armnn/Network.cpp                                       |  21
-rw-r--r--  src/armnn/Network.hpp                                       |   1
-rw-r--r--  src/armnn/Runtime.hpp                                       |   4
-rw-r--r--  src/armnn/test/RuntimeTests.cpp                             | 187
-rw-r--r--  src/armnn/test/TensorHandleStrategyTest.cpp                 |   2
-rw-r--r--  src/backends/backendsCommon/test/CompatibilityTests.cpp    |   2
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp       |  32
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp |   2
-rw-r--r--  src/backends/cl/test/ClCustomAllocatorTests.cpp             |   1
-rw-r--r--  src/backends/cl/test/ClFallbackTests.cpp                    |   2
-rw-r--r--  src/backends/cl/test/ClImportTensorHandleTests.cpp          |   7
-rw-r--r--  src/backends/cl/test/ClOptimizedNetworkTests.cpp            |   2
-rw-r--r--  src/backends/neon/test/NeonFallbackTests.cpp                |   6
-rw-r--r--  src/backends/neon/test/NeonOptimizedNetworkTests.cpp        |   2
33 files changed, 69 insertions, 393 deletions
diff --git a/delegate/include/Version.hpp b/delegate/include/Version.hpp
index c14857e320..34555b2c6f 100644
--- a/delegate/include/Version.hpp
+++ b/delegate/include/Version.hpp
@@ -14,7 +14,7 @@ namespace armnnDelegate
// ArmNN Delegate version components
#define DELEGATE_MAJOR_VERSION 26
-#define DELEGATE_MINOR_VERSION 1
+#define DELEGATE_MINOR_VERSION 0
#define DELEGATE_PATCH_VERSION 0
/// DELEGATE_VERSION: "X.Y.Z"
diff --git a/delegate/src/armnn_delegate.cpp b/delegate/src/armnn_delegate.cpp
index 1b6d68eb7a..bb2f3c319a 100644
--- a/delegate/src/armnn_delegate.cpp
+++ b/delegate/src/armnn_delegate.cpp
@@ -394,20 +394,14 @@ ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
// Load graph into runtime
std::string errorMessage;
armnn::Status loadingStatus;
- armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
- armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
- // There's a bit of an assumption here that the delegate will only support Malloc memory source.
+ armnn::MemorySource memorySource = armnn::MemorySource::Undefined;
if (delegate->m_Options.GetOptimizerOptions().m_ImportEnabled)
{
- inputSource = armnn::MemorySource::Malloc;
- }
- if (delegate->m_Options.GetOptimizerOptions().m_ExportEnabled)
- {
- outputSource = armnn::MemorySource::Malloc;
+ memorySource = armnn::MemorySource::Malloc;
}
armnn::INetworkProperties networkProperties(false,
- inputSource,
- outputSource,
+ memorySource,
+ memorySource,
delegate->m_Options.GetInternalProfilingState(),
delegate->m_Options.GetInternalProfilingDetail());
loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
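The reverted delegate logic, shown in isolation as a sketch; importEnabled, profilingState and profilingDetail here are stand-ins for the delegate option getters used above, not new API:

```cpp
// Sketch only: one MemorySource derived from the import flag is applied to
// both inputs and outputs, as in the restored code above.
armnn::MemorySource memorySource = armnn::MemorySource::Undefined;
if (importEnabled)  // delegate->m_Options.GetOptimizerOptions().m_ImportEnabled
{
    memorySource = armnn::MemorySource::Malloc;
}
armnn::INetworkProperties networkProperties(false,
                                            memorySource,   // inputs
                                            memorySource,   // outputs
                                            profilingState,
                                            profilingDetail);
```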
diff --git a/delegate/src/test/DelegateOptionsTest.cpp b/delegate/src/test/DelegateOptionsTest.cpp
index c9f1530968..126bf30a25 100644
--- a/delegate/src/test/DelegateOptionsTest.cpp
+++ b/delegate/src/test/DelegateOptionsTest.cpp
@@ -173,7 +173,7 @@ TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
});
modelOptions.push_back(cpuAcc);
- armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+ armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 475367ece5..89b4776d39 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -144,11 +144,10 @@ struct OptimizerOptions
, m_ImportEnabled(false)
, m_ModelOptions()
, m_ProfilingEnabled(false)
- , m_ExportEnabled(false)
{}
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
- ModelOptions modelOptions = {}, bool exportEnabled = false)
+ ModelOptions modelOptions = {})
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -156,7 +155,6 @@ struct OptimizerOptions
, m_ImportEnabled(importEnabled)
, m_ModelOptions(modelOptions)
, m_ProfilingEnabled(false)
- , m_ExportEnabled(exportEnabled)
{
if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
{
@@ -166,7 +164,7 @@ struct OptimizerOptions
OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
- bool importEnabled = false, ModelOptions modelOptions = {}, bool exportEnabled = false)
+ bool importEnabled = false, ModelOptions modelOptions = {})
: m_ReduceFp32ToFp16(reduceFp32ToFp16)
, m_Debug(debug)
, m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -174,7 +172,6 @@ struct OptimizerOptions
, m_ImportEnabled(importEnabled)
, m_ModelOptions(modelOptions)
, m_ProfilingEnabled(false)
- , m_ExportEnabled(exportEnabled)
{
if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
{
@@ -192,7 +189,6 @@ struct OptimizerOptions
stream << "\tShapeInferenceMethod: " <<
(m_shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly ? "ValidateOnly" : "InferAndValidate") << "\n";
stream << "\tImportEnabled: " << m_ImportEnabled << "\n";
- stream << "\tExportEnabled: " << m_ExportEnabled << "\n";
stream << "\tProfilingEnabled: " << m_ProfilingEnabled << "\n";
stream << "\tModelOptions: \n";
@@ -238,9 +234,6 @@ struct OptimizerOptions
// Enable profiling dump of the optimizer phase
bool m_ProfilingEnabled;
-
- // Enable Export
- bool m_ExportEnabled;
};
class IWorkloadFactory;
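Call sites collapse to the five-argument constructor accordingly; a minimal usage sketch matching the DelegateOptionsTest.cpp change above:

```cpp
armnn::ModelOptions modelOptions;  // optionally filled with BackendOptions
armnn::OptimizerOptions optimizerOptions(/*reduceFp32ToFp16=*/false,
                                         /*debug=*/false,
                                         /*reduceFp32ToBf16=*/false,
                                         /*importEnabled=*/false,
                                         modelOptions);  // no trailing exportEnabled
```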
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 7951eacf1d..d41c4ec8af 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
#define STRINGIFY_MACRO(s) #s
// ArmNN version components
-#define ARMNN_MAJOR_VERSION 30
+#define ARMNN_MAJOR_VERSION 29
#define ARMNN_MINOR_VERSION 0
#define ARMNN_PATCH_VERSION 0
diff --git a/include/armnnOnnxParser/Version.hpp b/include/armnnOnnxParser/Version.hpp
index 33a2846263..ed9d8690ec 100644
--- a/include/armnnOnnxParser/Version.hpp
+++ b/include/armnnOnnxParser/Version.hpp
@@ -14,7 +14,7 @@ namespace armnnOnnxParser
// OnnxParser version components
#define ONNX_PARSER_MAJOR_VERSION 24
-#define ONNX_PARSER_MINOR_VERSION 5
+#define ONNX_PARSER_MINOR_VERSION 4
#define ONNX_PARSER_PATCH_VERSION 0
/// ONNX_PARSER_VERSION: "X.Y.Z"
diff --git a/include/armnnTfLiteParser/Version.hpp b/include/armnnTfLiteParser/Version.hpp
index 5db527ec8c..eee2124678 100644
--- a/include/armnnTfLiteParser/Version.hpp
+++ b/include/armnnTfLiteParser/Version.hpp
@@ -14,7 +14,7 @@ namespace armnnTfLiteParser
// TfLiteParser version components
#define TFLITE_PARSER_MAJOR_VERSION 24
-#define TFLITE_PARSER_MINOR_VERSION 5
+#define TFLITE_PARSER_MINOR_VERSION 4
#define TFLITE_PARSER_PATCH_VERSION 0
/// TFLITE_PARSER_VERSION: "X.Y.Z"
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index 6d8b42d41a..7dc8d8693a 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -91,14 +91,14 @@ This step will put all generated files under `./src/pyarmnn/_generated` folder a
```bash
$ python setup.py sdist
```
-As the result you will get `./dist/pyarmnn-30.0.0.tar.gz` file. As you can see it is platform independent.
+As the result you will get `./dist/pyarmnn-29.0.0.tar.gz` file. As you can see it is platform independent.
##### 5. Build the binary package
```bash
$ python setup.py bdist_wheel
```
-As the result you will get something like `./dist/pyarmnn-30.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
+As the result you will get something like `./dist/pyarmnn-29.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
is platform dependent.
# PyArmNN installation
@@ -107,8 +107,8 @@ PyArmNN can be distributed as a source package or a binary package (wheel).
Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
-* Linux x86 64bit machine: pyarmnn-30.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-30.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-29.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-29.0.0-cp36-cp36m-*linux_aarch64*.whl
The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
@@ -126,7 +126,7 @@ $ gcc --print-search-dirs
```
Install PyArmNN from binary by pointing to the wheel file:
```bash
-$ pip install /path/to/pyarmnn-30.0.0-cp36-cp36m-linux_aarch64.whl
+$ pip install /path/to/pyarmnn-29.0.0-cp36-cp36m-linux_aarch64.whl
```
## Installing from source package
@@ -143,7 +143,7 @@ $ export ARMNN_INCLUDE=/full/path/to/armnn/include:/full/path/to/armnn/profilin
Install PyArmNN as follows:
```bash
-$ pip install /path/to/pyarmnn-30.0.0.tar.gz
+$ pip install /path/to/pyarmnn-29.0.0.tar.gz
```
If PyArmNN installation script fails to find Arm NN libraries it will raise an error like this
@@ -157,7 +157,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
```
# PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index a360f01148..7275a2523f 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
```
##### Dependencies
diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 215cf772a2..7a946ad6f5 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -54,7 +54,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
```
##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index d5fee8a010..854cdaf03b 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@ You can also verify it by running the following and getting output similar to be
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'30.0.0'
+'29.0.0'
```
### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index d1b1ca290c..7c0940e7f3 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: MIT
import os
-version_info = (30, 0, 0)
+version_info = (29, 0, 0)
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
@@ -24,7 +24,7 @@ def check_armnn_version(installed_armnn_version: str, expected_armnn_version: st
"""Compares expected Arm NN version and Arm NN version used to build the package.
Args:
- installed_armnn_version (str): Arm NN version used to generate the package (e.g. 30.0.0)
+ installed_armnn_version (str): Arm NN version used to generate the package (e.g. 29.0.0)
expected_armnn_version (str): Expected Arm NN version
Returns:
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 55b6795c90..a2f57a3aa9 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -29,7 +29,7 @@ Contains:
that can not be reduced will be left in Fp32.
m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
that can not be reduced will be left in Fp32.
- m_ImportEnabled (bool): Enable memory import of inport tensors.
+ m_ImportEnabled (bool): Enable memory import.
m_shapeInferenceMethod: The ShapeInferenceMethod modifies how the output shapes are treated.
When ValidateOnly is selected, the output shapes are inferred from the input parameters
of the layer and any mismatch is reported.
@@ -38,7 +38,6 @@ Contains:
with tensors which rank or dimension sizes are not specified explicitly, however this
information can be calculated from the inputs.
m_ModelOptions: List of backends optimisation options.
- m_ExportEnabled (bool): Enable memory export of output tensors.
") OptimizerOptions;
@@ -52,8 +51,7 @@ struct OptimizerOptions
bool reduceFp32ToBf16 = false,
ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
bool importEnabled = false,
- std::vector<armnn::BackendOptions> modelOptions = {},
- bool exportEnabled = false);
+ std::vector<armnn::BackendOptions> modelOptions = {});
bool m_ReduceFp32ToBf16;
bool m_ReduceFp32ToFp16;
@@ -61,7 +59,6 @@ struct OptimizerOptions
ShapeInferenceMethod m_shapeInferenceMethod;
bool m_ImportEnabled;
std::vector<armnn::BackendOptions> m_ModelOptions;
- bool m_ExportEnabled;
};
%model_options_clear;
diff --git a/python/pyarmnn/test/test_modeloption.py b/python/pyarmnn/test/test_modeloption.py
index a47d2da358..c03d4a8cce 100644
--- a/python/pyarmnn/test/test_modeloption.py
+++ b/python/pyarmnn/test/test_modeloption.py
@@ -71,8 +71,7 @@ def test_optimizer_options_with_model_opt():
False,
ShapeInferenceMethod_InferAndValidate,
True,
- [a],
- True)
+ [a])
mo = oo.m_ModelOptions
@@ -113,8 +112,7 @@ def test_optimizer_options_fail():
False,
ShapeInferenceMethod_InferAndValidate,
True,
- a,
- True)
+ a)
assert "Wrong number or type of arguments" in str(err.value)
@@ -124,8 +122,7 @@ def test_optimizer_options_fail():
True,
ShapeInferenceMethod_InferAndValidate,
True,
- [a],
- True)
+ [a])
assert "BFloat16 and Float16 optimization cannot be enabled at the same time" in str(err.value)
diff --git a/python/pyarmnn/test/test_runtime.py b/python/pyarmnn/test/test_runtime.py
index a6c4e1dcc0..a37772c5df 100644
--- a/python/pyarmnn/test/test_runtime.py
+++ b/python/pyarmnn/test/test_runtime.py
@@ -156,8 +156,8 @@ def test_load_network_properties_provided(random_runtime):
opt_network, _ = ann.Optimize(network, preferred_backends,
runtime.GetDeviceSpec(), ann.OptimizerOptions())
- inputSource = ann.MemorySource_Undefined
- outputSource = ann.MemorySource_Undefined
+ inputSource = ann.MemorySource_Malloc
+ outputSource = ann.MemorySource_Malloc
properties = ann.INetworkProperties(False, inputSource, outputSource)
net_id, messages = runtime.LoadNetwork(opt_network, properties)
assert "" == messages
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index 27feda2647..4a6f930cbf 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@ def test_gcc_serch_path():
def test_armnn_version():
- check_armnn_version('30.0.0', '30.0.0')
+ check_armnn_version('29.0.0', '29.0.0')
def test_incorrect_armnn_version():
with pytest.raises(AssertionError) as err:
- check_armnn_version('30.0.0', '30.1.0')
+ check_armnn_version('29.0.0', '29.1.0')
- assert 'Expected ArmNN version is 30.1.0 but installed ArmNN version is 30.0.0' in str(err.value)
+ assert 'Expected ArmNN version is 29.1.0 but installed ArmNN version is 29.0.0' in str(err.value)
def test_armnn_version_patch_does_not_matter():
- check_armnn_version('30.0.0', '30.0.1')
+ check_armnn_version('29.0.0', '29.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index 83606ab15b..f74ae020cf 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@ def test_dev_version():
importlib.reload(v)
- assert "30.0.0.dev1" == v.__version__
+ assert "29.0.0.dev1" == v.__version__
del os.environ["PYARMNN_DEV_VER"]
del v
@@ -30,7 +30,7 @@ def test_arm_version_not_affected():
importlib.reload(v)
- assert "30.0.0" == v.__arm_ml_version__
+ assert "29.0.0" == v.__arm_ml_version__
del os.environ["PYARMNN_DEV_VER"]
del v
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index bd84e26001..194a3e918d 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -253,8 +253,8 @@ From the build directory, copy the following to the host platform:
The full list of libs after cross-compilation to copy on your board:
```
libarmnn.so
-libarmnn.so.30
-libarmnn.so.30.0
+libarmnn.so.29
+libarmnn.so.29.0
For Arm NN public C++ API mode:
libarmnnTfLiteParser.so
libarmnnTfLiteParser.so.24.4
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index a27add921e..ec79d5da3e 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -84,87 +84,6 @@ void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils
} // anonymous
-/**
- * This function performs a sanity check to ensure that the combination of input and output memory source matches the
- * values for importEnabled and exportEnabled that were specified during optimization. During optimization the tensor
- * handle factories are chosen based on whether import and export are enabled. If the user then specifies something
- * incompatible here it can lead to problems.
- *
- * @param optimizedOptions
- * @param networkProperties
- */
-void ValidateSourcesMatchOptimizedNetwork(std::vector<BackendOptions> optimizedOptions,
- const INetworkProperties& networkProperties)
-{
- // Find the "Global" backend options. During the optimize phase the values of importEnabled and exportEnabled are
- // added as backend options.
- const vector<BackendOptions>::iterator& backendItr =
- find_if(optimizedOptions.begin(), optimizedOptions.end(), [](const BackendOptions& backend) {
- if (backend.GetBackendId().Get() == "Global")
- {
- return true;
- }
- else
- {
- return false;
- }
- });
- bool importEnabled = false;
- bool exportEnabled = false;
- if (backendItr != optimizedOptions.end())
- {
- // Find the importEnabled and exportEnabled values.
- for (size_t i = 0; i < backendItr->GetOptionCount(); i++)
- {
- const BackendOptions::BackendOption& option = backendItr->GetOption(i);
- if (option.GetName() == "ImportEnabled")
- {
- importEnabled = option.GetValue().AsBool();
- }
- if (option.GetName() == "ExportEnabled")
- {
- exportEnabled = option.GetValue().AsBool();
- }
- }
- }
-
- // Now that we have values for import and export compare them to the MemorySource variables.
- // Any value of MemorySource that's not "Undefined" implies that we need to do an import of some kind.
- if ((networkProperties.m_InputSource == MemorySource::Undefined && importEnabled) ||
- (networkProperties.m_InputSource != MemorySource::Undefined && !importEnabled))
- {
- auto message = fmt::format("The input memory source specified, '{0}',", networkProperties.m_InputSource);
- if (!importEnabled)
- {
- message.append(" requires that memory import be enabled. However, "
- "it was disabled when this network was optimized.");
- }
- else
- {
- message.append(" requires that memory import be disabled. However, "
- "it was enabled when this network was optimized.");
- }
- throw InvalidArgumentException(message);
- }
-
- if ((networkProperties.m_OutputSource == MemorySource::Undefined && exportEnabled) ||
- (networkProperties.m_OutputSource != MemorySource::Undefined && !exportEnabled))
- {
- auto message = fmt::format("The output memory source specified, '{0}',", networkProperties.m_OutputSource);
- if (!exportEnabled)
- {
- message.append(" requires that memory export be enabled. However, "
- "it was disabled when this network was optimized.");
- }
- else
- {
- message.append(" requires that memory export be disabled. However, "
- "it was enabled when this network was optimized.");
- }
- throw InvalidArgumentException(message);
- }
-} // anonymous
-
std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
std::string& errorMessage,
const INetworkProperties& networkProperties,
@@ -217,11 +136,6 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
- // We need to check that the memory sources match up with the values of import and export specified during the
- // optimize phase. If they don't this will throw an exception.
- ValidateSourcesMatchOptimizedNetwork(m_OptimizedNetwork.get()->pOptimizedNetworkImpl->GetModelOptions(),
- m_NetworkProperties);
-
//First create tensor handlers, backends and workload factories.
//Handlers are created before workloads are.
//Because workload creation can modify some of the handlers,
@@ -1525,7 +1439,7 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
- if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
+ if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_InputSource))
{
throw MemoryImportException(
fmt::format("ImportInputs: Memory Import failed, backend: "
@@ -1537,7 +1451,7 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
inputTensor.second.GetMemoryArea());
- if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
+ if (tensorHandle->Import(passThroughTensorHandle->Map(), m_NetworkProperties.m_InputSource))
{
importedInputs.push_back(m_CurImportedInputId++);
passThroughTensorHandle->Unmap();
@@ -1650,14 +1564,14 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors&
ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
- if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
+ if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_OutputSource))
{
throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
"{} does not support importing from source {}"
- , factoryId, forceImportMemorySource));
+ , factoryId, m_NetworkProperties.m_OutputSource));
}
- if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
+ if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), m_NetworkProperties.m_OutputSource))
{
importedOutputs.push_back(m_CurImportedOutputId++);
}
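With ValidateSourcesMatchOptimizedNetwork removed, nothing cross-checks the load-time memory sources against the optimize-phase flags; the sources in INetworkProperties are used directly, and any mismatch surfaces later as a MemoryImportException from the import paths above. A sketch of the load call, mirroring RuntimeTests.cpp below (runtime, networkId and optNet as set up in those tests):

```cpp
std::string errorMessage;
armnn::INetworkProperties networkProperties(/*asyncEnabled=*/true,
                                            armnn::MemorySource::Malloc,   // m_InputSource
                                            armnn::MemorySource::Malloc);  // m_OutputSource
runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
```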
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9520c1399e..f2ba94f597 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
OutputSlot& outputSlot,
TensorHandleFactoryRegistry& registry,
- bool exportEnabled)
+ bool importEnabled)
{
// First ensure the from backends can support the TensorHandeAPI
Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
for (auto&& pref : srcPrefs)
{
- if (exportEnabled)
+ if (importEnabled)
{
ITensorHandleFactory* factory = registry.GetFactory(pref);
if (outputConnection)
@@ -1602,13 +1602,12 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
BackendsMap& backends,
TensorHandleFactoryRegistry& registry,
bool importEnabled,
- bool exportEnabled,
Optional<std::vector<std::string>&> errMessages)
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
OptimizationResult result;
- optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
+ optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
{
ARMNN_ASSERT(layer);
@@ -1633,7 +1632,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
break;
default:
- slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
+ slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
break;
}
outputSlot.SetTensorHandleFactory(slotOption);
@@ -1697,15 +1696,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
- // We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
- // The mechanism to do that is to add model options to the optimized network.
- armnn::BackendOptions importExport("Global",
- {{"ImportEnabled", options.m_ImportEnabled},
- {"ExportEnabled", options.m_ExportEnabled}});
- ModelOptions optimizedOptions(options.m_ModelOptions);
- optimizedOptions.push_back(importExport);
-
- auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
+ auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
&IOptimizedNetwork::Destroy);
IOptimizedNetwork* optNetObjPtr = optNet.get();
@@ -1828,9 +1819,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
backends,
tensorHandleFactoryRegistry,
options.m_ImportEnabled,
- options.m_ExportEnabled,
messages);
-
if (strategyResult.m_Error)
{
// Failed to apply the backend-specific optimizations
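Internally, SelectTensorHandleStrategy reverts to a single importEnabled parameter; the call shape matches TensorHandleStrategyTest.cpp below (graph, backends and registry as constructed there):

```cpp
std::vector<std::string> errors;
auto result = SelectTensorHandleStrategy(graph, backends, registry,
                                         /*importEnabled=*/true, errors);
```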
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2d34cfc3e2..6c7c2f5c7e 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -300,7 +300,6 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
BackendsMap& backends,
TensorHandleFactoryRegistry& registry,
bool importEnabled,
- bool exportEnabled,
Optional<std::vector<std::string>&> errMessages);
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index f5dfadf948..376cdbc000 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,9 +56,9 @@ public:
armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
- MemorySource forceImportMemorySource);
+ MemorySource forceImportMemorySource = MemorySource::Undefined);
std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
- MemorySource forceImportMemorySource);
+ MemorySource forceImportMemorySource = MemorySource::Undefined);
void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds);
void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds);
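With the Undefined defaults restored, pre-import calls may omit the memory source and rely on the sources fixed at LoadNetwork time; a sketch, with networkId and the tensor vectors as in RuntimeTests.cpp below:

```cpp
auto importedInputIds  = runtime->ImportInputs(networkId, inputTensors);
auto importedOutputIds = runtime->ImportOutputs(networkId, outputTensors);
```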
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 59f65541b8..3cbe8848df 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -93,7 +93,7 @@ TEST_CASE("RuntimePreImportInputs")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
std::string er;
- armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
+ armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
runtime->LoadNetwork(networkId,
Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
er,
@@ -107,7 +107,7 @@ TEST_CASE("RuntimePreImportInputs")
ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
- auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
+ auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
CHECK(importedInputVec1.size() == 1);
CHECK(importedInputVec1[0] == 0);
@@ -118,7 +118,7 @@ TEST_CASE("RuntimePreImportInputs")
CHECK(val == 30);
}
- auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
+ auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
CHECK(importedInputVec2.size() == 1);
CHECK(importedInputVec2[0] == 1);
@@ -146,7 +146,7 @@ TEST_CASE("RuntimePreImportInputs")
// Incorrect layer binding id and ImportedInputId
CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
armnn::InvalidArgumentException);
- auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
+ auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
CHECK(importedInputVec3[0] == 2);
// Too many ImportedInputIds
CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
@@ -175,7 +175,6 @@ TEST_CASE("RuntimePreImportInputs")
// Trying to delete unknown pre-imported tensor
CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
}
-
TEST_CASE("RuntimePreImportOutputs")
{
armnn::IRuntime::CreationOptions options;
@@ -217,7 +216,7 @@ TEST_CASE("RuntimePreImportOutputs")
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
std::string er;
- armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
+ armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);
runtime->LoadNetwork(networkId,
Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
er,
@@ -258,7 +257,7 @@ TEST_CASE("RuntimePreImportOutputs")
runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
testOutputs();
- auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
+ auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 });
CHECK(importedOutputVec.size() == 2);
CHECK(importedOutputVec[0] == 0);
CHECK(importedOutputVec[1] == 1);
@@ -272,7 +271,7 @@ TEST_CASE("RuntimePreImportOutputs")
runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
testOutputs();
- auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
+ auto importedInputVec = runtime->ImportInputs(networkId, inputTensors);
CHECK(importedInputVec.size() == 2);
CHECK(importedInputVec[0] == 0);
CHECK(importedInputVec[1] == 1);
@@ -1294,176 +1293,4 @@ TEST_CASE("ProfilingPostOptimisationStructureCpuRef")
VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
}
-TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
-{
- // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
- // that network but specify that the import memory source is Malloc.
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnn::NetworkId networkId = 1;
- armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
- auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
- auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- auto addLayer = testNetwork->AddAdditionLayer("add layer");
- auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
- TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
- inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
- inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
- inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
- addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
- OptimizerOptions optimizedOptions;
- // Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = false;
- optimizedOptions.m_ExportEnabled = false;
- IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
- CHECK(optNet);
-
- std::string er;
- // Load the network passing an import memory source.
- armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
- // There should be an InvalidArgumentException.
- runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
- CHECK(er.find("However, it was disabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
-{
- // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
- // that network but specify that the export memory source as Malloc.
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnn::NetworkId networkId = 1;
- armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
- auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
- auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- auto addLayer = testNetwork->AddAdditionLayer("add layer");
- auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
- TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
- inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
- inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
- inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
- addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
- OptimizerOptions optimizedOptions;
- // Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = false;
- optimizedOptions.m_ExportEnabled = false;
- IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
- CHECK(optNet);
-
- std::string er;
- // Load the network passing an import memory source.
- armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
- // There should be an InvalidArgumentException.
- runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
- CHECK(er.find("However, it was disabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
-{
- // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
- // that network but specify that the import memory source is Undefined.
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnn::NetworkId networkId = 1;
- armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
- auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
- auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- auto addLayer = testNetwork->AddAdditionLayer("add layer");
- auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
- TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
- inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
- inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
- inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
- addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
- OptimizerOptions optimizedOptions;
- // Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = false;
- IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
- CHECK(optNet);
-
- std::string er;
- // Load the network passing an import memory source.
- armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
- // There should be an InvalidArgumentException.
- runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
- CHECK(er.find("However, it was enabled when this network was optimized") != -1);
-}
-
-TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
-{
- // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
- // that network but specify that the export memory source is Undefined.
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnn::NetworkId networkId = 1;
- armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
-
- auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
- auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
- auto addLayer = testNetwork->AddAdditionLayer("add layer");
- auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
-
- TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
-
- inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
- inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
- inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
- addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-
- OptimizerOptions optimizedOptions;
- // Hard set import and export to off.
- optimizedOptions.m_ImportEnabled = false;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
- CHECK(optNet);
-
- std::string er;
- // Load the network passing an import memory source.
- armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
- // There should be an InvalidArgumentException.
- runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
- CHECK(er.find("However, it was enabled when this network was optimized") != -1);
-}
-
}
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 2ea3c2abf1..c591fffa43 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -342,7 +342,7 @@ TEST_CASE("TensorHandleSelectionStrategy")
graph.TopologicalSort();
std::vector<std::string> errors;
- auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+ auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
CHECK(result.m_Error == false);
CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 9c85ffcfc3..c69a4b5f91 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -73,7 +73,7 @@ TEST_CASE("Neon_Cl_DirectCompatibility_Test")
graph.TopologicalSort();
std::vector<std::string> errors;
- auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
+ auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
CHECK(result.m_Error == false);
CHECK(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23ca3..77901df444 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -204,9 +204,7 @@ inline void ImportNonAlignedInputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
CHECK(optNet);
// Loads it into the runtime.
@@ -271,10 +269,7 @@ inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
CHECK(optNet);
// Loads it into the runtime.
@@ -345,10 +340,7 @@ inline void ImportAlignedPointerTest(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
CHECK(optNet);
// Loads it into the runtime.
@@ -432,9 +424,7 @@ inline void ImportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
INFO("Load Network");
// Load it into the runtime. It should pass.
@@ -524,9 +514,7 @@ inline void ExportOnlyWorkload(std::vector<BackendId> backends)
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
// optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
INFO("Load Network");
// Load it into the runtime. It should pass.
@@ -613,10 +601,7 @@ inline void ImportAndExportWorkload(std::vector<BackendId> backends)
input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32, 0.0f, 0, true));
pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 4 }, DataType::Float32));
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
INFO("Load Network");
// Load it into the runtime. It should pass.
@@ -709,10 +694,7 @@ inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<Backend
activation->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 1 }, DataType::Float32));
// Optimize the network
- OptimizerOptions optimizedOptions;
- optimizedOptions.m_ImportEnabled = true;
- optimizedOptions.m_ExportEnabled = true;
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
// Loads it into the runtime.
NetworkId netId;
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index cd865def71..bcea0610db 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -421,7 +421,7 @@ TEST_CASE("OptimizeNetworkCopy")
std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
armnn::ModelOptions modelOptions;
- armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+ armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
std::vector<std::string> errorMessages;
// optimize the network.
diff --git a/src/backends/cl/test/ClCustomAllocatorTests.cpp b/src/backends/cl/test/ClCustomAllocatorTests.cpp
index 251c98fcad..139e688dc2 100644
--- a/src/backends/cl/test/ClCustomAllocatorTests.cpp
+++ b/src/backends/cl/test/ClCustomAllocatorTests.cpp
@@ -120,7 +120,6 @@ TEST_CASE("ClCustomAllocatorTest")
// Optimise ArmNN network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
armnn::IOptimizedNetworkPtr optNet = Optimize(*myNetwork, {"GpuAcc"}, run->GetDeviceSpec(), optOptions);
CHECK(optNet);
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 51a983a681..6ac94337ba 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -50,7 +50,6 @@ TEST_CASE("ClImportEnabledFallbackToNeon")
// optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -331,7 +330,6 @@ TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
// optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 9a075d2b7d..20537b3c81 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -142,7 +142,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -339,7 +338,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd")
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -472,7 +470,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -616,7 +613,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -751,7 +747,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(network.GetGraph(), backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -901,7 +896,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
@@ -1123,7 +1117,6 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver
// Optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = false;
- optOptions.m_ExportEnabled = false;
std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optOptions);
CHECK(optNet);
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index 6648759a9a..cf17eae208 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -130,7 +130,7 @@ TEST_CASE("FastMathEnabledTestOnGpuAcc")
auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
- CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+ CHECK(modelOptionsOut.size() == 1);
CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 8e0e0ab99b..d2de843fd9 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -60,7 +60,6 @@ TEST_CASE("FallbackImportToCpuAcc")
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -204,7 +203,6 @@ TEST_CASE("FallbackPaddingCopyToCpuAcc")
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -340,7 +338,6 @@ TEST_CASE("FallbackImportFromCpuAcc")
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -485,7 +482,6 @@ TEST_CASE("FallbackPaddingCopyFromCpuAcc")
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -750,7 +746,6 @@ TEST_CASE("NeonImportEnabledFallbackToCl")
// optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
@@ -1042,7 +1037,6 @@ TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
// optimize the network
OptimizerOptions optOptions;
optOptions.m_ImportEnabled = true;
- optOptions.m_ExportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
Graph& graph = GetGraphForTesting(optNet.get());
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index dcda9bfd07..9b448b270d 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -106,7 +106,7 @@ TEST_CASE("FastMathEnabledTestOnCpuAcc")
auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
- CHECK(modelOptionsOut.size() == 2); // FastMathEnabled and the Global to hold the import export values.
+ CHECK(modelOptionsOut.size() == 1);
CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}