Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/LoadedNetwork.cpp                  |  96
-rw-r--r--  src/armnn/Network.cpp                        |  21
-rw-r--r--  src/armnn/Network.hpp                        |   1
-rw-r--r--  src/armnn/Runtime.hpp                        |   4
-rw-r--r--  src/armnn/test/RuntimeTests.cpp              | 187
-rw-r--r--  src/armnn/test/TensorHandleStrategyTest.cpp  |   2
6 files changed, 291 insertions, 20 deletions
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index ec79d5da3e..a27add921e 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -84,6 +84,87 @@ void AddWorkloadStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils
} // anonymous
+/**
+ * Performs a sanity check to ensure that the combination of input and output memory sources specified at load time
+ * matches the values of importEnabled and exportEnabled that were specified during optimization. During optimization
+ * the tensor handle factories are chosen based on whether import and export are enabled, so specifying something
+ * incompatible here can lead to problems later.
+ *
+ * @param optimizedOptions the model options attached to the optimized network during the optimize phase
+ * @param networkProperties the network properties supplied to LoadNetwork
+ */
+void ValidateSourcesMatchOptimizedNetwork(const std::vector<BackendOptions>& optimizedOptions,
+ const INetworkProperties& networkProperties)
+{
+ // Find the "Global" backend options. During the optimize phase the values of importEnabled and exportEnabled are
+ // added as backend options.
+ auto backendItr = std::find_if(optimizedOptions.begin(), optimizedOptions.end(),
+ [](const BackendOptions& backend)
+ {
+ return backend.GetBackendId().Get() == "Global";
+ });
+ bool importEnabled = false;
+ bool exportEnabled = false;
+ if (backendItr != optimizedOptions.end())
+ {
+ // Find the importEnabled and exportEnabled values.
+ for (size_t i = 0; i < backendItr->GetOptionCount(); i++)
+ {
+ const BackendOptions::BackendOption& option = backendItr->GetOption(i);
+ if (option.GetName() == "ImportEnabled")
+ {
+ importEnabled = option.GetValue().AsBool();
+ }
+ if (option.GetName() == "ExportEnabled")
+ {
+ exportEnabled = option.GetValue().AsBool();
+ }
+ }
+ }
+
+ // Now that we have values for import and export, compare them to the MemorySource variables.
+ // Any value of MemorySource that's not "Undefined" implies that we need to do an import of some kind.
+ if ((networkProperties.m_InputSource == MemorySource::Undefined && importEnabled) ||
+ (networkProperties.m_InputSource != MemorySource::Undefined && !importEnabled))
+ {
+ auto message = fmt::format("The input memory source specified, '{0}',", networkProperties.m_InputSource);
+ if (!importEnabled)
+ {
+ message.append(" requires that memory import be enabled. However, "
+ "it was disabled when this network was optimized.");
+ }
+ else
+ {
+ message.append(" requires that memory import be disabled. However, "
+ "it was enabled when this network was optimized.");
+ }
+ throw InvalidArgumentException(message);
+ }
+
+ if ((networkProperties.m_OutputSource == MemorySource::Undefined && exportEnabled) ||
+ (networkProperties.m_OutputSource != MemorySource::Undefined && !exportEnabled))
+ {
+ auto message = fmt::format("The output memory source specified, '{0}',", networkProperties.m_OutputSource);
+ if (!exportEnabled)
+ {
+ message.append(" requires that memory export be enabled. However, "
+ "it was disabled when this network was optimized.");
+ }
+ else
+ {
+ message.append(" requires that memory export be disabled. However, "
+ "it was enabled when this network was optimized.");
+ }
+ throw InvalidArgumentException(message);
+ }
+}
+
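For reference, a minimal sketch (not part of the patch) of the round trip this check relies on: Optimize() writes a "Global" BackendOptions entry into the model options (see the Network.cpp hunk below), and the function above reads it back. It uses only the BackendOptions calls already present in this diff; the surrounding main() is illustrative.

    #include <armnn/BackendOptions.hpp>
    #include <iostream>

    int main()
    {
        // Write side: what Optimize() now attaches to the optimized network.
        armnn::BackendOptions importExport("Global",
                                           {{"ImportEnabled", false},
                                            {"ExportEnabled", false}});

        // Read side: match on the backend id, then walk the named options,
        // as ValidateSourcesMatchOptimizedNetwork() does.
        if (importExport.GetBackendId().Get() == "Global")
        {
            for (size_t i = 0; i < importExport.GetOptionCount(); ++i)
            {
                const armnn::BackendOptions::BackendOption& option = importExport.GetOption(i);
                std::cout << option.GetName() << ": " << option.GetValue().AsBool() << std::endl;
            }
        }
        return 0;
    }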
std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
std::string& errorMessage,
const INetworkProperties& networkProperties,
@@ -136,6 +217,11 @@ LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
profiler->EnableNetworkDetailsToStdOut(networkProperties.m_OutputNetworkDetailsMethod);
+ // We need to check that the memory sources match up with the values of import and export specified during the
+ // optimize phase. If they don't, this will throw an exception.
+ ValidateSourcesMatchOptimizedNetwork(m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions(),
+ m_NetworkProperties);
+
//First create tensor handlers, backends and workload factories.
//Handlers are created before workloads are.
//Because workload creation can modify some of the handlers,
@@ -1439,7 +1525,7 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
- if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_InputSource))
+ if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
{
throw MemoryImportException(
fmt::format("ImportInputs: Memory Import failed, backend: "
@@ -1451,7 +1537,7 @@ std::vector<ImportedInputId> LoadedNetwork::ImportInputs(const InputTensors& inp
std::make_unique<ConstPassthroughTensorHandle>(inputTensor.second.GetInfo(),
inputTensor.second.GetMemoryArea());
- if (tensorHandle->Import(passThroughTensorHandle->Map(), m_NetworkProperties.m_InputSource))
+ if (tensorHandle->Import(passThroughTensorHandle->Map(), forceImportMemorySource))
{
importedInputs.push_back(m_CurImportedInputId++);
passThroughTensorHandle->Unmap();
@@ -1564,14 +1650,14 @@ std::vector<ImportedOutputId> LoadedNetwork::ImportOutputs(const OutputTensors&
ITensorHandle* tensorHandle = importedTensorHandlePin.m_TensorHandle.get();
- if (!CheckFlag(tensorHandle->GetImportFlags(), m_NetworkProperties.m_OutputSource))
+ if (!CheckFlag(tensorHandle->GetImportFlags(), forceImportMemorySource))
{
throw MemoryImportException(fmt::format("ImportInputs: Memory Import failed, backend: "
"{} does not support importing from source {}"
- , factoryId, m_NetworkProperties.m_OutputSource));
+ , factoryId, forceImportMemorySource));
}
- if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), m_NetworkProperties.m_OutputSource))
+ if (tensorHandle->Import(outputTensor.second.GetMemoryArea(), forceImportMemorySource))
{
importedOutputs.push_back(m_CurImportedOutputId++);
}
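The user-visible effect of the new check, as a condensed sketch (the new RuntimeTests.cpp cases below are the authoritative versions; `runtime`, `network`, `backends` and `networkId` are assumed to be set up as in those tests):

    // Import disabled at optimize time...
    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ImportEnabled = false;
    optimizerOptions.m_ExportEnabled = false;
    armnn::IOptimizedNetworkPtr optNet =
        Optimize(*network, backends, runtime->GetDeviceSpec(), optimizerOptions);

    // ...but an import source requested at load time: the sources no longer match.
    std::string errorMessage;
    armnn::INetworkProperties properties(true, armnn::MemorySource::Malloc, armnn::MemorySource::Undefined);
    runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, properties);
    // errorMessage now reports that import "was disabled when this network was optimized".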
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f2ba94f597..9520c1399e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
OutputSlot& outputSlot,
TensorHandleFactoryRegistry& registry,
- bool importEnabled)
+ bool exportEnabled)
{
// First ensure the from backends can support the TensorHandleAPI
Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
for (auto&& pref : srcPrefs)
{
- if (importEnabled)
+ if (exportEnabled)
{
ITensorHandleFactory* factory = registry.GetFactory(pref);
if (outputConnection)
@@ -1602,12 +1602,13 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
BackendsMap& backends,
TensorHandleFactoryRegistry& registry,
bool importEnabled,
+ bool exportEnabled,
Optional<std::vector<std::string>&> errMessages)
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
OptimizationResult result;
- optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
+ optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
{
ARMNN_ASSERT(layer);
@@ -1632,7 +1633,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
break;
default:
- slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
+ slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
break;
}
outputSlot.SetTensorHandleFactory(slotOption);
@@ -1696,7 +1697,15 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
- auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
+ // We need to pass on the information about whether import and export are enabled to the LoadNetwork phase.
+ // The mechanism to do that is to add model options to the optimized network.
+ armnn::BackendOptions importExport("Global",
+ {{"ImportEnabled", options.m_ImportEnabled},
+ {"ExportEnabled", options.m_ExportEnabled}});
+ ModelOptions optimizedOptions(options.m_ModelOptions);
+ optimizedOptions.push_back(importExport);
+
+ auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
&IOptimizedNetwork::Destroy);
IOptimizedNetwork* optNetObjPtr = optNet.get();
@@ -1819,7 +1828,9 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
backends,
tensorHandleFactoryRegistry,
options.m_ImportEnabled,
+ options.m_ExportEnabled,
messages);
+
if (strategyResult.m_Error)
{
// Failed to apply the backend-specific optimizations
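The signature change above ripples out to callers: SelectTensorHandleStrategy now takes separate import and export flags, and CalculateSlotOption consults exportEnabled (output slots are exported, not imported). A hedged sketch of the updated call, mirroring the TensorHandleStrategyTest change at the end of this diff; `graph`, `backends` and `registry` are assumed to be set up as in that test:

    std::vector<std::string> errors;
    // Both flags are now explicit rather than import doubling for export.
    OptimizationResult result = SelectTensorHandleStrategy(graph, backends, registry,
                                                           true,  // importEnabled
                                                           true,  // exportEnabled
                                                           errors);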
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 6c7c2f5c7e..2d34cfc3e2 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -300,6 +300,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
BackendsMap& backends,
TensorHandleFactoryRegistry& registry,
bool importEnabled,
+ bool exportEnabled,
Optional<std::vector<std::string>&> errMessages);
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
diff --git a/src/armnn/Runtime.hpp b/src/armnn/Runtime.hpp
index 376cdbc000..f5dfadf948 100644
--- a/src/armnn/Runtime.hpp
+++ b/src/armnn/Runtime.hpp
@@ -56,9 +56,9 @@ public:
armnn::TensorInfo GetOutputTensorInfo(NetworkId networkId, LayerBindingId layerId) const;
std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
- MemorySource forceImportMemorySource = MemorySource::Undefined);
+ MemorySource forceImportMemorySource);
std::vector<ImportedOutputId> ImportOutputs(NetworkId networkId, const OutputTensors& outputTensors,
- MemorySource forceImportMemorySource = MemorySource::Undefined);
+ MemorySource forceImportMemorySource);
void ClearImportedInputs(NetworkId networkId, const std::vector<ImportedInputId> inputIds);
void ClearImportedOutputs(NetworkId networkId, const std::vector<ImportedOutputId> outputIds);
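With the `MemorySource::Undefined` defaults removed, callers of these RuntimeImpl methods must state the memory source explicitly on every pre-import call. A hedged usage sketch, with identifiers as in the updated RuntimeTests.cpp below:

    // Pre-import with an explicit source; previously an omitted argument
    // silently defaulted to MemorySource::Undefined.
    std::vector<armnn::ImportedInputId> importedInputs =
        runtime->ImportInputs(networkId, inputTensors, armnn::MemorySource::Malloc);
    std::vector<armnn::ImportedOutputId> importedOutputs =
        runtime->ImportOutputs(networkId, outputTensors, armnn::MemorySource::Malloc);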
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 3cbe8848df..59f65541b8 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -93,7 +93,7 @@ TEST_CASE("RuntimePreImportInputs")
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
std::string er;
- armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Undefined);
+ armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
runtime->LoadNetwork(networkId,
Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
er,
@@ -107,7 +107,7 @@ TEST_CASE("RuntimePreImportInputs")
ConstTensor inputTensor2({{4}, armnn::DataType::Signed32, 0.0f, 0, true}, inputData2.data());
Tensor outputTensor({{4}, armnn::DataType::Signed32}, output.data());
- auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}});
+ auto importedInputVec1 = runtime->ImportInputs(networkId, {{0, inputTensor1}}, MemorySource::Malloc);
CHECK(importedInputVec1.size() == 1);
CHECK(importedInputVec1[0] == 0);
@@ -118,7 +118,7 @@ TEST_CASE("RuntimePreImportInputs")
CHECK(val == 30);
}
- auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
+ auto importedInputVec2 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
CHECK(importedInputVec2.size() == 1);
CHECK(importedInputVec2[0] == 1);
@@ -146,7 +146,7 @@ TEST_CASE("RuntimePreImportInputs")
// Incorrect layer binding id and ImportedInputId
CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {{-2, inputTensor2}}, {{2, outputTensor}}, {10});,
armnn::InvalidArgumentException);
- auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}});
+ auto importedInputVec3 = runtime->ImportInputs(networkId, {{1, inputTensor2}}, MemorySource::Malloc);
CHECK(importedInputVec3[0] == 2);
// Too many ImportedInputIds
CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), {}, {{2, outputTensor}}, {0, 1, 2});,
@@ -175,6 +175,7 @@ TEST_CASE("RuntimePreImportInputs")
// Trying to delete unknown pre-imported tensor
CHECK_THROWS_AS(runtime->ClearImportedInputs(networkId, {10});, armnn::InvalidArgumentException);
}
+
TEST_CASE("RuntimePreImportOutputs")
{
armnn::IRuntime::CreationOptions options;
@@ -216,7 +217,7 @@ TEST_CASE("RuntimePreImportOutputs")
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
std::string er;
- armnn::INetworkProperties networkProperties(true, MemorySource::Malloc, MemorySource::Malloc);
+ armnn::INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
runtime->LoadNetwork(networkId,
Optimize(*testNetwork, backends, runtime->GetDeviceSpec()),
er,
@@ -257,7 +258,7 @@ TEST_CASE("RuntimePreImportOutputs")
runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
testOutputs();
- auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 });
+ auto importedOutputVec = runtime->ImportOutputs(networkId, {output1, output2 }, MemorySource::Malloc);
CHECK(importedOutputVec.size() == 2);
CHECK(importedOutputVec[0] == 0);
CHECK(importedOutputVec[1] == 1);
@@ -271,7 +272,7 @@ TEST_CASE("RuntimePreImportOutputs")
runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
testOutputs();
- auto importedInputVec = runtime->ImportInputs(networkId, inputTensors);
+ auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
CHECK(importedInputVec.size() == 2);
CHECK(importedInputVec[0] == 0);
CHECK(importedInputVec[1] == 1);
@@ -1293,4 +1294,176 @@ TEST_CASE("ProfilingPostOptimisationStructureCpuRef")
VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
}
+TEST_CASE("RuntimeOptimizeImportOff_LoadNetworkImportOn")
+{
+ // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
+ // that network but specify that the import memory source is Malloc.
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+ armnn::NetworkId networkId = 1;
+ armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+ auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+ auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+ TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+ inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+ inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ OptimizerOptions optimizedOptions;
+ // Hard set import and export to off.
+ optimizedOptions.m_ImportEnabled = false;
+ optimizedOptions.m_ExportEnabled = false;
+ IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ std::string er;
+ // Load the network passing an import memory source.
+ armnn::INetworkProperties networkProperties1(true, MemorySource::Malloc, MemorySource::Undefined);
+ // The InvalidArgumentException thrown internally should surface in the returned error message.
+ runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+ CHECK(er.find("However, it was disabled when this network was optimized") != std::string::npos);
+}
+
+TEST_CASE("RuntimeOptimizeExportOff_LoadNetworkExportOn")
+{
+ // In this test case we'll optimize a network with both import and export disabled. Then we'll attempt to load
+ // that network but specify that the export memory source is Malloc.
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+ armnn::NetworkId networkId = 1;
+ armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+ auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+ auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+ TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+ inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+ inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ OptimizerOptions optimizedOptions;
+ // Hard set import and export to off.
+ optimizedOptions.m_ImportEnabled = false;
+ optimizedOptions.m_ExportEnabled = false;
+ IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ std::string er;
+ // Load the network passing an export memory source.
+ armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Malloc);
+ // The InvalidArgumentException thrown internally should surface in the returned error message.
+ runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+ CHECK(er.find("However, it was disabled when this network was optimized") != std::string::npos);
+}
+
+TEST_CASE("RuntimeOptimizeImportOn_LoadNetworkImportOff")
+{
+ // In this test case we'll optimize a network with import enabled. Then we'll attempt to load
+ // that network but specify that the import memory source is Undefined.
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+ armnn::NetworkId networkId = 1;
+ armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+ auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+ auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+ TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+ inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+ inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ OptimizerOptions optimizedOptions;
+ // Hard set import on and export off.
+ optimizedOptions.m_ImportEnabled = true;
+ optimizedOptions.m_ExportEnabled = false;
+ IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ std::string er;
+ // Load the network with an Undefined import memory source.
+ armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
+ // The InvalidArgumentException thrown internally should surface in the returned error message.
+ runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+ CHECK(er.find("However, it was enabled when this network was optimized") != std::string::npos);
+}
+
+TEST_CASE("RuntimeOptimizeExportOn_LoadNetworkExportOff")
+{
+ // In this test case we'll optimize a network with export enabled. Then we'll attempt to load
+ // that network but specify that the export memory source is Undefined.
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+ armnn::NetworkId networkId = 1;
+ armnn::INetworkPtr testNetwork(armnn::INetwork::Create());
+
+ auto inputLayer1 = testNetwork->AddInputLayer(0, "input 1 layer");
+ auto inputLayer2 = testNetwork->AddInputLayer(1, "input 2 layer");
+ auto addLayer = testNetwork->AddAdditionLayer("add layer");
+ auto outputLayer = testNetwork->AddOutputLayer(2, "output layer");
+
+ TensorInfo tensorInfo{{4}, armnn::DataType::Signed32};
+
+ inputLayer1->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0));
+ inputLayer1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ inputLayer2->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1));
+ inputLayer2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ addLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ OptimizerOptions optimizedOptions;
+ // Hard set import off and export on.
+ optimizedOptions.m_ImportEnabled = false;
+ optimizedOptions.m_ExportEnabled = true;
+ IOptimizedNetworkPtr optNet = Optimize(*testNetwork, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ std::string er;
+ // Load the network with an Undefined export memory source.
+ armnn::INetworkProperties networkProperties1(true, MemorySource::Undefined, MemorySource::Undefined);
+ // The InvalidArgumentException thrown internally should surface in the returned error message.
+ runtime->LoadNetwork(networkId, std::move(optNet), er, networkProperties1);
+ CHECK(er.find("However, it was enabled when this network was optimized") != std::string::npos);
+}
+
}
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index c591fffa43..2ea3c2abf1 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -342,7 +342,7 @@ TEST_CASE("TensorHandleSelectionStrategy")
graph.TopologicalSort();
std::vector<std::string> errors;
- auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+ auto result = SelectTensorHandleStrategy(graph, backends, registry, true, true, errors);
CHECK(result.m_Error == false);
CHECK(result.m_Warning == false);