path: root/src/armnn/Network.cpp
author    Colm Donelan <colm.donelan@arm.com>    2022-05-30 15:20:36 +0100
committer Colm Donelan <colm.donelan@arm.com>    2022-06-20 15:04:10 +0000
commit    03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721 (patch)
tree      161e9f9efdeffb72cf6237b7a67dc2e990eb97c4 /src/armnn/Network.cpp
parent    8f397a1efed11e17e9f8cb12b53a72b7e32ab978 (diff)
IVGCVSW-6873 Import inputs but don't export outputs fails.
Only one bool is used to indicate whether inputs should be imported. However, it's possible for the user to want to import inputs but not export outputs. In addition, it's possible for a user to enable import during optimize but then pass a memory source that does not require import.

* Add m_ExportEnabled to INetwork.hpp.
* Modify Network::dNetwork to consider both m_ImportEnabled and m_ExportEnabled.
* Add ValidateSourcesMatchOptimizedNetwork to LoadedNetwork to validate import options between optimize and network load.
* Update the TfLite delegate to consider the exportEnabled flag in the optimizer.

!armnn-internal-tests:425350
Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I776eab81595898e43f91ab40306962eae61329f4
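The split the message describes surfaces to callers through OptimizerOptions, which after this change carries m_ImportEnabled and m_ExportEnabled as independent flags. A minimal caller-side sketch of requesting import-only behaviour follows; the helper name and the CpuAcc backend preference are illustrative, not part of the patch.

// Sketch only: assumes an ArmNN build that includes this change, so that
// OptimizerOptions exposes m_ExportEnabled alongside m_ImportEnabled.
#include <armnn/ArmNN.hpp>

armnn::IOptimizedNetworkPtr OptimizeImportOnly(const armnn::INetwork& network,
                                               armnn::IRuntime& runtime)
{
    armnn::OptimizerOptions options;
    options.m_ImportEnabled = true;   // inputs will be imported (zero-copy)
    options.m_ExportEnabled = false;  // outputs are still copied out by ArmNN

    // Backend preference is illustrative; any backend that supports import works.
    return armnn::Optimize(network,
                           {armnn::Compute::CpuAcc},
                           runtime.GetDeviceSpec(),
                           options);
}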
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r--  src/armnn/Network.cpp | 21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index f2ba94f597..9520c1399e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1362,7 +1362,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
OutputSlot& outputSlot,
TensorHandleFactoryRegistry& registry,
- bool importEnabled)
+ bool exportEnabled)
{
// First ensure the from backends can support the TensorHandeAPI
Layer& layer = outputSlot.GetOwningLayer();
@@ -1390,7 +1390,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
std::map<ITensorHandleFactory::FactoryId, int> factoryScores;
for (auto&& pref : srcPrefs)
{
- if (importEnabled)
+ if (exportEnabled)
{
ITensorHandleFactory* factory = registry.GetFactory(pref);
if (outputConnection)
@@ -1602,12 +1602,13 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
BackendsMap& backends,
TensorHandleFactoryRegistry& registry,
bool importEnabled,
+ bool exportEnabled,
Optional<std::vector<std::string>&> errMessages)
{
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_SelectTensorHandleStrategy");
OptimizationResult result;
- optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
+ optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
{
ARMNN_ASSERT(layer);
@@ -1632,7 +1633,7 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
slotOption = CalculateSlotOptionForOutput(backends, outputSlot, registry);
break;
default:
- slotOption = CalculateSlotOption(backends, outputSlot, registry, importEnabled);
+ slotOption = CalculateSlotOption(backends, outputSlot, registry, exportEnabled);
break;
}
outputSlot.SetTensorHandleFactory(slotOption);
@@ -1696,7 +1697,15 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
std::unique_ptr<Graph> graph = std::make_unique<Graph>(inGraph);
- auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
+ // We need to pass on the information about whether import and export is enabled to the LoadNetwork phase.
+ // The mechanism to do that is to add model options to the optimized network.
+ armnn::BackendOptions importExport("Global",
+ {{"ImportEnabled", options.m_ImportEnabled},
+ {"ExportEnabled", options.m_ExportEnabled}});
+ ModelOptions optimizedOptions(options.m_ModelOptions);
+ optimizedOptions.push_back(importExport);
+
+ auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), optimizedOptions),
&IOptimizedNetwork::Destroy);
IOptimizedNetwork* optNetObjPtr = optNet.get();
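The "Global" model options added in the hunk above are the channel by which the optimize and load phases share the import/export decision. A rough sketch of how such options could be read back at load time is shown below; the real check lives in LoadedNetwork::ValidateSourcesMatchOptimizedNetwork, so the helper name and fallback behaviour here are illustrative only.

// Illustrative helper, not the real LoadedNetwork code: look up one of the
// boolean flags ("ImportEnabled" / "ExportEnabled") recorded under the
// "Global" backend id in the optimized network's model options.
#include <armnn/BackendOptions.hpp>
#include <string>

bool GetGlobalFlag(const armnn::ModelOptions& modelOptions, const std::string& name)
{
    for (const auto& backendOptions : modelOptions)
    {
        if (backendOptions.GetBackendId() == armnn::BackendId("Global"))
        {
            for (size_t i = 0; i < backendOptions.GetOptionCount(); ++i)
            {
                const auto& option = backendOptions.GetOption(i);
                if (option.GetName() == name && option.GetValue().IsBool())
                {
                    return option.GetValue().AsBool();
                }
            }
        }
    }
    return false; // flag not recorded; treat as disabled
}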
@@ -1819,7 +1828,9 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
backends,
tensorHandleFactoryRegistry,
options.m_ImportEnabled,
+ options.m_ExportEnabled,
messages);
+
if (strategyResult.m_Error)
{
// Failed to apply the backend-specific optimizations
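For context, the scenario in the ticket title is the caller flow sketched below: optimize with import enabled and export disabled, then load the network with an input memory source but no output memory source. This is a hedged sketch, not code from the patch; the exact INetworkProperties constructor arguments may differ between ArmNN releases.

// Sketch of the previously failing scenario: imported inputs, copied outputs.
#include <armnn/IRuntime.hpp>
#include <string>
#include <utility>

armnn::Status LoadImportOnly(armnn::IRuntime& runtime,
                             armnn::IOptimizedNetworkPtr optNet,
                             armnn::NetworkId& networkIdOut)
{
    std::string errorMessage;
    // Malloc input source => inputs will be imported; Undefined output source
    // => outputs keep the default copy path.
    armnn::INetworkProperties properties(false,                           // async execution disabled
                                         armnn::MemorySource::Malloc,     // input memory source
                                         armnn::MemorySource::Undefined); // output memory source
    return runtime.LoadNetwork(networkIdOut, std::move(optNet), errorMessage, properties);
}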