author     Matteo Martincigh <matteo.martincigh@arm.com>    2019-01-24 14:06:23 +0000
committer  Matteo Martincigh <matteo.martincigh@arm.com>    2019-01-30 14:03:28 +0000
commit     adddddb6cbcb777d92a8c464c9ad0cb9aecc76a3
tree       b15de32bf9f8612f66e1ae23d2f8009e80e7d0e6
parent     d089b74bebbcc8518fb0f4eacb7e6569ae170199
IVGCVSW-2458 Refactor the Optimize function (Network.cpp) so that
subgraphs are optimized by the backends

 * Added a new method OptimizeSubGraph to the backend interface
 * Refactored the Optimize function so that the backend-specific optimization
   is performed by the backend itself (through the new OptimizeSubGraph
   interface method)
 * Added a new ApplyBackendOptimizations function to apply the new changes
 * Added some new convenient constructors to the SubGraph class
 * Added AddLayer method and a pointer to the parent graph to the SubGraph class
 * Updated the sub-graph unit tests to match the changes
 * Added SelectSubGraphs and ReplaceSubGraphConnections overloads that work
   with sub-graphs
 * Removed unused code and minor refactoring where necessary

Change-Id: I46181794c6a9e3b10558944f804e06a8f693a6d0
-rw-r--r--  include/armnn/BackendId.hpp                        |   2
-rw-r--r--  src/armnn/Graph.cpp                                |  63
-rw-r--r--  src/armnn/Graph.hpp                                |  33
-rw-r--r--  src/armnn/Network.cpp                              | 203
-rw-r--r--  src/armnn/NetworkUtils.cpp                         |  45
-rw-r--r--  src/armnn/NetworkUtils.hpp                         |   8
-rw-r--r--  src/armnn/SubGraph.cpp                             | 144
-rw-r--r--  src/armnn/SubGraph.hpp                             |  72
-rw-r--r--  src/armnn/SubGraphSelector.cpp                     |  37
-rw-r--r--  src/armnn/SubGraphSelector.hpp                     |  14
-rw-r--r--  src/armnn/test/SubGraphTests.cpp                   | 100
-rw-r--r--  src/backends/backendsCommon/IBackendInternal.hpp   |   4
-rw-r--r--  src/backends/cl/ClBackend.cpp                      |   9
-rw-r--r--  src/backends/cl/ClBackend.hpp                      |   5
-rw-r--r--  src/backends/neon/NeonBackend.cpp                  |   9
-rw-r--r--  src/backends/neon/NeonBackend.hpp                  |   5
-rw-r--r--  src/backends/reference/RefBackend.cpp              |  11
-rw-r--r--  src/backends/reference/RefBackend.hpp              |   5
18 files changed, 526 insertions, 243 deletions
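
Editor's note: an illustrative sketch, not part of this patch. The new
IBackendInternal::OptimizeSubGraph method (declared in the IBackendInternal.hpp
change below) is how a backend reports whether and how it optimized a sub-graph
assigned to it. The signature is taken from this diff; the backend class
"MyBackend" and the strategy of collapsing the sub-graph into a single
PreCompiledLayer are hypothetical, and only show how the return value and the
optimizationAttempted flag are consumed by ApplyBackendOptimizations in
Network.cpp.

    // Hypothetical backend override, shown only to illustrate the new interface.
    IBackendInternal::SubGraphUniquePtr MyBackend::OptimizeSubGraph(const SubGraph& subGraph,
                                                                    bool& optimizationAttempted) const
    {
        // Mark the optimization as attempted; from this point on, returning a null
        // pointer would be treated as a failure and trigger backend re-assignment.
        optimizationAttempted = true;

        // One possible strategy: collapse the whole sub-graph into a single
        // pre-compiled layer, added to the parent graph through the new
        // SubGraph::AddLayer method. A real implementation would also set the
        // pre-compiled object and the output tensor infos, as the removed
        // CreatePreCompiledLayer helper used to do.
        PreCompiledDescriptor descriptor(subGraph.GetNumInputSlots(), subGraph.GetNumOutputSlots());
        PreCompiledLayer* preCompiledLayer = subGraph.AddLayer<PreCompiledLayer>(descriptor, "pre-compiled");

        // The returned sub-graph wraps the new layer; the optimizer substitutes the
        // original sub-graph with it and assigns this backend to its layers.
        return std::make_unique<SubGraph>(subGraph, preCompiledLayer);
    }
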
diff --git a/include/armnn/BackendId.hpp b/include/armnn/BackendId.hpp
index 129cbb5d46..89725c165a 100644
--- a/include/armnn/BackendId.hpp
+++ b/include/armnn/BackendId.hpp
@@ -125,6 +125,8 @@ public:
return m_Id < other.m_Id;
}
+ bool IsCpuRef() const { return m_Id == GetComputeDeviceAsCString(Compute::CpuRef); }
+
const std::string& Get() const { return m_Id; }
private:
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 831d85e404..1bd4fbd85a 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -2,7 +2,9 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "Graph.hpp"
+#include "SubGraph.hpp"
#include "LayersFwd.hpp"
#include <armnn/Utils.hpp>
@@ -17,7 +19,6 @@
#include <DotSerializer.hpp>
#include <sstream>
-
namespace armnn
{
@@ -238,7 +239,7 @@ const Graph& Graph::TopologicalSort() const
it->ResetPriority();
}
- auto compareLayerPriority = [](const LayersList::value_type& layerA, const LayersList::value_type& layerB)
+ auto compareLayerPriority = [](const LayerList::value_type& layerA, const LayerList::value_type& layerB)
{
return layerA->GetPriority() < layerB->GetPriority();
};
@@ -306,44 +307,72 @@ void Graph::SubstituteSubGraph(std::unique_ptr<SubGraph> subGraph, IConnectableL
EraseSubGraphLayers(*subGraph);
}
+void Graph::SubstituteSubGraph(std::unique_ptr<SubGraph> subGraph, const SubGraph& substituteSubGraph)
+{
+ BOOST_ASSERT(subGraph);
+
+ ReplaceSubGraphConnections(*subGraph, substituteSubGraph);
+ EraseSubGraphLayers(*subGraph);
+}
+
void Graph::ReplaceSubGraphConnections(const SubGraph& subGraph, IConnectableLayer* substituteLayer)
{
BOOST_ASSERT(substituteLayer != nullptr);
BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), substituteLayer) != m_Layers.end(),
- "Substitue layer is not a member of graph");
+ "Substitute layer is not a member of graph");
+
+ SubGraph substituteSubGraph(subGraph, substituteLayer);
+ ReplaceSubGraphConnections(subGraph, substituteSubGraph);
+}
+
+void Graph::ReplaceSubGraphConnections(const SubGraph& subGraph, const SubGraph& substituteSubGraph)
+{
+ BOOST_ASSERT_MSG(!substituteSubGraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
+
+ const SubGraph::Layers& substituteSubGraphLayers = substituteSubGraph.GetLayers();
+ std::for_each(substituteSubGraphLayers.begin(), substituteSubGraphLayers.end(), [&](Layer* layer)
+ {
+ BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
+ "Substitute layer is not a member of graph");
+ });
const SubGraph::InputSlots& subGraphInputSlots = subGraph.GetInputSlots();
const SubGraph::OutputSlots& subGraphOutputSlots = subGraph.GetOutputSlots();
- const unsigned int numInputSlots = boost::numeric_cast<unsigned int>(subGraphInputSlots.size());
- const unsigned int numOutputSlots = boost::numeric_cast<unsigned int>(subGraphOutputSlots.size());
+ unsigned int subGraphNumInputSlots = boost::numeric_cast<unsigned int>(subGraphInputSlots.size());
+ unsigned int subGraphNumOutputSlots = boost::numeric_cast<unsigned int>(subGraphOutputSlots.size());
+
+ const SubGraph::InputSlots& substituteSubGraphInputSlots = substituteSubGraph.GetInputSlots();
+ const SubGraph::OutputSlots& substituteSubGraphOutputSlots = substituteSubGraph.GetOutputSlots();
+
+ BOOST_ASSERT(subGraphNumInputSlots == substituteSubGraphInputSlots.size());
+ BOOST_ASSERT(subGraphNumOutputSlots == substituteSubGraphOutputSlots.size());
- BOOST_ASSERT(numInputSlots == substituteLayer->GetNumInputSlots());
- BOOST_ASSERT(numOutputSlots == substituteLayer->GetNumOutputSlots());
+ // Disconnect the sub-graph and replace it with the substitute sub-graph
- // Disconnect the sub-graph and replace it with the substitute layer
// Step 1: process input slots
- for(unsigned int inputSlotIdx = 0u; inputSlotIdx < numInputSlots; ++inputSlotIdx)
+ for (unsigned int inputSlotIdx = 0; inputSlotIdx < subGraphNumInputSlots; ++inputSlotIdx)
{
InputSlot* subGraphInputSlot = subGraphInputSlots.at(inputSlotIdx);
- BOOST_ASSERT(subGraphInputSlot != nullptr);
+ BOOST_ASSERT(subGraphInputSlot);
IOutputSlot* connectedOutputSlot = subGraphInputSlot->GetConnection();
- BOOST_ASSERT(connectedOutputSlot != nullptr);
+ BOOST_ASSERT(connectedOutputSlot);
connectedOutputSlot->Disconnect(*subGraphInputSlot);
- IInputSlot& substituteInputSlot = substituteLayer->GetInputSlot(inputSlotIdx);
- connectedOutputSlot->Connect(substituteInputSlot);
+ IInputSlot* substituteInputSlot = substituteSubGraphInputSlots.at(inputSlotIdx);
+ BOOST_ASSERT(substituteInputSlot);
+ connectedOutputSlot->Connect(*substituteInputSlot);
}
// Step 2: process output slots
- for(unsigned int outputSlotIdx = 0u; outputSlotIdx < numOutputSlots; ++outputSlotIdx)
+ for(unsigned int outputSlotIdx = 0; outputSlotIdx < subGraphNumOutputSlots; ++outputSlotIdx)
{
OutputSlot* subGraphOutputSlot = subGraphOutputSlots.at(outputSlotIdx);
- BOOST_ASSERT(subGraphOutputSlot != nullptr);
+ BOOST_ASSERT(subGraphOutputSlot);
- OutputSlot* substituteOutputSlot = boost::polymorphic_downcast<OutputSlot*>(
- &substituteLayer->GetOutputSlot(outputSlotIdx));
+ OutputSlot* substituteOutputSlot = substituteSubGraphOutputSlots.at(outputSlotIdx);
+ BOOST_ASSERT(substituteOutputSlot);
subGraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
}
}
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index 8f93f56b4a..8046977411 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -6,7 +6,6 @@
#include "LayersFwd.hpp"
#include "IGraphObservable.hpp"
-#include "SubGraph.hpp"
#include <armnn/Types.hpp>
#include <armnn/TensorFwd.hpp>
@@ -25,21 +24,23 @@
namespace armnn
{
+class SubGraph;
+
class Graph
{
public:
- template <typename CVLayerT>
- static CVLayerT* PtrCast(Layer* const layer)
+ template <typename LayerType>
+ static LayerType* PtrCast(Layer* const layer)
{
- return boost::polymorphic_downcast<CVLayerT*>(layer);
+ return boost::polymorphic_downcast<LayerType*>(layer);
}
- using LayersList = std::list<Layer*>;
- using Iterator = LayersList::const_iterator; // Const so pointers in the list can't be modified externally.
- using ConstIterator = boost::transform_iterator<decltype(&PtrCast<const Layer>), Iterator>;
+ using LayerList = std::list<Layer*>;
+ using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
using IteratorDifference = Iterator::difference_type;
- using ConstIteratorInputs = boost::transform_iterator<decltype(&PtrCast<const InputLayer>), Iterator>;
+ using ConstIterator = boost::transform_iterator<decltype(&PtrCast<const Layer>), Iterator>;
+ using ConstIteratorInputs = boost::transform_iterator<decltype(&PtrCast<const InputLayer>), Iterator>;
using ConstIteratorOutputs = boost::transform_iterator<decltype(&PtrCast<const OutputLayer>), Iterator>;
/// Wrapper class returned by Graph::GetInputLayers()
@@ -49,13 +50,13 @@ public:
ConstIteratorInputs begin() const
{
- return { m_Graph.m_Layers.begin(), &PtrCast<const InputLayer> };
+ return { m_Graph.m_Layers.begin(), &(PtrCast<const InputLayer>) };
}
ConstIteratorInputs end() const
{
return { std::next(m_Graph.m_Layers.begin(), static_cast<IteratorDifference>(m_Graph.GetNumInputs())),
- &PtrCast<const InputLayer> };
+ &(PtrCast<const InputLayer>) };
}
const Graph& m_Graph;
@@ -69,12 +70,12 @@ public:
ConstIteratorOutputs begin() const
{
return { std::prev(m_Graph.m_Layers.end(), static_cast<IteratorDifference>(m_Graph.GetNumOutputs())),
- &PtrCast<const OutputLayer> };
+ &(PtrCast<const OutputLayer>) };
}
ConstIteratorOutputs end() const
{
- return { m_Graph.m_Layers.end(), &PtrCast<const OutputLayer> };
+ return { m_Graph.m_Layers.end(), &(PtrCast<const OutputLayer>) };
}
const Graph& m_Graph;
@@ -127,9 +128,9 @@ public:
Iterator end() { return m_Layers.end(); }
/// Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.
- ConstIterator begin() const { return {m_Layers.begin(), &PtrCast<const Layer>}; }
+ ConstIterator begin() const { return {m_Layers.begin(), &(PtrCast<const Layer>)}; }
/// Returns const iterator pointing to the end of the list. Lowercase for range-based for loops.
- ConstIterator end() const { return {m_Layers.end(), &PtrCast<const Layer>}; }
+ ConstIterator end() const { return {m_Layers.end(), &(PtrCast<const Layer>)}; }
/// Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops.
ConstIterator cbegin() const { return begin(); }
@@ -161,6 +162,7 @@ public:
void AddCopyLayers();
void SubstituteSubGraph(std::unique_ptr<SubGraph> subGraph, IConnectableLayer* substituteLayer);
+ void SubstituteSubGraph(std::unique_ptr<SubGraph> subGraph, const SubGraph& substituteSubGraph);
void InferTensorInfos();
@@ -214,10 +216,11 @@ private:
std::unordered_map<const Layer*, Iterator> m_PosInGraphMap;
void ReplaceSubGraphConnections(const SubGraph& subGraph, IConnectableLayer* substituteLayer);
+ void ReplaceSubGraphConnections(const SubGraph& subGraph, const SubGraph& substituteSubGraph);
void EraseSubGraphLayers(const SubGraph &subGraph);
/// Mutable to allow sorting on const object.
- mutable LayersList m_Layers;
+ mutable LayerList m_Layers;
mutable bool m_LayersInOrder;
std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 662a9ccd3c..7897a81d1e 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -295,70 +295,112 @@ OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
return result;
}
-OptimizationResult InsertPreCompiledLayers(OptimizedNetwork* optNetObjPtr,
- const IBackendInternalUniquePtr& backendObjPtr,
- BackendSettings& backendSettings,
- Optional<std::vector<std::string>&> errMessages)
+OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+ BackendSettings& backendSettings,
+ SubGraph& subGraph,
+ Optional<std::vector<std::string>&> errMessages)
{
- BOOST_ASSERT(backendObjPtr);
+ Graph::Iterator firstLayer = subGraph.begin();
+ Graph::Iterator lastLayer = subGraph.end();
+ return AssignBackends(optNetObjPtr,
+ backendSettings,
+ firstLayer,
+ lastLayer,
+ errMessages);
+}
+
+OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
+ BackendSettings& backendSettings,
+ Optional<std::vector<std::string>&> errMessages)
+{
+ BOOST_ASSERT(optNetObjPtr);
OptimizationResult result;
- // Select sub-graphs based on backend
- SubGraphSelector::SubGraphs subGraphs =
- SubGraphSelector::SelectSubGraphs(optNetObjPtr->GetGraph(),
- // select layers assigned to requested backend
- [&](const Layer& layer)
- {
- return layer.GetType() != LayerType::Input &&
- layer.GetType() != LayerType::Output &&
- layer.GetBackendId() == backendObjPtr->GetId();
- });
-
- if (subGraphs.empty())
- {
- // No sub-graphs found -> return with no error
- return result;
- }
+ // Get the optimized graph
+ Graph& optGraph = optNetObjPtr->GetGraph();
- // Convert sub-graphs and substitute them with pre-compiled layers
- unsigned int index = 0u;
- for (auto& subGraph : subGraphs)
+ // Get the entire graph as a sub-graph
+ SubGraph mainSubGraph(optGraph);
+
+ // Run backend specific optimizations
+ auto const& backendRegistry = BackendRegistryInstance();
+ for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
{
- // Create a pre-compiled layer
- PreCompiledLayer* preCompiledLayer = CreatePreCompiledLayer(optNetObjPtr->GetGraph(),
- *subGraph,
- index++,
- backendObjPtr);
- if (preCompiledLayer)
+ auto backendFactory = backendRegistry.GetFactory(selectedBackend);
+ auto backendObjPtr = backendFactory();
+ BOOST_ASSERT(backendObjPtr);
+
+ // Select sub-graphs based on backend
+ SubGraphSelector::SubGraphs subGraphs =
+ SubGraphSelector::SelectSubGraphs(mainSubGraph,
+ // Select layers assigned to the requested backend
+ [&backendObjPtr](const Layer& layer)
+ {
+ return layer.GetType() != LayerType::Input &&
+ layer.GetType() != LayerType::Output &&
+ layer.GetBackendId() == backendObjPtr->GetId();
+ });
+ if (subGraphs.empty())
{
- // Substitute sub-graph with pre-compiled layer in graph
- optNetObjPtr->GetGraph().SubstituteSubGraph(std::move(subGraph), preCompiledLayer);
+ // No sub-graphs found, try with next selected backend
+ continue;
}
- else
+
+ // Try to optimize each sub-graph
+ for (auto& subGraph : subGraphs)
{
- // Failed to create pre-compiled layer from sub-graph ->
- // re-assign sub-graph layers to other available backends
- std::stringstream warningMsg;
- warningMsg << "Sub-graph #" << index << " failed to compile on "
- << backendObjPtr->GetId() << ". Re-assigning backends to "
- << subGraph->GetLayers().size() << " layers inside sub-graph";
- ReportWarning(warningMsg.str(), errMessages);
-
- backendSettings.m_IgnoredBackends = { backendObjPtr->GetId() };
-
- Graph::Iterator firstLayer = subGraph->begin();
- Graph::Iterator lastLayer = subGraph->end();
- OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
- backendSettings,
- firstLayer,
- lastLayer,
- errMessages);
-
- if (reassignmentResult.m_Error)
+ // Try to optimize the current sub-graph
+ bool optimizationAttempted = false;
+ SubGraph::SubGraphPtr optSubGraph = backendObjPtr->OptimizeSubGraph(*subGraph, optimizationAttempted);
+
+ // Check if the optimization has been attempted
+ if (!optimizationAttempted)
{
- result.m_Error = true;
- return result;
+ // No optimization attempted, keep the current sub-graph as it is and move to the next one
+ continue;
+ }
+
+ // Optimization attempted, check the resulting optimized sub-graph
+ if (optSubGraph)
+ {
+ // Sub-graph optimized, substitute the sub-graph with the new optimized one in the main optimized graph
+ optGraph.SubstituteSubGraph(std::move(subGraph), *optSubGraph);
+
+ // Assign the current backend to the optimized sub-graph
+ std::for_each(optSubGraph->begin(), optSubGraph->end(), [&selectedBackend](Layer* l)
+ {
+ BOOST_ASSERT(l);
+ l->SetBackendId(selectedBackend);
+ });
+
+ // Recreate the sub-graph representing the entire graph
+ mainSubGraph.Update(optGraph);
+ }
+ else
+ {
+ // An error occurred: the optimization was attempted but not performed, try different backends
+ std::stringstream warningMsg;
+ warningMsg << "Sub-graph failed to get optimized on " << backendObjPtr->GetId() << ". "
+ << "Re-assigning backends to " << subGraph->GetLayers().size() << " layers inside sub-graph";
+ ReportWarning(warningMsg.str(), errMessages);
+
+ // Failed to optimize the given sub-graph, re-assign the sub-graph layers to other available backends
+ if (!backendObjPtr->GetId().IsCpuRef())
+ {
+ // Add the current backend to the list of backends to ignore
+ backendSettings.m_IgnoredBackends.insert(backendObjPtr->GetId());
+ }
+ OptimizationResult reassignmentResult = AssignBackends(optNetObjPtr,
+ backendSettings,
+ *subGraph,
+ errMessages);
+ if (reassignmentResult.m_Error)
+ {
+ // Failed to re-assign one of the remaining backends to each layer of the sub-graph
+ result.m_Error = true;
+ return result;
+ }
}
}
}
@@ -384,22 +426,25 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
OptimizedNetwork* optNetObjPtr = boost::polymorphic_downcast<OptimizedNetwork*>(optNet.get());
+ // Get the optimized graph
+ Graph& optGraph = optNetObjPtr->GetGraph();
+
// Perform optimisation passes
using namespace optimizations;
- Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(SquashEqualPermuteSiblings(),
- SquashEqualReshapeSiblings(),
- OptimizeInversePermutes(),
- MovePermuteUp(),
- PermuteAsReshape(),
- OptimizeConsecutiveReshapes()));
+ Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
+ SquashEqualReshapeSiblings(),
+ OptimizeInversePermutes(),
+ MovePermuteUp(),
+ PermuteAsReshape(),
+ OptimizeConsecutiveReshapes()));
- // Infer the tensor infos for all output slots. Throws an exception on failure.
- optNetObjPtr->GetGraph().InferTensorInfos();
+ // Infer the tensor infos for all output slots. Throws an exception on failure
+ optGraph.InferTensorInfos();
// If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
if (options.m_ReduceFp32ToFp16)
{
- Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
+ Optimizer::Pass(optGraph, MakeOptimizations(Fp32NetworkToFp16Converter()));
}
// Initialize backend settings
@@ -414,8 +459,8 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
}
// Assign an available backend to each layer
- Graph::Iterator firstLayer = optNetObjPtr->GetGraph().begin();
- Graph::Iterator lastLayer = optNetObjPtr->GetGraph().end();
+ Graph::Iterator firstLayer = optGraph.begin();
+ Graph::Iterator lastLayer = optGraph.end();
OptimizationResult assigBackendsResult = AssignBackends(optNetObjPtr,
backendSettings,
firstLayer,
@@ -427,22 +472,31 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
}
- Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(OptimizeInverseConversionsFp16(),
- OptimizeInverseConversionsFp32()));
+ Optimizer::Pass(optGraph, MakeOptimizations(OptimizeInverseConversionsFp16(),
+ OptimizeInverseConversionsFp32()));
+
+ // Apply the backend-specific optimizations
+ OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
+ backendSettings,
+ errMessages);
+ if (backendOptimizationResult.m_Error)
+ {
+ // Failed to apply the backend-specific optimizations
+ return IOptimizedNetworkPtr(nullptr, &IOptimizedNetwork::Destroy);
+ }
- // If the debug flag is set, then insert a DebugLayer after each layer.
- // NOTE: This optimization can only happen strictly after the PreCompiled layers have
- // already been inserted
+ // If the debug flag is set, then insert a DebugLayer after each layer
+ // Doing this after applying the backend optimizations as they might have changed some layers
if (options.m_Debug)
{
- Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer()));
+ Optimizer::Pass(optGraph, MakeOptimizations(InsertDebugLayer()));
}
- optNetObjPtr->GetGraph().AddCopyLayers();
+ optGraph.AddCopyLayers();
// Convert constants
- Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsFloatToHalf()));
- Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(ConvertConstantsHalfToFloat()));
+ Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsFloatToHalf()));
+ Optimizer::Pass(optGraph, MakeOptimizations(ConvertConstantsHalfToFloat()));
// Run backend specific optimizations
for (auto&& chosenBackend : backendSettings.m_SelectedBackends)
@@ -461,7 +515,6 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
return optNet;
}
-
Network::Network()
: m_Graph(std::make_unique<Graph>())
{
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index 735a6244d5..d0c36dd34f 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -111,49 +111,4 @@ std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer)
return debugLayers;
}
-PreCompiledLayer* CreatePreCompiledLayer(Graph& graph,
- const SubGraph& subGraph,
- unsigned int subGraphIndex,
- const IBackendInternalUniquePtr& backendObjPtr)
-{
- BOOST_ASSERT(backendObjPtr);
-
- IBackendInternal::ISubGraphConverterPtr converter =
- backendObjPtr->CreateSubGraphConverter(std::make_shared<SubGraph>(subGraph));
- if (!converter)
- {
- return nullptr;
- }
-
- try
- {
- // Attempt to convert and compile sub-graph
- auto preCompiledObject = converter->GetOutput();
- }
- catch (std::exception&)
- {
- return nullptr;
- }
-
- // Create pre-compiled layer
- std::string name = "pre-compiled" + std::to_string(subGraphIndex);
- PreCompiledLayer* preCompiledLayer = graph.AddLayer<PreCompiledLayer>(
- PreCompiledDescriptor(subGraph.GetNumInputSlots(), subGraph.GetNumOutputSlots()), name.c_str());
-
- // Copy output tensor infos from sub-graph
- for (unsigned int i = 0u; i < subGraph.GetNumOutputSlots(); i++)
- {
- preCompiledLayer->GetOutputSlot(i).SetTensorInfo(subGraph.GetOutputSlot(i)->GetTensorInfo());
- }
-
- // Assign pre-compiled object to layer
- preCompiledLayer->SetPreCompiledObject(converter->GetOutput());
-
- // Set the backend-id for the pre-compiled layer
- BackendId backendId = backendObjPtr->GetId();
- preCompiledLayer->SetBackendId(backendId);
-
- return preCompiledLayer;
-}
-
} // namespace armnn
diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp
index 1a520b7195..421c52a6a7 100644
--- a/src/armnn/NetworkUtils.hpp
+++ b/src/armnn/NetworkUtils.hpp
@@ -7,9 +7,6 @@
#include "DeviceSpec.hpp"
#include "Graph.hpp"
-#include "SubGraph.hpp"
-
-#include <backendsCommon/IBackendInternal.hpp>
namespace armnn
{
@@ -20,9 +17,4 @@ std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& g
std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer);
-PreCompiledLayer* CreatePreCompiledLayer(Graph& graph,
- const SubGraph& subGraph,
- unsigned int subGraphIndex,
- const IBackendInternalUniquePtr& backendObject);
-
} // namespace armnn
diff --git a/src/armnn/SubGraph.cpp b/src/armnn/SubGraph.cpp
index 74a1838ef0..d0fc760c15 100644
--- a/src/armnn/SubGraph.cpp
+++ b/src/armnn/SubGraph.cpp
@@ -3,33 +3,147 @@
// SPDX-License-Identifier: MIT
//
-#include "Layer.hpp"
#include "SubGraph.hpp"
+#include "Graph.hpp"
#include <boost/numeric/conversion/cast.hpp>
+#include <utility>
+
namespace armnn
{
-SubGraph::SubGraph()
+namespace
+{
+
+template <class C>
+void AssertIfNullsOrDuplicates(const C& container, const std::string& errorMessage)
+{
+ using T = typename C::value_type;
+ std::unordered_set<T> duplicateSet;
+ std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
+ {
+ // Ignore unused for release builds
+ boost::ignore_unused(errorMessage);
+
+ // Check if the item is valid
+ BOOST_ASSERT_MSG(i, errorMessage.c_str());
+
+ // Check if a duplicate has been found
+ BOOST_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
+
+ duplicateSet.insert(i);
+ });
+}
+
+} // anonymous namespace
+
+SubGraph::SubGraph(Graph& graph)
+ : m_InputSlots{}
+ , m_OutputSlots{}
+ , m_Layers(graph.begin(), graph.end())
+ , m_ParentGraph(&graph)
+{
+ CheckSubGraph();
+}
+
+SubGraph::SubGraph(Graph* parentGraph, InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers)
+ : m_InputSlots{inputs}
+ , m_OutputSlots{outputs}
+ , m_Layers{layers}
+ , m_ParentGraph(parentGraph)
{
+ CheckSubGraph();
}
-SubGraph::SubGraph(InputSlots && inputs,
- OutputSlots && outputs,
- Layers && layers)
-: m_InputSlots{inputs}
-, m_OutputSlots{outputs}
-, m_Layers{layers}
+SubGraph::SubGraph(const SubGraph& referenceSubGraph, InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers)
+ : m_InputSlots{inputs}
+ , m_OutputSlots{outputs}
+ , m_Layers{layers}
+ , m_ParentGraph(referenceSubGraph.m_ParentGraph)
{
+ CheckSubGraph();
+}
+
+SubGraph::SubGraph(const SubGraph& subGraph)
+ : m_InputSlots(subGraph.m_InputSlots.begin(), subGraph.m_InputSlots.end())
+ , m_OutputSlots(subGraph.m_OutputSlots.begin(), subGraph.m_OutputSlots.end())
+ , m_Layers(subGraph.m_Layers.begin(), subGraph.m_Layers.end())
+ , m_ParentGraph(subGraph.m_ParentGraph)
+{
+ CheckSubGraph();
+}
+
+SubGraph::SubGraph(SubGraph&& subGraph)
+ : m_InputSlots(std::move(subGraph.m_InputSlots))
+ , m_OutputSlots(std::move(subGraph.m_OutputSlots))
+ , m_Layers(std::move(subGraph.m_Layers))
+ , m_ParentGraph(std::exchange(subGraph.m_ParentGraph, nullptr))
+{
+ CheckSubGraph();
+}
+
+SubGraph::SubGraph(const SubGraph& referenceSubGraph, IConnectableLayer* layer)
+ : m_InputSlots{}
+ , m_OutputSlots{}
+ , m_Layers{boost::polymorphic_downcast<Layer*>(layer)}
+ , m_ParentGraph(referenceSubGraph.m_ParentGraph)
+{
+ unsigned int numInputSlots = layer->GetNumInputSlots();
+ m_InputSlots.resize(numInputSlots);
+ for (unsigned int i = 0; i < numInputSlots; i++)
+ {
+ m_InputSlots.at(i) = boost::polymorphic_downcast<InputSlot*>(&(layer->GetInputSlot(i)));
+ }
+
+ unsigned int numOutputSlots = layer->GetNumOutputSlots();
+ m_OutputSlots.resize(numOutputSlots);
+ for (unsigned int i = 0; i < numOutputSlots; i++)
+ {
+ m_OutputSlots.at(i) = boost::polymorphic_downcast<OutputSlot*>(&(layer->GetOutputSlot(i)));
+ }
+
+ CheckSubGraph();
+}
+
+void SubGraph::CheckSubGraph()
+{
+ // Check that the sub-graph has a valid parent graph
+ BOOST_ASSERT_MSG(m_ParentGraph, "Sub-graphs must have a parent graph");
+
+ // Check for invalid or duplicate input slots
+ AssertIfNullsOrDuplicates(m_InputSlots, "Sub-graphs cannot contain null or duplicate input slots");
+
+ // Check for invalid or duplicate output slots
+ AssertIfNullsOrDuplicates(m_OutputSlots, "Sub-graphs cannot contain null or duplicate output slots");
+
+ // Check for invalid or duplicate layers
+ AssertIfNullsOrDuplicates(m_Layers, "Sub-graphs cannot contain null or duplicate layers");
+
+ // Check that all the layers of the sub-graph belong to the parent graph
+ std::for_each(m_Layers.begin(), m_Layers.end(), [&](const Layer* l)
+ {
+ BOOST_ASSERT_MSG(std::find(m_ParentGraph->begin(), m_ParentGraph->end(), l) != m_ParentGraph->end(),
+ "Sub-graph layer is not a member of the parent graph");
+ });
+}
+
+void SubGraph::Update(Graph &graph)
+{
+ m_InputSlots.clear();
+ m_OutputSlots.clear();
+ m_Layers.assign(graph.begin(), graph.end());
+ m_ParentGraph = &graph;
+
+ CheckSubGraph();
}
-const SubGraph::InputSlots & SubGraph::GetInputSlots() const
+const SubGraph::InputSlots& SubGraph::GetInputSlots() const
{
return m_InputSlots;
}
-const SubGraph::OutputSlots & SubGraph::GetOutputSlots() const
+const SubGraph::OutputSlots& SubGraph::GetOutputSlots() const
{
return m_OutputSlots;
}
@@ -74,27 +188,27 @@ SubGraph::Layers::iterator SubGraph::begin()
return m_Layers.begin();
}
-SubGraph::Layers::iterator SubGraph::end()
+SubGraph::Iterator SubGraph::end()
{
return m_Layers.end();
}
-SubGraph::Layers::const_iterator SubGraph::begin() const
+SubGraph::ConstIterator SubGraph::begin() const
{
return m_Layers.begin();
}
-SubGraph::Layers::const_iterator SubGraph::end() const
+SubGraph::ConstIterator SubGraph::end() const
{
return m_Layers.end();
}
-SubGraph::Layers::const_iterator SubGraph::cbegin() const
+SubGraph::ConstIterator SubGraph::cbegin() const
{
return begin();
}
-SubGraph::Layers::const_iterator SubGraph::cend() const
+SubGraph::ConstIterator SubGraph::cend() const
{
return end();
}
diff --git a/src/armnn/SubGraph.hpp b/src/armnn/SubGraph.hpp
index d22377daff..81166f1285 100644
--- a/src/armnn/SubGraph.hpp
+++ b/src/armnn/SubGraph.hpp
@@ -6,6 +6,7 @@
#pragma once
#include "Layer.hpp"
+#include "Graph.hpp"
#include <vector>
#include <list>
@@ -22,18 +23,46 @@ namespace armnn
class SubGraph final
{
public:
- using InputSlots = std::vector<InputSlot *>;
- using OutputSlots = std::vector<OutputSlot *>;
+ using SubGraphPtr = std::unique_ptr<SubGraph>;
+ using InputSlots = std::vector<InputSlot*>;
+ using OutputSlots = std::vector<OutputSlot*>;
using Layers = std::list<Layer*>;
+ using Iterator = Layers::iterator;
+ using ConstIterator = Layers::const_iterator;
- SubGraph();
- SubGraph(InputSlots && inputs,
- OutputSlots && outputs,
- Layers && layers);
+ /// Empty subgraphs are not allowed, they must at least have a parent graph.
+ SubGraph() = delete;
- const InputSlots & GetInputSlots() const;
- const OutputSlots & GetOutputSlots() const;
- const Layers & GetLayers() const;
+ /// Constructs a sub-graph from the entire given graph.
+ SubGraph(Graph& graph);
+
+ /// Constructs a sub-graph with the given arguments and binds it to the specified parent graph.
+ SubGraph(Graph* parentGraph, InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers);
+
+ /// Constructs a sub-graph with the given arguments and uses the specified sub-graph to get a reference
+ /// to the parent graph.
+ SubGraph(const SubGraph& referenceSubGraph, InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers);
+
+ /// Copy-constructor.
+ SubGraph(const SubGraph& subGraph);
+
+ /// Move-constructor.
+ SubGraph(SubGraph&& subGraph);
+
+ /// Constructs a sub-graph with only the given layer and uses the specified sub-graph to get a reference
+ /// to the parent graph.
+ SubGraph(const SubGraph& referenceSubGraph, IConnectableLayer* layer);
+
+ /// Updates this sub-graph with the contents of the whole given graph.
+ void Update(Graph& graph);
+
+ /// Adds a new layer, of type LayerType, to the graph this sub-graph is a view of.
+ template <typename LayerT, typename... Args>
+ LayerT* AddLayer(Args&&... args) const;
+
+ const InputSlots& GetInputSlots() const;
+ const OutputSlots& GetOutputSlots() const;
+ const Layers& GetLayers() const;
const InputSlot* GetInputSlot(unsigned int index) const;
InputSlot* GetInputSlot(unsigned int index);
@@ -44,19 +73,32 @@ public:
unsigned int GetNumInputSlots() const;
unsigned int GetNumOutputSlots() const;
- Layers::iterator begin();
- Layers::iterator end();
+ Iterator begin();
+ Iterator end();
- Layers::const_iterator begin() const;
- Layers::const_iterator end() const;
+ ConstIterator begin() const;
+ ConstIterator end() const;
- Layers::const_iterator cbegin() const;
- Layers::const_iterator cend() const;
+ ConstIterator cbegin() const;
+ ConstIterator cend() const;
private:
+ void CheckSubGraph();
+
InputSlots m_InputSlots;
OutputSlots m_OutputSlots;
Layers m_Layers;
+
+ /// Pointer to the graph this sub-graph is a view of.
+ Graph* m_ParentGraph;
};
+template <typename LayerT, typename... Args>
+LayerT* SubGraph::AddLayer(Args&&... args) const
+{
+ BOOST_ASSERT(m_ParentGraph);
+
+ return m_ParentGraph->AddLayer<LayerT>(args...);
+}
+
} // namespace armnn
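
Editor's note: an illustrative usage sketch, not part of this patch. The SubGraph
changes above let a sub-graph act as a view of an entire graph, add layers to the
parent graph it points to, and be refreshed after the parent graph changes. The
layer types and names below are examples only.

    // Hypothetical snippet showing the new SubGraph(Graph&), AddLayer and Update.
    Graph graph;
    graph.AddLayer<InputLayer>(0, "input");
    graph.AddLayer<OutputLayer>(0, "output");

    // View of the whole graph; the input/output slot lists of the view are empty.
    SubGraph view(graph);

    // Layers added through the view are added to its parent graph.
    view.AddLayer<ActivationLayer>(ActivationDescriptor{}, "activation");

    // After the parent graph has been modified, re-sync the view.
    view.Update(graph);
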
diff --git a/src/armnn/SubGraphSelector.cpp b/src/armnn/SubGraphSelector.cpp
index d0542fd41f..4abf01c88f 100644
--- a/src/armnn/SubGraphSelector.cpp
+++ b/src/armnn/SubGraphSelector.cpp
@@ -69,25 +69,25 @@ struct LayerSelectionInfo
return m_Layer->GetType() == armnn::LayerType::Input;
}
- void CollectNonSelectedInputs(SubGraph::InputSlots& slots,
+ void CollectNonSelectedInputs(SubGraph::InputSlots& inputSlots,
const SubGraphSelector::LayerSelectorFunction& selector)
{
for (auto&& slot = m_Layer->BeginInputSlots(); slot != m_Layer->EndInputSlots(); ++slot)
{
OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
- BOOST_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The slots must be connected here.");
+ BOOST_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
if (parentLayerOutputSlot)
{
Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer();
if (selector(parentLayer) == false)
{
- slots.push_back(&(*slot));
+ inputSlots.push_back(&(*slot));
}
}
}
}
- void CollectNonSelectedOutputSlots(SubGraph::OutputSlots& slots,
+ void CollectNonSelectedOutputSlots(SubGraph::OutputSlots& outputSlots,
const SubGraphSelector::LayerSelectorFunction& selector)
{
for (auto&& slot = m_Layer->BeginOutputSlots(); slot != m_Layer->EndOutputSlots(); ++slot)
@@ -97,7 +97,7 @@ struct LayerSelectionInfo
Layer& childLayer = childLayerInputSlot->GetOwningLayer();
if (selector(childLayer) == false)
{
- slots.push_back(&(*slot));
+ outputSlots.push_back(&(*slot));
}
}
}
@@ -112,12 +112,18 @@ struct LayerSelectionInfo
} // namespace <anonymous>
SubGraphSelector::SubGraphs
-SubGraphSelector::SelectSubGraphs(Graph& graph,
- const LayerSelectorFunction& selector)
+SubGraphSelector::SelectSubGraphs(Graph& graph, const LayerSelectorFunction& selector)
+{
+ SubGraph subGraph(graph);
+ return SubGraphSelector::SelectSubGraphs(subGraph, selector);
+}
+
+SubGraphSelector::SubGraphs
+SubGraphSelector::SelectSubGraphs(SubGraph& subGraph, const LayerSelectorFunction& selector)
{
LayerSelectionInfo::LayerInfoContainer layerInfo;
- for (auto& layer : graph)
+ for (auto& layer : subGraph)
{
layerInfo.emplace(layer, LayerSelectionInfo{layer, selector});
}
@@ -127,7 +133,7 @@ SubGraphSelector::SelectSubGraphs(Graph& graph,
{
if (info.second.IsInputLayer())
{
- // for each input layer we mark the graph where subgraph
+ // For each input layer we mark the graph where subgraph
// splits need to happen because of the dependency between
// the selected and non-selected nodes
info.second.MarkChildrenSplits(layerInfo, splitNo, false);
@@ -159,20 +165,19 @@ SubGraphSelector::SelectSubGraphs(Graph& graph,
{
if (splitGraph.second.empty() == false)
{
- SubGraph::OutputSlots outputs;
SubGraph::InputSlots inputs;
+ SubGraph::OutputSlots outputs;
SubGraph::Layers layers;
for (auto&& infoPtr : splitGraph.second)
{
- infoPtr->CollectNonSelectedOutputSlots(outputs, selector);
infoPtr->CollectNonSelectedInputs(inputs, selector);
+ infoPtr->CollectNonSelectedOutputSlots(outputs, selector);
layers.push_back(infoPtr->m_Layer);
}
- result.emplace_back(
- std::make_unique<SubGraph>(
- std::move(inputs),
- std::move(outputs),
- std::move(layers)));
+ result.emplace_back(std::make_unique<SubGraph>(subGraph,
+ std::move(inputs),
+ std::move(outputs),
+ std::move(layers)));
}
}
diff --git a/src/armnn/SubGraphSelector.hpp b/src/armnn/SubGraphSelector.hpp
index f96b986a2e..371ba78f18 100644
--- a/src/armnn/SubGraphSelector.hpp
+++ b/src/armnn/SubGraphSelector.hpp
@@ -21,14 +21,12 @@ public:
using SubGraphs = std::vector<SubGraphPtr>;
using LayerSelectorFunction = std::function<bool(const Layer&)>;
- /// Selects subgraphs of a graph based on the selector function
- /// and the algorithm. Since the SubGraphs returns modifiable pointers
- /// the input and output slots of the graph:
- /// 1) the graph cannot be const
- /// 2) the caller need to make sure that the SubGraphs lifetime is
- /// shorter than the graph's
- static SubGraphs SelectSubGraphs(Graph& graph,
- const LayerSelectorFunction& selector);
+ /// Selects subgraphs from a graph based on the selector function and the algorithm.
+ /// Since the SubGraphs object returns modifiable pointers to the input and output slots of the graph:
+ /// 1) the graph/sub-graph cannot be const
+ /// 2) the caller needs to make sure that the SubGraphs lifetime is shorter than the parent graph's
+ static SubGraphs SelectSubGraphs(Graph& graph, const LayerSelectorFunction& selector);
+ static SubGraphs SelectSubGraphs(SubGraph& subGraph, const LayerSelectorFunction& selector);
private:
// this is a utility class, don't construct or copy
diff --git a/src/armnn/test/SubGraphTests.cpp b/src/armnn/test/SubGraphTests.cpp
index 9e49197ea6..e5b444a076 100644
--- a/src/armnn/test/SubGraphTests.cpp
+++ b/src/armnn/test/SubGraphTests.cpp
@@ -34,7 +34,7 @@ bool AreAnySubGraphLayersPresentInGraph(const SubGraph::Layers &subGraphLayers,
//
// this helper only works if all layers where the inputs connect to are not selected
//
-SubGraph::InputSlots CreateInputsFrom(const std::vector<Layer *> & layers)
+SubGraph::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
{
SubGraph::InputSlots result;
for (auto&& layer : layers)
@@ -50,7 +50,7 @@ SubGraph::InputSlots CreateInputsFrom(const std::vector<Layer *> & layers)
//
// this helper only works if all layers where the outputs connect to are not selected
//
-SubGraph::OutputSlots CreateOutputsFrom(const std::vector<Layer *> & layers)
+SubGraph::OutputSlots CreateOutputsFrom(const std::vector<Layer*>& layers)
{
SubGraph::OutputSlots result;
for (auto && layer : layers)
@@ -67,11 +67,12 @@ SubGraph::OutputSlots CreateOutputsFrom(const std::vector<Layer *> & layers)
// this takes the inputs, outputs and layers as a copy and the move these copies into the
// resulting subgraph, so the pass bay value is intentional
//
-SubGraphSelector::SubGraphPtr CreateSubGraphFrom(SubGraph::InputSlots inputs,
- SubGraph::OutputSlots outputs,
- SubGraph::Layers layers)
+SubGraphSelector::SubGraphPtr CreateSubGraphFrom(Graph& graph,
+ SubGraph::InputSlots&& inputs,
+ SubGraph::OutputSlots&& outputs,
+ SubGraph::Layers&& layers)
{
- return std::make_unique<SubGraph>(std::move(inputs), std::move(outputs), std::move(layers));
+ return std::make_unique<SubGraph>(&graph, std::move(inputs), std::move(outputs), std::move(layers));
}
template <typename T, typename Iterator>
@@ -146,8 +147,10 @@ BOOST_AUTO_TEST_CASE(SingleInputSingleOutput)
convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubGraphSelector::SubGraphPtr subGraph =
- CreateSubGraphFrom(CreateInputsFrom({convLayer1}), CreateOutputsFrom({convLayer2}), {});
+ SubGraphSelector::SubGraphPtr subGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({convLayer1}),
+ CreateOutputsFrom({convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
IOutputSlot* subGraphInputConn = subGraph->GetInputSlot(0)->GetConnection();
@@ -192,8 +195,10 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubGraphSelector::SubGraphPtr subGraph =
- CreateSubGraphFrom(CreateInputsFrom({convLayer1, convLayer2}), CreateOutputsFrom({mergerLayer}), {});
+ SubGraphSelector::SubGraphPtr subGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({convLayer1, convLayer2}),
+ CreateOutputsFrom({mergerLayer}),
+ {});
// Save sub-graph connections for comparison after substitution
IOutputSlot* subGraphInputConn1 = subGraph->GetInputSlot(0)->GetConnection();
@@ -240,8 +245,10 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubGraphSelector::SubGraphPtr subGraph =
- CreateSubGraphFrom(CreateInputsFrom({splitterLayer}), CreateOutputsFrom({convLayer1, convLayer2}), {});
+ SubGraphSelector::SubGraphPtr subGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({splitterLayer}),
+ CreateOutputsFrom({convLayer1, convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
IOutputSlot* subGraphInputConn1 = subGraph->GetInputSlot(0)->GetConnection();
@@ -290,7 +297,8 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubGraphSelector::SubGraphPtr subGraph = CreateSubGraphFrom(CreateInputsFrom({convLayer1, convLayer2}),
+ SubGraphSelector::SubGraphPtr subGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({convLayer1, convLayer2}),
CreateOutputsFrom({convLayer1, convLayer2}),
{});
@@ -336,8 +344,10 @@ BOOST_AUTO_TEST_CASE(EraseReplacedLayers)
graph.AddLayer<OutputLayer>(0, "output");
// Construct sub-graph
- SubGraphSelector::SubGraphPtr subGraph =
- CreateSubGraphFrom({}, {}, {splitterLayer, convLayer1, convLayer2, mergerLayer});
+ SubGraphSelector::SubGraphPtr subGraph = CreateSubGraphFrom(graph,
+ {},
+ {},
+ {splitterLayer, convLayer1, convLayer2, mergerLayer});
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(0, 0);
@@ -357,6 +367,36 @@ BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(SubGraphSelection)
+BOOST_AUTO_TEST_CASE(SubGraphForEmptyGraph)
+{
+ Graph graph;
+ SubGraph subGraph(graph);
+
+ BOOST_TEST(subGraph.GetInputSlots().empty());
+ BOOST_TEST(subGraph.GetOutputSlots().empty());
+ BOOST_TEST(subGraph.GetLayers().empty());
+}
+
+BOOST_AUTO_TEST_CASE(SubGraphForEntireGraph)
+{
+ Graph graph;
+
+ auto output = graph.AddLayer<OutputLayer>(0, "output");
+ auto mid0 = graph.InsertNewLayer<ActivationLayer>(output->GetInputSlot(0),
+ ActivationDescriptor{},
+ "mid0");
+ auto mid1 = graph.InsertNewLayer<ActivationLayer>(mid0->GetInputSlot(0),
+ ActivationDescriptor{},
+ "mid1");
+ graph.InsertNewLayer<InputLayer>(mid1->GetInputSlot(0), 0, "input");
+
+ SubGraph subGraph(graph);
+
+ BOOST_TEST(subGraph.GetInputSlots().empty());
+ BOOST_TEST(subGraph.GetOutputSlots().empty());
+ BOOST_TEST(subGraph.GetLayers().size() == graph.GetNumLayers());
+}
+
BOOST_AUTO_TEST_CASE(NoSubGraphsForNoMatch)
{
Graph graph;
@@ -390,7 +430,8 @@ BOOST_AUTO_TEST_CASE(OneSubGraphsSelectedASingleMatch)
BOOST_TEST(subGraphs.size() == 1);
if (subGraphs.size() == 1)
{
- auto expected = CreateSubGraphFrom(CreateInputsFrom({output}),
+ auto expected = CreateSubGraphFrom(graph,
+ CreateInputsFrom({output}),
// outputs of 'output' will be empty
CreateOutputsFrom({output}),
{output});
@@ -425,7 +466,8 @@ BOOST_AUTO_TEST_CASE(MultipleLayersSelectedInTheMiddle)
BOOST_TEST(subGraphs.size() == 1);
if (subGraphs.size() == 1)
{
- auto expected = CreateSubGraphFrom(CreateInputsFrom({mid1}),
+ auto expected = CreateSubGraphFrom(graph,
+ CreateInputsFrom({mid1}),
CreateOutputsFrom({mid0}),
{mid1, mid0});
@@ -496,11 +538,13 @@ BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
});
// expected results to test against
- auto largerSubGraph = CreateSubGraphFrom(CreateInputsFrom({m1, m4}),
+ auto largerSubGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({m1, m4}),
CreateOutputsFrom({m3, m4}),
{m1, m4, m2, m3});
- auto smallerSubGraph = CreateSubGraphFrom(CreateInputsFrom({m5}),
+ auto smallerSubGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({m5}),
CreateOutputsFrom({m5}),
{m5});
@@ -572,11 +616,13 @@ BOOST_AUTO_TEST_CASE(MultipleSimpleSubGraphs)
});
// expected results to test against
- auto largerSubGraph = CreateSubGraphFrom(CreateInputsFrom({m1}),
+ auto largerSubGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({m1}),
CreateOutputsFrom({m2}),
{m1, m2});
- auto smallerSubGraph = CreateSubGraphFrom(CreateInputsFrom({m3}),
+ auto smallerSubGraph = CreateSubGraphFrom(graph,
+ CreateInputsFrom({m3}),
CreateOutputsFrom({m3}),
{m3});
@@ -644,7 +690,8 @@ BOOST_AUTO_TEST_CASE(SimpleLinearTest)
BOOST_CHECK(subGraphs.size() == 1);
if(subGraphs.size() == 1)
{
- auto expected = CreateSubGraphFrom(CreateInputsFrom({layerM1}),
+ auto expected = CreateSubGraphFrom(graph,
+ CreateInputsFrom({layerM1}),
CreateOutputsFrom({layerM2}),
{layerM1, layerM2});
@@ -699,7 +746,8 @@ BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
BOOST_CHECK(subGraphs.size() == 1);
if (subGraphs.size() == 1)
{
- auto expected = CreateSubGraphFrom(CreateInputsFrom({layerM1, layerM2}),
+ auto expected = CreateSubGraphFrom(graph,
+ CreateInputsFrom({layerM1, layerM2}),
CreateOutputsFrom({layerM3}),
{layerM1, layerM2, layerM3});
@@ -755,7 +803,8 @@ BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
BOOST_CHECK(subGraphs.size() == 1);
if(subGraphs.size() == 1)
{
- auto expected = CreateSubGraphFrom(CreateInputsFrom({layerM1}),
+ auto expected = CreateSubGraphFrom(graph,
+ CreateInputsFrom({layerM1}),
CreateOutputsFrom({layerM2, layerM3}),
{layerM1, layerM2, layerM3});
@@ -819,7 +868,8 @@ BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
BOOST_CHECK(subGraphs.size() == 1);
if (subGraphs.size() == 1)
{
- auto expected = CreateSubGraphFrom(CreateInputsFrom({m1, m2}),
+ auto expected = CreateSubGraphFrom(graph,
+ CreateInputsFrom({m1, m2}),
CreateOutputsFrom({m4, m5}),
{m1, m2, m3, m4, m5});
diff --git a/src/backends/backendsCommon/IBackendInternal.hpp b/src/backends/backendsCommon/IBackendInternal.hpp
index 2e6b056798..6e6d47fc90 100644
--- a/src/backends/backendsCommon/IBackendInternal.hpp
+++ b/src/backends/backendsCommon/IBackendInternal.hpp
@@ -43,6 +43,8 @@ public:
using ISubGraphConverterPtr = std::unique_ptr<ISubGraphConverter>;
+ using SubGraphUniquePtr = std::unique_ptr<SubGraph>;
+
virtual IMemoryManagerUniquePtr CreateMemoryManager() const = 0;
virtual IWorkloadFactoryPtr CreateWorkloadFactory(
@@ -54,6 +56,8 @@ public:
virtual Optimizations GetOptimizations() const = 0;
virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
+
+ virtual SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph, bool& optimizationAttempted) const = 0;
};
using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 2f9dfa9755..dfa5e7c777 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -80,4 +80,13 @@ IBackendInternal::ILayerSupportSharedPtr ClBackend::GetLayerSupport() const
return layerSupport;
}
+IBackendInternal::SubGraphUniquePtr ClBackend::OptimizeSubGraph(const SubGraph& subGraph,
+ bool& optimizationAttempted) const
+{
+ // Not trying to optimize the given sub-graph
+ optimizationAttempted = false;
+
+ return SubGraphUniquePtr{};
+}
+
} // namespace armnn
diff --git a/src/backends/cl/ClBackend.hpp b/src/backends/cl/ClBackend.hpp
index 84b5b9a9d2..8133ce49f6 100644
--- a/src/backends/cl/ClBackend.hpp
+++ b/src/backends/cl/ClBackend.hpp
@@ -30,6 +30,9 @@ public:
IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+
+ IBackendInternal::SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph,
+ bool& optimizationAttempted) const override;
};
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index ce97a1d03c..046685a65a 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -80,4 +80,13 @@ IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport() const
return layerSupport;
}
+IBackendInternal::SubGraphUniquePtr NeonBackend::OptimizeSubGraph(const SubGraph& subGraph,
+ bool& optimizationAttempted) const
+{
+ // Not trying to optimize the given sub-graph
+ optimizationAttempted = false;
+
+ return SubGraphUniquePtr{};
+}
+
} // namespace armnn
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 3b1d186a06..634704571c 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -30,6 +30,9 @@ public:
IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+
+ IBackendInternal::SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph,
+ bool& optimizationAttempted) const override;
};
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/backends/reference/RefBackend.cpp b/src/backends/reference/RefBackend.cpp
index e4f468c15e..d5f5348642 100644
--- a/src/backends/reference/RefBackend.cpp
+++ b/src/backends/reference/RefBackend.cpp
@@ -73,4 +73,13 @@ IBackendInternal::ILayerSupportSharedPtr RefBackend::GetLayerSupport() const
return layerSupport;
}
-} // namespace armnn
\ No newline at end of file
+IBackendInternal::SubGraphUniquePtr RefBackend::OptimizeSubGraph(const SubGraph& subGraph,
+ bool& optimizationAttempted) const
+{
+ // Not trying to optimize the given sub-graph
+ optimizationAttempted = false;
+
+ return SubGraphUniquePtr{};
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/RefBackend.hpp b/src/backends/reference/RefBackend.hpp
index 51366221c7..6305bf568c 100644
--- a/src/backends/reference/RefBackend.hpp
+++ b/src/backends/reference/RefBackend.hpp
@@ -30,6 +30,9 @@ public:
IBackendInternal::Optimizations GetOptimizations() const override;
IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
+
+ IBackendInternal::SubGraphUniquePtr OptimizeSubGraph(const SubGraph& subGraph,
+ bool& optimizationAttempted) const override;
};
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn