Diffstat (limited to 'src/armnn'):
 src/armnn/Graph.cpp                  |  36
 src/armnn/Graph.hpp                  |   4
 src/armnn/Layer.cpp                  |   5
 src/armnn/Layer.hpp                  |   2
 src/armnn/Network.cpp                | 267
 src/armnn/Network.hpp                |   8
 src/armnn/SubgraphView.cpp           | 168
 src/armnn/SubgraphView.hpp           |  71
 src/armnn/SubgraphViewSelector.cpp   |  62
 src/armnn/test/SubgraphViewTests.cpp | 393
 src/armnn/test/UnitTests.hpp         |   2
 11 files changed, 767 insertions(+), 251 deletions(-)
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 6d24e50bdc..cdb323432c 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -445,10 +445,13 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substi
void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
// Look through each layer in the new subgraph and add any that are not already a member of this graph
- substituteSubgraph.ForEachLayer([this](Layer* layer)
+ substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
{
- if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
+ if (std::find(std::begin(m_Layers),
+ std::end(m_Layers),
+ iConnectableLayer) == std::end(m_Layers))
{
+ auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
layer->Reparent(*this, m_Layers.end());
m_LayersInOrder = false;
}
@@ -461,24 +464,26 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& subst
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
- ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
+ ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
+ "New sub-graph used for substitution must not be empty");
- const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
- std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
+ const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
+ std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
{
IgnoreUnused(layer);
+ layer = PolymorphicDowncast<Layer*>(layer);
ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
- const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraph.GetOutputSlots();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();
unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());
- const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
- const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();
+ const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();
ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
@@ -488,7 +493,7 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
// Step 1: process input slots
for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
{
- InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
+ IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
ARMNN_ASSERT(subgraphInputSlot);
IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
@@ -503,19 +508,24 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
// Step 2: process output slots
for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
{
- OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
+ auto subgraphOutputSlot =
+ PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
ARMNN_ASSERT(subgraphOutputSlot);
- OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
+ auto substituteOutputSlot =
+ PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
ARMNN_ASSERT(substituteOutputSlot);
+
subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
}
}
void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
{
- for (auto layer : subgraph.GetLayers())
+
+ for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
{
+ auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
EraseLayer(layer);
}
subgraph.Clear();
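
Usage sketch (illustrative, not part of the change above): the pattern the substitution code now relies on is to walk a SubgraphView through the public IConnectableLayer interface and downcast to the internal Layer type only where graph mutation is needed. The VisitSubgraph helper and the include paths are assumptions for illustration.

    #include "SubgraphView.hpp"
    #include "Layer.hpp"
    #include <armnn/utility/IgnoreUnused.hpp>
    #include <armnn/utility/PolymorphicDowncast.hpp>

    void VisitSubgraph(const armnn::SubgraphView& subgraph)
    {
        subgraph.ForEachIConnectableLayer([](armnn::IConnectableLayer* iConnectableLayer)
        {
            // Inspection needs only the public interface...
            armnn::IgnoreUnused(iConnectableLayer->GetName());

            // ...while internal operations (Reparent, EraseLayer, ...) still downcast
            // to the concrete Layer type, as Graph::SubstituteSubgraph does above.
            auto layer = armnn::PolymorphicDowncast<armnn::Layer*>(iConnectableLayer);
            armnn::IgnoreUnused(layer);
        });
    }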
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index d49b5e513f..d71149d069 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -48,7 +48,9 @@ public:
}
using LayerList = std::list<Layer*>;
- using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
+
+ // Const so pointers in the list can't be modified externally.
+ using Iterator = LayerList::const_iterator;
using IteratorDifference = Iterator::difference_type;
using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>;
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index e0d0f913c3..98fc14b56e 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -480,4 +480,9 @@ void Layer::ExecuteStrategy(IStrategy& strategy) const
strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
}
+const IConnectableLayer& OutputSlot::GetOwningIConnectableLayer() const
+{
+ return m_OwningLayer;
+}
+
} // namespace armnn
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index f665d155d1..f2ea6cb26d 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -114,6 +114,8 @@ public:
Layer& GetOwningLayer() const { return m_OwningLayer; }
+ const IConnectableLayer& GetOwningIConnectableLayer() const override;
+
LayerGuid GetOwningLayerGuid() const override;
const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
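
Usage sketch (illustrative, not part of the change above): with the GetOwningIConnectableLayer() override added here, and assuming the matching pure-virtual declaration on IOutputSlot that the override implies, code holding only the public slot interfaces can reach the producing layer without a downcast. GetProducerName is a hypothetical helper.

    #include <armnn/INetwork.hpp>

    const char* GetProducerName(const armnn::IInputSlot& inputSlot)
    {
        // GetConnection() may be null if the slot is not connected.
        const armnn::IOutputSlot* connection = inputSlot.GetConnection();
        return connection ? connection->GetOwningIConnectableLayer().GetName() : nullptr;
    }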
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index e89c6fe407..de60e11eef 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -916,6 +916,120 @@ OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
}
}
+// Refactored to accept an IConnectableLayer* rather than a layer iterator,
+// since Graph and SubgraphView use different iterator types.
+void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
+ IConnectableLayer* it,
+ Optional<std::vector<std::string>&> errMessages,
+ OptimizationResult& result,
+ BackendSettings& backendSettings,
+ std::vector<BackendId>& availablePreferredBackends)
+{
+ auto ReturnError = [&](const Layer* layer)
+ {
+ return ReturnWithError(result, layer, backendSettings, errMessages);
+ };
+
+ auto layer = PolymorphicDowncast<Layer*>(it);
+
+ if (layer->GetType() == LayerType::Input)
+ {
+ return;
+ }
+
+ DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
+ layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
+ DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
+ layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+
+ std::string reasonIfUnsupported;
+ bool found = false;
+ if (!CheckScaleSetOnQuantizedType(layer, errMessages))
+ {
+ // don't bomb immediately, find all the quantized outputs
+ // which haven't had a scale set and report them all back.
+ result.m_Error = true;
+ }
+
+ // First try assign layer to hint backend
+ if (layer->GetBackendHint().has_value() &&
+ backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
+ AttemptBackendAssignment(backendSettings,
+ optNetObjPtr->GetGraph(),
+ layer,
+ layer->GetBackendHint().value(),
+ dataTypeIn,
+ dataTypeOut,
+ availablePreferredBackends,
+ reasonIfUnsupported,
+ errMessages).IsOk())
+ {
+ found = true;
+ backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
+ }
+ else
+ {
+ // Try to assign the layer to the preferred list of backends
+ for (const auto& backend : availablePreferredBackends)
+ {
+ if (layer->GetBackendHint().has_value() &&
+ layer->GetBackendHint().value() == backend)
+ {
+ continue; //Don't re-test the backend hint
+ }
+
+ OptimizationResult res = AttemptBackendAssignment(backendSettings,
+ optNetObjPtr->GetGraph(),
+ layer,
+ backend,
+ dataTypeIn,
+ dataTypeOut,
+ availablePreferredBackends,
+ reasonIfUnsupported,
+ errMessages);
+
+ if (res.IsOk())
+ {
+ found = true;
+ backendSettings.m_SelectedBackends.insert(backend);
+ break;
+ }
+ else if (res.IsError())
+ {
+ result = res; // Cannot continue.
+ // Note: we don't need to log the error as it would already
+ // be logged in AttemptBackendAssignment().
+ }
+ else
+ {
+ ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
+ }
+ }
+ }
+
+ // If the layer is unsupported by any devices, log it and record an error in the result.
+ if (!found)
+ {
+ // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
+ // fallback we should set the compute device on the layer to CpuRef (these are not
+ // available as accelerated operations, or are only available under certain
+ // conditions, currently they comprise MemCopy, Constant, Permute)
+ armnn::LayerType layerType = layer->GetType();
+ if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
+ layerType == armnn::LayerType::Constant ||
+ layerType == armnn::LayerType::Permute))
+ {
+ BackendId cpuBackendId(armnn::Compute::CpuRef);
+ layer->SetBackendId(cpuBackendId);
+ backendSettings.m_SelectedBackends.insert(cpuBackendId);
+ }
+ else
+ {
+ result = ReturnError(layer);
+ }
+ }
+
+}
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
BackendSettings& backendSettings,
@@ -926,13 +1040,6 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AssignBackends");
OptimizationResult result;
- // Helper lambda to compose meaningful error message before returning with error
- auto ReturnError = [&](const Layer* layer)
- {
- return ReturnWithError(result, layer, backendSettings, errMessages);
- };
-
-
auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
if (availablePreferredBackends.empty())
{
@@ -946,109 +1053,61 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
for (auto it = firstLayer; it != lastLayer; ++it)
{
- auto layer = *it;
+ AssignBackendsIConnectable(optNetObjPtr,
+ *it,
+ errMessages,
+ result,
+ backendSettings,
+ availablePreferredBackends);
+ }
+
+ for (auto it = firstLayer; it != lastLayer; ++it)
+ {
+ auto layer = PolymorphicDowncast<Layer*>(*it);
- if (layer->GetType() == LayerType::Input)
+ if(layer->GetType() == LayerType::Input)
{
- continue;
+ BackendId connectedBackendId = layer->GetOutputSlot(0).GetConnection(0)->GetOwningLayer().GetBackendId();
+ layer->SetBackendId(connectedBackendId);
}
+ }
- DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
- layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
- DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
- layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+ return result;
+}
- std::string reasonIfUnsupported;
- bool found = false;
- if (!CheckScaleSetOnQuantizedType(layer, errMessages))
- {
- // don't bomb immediately, find all the quantized outputs
- // which haven't had a scale set and report them all back.
- result.m_Error = true;
- }
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
+ BackendSettings& backendSettings,
+ SubgraphView::IConnectableLayerIterator& firstLayer,
+ SubgraphView::IConnectableLayerIterator& lastLayer,
+ Optional<std::vector<std::string>&> errMessages)
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AssignBackends");
+ OptimizationResult result;
- // First try assign layer to hint backend
- if (layer->GetBackendHint().has_value() &&
- backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
- AttemptBackendAssignment(backendSettings,
- optNetObjPtr->GetGraph(),
- layer,
- layer->GetBackendHint().value(),
- dataTypeIn,
- dataTypeOut,
- availablePreferredBackends,
- reasonIfUnsupported,
- errMessages).IsOk())
- {
- found = true;
- backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
- }
- else
- {
- // Try assign layer to prefered list of backends
- for (const auto& backend : availablePreferredBackends)
- {
- if (layer->GetBackendHint().has_value() &&
- layer->GetBackendHint().value() == backend)
- {
- continue; //Don't re-test the backend hint
- }
+ auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
+ if (availablePreferredBackends.empty())
+ {
+ std::stringstream failureMsg;
+ failureMsg << "No preferred backends are available";
+ ReportError(failureMsg.str(), errMessages);
- OptimizationResult res = AttemptBackendAssignment(backendSettings,
- optNetObjPtr->GetGraph(),
- layer,
- backend,
- dataTypeIn,
- dataTypeOut,
- availablePreferredBackends,
- reasonIfUnsupported,
- errMessages);
-
- if (res.IsOk())
- {
- found = true;
- backendSettings.m_SelectedBackends.insert(backend);
- break;
- }
- else if (res.IsError())
- {
- return res; // Cannot continue.
- // Note: we don't need to log the error as it would already
- // be logged in AttemptBackendAssignment().
- }
- else
- {
- ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
- }
- }
- }
+ result.m_Error = true;
+ return result;
+ }
- // If the layer is unsupported by any devices, log and return a null network.
- if (!found)
- {
- // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
- // fallback we should set the compute device on the layer to CpuRef (these are not
- // available as accelerated operations, or are only available under certain
- // conditions, currently they comprise MemCopy, Constant, Permute)
- armnn::LayerType layerType = layer->GetType();
- if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
- layerType == armnn::LayerType::Constant ||
- layerType == armnn::LayerType::Permute))
- {
- BackendId cpuBackendId(armnn::Compute::CpuRef);
- layer->SetBackendId(cpuBackendId);
- backendSettings.m_SelectedBackends.insert(cpuBackendId);
- }
- else
- {
- return ReturnError(layer);
- }
- }
+ for (auto it = firstLayer; it != lastLayer; ++it)
+ {
+ AssignBackendsIConnectable(optNetObjPtr,
+ *it,
+ errMessages,
+ result,
+ backendSettings,
+ availablePreferredBackends);
}
for (auto it = firstLayer; it != lastLayer; ++it)
{
- auto layer = *it;
+ auto layer = PolymorphicDowncast<Layer*>(*it);
if(layer->GetType() == LayerType::Input)
{
@@ -1065,8 +1124,8 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
SubgraphView& subgraph,
Optional<std::vector<std::string>&> errMessages)
{
- Graph::Iterator firstLayer = subgraph.begin();
- Graph::Iterator lastLayer = subgraph.end();
+ SubgraphView::IConnectableLayerIterator firstLayer = subgraph.beginIConnectable();
+ SubgraphView::IConnectableLayerIterator lastLayer = subgraph.endIConnectable();
return AssignBackends(optNetObjPtr,
backendSettings,
firstLayer,
@@ -1118,6 +1177,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
// Select layers assigned to the requested backend
[&backendObjPtr](const Layer& layer)
{
+
return layer.GetType() != LayerType::Input &&
layer.GetType() != LayerType::Output &&
layer.GetBackendId() == backendObjPtr->GetId();
@@ -1145,10 +1205,11 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);
// Assign the current backend to the optimized sub-graph
- std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
+ const SubgraphView::IConnectableLayers& subgraphLayers = replacementSubgraph.GetIConnectableLayers();
+ std::for_each(subgraphLayers.begin(), subgraphLayers.end(), [&selectedBackend](IConnectableLayer* l)
{
ARMNN_ASSERT(l);
- l->SetBackendId(selectedBackend);
+ PolymorphicDowncast<Layer*>(l)->SetBackendId(selectedBackend);
});
}
@@ -1171,7 +1232,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
{
// An error occurred: the optimization was attempted but not performed, try different backends
std::stringstream subgraphMsg;
- subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
+ subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetIConnectableLayers().size()
<< " layers inside sub-graph " << count++;
ReportWarning(subgraphMsg.str(), errMessages);
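
Sketch (illustrative, not part of the change above): because the helper takes an IConnectableLayer*, the same per-layer assignment body can be driven by a Graph::Iterator (which yields Layer*) or a SubgraphView::IConnectableLayerIterator (which yields IConnectableLayer*). A generic loop showing that dispatch, assuming it lives in Network.cpp next to the file-scope AssignBackendsIConnectable so the call resolves:

    template <typename LayerIterator>
    void AssignBackendsOverRange(armnn::OptimizedNetworkImpl* optNetObjPtr,
                                 LayerIterator first,
                                 LayerIterator last,
                                 armnn::Optional<std::vector<std::string>&> errMessages,
                                 armnn::OptimizationResult& result,
                                 armnn::BackendSettings& backendSettings,
                                 std::vector<armnn::BackendId>& availablePreferredBackends)
    {
        for (auto it = first; it != last; ++it)
        {
            // Both Layer* and IConnectableLayer* convert to IConnectableLayer* here.
            AssignBackendsIConnectable(optNetObjPtr,
                                       *it,
                                       errMessages,
                                       result,
                                       backendSettings,
                                       availablePreferredBackends);
        }
    }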
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ee84d7b33b..a2ef0d8364 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -20,6 +20,7 @@
#include "Graph.hpp"
#include "Layer.hpp"
#include "OptimizedNetworkImpl.hpp"
+#include "SubgraphView.hpp"
namespace armnn
{
@@ -301,4 +302,11 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
Graph::Iterator& lastLayer,
Optional<std::vector<std::string>&> errMessages);
+
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
+ BackendSettings& backendSettings,
+ SubgraphView::IConnectableLayerIterator& firstLayer,
+ SubgraphView::IConnectableLayerIterator& lastLayer,
+ Optional<std::vector<std::string>&> errMessages);
+
} // namespace armnn
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index 0d41889e15..2de4510b7b 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -44,24 +44,69 @@ SubgraphView::SubgraphView(Graph& graph)
: m_InputSlots{}
, m_OutputSlots{}
, m_Layers(graph.begin(), graph.end())
+ , m_IConnectableLayers(graph.begin(), graph.end())
{
ArrangeBySortOrder();
CheckSubgraph();
}
+/// Legacy constructor; also populates the IConnectable containers to maintain backwards compatibility
SubgraphView::SubgraphView(InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers)
- : m_InputSlots{inputs}
- , m_OutputSlots{outputs}
- , m_Layers{layers}
+ : m_InputSlots{InputSlots{inputs.begin(), inputs.end()}}
+ , m_IInputSlots{IInputSlots{inputs.begin(), inputs.end()}}
+ , m_OutputSlots{OutputSlots{outputs.begin(), outputs.end()}}
+ , m_IOutputSlots{IOutputSlots{outputs.begin(), outputs.end()}}
+ , m_Layers(layers)
+ , m_IConnectableLayers(IConnectableLayers{layers.begin(), layers.end()})
{
ArrangeBySortOrder();
CheckSubgraph();
}
+/// IConnectableLayer-based constructor; also populates the legacy containers to maintain backwards compatibility
+SubgraphView::SubgraphView(SubgraphView::IConnectableLayers &&layers,
+ SubgraphView::IInputSlots &&inputs,
+ SubgraphView::IOutputSlots &&outputs)
+ : m_IInputSlots{inputs}
+ , m_IOutputSlots{outputs}
+ , m_IConnectableLayers(IConnectableLayers{layers.begin(), layers.end()})
+{
+ // Cast from IConnectableLayer to Layer for backward compatibility
+ auto f = [](IConnectableLayer* value)
+ {
+ return PolymorphicDowncast<Layer*>(value);
+ };
+ std::transform(layers.begin(), layers.end(), std::back_inserter(m_Layers), f);
+
+
+ m_InputSlots.resize(inputs.size());
+ m_IInputSlots.resize(inputs.size());
+ for (unsigned int i = 0; i < inputs.size(); i++)
+ {
+ m_InputSlots.at(i) = PolymorphicDowncast<InputSlot*>(inputs[i]);
+ m_IInputSlots.at(i) = inputs[i];
+ }
+
+ m_OutputSlots.resize(outputs.size());
+ m_IOutputSlots.resize(outputs.size());
+ for (unsigned int i = 0; i < outputs.size(); i++)
+ {
+ m_OutputSlots.at(i) = PolymorphicDowncast<OutputSlot*>(outputs[i]);
+ m_IOutputSlots.at(i) = outputs[i];
+ }
+
+ ArrangeBySortOrder();
+ CheckSubgraph();
+}
+
SubgraphView::SubgraphView(const SubgraphView& subgraph)
: m_InputSlots(subgraph.m_InputSlots.begin(), subgraph.m_InputSlots.end())
+ , m_IInputSlots(subgraph.m_IInputSlots.begin(), subgraph.m_IInputSlots.end())
, m_OutputSlots(subgraph.m_OutputSlots.begin(), subgraph.m_OutputSlots.end())
+ , m_IOutputSlots(subgraph.m_IOutputSlots.begin(), subgraph.m_IOutputSlots.end())
, m_Layers(subgraph.m_Layers.begin(), subgraph.m_Layers.end())
+ , m_IConnectableLayers(IConnectableLayers{subgraph.m_IConnectableLayers.begin(),
+ subgraph.m_IConnectableLayers.end()})
{
ArrangeBySortOrder();
CheckSubgraph();
@@ -69,30 +114,36 @@ SubgraphView::SubgraphView(const SubgraphView& subgraph)
SubgraphView::SubgraphView(SubgraphView&& subgraph)
: m_InputSlots(std::move(subgraph.m_InputSlots))
+ , m_IInputSlots(std::move(subgraph.m_IInputSlots))
, m_OutputSlots(std::move(subgraph.m_OutputSlots))
+ , m_IOutputSlots(std::move(subgraph.m_IOutputSlots))
, m_Layers(std::move(subgraph.m_Layers))
+ , m_IConnectableLayers(std::move(subgraph.m_IConnectableLayers))
{
ArrangeBySortOrder();
CheckSubgraph();
}
SubgraphView::SubgraphView(IConnectableLayer* layer)
- : m_InputSlots{}
- , m_OutputSlots{}
- , m_Layers{PolymorphicDowncast<Layer*>(layer)}
+ : m_Layers{PolymorphicDowncast<Layer*>(layer)}
+ , m_IConnectableLayers{layer}
{
unsigned int numInputSlots = layer->GetNumInputSlots();
m_InputSlots.resize(numInputSlots);
+ m_IInputSlots.resize(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; i++)
{
m_InputSlots.at(i) = PolymorphicDowncast<InputSlot*>(&(layer->GetInputSlot(i)));
+ m_IInputSlots.at(i) = &(layer->GetInputSlot(i));
}
unsigned int numOutputSlots = layer->GetNumOutputSlots();
m_OutputSlots.resize(numOutputSlots);
+ m_IOutputSlots.resize(numOutputSlots);
for (unsigned int i = 0; i < numOutputSlots; i++)
{
m_OutputSlots.at(i) = PolymorphicDowncast<OutputSlot*>(&(layer->GetOutputSlot(i)));
+ m_IOutputSlots.at(i) = &(layer->GetOutputSlot(i));
}
CheckSubgraph();
@@ -101,8 +152,11 @@ SubgraphView::SubgraphView(IConnectableLayer* layer)
SubgraphView& SubgraphView::operator=(SubgraphView&& other)
{
m_InputSlots = std::move(other.m_InputSlots);
+ m_IInputSlots = std::move(other.m_IInputSlots);
m_OutputSlots = std::move(other.m_OutputSlots);
+ m_IOutputSlots = std::move(other.m_IOutputSlots);
m_Layers = std::move(other.m_Layers);
+ m_IConnectableLayers = std::move(other.m_IConnectableLayers);
CheckSubgraph();
@@ -119,6 +173,16 @@ void SubgraphView::CheckSubgraph()
// Check for invalid or duplicate layers
AssertIfNullsOrDuplicates(m_Layers, "Sub-graphs cannot contain null or duplicate layers");
+
+ // Check for invalid or duplicate input slots
+ AssertIfNullsOrDuplicates(m_IInputSlots, "Sub-graphs cannot contain null or duplicate IInputSlots");
+
+ // Check for invalid or duplicate output slots
+ AssertIfNullsOrDuplicates(m_IOutputSlots, "Sub-graphs cannot contain null or duplicate IOutputSlots");
+
+ // Check for invalid or duplicate layers
+ AssertIfNullsOrDuplicates(m_IConnectableLayers,
+ "Sub-graphs cannot contain null or duplicate IConnectableLayers");
}
const SubgraphView::InputSlots& SubgraphView::GetInputSlots() const
@@ -126,39 +190,69 @@ const SubgraphView::InputSlots& SubgraphView::GetInputSlots() const
return m_InputSlots;
}
+const SubgraphView::IInputSlots& SubgraphView::GetIInputSlots() const
+{
+ return m_IInputSlots;
+}
+
const SubgraphView::OutputSlots& SubgraphView::GetOutputSlots() const
{
return m_OutputSlots;
}
+const SubgraphView::IOutputSlots& SubgraphView::GetIOutputSlots() const
+{
+ return m_IOutputSlots;
+}
+
const InputSlot* SubgraphView::GetInputSlot(unsigned int index) const
{
return m_InputSlots.at(index);
}
+const IInputSlot* SubgraphView::GetIInputSlot(unsigned int index) const
+{
+ return m_IInputSlots.at(index);
+}
+
InputSlot* SubgraphView::GetInputSlot(unsigned int index)
{
return m_InputSlots.at(index);
}
+IInputSlot* SubgraphView::GetIInputSlot(unsigned int index)
+{
+ return m_IInputSlots.at(index);
+}
+
const OutputSlot* SubgraphView::GetOutputSlot(unsigned int index) const
{
return m_OutputSlots.at(index);
}
+const IOutputSlot* SubgraphView::GetIOutputSlot(unsigned int index) const
+{
+ return m_IOutputSlots.at(index);
+}
+
OutputSlot* SubgraphView::GetOutputSlot(unsigned int index)
{
return m_OutputSlots.at(index);
}
+IOutputSlot* SubgraphView::GetIOutputSlot(unsigned int index)
+{
+ return m_IOutputSlots.at(index);
+}
+
unsigned int SubgraphView::GetNumInputSlots() const
{
- return armnn::numeric_cast<unsigned int>(m_InputSlots.size());
+ return armnn::numeric_cast<unsigned int>(m_IInputSlots.size());
}
unsigned int SubgraphView::GetNumOutputSlots() const
{
- return armnn::numeric_cast<unsigned int>(m_OutputSlots.size());
+ return armnn::numeric_cast<unsigned int>(m_IOutputSlots.size());
}
const SubgraphView::Layers& SubgraphView::GetLayers() const
@@ -166,6 +260,11 @@ const SubgraphView::Layers& SubgraphView::GetLayers() const
return m_Layers;
}
+const SubgraphView::IConnectableLayers& SubgraphView::GetIConnectableLayers() const
+{
+ return m_IConnectableLayers;
+}
+
SubgraphView::Iterator SubgraphView::begin()
{
return m_Layers.begin();
@@ -176,6 +275,17 @@ SubgraphView::Iterator SubgraphView::end()
return m_Layers.end();
}
+// IConnectableLayer iterator counterparts, kept alongside the originals for backwards compatibility
+SubgraphView::IConnectableLayerIterator SubgraphView::beginIConnectable()
+{
+ return m_IConnectableLayers.begin();
+}
+
+SubgraphView::IConnectableLayerIterator SubgraphView::endIConnectable()
+{
+ return m_IConnectableLayers.end();
+}
+
SubgraphView::ConstIterator SubgraphView::begin() const
{
return m_Layers.begin();
@@ -186,14 +296,42 @@ SubgraphView::ConstIterator SubgraphView::end() const
return m_Layers.end();
}
+// IConnectableLayer const-iterator counterparts, kept for backwards compatibility
+SubgraphView::ConstIConnectableIterator SubgraphView::beginIConnectable() const
+{
+ return m_IConnectableLayers.begin();
+}
+
+SubgraphView::ConstIConnectableIterator SubgraphView::endIConnectable() const
+{
+ return m_IConnectableLayers.end();
+}
+
SubgraphView::ConstIterator SubgraphView::cbegin() const
{
+ // Ignore deprecated call as this is internal to SubgraphView
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return begin();
+ ARMNN_NO_DEPRECATE_WARN_END
}
SubgraphView::ConstIterator SubgraphView::cend() const
{
+ // Ignore deprecated call as this is internal to SubgraphView
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return end();
+ ARMNN_NO_DEPRECATE_WARN_END
+}
+
+// IConnectableLayer cbegin/cend counterparts, kept for backwards compatibility
+SubgraphView::ConstIConnectableIterator SubgraphView::cbeginIConnectable() const
+{
+ return beginIConnectable();
+}
+
+SubgraphView::ConstIConnectableIterator SubgraphView::cendIConnectable() const
+{
+ return endIConnectable();
}
void SubgraphView::Clear()
@@ -201,6 +339,10 @@ void SubgraphView::Clear()
m_InputSlots.clear();
m_OutputSlots.clear();
m_Layers.clear();
+
+ m_IInputSlots.clear();
+ m_IOutputSlots.clear();
+ m_IConnectableLayers.clear();
}
void SubgraphView::ArrangeBySortOrder()
@@ -212,6 +354,16 @@ void SubgraphView::ArrangeBySortOrder()
};
m_Layers.sort(compareLayerPriority);
+
+ using IConnectableLayersList = std::list<IConnectableLayer*>;
+ auto compareIConnectableLayerPriority = [](const IConnectableLayersList::value_type& layerA,
+ const IConnectableLayersList::value_type& layerB)
+ {
+ return PolymorphicDowncast<Layer*>(layerA)->GetPriority() <
+ PolymorphicDowncast<Layer*>(layerB)->GetPriority();
+ };
+
+ m_IConnectableLayers.sort(compareIConnectableLayerPriority);
}
} // namespace armnn
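
Usage sketch (illustrative, not part of the change above): building a view with the new constructor order (layers first, then input slots, then output slots), which also back-fills the legacy containers as shown in SubgraphView.cpp. MakeSingleLayerView is hypothetical, and 'layer' is assumed to have at least one input and one output slot; for this specific single-layer case the existing SubgraphView(IConnectableLayer*) constructor would also do.

    #include "SubgraphView.hpp"
    #include <utility>

    armnn::SubgraphView MakeSingleLayerView(armnn::IConnectableLayer* layer)
    {
        armnn::SubgraphView::IConnectableLayers layers  = { layer };
        armnn::SubgraphView::IInputSlots        inputs  = { &layer->GetInputSlot(0) };
        armnn::SubgraphView::IOutputSlots       outputs = { &layer->GetOutputSlot(0) };

        // Note the argument order differs from the deprecated overload:
        // (layers, inputs, outputs) instead of (inputs, outputs, layers).
        return armnn::SubgraphView(std::move(layers), std::move(inputs), std::move(outputs));
    }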
diff --git a/src/armnn/SubgraphView.hpp b/src/armnn/SubgraphView.hpp
index af6054283e..f2ca44cb68 100644
--- a/src/armnn/SubgraphView.hpp
+++ b/src/armnn/SubgraphView.hpp
@@ -34,19 +34,40 @@ public:
}
}
+ template <typename Func>
+ void ForEachIConnectableLayer(Func func) const
+ {
+ for (auto it = m_IConnectableLayers.begin(); it != m_IConnectableLayers.end(); )
+ {
+ auto next = std::next(it);
+ func(*it);
+ it = next;
+ }
+ }
+
using SubgraphViewPtr = std::unique_ptr<SubgraphView>;
using InputSlots = std::vector<InputSlot*>;
+ using IInputSlots = std::vector<IInputSlot*>;
using OutputSlots = std::vector<OutputSlot*>;
+ using IOutputSlots = std::vector<IOutputSlot*>;
using Layers = std::list<Layer*>;
+ using IConnectableLayers = std::list<IConnectableLayer*>;
using Iterator = Layers::iterator;
+ using IConnectableLayerIterator = IConnectableLayers::iterator;
using ConstIterator = Layers::const_iterator;
+ using ConstIConnectableIterator = IConnectableLayers::const_iterator;
/// Constructs a sub-graph from the entire given graph.
explicit SubgraphView(Graph& graph);
/// Constructs a sub-graph with the given arguments.
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use constructor with arguments: "
+ "IConnectableLayers, IInputSlots and IOutputSlots", "22.08")
SubgraphView(InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers);
+ /// Constructs a sub-graph with the given arguments.
+ SubgraphView(IConnectableLayers&& layers, IInputSlots&& inputs, IOutputSlots&& outputs);
+
/// Copy-constructor.
SubgraphView(const SubgraphView& subgraph);
@@ -59,27 +80,74 @@ public:
/// Move-assignment operator.
SubgraphView& operator=(SubgraphView&& other);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIInputSlots() returning"
+ " public IInputSlots", "22.08")
const InputSlots& GetInputSlots() const;
+ const IInputSlots& GetIInputSlots() const;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIOutputSlots() returning"
+ " public IOutputSlots", "22.08")
const OutputSlots& GetOutputSlots() const;
+ const IOutputSlots& GetIOutputSlots() const;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIConnectableLayers() "
+ "returning public IConnectableLayers", "22.08")
const Layers& GetLayers() const;
+ const IConnectableLayers& GetIConnectableLayers() const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIInputSlot() returning public "
+ "IInputSlot", "22.08")
const InputSlot* GetInputSlot(unsigned int index) const;
+ const IInputSlot* GetIInputSlot(unsigned int index) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIInputSlot() returning public "
+ "IInputSlot", "22.08")
InputSlot* GetInputSlot(unsigned int index);
+ IInputSlot* GetIInputSlot(unsigned int index);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIOutputSlot() returning"
+ " public IOutputSlot", "22.08")
const OutputSlot* GetOutputSlot(unsigned int index) const;
+ const IOutputSlot* GetIOutputSlot(unsigned int index) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIOutputSlot() returning"
+ " public IOutputSlot", "22.08")
OutputSlot* GetOutputSlot(unsigned int index);
+ IOutputSlot* GetIOutputSlot(unsigned int index);
unsigned int GetNumInputSlots() const;
unsigned int GetNumOutputSlots() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "IConnectableLayerIterator, until that occurs in 23.02; please use "
+ "beginIConnectable() returning public IConnectableLayerIterator", "23.02")
Iterator begin();
+ IConnectableLayerIterator beginIConnectable();
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "IConnectableLayerIterator, until that occurs in 23.02; please use "
+ "endIConnectable() returning public IConnectableLayerIterator", "23.02")
Iterator end();
+ IConnectableLayerIterator endIConnectable();
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "beginIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator begin() const;
+ ConstIConnectableIterator beginIConnectable() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "endIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator end() const;
+ ConstIConnectableIterator endIConnectable() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "cbeginIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator cbegin() const;
+ ConstIConnectableIterator cbeginIConnectable() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "cendIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator cend() const;
+ ConstIConnectableIterator cendIConnectable() const;
void Clear();
@@ -91,11 +159,14 @@ private:
/// The list of pointers to the input slots of the parent graph.
InputSlots m_InputSlots;
+ IInputSlots m_IInputSlots;
/// The list of pointers to the output slots of the parent graph.
OutputSlots m_OutputSlots;
+ IOutputSlots m_IOutputSlots;
/// The list of pointers to the layers of the parent graph.
Layers m_Layers;
+ IConnectableLayers m_IConnectableLayers;
};
} // namespace armnn
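
Migration sketch (illustrative, not part of the change above): moving from the accessors deprecated here to their IConnectable counterparts, so callers keep compiling once the old API is removed in 22.08. The InspectView helper and include paths are assumptions.

    #include "SubgraphView.hpp"
    #include <armnn/utility/IgnoreUnused.hpp>

    void InspectView(const armnn::SubgraphView& view)
    {
        // Before (deprecated, scheduled for removal in 22.08):
        //     view.GetInputSlots();  view.GetOutputSlots();  view.GetLayers();
        // After:
        const armnn::SubgraphView::IInputSlots&        inputs  = view.GetIInputSlots();
        const armnn::SubgraphView::IOutputSlots&       outputs = view.GetIOutputSlots();
        const armnn::SubgraphView::IConnectableLayers& layers  = view.GetIConnectableLayers();
        armnn::IgnoreUnused(inputs, outputs, layers);
    }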
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 21fbb7cd80..e2c5f911a0 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -176,7 +176,7 @@ private:
/// Intermediate data structure to store information associated with a particular layer.
struct LayerSelectionInfo
{
- using LayerInfoContainer = std::map<Layer*, LayerSelectionInfo>;
+ using LayerInfoContainer = std::map<IConnectableLayer*, LayerSelectionInfo>;
using LayerInfoQueue = std::queue<LayerSelectionInfo*>;
LayerSelectionInfo(Layer* layer, const SubgraphViewSelector::LayerSelectorFunction& selector)
@@ -193,9 +193,11 @@ struct LayerSelectionInfo
}
void CollectNonSelectedInputs(LayerSelectionInfo::LayerInfoContainer& layerInfos,
- SubgraphView::InputSlots& inputSlots)
+ SubgraphView::IInputSlots& inputSlots)
{
- for (auto&& slot = m_Layer->BeginInputSlots(); slot != m_Layer->EndInputSlots(); ++slot)
+ for (auto&& slot = PolymorphicDowncast<Layer*>(m_Layer)->BeginInputSlots();
+ slot != PolymorphicDowncast<Layer*>(m_Layer)->EndInputSlots();
+ ++slot)
{
OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
@@ -218,9 +220,11 @@ struct LayerSelectionInfo
}
void CollectNonSelectedOutputSlots(LayerSelectionInfo::LayerInfoContainer& layerInfos,
- SubgraphView::OutputSlots& outputSlots)
+ SubgraphView::IOutputSlots& outputSlots)
{
- for (auto&& slot = m_Layer->BeginOutputSlots(); slot != m_Layer->EndOutputSlots(); ++slot)
+ for (auto&& slot = PolymorphicDowncast<Layer*>(m_Layer)->BeginOutputSlots();
+ slot != PolymorphicDowncast<Layer*>(m_Layer)->EndOutputSlots();
+ ++slot)
{
for (InputSlot* childLayerInputSlot : slot->GetConnections())
{
@@ -240,7 +244,7 @@ struct LayerSelectionInfo
}
}
- Layer* m_Layer;
+ IConnectableLayer* m_Layer;
/// Which subgraph this layer has been assigned to. Only valid once m_IsProcessed is true.
/// Two layers with different m_Subgraph pointers may in fact have been merged into the same subgraph -
/// see the description of the PartialSubgraph class.
@@ -264,7 +268,7 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
LayerSelectionInfo& layerInfo,
Delegate function)
{
- Layer& layer = *layerInfo.m_Layer;
+ Layer& layer = *PolymorphicDowncast<Layer*>(layerInfo.m_Layer);
for (auto inputSlot : layer.GetInputSlots())
{
@@ -285,7 +289,7 @@ void ForEachLayerOutput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
LayerSelectionInfo& layerInfo,
Delegate function)
{
- Layer& layer= *layerInfo.m_Layer;
+ Layer& layer = *PolymorphicDowncast<Layer*>(layerInfo.m_Layer);
for (auto& outputSlot : layer.GetOutputSlots())
{
@@ -387,9 +391,11 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
LayerSelectionInfo::LayerInfoContainer layerInfos;
LayerSelectionInfo::LayerInfoQueue processQueue;
- for (auto& layer : subgraph)
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
+ for (auto& layer : subgraphLayers)
{
- auto emplaced = layerInfos.emplace(layer, LayerSelectionInfo{layer, selector});
+
+ auto emplaced = layerInfos.emplace(layer, LayerSelectionInfo{PolymorphicDowncast<Layer*>(layer), selector});
LayerSelectionInfo& layerInfo = emplaced.first->second;
// Start with Input type layers
@@ -399,10 +405,10 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
}
}
- const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
for (auto& inputSlot : subgraphInputSlots)
{
- Layer& layer = inputSlot->GetOwningLayer();
+ Layer& layer = PolymorphicDowncast<InputSlot*>(inputSlot)->GetOwningLayer();
auto emplaced = layerInfos.emplace(&layer, LayerSelectionInfo{&layer, selector});
LayerSelectionInfo& layerInfo = emplaced.first->second;
@@ -463,9 +469,9 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
Subgraphs result;
for (auto& splitGraph : splitMap)
{
- SubgraphView::InputSlots inputs;
- SubgraphView::OutputSlots outputs;
- SubgraphView::Layers layers;
+ SubgraphView::IInputSlots inputs;
+ SubgraphView::IOutputSlots outputs;
+ SubgraphView::IConnectableLayers layers;
for (auto&& infoPtr : splitGraph.second)
{
infoPtr->CollectNonSelectedInputs(layerInfos, inputs);
@@ -475,24 +481,28 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
// Sort lists into deterministic order, not relying on pointer values which may be different on each execution.
// This makes debugging the optimised graph much easier as subsequent stages can also be deterministic.
- std::sort(inputs.begin(), inputs.end(), [](const InputSlot* a, const InputSlot* b)
+ std::sort(inputs.begin(), inputs.end(), [](const IInputSlot* a, const IInputSlot* b)
{
- const LayerGuid guidA = a->GetOwningLayer().GetGuid();
- const LayerGuid guidB = b->GetOwningLayer().GetGuid();
+ auto* castA = PolymorphicDowncast<const InputSlot*>(a);
+ auto* castB = PolymorphicDowncast<const InputSlot*>(b);
+ const LayerGuid guidA = castA->GetOwningLayer().GetGuid();
+ const LayerGuid guidB = castB->GetOwningLayer().GetGuid();
if (guidA < guidB)
{
return true;
}
else if (guidA == guidB)
{
- return (a->GetSlotIndex() < b->GetSlotIndex());
+ return (castA->GetSlotIndex() < castB->GetSlotIndex());
}
return false;
});
- std::sort(outputs.begin(), outputs.end(), [](const OutputSlot* a, const OutputSlot* b)
+ std::sort(outputs.begin(), outputs.end(), [](const IOutputSlot* a, const IOutputSlot* b)
{
- const LayerGuid guidA = a->GetOwningLayer().GetGuid();
- const LayerGuid guidB = b->GetOwningLayer().GetGuid();
+ auto* castA = PolymorphicDowncast<const OutputSlot*>(a);
+ auto* castB = PolymorphicDowncast<const OutputSlot*>(b);
+ const LayerGuid guidA = castA->GetOwningLayer().GetGuid();
+ const LayerGuid guidB = castB->GetOwningLayer().GetGuid();
if (guidA < guidB)
{
return true;
@@ -503,12 +513,12 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
}
return false;
});
- layers.sort([](const Layer* a, const Layer* b) { return a->GetGuid() < b->GetGuid(); });
+ layers.sort([](const IConnectableLayer* a, const IConnectableLayer* b) { return a->GetGuid() < b->GetGuid(); });
// Create a new sub-graph with the new lists of input/output slots and layer
- result.emplace_back(std::make_unique<SubgraphView>(std::move(inputs),
- std::move(outputs),
- std::move(layers)));
+ result.emplace_back(std::make_unique<SubgraphView>(std::move(layers),
+ std::move(inputs),
+ std::move(outputs)));
}
return result;
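
Usage sketch (illustrative, not part of the change above): driving the selector and consuming the result through the IConnectableLayer list that SelectSubgraphs now populates. The Convolution2d filter is an arbitrary example; the helper name and include paths are assumptions.

    #include "Graph.hpp"
    #include "Layer.hpp"
    #include "SubgraphViewSelector.hpp"
    #include <armnn/utility/IgnoreUnused.hpp>

    void ReportConvolutionSubgraphs(armnn::Graph& graph)
    {
        armnn::SubgraphViewSelector::Subgraphs subgraphs =
            armnn::SubgraphViewSelector::SelectSubgraphs(
                graph,
                [](const armnn::Layer& layer)
                {
                    return layer.GetType() == armnn::LayerType::Convolution2d;
                });

        for (const auto& subgraphPtr : subgraphs)
        {
            // Each resulting view exposes its layers via the public interface.
            armnn::IgnoreUnused(subgraphPtr->GetIConnectableLayers().size());
        }
    }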
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 693daa2268..2ea465ea04 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -17,12 +17,13 @@
#include <queue>
#include <random>
#include <chrono>
+
using namespace armnn;
namespace
{
-bool AreAnySubgraphLayersPresentInGraph(const SubgraphView::Layers &subgraphLayers, const Graph &graph)
+bool AreAnySubgraphLayersPresentInGraph(const SubgraphView::IConnectableLayers &subgraphLayers, const Graph &graph)
{
for(auto&& layer : subgraphLayers)
{
@@ -52,6 +53,20 @@ SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
return result;
}
+/// Duplication for IConnectableLayer
+SubgraphView::IInputSlots CreateIInputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
+{
+ SubgraphView::IInputSlots result;
+ for (auto&& layer : layers)
+ {
+ for (unsigned int i = 0 ; i < layer->GetNumInputSlots(); ++i)
+ {
+ result.push_back(&(layer->GetInputSlot(i)));
+ }
+ }
+ return result;
+}
+
//
// this helper only works if all layers where the outputs connect to are not selected
//
@@ -68,6 +83,20 @@ SubgraphView::OutputSlots CreateOutputsFrom(const std::vector<Layer*>& layers)
return result;
}
+/// Duplication for IConnectableLayer
+SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
+{
+ SubgraphView::IOutputSlots result;
+ for (auto &&layer: layers)
+ {
+ for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
+ {
+ result.push_back(&(layer->GetOutputSlot(i)));
+ }
+ }
+ return result;
+}
+
//
// this takes the inputs, outputs and layers as a copy and the move these copies into the
// resulting subgraph, so the pass by value is intentional
@@ -79,6 +108,13 @@ SubgraphViewSelector::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::Input
return std::make_unique<SubgraphView>(std::move(inputs), std::move(outputs), std::move(layers));
}
+SubgraphViewSelector::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::IConnectableLayers&& layers,
+ SubgraphView::IInputSlots&& inputs,
+ SubgraphView::IOutputSlots&& outputs)
+{
+ return std::make_unique<SubgraphView>(std::move(layers), std::move(inputs), std::move(outputs));
+}
+
template <typename T, typename Iterator>
std::vector<T> ToSortedArray(Iterator begin, Iterator end)
{
@@ -102,32 +138,181 @@ void CompareSubgraphViews(SubgraphViewSelector::SubgraphViewPtr& result,
if (result.get() != nullptr && expected.get() != nullptr)
{
- CHECK(result->GetInputSlots().size() == expected->GetInputSlots().size());
- CHECK(result->GetOutputSlots().size() == expected->GetOutputSlots().size());
- CHECK(result->GetLayers().size() == expected->GetLayers().size());
-
- auto resultLayers = ToSortedArray<Layer *>(result->GetLayers().begin(),
- result->GetLayers().end());
- auto expectedLayers = ToSortedArray<Layer *>(expected->GetLayers().begin(),
- expected->GetLayers().end());
+ CHECK(result->GetIInputSlots().size() == expected->GetIInputSlots().size());
+ CHECK(result->GetIOutputSlots().size() == expected->GetIOutputSlots().size());
+ CHECK(result->GetIConnectableLayers().size() == expected->GetIConnectableLayers().size());
+
+ auto resultLayers = ToSortedArray<IConnectableLayer*>(result->GetIConnectableLayers().begin(),
+ result->GetIConnectableLayers().end());
+ auto expectedLayers = ToSortedArray<IConnectableLayer*>(expected->GetIConnectableLayers().begin(),
+ expected->GetIConnectableLayers().end());
CompareVectors(resultLayers, expectedLayers);
- auto resultInputs = ToSortedArray<InputSlot *>(result->GetInputSlots().begin(),
- result->GetInputSlots().end());
- auto expectedInputs = ToSortedArray<InputSlot *>(expected->GetInputSlots().begin(),
- expected->GetInputSlots().end());
+ auto resultInputs = ToSortedArray<IInputSlot *>(result->GetIInputSlots().begin(),
+ result->GetIInputSlots().end());
+ auto expectedInputs = ToSortedArray<IInputSlot *>(expected->GetIInputSlots().begin(),
+ expected->GetIInputSlots().end());
CompareVectors(resultInputs, expectedInputs);
- auto resultOutputs = ToSortedArray<OutputSlot *>(result->GetOutputSlots().begin(),
- result->GetOutputSlots().end());
- auto expectedOutputs = ToSortedArray<OutputSlot *>(expected->GetOutputSlots().begin(),
- expected->GetOutputSlots().end());
+ auto resultOutputs = ToSortedArray<IOutputSlot *>(result->GetIOutputSlots().begin(),
+ result->GetIOutputSlots().end());
+ auto expectedOutputs = ToSortedArray<IOutputSlot *>(expected->GetIOutputSlots().begin(),
+ expected->GetIOutputSlots().end());
CompareVectors(resultOutputs, expectedOutputs);
}
}
} // namespace <anonymous>
+TEST_SUITE("SubgraphViewBackwardCompatibilityTests")
+{
+// Test that SubgraphView has been converted to use IConnectableLayer/IInputSlot/IOutputSlot
+// in a backward compatible manner from ILayer/InputSlot/OutputSlot
+TEST_CASE("SubgraphViewIterators")
+{
+ INetworkPtr net(INetwork::Create());
+ IConnectableLayer* layer = net->AddInputLayer(1, "input");
+
+ SubgraphView subgraph{layer};
+
+ // cbeginIConnectable() and cendIConnectable()
+ bool found = false;
+ if (std::find(subgraph.cbeginIConnectable(), subgraph.cendIConnectable(), layer)
+ != subgraph.cendIConnectable())
+ {
+ found = true;
+ }
+ CHECK(found);
+ found = false;
+
+ // beginIConnectable() and endIConnectable()
+ if (std::find(subgraph.beginIConnectable(), subgraph.endIConnectable(), layer)
+ != subgraph.endIConnectable())
+ {
+ found = true;
+ }
+ CHECK(found);
+ found = false;
+
+ // GetIConnectableLayers returns the IConnectableLayers initialized by the constructor that takes IConnectableLayers
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
+ for (auto& iConnectableLayer : subgraphLayers)
+ {
+ if (std::string(iConnectableLayer->GetName()) == "input")
+ {
+ found = true;
+ }
+ }
+ CHECK(found);
+ found = false;
+
+ // Test that GetLayers returns the Layers initialized by the constructor that takes IConnectableLayers
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ const SubgraphView::Layers& subgraphLayersOld = subgraph.GetLayers();
+ ARMNN_NO_DEPRECATE_WARN_END
+ for (auto& layerOld : subgraphLayersOld)
+ {
+ if (std::string(layerOld->GetName()) == "input")
+ {
+ found = true;
+ }
+ }
+ CHECK(found);
+}
+
+TEST_CASE("SubgraphViewSlots")
+{
+ // Construct graph
+ Graph graph;
+
+ Layer* const inputLayer = graph.AddLayer<InputLayer>(0, "input");
+
+ Convolution2dDescriptor convDescriptor;
+ Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
+ Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
+
+ Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ // Construct sub-graph
+ SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom({},
+ CreateIInputsFrom({convLayer1}),
+ CreateIOutputsFrom({convLayer2}));
+
+ // Test that both old and new are initialized
+ CHECK(subgraph->GetIInputSlots().size() == 1);
+ CHECK(subgraph->GetIOutputSlots().size() == 1);
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ CHECK(subgraph->GetInputSlots().size() == 1);
+ CHECK(subgraph->GetOutputSlots().size() == 1);
+
+ // Check old and new pointing to same address
+ CHECK(subgraph->GetOutputSlot(0) == subgraph->GetIOutputSlot(0));
+ CHECK(subgraph->GetInputSlot(0) == subgraph->GetIInputSlot(0));
+ ARMNN_NO_DEPRECATE_WARN_END
+
+}
+
+TEST_CASE("SubgraphViewConstructors")
+{
+ // Construct graph
+ Graph graph;
+
+ Layer* const inputLayer = graph.AddLayer<InputLayer>(0, "input");
+
+ Convolution2dDescriptor convDescriptor;
+ Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
+ Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
+
+ Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ // Construct sub-graph
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom({inputLayer, convLayer1, convLayer2, outputLayer},
+ CreateIInputsFrom({convLayer1}),
+ CreateIOutputsFrom({convLayer2}));
+
+ // Copy Constructor
+ SubgraphView subgraph2(*subgraph.get());
+ CHECK(subgraph->GetIConnectableLayers() == subgraph2.GetIConnectableLayers());
+ CHECK(subgraph->GetIInputSlots() == subgraph2.GetIInputSlots());
+ CHECK(subgraph->GetIOutputSlots() == subgraph2.GetIOutputSlots());
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ CHECK(subgraph->GetLayers() == subgraph2.GetLayers());
+ CHECK(subgraph->GetInputSlots() == subgraph2.GetInputSlots());
+ CHECK(subgraph->GetOutputSlots() == subgraph2.GetOutputSlots());
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ // Move Constructor
+ SubgraphView subgraph3(std::move(subgraph2));
+ CHECK(subgraph->GetIConnectableLayers() == subgraph3.GetIConnectableLayers());
+ CHECK(subgraph->GetIInputSlots() == subgraph3.GetIInputSlots());
+ CHECK(subgraph->GetIOutputSlots() == subgraph3.GetIOutputSlots());
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ CHECK(subgraph->GetLayers() == subgraph3.GetLayers());
+ CHECK(subgraph->GetInputSlots() == subgraph3.GetInputSlots());
+ CHECK(subgraph->GetOutputSlots() == subgraph3.GetOutputSlots());
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ // Clear
+ subgraph.get()->Clear();
+ CHECK(subgraph->GetIConnectableLayers().size() == 0);
+ CHECK(subgraph->GetIInputSlots().size() == 0);
+ CHECK(subgraph->GetIOutputSlots().size() == 0);
+}
+
+} // SubgraphViewBackwardCompatibilityTests Test Suite end
+
TEST_SUITE("SubgraphSubstitution")
{
TEST_CASE("SingleInputSingleOutput")
@@ -148,17 +333,21 @@ TEST_CASE("SingleInputSingleOutput")
convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1}),
- CreateOutputsFrom({convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom({},
+ CreateIInputsFrom({convLayer1}),
+ CreateIOutputsFrom({convLayer2}));
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ // Using the GetIInputSlot/GetIOutputSlot functions
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(1, 1);
- Layer* const preCompiledLayer = graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
+
+ IConnectableLayer* const preCompiledLayer =
+ graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
// Substitute sub-graph with pre-compiled layer
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
@@ -191,8 +380,8 @@ TEST_CASE("SingleInputSingleOutputAddPrecompiledLayerSubstituteSubgraph1")
{});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
PreCompiledDescriptor preCompiledDescriptor(1, 1);
CompiledBlobPtr compiledBlobPtr;
@@ -235,8 +424,8 @@ TEST_CASE("SingleInputSingleOutputAddPrecompiledLayerSubstituteSubgraph2")
{});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
PreCompiledDescriptor preCompiledDescriptor(1, 1);
CompiledBlobPtr compiledBlobPtr;
@@ -275,13 +464,14 @@ TEST_CASE("SingleInputSingleOutputSubstituteGraph")
convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1}),
- CreateOutputsFrom({convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom(CreateInputsFrom({convLayer1}),
+ CreateOutputsFrom({convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
// Construct second graph with a single pre-compiled layer
Graph substituteGraph;
@@ -327,15 +517,15 @@ TEST_CASE("MultiInputSingleOutput")
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
- CreateOutputsFrom({concatLayer}),
- {});
+ auto subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
+ CreateOutputsFrom({concatLayer}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraph->GetInputSlot(0)->GetConnection();
- IOutputSlot* subgraphInputConn2 = subgraph->GetInputSlot(1)->GetConnection();
+ IOutputSlot* subgraphInputConn1 = subgraph->GetIInputSlot(0)->GetConnection();
+ IOutputSlot* subgraphInputConn2 = subgraph->GetIInputSlot(1)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(2, 1);
@@ -376,15 +566,16 @@ TEST_CASE("SingleInputMultiOutput")
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({splitterLayer}),
- CreateOutputsFrom({convLayer1, convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom(CreateInputsFrom({splitterLayer}),
+ CreateOutputsFrom({convLayer1, convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraph->GetInputSlot(0)->GetConnection();
+ IOutputSlot* subgraphInputConn1 = subgraph->GetIInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn1 = subgraph->GetOutputSlot(0)->GetConnection(0);
- IInputSlot* subgraphOutputConn2 = subgraph->GetOutputSlot(1)->GetConnection(0);
+ IInputSlot* subgraphOutputConn1 = subgraph->GetIOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraphOutputConn2 = subgraph->GetIOutputSlot(1)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(1, 2);
@@ -427,16 +618,17 @@ TEST_CASE("MultiInputMultiOutput")
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
- CreateOutputsFrom({convLayer1, convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
+ CreateOutputsFrom({convLayer1, convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraph->GetInputSlot(0)->GetConnection();
- IOutputSlot* subgraphInputConn2 = subgraph->GetInputSlot(1)->GetConnection();
+ IOutputSlot* subgraphInputConn1 = subgraph->GetIInputSlot(0)->GetConnection();
+ IOutputSlot* subgraphInputConn2 = subgraph->GetIInputSlot(1)->GetConnection();
- IInputSlot* subgraphOutputConn1 = subgraph->GetOutputSlot(0)->GetConnection(0);
- IInputSlot* subgraphOutputConn2 = subgraph->GetOutputSlot(1)->GetConnection(0);
+ IInputSlot* subgraphOutputConn1 = subgraph->GetIOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraphOutputConn2 = subgraph->GetIOutputSlot(1)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(2, 2);
@@ -453,7 +645,7 @@ TEST_CASE("MultiInputMultiOutput")
CHECK_EQ(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
}
-TEST_CASE("EraseReplacedLayers")
+TEST_CASE("EraseReplacedIConnectableLayers")
{
// Construct graph
Graph graph;
@@ -461,31 +653,31 @@ TEST_CASE("EraseReplacedLayers")
graph.AddLayer<InputLayer>(0, "input");
ViewsDescriptor splitterDescriptor(2);
- Layer* const splitterLayer = graph.AddLayer<SplitterLayer>(splitterDescriptor, "splitter");
+ IConnectableLayer* const splitterLayer = graph.AddLayer<SplitterLayer>(splitterDescriptor, "splitter");
Convolution2dDescriptor convDescriptor;
- Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
- Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
+ IConnectableLayer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
+ IConnectableLayer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
OriginsDescriptor concatDescriptor(2);
- Layer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
+ IConnectableLayer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
graph.AddLayer<OutputLayer>(0, "output");
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom({},
- {},
- {splitterLayer,
+ SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom({splitterLayer,
convLayer1,
convLayer2,
- concatLayer});
+ concatLayer},
+ {},
+ {});
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(0, 0);
Layer* const preCompiledLayer = graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
// Save sub-graph layers for later verification
- const SubgraphView::Layers subgraphLayers = subgraph->GetLayers();
+ const SubgraphView::IConnectableLayers subgraphLayers = subgraph->GetIConnectableLayers();
// Substitute sub-graph with pre-compiled layer
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
@@ -503,9 +695,9 @@ TEST_CASE("SubgraphForEmptyGraph")
Graph graph;
SubgraphView subgraph(graph);
- CHECK(subgraph.GetInputSlots().empty());
- CHECK(subgraph.GetOutputSlots().empty());
- CHECK(subgraph.GetLayers().empty());
+ CHECK(subgraph.GetIInputSlots().empty());
+ CHECK(subgraph.GetIOutputSlots().empty());
+ CHECK(subgraph.GetIConnectableLayers().empty());
}
TEST_CASE("SubgraphForEntireGraph")
@@ -523,9 +715,9 @@ TEST_CASE("SubgraphForEntireGraph")
SubgraphView subgraph(graph);
- CHECK(subgraph.GetInputSlots().empty());
- CHECK(subgraph.GetOutputSlots().empty());
- CHECK(subgraph.GetLayers().size() == graph.GetNumLayers());
+ CHECK(subgraph.GetIInputSlots().empty());
+ CHECK(subgraph.GetIOutputSlots().empty());
+ CHECK(subgraph.GetIConnectableLayers().size() == graph.GetNumLayers());
}
TEST_CASE("NoSubgraphsForNoMatch")
@@ -636,8 +828,9 @@ TEST_CASE("DisjointGraphs")
CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
- if (std::find(subgraphs[0]->GetLayers().begin(), subgraphs[0]->GetLayers().end(), i0) !=
- subgraphs[0]->GetLayers().end())
+ if (std::find(subgraphs[0]->GetIConnectableLayers().begin(),
+ subgraphs[0]->GetIConnectableLayers().end(), i0) !=
+ subgraphs[0]->GetIConnectableLayers().end())
{
CompareSubgraphViews(subgraphs[0], expected1);
CompareSubgraphViews(subgraphs[1], expected2);
@@ -729,12 +922,12 @@ TEST_CASE("IslandInTheMiddle")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr& lhs, SubgraphViewSelector::SubgraphViewPtr& rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
- CHECK(subgraphs[0]->GetLayers().size() == 2);
- CHECK(subgraphs[1]->GetLayers().size() == 5);
+ CHECK(subgraphs[0]->GetIConnectableLayers().size() == 2);
+ CHECK(subgraphs[1]->GetIConnectableLayers().size() == 5);
CompareSubgraphViews(subgraphs[0], smallerSubgraph);
CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -804,12 +997,12 @@ TEST_CASE("MultipleSimpleSubgraphs")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr & lhs, SubgraphViewSelector::SubgraphViewPtr & rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
- CHECK(subgraphs[0]->GetLayers().size() == 1);
- CHECK(subgraphs[1]->GetLayers().size() == 2);
+ CHECK(subgraphs[0]->GetIConnectableLayers().size() == 1);
+ CHECK(subgraphs[1]->GetIConnectableLayers().size() == 2);
CompareSubgraphViews(subgraphs[0], smallerSubgraph);
CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -1097,7 +1290,7 @@ TEST_CASE("ValidMerge")
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
- if (subgraphs[0]->GetInputSlots().size() == 1)
+ if (subgraphs[0]->GetIInputSlots().size() == 1)
{
CompareSubgraphViews(subgraphs[0], expectedSubgraph0);
CompareSubgraphViews(subgraphs[1], expectedSubgraph1);
@@ -1196,7 +1389,7 @@ TEST_CASE("PropagatedDependencies")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr& lhs, SubgraphViewSelector::SubgraphViewPtr& rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
@@ -1311,7 +1504,8 @@ TEST_CASE("Random")
for (uint32_t inputSlotIdx = 0; inputSlotIdx < layer->GetNumInputSlots(); ++inputSlotIdx)
{
InputSlot& inputSlot = layer->GetInputSlot(inputSlotIdx);
- uint32_t maxLayerDepthToConnectTo = layerDepths[layer]; // This prevents a connection causing a loop
+ uint32_t maxLayerDepthToConnectTo = layerDepths[layer];
+ // This prevents a connection causing a loop
// Finding a layer to connect to may take multiple attempts, so keep trying until it works.
while (inputSlot.GetConnectedOutputSlot() == nullptr)
{
@@ -1362,7 +1556,8 @@ TEST_CASE("Random")
for (std::unique_ptr<SubgraphView>& subgraph : subgraphs)
{
std::string name = std::to_string(i++);
- if (std::find(subgraph->begin(), subgraph->end(), layer) != subgraph->end())
+ if (std::find(subgraph->cbeginIConnectable(), subgraph->cendIConnectable(), layer)
+ != subgraph->cendIConnectable())
{
layerToSubgraph[layer] = subgraph.get();
break;
@@ -1397,10 +1592,10 @@ TEST_CASE("Random")
// encounter a layer that belongs to the subgraph that we started from.
for (std::unique_ptr<SubgraphView>& subgraph : subgraphs)
{
- for (InputSlot* inputSlot : subgraph->GetInputSlots())
+ for (IInputSlot* inSlot : subgraph->GetIInputSlots())
{
std::queue<Layer*> toProcess;
- toProcess.push(&inputSlot->GetConnectedOutputSlot()->GetOwningLayer());
+ toProcess.push(&PolymorphicDowncast<InputSlot*>(inSlot)->GetConnectedOutputSlot()->GetOwningLayer());
while (toProcess.size() > 0)
{
Layer* l = toProcess.front();
@@ -1462,15 +1657,15 @@ TEST_CASE("SingleSubgraph")
if (subgraphs[0].get() != nullptr)
{
- unsigned int numInputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetInputSlots().size());
- unsigned int numOutputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetOutputSlots().size());
+ unsigned int numInputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIInputSlots().size());
+ unsigned int numOutputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIOutputSlots().size());
CHECK((numInputSlots == 1));
CHECK((numOutputSlots == 1));
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraphs[0]->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn1 = subgraphs[0]->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn1 = subgraphs[0]->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn1 = subgraphs[0]->GetIOutputSlot(0)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(numInputSlots, numOutputSlots);
@@ -1540,25 +1735,25 @@ TEST_CASE("MultipleSubgraphs")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr & lhs, SubgraphViewSelector::SubgraphViewPtr & rhs)
{
- return (lhs->GetInputSlots().size() < rhs->GetInputSlots().size());
+ return (lhs->GetIInputSlots().size() < rhs->GetIInputSlots().size());
}
);
- unsigned int numInputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetInputSlots().size());
- unsigned int numOutputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetOutputSlots().size());
+ unsigned int numInputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIInputSlots().size());
+ unsigned int numOutputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIOutputSlots().size());
- unsigned int numInputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetInputSlots().size());
- unsigned int numOutputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetOutputSlots().size());
+ unsigned int numInputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetIInputSlots().size());
+ unsigned int numOutputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetIOutputSlots().size());
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraph1InputConn = subgraphs[0]->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraph1OutputConn1 = subgraphs[0]->GetOutputSlot(0)->GetConnection(0);
- IInputSlot* subgraph1OutputConn2 = subgraphs[0]->GetOutputSlot(1)->GetConnection(0);
+ IOutputSlot* subgraph1InputConn = subgraphs[0]->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraph1OutputConn1 = subgraphs[0]->GetIOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraph1OutputConn2 = subgraphs[0]->GetIOutputSlot(1)->GetConnection(0);
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraph2InputConn1 = subgraphs[1]->GetInputSlot(0)->GetConnection();
- IOutputSlot* subgraph2InputConn2 = subgraphs[1]->GetInputSlot(1)->GetConnection();
- IInputSlot* subgraph2OutputConn = subgraphs[1]->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraph2InputConn1 = subgraphs[1]->GetIInputSlot(0)->GetConnection();
+ IOutputSlot* subgraph2InputConn2 = subgraphs[1]->GetIInputSlot(1)->GetConnection();
+ IInputSlot* subgraph2OutputConn = subgraphs[1]->GetIOutputSlot(0)->GetConnection(0);
PreCompiledDescriptor preCompiledDescriptor1(numInputSlots1, numOutputSlots1);
Layer* const preCompiledLayer1 = graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor1, "pre-compiled1");
@@ -1655,13 +1850,13 @@ TEST_CASE("SubgraphCycles")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr & lhs, SubgraphViewSelector::SubgraphViewPtr & rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
// one subgraph needs to be size=1 and the other one is size=2
- CHECK(subgraphs[0]->GetLayers().size() == 1);
- CHECK(subgraphs[1]->GetLayers().size() == 2);
+ CHECK(subgraphs[0]->GetIConnectableLayers().size() == 1);
+ CHECK(subgraphs[1]->GetIConnectableLayers().size() == 2);
CompareSubgraphViews(subgraphs[0], outputSubgraph);
CompareSubgraphViews(subgraphs[1], inputSubgraph);
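A minimal usage sketch (not part of the patch) of the interface-typed SubgraphView API that the updated tests above exercise. It assumes the same armnn test headers and doctest CHECK macro as SubgraphViewTests.cpp; the trivial graph and layer names are illustrative only.
    // Build a small graph through the IConnectableLayer interface.
    Graph graph;
    IConnectableLayer* inputLayer  = graph.AddLayer<InputLayer>(0, "input");
    Convolution2dDescriptor convDescriptor;
    IConnectableLayer* convLayer   = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv");
    IConnectableLayer* outputLayer = graph.AddLayer<OutputLayer>(0, "output");
    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
    // A view over the whole graph has no boundary slots, and its layers are
    // exposed as IConnectableLayer* rather than Layer*.
    SubgraphView wholeGraphView(graph);
    CHECK(wholeGraphView.GetIInputSlots().empty());
    CHECK(wholeGraphView.GetIOutputSlots().empty());
    CHECK(wholeGraphView.GetIConnectableLayers().size() == graph.GetNumLayers());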
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index 129a766729..7224cc8d06 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -6,4 +6,4 @@
#include "../../armnnTestUtils/UnitTests.hpp"
#pragma message("src/armnn/test/UnitTests.hpp has been deprecated, it is due for removal in 22.08 release." \
- " Please use from armnnTestUtils library, /src/armnnTestUtils/UnitTests.hpp) \ No newline at end of file
+ " Please use from armnnTestUtils library, /src/armnnTestUtils/UnitTests.hpp")