aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrancis Murtagh <francis.murtagh@arm.com>2021-12-13 18:48:12 +0000
committerFrancis Murtagh <francis.murtagh@arm.com>2022-01-14 16:01:58 +0000
commit56ccf68c7858560f2ba00f19076b3cb112970881 (patch)
tree6e19cd38aa1d452ca3b9a9a1f68ff42dd64dc1d7
parent2db6d5aff3d4f596d4b4018a7b454c2a2c8f7122 (diff)
downloadarmnn-56ccf68c7858560f2ba00f19076b3cb112970881.tar.gz
IVGCVSW-6633 SubgraphView uses IConnectableLayer rather than Layer in its m_Layers
* Added IInputSlot, IOutputSlot and IConnectableLayer to SubgraphView * Deprecated old member functions * Removed deprecated calls in ArmNN * Added GetOwningIConnectableLayer function to IOutputSlot * Updates ArmNN Core Major version for IOutputSlot ABI break * Updated Minor version of TfliteParser, OnnxParser and Delegate Signed-off-by: Francis Murtagh <francis.murtagh@arm.com> Change-Id: I2a8611bfabf5ae09d3602fe6a4bef166e18117b9
-rw-r--r--delegate/include/Version.hpp2
-rw-r--r--include/armnn/Deprecated.hpp2
-rw-r--r--include/armnn/INetwork.hpp2
-rw-r--r--include/armnn/Version.hpp2
-rw-r--r--include/armnnOnnxParser/Version.hpp2
-rw-r--r--include/armnnTfLiteParser/Version.hpp2
-rw-r--r--python/pyarmnn/README.md14
-rw-r--r--python/pyarmnn/examples/image_classification/README.md2
-rw-r--r--python/pyarmnn/examples/object_detection/README.md2
-rw-r--r--python/pyarmnn/examples/speech_recognition/README.md2
-rw-r--r--python/pyarmnn/src/pyarmnn/_version.py4
-rw-r--r--python/pyarmnn/test/test_setup.py8
-rw-r--r--python/pyarmnn/test/test_version.py4
-rw-r--r--samples/ObjectDetection/Readme.md6
-rw-r--r--src/armnn/Graph.cpp36
-rw-r--r--src/armnn/Graph.hpp4
-rw-r--r--src/armnn/Layer.cpp5
-rw-r--r--src/armnn/Layer.hpp2
-rw-r--r--src/armnn/Network.cpp267
-rw-r--r--src/armnn/Network.hpp8
-rw-r--r--src/armnn/SubgraphView.cpp168
-rw-r--r--src/armnn/SubgraphView.hpp71
-rw-r--r--src/armnn/SubgraphViewSelector.cpp62
-rw-r--r--src/armnn/test/SubgraphViewTests.cpp393
-rw-r--r--src/armnn/test/UnitTests.hpp2
-rw-r--r--src/backends/aclCommon/ArmComputeSubgraphUtils.hpp56
-rw-r--r--src/backends/backendsCommon/OptimizationViews.cpp20
-rw-r--r--src/backends/backendsCommon/test/MockBackend.cpp14
-rw-r--r--src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp248
-rw-r--r--src/backends/cl/ClBackend.cpp18
-rw-r--r--src/backends/neon/NeonBackend.cpp18
31 files changed, 1012 insertions, 434 deletions
diff --git a/delegate/include/Version.hpp b/delegate/include/Version.hpp
index 0e84e9597f..88cf8ed4b4 100644
--- a/delegate/include/Version.hpp
+++ b/delegate/include/Version.hpp
@@ -14,7 +14,7 @@ namespace armnnDelegate
// ArmNN Delegate version components
#define DELEGATE_MAJOR_VERSION 25
-#define DELEGATE_MINOR_VERSION 0
+#define DELEGATE_MINOR_VERSION 1
#define DELEGATE_PATCH_VERSION 0
/// DELEGATE_VERSION: "X.Y.Z"
diff --git a/include/armnn/Deprecated.hpp b/include/armnn/Deprecated.hpp
index c493adb308..6b7fec6676 100644
--- a/include/armnn/Deprecated.hpp
+++ b/include/armnn/Deprecated.hpp
@@ -43,6 +43,8 @@ ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_DEPRECATED_MSG(message) [[deprecated(message)]]
#define ARMNN_DEPRECATED_MSG_REMOVAL_DATE(message, removed_in_release) \
[[deprecated("Expected to be removed in release " #removed_in_release ". " message)]]
+#define ARMNN_DEPRECATED_MSG_CHANGE_DATE(message, signature_changed_in_release) \
+[[deprecated("Expected to have signature changed in release " #signature_changed_in_release ". " message)]]
#if defined(__GNUC__) && (__GNUC__ < 6)
# define ARMNN_DEPRECATED_ENUM
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 5d25665dfe..a48ee25f72 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -53,6 +53,8 @@ public:
virtual LayerGuid GetOwningLayerGuid() const = 0;
+ virtual const IConnectableLayer& GetOwningIConnectableLayer() const = 0;
+
protected:
/// Not user deletable.
~IOutputSlot() {}
diff --git a/include/armnn/Version.hpp b/include/armnn/Version.hpp
index 3a5b568169..7e172246e3 100644
--- a/include/armnn/Version.hpp
+++ b/include/armnn/Version.hpp
@@ -10,7 +10,7 @@
#define STRINGIFY_MACRO(s) #s
// ArmNN version components
-#define ARMNN_MAJOR_VERSION 27
+#define ARMNN_MAJOR_VERSION 28
#define ARMNN_MINOR_VERSION 0
#define ARMNN_PATCH_VERSION 0
diff --git a/include/armnnOnnxParser/Version.hpp b/include/armnnOnnxParser/Version.hpp
index da3e392bc8..ed9d8690ec 100644
--- a/include/armnnOnnxParser/Version.hpp
+++ b/include/armnnOnnxParser/Version.hpp
@@ -14,7 +14,7 @@ namespace armnnOnnxParser
// OnnxParser version components
#define ONNX_PARSER_MAJOR_VERSION 24
-#define ONNX_PARSER_MINOR_VERSION 3
+#define ONNX_PARSER_MINOR_VERSION 4
#define ONNX_PARSER_PATCH_VERSION 0
/// ONNX_PARSER_VERSION: "X.Y.Z"
diff --git a/include/armnnTfLiteParser/Version.hpp b/include/armnnTfLiteParser/Version.hpp
index b0490cebec..eee2124678 100644
--- a/include/armnnTfLiteParser/Version.hpp
+++ b/include/armnnTfLiteParser/Version.hpp
@@ -14,7 +14,7 @@ namespace armnnTfLiteParser
// TfLiteParser version components
#define TFLITE_PARSER_MAJOR_VERSION 24
-#define TFLITE_PARSER_MINOR_VERSION 3
+#define TFLITE_PARSER_MINOR_VERSION 4
#define TFLITE_PARSER_PATCH_VERSION 0
/// TFLITE_PARSER_VERSION: "X.Y.Z"
diff --git a/python/pyarmnn/README.md b/python/pyarmnn/README.md
index dc3b8a1fb0..4ac36fafb4 100644
--- a/python/pyarmnn/README.md
+++ b/python/pyarmnn/README.md
@@ -91,14 +91,14 @@ This step will put all generated files under `./src/pyarmnn/_generated` folder a
```bash
$ python setup.py sdist
```
-As the result you will get `./dist/pyarmnn-27.0.0.tar.gz` file. As you can see it is platform independent.
+As the result you will get `./dist/pyarmnn-28.0.0.tar.gz` file. As you can see it is platform independent.
##### 5. Build the binary package
```bash
$ python setup.py bdist_wheel
```
-As the result you will get something like `./dist/pyarmnn-27.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
+As the result you will get something like `./dist/pyarmnn-28.0.0-cp36-cp36m-linux_x86_64.whl` file. As you can see it
is platform dependent.
# PyArmNN installation
@@ -107,8 +107,8 @@ PyArmNN can be distributed as a source package or a binary package (wheel).
Binary package is platform dependent, the name of the package will indicate the platform it was built for, e.g.:
-* Linux x86 64bit machine: pyarmnn-27.0.0-cp36-cp36m-*linux_x86_64*.whl
-* Linux Aarch 64 bit machine: pyarmnn-27.0.0-cp36-cp36m-*linux_aarch64*.whl
+* Linux x86 64bit machine: pyarmnn-28.0.0-cp36-cp36m-*linux_x86_64*.whl
+* Linux Aarch 64 bit machine: pyarmnn-28.0.0-cp36-cp36m-*linux_aarch64*.whl
The source package is platform independent but installation involves compilation of Arm NN python extension. You will need to have g++ compatible with C++ 14 standard and a python development library installed on the build machine.
@@ -126,7 +126,7 @@ $ gcc --print-search-dirs
```
Install PyArmNN from binary by pointing to the wheel file:
```bash
-$ pip install /path/to/pyarmnn-27.0.0-cp36-cp36m-linux_aarch64.whl
+$ pip install /path/to/pyarmnn-28.0.0-cp36-cp36m-linux_aarch64.whl
```
## Installing from source package
@@ -143,7 +143,7 @@ $ export ARMNN_INCLUDE=/full/path/to/armnn/include:/full/path/to/armnn/profilin
Install PyArmNN as follows:
```bash
-$ pip install /path/to/pyarmnn-27.0.0.tar.gz
+$ pip install /path/to/pyarmnn-28.0.0.tar.gz
```
If PyArmNN installation script fails to find Arm NN libraries it will raise an error like this
@@ -157,7 +157,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'27.0.0'
+'28.0.0'
```
# PyArmNN API overview
diff --git a/python/pyarmnn/examples/image_classification/README.md b/python/pyarmnn/examples/image_classification/README.md
index c7dbc6f001..cc05bbcda5 100644
--- a/python/pyarmnn/examples/image_classification/README.md
+++ b/python/pyarmnn/examples/image_classification/README.md
@@ -20,7 +20,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'27.0.0'
+'28.0.0'
```
##### Dependencies
diff --git a/python/pyarmnn/examples/object_detection/README.md b/python/pyarmnn/examples/object_detection/README.md
index 1e3bbc49a3..421cfae074 100644
--- a/python/pyarmnn/examples/object_detection/README.md
+++ b/python/pyarmnn/examples/object_detection/README.md
@@ -23,7 +23,7 @@ $ pip show pyarmnn
You can also verify it by running the following and getting output similar to below:
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'27.0.0'
+'28.0.0'
```
##### Dependencies
diff --git a/python/pyarmnn/examples/speech_recognition/README.md b/python/pyarmnn/examples/speech_recognition/README.md
index 7d6a4fc54b..c4096efcc5 100644
--- a/python/pyarmnn/examples/speech_recognition/README.md
+++ b/python/pyarmnn/examples/speech_recognition/README.md
@@ -18,7 +18,7 @@ You can also verify it by running the following and getting output similar to be
```bash
$ python -c "import pyarmnn as ann;print(ann.GetVersion())"
-'27.0.0'
+'28.0.0'
```
### Dependencies
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index 803a66feec..e1566d22f7 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: MIT
import os
-version_info = (27, 0, 0)
+version_info = (28, 0, 0)
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
@@ -24,7 +24,7 @@ def check_armnn_version(installed_armnn_version: str, expected_armnn_version: st
"""Compares expected Arm NN version and Arm NN version used to build the package.
Args:
- installed_armnn_version (str): Arm NN version used to generate the package (e.g. 27.0.0)
+ installed_armnn_version (str): Arm NN version used to generate the package (e.g. 28.0.0)
expected_armnn_version (str): Expected Arm NN version
Returns:
diff --git a/python/pyarmnn/test/test_setup.py b/python/pyarmnn/test/test_setup.py
index 90eb76e4da..6fab167788 100644
--- a/python/pyarmnn/test/test_setup.py
+++ b/python/pyarmnn/test/test_setup.py
@@ -87,15 +87,15 @@ def test_gcc_serch_path():
def test_armnn_version():
- check_armnn_version('27.0.0', '27.0.0')
+ check_armnn_version('28.0.0', '28.0.0')
def test_incorrect_armnn_version():
with pytest.raises(AssertionError) as err:
- check_armnn_version('27.0.0', '27.1.0')
+ check_armnn_version('28.0.0', '28.1.0')
- assert 'Expected ArmNN version is 27.1.0 but installed ArmNN version is 27.0.0' in str(err.value)
+ assert 'Expected ArmNN version is 28.1.0 but installed ArmNN version is 28.0.0' in str(err.value)
def test_armnn_version_patch_does_not_matter():
- check_armnn_version('27.0.0', '27.0.1')
+ check_armnn_version('28.0.0', '28.0.1')
diff --git a/python/pyarmnn/test/test_version.py b/python/pyarmnn/test/test_version.py
index 2bbcc0546a..dd60aa367e 100644
--- a/python/pyarmnn/test/test_version.py
+++ b/python/pyarmnn/test/test_version.py
@@ -18,7 +18,7 @@ def test_dev_version():
importlib.reload(v)
- assert "27.0.0.dev1" == v.__version__
+ assert "28.0.0.dev1" == v.__version__
del os.environ["PYARMNN_DEV_VER"]
del v
@@ -30,7 +30,7 @@ def test_arm_version_not_affected():
importlib.reload(v)
- assert "27.0.0" == v.__arm_ml_version__
+ assert "28.0.0" == v.__arm_ml_version__
del os.environ["PYARMNN_DEV_VER"]
del v
diff --git a/samples/ObjectDetection/Readme.md b/samples/ObjectDetection/Readme.md
index 95c6b73401..bc53373ce5 100644
--- a/samples/ObjectDetection/Readme.md
+++ b/samples/ObjectDetection/Readme.md
@@ -168,10 +168,10 @@ From the build directory, copy the following to the host platform:
The full list of libs after cross-compilation to copy on your board:
```
libarmnn.so
-libarmnn.so.27
-libarmnn.so.27.0
+libarmnn.so.28
+libarmnn.so.28.0
libarmnnTfLiteParser.so
-libarmnnTfLiteParser.so.24.3
+libarmnnTfLiteParser.so.24.4
libavcodec.so
libavcodec.so.58
libavcodec.so.58.54.100
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 6d24e50bdc..cdb323432c 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -445,10 +445,13 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substi
void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
// Look through each layer in the new subgraph and add any that are not already a member of this graph
- substituteSubgraph.ForEachLayer([this](Layer* layer)
+ substituteSubgraph.ForEachIConnectableLayer([this](IConnectableLayer* iConnectableLayer)
{
- if (std::find(std::begin(m_Layers), std::end(m_Layers), layer) == std::end(m_Layers))
+ if (std::find(std::begin(m_Layers),
+ std::end(m_Layers),
+ iConnectableLayer) == std::end(m_Layers))
{
+ auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
layer->Reparent(*this, m_Layers.end());
m_LayersInOrder = false;
}
@@ -461,24 +464,26 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& subst
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
- ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
+ ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
+ "New sub-graph used for substitution must not be empty");
- const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
- std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
+ const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
+ std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
{
IgnoreUnused(layer);
+ layer = PolymorphicDowncast<Layer*>(layer);
ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
- const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraph.GetOutputSlots();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraph.GetIOutputSlots();
unsigned int subgraphNumInputSlots = armnn::numeric_cast<unsigned int>(subgraphInputSlots.size());
unsigned int subgraphNumOutputSlots = armnn::numeric_cast<unsigned int>(subgraphOutputSlots.size());
- const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
- const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();
+ const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();
ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
@@ -488,7 +493,7 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
// Step 1: process input slots
for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
{
- InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
+ IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
ARMNN_ASSERT(subgraphInputSlot);
IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
@@ -503,19 +508,24 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
// Step 2: process output slots
for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
{
- OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
+ auto subgraphOutputSlot =
+ PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
ARMNN_ASSERT(subgraphOutputSlot);
- OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
+ auto substituteOutputSlot =
+ PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
ARMNN_ASSERT(substituteOutputSlot);
+
subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
}
}
void Graph::EraseSubgraphLayers(SubgraphView &subgraph)
{
- for (auto layer : subgraph.GetLayers())
+
+ for (auto iConnectableLayer : subgraph.GetIConnectableLayers())
{
+ auto layer = PolymorphicDowncast<Layer*>(iConnectableLayer);
EraseLayer(layer);
}
subgraph.Clear();
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index d49b5e513f..d71149d069 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -48,7 +48,9 @@ public:
}
using LayerList = std::list<Layer*>;
- using Iterator = LayerList::const_iterator; // Const so pointers in the list can't be modified externally.
+
+ // Const so pointers in the list can't be modified externally.
+ using Iterator = LayerList::const_iterator;
using IteratorDifference = Iterator::difference_type;
using ConstIterator = TransformIterator<decltype(&PtrCast<const Layer>), Iterator>;
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index e0d0f913c3..98fc14b56e 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -480,4 +480,9 @@ void Layer::ExecuteStrategy(IStrategy& strategy) const
strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
}
+const IConnectableLayer& OutputSlot::GetOwningIConnectableLayer() const
+{
+ return m_OwningLayer;
+}
+
} // namespace armnn
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index f665d155d1..f2ea6cb26d 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -114,6 +114,8 @@ public:
Layer& GetOwningLayer() const { return m_OwningLayer; }
+ const IConnectableLayer& GetOwningIConnectableLayer() const override;
+
LayerGuid GetOwningLayerGuid() const override;
const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index e89c6fe407..de60e11eef 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -916,6 +916,120 @@ OptimizationResult AttemptBackendAssignment(BackendSettings& backendSettings,
}
}
+// Refactor to allow passing the IConnectableLayer* rather than Layer Iterator
+// on Graph and SubgraphView which are different types.
+void AssignBackendsIConnectable(OptimizedNetworkImpl* optNetObjPtr,
+ IConnectableLayer* it,
+ Optional<std::vector<std::string>&> errMessages,
+ OptimizationResult& result,
+ BackendSettings& backendSettings,
+ std::vector<BackendId>& availablePreferredBackends)
+{
+ auto ReturnError = [&](const Layer* layer)
+ {
+ return ReturnWithError(result, layer, backendSettings, errMessages);
+ };
+
+ auto layer = PolymorphicDowncast<Layer*>(it);
+
+ if (layer->GetType() == LayerType::Input)
+ {
+ return;
+ }
+
+ DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
+ layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
+ DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
+ layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+
+ std::string reasonIfUnsupported;
+ bool found = false;
+ if (!CheckScaleSetOnQuantizedType(layer, errMessages))
+ {
+ // don't bomb immediately, find all the quantized outputs
+ // which haven't had a scale set and report them all back.
+ result.m_Error = true;
+ }
+
+ // First try assign layer to hint backend
+ if (layer->GetBackendHint().has_value() &&
+ backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
+ AttemptBackendAssignment(backendSettings,
+ optNetObjPtr->GetGraph(),
+ layer,
+ layer->GetBackendHint().value(),
+ dataTypeIn,
+ dataTypeOut,
+ availablePreferredBackends,
+ reasonIfUnsupported,
+ errMessages).IsOk())
+ {
+ found = true;
+ backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
+ }
+ else
+ {
+ // Try to assign layer to preferred list of backends
+ for (const auto& backend : availablePreferredBackends)
+ {
+ if (layer->GetBackendHint().has_value() &&
+ layer->GetBackendHint().value() == backend)
+ {
+ continue; //Don't re-test the backend hint
+ }
+
+ OptimizationResult res = AttemptBackendAssignment(backendSettings,
+ optNetObjPtr->GetGraph(),
+ layer,
+ backend,
+ dataTypeIn,
+ dataTypeOut,
+ availablePreferredBackends,
+ reasonIfUnsupported,
+ errMessages);
+
+ if (res.IsOk())
+ {
+ found = true;
+ backendSettings.m_SelectedBackends.insert(backend);
+ break;
+ }
+ else if (res.IsError())
+ {
+ result = res; // Cannot continue.
+ // Note: we don't need to log the error as it would already
+ // be logged in AttemptBackendAssignment().
+ }
+ else
+ {
+ ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
+ }
+ }
+ }
+
+ // If the layer is unsupported by any devices, log and return a null network.
+ if (!found)
+ {
+ // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
+ // fallback we should set the compute device on the layer to CpuRef (these are not
+ // available as accelerated operations, or are only available under certain
+ // conditions, currently they comprise MemCopy, Constant, Permute)
+ armnn::LayerType layerType = layer->GetType();
+ if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
+ layerType == armnn::LayerType::Constant ||
+ layerType == armnn::LayerType::Permute))
+ {
+ BackendId cpuBackendId(armnn::Compute::CpuRef);
+ layer->SetBackendId(cpuBackendId);
+ backendSettings.m_SelectedBackends.insert(cpuBackendId);
+ }
+ else
+ {
+ result = ReturnError(layer);
+ }
+ }
+
+}
OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
BackendSettings& backendSettings,
@@ -926,13 +1040,6 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AssignBackends");
OptimizationResult result;
- // Helper lambda to compose meaningful error message before returning with error
- auto ReturnError = [&](const Layer* layer)
- {
- return ReturnWithError(result, layer, backendSettings, errMessages);
- };
-
-
auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
if (availablePreferredBackends.empty())
{
@@ -946,109 +1053,61 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
for (auto it = firstLayer; it != lastLayer; ++it)
{
- auto layer = *it;
+ AssignBackendsIConnectable(optNetObjPtr,
+ *it,
+ errMessages,
+ result,
+ backendSettings,
+ availablePreferredBackends);
+ }
+
+ for (auto it = firstLayer; it != lastLayer; ++it)
+ {
+ auto layer = PolymorphicDowncast<Layer*>(*it);
- if (layer->GetType() == LayerType::Input)
+ if(layer->GetType() == LayerType::Input)
{
- continue;
+ BackendId connectedBackendId = layer->GetOutputSlot(0).GetConnection(0)->GetOwningLayer().GetBackendId();
+ layer->SetBackendId(connectedBackendId);
}
+ }
- DataType dataTypeIn = layer->GetNumInputSlots() == 0 ? DataType::Float32 :
- layer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType();
- DataType dataTypeOut = layer->GetNumOutputSlots() == 0 ? DataType::Float32 :
- layer->GetOutputSlot(0).GetTensorInfo().GetDataType();
+ return result;
+}
- std::string reasonIfUnsupported;
- bool found = false;
- if (!CheckScaleSetOnQuantizedType(layer, errMessages))
- {
- // don't bomb immediately, find all the quantized outputs
- // which haven't had a scale set and report them all back.
- result.m_Error = true;
- }
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
+ BackendSettings& backendSettings,
+ SubgraphView::IConnectableLayerIterator& firstLayer,
+ SubgraphView::IConnectableLayerIterator& lastLayer,
+ Optional<std::vector<std::string>&> errMessages)
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_AssignBackends");
+ OptimizationResult result;
- // First try assign layer to hint backend
- if (layer->GetBackendHint().has_value() &&
- backendSettings.IsBackendSupported(layer->GetBackendHint().value()) &&
- AttemptBackendAssignment(backendSettings,
- optNetObjPtr->GetGraph(),
- layer,
- layer->GetBackendHint().value(),
- dataTypeIn,
- dataTypeOut,
- availablePreferredBackends,
- reasonIfUnsupported,
- errMessages).IsOk())
- {
- found = true;
- backendSettings.m_SelectedBackends.insert(layer->GetBackendHint().value());
- }
- else
- {
- // Try assign layer to prefered list of backends
- for (const auto& backend : availablePreferredBackends)
- {
- if (layer->GetBackendHint().has_value() &&
- layer->GetBackendHint().value() == backend)
- {
- continue; //Don't re-test the backend hint
- }
+ auto availablePreferredBackends = backendSettings.GetAvailablePreferredBackends();
+ if (availablePreferredBackends.empty())
+ {
+ std::stringstream failureMsg;
+ failureMsg << "No preferred backends are available";
+ ReportError(failureMsg.str(), errMessages);
- OptimizationResult res = AttemptBackendAssignment(backendSettings,
- optNetObjPtr->GetGraph(),
- layer,
- backend,
- dataTypeIn,
- dataTypeOut,
- availablePreferredBackends,
- reasonIfUnsupported,
- errMessages);
-
- if (res.IsOk())
- {
- found = true;
- backendSettings.m_SelectedBackends.insert(backend);
- break;
- }
- else if (res.IsError())
- {
- return res; // Cannot continue.
- // Note: we don't need to log the error as it would already
- // be logged in AttemptBackendAssignment().
- }
- else
- {
- ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
- }
- }
- }
+ result.m_Error = true;
+ return result;
+ }
- // If the layer is unsupported by any devices, log and return a null network.
- if (!found)
- {
- // NOTE: if the layer is not an operation queue type AND we have not got CpuRef as a
- // fallback we should set the compute device on the layer to CpuRef (these are not
- // available as accelerated operations, or are only available under certain
- // conditions, currently they comprise MemCopy, Constant, Permute)
- armnn::LayerType layerType = layer->GetType();
- if (!backendSettings.IsCpuRefUsed() && (layerType == armnn::LayerType::MemCopy ||
- layerType == armnn::LayerType::Constant ||
- layerType == armnn::LayerType::Permute))
- {
- BackendId cpuBackendId(armnn::Compute::CpuRef);
- layer->SetBackendId(cpuBackendId);
- backendSettings.m_SelectedBackends.insert(cpuBackendId);
- }
- else
- {
- return ReturnError(layer);
- }
- }
+ for (auto it = firstLayer; it != lastLayer; ++it)
+ {
+ AssignBackendsIConnectable(optNetObjPtr,
+ *it,
+ errMessages,
+ result,
+ backendSettings,
+ availablePreferredBackends);
}
for (auto it = firstLayer; it != lastLayer; ++it)
{
- auto layer = *it;
+ auto layer = PolymorphicDowncast<Layer*>(*it);
if(layer->GetType() == LayerType::Input)
{
@@ -1065,8 +1124,8 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
SubgraphView& subgraph,
Optional<std::vector<std::string>&> errMessages)
{
- Graph::Iterator firstLayer = subgraph.begin();
- Graph::Iterator lastLayer = subgraph.end();
+ SubgraphView::IConnectableLayerIterator firstLayer = subgraph.beginIConnectable();
+ SubgraphView::IConnectableLayerIterator lastLayer = subgraph.endIConnectable();
return AssignBackends(optNetObjPtr,
backendSettings,
firstLayer,
@@ -1118,6 +1177,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
// Select layers assigned to the requested backend
[&backendObjPtr](const Layer& layer)
{
+
return layer.GetType() != LayerType::Input &&
layer.GetType() != LayerType::Output &&
layer.GetBackendId() == backendObjPtr->GetId();
@@ -1145,10 +1205,11 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
optGraph.SubstituteSubgraph(substitutableSubgraph, replacementSubgraph);
// Assign the current backend to the optimized sub-graph
- std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
+ const SubgraphView::IConnectableLayers& subgraphLayers = replacementSubgraph.GetIConnectableLayers();
+ std::for_each(subgraphLayers.begin(), subgraphLayers.end(), [&selectedBackend](IConnectableLayer* l)
{
ARMNN_ASSERT(l);
- l->SetBackendId(selectedBackend);
+ PolymorphicDowncast<Layer*>(l)->SetBackendId(selectedBackend);
});
}
@@ -1171,7 +1232,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
{
// An error occurred: the optimization was attempted but not performed, try different backends
std::stringstream subgraphMsg;
- subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetLayers().size()
+ subgraphMsg << "Re-assigning backends to " << failedSubgraph.GetIConnectableLayers().size()
<< " layers inside sub-graph " << count++;
ReportWarning(subgraphMsg.str(), errMessages);
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index ee84d7b33b..a2ef0d8364 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -20,6 +20,7 @@
#include "Graph.hpp"
#include "Layer.hpp"
#include "OptimizedNetworkImpl.hpp"
+#include "SubgraphView.hpp"
namespace armnn
{
@@ -301,4 +302,11 @@ OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
Graph::Iterator& lastLayer,
Optional<std::vector<std::string>&> errMessages);
+
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
+ BackendSettings& backendSettings,
+ SubgraphView::IConnectableLayerIterator& firstLayer,
+ SubgraphView::IConnectableLayerIterator& lastLayer,
+ Optional<std::vector<std::string>&> errMessages);
+
} // namespace armnn
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index 0d41889e15..2de4510b7b 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -44,24 +44,69 @@ SubgraphView::SubgraphView(Graph& graph)
: m_InputSlots{}
, m_OutputSlots{}
, m_Layers(graph.begin(), graph.end())
+ , m_IConnectableLayers(graph.begin(), graph.end())
{
ArrangeBySortOrder();
CheckSubgraph();
}
+/// IConnectable Duplication to maintain backwards compatibility
SubgraphView::SubgraphView(InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers)
- : m_InputSlots{inputs}
- , m_OutputSlots{outputs}
- , m_Layers{layers}
+ : m_InputSlots{InputSlots{inputs.begin(), inputs.end()}}
+ , m_IInputSlots{IInputSlots{inputs.begin(), inputs.end()}}
+ , m_OutputSlots{OutputSlots{outputs.begin(), outputs.end()}}
+ , m_IOutputSlots{IOutputSlots{outputs.begin(), outputs.end()}}
+ , m_Layers(layers)
+ , m_IConnectableLayers(IConnectableLayers{layers.begin(), layers.end()})
{
ArrangeBySortOrder();
CheckSubgraph();
}
+/// IConnectable Duplication to maintain backwards compatibility
+SubgraphView::SubgraphView(SubgraphView::IConnectableLayers &&layers,
+ SubgraphView::IInputSlots &&inputs,
+ SubgraphView::IOutputSlots &&outputs)
+ : m_IInputSlots{inputs}
+ , m_IOutputSlots{outputs}
+ , m_IConnectableLayers(IConnectableLayers{layers.begin(), layers.end()})
+{
+ // Cast from IConnectableLayer to Layer for backward compatibility
+ auto f = [](IConnectableLayer* value)
+ {
+ return PolymorphicDowncast<Layer*>(value);
+ };
+ std::transform(layers.begin(), layers.end(), std::back_inserter(m_Layers), f);
+
+
+ m_InputSlots.resize(inputs.size());
+ m_IInputSlots.resize(inputs.size());
+ for (unsigned int i = 0; i < inputs.size(); i++)
+ {
+ m_InputSlots.at(i) = PolymorphicDowncast<InputSlot*>(inputs[i]);
+ m_IInputSlots.at(i) = inputs[i];
+ }
+
+ m_OutputSlots.resize(outputs.size());
+ m_IOutputSlots.resize(outputs.size());
+ for (unsigned int i = 0; i < outputs.size(); i++)
+ {
+ m_OutputSlots.at(i) = PolymorphicDowncast<OutputSlot*>(outputs[i]);
+ m_IOutputSlots.at(i) = outputs[i];
+ }
+
+ ArrangeBySortOrder();
+ CheckSubgraph();
+}
+
SubgraphView::SubgraphView(const SubgraphView& subgraph)
: m_InputSlots(subgraph.m_InputSlots.begin(), subgraph.m_InputSlots.end())
+ , m_IInputSlots(subgraph.m_IInputSlots.begin(), subgraph.m_IInputSlots.end())
, m_OutputSlots(subgraph.m_OutputSlots.begin(), subgraph.m_OutputSlots.end())
+ , m_IOutputSlots(subgraph.m_IOutputSlots.begin(), subgraph.m_IOutputSlots.end())
, m_Layers(subgraph.m_Layers.begin(), subgraph.m_Layers.end())
+ , m_IConnectableLayers(IConnectableLayers{subgraph.m_IConnectableLayers.begin(),
+ subgraph.m_IConnectableLayers.end()})
{
ArrangeBySortOrder();
CheckSubgraph();
@@ -69,30 +114,36 @@ SubgraphView::SubgraphView(const SubgraphView& subgraph)
SubgraphView::SubgraphView(SubgraphView&& subgraph)
: m_InputSlots(std::move(subgraph.m_InputSlots))
+ , m_IInputSlots(std::move(subgraph.m_IInputSlots))
, m_OutputSlots(std::move(subgraph.m_OutputSlots))
+ , m_IOutputSlots(std::move(subgraph.m_IOutputSlots))
, m_Layers(std::move(subgraph.m_Layers))
+ , m_IConnectableLayers(std::move(subgraph.m_IConnectableLayers))
{
ArrangeBySortOrder();
CheckSubgraph();
}
SubgraphView::SubgraphView(IConnectableLayer* layer)
- : m_InputSlots{}
- , m_OutputSlots{}
- , m_Layers{PolymorphicDowncast<Layer*>(layer)}
+ : m_Layers{PolymorphicDowncast<Layer*>(layer)}
+ , m_IConnectableLayers{layer}
{
unsigned int numInputSlots = layer->GetNumInputSlots();
m_InputSlots.resize(numInputSlots);
+ m_IInputSlots.resize(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; i++)
{
m_InputSlots.at(i) = PolymorphicDowncast<InputSlot*>(&(layer->GetInputSlot(i)));
+ m_IInputSlots.at(i) = &(layer->GetInputSlot(i));
}
unsigned int numOutputSlots = layer->GetNumOutputSlots();
m_OutputSlots.resize(numOutputSlots);
+ m_IOutputSlots.resize(numOutputSlots);
for (unsigned int i = 0; i < numOutputSlots; i++)
{
m_OutputSlots.at(i) = PolymorphicDowncast<OutputSlot*>(&(layer->GetOutputSlot(i)));
+ m_IOutputSlots.at(i) = &(layer->GetOutputSlot(i));
}
CheckSubgraph();
@@ -101,8 +152,11 @@ SubgraphView::SubgraphView(IConnectableLayer* layer)
SubgraphView& SubgraphView::operator=(SubgraphView&& other)
{
m_InputSlots = std::move(other.m_InputSlots);
+ m_IInputSlots = std::move(other.m_IInputSlots);
m_OutputSlots = std::move(other.m_OutputSlots);
+ m_IOutputSlots = std::move(other.m_IOutputSlots);
m_Layers = std::move(other.m_Layers);
+ m_IConnectableLayers = std::move(other.m_IConnectableLayers);
CheckSubgraph();
@@ -119,6 +173,16 @@ void SubgraphView::CheckSubgraph()
// Check for invalid or duplicate layers
AssertIfNullsOrDuplicates(m_Layers, "Sub-graphs cannot contain null or duplicate layers");
+
+ // Check for invalid or duplicate input slots
+ AssertIfNullsOrDuplicates(m_IInputSlots, "Sub-graphs cannot contain null or duplicate IInputSlots");
+
+ // Check for invalid or duplicate output slots
+ AssertIfNullsOrDuplicates(m_IOutputSlots, "Sub-graphs cannot contain null or duplicate IOutputSlots");
+
+ // Check for invalid or duplicate layers
+ AssertIfNullsOrDuplicates(m_IConnectableLayers,
+ "Sub-graphs cannot contain null or duplicate IConnectableLayers");
}
const SubgraphView::InputSlots& SubgraphView::GetInputSlots() const
@@ -126,39 +190,69 @@ const SubgraphView::InputSlots& SubgraphView::GetInputSlots() const
return m_InputSlots;
}
+const SubgraphView::IInputSlots& SubgraphView::GetIInputSlots() const
+{
+ return m_IInputSlots;
+}
+
const SubgraphView::OutputSlots& SubgraphView::GetOutputSlots() const
{
return m_OutputSlots;
}
+const SubgraphView::IOutputSlots& SubgraphView::GetIOutputSlots() const
+{
+ return m_IOutputSlots;
+}
+
const InputSlot* SubgraphView::GetInputSlot(unsigned int index) const
{
return m_InputSlots.at(index);
}
+const IInputSlot* SubgraphView::GetIInputSlot(unsigned int index) const
+{
+ return m_IInputSlots.at(index);
+}
+
InputSlot* SubgraphView::GetInputSlot(unsigned int index)
{
return m_InputSlots.at(index);
}
+IInputSlot* SubgraphView::GetIInputSlot(unsigned int index)
+{
+ return m_IInputSlots.at(index);
+}
+
const OutputSlot* SubgraphView::GetOutputSlot(unsigned int index) const
{
return m_OutputSlots.at(index);
}
+const IOutputSlot* SubgraphView::GetIOutputSlot(unsigned int index) const
+{
+ return m_IOutputSlots.at(index);
+}
+
OutputSlot* SubgraphView::GetOutputSlot(unsigned int index)
{
return m_OutputSlots.at(index);
}
+IOutputSlot* SubgraphView::GetIOutputSlot(unsigned int index)
+{
+ return m_IOutputSlots.at(index);
+}
+
unsigned int SubgraphView::GetNumInputSlots() const
{
- return armnn::numeric_cast<unsigned int>(m_InputSlots.size());
+ return armnn::numeric_cast<unsigned int>(m_IInputSlots.size());
}
unsigned int SubgraphView::GetNumOutputSlots() const
{
- return armnn::numeric_cast<unsigned int>(m_OutputSlots.size());
+ return armnn::numeric_cast<unsigned int>(m_IOutputSlots.size());
}
const SubgraphView::Layers& SubgraphView::GetLayers() const
@@ -166,6 +260,11 @@ const SubgraphView::Layers& SubgraphView::GetLayers() const
return m_Layers;
}
+const SubgraphView::IConnectableLayers& SubgraphView::GetIConnectableLayers() const
+{
+ return m_IConnectableLayers;
+}
+
SubgraphView::Iterator SubgraphView::begin()
{
return m_Layers.begin();
@@ -176,6 +275,17 @@ SubgraphView::Iterator SubgraphView::end()
return m_Layers.end();
}
+// IConnectable Duplication to maintain backwards compatibility
+SubgraphView::IConnectableLayerIterator SubgraphView::beginIConnectable()
+{
+ return m_IConnectableLayers.begin();
+}
+
+SubgraphView::IConnectableLayerIterator SubgraphView::endIConnectable()
+{
+ return m_IConnectableLayers.end();
+}
+
SubgraphView::ConstIterator SubgraphView::begin() const
{
return m_Layers.begin();
@@ -186,14 +296,42 @@ SubgraphView::ConstIterator SubgraphView::end() const
return m_Layers.end();
}
+// IConnectable Duplication to maintain backwards compatibility
+SubgraphView::ConstIConnectableIterator SubgraphView::beginIConnectable() const
+{
+ return m_IConnectableLayers.begin();
+}
+
+SubgraphView::ConstIConnectableIterator SubgraphView::endIConnectable() const
+{
+ return m_IConnectableLayers.end();
+}
+
SubgraphView::ConstIterator SubgraphView::cbegin() const
{
+ // Ignore deprecated call as this is internal to SubgraphView
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return begin();
+ ARMNN_NO_DEPRECATE_WARN_END
}
SubgraphView::ConstIterator SubgraphView::cend() const
{
+ // Ignore deprecated call as this is internal to SubgraphView
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
return end();
+ ARMNN_NO_DEPRECATE_WARN_END
+}
+
+// IConnectable Duplication to maintain backwards compatibility
+SubgraphView::ConstIConnectableIterator SubgraphView::cbeginIConnectable() const
+{
+ return beginIConnectable();
+}
+
+SubgraphView::ConstIConnectableIterator SubgraphView::cendIConnectable() const
+{
+ return endIConnectable();
}
void SubgraphView::Clear()
@@ -201,6 +339,10 @@ void SubgraphView::Clear()
m_InputSlots.clear();
m_OutputSlots.clear();
m_Layers.clear();
+
+ m_IInputSlots.clear();
+ m_IOutputSlots.clear();
+ m_IConnectableLayers.clear();
}
void SubgraphView::ArrangeBySortOrder()
@@ -212,6 +354,16 @@ void SubgraphView::ArrangeBySortOrder()
};
m_Layers.sort(compareLayerPriority);
+
+ using IConnectableLayersList = std::list<IConnectableLayer*>;
+ auto compareIConnectableLayerPriority = [](const IConnectableLayersList::value_type& layerA,
+ const IConnectableLayersList::value_type& layerB)
+ {
+ return PolymorphicDowncast<Layer*>(layerA)->GetPriority() <
+ PolymorphicDowncast<Layer*>(layerB)->GetPriority();
+ };
+
+ m_IConnectableLayers.sort(compareIConnectableLayerPriority);
}
} // namespace armnn
diff --git a/src/armnn/SubgraphView.hpp b/src/armnn/SubgraphView.hpp
index af6054283e..f2ca44cb68 100644
--- a/src/armnn/SubgraphView.hpp
+++ b/src/armnn/SubgraphView.hpp
@@ -34,19 +34,40 @@ public:
}
}
+ template <typename Func>
+ void ForEachIConnectableLayer(Func func) const
+ {
+ for (auto it = m_IConnectableLayers.begin(); it != m_IConnectableLayers.end(); )
+ {
+ auto next = std::next(it);
+ func(*it);
+ it = next;
+ }
+ }
+
using SubgraphViewPtr = std::unique_ptr<SubgraphView>;
using InputSlots = std::vector<InputSlot*>;
+ using IInputSlots = std::vector<IInputSlot*>;
using OutputSlots = std::vector<OutputSlot*>;
+ using IOutputSlots = std::vector<IOutputSlot*>;
using Layers = std::list<Layer*>;
+ using IConnectableLayers = std::list<IConnectableLayer*>;
using Iterator = Layers::iterator;
+ using IConnectableLayerIterator = IConnectableLayers::iterator;
using ConstIterator = Layers::const_iterator;
+ using ConstIConnectableIterator = IConnectableLayers::const_iterator;
/// Constructs a sub-graph from the entire given graph.
explicit SubgraphView(Graph& graph);
/// Constructs a sub-graph with the given arguments.
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use constructor with arguments: "
+ "IConnectableLayers, IInputSlots and IOutputSlots", "22.08")
SubgraphView(InputSlots&& inputs, OutputSlots&& outputs, Layers&& layers);
+ /// Constructs a sub-graph with the given arguments.
+ SubgraphView(IConnectableLayers&& layers, IInputSlots&& inputs, IOutputSlots&& outputs);
+
/// Copy-constructor.
SubgraphView(const SubgraphView& subgraph);
@@ -59,27 +80,74 @@ public:
/// Move-assignment operator.
SubgraphView& operator=(SubgraphView&& other);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIInputSlots() returning"
+ " public IInputSlots", "22.08")
const InputSlots& GetInputSlots() const;
+ const IInputSlots& GetIInputSlots() const;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIOutputSlots() returning"
+ " public IOutputSlots", "22.08")
const OutputSlots& GetOutputSlots() const;
+ const IOutputSlots& GetIOutputSlots() const;
+
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIConnectableLayers() "
+ "returning public IConnectableLayers", "22.08")
const Layers& GetLayers() const;
+ const IConnectableLayers& GetIConnectableLayers() const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIInputSlot() returning public "
+ "IInputSlot", "22.08")
const InputSlot* GetInputSlot(unsigned int index) const;
+ const IInputSlot* GetIInputSlot(unsigned int index) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIInputSlot() returning public "
+ "IInputSlot", "22.08")
InputSlot* GetInputSlot(unsigned int index);
+ IInputSlot* GetIInputSlot(unsigned int index);
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIOutputSlot() returning"
+ " public IOutputSlot", "22.08")
const OutputSlot* GetOutputSlot(unsigned int index) const;
+ const IOutputSlot* GetIOutputSlot(unsigned int index) const;
+ ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This function has been deprecated, please use GetIOutputSlot() returning"
+ " public IOutputSlot", "22.08")
OutputSlot* GetOutputSlot(unsigned int index);
+ IOutputSlot* GetIOutputSlot(unsigned int index);
unsigned int GetNumInputSlots() const;
unsigned int GetNumOutputSlots() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "IConnectableLayerIterator, until that occurs in 23.02; please use "
+ "beginIConnectable() returning public IConnectableLayerIterator", "23.02")
Iterator begin();
+ IConnectableLayerIterator beginIConnectable();
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "IConnectableLayerIterator, until that occurs in 23.02; please use "
+ "endIConnectable() returning public IConnectableLayerIterator", "23.02")
Iterator end();
+ IConnectableLayerIterator endIConnectable();
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "beginIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator begin() const;
+ ConstIConnectableIterator beginIConnectable() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "endIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator end() const;
+ ConstIConnectableIterator endIConnectable() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "cbeginIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator cbegin() const;
+ ConstIConnectableIterator cbeginIConnectable() const;
+ ARMNN_DEPRECATED_MSG_CHANGE_DATE("This function is deprecated and will be changed to return an "
+ "ConstIConnectableIterator, until that occurs in 23.02; please use "
+ "cendIConnectable() returning public ConstIConnectableIterator", "23.02")
ConstIterator cend() const;
+ ConstIConnectableIterator cendIConnectable() const;
void Clear();
@@ -91,11 +159,14 @@ private:
/// The list of pointers to the input slots of the parent graph.
InputSlots m_InputSlots;
+ IInputSlots m_IInputSlots;
/// The list of pointers to the output slots of the parent graph.
OutputSlots m_OutputSlots;
+ IOutputSlots m_IOutputSlots;
/// The list of pointers to the layers of the parent graph.
Layers m_Layers;
+ IConnectableLayers m_IConnectableLayers;
};
} // namespace armnn
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 21fbb7cd80..e2c5f911a0 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -176,7 +176,7 @@ private:
/// Intermediate data structure to store information associated with a particular layer.
struct LayerSelectionInfo
{
- using LayerInfoContainer = std::map<Layer*, LayerSelectionInfo>;
+ using LayerInfoContainer = std::map<IConnectableLayer*, LayerSelectionInfo>;
using LayerInfoQueue = std::queue<LayerSelectionInfo*>;
LayerSelectionInfo(Layer* layer, const SubgraphViewSelector::LayerSelectorFunction& selector)
@@ -193,9 +193,11 @@ struct LayerSelectionInfo
}
void CollectNonSelectedInputs(LayerSelectionInfo::LayerInfoContainer& layerInfos,
- SubgraphView::InputSlots& inputSlots)
+ SubgraphView::IInputSlots& inputSlots)
{
- for (auto&& slot = m_Layer->BeginInputSlots(); slot != m_Layer->EndInputSlots(); ++slot)
+ for (auto&& slot = PolymorphicDowncast<Layer*>(m_Layer)->BeginInputSlots();
+ slot != PolymorphicDowncast<Layer*>(m_Layer)->EndInputSlots();
+ ++slot)
{
OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
@@ -218,9 +220,11 @@ struct LayerSelectionInfo
}
void CollectNonSelectedOutputSlots(LayerSelectionInfo::LayerInfoContainer& layerInfos,
- SubgraphView::OutputSlots& outputSlots)
+ SubgraphView::IOutputSlots& outputSlots)
{
- for (auto&& slot = m_Layer->BeginOutputSlots(); slot != m_Layer->EndOutputSlots(); ++slot)
+ for (auto&& slot = PolymorphicDowncast<Layer*>(m_Layer)->BeginOutputSlots();
+ slot != PolymorphicDowncast<Layer*>(m_Layer)->EndOutputSlots();
+ ++slot)
{
for (InputSlot* childLayerInputSlot : slot->GetConnections())
{
@@ -240,7 +244,7 @@ struct LayerSelectionInfo
}
}
- Layer* m_Layer;
+ IConnectableLayer* m_Layer;
/// Which subgraph this layer has been assigned to. Only valid once m_IsProcessed is true.
/// Two layers with different m_Subgraph pointers may in fact have been merged into the same subgraph -
/// see the description of the PartialSubgraph class.
@@ -264,7 +268,7 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
LayerSelectionInfo& layerInfo,
Delegate function)
{
- Layer& layer = *layerInfo.m_Layer;
+ Layer& layer = *PolymorphicDowncast<Layer*>(layerInfo.m_Layer);
for (auto inputSlot : layer.GetInputSlots())
{
@@ -285,7 +289,7 @@ void ForEachLayerOutput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
LayerSelectionInfo& layerInfo,
Delegate function)
{
- Layer& layer= *layerInfo.m_Layer;
+ Layer& layer = *PolymorphicDowncast<Layer*>(layerInfo.m_Layer);
for (auto& outputSlot : layer.GetOutputSlots())
{
@@ -387,9 +391,11 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
LayerSelectionInfo::LayerInfoContainer layerInfos;
LayerSelectionInfo::LayerInfoQueue processQueue;
- for (auto& layer : subgraph)
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
+ for (auto& layer : subgraphLayers)
{
- auto emplaced = layerInfos.emplace(layer, LayerSelectionInfo{layer, selector});
+
+ auto emplaced = layerInfos.emplace(layer, LayerSelectionInfo{PolymorphicDowncast<Layer*>(layer), selector});
LayerSelectionInfo& layerInfo = emplaced.first->second;
// Start with Input type layers
@@ -399,10 +405,10 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
}
}
- const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
for (auto& inputSlot : subgraphInputSlots)
{
- Layer& layer = inputSlot->GetOwningLayer();
+ Layer& layer = PolymorphicDowncast<InputSlot*>(inputSlot)->GetOwningLayer();
auto emplaced = layerInfos.emplace(&layer, LayerSelectionInfo{&layer, selector});
LayerSelectionInfo& layerInfo = emplaced.first->second;
@@ -463,9 +469,9 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
Subgraphs result;
for (auto& splitGraph : splitMap)
{
- SubgraphView::InputSlots inputs;
- SubgraphView::OutputSlots outputs;
- SubgraphView::Layers layers;
+ SubgraphView::IInputSlots inputs;
+ SubgraphView::IOutputSlots outputs;
+ SubgraphView::IConnectableLayers layers;
for (auto&& infoPtr : splitGraph.second)
{
infoPtr->CollectNonSelectedInputs(layerInfos, inputs);
@@ -475,24 +481,28 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
// Sort lists into deterministic order, not relying on pointer values which may be different on each execution.
// This makes debugging the optimised graph much easier as subsequent stages can also be deterministic.
- std::sort(inputs.begin(), inputs.end(), [](const InputSlot* a, const InputSlot* b)
+ std::sort(inputs.begin(), inputs.end(), [](const IInputSlot* a, const IInputSlot* b)
{
- const LayerGuid guidA = a->GetOwningLayer().GetGuid();
- const LayerGuid guidB = b->GetOwningLayer().GetGuid();
+ auto* castA = PolymorphicDowncast<const InputSlot*>(a);
+ auto* castB = PolymorphicDowncast<const InputSlot*>(b);
+ const LayerGuid guidA = castA->GetOwningLayer().GetGuid();
+ const LayerGuid guidB = castB->GetOwningLayer().GetGuid();
if (guidA < guidB)
{
return true;
}
else if (guidA == guidB)
{
- return (a->GetSlotIndex() < b->GetSlotIndex());
+ return (castA->GetSlotIndex() < castB->GetSlotIndex());
}
return false;
});
- std::sort(outputs.begin(), outputs.end(), [](const OutputSlot* a, const OutputSlot* b)
+ std::sort(outputs.begin(), outputs.end(), [](const IOutputSlot* a, const IOutputSlot* b)
{
- const LayerGuid guidA = a->GetOwningLayer().GetGuid();
- const LayerGuid guidB = b->GetOwningLayer().GetGuid();
+ auto* castA = PolymorphicDowncast<const OutputSlot*>(a);
+ auto* castB = PolymorphicDowncast<const OutputSlot*>(b);
+ const LayerGuid guidA = castA->GetOwningLayer().GetGuid();
+ const LayerGuid guidB = castB->GetOwningLayer().GetGuid();
if (guidA < guidB)
{
return true;
@@ -503,12 +513,12 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto
}
return false;
});
- layers.sort([](const Layer* a, const Layer* b) { return a->GetGuid() < b->GetGuid(); });
+ layers.sort([](const IConnectableLayer* a, const IConnectableLayer* b) { return a->GetGuid() < b->GetGuid(); });
// Create a new sub-graph with the new lists of input/output slots and layer
- result.emplace_back(std::make_unique<SubgraphView>(std::move(inputs),
- std::move(outputs),
- std::move(layers)));
+ result.emplace_back(std::make_unique<SubgraphView>(std::move(layers),
+ std::move(inputs),
+ std::move(outputs)));
}
return result;
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index 693daa2268..2ea465ea04 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -17,12 +17,13 @@
#include <queue>
#include <random>
#include <chrono>
+
using namespace armnn;
namespace
{
-bool AreAnySubgraphLayersPresentInGraph(const SubgraphView::Layers &subgraphLayers, const Graph &graph)
+bool AreAnySubgraphLayersPresentInGraph(const SubgraphView::IConnectableLayers &subgraphLayers, const Graph &graph)
{
for(auto&& layer : subgraphLayers)
{
@@ -52,6 +53,20 @@ SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
return result;
}
+/// Duplication for IConnectableLayer
+SubgraphView::IInputSlots CreateIInputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
+{
+ SubgraphView::IInputSlots result;
+ for (auto&& layer : layers)
+ {
+ for (unsigned int i = 0 ; i < layer->GetNumInputSlots(); ++i)
+ {
+ result.push_back(&(layer->GetInputSlot(i)));
+ }
+ }
+ return result;
+}
+
//
// this helper only works if all layers where the outputs connect to are not selected
//
@@ -68,6 +83,20 @@ SubgraphView::OutputSlots CreateOutputsFrom(const std::vector<Layer*>& layers)
return result;
}
+/// Duplication for IConnectableLayer
+SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
+{
+ SubgraphView::IOutputSlots result;
+ for (auto &&layer: layers)
+ {
+ for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
+ {
+ result.push_back(&(layer->GetOutputSlot(i)));
+ }
+ }
+ return result;
+}
+
//
// this takes the inputs, outputs and layers as a copy and the move these copies into the
// resulting subgraph, so the pass by value is intentional
@@ -79,6 +108,13 @@ SubgraphViewSelector::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::Input
return std::make_unique<SubgraphView>(std::move(inputs), std::move(outputs), std::move(layers));
}
+SubgraphViewSelector::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::IConnectableLayers&& layers,
+ SubgraphView::IInputSlots&& inputs,
+ SubgraphView::IOutputSlots&& outputs)
+{
+ return std::make_unique<SubgraphView>(std::move(layers), std::move(inputs), std::move(outputs));
+}
+
template <typename T, typename Iterator>
std::vector<T> ToSortedArray(Iterator begin, Iterator end)
{
@@ -102,32 +138,181 @@ void CompareSubgraphViews(SubgraphViewSelector::SubgraphViewPtr& result,
if (result.get() != nullptr && expected.get() != nullptr)
{
- CHECK(result->GetInputSlots().size() == expected->GetInputSlots().size());
- CHECK(result->GetOutputSlots().size() == expected->GetOutputSlots().size());
- CHECK(result->GetLayers().size() == expected->GetLayers().size());
-
- auto resultLayers = ToSortedArray<Layer *>(result->GetLayers().begin(),
- result->GetLayers().end());
- auto expectedLayers = ToSortedArray<Layer *>(expected->GetLayers().begin(),
- expected->GetLayers().end());
+ CHECK(result->GetIInputSlots().size() == expected->GetIInputSlots().size());
+ CHECK(result->GetIOutputSlots().size() == expected->GetIOutputSlots().size());
+ CHECK(result->GetIConnectableLayers().size() == expected->GetIConnectableLayers().size());
+
+ auto resultLayers = ToSortedArray<IConnectableLayer*>(result->GetIConnectableLayers().begin(),
+ result->GetIConnectableLayers().end());
+ auto expectedLayers = ToSortedArray<IConnectableLayer*>(expected->GetIConnectableLayers().begin(),
+ expected->GetIConnectableLayers().end());
CompareVectors(resultLayers, expectedLayers);
- auto resultInputs = ToSortedArray<InputSlot *>(result->GetInputSlots().begin(),
- result->GetInputSlots().end());
- auto expectedInputs = ToSortedArray<InputSlot *>(expected->GetInputSlots().begin(),
- expected->GetInputSlots().end());
+ auto resultInputs = ToSortedArray<IInputSlot *>(result->GetIInputSlots().begin(),
+ result->GetIInputSlots().end());
+ auto expectedInputs = ToSortedArray<IInputSlot *>(expected->GetIInputSlots().begin(),
+ expected->GetIInputSlots().end());
CompareVectors(resultInputs, expectedInputs);
- auto resultOutputs = ToSortedArray<OutputSlot *>(result->GetOutputSlots().begin(),
- result->GetOutputSlots().end());
- auto expectedOutputs = ToSortedArray<OutputSlot *>(expected->GetOutputSlots().begin(),
- expected->GetOutputSlots().end());
+ auto resultOutputs = ToSortedArray<IOutputSlot *>(result->GetIOutputSlots().begin(),
+ result->GetIOutputSlots().end());
+ auto expectedOutputs = ToSortedArray<IOutputSlot *>(expected->GetIOutputSlots().begin(),
+ expected->GetIOutputSlots().end());
CompareVectors(resultOutputs, expectedOutputs);
}
}
} // namespace <anonymous>
+TEST_SUITE("SubgraphViewBackwardCompatibilityTests")
+{
+// Test that SubraphView has been converted to using IConnectableLayer/IInputSlot/IOutputSlot
+// in a backward compatible manner from ILayer/InputSlot/OutputSlot
+TEST_CASE("SubgraphViewIterators")
+{
+ INetworkPtr net(INetwork::Create());
+ IConnectableLayer* layer = net->AddInputLayer(1, "input");
+
+ SubgraphView subgraph{layer};
+
+ // cbeginIConnectable() and cendIConnectable()
+ bool found = false;
+ if (std::find(subgraph.cbeginIConnectable(), subgraph.cendIConnectable(), layer)
+ != subgraph.cendIConnectable())
+ {
+ found = true;
+ }
+ CHECK(found);
+ found = false;
+
+ // beginIConnectable() and endIConnectable()
+ if (std::find(subgraph.beginIConnectable(), subgraph.endIConnectable(), layer)
+ != subgraph.endIConnectable())
+ {
+ found = true;
+ }
+ CHECK(found);
+ found = false;
+
+ // GetIConnectableLayers returns IConnectableLayers initialized when calling constructor given IConnectableLayers
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
+ for (auto& iConnectableLayer : subgraphLayers)
+ {
+ if (std::string(iConnectableLayer->GetName()) == "input")
+ {
+ found = true;
+ }
+ }
+ CHECK(found);
+ found = false;
+
+ // Test GetLayers returns layers initialized when calling constructor given IConnectableLayers
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ const SubgraphView::Layers& subgraphLayersOld = subgraph.GetLayers();
+ ARMNN_NO_DEPRECATE_WARN_END
+ for (auto& layerOld : subgraphLayersOld)
+ {
+ if (std::string(layerOld->GetName()) == "input")
+ {
+ found = true;
+ }
+ }
+ CHECK(found);
+}
+
+TEST_CASE("SubgraphViewSlots")
+{
+ // Construct graph
+ Graph graph;
+
+ Layer* const inputLayer = graph.AddLayer<InputLayer>(0, "input");
+
+ Convolution2dDescriptor convDescriptor;
+ Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
+ Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
+
+ Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ // Construct sub-graph
+ SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom({},
+ CreateIInputsFrom({convLayer1}),
+ CreateIOutputsFrom({convLayer2}));
+
+ // Test that both old and new are initialized
+ CHECK(subgraph->GetIInputSlots().size() == 1);
+ CHECK(subgraph->GetIOutputSlots().size() == 1);
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ CHECK(subgraph->GetInputSlots().size() == 1);
+ CHECK(subgraph->GetOutputSlots().size() == 1);
+
+ // Check old and new pointing to same address
+ CHECK(subgraph->GetOutputSlot(0) == subgraph->GetIOutputSlot(0));
+ CHECK(subgraph->GetInputSlot(0) == subgraph->GetIInputSlot(0));
+ ARMNN_NO_DEPRECATE_WARN_END
+
+}
+
+TEST_CASE("SubgraphViewConstructors")
+{
+ // Construct graph
+ Graph graph;
+
+ Layer* const inputLayer = graph.AddLayer<InputLayer>(0, "input");
+
+ Convolution2dDescriptor convDescriptor;
+ Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
+ Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
+
+ Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, "output");
+
+ inputLayer->GetOutputSlot(0).Connect(convLayer1->GetInputSlot(0));
+ convLayer1->GetOutputSlot(0).Connect(convLayer2->GetInputSlot(0));
+ convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+ // Construct sub-graph
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom({inputLayer, convLayer1, convLayer2, outputLayer},
+ CreateIInputsFrom({convLayer1}),
+ CreateIOutputsFrom({convLayer2}));
+
+ // Copy Constructor
+ SubgraphView subgraph2(*subgraph.get());
+ CHECK(subgraph->GetIConnectableLayers() == subgraph2.GetIConnectableLayers());
+ CHECK(subgraph->GetIInputSlots() == subgraph2.GetIInputSlots());
+ CHECK(subgraph->GetIOutputSlots() == subgraph2.GetIOutputSlots());
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ CHECK(subgraph->GetLayers() == subgraph2.GetLayers());
+ CHECK(subgraph->GetInputSlots() == subgraph2.GetInputSlots());
+ CHECK(subgraph->GetOutputSlots() == subgraph2.GetOutputSlots());
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ // Move Constructor
+ SubgraphView subgraph3(std::move(subgraph2));
+ CHECK(subgraph->GetIConnectableLayers() == subgraph3.GetIConnectableLayers());
+ CHECK(subgraph->GetIInputSlots() == subgraph3.GetIInputSlots());
+ CHECK(subgraph->GetIOutputSlots() == subgraph3.GetIOutputSlots());
+
+ ARMNN_NO_DEPRECATE_WARN_BEGIN
+ CHECK(subgraph->GetLayers() == subgraph3.GetLayers());
+ CHECK(subgraph->GetInputSlots() == subgraph3.GetInputSlots());
+ CHECK(subgraph->GetOutputSlots() == subgraph3.GetOutputSlots());
+ ARMNN_NO_DEPRECATE_WARN_END
+
+ // Clear
+ subgraph.get()->Clear();
+ CHECK(subgraph->GetIConnectableLayers().size() == 0);
+ CHECK(subgraph->GetIInputSlots().size() == 0);
+ CHECK(subgraph->GetIOutputSlots().size() == 0);
+}
+
+} // SubgraphViewBackwardCompatibilityTests Test Suite end
+
TEST_SUITE("SubgraphSubstitution")
{
TEST_CASE("SingleInputSingleOutput")
@@ -148,17 +333,21 @@ TEST_CASE("SingleInputSingleOutput")
convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1}),
- CreateOutputsFrom({convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom({},
+ CreateIInputsFrom({convLayer1}),
+ CreateIOutputsFrom({convLayer2}));
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ // Using GetIInputSlot/GetIIOutputSlot functions
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(1, 1);
- Layer* const preCompiledLayer = graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
+
+ IConnectableLayer* const preCompiledLayer =
+ graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
// Substitute sub-graph with pre-compiled layer
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
@@ -191,8 +380,8 @@ TEST_CASE("SingleInputSingleOutputAddPrecompiledLayerSubstituteSubgraph1")
{});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
PreCompiledDescriptor preCompiledDescriptor(1, 1);
CompiledBlobPtr compiledBlobPtr;
@@ -235,8 +424,8 @@ TEST_CASE("SingleInputSingleOutputAddPrecompiledLayerSubstituteSubgraph2")
{});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
PreCompiledDescriptor preCompiledDescriptor(1, 1);
CompiledBlobPtr compiledBlobPtr;
@@ -275,13 +464,14 @@ TEST_CASE("SingleInputSingleOutputSubstituteGraph")
convLayer2->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1}),
- CreateOutputsFrom({convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom(CreateInputsFrom({convLayer1}),
+ CreateOutputsFrom({convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn = subgraph->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn = subgraph->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
// Construct second graph with a single pre-compiled layer
Graph substituteGraph;
@@ -327,15 +517,15 @@ TEST_CASE("MultiInputSingleOutput")
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
- CreateOutputsFrom({concatLayer}),
- {});
+ auto subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
+ CreateOutputsFrom({concatLayer}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraph->GetInputSlot(0)->GetConnection();
- IOutputSlot* subgraphInputConn2 = subgraph->GetInputSlot(1)->GetConnection();
+ IOutputSlot* subgraphInputConn1 = subgraph->GetIInputSlot(0)->GetConnection();
+ IOutputSlot* subgraphInputConn2 = subgraph->GetIInputSlot(1)->GetConnection();
- IInputSlot* subgraphOutputConn = subgraph->GetOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraphOutputConn = subgraph->GetIOutputSlot(0)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(2, 1);
@@ -376,15 +566,16 @@ TEST_CASE("SingleInputMultiOutput")
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({splitterLayer}),
- CreateOutputsFrom({convLayer1, convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom(CreateInputsFrom({splitterLayer}),
+ CreateOutputsFrom({convLayer1, convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraph->GetInputSlot(0)->GetConnection();
+ IOutputSlot* subgraphInputConn1 = subgraph->GetIInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn1 = subgraph->GetOutputSlot(0)->GetConnection(0);
- IInputSlot* subgraphOutputConn2 = subgraph->GetOutputSlot(1)->GetConnection(0);
+ IInputSlot* subgraphOutputConn1 = subgraph->GetIOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraphOutputConn2 = subgraph->GetIOutputSlot(1)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(1, 2);
@@ -427,16 +618,17 @@ TEST_CASE("MultiInputMultiOutput")
concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
- CreateOutputsFrom({convLayer1, convLayer2}),
- {});
+ SubgraphViewSelector::SubgraphViewPtr subgraph =
+ CreateSubgraphViewFrom(CreateInputsFrom({convLayer1, convLayer2}),
+ CreateOutputsFrom({convLayer1, convLayer2}),
+ {});
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraph->GetInputSlot(0)->GetConnection();
- IOutputSlot* subgraphInputConn2 = subgraph->GetInputSlot(1)->GetConnection();
+ IOutputSlot* subgraphInputConn1 = subgraph->GetIInputSlot(0)->GetConnection();
+ IOutputSlot* subgraphInputConn2 = subgraph->GetIInputSlot(1)->GetConnection();
- IInputSlot* subgraphOutputConn1 = subgraph->GetOutputSlot(0)->GetConnection(0);
- IInputSlot* subgraphOutputConn2 = subgraph->GetOutputSlot(1)->GetConnection(0);
+ IInputSlot* subgraphOutputConn1 = subgraph->GetIOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraphOutputConn2 = subgraph->GetIOutputSlot(1)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(2, 2);
@@ -453,7 +645,7 @@ TEST_CASE("MultiInputMultiOutput")
CHECK_EQ(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
}
-TEST_CASE("EraseReplacedLayers")
+TEST_CASE("EraseReplacedIConnectableLayers")
{
// Construct graph
Graph graph;
@@ -461,31 +653,31 @@ TEST_CASE("EraseReplacedLayers")
graph.AddLayer<InputLayer>(0, "input");
ViewsDescriptor splitterDescriptor(2);
- Layer* const splitterLayer = graph.AddLayer<SplitterLayer>(splitterDescriptor, "splitter");
+ IConnectableLayer* const splitterLayer = graph.AddLayer<SplitterLayer>(splitterDescriptor, "splitter");
Convolution2dDescriptor convDescriptor;
- Layer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
- Layer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
+ IConnectableLayer* const convLayer1 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv1");
+ IConnectableLayer* const convLayer2 = graph.AddLayer<Convolution2dLayer>(convDescriptor, "conv2");
OriginsDescriptor concatDescriptor(2);
- Layer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
+ IConnectableLayer* const concatLayer = graph.AddLayer<ConcatLayer>(concatDescriptor, "concat");
graph.AddLayer<OutputLayer>(0, "output");
// Construct sub-graph
- SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom({},
- {},
- {splitterLayer,
+ SubgraphViewSelector::SubgraphViewPtr subgraph = CreateSubgraphViewFrom({splitterLayer,
convLayer1,
convLayer2,
- concatLayer});
+ concatLayer},
+ {},
+ {});
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(0, 0);
Layer* const preCompiledLayer = graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor, "pre-compiled");
// Save sub-graph layers for later verification
- const SubgraphView::Layers subgraphLayers = subgraph->GetLayers();
+ const SubgraphView::IConnectableLayers subgraphLayers = subgraph->GetIConnectableLayers();
// Substitute sub-graph with pre-compiled layer
graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
@@ -503,9 +695,9 @@ TEST_CASE("SubgraphForEmptyGraph")
Graph graph;
SubgraphView subgraph(graph);
- CHECK(subgraph.GetInputSlots().empty());
- CHECK(subgraph.GetOutputSlots().empty());
- CHECK(subgraph.GetLayers().empty());
+ CHECK(subgraph.GetIInputSlots().empty());
+ CHECK(subgraph.GetIOutputSlots().empty());
+ CHECK(subgraph.GetIConnectableLayers().empty());
}
TEST_CASE("SubgraphForEntireGraph")
@@ -523,9 +715,9 @@ TEST_CASE("SubgraphForEntireGraph")
SubgraphView subgraph(graph);
- CHECK(subgraph.GetInputSlots().empty());
- CHECK(subgraph.GetOutputSlots().empty());
- CHECK(subgraph.GetLayers().size() == graph.GetNumLayers());
+ CHECK(subgraph.GetIInputSlots().empty());
+ CHECK(subgraph.GetIOutputSlots().empty());
+ CHECK(subgraph.GetIConnectableLayers().size() == graph.GetNumLayers());
}
TEST_CASE("NoSubgraphsForNoMatch")
@@ -636,8 +828,9 @@ TEST_CASE("DisjointGraphs")
CHECK((subgraphs[1] != nullptr));
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
- if (std::find(subgraphs[0]->GetLayers().begin(), subgraphs[0]->GetLayers().end(), i0) !=
- subgraphs[0]->GetLayers().end())
+ if (std::find(subgraphs[0]->GetIConnectableLayers().begin(),
+ subgraphs[0]->GetIConnectableLayers().end(), i0) !=
+ subgraphs[0]->GetIConnectableLayers().end())
{
CompareSubgraphViews(subgraphs[0], expected1);
CompareSubgraphViews(subgraphs[1], expected2);
@@ -729,12 +922,12 @@ TEST_CASE("IslandInTheMiddle")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr& lhs, SubgraphViewSelector::SubgraphViewPtr& rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
- CHECK(subgraphs[0]->GetLayers().size() == 2);
- CHECK(subgraphs[1]->GetLayers().size() == 5);
+ CHECK(subgraphs[0]->GetIConnectableLayers().size() == 2);
+ CHECK(subgraphs[1]->GetIConnectableLayers().size() == 5);
CompareSubgraphViews(subgraphs[0], smallerSubgraph);
CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -804,12 +997,12 @@ TEST_CASE("MultipleSimpleSubgraphs")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr & lhs, SubgraphViewSelector::SubgraphViewPtr & rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
- CHECK(subgraphs[0]->GetLayers().size() == 1);
- CHECK(subgraphs[1]->GetLayers().size() == 2);
+ CHECK(subgraphs[0]->GetIConnectableLayers().size() == 1);
+ CHECK(subgraphs[1]->GetIConnectableLayers().size() == 2);
CompareSubgraphViews(subgraphs[0], smallerSubgraph);
CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -1097,7 +1290,7 @@ TEST_CASE("ValidMerge")
if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
{
- if (subgraphs[0]->GetInputSlots().size() == 1)
+ if (subgraphs[0]->GetIInputSlots().size() == 1)
{
CompareSubgraphViews(subgraphs[0], expectedSubgraph0);
CompareSubgraphViews(subgraphs[1], expectedSubgraph1);
@@ -1196,7 +1389,7 @@ TEST_CASE("PropagatedDependencies")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr& lhs, SubgraphViewSelector::SubgraphViewPtr& rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
@@ -1311,7 +1504,8 @@ TEST_CASE("Random")
for (uint32_t inputSlotIdx = 0; inputSlotIdx < layer->GetNumInputSlots(); ++inputSlotIdx)
{
InputSlot& inputSlot = layer->GetInputSlot(inputSlotIdx);
- uint32_t maxLayerDepthToConnectTo = layerDepths[layer]; // This prevents a connection causing a loop
+ uint32_t maxLayerDepthToConnectTo = layerDepths[layer];
+ // This prevents a connection causing a loop
// Finding a layer to connect to may take multiple attempts, so keep trying until it works.
while (inputSlot.GetConnectedOutputSlot() == nullptr)
{
@@ -1362,7 +1556,8 @@ TEST_CASE("Random")
for (std::unique_ptr<SubgraphView>& subgraph : subgraphs)
{
std::string name = std::to_string(i++);
- if (std::find(subgraph->begin(), subgraph->end(), layer) != subgraph->end())
+ if (std::find(subgraph->cbeginIConnectable(), subgraph->cendIConnectable(), layer)
+ != subgraph->cendIConnectable())
{
layerToSubgraph[layer] = subgraph.get();
break;
@@ -1397,10 +1592,10 @@ TEST_CASE("Random")
// encounter a layer that belongs to the subgraph that we started from.
for (std::unique_ptr<SubgraphView>& subgraph : subgraphs)
{
- for (InputSlot* inputSlot : subgraph->GetInputSlots())
+ for (IInputSlot* inSlot : subgraph->GetIInputSlots())
{
std::queue<Layer*> toProcess;
- toProcess.push(&inputSlot->GetConnectedOutputSlot()->GetOwningLayer());
+ toProcess.push(&PolymorphicDowncast<InputSlot*>(inSlot)->GetConnectedOutputSlot()->GetOwningLayer());
while (toProcess.size() > 0)
{
Layer* l = toProcess.front();
@@ -1462,15 +1657,15 @@ TEST_CASE("SingleSubgraph")
if (subgraphs[0].get() != nullptr)
{
- unsigned int numInputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetInputSlots().size());
- unsigned int numOutputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetOutputSlots().size());
+ unsigned int numInputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIInputSlots().size());
+ unsigned int numOutputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIOutputSlots().size());
CHECK((numInputSlots == 1));
CHECK((numOutputSlots == 1));
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraphInputConn1 = subgraphs[0]->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraphOutputConn1 = subgraphs[0]->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraphInputConn1 = subgraphs[0]->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraphOutputConn1 = subgraphs[0]->GetIOutputSlot(0)->GetConnection(0);
// Construct dummy pre-compiled layer
PreCompiledDescriptor preCompiledDescriptor(numInputSlots, numOutputSlots);
@@ -1540,25 +1735,25 @@ TEST_CASE("MultipleSubgraphs")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr & lhs, SubgraphViewSelector::SubgraphViewPtr & rhs)
{
- return (lhs->GetInputSlots().size() < rhs->GetInputSlots().size());
+ return (lhs->GetIInputSlots().size() < rhs->GetIInputSlots().size());
}
);
- unsigned int numInputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetInputSlots().size());
- unsigned int numOutputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetOutputSlots().size());
+ unsigned int numInputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIInputSlots().size());
+ unsigned int numOutputSlots1 = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetIOutputSlots().size());
- unsigned int numInputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetInputSlots().size());
- unsigned int numOutputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetOutputSlots().size());
+ unsigned int numInputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetIInputSlots().size());
+ unsigned int numOutputSlots2 = armnn::numeric_cast<unsigned int>(subgraphs[1]->GetIOutputSlots().size());
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraph1InputConn = subgraphs[0]->GetInputSlot(0)->GetConnection();
- IInputSlot* subgraph1OutputConn1 = subgraphs[0]->GetOutputSlot(0)->GetConnection(0);
- IInputSlot* subgraph1OutputConn2 = subgraphs[0]->GetOutputSlot(1)->GetConnection(0);
+ IOutputSlot* subgraph1InputConn = subgraphs[0]->GetIInputSlot(0)->GetConnection();
+ IInputSlot* subgraph1OutputConn1 = subgraphs[0]->GetIOutputSlot(0)->GetConnection(0);
+ IInputSlot* subgraph1OutputConn2 = subgraphs[0]->GetIOutputSlot(1)->GetConnection(0);
// Save sub-graph connections for comparison after substitution
- IOutputSlot* subgraph2InputConn1 = subgraphs[1]->GetInputSlot(0)->GetConnection();
- IOutputSlot* subgraph2InputConn2 = subgraphs[1]->GetInputSlot(1)->GetConnection();
- IInputSlot* subgraph2OutputConn = subgraphs[1]->GetOutputSlot(0)->GetConnection(0);
+ IOutputSlot* subgraph2InputConn1 = subgraphs[1]->GetIInputSlot(0)->GetConnection();
+ IOutputSlot* subgraph2InputConn2 = subgraphs[1]->GetIInputSlot(1)->GetConnection();
+ IInputSlot* subgraph2OutputConn = subgraphs[1]->GetIOutputSlot(0)->GetConnection(0);
PreCompiledDescriptor preCompiledDescriptor1(numInputSlots1, numOutputSlots1);
Layer* const preCompiledLayer1 = graph.AddLayer<PreCompiledLayer>(preCompiledDescriptor1, "pre-compiled1");
@@ -1655,13 +1850,13 @@ TEST_CASE("SubgraphCycles")
std::sort(subgraphs.begin(), subgraphs.end(),
[](SubgraphViewSelector::SubgraphViewPtr & lhs, SubgraphViewSelector::SubgraphViewPtr & rhs)
{
- return (lhs->GetLayers().size() < rhs->GetLayers().size());
+ return (lhs->GetIConnectableLayers().size() < rhs->GetIConnectableLayers().size());
}
);
// one subgraph needs to be size=1 and the other one is 2
- CHECK(subgraphs[0]->GetLayers().size() == 1);
- CHECK(subgraphs[1]->GetLayers().size() == 2);
+ CHECK(subgraphs[0]->GetIConnectableLayers().size() == 1);
+ CHECK(subgraphs[1]->GetIConnectableLayers().size() == 2);
CompareSubgraphViews(subgraphs[0], outputSubgraph);
CompareSubgraphViews(subgraphs[1], inputSubgraph);
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index 129a766729..7224cc8d06 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -6,4 +6,4 @@
#include "../../armnnTestUtils/UnitTests.hpp"
#pragma message("src/armnn/test/UnitTests.hpp has been deprecated, it is due for removal in 22.08 release." \
- " Please use from armnnTestUtils library, /src/armnnTestUtils/UnitTests.hpp) \ No newline at end of file
+ " Please use from armnnTestUtils library, /src/armnnTestUtils/UnitTests.hpp")
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index 4367de1e28..74ab789402 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -19,14 +19,15 @@ namespace
//
// this helper only works if all layers where the inputs connect to are not selected
//
-SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
+
+SubgraphView::IInputSlots CreateIInputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
{
- SubgraphView::InputSlots result;
+ SubgraphView::IInputSlots result;
for (auto&& layer : layers)
{
- for (auto&& it = layer->BeginInputSlots(); it != layer->EndInputSlots(); ++it)
+ for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
{
- result.push_back(&(*it));
+ result.push_back(&(layer->GetInputSlot(i)));
}
}
return result;
@@ -35,14 +36,15 @@ SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
//
// this helper only works if all layers where the outputs connect to are not selected
//
-SubgraphView::OutputSlots CreateOutputsFrom(const std::vector<Layer*>& layers)
+
+SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
{
- SubgraphView::OutputSlots result;
- for (auto&& layer : layers)
+ SubgraphView::IOutputSlots result;
+ for (auto&& layer : layers)
{
- for (auto&& it = layer->BeginOutputSlots(); it != layer->EndOutputSlots(); ++it)
+ for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
{
- result.push_back(&(*it));
+ result.push_back(&(layer->GetOutputSlot(i)));
}
}
return result;
@@ -83,9 +85,9 @@ inline void ReportUntouchedLayers(OptimizationViews& optimizationViews, std::map
for (const auto& pair : untouched)
{
Layer* layer = pair.second;
- SubgraphView subgraphView(CreateInputsFrom({layer}),
- CreateOutputsFrom({layer}),
- {layer});
+ SubgraphView subgraphView({layer},
+ CreateIInputsFrom({layer}),
+ CreateIOutputsFrom({layer}));
optimizationViews.AddUntouchedSubgraph(std::move(subgraphView));
}
}
@@ -100,9 +102,9 @@ LayerType* FuseLayer(OptimizationViews& optimizationViews,
replacementLayer->SetAdditionalInfoForObject(
std::make_shared<ActivationDescriptor>(activationDesc));
- SubgraphView substitutionSubgraph(CreateInputsFrom({baseLayer}),
- CreateOutputsFrom({activationLayer}),
- {baseLayer, activationLayer});
+ SubgraphView substitutionSubgraph({baseLayer, activationLayer},
+ CreateIInputsFrom({baseLayer}),
+ CreateIOutputsFrom({activationLayer}));
SubgraphView replacementSubgraph(replacementLayer);
optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
@@ -208,6 +210,11 @@ LayerType* FuseBatchNormalizationLayer(OptimizationViews& optimizationViews,
activationLayer,
activationDesc);
+ // NOTE(review): substitutionSubgraph/replacementSubgraph are constructed but
+ // never used in this function — the substitution appears to be registered
+ // already by the FuseLayer call above, so these look like dead code and will
+ // trigger unused-variable warnings. Confirm and remove (do NOT also call
+ // AddSubstitution here, or the pair would be registered twice).
+ SubgraphView substitutionSubgraph({baseLayer, activationLayer},
+ CreateIInputsFrom({baseLayer}),
+ CreateIOutputsFrom({activationLayer}));
+ SubgraphView replacementSubgraph(replacementLayer);
+
return replacementLayer;
}
@@ -316,12 +323,12 @@ LayerType* FuseFullyConnectedLayer(OptimizationViews& optimizationViews,
// as currently only one axis is supported.
//
template<typename LayerType>
-std::vector<Layer*> ChainReduceLayers(OptimizationViews& optimizationViews,
+std::vector<IConnectableLayer*> ChainReduceLayers(OptimizationViews& optimizationViews,
LayerType* baseLayer,
ReduceDescriptor& desc)
{
// Vector of new chained layers, used for substitution.
- std::vector<Layer*> layers;
+ std::vector<IConnectableLayer*> layers;
// Vector of axes so each layer is reshaped correctly.
std::vector<uint32_t> axes;
@@ -348,9 +355,11 @@ std::vector<Layer*> ChainReduceLayers(OptimizationViews& optimizationViews,
// Add new layer to graph.
std::string layerName = "reduce_layer_" + std::to_string(i);
+
Layer* replacementLayer = PolymorphicDowncast<Layer*>(
optimizationViews.GetINetwork()->AddReduceLayer(newReduceDescriptor,
layerName.c_str()));
+
// Connect previous layer with new layer.
// The first and last layer will be connected when the subgraph is replaced.
if (!layers.empty())
@@ -370,7 +379,8 @@ std::vector<Layer*> ChainReduceLayers(OptimizationViews& optimizationViews,
}
// Check if the TensorInfo from the last layer equals the inferred output from the original layer.
- ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() == layers.back()->GetOutputSlot().GetTensorInfo());
+ ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() ==
+ PolymorphicDowncast<Layer*>(layers.back())->GetOutputSlot().GetTensorInfo());
return layers;
}
@@ -381,14 +391,14 @@ std::vector<Layer*> ChainReduceLayers(OptimizationViews& optimizationViews,
template<typename LayerType>
void ReplaceLayers(OptimizationViews& optimizationViews,
LayerType* baseLayer,
- std::vector<Layer*>& layers)
+ std::vector<IConnectableLayer*>& layers)
{
- std::list<Layer*> replacementLayers(layers.begin(), layers.end());
+ std::list<IConnectableLayer*> replacementLayers(layers.begin(), layers.end());
SubgraphView substitutionSubgraph(baseLayer);
- SubgraphView replacementSubgraph(CreateInputsFrom({replacementLayers.front()}),
- CreateOutputsFrom({replacementLayers.back()}),
- std::move(replacementLayers));
+ // Capture the endpoint layers before the std::move: the evaluation order of
+ // function arguments is unspecified, so calling front()/back() on
+ // replacementLayers in the same call expression as
+ // std::move(replacementLayers) is a use-after-move if the first parameter
+ // is taken by value (bugprone-use-after-move flags it regardless).
+ IConnectableLayer* firstLayer = replacementLayers.front();
+ IConnectableLayer* lastLayer = replacementLayers.back();
+ SubgraphView replacementSubgraph(std::move(replacementLayers),
+ CreateIInputsFrom({firstLayer}),
+ CreateIOutputsFrom({lastLayer}));
optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
}
diff --git a/src/backends/backendsCommon/OptimizationViews.cpp b/src/backends/backendsCommon/OptimizationViews.cpp
index eee2c67ea9..e81a6912a1 100644
--- a/src/backends/backendsCommon/OptimizationViews.cpp
+++ b/src/backends/backendsCommon/OptimizationViews.cpp
@@ -17,24 +17,28 @@ bool OptimizationViews::Validate(const armnn::SubgraphView& originalSubgraph) co
bool valid = true;
// Create a copy of the layer list from the original subgraph and sort it
- SubgraphView::Layers originalLayers = originalSubgraph.GetLayers();
+ SubgraphView::IConnectableLayers originalLayers = originalSubgraph.GetIConnectableLayers();
originalLayers.sort();
// Create a new list based on the sum of all the subgraphs and sort it
- SubgraphView::Layers countedLayers;
+ SubgraphView::IConnectableLayers countedLayers;
for (auto& failed : m_FailedOptimizations)
{
- countedLayers.insert(countedLayers.end(), failed.GetLayers().begin(), failed.GetLayers().end());
+ countedLayers.insert(countedLayers.end(),
+ failed.GetIConnectableLayers().begin(),
+ failed.GetIConnectableLayers().end());
}
for (auto& untouched : m_UntouchedSubgraphs)
{
- countedLayers.insert(countedLayers.end(), untouched.GetLayers().begin(), untouched.GetLayers().end());
+ countedLayers.insert(countedLayers.end(),
+ untouched.GetIConnectableLayers().begin(),
+ untouched.GetIConnectableLayers().end());
}
for (auto& successful : m_SuccesfulOptimizations)
{
countedLayers.insert(countedLayers.end(),
- successful.m_SubstitutableSubgraph.GetLayers().begin(),
- successful.m_SubstitutableSubgraph.GetLayers().end());
+ successful.m_SubstitutableSubgraph.GetIConnectableLayers().begin(),
+ successful.m_SubstitutableSubgraph.GetIConnectableLayers().end());
}
countedLayers.sort();
@@ -56,8 +60,8 @@ bool OptimizationViews::Validate(const armnn::SubgraphView& originalSubgraph) co
bool validSubstitution = true;
const SubgraphView& replacement = substitution.m_ReplacementSubgraph;
const SubgraphView& old = substitution.m_SubstitutableSubgraph;
- validSubstitution &= replacement.GetInputSlots().size() == old.GetInputSlots().size();
- validSubstitution &= replacement.GetOutputSlots().size() == old.GetOutputSlots().size();
+ validSubstitution &= replacement.GetIInputSlots().size() == old.GetIInputSlots().size();
+ validSubstitution &= replacement.GetIOutputSlots().size() == old.GetIOutputSlots().size();
valid &= validSubstitution;
}
}
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index d95cfc3a34..2ce14f92a4 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -130,21 +130,21 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
OptimizationViews optimizationViews;
// Get the layers of the input sub-graph
- const SubgraphView::Layers& subgraphLayers = subgraph.GetLayers();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraph.GetIConnectableLayers();
// Parse the layers
- SubgraphView::Layers supportedLayers;
- SubgraphView::Layers unsupportedLayers;
- SubgraphView::Layers untouchedLayers;
+ SubgraphView::IConnectableLayers supportedLayers;
+ SubgraphView::IConnectableLayers unsupportedLayers;
+ SubgraphView::IConnectableLayers untouchedLayers;
std::for_each(subgraphLayers.begin(),
subgraphLayers.end(),
- [&](Layer* layer)
+ [&](IConnectableLayer* layer)
{
- bool supported = IsLayerSupported(layer);
+ bool supported = IsLayerSupported(PolymorphicDowncast<Layer*>(layer));
if (supported)
{
// Layer supported, check if it's optimizable
- bool optimizable = IsLayerOptimizable(layer);
+ bool optimizable = IsLayerOptimizable(PolymorphicDowncast<Layer*>(layer));
if (optimizable)
{
// Layer fully supported
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index 4dd6bc955d..8036b41fb2 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -56,6 +56,18 @@ std::vector<SlotType*> ConvertReferenceTypeToPointerType(const std::vector<SlotT
return output;
}
+// Convert from vector of Slots* (Input/Output) to vector of ISlots* (IInput/IOutput)
+template <typename SlotType, typename ResultSlotType>
+std::vector<ResultSlotType*> ConvertSlotsToISlots(const std::vector<SlotType*>& input)
+{
+ std::vector<ResultSlotType*> output;
+ for (auto slot : input)
+ {
+ output.push_back(PolymorphicDowncast<ResultSlotType*>(slot));
+ }
+ return output;
+}
+
// Convenience function to add an input layer to a graph
Layer* AddInputLayer(Graph& graph,
const std::string& layerName,
@@ -125,19 +137,20 @@ AdditionLayer* AddAdditionaLayer(Graph& graph,
void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution,
const ExpectedSubgraphSize& expectedSubstitutableSubgraphSize,
const ExpectedSubgraphSize& expectedReplacementSubgraphSize,
- const SubgraphView::InputSlots& expectedSubstitutableInputSlots,
- const SubgraphView::OutputSlots& expectedSubstitutableOutputSlots,
- const SubgraphView::Layers& expectedSubstitutableLayers)
+ const SubgraphView::IInputSlots& expectedSubstitutableInputSlots,
+ const SubgraphView::IOutputSlots& expectedSubstitutableOutputSlots,
+ const SubgraphView::IConnectableLayers& expectedSubstitutableLayers)
{
- const SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
- const SubgraphView::InputSlots& substitutableSubgraphInputSlots = substitutableSubgraph.GetInputSlots();
- const SubgraphView::OutputSlots& substitutableSubgraphOutputSlots = substitutableSubgraph.GetOutputSlots();
- const SubgraphView::Layers& substitutableSubgraphLayers = substitutableSubgraph.GetLayers();
+ const SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph;
+ const SubgraphView::IInputSlots& substitutableSubgraphInputSlots = substitutableSubgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& substitutableSubgraphOutputSlots = substitutableSubgraph.GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& substitutableSubgraphLayers =
+ substitutableSubgraph.GetIConnectableLayers();
- const SubgraphView& replacementSubgraph = substitution.m_ReplacementSubgraph;
- const SubgraphView::InputSlots& replacementSubgraphInputSlots = replacementSubgraph.GetInputSlots();
- const SubgraphView::OutputSlots& replacementSubgraphOutputSlots = replacementSubgraph.GetOutputSlots();
- const SubgraphView::Layers& replacementSubgraphLayers = replacementSubgraph.GetLayers();
+ const SubgraphView& replacementSubgraph = substitution.m_ReplacementSubgraph;
+ const SubgraphView::IInputSlots& replacementSubgraphInputSlots = replacementSubgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& replacementSubgraphOutputSlots = replacementSubgraph.GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& replacementSubgraphLayers = replacementSubgraph.GetIConnectableLayers();
CHECK(substitutableSubgraphInputSlots.size() == expectedSubstitutableSubgraphSize.m_NumInputSlots);
CHECK(substitutableSubgraphOutputSlots.size() == expectedSubstitutableSubgraphSize.m_NumOutputSlots);
@@ -157,7 +170,7 @@ void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution,
CHECK(std::all_of(replacementSubgraphLayers.begin(),
replacementSubgraphLayers.end(),
- [](const Layer* layer)
+ [](const IConnectableLayer* layer)
{
return layer->GetType() == LayerType::PreCompiled;
}));
@@ -166,13 +179,13 @@ void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution,
// Convenience function to check that the given failed subgraph matches the specified expected values
void CheckFailedSubgraph(const SubgraphView& failedSubgraph,
const ExpectedSubgraphSize& expectedFailedSubgraphSize,
- const SubgraphView::InputSlots& expectedFailedInputSlots,
- const SubgraphView::OutputSlots& expectedFailedOutputSlots,
- const SubgraphView::Layers& expectedFailedLayers)
+ const SubgraphView::IInputSlots& expectedFailedInputSlots,
+ const SubgraphView::IOutputSlots& expectedFailedOutputSlots,
+ const SubgraphView::IConnectableLayers& expectedFailedLayers)
{
- const SubgraphView::InputSlots& failedSubgraphInputSlots = failedSubgraph.GetInputSlots();
- const SubgraphView::OutputSlots& failedSubgraphOutputSlots = failedSubgraph.GetOutputSlots();
- const SubgraphView::Layers& failedSubgraphLayers = failedSubgraph.GetLayers();
+ const SubgraphView::IInputSlots& failedSubgraphInputSlots = failedSubgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& failedSubgraphOutputSlots = failedSubgraph.GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& failedSubgraphLayers = failedSubgraph.GetIConnectableLayers();
CHECK(failedSubgraphInputSlots.size() == expectedFailedSubgraphSize.m_NumInputSlots);
CHECK(failedSubgraphOutputSlots.size() == expectedFailedSubgraphSize.m_NumOutputSlots);
@@ -186,13 +199,13 @@ void CheckFailedSubgraph(const SubgraphView& failedSubgraph,
// Convenience function to check that the given untouched subgraph matches the specified expected values
void CheckUntouchedSubgraph(const SubgraphView& untouchedSubgraph,
const ExpectedSubgraphSize& expectedUntouchedSubgraphSize,
- const SubgraphView::InputSlots& expectedUntouchedInputSlots,
- const SubgraphView::OutputSlots& expectedUntouchedOutputSlots,
- const SubgraphView::Layers& expectedUntouchedLayers)
+ const SubgraphView::IInputSlots& expectedUntouchedInputSlots,
+ const SubgraphView::IOutputSlots& expectedUntouchedOutputSlots,
+ const SubgraphView::IConnectableLayers& expectedUntouchedLayers)
{
- const SubgraphView::InputSlots& untouchedSubgraphInputSlots = untouchedSubgraph.GetInputSlots();
- const SubgraphView::OutputSlots& untouchedSubgraphOutputSlots = untouchedSubgraph.GetOutputSlots();
- const SubgraphView::Layers& untouchedSubgraphLayers = untouchedSubgraph.GetLayers();
+ const SubgraphView::IInputSlots& untouchedSubgraphInputSlots = untouchedSubgraph.GetIInputSlots();
+ const SubgraphView::IOutputSlots& untouchedSubgraphOutputSlots = untouchedSubgraph.GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& untouchedSubgraphLayers = untouchedSubgraph.GetIConnectableLayers();
CHECK(untouchedSubgraphInputSlots.size() == expectedUntouchedSubgraphSize.m_NumInputSlots);
CHECK(untouchedSubgraphOutputSlots.size() == expectedUntouchedSubgraphSize.m_NumOutputSlots);
@@ -552,9 +565,9 @@ void FullyUnsupporteSubgraphTestImpl1()
SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph1(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 1);
CHECK(subgraphOutputSlots.size() == 1);
@@ -616,9 +629,9 @@ void FullyUnsupporteSubgraphTestImpl2()
SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph2(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 1);
CHECK(subgraphOutputSlots.size() == 1);
@@ -659,7 +672,7 @@ void FullyUnsupporteSubgraphTestImpl2()
const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs();
CHECK(failedSubgraphs.size() == 1);
- std::list<Layer*> expectedFailedLayers{ layersInGraph.at("pooling1 layer"),
+ std::list<IConnectableLayer*> expectedFailedLayers{ layersInGraph.at("pooling1 layer"),
layersInGraph.at("pooling2 layer"),
layersInGraph.at("pooling3 layer") };
@@ -671,7 +684,7 @@ void FullyUnsupporteSubgraphTestImpl2()
subgraphOutputSlots,
subgraphLayers);
- const SubgraphView::Layers& failedSubgraphLayers = failedSubgraph.GetLayers();
+ const SubgraphView::IConnectableLayers& failedSubgraphLayers = failedSubgraph.GetIConnectableLayers();
CHECK_EQ(failedSubgraphLayers.front() + 0, expectedFailedLayers.front() + 0);
CHECK_EQ(failedSubgraphLayers.front() + 1, expectedFailedLayers.front() + 1);
@@ -694,9 +707,9 @@ void FullyOptimizableSubgraphTestImpl1()
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph1(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 1);
CHECK(subgraphOutputSlots.size() == 1);
@@ -759,13 +772,13 @@ void FullyOptimizableSubgraphTestImpl2()
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph2(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
- CHECK(subgraphPtr->GetInputSlots().size() == 1);
- CHECK(subgraphPtr->GetOutputSlots().size() == 1);
- CHECK(subgraphPtr->GetLayers().size() == 5);
+ CHECK(subgraphPtr->GetIInputSlots().size() == 1);
+ CHECK(subgraphPtr->GetIOutputSlots().size() == 1);
+ CHECK(subgraphPtr->GetIConnectableLayers().size() == 5);
CHECK(Contains(layersInGraph, "conv1 layer"));
CHECK(Contains(layersInGraph, "conv2 layer"));
@@ -798,7 +811,7 @@ void FullyOptimizableSubgraphTestImpl2()
const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
CHECK(substitutions.size() == 1);
- std::list<Layer*> expectedSubstitutableLayers{ layersInGraph.at("conv1 layer"),
+ std::list<IConnectableLayer*> expectedSubstitutableLayers{ layersInGraph.at("conv1 layer"),
layersInGraph.at("conv2 layer"),
layersInGraph.at("conv3 layer"),
layersInGraph.at("conv4 layer"),
@@ -813,7 +826,8 @@ void FullyOptimizableSubgraphTestImpl2()
subgraphOutputSlots,
expectedSubstitutableLayers);
- const SubgraphView::Layers& substitutableSubgraphLayers = substitution.m_SubstitutableSubgraph.GetLayers();
+ const SubgraphView::IConnectableLayers& substitutableSubgraphLayers =
+ substitution.m_SubstitutableSubgraph.GetIConnectableLayers();
CHECK_EQ(substitutableSubgraphLayers.front() + 0, expectedSubstitutableLayers.front() + 0);
CHECK_EQ(substitutableSubgraphLayers.front() + 1, expectedSubstitutableLayers.front() + 1);
@@ -845,9 +859,9 @@ void PartiallySupportedSubgraphTestImpl()
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallySupportedSubgraph(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 1);
CHECK(subgraphOutputSlots.size() == 1);
@@ -885,25 +899,30 @@ void PartiallySupportedSubgraphTestImpl()
CHECK(substitutions.size() == 2);
// Sort into a consistent order
std::sort(substitutions.begin(), substitutions.end(), [](auto s1, auto s2) {
- return strcmp(s1.m_SubstitutableSubgraph.GetLayers().front()->GetName(),
- s2.m_SubstitutableSubgraph.GetLayers().front()->GetName()) < 0;
+ return strcmp(s1.m_SubstitutableSubgraph.GetIConnectableLayers().front()->GetName(),
+ s2.m_SubstitutableSubgraph.GetIConnectableLayers().front()->GetName()) < 0;
});
std::vector<ExpectedSubgraphSize> expectedSubstitutableSubgraphSizes{ { 1, 1, 1 },
{ 1, 1, 1 } };
std::vector<ExpectedSubgraphSize> expectedReplacementSubgraphSizes{ { 1, 1, 1 },
{ 1, 1, 1 } };
- std::vector<SubgraphView::InputSlots> expectedSubstitutableInputSlots
+ std::vector<SubgraphView::IInputSlots> expectedSubstitutableInputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer")->GetInputSlots())
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots())),
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer")->GetInputSlots()))
};
- std::vector<SubgraphView::OutputSlots> expectedSubstitutableOutputSlots
+
+ std::vector<SubgraphView::IOutputSlots> expectedSubstitutableOutputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer")->GetOutputSlots())
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots())),
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer")->GetOutputSlots()))
};
- std::vector<SubgraphView::Layers> expectedSubstitutableLayers
+ std::vector<SubgraphView::IConnectableLayers> expectedSubstitutableLayers
{
{ layersInGraph.at("conv1 layer") },
{ layersInGraph.at("conv2 layer") }
@@ -927,22 +946,27 @@ void PartiallySupportedSubgraphTestImpl()
CHECK(failedSubgraphs.size() == 2);
// Sort into a consistent order
std::sort(failedSubgraphs.begin(), failedSubgraphs.end(), [](auto s1, auto s2) {
- return strcmp(s1.GetLayers().front()->GetName(), s2.GetLayers().front()->GetName()) < 0;
+ return strcmp(s1.GetIConnectableLayers().front()->GetName(),
+ s2.GetIConnectableLayers().front()->GetName()) < 0;
});
std::vector<ExpectedSubgraphSize> expectedFailedSubgraphSizes{ { 1, 1, 2 },
{ 1, 1, 1 } };
- std::vector<SubgraphView::InputSlots> expectedFailedInputSlots
+ std::vector<SubgraphView::IInputSlots> expectedFailedInputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("pooling1 layer")->GetInputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("pooling3 layer")->GetInputSlots())
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("pooling1 layer")->GetInputSlots())),
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("pooling3 layer")->GetInputSlots()))
};
- std::vector<SubgraphView::OutputSlots> expectedFailedOutputSlots
+ std::vector<SubgraphView::IOutputSlots> expectedFailedOutputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("pooling2 layer")->GetOutputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("pooling3 layer")->GetOutputSlots())
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("pooling2 layer")->GetOutputSlots())),
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("pooling3 layer")->GetOutputSlots()))
};
- std::vector<SubgraphView::Layers> expectedFailedLayers
+ std::vector<SubgraphView::IConnectableLayers> expectedFailedLayers
{
{ layersInGraph.at("pooling1 layer"),
layersInGraph.at("pooling2 layer") },
@@ -975,9 +999,9 @@ void FullyUnoptimizableSubgraphTestImpl1()
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyUnoptimizableSubgraph1(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 1);
CHECK(subgraphOutputSlots.size() == 1);
@@ -1039,9 +1063,9 @@ void PartiallyOptimizableSubgraphTestImpl1()
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph1(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 1);
CHECK(subgraphOutputSlots.size() == 1);
@@ -1079,8 +1103,9 @@ void PartiallyOptimizableSubgraphTestImpl1()
CHECK(substitutions.size() == 3);
// Sort into a consistent order
std::sort(substitutions.begin(), substitutions.end(),
- [](auto s1, auto s2) { return strcmp(s1.m_SubstitutableSubgraph.GetLayers().front()->GetName(),
- s2.m_SubstitutableSubgraph.GetLayers().front()->GetName()) < 0; });
+ [](auto s1, auto s2)
+ { return strcmp(s1.m_SubstitutableSubgraph.GetIConnectableLayers().front()->GetName(),
+ s2.m_SubstitutableSubgraph.GetIConnectableLayers().front()->GetName()) < 0; });
std::vector<ExpectedSubgraphSize> expectedSubstitutableSubgraphSizes{ { 1, 1, 1 },
{ 1, 1, 1 },
@@ -1088,19 +1113,25 @@ void PartiallyOptimizableSubgraphTestImpl1()
std::vector<ExpectedSubgraphSize> expectedReplacementSubgraphSizes{ { 1, 1, 1 },
{ 1, 1, 1 },
{ 1, 1, 1 } };
- std::vector<SubgraphView::InputSlots> expectedSubstitutableInputSlots
+ std::vector<SubgraphView::IInputSlots> expectedSubstitutableInputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetInputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv5 layer")->GetInputSlots())
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots())),
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetInputSlots())),
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv5 layer")->GetInputSlots()))
};
- std::vector<SubgraphView::OutputSlots> expectedSubstitutableOutputSlots
+ std::vector<SubgraphView::IOutputSlots> expectedSubstitutableOutputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetOutputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv5 layer")->GetOutputSlots())
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots())),
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetOutputSlots())),
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv5 layer")->GetOutputSlots()))
};
- std::vector<SubgraphView::Layers> expectedSubstitutableLayers
+ std::vector<SubgraphView::IConnectableLayers> expectedSubstitutableLayers
{
{ layersInGraph.at("conv1 layer") },
{ layersInGraph.at("conv3 layer") },
@@ -1131,22 +1162,27 @@ void PartiallyOptimizableSubgraphTestImpl1()
CHECK(untouchedSubgraphs.size() == 2);
// Sort into a consistent order
std::sort(untouchedSubgraphs.begin(), untouchedSubgraphs.end(), [](auto s1, auto s2) {
- return strcmp(s1.GetLayers().front()->GetName(), s2.GetLayers().front()->GetName()) < 0;
+ return strcmp(s1.GetIConnectableLayers().front()->GetName(),
+ s2.GetIConnectableLayers().front()->GetName()) < 0;
});
std::vector<ExpectedSubgraphSize> expectedUntouchedSubgraphSizes{ { 1, 1, 1 },
{ 1, 1, 1 } };
- std::vector<SubgraphView::InputSlots> expectedUntouchedInputSlots
+ std::vector<SubgraphView::IInputSlots> expectedUntouchedInputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetInputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv4 layer unoptimizable")->GetInputSlots())
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetInputSlots())),
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv4 layer unoptimizable")->GetInputSlots()))
};
- std::vector<SubgraphView::OutputSlots> expectedUntouchedOutputSlots
+ std::vector<SubgraphView::IOutputSlots> expectedUntouchedOutputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetOutputSlots()),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv4 layer unoptimizable")->GetOutputSlots())
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetOutputSlots())),
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv4 layer unoptimizable")->GetOutputSlots()))
};
- std::vector<SubgraphView::Layers> expectedUntouchedLayers
+ std::vector<SubgraphView::IConnectableLayers> expectedUntouchedLayers
{
{ layersInGraph.at("conv2 layer unoptimizable") },
{ layersInGraph.at("conv4 layer unoptimizable") }
@@ -1173,9 +1209,9 @@ void PartiallyOptimizableSubgraphTestImpl2()
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph2(graph, layersInGraph);
CHECK((subgraphPtr != nullptr));
- const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots();
- const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
- const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers();
+ const SubgraphView::IInputSlots& subgraphInputSlots = subgraphPtr->GetIInputSlots();
+ const SubgraphView::IOutputSlots& subgraphOutputSlots = subgraphPtr->GetIOutputSlots();
+ const SubgraphView::IConnectableLayers& subgraphLayers = subgraphPtr->GetIConnectableLayers();
CHECK(subgraphInputSlots.size() == 2);
CHECK(subgraphOutputSlots.size() == 1);
@@ -1214,15 +1250,21 @@ void PartiallyOptimizableSubgraphTestImpl2()
ExpectedSubgraphSize expectedSubstitutableSubgraphSizes{ 2, 1, 3 };
ExpectedSubgraphSize expectedReplacementSubgraphSizes{ 2, 1, 1 };
- SubgraphView::InputSlots expectedSubstitutableInputSlots = {
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()[0]),
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetInputSlots()[0])
+ SubgraphView::IInputSlots expectedSubstitutableInputSlots
+ {
+ ConvertSlotsToISlots<InputSlot, IInputSlot>({
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()[0])})[0],
+ ConvertSlotsToISlots<InputSlot, IInputSlot>({
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetInputSlots()[0])})[0]
};
- SubgraphView::OutputSlots expectedSubstitutableOutputSlots =
+
+ SubgraphView::IOutputSlots expectedSubstitutableOutputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("add layer")->GetOutputSlots()[0])
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("add layer")->GetOutputSlots()))
};
- SubgraphView::Layers expectedSubstitutableLayers
+
+ SubgraphView::IConnectableLayers expectedSubstitutableLayers
{
layersInGraph.at("conv1 layer"),
layersInGraph.at("conv3 layer"),
@@ -1250,15 +1292,17 @@ void PartiallyOptimizableSubgraphTestImpl2()
CHECK(untouchedSubgraphs.size() == 1);
std::vector<ExpectedSubgraphSize> expectedUntouchedSubgraphSizes{ { 1, 1, 1 } };
- std::vector<SubgraphView::InputSlots> expectedUntouchedInputSlots
+ std::vector<SubgraphView::IInputSlots> expectedUntouchedInputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetInputSlots())
+ ConvertSlotsToISlots<InputSlot, IInputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetInputSlots()))
};
- std::vector<SubgraphView::OutputSlots> expectedUntouchedOutputSlots
+ std::vector<SubgraphView::IOutputSlots> expectedUntouchedOutputSlots
{
- ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetOutputSlots())
+ ConvertSlotsToISlots<OutputSlot, IOutputSlot>(
+ ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetOutputSlots()))
};
- std::vector<SubgraphView::Layers> expectedUntouchedLayers
+ std::vector<SubgraphView::IConnectableLayers> expectedUntouchedLayers
{
{ layersInGraph.at("conv2 layer unoptimizable") }
};
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 339c1aa398..cf5f50025a 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -227,18 +227,18 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
{
OptimizationViews optimizationViews;
- auto it = subgraph.end();
+ auto it = subgraph.endIConnectable();
bool isFastMathEnabled = false;
std::map<LayerGuid, Layer*> untouched;
- while (it != subgraph.begin())
+ while (it != subgraph.beginIConnectable())
{
--it;
- Layer& base = **it;
+ Layer& base = *(PolymorphicDowncast<Layer*>(*it));
untouched.insert({base.GetGuid(), &base});
}
- it = subgraph.end();
+ it = subgraph.endIConnectable();
#if defined(ARMCOMPUTECL_ENABLED)
IBackendInternal::IBackendSpecificModelContextPtr modelContextPtr = CreateBackendSpecificModelContext(modelOptions);
@@ -251,10 +251,10 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
}
}
#endif
- while (it != subgraph.begin())
+ while (it != subgraph.beginIConnectable())
{
--it;
- Layer& base = **it;
+ Layer& base = *(PolymorphicDowncast<Layer*>(*it));
// Fuse activation into previous layer if supported by backend
if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
@@ -498,9 +498,9 @@ OptimizationViews ClBackend::OptimizeSubgraphView(const SubgraphView& subgraph,
if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
{
// Add new layers to the graph and connect them.
- std::vector<Layer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
- baseLayer,
- reduceDescriptor);
+ std::vector<IConnectableLayer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
+ baseLayer,
+ reduceDescriptor);
// Replace existing baselayer with new subgraph.
ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index aa5ba03075..54af14e30b 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -132,21 +132,21 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
{
OptimizationViews optimizationViews;
- auto it = subgraph.end();
+ auto it = subgraph.endIConnectable();
std::map<LayerGuid, Layer*> untouched;
- while (it != subgraph.begin())
+ while (it != subgraph.beginIConnectable())
{
--it;
- Layer& base = **it;
+ Layer& base = *(PolymorphicDowncast<Layer*>(*it));
untouched.insert({base.GetGuid(), &base});
}
- it = subgraph.end();
- while (it != subgraph.begin())
+ it = subgraph.endIConnectable();
+ while (it != subgraph.beginIConnectable())
{
--it;
- Layer& base = **it;
+ Layer& base = *(PolymorphicDowncast<Layer*>(*it));
// Fuse activation into previous layer if supported by backend
if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
@@ -390,9 +390,9 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
{
// Add new layers to the graph and connect them.
- std::vector<Layer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
- baseLayer,
- reduceDescriptor);
+ std::vector<IConnectableLayer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
+ baseLayer,
+ reduceDescriptor);
// Replace existing baselayer with new subgraph.
ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);