From f02e6cd728a0bceea221120ec039a4b66436d51d Mon Sep 17 00:00:00 2001 From: Matteo Martincigh Date: Fri, 17 May 2019 12:15:30 +0100 Subject: IVGCVSW-3030 Add unit testing for the Optimization API * Added OptimizeSubgraphViewTests file covering a number of use cases for the Optimization API * Fixed a bug in the sub-graph selector algorithm that skipped the first layer in a sub-graph if it wasn't an input layer * Changed the graph splitting logic to make use of maps instead of unordered_maps to keep the split sub-graphs in consistent order between executions * Added more common unit test utils * Minor fixes to comply to the include file conventions Change-Id: Iad464eaedd004109e5ef41aa487cea3ad86177d3 Signed-off-by: Matteo Martincigh --- src/armnn/SubgraphViewSelector.cpp | 32 +- src/backends/backendsCommon/common.mk | 1 + .../backendsCommon/test/ArithmeticTestImpl.hpp | 4 +- src/backends/backendsCommon/test/CMakeLists.txt | 2 + .../backendsCommon/test/CommonTestUtils.cpp | 59 + .../backendsCommon/test/CommonTestUtils.hpp | 64 +- .../backendsCommon/test/Conv2dTestImpl.hpp | 0 .../test/DetectionPostProcessTestImpl.hpp | 3 +- .../backendsCommon/test/GatherEndToEndTestImpl.hpp | 3 +- .../backendsCommon/test/MergerTestImpl.hpp | 4 +- src/backends/backendsCommon/test/MockBackend.cpp | 1 + .../test/OptimizeSubgraphViewTests.cpp | 1277 ++++++++++++++++++++ 12 files changed, 1430 insertions(+), 20 deletions(-) create mode 100644 src/backends/backendsCommon/test/CommonTestUtils.cpp mode change 100755 => 100644 src/backends/backendsCommon/test/Conv2dTestImpl.hpp create mode 100644 src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp index cc821ec956..8e4de0b5f8 100644 --- a/src/armnn/SubgraphViewSelector.cpp +++ b/src/armnn/SubgraphViewSelector.cpp @@ -7,7 +7,7 @@ #include "Graph.hpp" #include #include -#include +#include #include namespace armnn @@ -19,7 +19,7 @@ namespace struct 
LayerSelectionInfo { using SplitId = uint32_t; - using LayerInfoContainer = std::unordered_map; + using LayerInfoContainer = std::map; using LayerInfoQueue = std::queue; static constexpr uint32_t InitialSplitId() { return 1; } @@ -56,7 +56,8 @@ struct LayerSelectionInfo { Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer(); auto parentInfo = layerInfos.find(&parentLayer); - if (m_SplitId != parentInfo->second.m_SplitId) + if (parentInfo == layerInfos.end() || + m_SplitId != parentInfo->second.m_SplitId) { inputSlots.push_back(&(*slot)); } @@ -73,7 +74,8 @@ struct LayerSelectionInfo { Layer& childLayer = childLayerInputSlot->GetOwningLayer(); auto childInfo = layerInfos.find(&childLayer); - if (m_SplitId != childInfo->second.m_SplitId) + if (childInfo == layerInfos.end() || + m_SplitId != childInfo->second.m_SplitId) { outputSlots.push_back(&(*slot)); } @@ -112,7 +114,10 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos, Layer& inputLayer = connectedInput->GetOwningLayer(); auto parentInfo = layerInfos.find(&inputLayer); - function(parentInfo->second); + if (parentInfo != layerInfos.end()) + { + function(parentInfo->second); + } } } @@ -130,7 +135,10 @@ void ForEachLayerOutput(LayerSelectionInfo::LayerInfoContainer& layerInfos, Layer& childLayer = output->GetOwningLayer(); auto childInfo = layerInfos.find(&childLayer); - function(childInfo->second); + if (childInfo != layerInfos.end()) + { + function(childInfo->second); + } } } } @@ -213,6 +221,16 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto } } + const SubgraphView::InputSlots& subgraphInputSlots = subgraph.GetInputSlots(); + for (auto& inputSlot : subgraphInputSlots) + { + Layer& layer = inputSlot->GetOwningLayer(); + auto emplaced = layerInfos.emplace(&layer, LayerSelectionInfo{&layer, selector}); + LayerSelectionInfo& layerInfo = emplaced.first->second; + + processQueue.push(&layerInfo); + } + while (!processQueue.empty()) { 
LayerSelectionInfo& layerInfo = *processQueue.front(); @@ -246,7 +264,7 @@ SubgraphViewSelector::SelectSubgraphs(SubgraphView& subgraph, const LayerSelecto // Collect all selected layers keyed by split id into a map using SelectionInfoPtrs = std::vector; - std::unordered_map splitMap; + std::map splitMap; for (auto& info : layerInfos) { if (info.second.m_IsSelected) diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index c993bfb674..90d3d16ebb 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -23,6 +23,7 @@ COMMON_SOURCES := \ # up by the Android.mk file in the root of ArmNN COMMON_TEST_SOURCES := \ + test/CommonTestUtils.cpp \ test/JsonPrinterTestImpl.cpp \ test/LayerTests.cpp \ test/TensorCopyUtils.cpp \ diff --git a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp index 29dafd36b4..d0e85dd31d 100644 --- a/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp +++ b/src/backends/backendsCommon/test/ArithmeticTestImpl.hpp @@ -4,12 +4,12 @@ // #pragma once +#include "CommonTestUtils.hpp" + #include #include -#include - #include #include diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index ab63679268..83a1da5cd9 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -9,6 +9,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources BackendIdTests.cpp BackendRegistryTests.cpp BatchNormTestImpl.hpp + CommonTestUtils.cpp CommonTestUtils.hpp Conv2dTestImpl.hpp ConvertFp16ToFp32TestImpl.hpp @@ -34,6 +35,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources MockBackend.hpp MockBackendId.hpp OptimizedNetworkTests.cpp + OptimizeSubgraphViewTests.cpp PermuteTestImpl.hpp Pooling2dTestImpl.hpp QuantizeHelper.hpp diff --git a/src/backends/backendsCommon/test/CommonTestUtils.cpp 
b/src/backends/backendsCommon/test/CommonTestUtils.cpp new file mode 100644 index 0000000000..7685626935 --- /dev/null +++ b/src/backends/backendsCommon/test/CommonTestUtils.cpp @@ -0,0 +1,59 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "CommonTestUtils.hpp" + +#include + +using namespace armnn; + +void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, + unsigned int fromIndex, unsigned int toIndex) +{ + from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex)); + from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo); +} + +SubgraphView::InputSlots CreateInputsFrom(const std::vector& layers) +{ + SubgraphView::InputSlots result; + for (auto&& layer : layers) + { + for (auto&& it = layer->BeginInputSlots(); it != layer->EndInputSlots(); ++it) + { + result.push_back(&(*it)); + } + } + return result; +} + +SubgraphView::OutputSlots CreateOutputsFrom(const std::vector& layers) +{ + SubgraphView::OutputSlots result; + for (auto && layer : layers) + { + for (auto&& it = layer->BeginOutputSlots(); it != layer->EndOutputSlots(); ++it) + { + result.push_back(&(*it)); + } + } + return result; +} + +SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::InputSlots&& inputs, + SubgraphView::OutputSlots&& outputs, + SubgraphView::Layers&& layers) +{ + return std::make_unique(std::move(inputs), std::move(outputs), std::move(layers)); +} + +armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId) +{ + auto& backendRegistry = BackendRegistryInstance(); + auto backendFactory = backendRegistry.GetFactory(backendId); + auto backendObjPtr = backendFactory(); + + return backendObjPtr; +} diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp index 68180fb289..5da0228842 100644 --- a/src/backends/backendsCommon/test/CommonTestUtils.hpp +++ 
b/src/backends/backendsCommon/test/CommonTestUtils.hpp @@ -2,21 +2,71 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // + #pragma once #include +#include +#include + +#include +#include -using namespace armnn; +#include -namespace +// Connects two layers +void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo, + unsigned int fromIndex = 0, unsigned int toIndex = 0); + +// Checks that two collections have the exact same contents (in any order) +// The given collections do not have to contain duplicates +// Cannot use std::sort here because std lists have their own std::list::sort method +template +bool AreEqual(const CollectionType& lhs, const CollectionType& rhs) { + if (lhs.size() != rhs.size()) + { + return false; + } + + auto lhs_it = std::find_if(lhs.begin(), lhs.end(), [&rhs](auto& item) + { + return std::find(rhs.begin(), rhs.end(), item) == rhs.end(); + }); -// Connects two layers. -void Connect(IConnectableLayer* from, IConnectableLayer* to, const TensorInfo& tensorInfo, - unsigned int fromIndex = 0, unsigned int toIndex = 0) + return lhs_it == lhs.end(); +} + +// Checks that the given collection contains the specified item +template +bool Contains(const CollectionType& collection, const typename CollectionType::value_type& item) { - from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex)); - from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo); + return std::find(collection.begin(), collection.end(), item) != collection.end(); } +// Checks that the given map contains the specified key +template +bool Contains(const MapType& map, const typename MapType::key_type& key) +{ + return map.find(key) != map.end(); } + +template +void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo) +{ + layer->m_Weight = std::make_unique(weightInfo); + layer->m_Bias = std::make_unique(biasInfo); + + 
layer->m_Weight->Allocate(); + layer->m_Bias->Allocate(); +} + +armnn::SubgraphView::InputSlots CreateInputsFrom(const std::vector& layers); + +armnn::SubgraphView::OutputSlots CreateOutputsFrom(const std::vector& layers); + +armnn::SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(armnn::SubgraphView::InputSlots&& inputs, + armnn::SubgraphView::OutputSlots&& outputs, + armnn::SubgraphView::Layers&& layers); + +armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId); diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp old mode 100755 new mode 100644 diff --git a/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp index 9a53456e81..5a42550a5f 100644 --- a/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp +++ b/src/backends/backendsCommon/test/DetectionPostProcessTestImpl.hpp @@ -5,8 +5,9 @@ #pragma once +#include "CommonTestUtils.hpp" + #include -#include #include namespace{ diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp index ce689d7ab3..1c97bef467 100644 --- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp @@ -5,8 +5,9 @@ #pragma once +#include "CommonTestUtils.hpp" + #include -#include #include namespace{ diff --git a/src/backends/backendsCommon/test/MergerTestImpl.hpp b/src/backends/backendsCommon/test/MergerTestImpl.hpp index 35ab2bc861..edf5e2b7be 100644 --- a/src/backends/backendsCommon/test/MergerTestImpl.hpp +++ b/src/backends/backendsCommon/test/MergerTestImpl.hpp @@ -4,12 +4,12 @@ // #pragma once +#include "CommonTestUtils.hpp" + #include #include -#include - #include #include diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp index 
eca38cdddf..4eb96b5336 100644 --- a/src/backends/backendsCommon/test/MockBackend.cpp +++ b/src/backends/backendsCommon/test/MockBackend.cpp @@ -29,6 +29,7 @@ bool IsLayerSupported(const armnn::Layer* layer) { case armnn::LayerType::Input: case armnn::LayerType::Output: + case armnn::LayerType::Addition: case armnn::LayerType::Convolution2d: // Layer supported return true; diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp new file mode 100644 index 0000000000..a6e835612b --- /dev/null +++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp @@ -0,0 +1,1277 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "CommonTestUtils.hpp" +#include "MockBackend.hpp" +#include "MockBackendId.hpp" + +#include +#include + +#include + +#include + +#include + +using namespace armnn; + +namespace +{ + +// The expected number of layers, input and output slots in a subgraph after a test +struct ExpectedSubgraphSize +{ + size_t m_NumInputSlots = 0; + size_t m_NumOutputSlots = 0; + size_t m_NumLayers = 0; +}; + +// Keep the layers organized by layer name +using LayerNameToLayerMap = std::unordered_map; + +// Used to convert input and output slots from reference type (as stored in graphs) to +// pointer type (as stored in subgraphs) +template +SlotType* ConvertReferenceTypeToPointerType(const SlotType& input) +{ + return const_cast(&input); +} + +// Used to convert input and output slots from reference type (as stored in graphs) to +// pointer type (as stored in subgraphs), array version +template +std::vector ConvertReferenceTypeToPointerType(const std::vector& input) +{ + std::vector output; + std::transform(input.begin(), + input.end(), + std::back_inserter(output), + [](const SlotType& inputItem) + { + return ConvertReferenceTypeToPointerType(inputItem); + }); + + return output; +} + +// Convenience function to add an input 
layer to a graph +Layer* AddInputLayer(Graph& graph, + const std::string& layerName, + const TensorInfo& inputInfo, + LayerBindingId inputId = 0) +{ + Layer* const inputLayer = graph.AddLayer(inputId, layerName.c_str()); + BOOST_TEST(inputLayer); + inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo); + return inputLayer; +} + +// Convenience function to add an output layer to a graph +Layer* AddOutputLayer(Graph& graph, + const std::string& layerName) +{ + Layer* const outputLayer = graph.AddLayer(0, layerName.c_str()); + BOOST_TEST(outputLayer); + return outputLayer; +} + +// Convenience function to add a convolution layer to a graph +Convolution2dLayer* AddConvolutionLayer(Graph& graph, + LayerNameToLayerMap& layersInGraph, + const Convolution2dDescriptor& convolutionDescriptor, + const std::string& layerName, + const TensorInfo& weightInfo, + const TensorInfo& biasInfo, + const TensorInfo& outputInfo) +{ + Convolution2dLayer* const convLayer = graph.AddLayer(convolutionDescriptor, layerName.c_str()); + BOOST_TEST(convLayer); + SetWeightAndBias(convLayer, weightInfo, biasInfo); + convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + layersInGraph.insert(std::make_pair(convLayer->GetName(), convLayer)); + return convLayer; +} + +// Convenience function to add a pooling layer to a graph +Pooling2dLayer* AddPoolingLayer(Graph& graph, + LayerNameToLayerMap& layersInGraph, + const Pooling2dDescriptor& poolingDescriptor, + const std::string& layerName, + const TensorInfo& outputInfo) +{ + Pooling2dLayer* const poolingLayer = graph.AddLayer(poolingDescriptor, layerName.c_str()); + BOOST_TEST(poolingLayer); + poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + layersInGraph.insert(std::make_pair(poolingLayer->GetName(), poolingLayer)); + return poolingLayer; +} + +// Convenience function to add an addition layer to a graph +AdditionLayer* AddAdditionaLayer(Graph& graph, + LayerNameToLayerMap& layersInGraph, + const std::string& layerName, + const TensorInfo& 
outputInfo) +{ + AdditionLayer* const additionLayer = graph.AddLayer(layerName.c_str()); + BOOST_TEST(additionLayer); + additionLayer->GetOutputSlot(0).SetTensorInfo(outputInfo); + layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer)); + return additionLayer; +} + +// Convenience function to check that the given substitution matches the specified expected values +void CheckSubstitution(const OptimizationViews::SubstitutionPair& substitution, + const ExpectedSubgraphSize& expectedSubstitutableSubgraphSize, + const ExpectedSubgraphSize& expectedReplacementSubgraphSize, + const SubgraphView::InputSlots& expectedSubstitutableInputSlots, + const SubgraphView::OutputSlots& expectedSubstitutableOutputSlots, + const SubgraphView::Layers& expectedSubstitutableLayers) +{ + const SubgraphView& substitutableSubgraph = substitution.m_SubstitutableSubgraph; + const SubgraphView::InputSlots& substitutableSubgraphInputSlots = substitutableSubgraph.GetInputSlots(); + const SubgraphView::OutputSlots& substitutableSubgraphOutputSlots = substitutableSubgraph.GetOutputSlots(); + const SubgraphView::Layers& substitutableSubgraphLayers = substitutableSubgraph.GetLayers(); + + const SubgraphView& replacementSubgraph = substitution.m_ReplacementSubgraph; + const SubgraphView::InputSlots& replacementSubgraphInputSlots = replacementSubgraph.GetInputSlots(); + const SubgraphView::OutputSlots& replacementSubgraphOutputSlots = replacementSubgraph.GetOutputSlots(); + const SubgraphView::Layers& replacementSubgraphLayers = replacementSubgraph.GetLayers(); + + BOOST_TEST(substitutableSubgraphInputSlots.size() == expectedSubstitutableSubgraphSize.m_NumInputSlots); + BOOST_TEST(substitutableSubgraphOutputSlots.size() == expectedSubstitutableSubgraphSize.m_NumOutputSlots); + BOOST_TEST(substitutableSubgraphLayers.size() == expectedSubstitutableSubgraphSize.m_NumLayers); + + BOOST_TEST(AreEqual(substitutableSubgraphInputSlots, expectedSubstitutableInputSlots)); + 
BOOST_TEST(AreEqual(substitutableSubgraphOutputSlots, expectedSubstitutableOutputSlots)); + BOOST_TEST(AreEqual(substitutableSubgraphLayers, expectedSubstitutableLayers)); + + BOOST_TEST(replacementSubgraphInputSlots.size() == expectedReplacementSubgraphSize.m_NumInputSlots); + BOOST_TEST(replacementSubgraphOutputSlots.size() == expectedReplacementSubgraphSize.m_NumOutputSlots); + BOOST_TEST(replacementSubgraphLayers.size() == expectedReplacementSubgraphSize.m_NumLayers); + + BOOST_TEST(!AreEqual(replacementSubgraphInputSlots, expectedSubstitutableInputSlots)); + BOOST_TEST(!AreEqual(replacementSubgraphOutputSlots, expectedSubstitutableOutputSlots)); + BOOST_TEST(!AreEqual(replacementSubgraphLayers, expectedSubstitutableLayers)); + + BOOST_TEST(std::all_of(replacementSubgraphLayers.begin(), + replacementSubgraphLayers.end(), + [](const Layer* layer) + { + return layer->GetType() == LayerType::PreCompiled; + })); +} + +// Convenience function to check that the given failed subgraph matches the specified expected values +void CheckFailedSubgraph(const SubgraphView& failedSubgraph, + const ExpectedSubgraphSize& expectedFailedSubgraphSize, + const SubgraphView::InputSlots& expectedFailedInputSlots, + const SubgraphView::OutputSlots& expectedFailedOutputSlots, + const SubgraphView::Layers& expectedFailedLayers) +{ + const SubgraphView::InputSlots& failedSubgraphInputSlots = failedSubgraph.GetInputSlots(); + const SubgraphView::OutputSlots& failedSubgraphOutputSlots = failedSubgraph.GetOutputSlots(); + const SubgraphView::Layers& failedSubgraphLayers = failedSubgraph.GetLayers(); + + BOOST_TEST(failedSubgraphInputSlots.size() == expectedFailedSubgraphSize.m_NumInputSlots); + BOOST_TEST(failedSubgraphOutputSlots.size() == expectedFailedSubgraphSize.m_NumOutputSlots); + BOOST_TEST(failedSubgraphLayers.size() == expectedFailedSubgraphSize.m_NumLayers); + + BOOST_TEST(AreEqual(failedSubgraphInputSlots, expectedFailedInputSlots)); + 
BOOST_TEST(AreEqual(failedSubgraphOutputSlots, expectedFailedOutputSlots)); + BOOST_TEST(AreEqual(failedSubgraphLayers, expectedFailedLayers)); +} + +// Convenience function to check that the given untouched subgraph matches the specified expected values +void CheckUntouchedSubgraph(const SubgraphView& untouchedSubgraph, + const ExpectedSubgraphSize& expectedUntouchedSubgraphSize, + const SubgraphView::InputSlots& expectedUntouchedInputSlots, + const SubgraphView::OutputSlots& expectedUntouchedOutputSlots, + const SubgraphView::Layers& expectedUntouchedLayers) +{ + const SubgraphView::InputSlots& untouchedSubgraphInputSlots = untouchedSubgraph.GetInputSlots(); + const SubgraphView::OutputSlots& untouchedSubgraphOutputSlots = untouchedSubgraph.GetOutputSlots(); + const SubgraphView::Layers& untouchedSubgraphLayers = untouchedSubgraph.GetLayers(); + + BOOST_TEST(untouchedSubgraphInputSlots.size() == expectedUntouchedSubgraphSize.m_NumInputSlots); + BOOST_TEST(untouchedSubgraphOutputSlots.size() == expectedUntouchedSubgraphSize.m_NumOutputSlots); + BOOST_TEST(untouchedSubgraphLayers.size() == expectedUntouchedSubgraphSize.m_NumLayers); + + BOOST_TEST(AreEqual(untouchedSubgraphInputSlots, expectedUntouchedInputSlots)); + BOOST_TEST(AreEqual(untouchedSubgraphOutputSlots, expectedUntouchedOutputSlots)); + BOOST_TEST(AreEqual(untouchedSubgraphLayers, expectedUntouchedLayers)); +} + +// Creates a subgraph containing only a single unsupported layer (only convolutions are unsupported by the mock backend) +SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + + Pooling2dDescriptor poolingDescriptor; + poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + poolingDescriptor.m_PoolWidth = 2; + poolingDescriptor.m_PoolHeight = 2; + 
poolingDescriptor.m_StrideX = 2; + poolingDescriptor.m_StrideY = 2; + poolingDescriptor.m_PadLeft = 1; + poolingDescriptor.m_PadRight = 1; + poolingDescriptor.m_PadTop = 1; + poolingDescriptor.m_PadBottom = 1; + poolingDescriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + poolingDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo); + Pooling2dLayer* const poolingLayer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling layer", outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(poolingLayer->GetInputSlot(0)); + poolingLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({poolingLayer}), + CreateOutputsFrom({poolingLayer}), + {poolingLayer}); +} + +// Creates a subgraph containing only unsupported layers (only convolutions are unsupported by the mock backend) +SubgraphView::SubgraphViewPtr BuildFullyUnsupportedSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + + Pooling2dDescriptor poolingDescriptor; + poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + poolingDescriptor.m_PoolWidth = 2; + poolingDescriptor.m_PoolHeight = 2; + poolingDescriptor.m_StrideX = 2; + poolingDescriptor.m_StrideY = 2; + poolingDescriptor.m_PadLeft = 1; + poolingDescriptor.m_PadRight = 1; + poolingDescriptor.m_PadTop = 1; + poolingDescriptor.m_PadBottom = 1; + poolingDescriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + poolingDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", 
inputInfo); + Pooling2dLayer* const pooling1Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling1 layer", outputInfo); + Pooling2dLayer* const pooling2Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling2 layer", outputInfo); + Pooling2dLayer* const pooling3Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling3 layer", outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(pooling1Layer->GetInputSlot(0)); + pooling1Layer->GetOutputSlot(0).Connect(pooling2Layer->GetInputSlot(0)); + pooling2Layer->GetOutputSlot(0).Connect(pooling3Layer->GetInputSlot(0)); + pooling3Layer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({pooling1Layer}), + CreateOutputsFrom({pooling3Layer}), + {pooling1Layer, + pooling2Layer, + pooling3Layer}); +} + +// Creates a simple subgraph with only one convolution layer, supported by the mock backend +SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0); + const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0); + + Convolution2dDescriptor convolutionDescriptor; + convolutionDescriptor.m_StrideX = 1; + convolutionDescriptor.m_StrideY = 1; + convolutionDescriptor.m_BiasEnabled = true; + convolutionDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo); + Convolution2dLayer* const convLayer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv 
layer", weightInfo, biasInfo, outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0)); + convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({convLayer}), + CreateOutputsFrom({convLayer}), + {convLayer}); +} + +// Creates a subgraph with five convolutions layers, all supported by the mock backend +SubgraphView::SubgraphViewPtr BuildFullyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0); + const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0); + + Convolution2dDescriptor convolutionDescriptor; + convolutionDescriptor.m_StrideX = 1; + convolutionDescriptor.m_StrideY = 1; + convolutionDescriptor.m_BiasEnabled = true; + convolutionDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo); + Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv1 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv2 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv3 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv4 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv5Layer = 
AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv5 layer", weightInfo, biasInfo, outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(conv1Layer->GetInputSlot(0)); + conv1Layer->GetOutputSlot(0).Connect(conv2Layer->GetInputSlot(0)); + conv2Layer->GetOutputSlot(0).Connect(conv3Layer->GetInputSlot(0)); + conv3Layer->GetOutputSlot(0).Connect(conv4Layer->GetInputSlot(0)); + conv4Layer->GetOutputSlot(0).Connect(conv5Layer->GetInputSlot(0)); + conv5Layer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({conv1Layer}), + CreateOutputsFrom({conv5Layer}), + {conv1Layer, + conv2Layer, + conv3Layer, + conv4Layer, + conv5Layer}); +} + +// Creates a subgraph with both supported and unsupported layers +// (only convolutions are unsupported by the mock backend) +SubgraphView::SubgraphViewPtr BuildPartiallySupportedSubgraph(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0); + const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0); + + Convolution2dDescriptor convolutionDescriptor; + convolutionDescriptor.m_StrideX = 1; + convolutionDescriptor.m_StrideY = 1; + convolutionDescriptor.m_BiasEnabled = true; + convolutionDescriptor.m_DataLayout = DataLayout::NHWC; + + Pooling2dDescriptor poolingDescriptor; + poolingDescriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + poolingDescriptor.m_PoolWidth = 2; + poolingDescriptor.m_PoolHeight = 2; + poolingDescriptor.m_StrideX = 2; + poolingDescriptor.m_StrideY = 2; + poolingDescriptor.m_PadLeft = 1; + poolingDescriptor.m_PadRight = 1; + 
poolingDescriptor.m_PadTop = 1; + poolingDescriptor.m_PadBottom = 1; + poolingDescriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + poolingDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo); + Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv1 layer", weightInfo, biasInfo, outputInfo); + Pooling2dLayer* const pooling1Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling1 layer", outputInfo); + Pooling2dLayer* const pooling2Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling2 layer", outputInfo); + Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv2 layer", weightInfo, biasInfo, outputInfo); + Pooling2dLayer* const pooling3Layer = AddPoolingLayer(graph, layersInGraph, poolingDescriptor, + "pooling3 layer", outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(conv1Layer->GetInputSlot(0)); + conv1Layer->GetOutputSlot(0).Connect(pooling1Layer->GetInputSlot(0)); + pooling1Layer->GetOutputSlot(0).Connect(pooling2Layer->GetInputSlot(0)); + pooling2Layer->GetOutputSlot(0).Connect(conv2Layer->GetInputSlot(0)); + conv2Layer->GetOutputSlot(0).Connect(pooling3Layer->GetInputSlot(0)); + pooling3Layer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({conv1Layer}), + CreateOutputsFrom({pooling3Layer}), + {conv1Layer, + pooling1Layer, + pooling2Layer, + conv2Layer, + pooling3Layer}); +} + +// Creates a subgraph with only unoptimizable layers ("unoptimizable" is added to the layer's name) +SubgraphView::SubgraphViewPtr BuildFullyUnoptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const 
TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0); + const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0); + + Convolution2dDescriptor convolutionDescriptor; + convolutionDescriptor.m_StrideX = 1; + convolutionDescriptor.m_StrideY = 1; + convolutionDescriptor.m_BiasEnabled = true; + convolutionDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo); + Convolution2dLayer* const convLayer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv layer unoptimizable", weightInfo, biasInfo, + outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0)); + convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({convLayer}), + CreateOutputsFrom({convLayer}), + {convLayer}); +} + +// Creates a subgraph with some unoptimizable layers ("unoptimizable" is added to the layer's name) +SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph1(Graph& graph, LayerNameToLayerMap& layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0); + const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0); + + Convolution2dDescriptor convolutionDescriptor; + convolutionDescriptor.m_StrideX = 1; + convolutionDescriptor.m_StrideY = 1; + convolutionDescriptor.m_BiasEnabled = true; + 
convolutionDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const inputLayer = AddInputLayer(graph, "input layer", inputInfo); + Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv1 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv2 layer unoptimizable", weightInfo, biasInfo, + outputInfo); + Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv3 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv4Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv4 layer unoptimizable", weightInfo, biasInfo, + outputInfo); + Convolution2dLayer* const conv5Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv5 layer", weightInfo, biasInfo, outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + inputLayer->GetOutputSlot(0).Connect(conv1Layer->GetInputSlot(0)); + conv1Layer->GetOutputSlot(0).Connect(conv2Layer->GetInputSlot(0)); + conv2Layer->GetOutputSlot(0).Connect(conv3Layer->GetInputSlot(0)); + conv3Layer->GetOutputSlot(0).Connect(conv4Layer->GetInputSlot(0)); + conv4Layer->GetOutputSlot(0).Connect(conv5Layer->GetInputSlot(0)); + conv5Layer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({conv1Layer}), + CreateOutputsFrom({conv5Layer}), + {conv1Layer, + conv2Layer, + conv3Layer, + conv4Layer, + conv5Layer}); +} + +// Creates a subgraph with some input unoptimizable layers ("unoptimizable" is added to the layer's name), +// this is meant to test input slots coming from different layers +SubgraphView::SubgraphViewPtr BuildPartiallyOptimizableSubgraph2(Graph& graph, LayerNameToLayerMap& 
layersInGraph) +{ + const TensorInfo inputInfo ({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo outputInfo({ 1, 16, 16, 16 }, DataType::QuantisedAsymm8, 1.0f, 0); + const TensorInfo weightInfo({ 16, 1, 1, 16 }, DataType::QuantisedAsymm8, 0.9f, 0); + const TensorInfo biasInfo ({ 1, 1, 1, 16 }, DataType::Signed32, 0.9f, 0); + + Convolution2dDescriptor convolutionDescriptor; + convolutionDescriptor.m_StrideX = 1; + convolutionDescriptor.m_StrideY = 1; + convolutionDescriptor.m_BiasEnabled = true; + convolutionDescriptor.m_DataLayout = DataLayout::NHWC; + + // Construct the graph + Layer* const input1Layer = AddInputLayer(graph, "input1 layer", inputInfo, 0); + Layer* const input2Layer = AddInputLayer(graph, "input2 layer", inputInfo, 1); + Convolution2dLayer* const conv1Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv1 layer", weightInfo, biasInfo, outputInfo); + Convolution2dLayer* const conv2Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv2 layer unoptimizable", weightInfo, biasInfo, + outputInfo); + Convolution2dLayer* const conv3Layer = AddConvolutionLayer(graph, layersInGraph, convolutionDescriptor, + "conv3 layer", weightInfo, biasInfo, outputInfo); + AdditionLayer* const addLayer = AddAdditionaLayer(graph, layersInGraph, "add layer", outputInfo); + Layer* const outputLayer = AddOutputLayer(graph, "output layer"); + + // Connect the network + input1Layer->GetOutputSlot(0).Connect(conv1Layer->GetInputSlot(0)); + input2Layer->GetOutputSlot(0).Connect(conv2Layer->GetInputSlot(0)); + conv1Layer->GetOutputSlot(0).Connect(addLayer->GetInputSlot(0)); + conv2Layer->GetOutputSlot(0).Connect(conv3Layer->GetInputSlot(0)); + conv3Layer->GetOutputSlot(0).Connect(addLayer->GetInputSlot(1)); + addLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0)); + + // Create the subgraph view for the whole network + return CreateSubgraphViewFrom(CreateInputsFrom({conv1Layer, + conv2Layer}), 
+ CreateOutputsFrom({addLayer}), + {conv1Layer, + conv2Layer, + conv3Layer, + addLayer}); +} + +// The input subgraph contains only a single unsupported layer (only convolutions are unsupported by the mock backend) +void FullyUnsupporteSubgraphTestImpl1() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create an unsupported subgraph + SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph1(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + BOOST_TEST(subgraphInputSlots.size() == 1); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 1); + + BOOST_TEST(Contains(layersInGraph, "pooling layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly, but no optimization is performed + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // ======================================================================= + // The expected results are: + // - No substitutions + // - Exactly one failed subgraph, corresponding to the whole original one + // - No untouched subgraphs + // ======================================================================= + + // ----------------------- + // Check the substitutions + // ----------------------- + + BOOST_TEST(optimizationViews.GetSubstitutions().empty()); + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs(); + 
BOOST_TEST(failedSubgraphs.size() == 1); + + CheckFailedSubgraph(failedSubgraphs.at(0), + { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() }, + subgraphInputSlots, + subgraphOutputSlots, + subgraphLayers); + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty()); +} + +// The input subgraph contains only unsupported layers (only convolutions are unsupported by the mock backend) +void FullyUnsupporteSubgraphTestImpl2() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create an unsupported subgraph + SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph2(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + BOOST_TEST(subgraphInputSlots.size() == 1); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 3); + + BOOST_TEST(Contains(layersInGraph, "pooling1 layer")); + BOOST_TEST(Contains(layersInGraph, "pooling2 layer")); + BOOST_TEST(Contains(layersInGraph, "pooling3 layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly, but no optimization is performed + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // ======================================================================= + // The expected results are: + // - No substitutions + // - Exactly one failed subgraph, corresponding to the whole original one + // - No untouched subgraphs + // 
======================================================================= + + // ----------------------- + // Check the substitutions + // ----------------------- + + BOOST_TEST(optimizationViews.GetSubstitutions().empty()); + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs(); + BOOST_TEST(failedSubgraphs.size() == 1); + + std::vector expectedFailedLayers{ layersInGraph.at("pooling1 layer"), + layersInGraph.at("pooling2 layer"), + layersInGraph.at("pooling3 layer") }; + + const SubgraphView& failedSubgraph = failedSubgraphs.at(0); + + CheckFailedSubgraph(failedSubgraph, + { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() }, + subgraphInputSlots, + subgraphOutputSlots, + subgraphLayers); + + const SubgraphView::Layers& failedSubgraphLayers = failedSubgraph.GetLayers(); + + BOOST_TEST(failedSubgraphLayers.front() + 0, expectedFailedLayers.at(0)); + BOOST_TEST(failedSubgraphLayers.front() + 1, expectedFailedLayers.at(1)); + BOOST_TEST(failedSubgraphLayers.front() + 2, expectedFailedLayers.at(2)); + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty()); +} + +// A simple case with only one layer (convolution) to optimize, supported by the mock backend +void FullyOptimizableSubgraphTestImpl1() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create a fully optimizable subgraph + SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph1(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + 
BOOST_TEST(subgraphInputSlots.size() == 1); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 1); + + BOOST_TEST(Contains(layersInGraph, "conv layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // =========================================================================================== + // The expected results are: + // - Exactly one substitution, mapping the whole input subgraph to a new replacement subgraph + // - No failed subgraphs + // - No untouched subgraphs + // =========================================================================================== + + // ----------------------- + // Check the substitutions + // ----------------------- + + const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions(); + BOOST_TEST(substitutions.size() == 1); + + CheckSubstitution(substitutions.at(0), + { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() }, + { subgraphInputSlots.size(), subgraphOutputSlots.size(), 1 }, + subgraphInputSlots, + subgraphOutputSlots, + subgraphLayers); + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty()); + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty()); +} + +// A case with five layers (all convolutions) to optimize, all supported by the mock backend +void FullyOptimizableSubgraphTestImpl2() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create a fully optimizable subgraph + 
SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph2(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + BOOST_TEST(subgraphPtr->GetInputSlots().size() == 1); + BOOST_TEST(subgraphPtr->GetOutputSlots().size() == 1); + BOOST_TEST(subgraphPtr->GetLayers().size() == 5); + + BOOST_TEST(Contains(layersInGraph, "conv1 layer")); + BOOST_TEST(Contains(layersInGraph, "conv2 layer")); + BOOST_TEST(Contains(layersInGraph, "conv3 layer")); + BOOST_TEST(Contains(layersInGraph, "conv4 layer")); + BOOST_TEST(Contains(layersInGraph, "conv5 layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // =========================================================================================== + // The expected results are: + // - Exactly one substitution, mapping the whole input subgraph to a new replacement subgraph + // - No failed subgraphs + // - No untouched subgraphs + // =========================================================================================== + + // ----------------------- + // Check the substitutions + // ----------------------- + + const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions(); + BOOST_TEST(substitutions.size() == 1); + + std::list expectedSubstitutableLayers{ layersInGraph.at("conv1 layer"), + layersInGraph.at("conv2 layer"), + layersInGraph.at("conv3 layer"), + layersInGraph.at("conv4 layer"), + 
layersInGraph.at("conv5 layer") }; + + const OptimizationViews::SubstitutionPair& substitution = substitutions.at(0); + + CheckSubstitution(substitution, + { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() }, + { subgraphInputSlots.size(), subgraphOutputSlots.size(), 1 }, + subgraphInputSlots, + subgraphOutputSlots, + expectedSubstitutableLayers); + + const SubgraphView::Layers& substitutableSubgraphLayers = substitution.m_SubstitutableSubgraph.GetLayers(); + + BOOST_TEST(substitutableSubgraphLayers.front() + 0, expectedSubstitutableLayers.front() + 0); + BOOST_TEST(substitutableSubgraphLayers.front() + 1, expectedSubstitutableLayers.front() + 1); + BOOST_TEST(substitutableSubgraphLayers.front() + 2, expectedSubstitutableLayers.front() + 2); + BOOST_TEST(substitutableSubgraphLayers.front() + 3, expectedSubstitutableLayers.front() + 3); + BOOST_TEST(substitutableSubgraphLayers.front() + 4, expectedSubstitutableLayers.front() + 4); + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty()); + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty()); +} + +// The input subgraph contains both supported and unsupported layers +// (but only convolutions are unsupported by the mock backend) +void PartiallySupportedSubgraphTestImpl() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create a partially supported subgraph + SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallySupportedSubgraph(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + 
BOOST_TEST(subgraphInputSlots.size() == 1); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 5); + + BOOST_TEST(Contains(layersInGraph, "conv1 layer")); + BOOST_TEST(Contains(layersInGraph, "pooling1 layer")); + BOOST_TEST(Contains(layersInGraph, "pooling2 layer")); + BOOST_TEST(Contains(layersInGraph, "conv2 layer")); + BOOST_TEST(Contains(layersInGraph, "pooling3 layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // ======================================================================== + // The expected results are: + // - Exactly two substitution, corresponding to the supported layers + // - Exactly two failed subgraphs, corresponding to the unsupported layers + // - No untouched subgraphs + // ======================================================================== + + // ----------------------- + // Check the substitutions + // ----------------------- + + const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions(); + BOOST_TEST(substitutions.size() == 2); + + std::vector expectedSubstitutableSubgraphSizes{ { 1, 1, 1 }, + { 1, 1, 1 } }; + std::vector expectedReplacementSubgraphSizes{ { 1, 1, 1 }, + { 1, 1, 1 } }; + std::vector expectedSubstitutableInputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer")->GetInputSlots()) + }; + std::vector expectedSubstitutableOutputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer")->GetOutputSlots()) + }; + 
std::vector expectedSubstitutableLayers + { + { layersInGraph.at("conv1 layer") }, + { layersInGraph.at("conv2 layer") } + }; + + for (size_t substitutionIndex = 0; substitutionIndex < substitutions.size(); substitutionIndex++) + { + CheckSubstitution(substitutions.at(substitutionIndex), + expectedSubstitutableSubgraphSizes.at(substitutionIndex), + expectedReplacementSubgraphSizes.at(substitutionIndex), + expectedSubstitutableInputSlots.at(substitutionIndex), + expectedSubstitutableOutputSlots.at(substitutionIndex), + expectedSubstitutableLayers.at(substitutionIndex)); + } + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs(); + BOOST_TEST(failedSubgraphs.size() == 2); + + std::vector expectedFailedSubgraphSizes{ { 1, 1, 2 }, + { 1, 1, 1 } }; + std::vector expectedFailedInputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("pooling1 layer")->GetInputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("pooling3 layer")->GetInputSlots()) + }; + std::vector expectedFailedOutputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("pooling2 layer")->GetOutputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("pooling3 layer")->GetOutputSlots()) + }; + std::vector expectedFailedLayers + { + { layersInGraph.at("pooling1 layer"), + layersInGraph.at("pooling2 layer") }, + { layersInGraph.at("pooling3 layer") } + }; + + for (size_t failedIndex = 0; failedIndex < failedSubgraphs.size(); failedIndex++) + { + CheckFailedSubgraph(failedSubgraphs.at(failedIndex), + expectedFailedSubgraphSizes.at(failedIndex), + expectedFailedInputSlots.at(failedIndex), + expectedFailedOutputSlots.at(failedIndex), + expectedFailedLayers.at(failedIndex)); + } + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + 
BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty()); +} + +// The input subgraph contains only unoptimizable layers ("unoptimizable" is added to the layer's name) +void FullyUnoptimizableSubgraphTestImpl1() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create a fully unoptimizable subgraph + SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyUnoptimizableSubgraph1(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + BOOST_TEST(subgraphInputSlots.size() == 1); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 1); + + BOOST_TEST(Contains(layersInGraph, "conv layer unoptimizable")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // ============================================================================ + // The expected results are: + // - No substitutions + // - No failed subgraphs + // - Exactly one untouched subgraph, corresponding to the whole input subgraph + // ============================================================================ + + // ----------------------- + // Check the substitutions + // ----------------------- + + BOOST_TEST(optimizationViews.GetSubstitutions().empty()); + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty()); + + // ----------------------------- + // Check the untouched subgraphs + // 
----------------------------- + + const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs(); + BOOST_TEST(untouchedSubgraphs.size() == 1); + + CheckUntouchedSubgraph(untouchedSubgraphs.at(0), + { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() }, + subgraphInputSlots, + subgraphOutputSlots, + subgraphLayers); +} + +// The input subgraph contains some unoptimizable layers ("unoptimizable" is added to the layer's name) +void PartiallyOptimizableSubgraphTestImpl1() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create a fully optimizable subgraph + SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph1(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + BOOST_TEST(subgraphInputSlots.size() == 1); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 5); + + BOOST_TEST(Contains(layersInGraph, "conv1 layer")); + BOOST_TEST(Contains(layersInGraph, "conv2 layer unoptimizable")); + BOOST_TEST(Contains(layersInGraph, "conv3 layer")); + BOOST_TEST(Contains(layersInGraph, "conv4 layer unoptimizable")); + BOOST_TEST(Contains(layersInGraph, "conv5 layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // =============================================================================== + // The expected results are: + // - Exactly three substitutions, 
corresponding to the optimizable layers + // - No failed subgraphs + // - Exactly two untouched subgraphs, corresponding to the non-optimizable layers + // =============================================================================== + + // ----------------------- + // Check the substitutions + // ----------------------- + + const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions(); + BOOST_TEST(substitutions.size() == 3); + + std::vector expectedSubstitutableSubgraphSizes{ { 1, 1, 1 }, + { 1, 1, 1 }, + { 1, 1, 1 } }; + std::vector expectedReplacementSubgraphSizes{ { 1, 1, 1 }, + { 1, 1, 1 }, + { 1, 1, 1 } }; + std::vector expectedSubstitutableInputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetInputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv5 layer")->GetInputSlots()) + }; + std::vector expectedSubstitutableOutputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetOutputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv5 layer")->GetOutputSlots()) + }; + std::vector expectedSubstitutableLayers + { + { layersInGraph.at("conv1 layer") }, + { layersInGraph.at("conv3 layer") }, + { layersInGraph.at("conv5 layer") } + }; + + for (size_t substitutionIndex = 0; substitutionIndex < substitutions.size(); substitutionIndex++) + { + CheckSubstitution(substitutions.at(substitutionIndex), + expectedSubstitutableSubgraphSizes.at(substitutionIndex), + expectedReplacementSubgraphSizes.at(substitutionIndex), + expectedSubstitutableInputSlots.at(substitutionIndex), + expectedSubstitutableOutputSlots.at(substitutionIndex), + expectedSubstitutableLayers.at(substitutionIndex)); + } + + // -------------------------- + // Check the failed subgraphs + // 
-------------------------- + + BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty()); + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs(); + BOOST_TEST(untouchedSubgraphs.size() == 2); + + std::vector expectedUntouchedSubgraphSizes{ { 1, 1, 1 }, + { 1, 1, 1 } }; + std::vector expectedUntouchedInputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetInputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv4 layer unoptimizable")->GetInputSlots()) + }; + std::vector expectedUntouchedOutputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetOutputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("conv4 layer unoptimizable")->GetOutputSlots()) + }; + std::vector expectedUntouchedLayers + { + { layersInGraph.at("conv2 layer unoptimizable") }, + { layersInGraph.at("conv4 layer unoptimizable") } + }; + + for (size_t untouchedIndex = 0; untouchedIndex < untouchedSubgraphs.size(); untouchedIndex++) + { + CheckUntouchedSubgraph(untouchedSubgraphs.at(untouchedIndex), + expectedUntouchedSubgraphSizes.at(untouchedIndex), + expectedUntouchedInputSlots.at(untouchedIndex), + expectedUntouchedOutputSlots.at(untouchedIndex), + expectedUntouchedLayers.at(untouchedIndex)); + } +} + +// The input subgraph contains some unoptimizable layers ("unoptimizable" is added to the layer's name), +// this is meant to test input slots coming from different layers +void PartiallyOptimizableSubgraphTestImpl2() +{ + Graph graph; + LayerNameToLayerMap layersInGraph; + + // Create a fully optimizable subgraph + SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph2(graph, layersInGraph); + BOOST_TEST((subgraphPtr != nullptr)); + + const SubgraphView::InputSlots& subgraphInputSlots = 
subgraphPtr->GetInputSlots(); + const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots(); + const SubgraphView::Layers& subgraphLayers = subgraphPtr->GetLayers(); + + BOOST_TEST(subgraphInputSlots.size() == 2); + BOOST_TEST(subgraphOutputSlots.size() == 1); + BOOST_TEST(subgraphLayers.size() == 4); + + BOOST_TEST(Contains(layersInGraph, "conv1 layer")); + BOOST_TEST(Contains(layersInGraph, "conv2 layer unoptimizable")); + BOOST_TEST(Contains(layersInGraph, "conv3 layer")); + BOOST_TEST(Contains(layersInGraph, "add layer")); + + // Create a mock backend object + auto backendObjPtr = CreateBackendObject(MockBackendId()); + BOOST_TEST((backendObjPtr != nullptr)); + + // Optimize the subgraph + OptimizationViews optimizationViews; + + // Check that the optimization is carried out correctly + BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr)); + + // ============================================================================== + // The expected results are: + // - Exactly one substitution, corresponding to the optimizable layers + // - No failed subgraphs + // - Exactly two untouched subgraphs, corresponding to the non-optimizable layer + // ============================================================================== + + // ----------------------- + // Check the substitutions + // ----------------------- + + const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions(); + BOOST_TEST(substitutions.size() == 2); + + std::vector expectedSubstitutableSubgraphSizes{ { 1, 1, 1 }, + { 2, 1, 2 } }; + std::vector expectedReplacementSubgraphSizes{ { 1, 1, 1 }, + { 2, 1, 1 } }; + + SubgraphView::InputSlots expectedSubstitutableSubgraph2InputSlots = + ConvertReferenceTypeToPointerType(layersInGraph.at("conv3 layer")->GetInputSlots()); + expectedSubstitutableSubgraph2InputSlots.push_back( + ConvertReferenceTypeToPointerType(layersInGraph.at("add layer")->GetInputSlot(0))); + + 
std::vector expectedSubstitutableInputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetInputSlots()), + expectedSubstitutableSubgraph2InputSlots + }; + std::vector expectedSubstitutableOutputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv1 layer")->GetOutputSlots()), + ConvertReferenceTypeToPointerType(layersInGraph.at("add layer")->GetOutputSlots()) + }; + std::vector expectedSubstitutableLayers + { + { layersInGraph.at("conv1 layer") }, + { layersInGraph.at("conv3 layer"), + layersInGraph.at("add layer") } + }; + + for (size_t substitutionIndex = 0; substitutionIndex < substitutions.size(); substitutionIndex++) + { + CheckSubstitution(substitutions.at(substitutionIndex), + expectedSubstitutableSubgraphSizes.at(substitutionIndex), + expectedReplacementSubgraphSizes.at(substitutionIndex), + expectedSubstitutableInputSlots.at(substitutionIndex), + expectedSubstitutableOutputSlots.at(substitutionIndex), + expectedSubstitutableLayers.at(substitutionIndex)); + } + + // -------------------------- + // Check the failed subgraphs + // -------------------------- + + BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty()); + + // ----------------------------- + // Check the untouched subgraphs + // ----------------------------- + + const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs(); + BOOST_TEST(untouchedSubgraphs.size() == 1); + + std::vector expectedUntouchedSubgraphSizes{ { 1, 1, 1 } }; + std::vector expectedUntouchedInputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetInputSlots()) + }; + std::vector expectedUntouchedOutputSlots + { + ConvertReferenceTypeToPointerType(layersInGraph.at("conv2 layer unoptimizable")->GetOutputSlots()) + }; + std::vector expectedUntouchedLayers + { + { layersInGraph.at("conv2 layer unoptimizable") } + }; + + for (size_t untouchedIndex = 0; untouchedIndex < untouchedSubgraphs.size(); 
untouchedIndex++) + { + CheckUntouchedSubgraph(untouchedSubgraphs.at(untouchedIndex), + expectedUntouchedSubgraphSizes.at(untouchedIndex), + expectedUntouchedInputSlots.at(untouchedIndex), + expectedUntouchedOutputSlots.at(untouchedIndex), + expectedUntouchedLayers.at(untouchedIndex)); + } +} + +} // Anonymous namespace + +BOOST_AUTO_TEST_SUITE(OptimizeSubGraph) + +BOOST_AUTO_TEST_CASE(FullyUnsupportedSubgraph1) { FullyUnsupporteSubgraphTestImpl1(); } +BOOST_AUTO_TEST_CASE(FullyUnsupportedSubgraph2) { FullyUnsupporteSubgraphTestImpl2(); } +BOOST_AUTO_TEST_CASE(FullyOptimizableSubgraph1) { FullyOptimizableSubgraphTestImpl1(); } +BOOST_AUTO_TEST_CASE(FullyOptimizableSubgraph2) { FullyOptimizableSubgraphTestImpl2(); } +BOOST_AUTO_TEST_CASE(PartiallySupportedSubgraph) { PartiallySupportedSubgraphTestImpl(); } +BOOST_AUTO_TEST_CASE(FullyUnoptimizableSubgraph) { FullyUnoptimizableSubgraphTestImpl1(); } +BOOST_AUTO_TEST_CASE(PartiallyOptimizableSubgraph1) { PartiallyOptimizableSubgraphTestImpl1(); } +BOOST_AUTO_TEST_CASE(PartiallyOptimizableSubgraph2) { PartiallyOptimizableSubgraphTestImpl2(); } + +BOOST_AUTO_TEST_SUITE_END() -- cgit v1.2.1