diff options
Diffstat (limited to 'src/armnn')
-rw-r--r-- | src/armnn/InternalTypes.cpp | 1 |
-rw-r--r-- | src/armnn/InternalTypes.hpp | 1 |
-rw-r--r-- | src/armnn/LayerSupport.cpp | 10 |
-rw-r--r-- | src/armnn/LayersFwd.hpp | 2 |
-rw-r--r-- | src/armnn/Network.cpp | 5 |
-rw-r--r-- | src/armnn/Network.hpp | 2 |
-rw-r--r-- | src/armnn/layers/MergeLayer.cpp | 65 |
-rw-r--r-- | src/armnn/layers/MergeLayer.hpp | 47 |
-rw-r--r-- | src/armnn/test/NetworkTests.cpp | 52 |
9 files changed, 185 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp index fe1542b162..93a4f94378 100644 --- a/src/armnn/InternalTypes.cpp +++ b/src/armnn/InternalTypes.cpp @@ -39,6 +39,7 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Maximum: return "Maximum"; case LayerType::Mean: return "Mean"; case LayerType::MemCopy: return "MemCopy"; + case LayerType::Merge: return "Merge"; case LayerType::Merger: return "Merger"; case LayerType::Minimum: return "Minimum"; case LayerType::Multiplication: return "Multiplication"; diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 1972e9c1b5..7c7c601d95 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -39,6 +39,7 @@ enum class LayerType Maximum, Mean, MemCopy, + Merge, Merger, Minimum, Multiplication, diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index 030973306f..bc6eec891b 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -355,6 +355,16 @@ bool IsMemCopySupported(const BackendId &backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsMemCopySupported, input, output); } +bool IsMergeSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergeSupported, input0, input1, output); +} + bool IsMergerSupported(const BackendId& backend, std::vector<const TensorInfo*> inputs, const TensorInfo& output, diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 9d87aeeee3..0bd68e04af 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -31,6 +31,7 @@ #include "layers/MaximumLayer.hpp" #include "layers/MeanLayer.hpp" #include "layers/MemCopyLayer.hpp" +#include "layers/MergeLayer.hpp" #include "layers/MergerLayer.hpp" #include "layers/MinimumLayer.hpp" #include "layers/MultiplicationLayer.hpp" @@ -102,6 +103,7 @@ 
DECLARE_LAYER(Lstm) DECLARE_LAYER(Maximum) DECLARE_LAYER(Mean) DECLARE_LAYER(MemCopy) +DECLARE_LAYER(Merge) DECLARE_LAYER(Merger) DECLARE_LAYER(Minimum) DECLARE_LAYER(Multiplication) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 6dbd4611df..73db2e88d7 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -966,6 +966,11 @@ IConnectableLayer* Network::AddGatherLayer(const char* name) return m_Graph->AddLayer<GatherLayer>(name); } +IConnectableLayer* Network::AddMergeLayer(const char* name) +{ + return m_Graph->AddLayer<MergeLayer>(name); +} + void Network::Accept(ILayerVisitor& visitor) const { for (auto layer : GetGraph()) diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 782531acde..bb7b9eb6f4 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -174,6 +174,8 @@ public: IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override; + IConnectableLayer* AddMergeLayer(const char* name = nullptr) override; + void Accept(ILayerVisitor& visitor) const override; private: diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp new file mode 100644 index 0000000000..1d4dc49379 --- /dev/null +++ b/src/armnn/layers/MergeLayer.cpp @@ -0,0 +1,65 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#include "MergeLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +namespace armnn +{ + +MergeLayer::MergeLayer(const char* name) + : Layer(2, 1, LayerType::Merge, name) +{} + +std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + return nullptr; +} + +MergeLayer* MergeLayer::Clone(Graph& graph) const +{ + return CloneBase<MergeLayer>(graph, GetName()); +} + +void MergeLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(2, CHECK_LOCATION()); + + std::vector<TensorShape> inferredShapes = InferOutputShapes({ + GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(), + }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "MergeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const +{ + BOOST_ASSERT(inputShapes.size() == 2); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "MergeLayer: TensorShapes set on inputs do not match", + inputShapes[0], + inputShapes[1] + ); + + return {inputShapes[0]}; +} + +void MergeLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitMergeLayer(this, GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp new file mode 100644 index 0000000000..66664ca952 --- /dev/null +++ b/src/armnn/layers/MergeLayer.hpp @@ -0,0 +1,47 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "Layer.hpp" + +namespace armnn +{ + +/// This layer represents a merge of two equally shaped input tensors.
+class MergeLayer : public Layer +{ +public: + /// Makes a workload for the Merge type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + MergeLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref MergeLayer. + void ValidateTensorShapesFromInputs() override; + + /// Infers the output shapes from given input shapes. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector to the inferred output shape. + std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a MergeLayer. + /// @param [in] name Optional name for the layer. 
+ MergeLayer(const char* name); + + /// Default destructor + ~MergeLayer() = default; +}; + +} // namespace armnn diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp index 4de09a2804..dd8eb7773f 100644 --- a/src/armnn/test/NetworkTests.cpp +++ b/src/armnn/test/NetworkTests.cpp @@ -417,4 +417,56 @@ BOOST_AUTO_TEST_CASE(Network_AddQuantize) } +BOOST_AUTO_TEST_CASE(Network_AddMerge) +{ + struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy> + { + void VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name) override + { + m_Visited = true; + + BOOST_TEST(layer); + + std::string expectedName = std::string("merge"); + BOOST_TEST(std::string(layer->GetName()) == expectedName); + BOOST_TEST(std::string(name) == expectedName); + + BOOST_TEST(layer->GetNumInputSlots() == 2); + BOOST_TEST(layer->GetNumOutputSlots() == 1); + + const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo(); + BOOST_TEST((infoIn0.GetDataType() == armnn::DataType::Float32)); + + const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo(); + BOOST_TEST((infoIn1.GetDataType() == armnn::DataType::Float32)); + + const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo(); + BOOST_TEST((infoOut.GetDataType() == armnn::DataType::Float32)); + } + + bool m_Visited = false; + }; + + armnn::INetworkPtr network = armnn::INetwork::Create(); + + armnn::IConnectableLayer* input0 = network->AddInputLayer(0); + armnn::IConnectableLayer* input1 = network->AddInputLayer(1); + armnn::IConnectableLayer* merge = network->AddMergeLayer("merge"); + armnn::IConnectableLayer* output = network->AddOutputLayer(0); + + input0->GetOutputSlot(0).Connect(merge->GetInputSlot(0)); + input1->GetOutputSlot(0).Connect(merge->GetInputSlot(1)); + merge->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + const armnn::TensorInfo info({3,1}, armnn::DataType::Float32); + 
input0->GetOutputSlot(0).SetTensorInfo(info); + input1->GetOutputSlot(0).SetTensorInfo(info); + merge->GetOutputSlot(0).SetTensorInfo(info); + + Test testMerge; + network->Accept(testMerge); + + BOOST_TEST(testMerge.m_Visited == true); +} + BOOST_AUTO_TEST_SUITE_END() |