diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2019-04-05 15:25:46 +0100 |
---|---|---|
committer | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-04-05 17:11:02 +0100 |
commit | eff363d58992fb6384053259f9e1ee773f8cd4df (patch) | |
tree | e0bce8c4694ee15e016951f9168afbf9b75a9c79 /src/armnn | |
parent | 1f88630874fe346cd0cca8d8e38e0fb96cc1a3f4 (diff) | |
download | armnn-eff363d58992fb6384053259f9e1ee773f8cd4df.tar.gz |
IVGCVSW-2914 Add Switch Layer and no-op factory method
Change-Id: I6a6ece708a49e8a97c83a3e7fec11c88af1e1cfa
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Diffstat (limited to 'src/armnn')
-rw-r--r-- | src/armnn/InternalTypes.cpp | 1 | ||||
-rw-r--r-- | src/armnn/InternalTypes.hpp | 3 | ||||
-rw-r--r-- | src/armnn/LayerSupport.cpp | 11 | ||||
-rw-r--r-- | src/armnn/LayersFwd.hpp | 2 | ||||
-rw-r--r-- | src/armnn/Network.cpp | 5 | ||||
-rw-r--r-- | src/armnn/Network.hpp | 2 | ||||
-rw-r--r-- | src/armnn/layers/SwitchLayer.cpp | 60 | ||||
-rw-r--r-- | src/armnn/layers/SwitchLayer.hpp | 42 |
8 files changed, 125 insertions(+), 1 deletion(-)
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp index 93a4f94378..a811706dfe 100644 --- a/src/armnn/InternalTypes.cpp +++ b/src/armnn/InternalTypes.cpp @@ -57,6 +57,7 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Splitter: return "Splitter"; case LayerType::StridedSlice: return "StridedSlice"; case LayerType::Subtraction: return "Subtraction"; + case LayerType::Switch: return "Switch"; default: BOOST_ASSERT_MSG(false, "Unknown layer type"); return "Unknown"; diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index 7c7c601d95..5765b5bcf1 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -57,9 +57,10 @@ enum class LayerType SpaceToBatchNd, Splitter, StridedSlice, + Subtraction, // Last layer goes here. LastLayer, - Subtraction = LastLayer + Switch = LastLayer }; const char* GetLayerTypeAsCString(LayerType type); diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index bc6eec891b..320d9cef74 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -530,4 +530,15 @@ bool IsSubtractionSupported(const BackendId& backend, FORWARD_LAYER_SUPPORT_FUNC(backend, IsSubtractionSupported, input0, input1, output); } +bool IsSwitchSupported(const BackendId& backend, + const TensorInfo& input0, + const TensorInfo& input1, + const TensorInfo& output0, + const TensorInfo& output1, + char* reasonIfUnsupported, + size_t reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(backend, IsSwitchSupported, input0, input1, output0, output1); +} + } // namespace armnn diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 0bd68e04af..31cfa66896 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -50,6 +50,7 @@ #include "layers/SplitterLayer.hpp" #include "layers/StridedSliceLayer.hpp" #include "layers/SubtractionLayer.hpp" +#include "layers/SwitchLayer.hpp" namespace armnn { @@ -122,5 +123,6 @@ DECLARE_LAYER(SpaceToBatchNd) 
DECLARE_LAYER(Splitter) DECLARE_LAYER(StridedSlice) DECLARE_LAYER(Subtraction) +DECLARE_LAYER(Switch) } diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 73db2e88d7..c1462c090d 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -971,6 +971,11 @@ IConnectableLayer* Network::AddMergeLayer(const char* name) return m_Graph->AddLayer<MergeLayer>(name); } +IConnectableLayer* Network::AddSwitchLayer(const char* name) +{ + return m_Graph->AddLayer<SwitchLayer>(name); +} + void Network::Accept(ILayerVisitor& visitor) const { for (auto layer : GetGraph()) diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index bb7b9eb6f4..660ca87d13 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -176,6 +176,8 @@ public: IConnectableLayer* AddMergeLayer(const char* name = nullptr) override; + IConnectableLayer* AddSwitchLayer(const char* name = nullptr) override; + void Accept(ILayerVisitor& visitor) const override; private: diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp new file mode 100644 index 0000000000..eae6e0dfe2 --- /dev/null +++ b/src/armnn/layers/SwitchLayer.cpp @@ -0,0 +1,60 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#include "SwitchLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +namespace armnn +{ + +SwitchLayer::SwitchLayer(const char* name) + : Layer(2, 2, LayerType::Switch, name) +{} + +std::unique_ptr<IWorkload> SwitchLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + SwitchQueueDescriptor descriptor; + return factory.CreateSwitch(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +SwitchLayer* SwitchLayer::Clone(Graph& graph) const +{ + return CloneBase<SwitchLayer>(graph, GetName()); +} + +void SwitchLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(2, CHECK_LOCATION()); + + BOOST_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs."); + + // Assuming first input is the Input and second input is the Constant + std::vector<TensorShape> inferredShapes = InferOutputShapes({ + GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(), + GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(1).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +void SwitchLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitSwitchLayer(this, GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp new file mode 100644 index 0000000000..bfda8c2b1b --- /dev/null +++ b/src/armnn/layers/SwitchLayer.hpp @@ -0,0 +1,42 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "Layer.hpp" + +namespace armnn +{ + +/// This layer calculates both true and false outputs for input. +class SwitchLayer : public Layer +{ +public: + /// Makes a workload for the Switch type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + SwitchLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref SwitchLayer. + void ValidateTensorShapesFromInputs() override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a SwitchLayer. + /// @param [in] name Optional name for the layer. + SwitchLayer(const char* name); + + /// Default destructor + ~SwitchLayer() = default; +}; + +} // namespace armnn |