From 2b8c1da565871b3e69567c2cfc46c8dcbef301aa Mon Sep 17 00:00:00 2001
From: Matthew Jackson
Date: Thu, 4 Jul 2019 14:59:16 +0100
Subject: IVGCVSW-3418 Add Arm NN front end support for the new Stack layer

* Added new StackLayer class
* Made necessary changes to Descriptors, ILayerSupport, ILayerVisitor, etc.
* Added unit tests

Signed-off-by: Matthew Jackson
Change-Id: Ieb97a928a342ffe1901c6058eb895711c358fd3d
---
 Android.mk                                         |   1 +
 CMakeLists.txt                                     |   2 +
 include/armnn/Descriptors.hpp                      |  23 +++
 include/armnn/DescriptorsFwd.hpp                   |   1 +
 include/armnn/ILayerSupport.hpp                    |   5 +
 include/armnn/ILayerVisitor.hpp                    |   8 ++
 include/armnn/INetwork.hpp                         |   7 +
 include/armnn/LayerSupport.hpp                     |   8 ++
 include/armnn/LayerVisitorBase.hpp                 |   4 +
 src/armnn/InternalTypes.hpp                        |   1 +
 src/armnn/LayersFwd.hpp                            |   2 +
 src/armnn/Network.cpp                              |   6 +
 src/armnn/Network.hpp                              |   3 +
 src/armnn/layers/StackLayer.cpp                    |  98 +++++++++++++
 src/armnn/layers/StackLayer.hpp                    |  49 +++++++
 src/armnn/test/InferOutputTests.cpp                |   6 +
 src/armnn/test/InferOutputTests.hpp                | 154 +++++++++++++++++++++
 src/armnnSerializer/Serializer.cpp                 |   7 +
 src/armnnSerializer/Serializer.hpp                 |   4 +
 src/backends/backendsCommon/LayerSupportBase.cpp   |   8 ++
 src/backends/backendsCommon/LayerSupportBase.hpp   |   5 +
 src/backends/backendsCommon/WorkloadData.cpp       |  85 ++++++++++++
 src/backends/backendsCommon/WorkloadData.hpp       |   6 +
 src/backends/backendsCommon/WorkloadFactory.cpp    |  33 +++++
 src/backends/backendsCommon/WorkloadFactory.hpp    |   3 +
 .../test/IsLayerSupportedTestImpl.hpp              |   2 +
 26 files changed, 531 insertions(+)
 create mode 100644 src/armnn/layers/StackLayer.cpp
 create mode 100644 src/armnn/layers/StackLayer.hpp

diff --git a/Android.mk b/Android.mk
index e2daeea37b..b9cc6e50e2 100644
--- a/Android.mk
+++ b/Android.mk
@@ -132,6 +132,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/SpaceToDepthLayer.cpp \
         src/armnn/layers/SoftmaxLayer.cpp \
         src/armnn/layers/SplitterLayer.cpp \
+        src/armnn/layers/StackLayer.cpp \
         src/armnn/layers/StridedSliceLayer.cpp \
         src/armnn/layers/SubtractionLayer.cpp \
         src/armnn/layers/SwitchLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index dc34b1aa92..7c163dc3ef 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -312,6 +312,8 @@ list(APPEND armnn_sources
     src/armnn/layers/SpaceToDepthLayer.cpp
     src/armnn/layers/SplitterLayer.hpp
     src/armnn/layers/SplitterLayer.cpp
+    src/armnn/layers/StackLayer.hpp
+    src/armnn/layers/StackLayer.cpp
     src/armnn/layers/StridedSliceLayer.cpp
     src/armnn/layers/StridedSliceLayer.hpp
     src/armnn/layers/SubtractionLayer.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index cb76615889..377f0705d7 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -648,6 +648,29 @@ struct PadDescriptor
     float m_PadValue;
 };
 
+/// A StackDescriptor for the StackLayer.
+struct StackDescriptor
+{
+    StackDescriptor()
+    : m_Axis(0)
+    , m_NumInputs(0)
+    , m_InputShape()
+    {}
+
+    StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
+    : m_Axis(axis)
+    , m_NumInputs(numInputs)
+    , m_InputShape(inputShape)
+    {}
+
+    /// 0-based axis along which to stack the input tensors.
+    uint32_t m_Axis;
+    /// Number of input tensors.
+    uint32_t m_NumInputs;
+    /// Required shape of all input tensors.
+    TensorShape m_InputShape;
+};
+
 /// A StridedSliceDescriptor for the StridedSliceLayer.
 struct StridedSliceDescriptor
 {
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 9627ddc7f1..eddf91f4ce 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -30,6 +30,7 @@ struct ResizeDescriptor;
 struct SoftmaxDescriptor;
 struct SpaceToBatchNdDescriptor;
 struct SpaceToDepthDescriptor;
+struct StackDescriptor;
 struct StridedSliceDescriptor;
 struct TransposeConvolution2dDescriptor;
 struct ViewsDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 53dd29d87e..3cc6eabe9f 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -271,6 +271,11 @@ public:
                                      const ViewsDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                                  const TensorInfo& output,
+                                  const StackDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsStridedSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const StridedSliceDescriptor& descriptor,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 86cf4a33cf..6e5b5463ac 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -370,6 +370,14 @@ public:
                                    const ViewsDescriptor& splitterDescriptor,
                                    const char* name = nullptr) = 0;
 
+    /// Function a stack layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param stackDescriptor - Parameters for the stack operation.
+    /// @param name - Optional name for the layer.
+    virtual void VisitStackLayer(const IConnectableLayer* layer,
+                                 const StackDescriptor& stackDescriptor,
+                                 const char* name = nullptr) = 0;
+
     /// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1f1b51095b..9e88c9279d 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -451,6 +451,13 @@ public:
                                                               const Optional<ConstTensor>& biases,
                                                               const char* name = nullptr) = 0;
 
+    /// Adds a stack layer to the network.
+    /// @param descriptor - Description of the stack layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
+                                             const char* name = nullptr) = 0;
+
     virtual void Accept(ILayerVisitor& visitor) const = 0;
 
 protected:
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 65f9d089ba..6a3f1774bd 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -359,6 +359,14 @@ bool IsSplitterSupported(const BackendId& backend,
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
 
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsStackSupported(const BackendId& backend,
+                      const std::vector<const TensorInfo*> inputs,
+                      const TensorInfo& output,
+                      const StackDescriptor& descriptor,
+                      char* reasonIfUnsupported = nullptr,
+                      size_t reasonIfUnsupportedMaxLength = 1024);
+
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsStridedSliceSupported(const BackendId& backend,
                              const TensorInfo& input,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index d657154b47..f107e9fb68 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -188,6 +188,10 @@ public:
                            const ViewsDescriptor&,
                            const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitStackLayer(const IConnectableLayer*,
+                         const StackDescriptor&,
+                         const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitStridedSliceLayer(const IConnectableLayer*,
                                 const StridedSliceDescriptor&,
                                 const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index b097265d81..bf095ac8a2 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -58,6 +58,7 @@ enum class LayerType
     SpaceToBatchNd,
     SpaceToDepth,
     Splitter,
+    Stack,
     StridedSlice,
     Subtraction,
     Switch,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 0f9633a58c..b3f7adc02c 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -50,6 +50,7 @@
 #include "layers/SpaceToBatchNdLayer.hpp"
 #include "layers/SpaceToDepthLayer.hpp"
 #include "layers/SplitterLayer.hpp"
+#include "layers/StackLayer.hpp"
 #include "layers/StridedSliceLayer.hpp"
 #include "layers/SubtractionLayer.hpp"
 #include "layers/SwitchLayer.hpp"
@@ -126,6 +127,7 @@ DECLARE_LAYER(Softmax)
 DECLARE_LAYER(SpaceToBatchNd)
 DECLARE_LAYER(SpaceToDepth)
 DECLARE_LAYER(Splitter)
+DECLARE_LAYER(Stack)
 DECLARE_LAYER(StridedSlice)
 DECLARE_LAYER(Subtraction)
 DECLARE_LAYER(Switch)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3b7a1cf2b3..29493816a8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1422,6 +1422,12 @@ IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvol
     return layer;
 }
 
+IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
+                                          const char* name)
+{
+    return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
+}
+
 void Network::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 7fc5b651d0..8a99debb47 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -197,6 +197,9 @@ public:
                                                       const Optional<ConstTensor>& biases,
                                                       const char* name = nullptr) override;
 
+    IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
+                                     const char* name = nullptr) override;
+
     void Accept(ILayerVisitor& visitor) const override;
 
 private:
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
new file mode 100644
index 0000000000..59bc8d5a13
--- /dev/null
+++ b/src/armnn/layers/StackLayer.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "StackLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <queue>
+
+namespace armnn
+{
+
+StackLayer::StackLayer(const StackDescriptor& param, const char* name)
+    : LayerWithParameters(param.m_NumInputs, 1, LayerType::Stack, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+    StackQueueDescriptor descriptor;
+    return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+StackLayer* StackLayer::Clone(Graph& graph) const
+{
+    return CloneBase<StackLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    const TensorShape& inputShape = m_Param.m_InputShape;
+    const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
+    const unsigned int axis = m_Param.m_Axis;
+
+    BOOST_ASSERT(axis <= inputNumDimensions);
+
+    std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
+    for (unsigned int i = 0; i < axis; ++i)
+    {
+        dimensionSizes[i] = inputShape[i];
+    }
+
+    dimensionSizes[axis] = m_Param.m_NumInputs;
+
+    for (unsigned int i = axis + 1; i < inputNumDimensions + 1; ++i)
+    {
+        dimensionSizes[i] = inputShape[i-1];
+    }
+
+    TensorShape targetShape = TensorShape(inputNumDimensions + 1, dimensionSizes.data());
+
+    return std::vector<TensorShape>({ targetShape });
+}
+
+void StackLayer::ValidateTensorShapesFromInputs()
+{
+    // Validates Stack layer.
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StackLayer: Num Input Slots must match Num Inputs.",
+        m_Param.m_NumInputs,
+        GetNumInputSlots());
+
+    VerifyLayerConnections(m_Param.m_NumInputs, CHECK_LOCATION());
+
+    // Constructs and validates input shapes
+    std::vector<TensorShape> inputShapes;
+    for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
+    {
+        TensorShape inputShape = GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape();
+        if (inputShape != m_Param.m_InputShape)
+        {
+            throw LayerValidationException("StackLayer: TensorShape set on InputSlot["
+                                           + std::to_string(i)
+                                           + "] does not match defined input shape");
+        }
+        inputShapes.push_back(inputShape);
+    }
+
+    auto inferredShapes = InferOutputShapes(inputShapes);
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void StackLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitStackLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
new file mode 100644
index 0000000000..6c845972d0
--- /dev/null
+++ b/src/armnn/layers/StackLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a stack operation.
+class StackLayer : public LayerWithParameters<StackDescriptor>
+{
+public:
+    /// Makes a workload for the Stack type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    StackLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref StackLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs are equal to number of outputs,
+    /// otherwise infers the output shapes from given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes layer has.
+    /// @return A vector to the inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a StackLayer.
+    /// @param [in] param StackDescriptor to configure the stack operation.
+    /// @param [in] name Optional name for the layer.
+    StackLayer(const StackDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~StackLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 6ce56e9805..24ae8b263b 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -25,4 +25,10 @@ ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeNoMatch, PreluInferOut
 ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsMatch, PreluValidateTensorShapesFromInputsMatchTest)
 ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsNoMatch, PreluValidateTensorShapesFromInputsNoMatchTest)
 
+// Stack
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsMatch, StackInferOutputShapeFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsNoMatch, StackInferOutputShapeFromInputsNoMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsMatch, StackValidateTensorShapesFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsNoMatch, StackValidateTensorShapesFromInputsNoMatchTest)
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 6e5602a296..47eabd3cb0 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -13,6 +13,7 @@
 #include <layers/BatchToSpaceNdLayer.hpp>
 #include <layers/SpaceToDepthLayer.hpp>
 #include <layers/PreluLayer.hpp>
+#include <layers/StackLayer.hpp>
 
 #include <boost/algorithm/string.hpp>
 #include <boost/test/unit_test.hpp>
@@ -193,3 +194,156 @@ void PreluValidateTensorShapesFromInputsNoMatchTest()
     // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
     BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
 }
+
+void StackInferOutputShapeImpl(const armnn::StackDescriptor descriptor,
+                               const std::vector<armnn::TensorShape>& inputShapes,
+                               std::vector<armnn::TensorShape>& outputShapes)
+{
+    armnn::Graph graph;
+    armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+    outputShapes = stackLayer->InferOutputShapes(inputShapes);
+}
+
+void StackInferOutputShapeFromInputsMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 4, 2 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 2 }, // Actual input shapes
+        { 4, 2 },
+        { 4, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape
+    (
+        { 4, 3, 2 }
+    );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void StackInferOutputShapeFromInputsNoMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 1;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 4, 2 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 4, 2 }, // Actual input shapes
+        { 4, 5 }, // Incorrectly shaped input tensor
+        { 4, 2 }
+    };
+
+    // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape
+    (
+        { 4, 3, 2 }
+    );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void CreateStackLayerHelper(armnn::Graph& graph,
+                            const armnn::StackDescriptor& descriptor,
+                            const std::vector<armnn::TensorShape>& inputShapes,
+                            const armnn::TensorShape& outputShape)
+{
+    // Creates the Stack layer
+    armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+
+    // Creates extra layers
+    std::vector<armnn::Layer*> inputs;
+    for (unsigned int i = 0; i < inputShapes.size(); ++i)
+    {
+        inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
+    }
+    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    // Connects up
+    std::vector<armnn::TensorInfo> inputTensorInfos;
+    for (unsigned int i = 0; i < inputShapes.size(); ++i)
+    {
+        inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
+    }
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    for (unsigned int i = 0; i < inputShapes.size(); ++i)
+    {
+        Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
+    }
+    Connect(stackLayer, output, outputTensorInfo, 0, 0);
+}
+
+void StackValidateTensorShapesFromInputsMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 2, 5 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 2, 5 }, // Actual input shapes
+        { 2, 5 },
+        { 2, 5 }
+    };
+
+    // Creates the Stack layer
+    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+}
+
+void StackValidateTensorShapesFromInputsNoMatchTest()
+{
+    armnn::Graph graph;
+
+    armnn::StackDescriptor descriptor;
+    descriptor.m_Axis = 0;
+    descriptor.m_NumInputs = 3;
+    descriptor.m_InputShape = armnn::TensorShape
+    (
+        { 2, 5 } // Defined input shape
+    );
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 2, 5 }, // Actual input shapes
+        { 2, 2 }, // Incorrectly shaped input tensor
+        { 2, 5 }
+    };
+
+    // Creates the Stack layer
+    CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+    // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+}
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index c9719aad04..0a9e3353d7 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -925,6 +925,13 @@ void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer*
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
 }
 
+void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
+                                        const armnn::StackDescriptor& stackDescriptor,
+                                        const char* name)
+{
+    throw UnimplementedException("SerializerVisitor::VisitStackLayer not yet implemented");
+}
+
 void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
                                                const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                                const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 0383d10555..8404a7f093 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -202,6 +202,10 @@ public:
                            const armnn::ViewsDescriptor& viewsDescriptor,
                            const char* name = nullptr) override;
 
+    void VisitStackLayer(const armnn::IConnectableLayer* layer,
+                         const armnn::StackDescriptor& stackDescriptor,
+                         const char* name = nullptr) override;
+
     void VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
                                 const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
                                 const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index ea22fac9ce..26b98a22b2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -415,6 +415,14 @@ bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                                        const TensorInfo& output,
+                                        const StackDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 36b8e77c38..dad07981fd 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -257,6 +257,11 @@ public:
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+                          const TensorInfo& output,
+                          const StackDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 324c1debc0..878602391c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -581,6 +581,91 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
                        "ConcatQueueDescriptor");
 }
 
+//---------------------------------------------------------------
+void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
+
+    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
+    }
+
+    // All inputs must have the same shape, which is defined in parameters
+    const TensorShape& inputShape = m_Parameters.m_InputShape;
+    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
+                                           "must match the defined shape.");
+        }
+    }
+
+    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
+    // since the output tensor has an additional dimension.
+    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater "
+                                       "than the number of input dimensions.");
+    }
+
+    // Output shape must be as inferred from the input shape
+    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
+    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
+    {
+        if (outputShape[i] != inputShape[i])
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                           "match shape inferred from input tensor.");
+        }
+    }
+
+    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
+    {
+        throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                       "match shape inferred from input tensor.");
+    }
+
+    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
+    {
+        if (outputShape[i] != inputShape[i-1])
+        {
+            throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+                                           "match shape inferred from input tensor.");
+        }
+    }
+
+    // Check the supported data types
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::Boolean,
+        DataType::Signed32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "StackQueueDescriptor");
+
+    for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+    {
+        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+                                     workloadInfo.m_InputTensorInfos[i],
+                                     "StackQueueDescriptor",
+                                     "InputTensor[0]",
+                                     "InputTensor[" + std::to_string(i) + "]");
+    }
+    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+                                 workloadInfo.m_OutputTensorInfos[0],
+                                 "StackQueueDescriptor",
+                                 "InputTensor[0]",
+                                 "OutputTensor[0]");
+}
+
 //---------------------------------------------------------------
 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index d241f7b24f..f3d50699e6 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -110,6 +110,12 @@ struct ConcatQueueDescriptor : QueueDescriptorWithParameters
 // Deprecated. Use ConcatQueueDescriptor instead
 using MergerQueueDescriptor = ConcatQueueDescriptor;
 
+// Stack layer workload data.
+struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 // Activation layer workload data.
 struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1c23e1774b..a24a325b2d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -729,6 +729,33 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                                      reason);
             break;
         }
+        case LayerType::Stack:
+        {
+            auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
+
+            // Get vector of all inputs.
+            auto getTensorInfo = [&dataType](const InputSlot& slot)
+                {
+                    return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+                };
+            auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
+            auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+            std::vector<TensorInfo> inputs(beginI, endI);
+
+            auto getTensorInfoPtr = [](const TensorInfo& info)
+                {
+                    return &info;
+                };
+            auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+            auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+            std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
+
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+            break;
+        }
         case LayerType::StridedSlice:
         {
             auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
@@ -1130,6 +1157,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDep
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+                                                         const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& Info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e09640f760..749a258a9d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -189,6 +189,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b02ab7b301..6aff7596b5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -408,6 +408,8 @@ DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
 
 DECLARE_LAYER_POLICY_2_PARAM(Splitter)
 
+DECLARE_LAYER_POLICY_2_PARAM(Stack)
+
 DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
 
 DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
-- 
cgit v1.2.1
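
Editor's usage sketch (not part of the patch): the snippet below shows how a caller might exercise the new AddStackLayer API once these changes land, stacking three [4, 2] Float32 tensors along axis 1 to produce a [4, 3, 2] output, matching the shape inference in StackLayer::InferOutputShapes. The binding ids and layer names are illustrative. Note that this commit adds front-end support only: IWorkloadFactory::CreateStack still returns an empty pointer, so optimizing and running the network requires a backend workload that arrives in a later change.

// stack_example.cpp - minimal sketch of the new front-end API (hypothetical caller code)
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // All inputs must match the shape declared in the descriptor.
    const TensorInfo inputInfo(TensorShape({ 4, 2 }), DataType::Float32);
    const TensorInfo outputInfo(TensorShape({ 4, 3, 2 }), DataType::Float32);

    // axis = 1, numInputs = 3, inputShape = [4, 2]
    StackDescriptor descriptor(1, 3, TensorShape({ 4, 2 }));

    IConnectableLayer* stack  = network->AddStackLayer(descriptor, "stack");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Wire one input layer into each of the stack layer's input slots.
    for (unsigned int i = 0; i < descriptor.m_NumInputs; ++i)
    {
        IConnectableLayer* input = network->AddInputLayer(static_cast<LayerBindingId>(i), "input");
        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        input->GetOutputSlot(0).Connect(stack->GetInputSlot(i));
    }

    stack->GetOutputSlot(0).SetTensorInfo(outputInfo);
    stack->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}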