author    Matthew Jackson <matthew.jackson@arm.com>       2019-07-04 14:59:16 +0100
committer Áron Virginás-Tar <aron.virginas-tar@arm.com>   2019-07-10 12:06:51 +0000
commit    2b8c1da565871b3e69567c2cfc46c8dcbef301aa (patch)
tree      682327de212e273405cb257028568db997644c35
parent    ad5293a86e315049de36afd723dcd1a7e70681a7 (diff)
download  armnn-2b8c1da565871b3e69567c2cfc46c8dcbef301aa.tar.gz
IVGCVSW-3418 Add Arm NN front end support for the new Stack layer
* Added new StackLayer class
* Made necessary changes to Descriptors, ILayerSupport, ILayerVisitor, etc.
* Added unit tests

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: Ieb97a928a342ffe1901c6058eb895711c358fd3d
-rw-r--r--  Android.mk                                                      1
-rw-r--r--  CMakeLists.txt                                                  2
-rw-r--r--  include/armnn/Descriptors.hpp                                  23
-rw-r--r--  include/armnn/DescriptorsFwd.hpp                                1
-rw-r--r--  include/armnn/ILayerSupport.hpp                                 5
-rw-r--r--  include/armnn/ILayerVisitor.hpp                                 8
-rw-r--r--  include/armnn/INetwork.hpp                                      7
-rw-r--r--  include/armnn/LayerSupport.hpp                                  8
-rw-r--r--  include/armnn/LayerVisitorBase.hpp                              4
-rw-r--r--  src/armnn/InternalTypes.hpp                                     1
-rw-r--r--  src/armnn/LayersFwd.hpp                                         2
-rw-r--r--  src/armnn/Network.cpp                                           6
-rw-r--r--  src/armnn/Network.hpp                                           3
-rw-r--r--  src/armnn/layers/StackLayer.cpp                                98
-rw-r--r--  src/armnn/layers/StackLayer.hpp                                49
-rw-r--r--  src/armnn/test/InferOutputTests.cpp                             6
-rw-r--r--  src/armnn/test/InferOutputTests.hpp                           154
-rw-r--r--  src/armnnSerializer/Serializer.cpp                              7
-rw-r--r--  src/armnnSerializer/Serializer.hpp                              4
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp                8
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp                5
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                   85
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                    6
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                33
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp                 3
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp   2
26 files changed, 531 insertions, 0 deletions
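
For context, the new front-end entry points added by this patch (StackDescriptor and INetwork::AddStackLayer) can be driven as in the minimal sketch below. The network, names and shapes here are illustrative only, not part of the change, and since no backend implements Stack yet (see the LayerSupportBase and IWorkloadFactory defaults further down), this exercises graph construction only:

// Illustrative sketch: stack three [4, 2] tensors along axis 1 to get [4, 3, 2].
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Descriptor introduced by this patch: axis, number of inputs, common input shape.
    StackDescriptor descriptor(1, 3, TensorShape({ 4, 2 }));

    IConnectableLayer* stack  = network->AddStackLayer(descriptor, "stack");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    const TensorInfo inputInfo(TensorShape({ 4, 2 }), DataType::Float32);
    for (unsigned int i = 0; i < 3; ++i)
    {
        IConnectableLayer* input = network->AddInputLayer(static_cast<LayerBindingId>(i), "input");
        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        input->GetOutputSlot(0).Connect(stack->GetInputSlot(i));
    }

    stack->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 4, 3, 2 }), DataType::Float32));
    stack->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    return 0;
}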
diff --git a/Android.mk b/Android.mk
index e2daeea37b..b9cc6e50e2 100644
--- a/Android.mk
+++ b/Android.mk
@@ -132,6 +132,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/SpaceToDepthLayer.cpp \
src/armnn/layers/SoftmaxLayer.cpp \
src/armnn/layers/SplitterLayer.cpp \
+ src/armnn/layers/StackLayer.cpp \
src/armnn/layers/StridedSliceLayer.cpp \
src/armnn/layers/SubtractionLayer.cpp \
src/armnn/layers/SwitchLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index dc34b1aa92..7c163dc3ef 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -312,6 +312,8 @@ list(APPEND armnn_sources
src/armnn/layers/SpaceToDepthLayer.cpp
src/armnn/layers/SplitterLayer.hpp
src/armnn/layers/SplitterLayer.cpp
+ src/armnn/layers/StackLayer.hpp
+ src/armnn/layers/StackLayer.cpp
src/armnn/layers/StridedSliceLayer.cpp
src/armnn/layers/StridedSliceLayer.hpp
src/armnn/layers/SubtractionLayer.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index cb76615889..377f0705d7 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -648,6 +648,29 @@ struct PadDescriptor
float m_PadValue;
};
+/// A StackDescriptor for the StackLayer.
+struct StackDescriptor
+{
+ StackDescriptor()
+ : m_Axis(0)
+ , m_NumInputs(0)
+ , m_InputShape()
+ {}
+
+ StackDescriptor(uint32_t axis, uint32_t numInputs, const TensorShape& inputShape)
+ : m_Axis(axis)
+ , m_NumInputs(numInputs)
+ , m_InputShape(inputShape)
+ {}
+
+ /// 0-based axis along which to stack the input tensors.
+ uint32_t m_Axis;
+ /// Number of input tensors.
+ uint32_t m_NumInputs;
+ /// Required shape of all input tensors.
+ TensorShape m_InputShape;
+};
+
/// A StridedSliceDescriptor for the StridedSliceLayer.
struct StridedSliceDescriptor
{
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index 9627ddc7f1..eddf91f4ce 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -30,6 +30,7 @@ struct ResizeDescriptor;
struct SoftmaxDescriptor;
struct SpaceToBatchNdDescriptor;
struct SpaceToDepthDescriptor;
+struct StackDescriptor;
struct StridedSliceDescriptor;
struct TransposeConvolution2dDescriptor;
struct ViewsDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 53dd29d87e..3cc6eabe9f 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -271,6 +271,11 @@ public:
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 86cf4a33cf..6e5b5463ac 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -370,6 +370,14 @@ public:
const ViewsDescriptor& splitterDescriptor,
const char* name = nullptr) = 0;
+ /// Function a stack layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param stackDescriptor - Parameters for the stack operation.
+ /// @param name - Optional name for the layer.
+ virtual void VisitStackLayer(const IConnectableLayer* layer,
+ const StackDescriptor& stackDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param stridedSliceDescriptor - Parameters for the strided slice operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 1f1b51095b..9e88c9279d 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -451,6 +451,13 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) = 0;
+ /// Adds a stack layer to the network.
+ /// @param descriptor - Description of the stack layer.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
+ const char* name = nullptr) = 0;
+
virtual void Accept(ILayerVisitor& visitor) const = 0;
protected:
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 65f9d089ba..6a3f1774bd 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -360,6 +360,14 @@ bool IsSplitterSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsStackSupported(const BackendId& backend,
+ const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsStridedSliceSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index d657154b47..f107e9fb68 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -188,6 +188,10 @@ public:
const ViewsDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitStackLayer(const IConnectableLayer*,
+ const StackDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitStridedSliceLayer(const IConnectableLayer*,
const StridedSliceDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index b097265d81..bf095ac8a2 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -58,6 +58,7 @@ enum class LayerType
SpaceToBatchNd,
SpaceToDepth,
Splitter,
+ Stack,
StridedSlice,
Subtraction,
Switch,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 0f9633a58c..b3f7adc02c 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -50,6 +50,7 @@
#include "layers/SpaceToBatchNdLayer.hpp"
#include "layers/SpaceToDepthLayer.hpp"
#include "layers/SplitterLayer.hpp"
+#include "layers/StackLayer.hpp"
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
@@ -126,6 +127,7 @@ DECLARE_LAYER(Softmax)
DECLARE_LAYER(SpaceToBatchNd)
DECLARE_LAYER(SpaceToDepth)
DECLARE_LAYER(Splitter)
+DECLARE_LAYER(Stack)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3b7a1cf2b3..29493816a8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1422,6 +1422,12 @@ IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvol
return layer;
}
+IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
+}
+
void Network::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 7fc5b651d0..8a99debb47 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -197,6 +197,9 @@ public:
const Optional<ConstTensor>& biases,
const char* name = nullptr) override;
+ IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
+ const char* name = nullptr) override;
+
void Accept(ILayerVisitor& visitor) const override;
private:
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
new file mode 100644
index 0000000000..59bc8d5a13
--- /dev/null
+++ b/src/armnn/layers/StackLayer.cpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "StackLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <queue>
+
+namespace armnn
+{
+
+StackLayer::StackLayer(const StackDescriptor& param, const char* name)
+ : LayerWithParameters(param.m_NumInputs, 1, LayerType::Stack, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StackLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+ StackQueueDescriptor descriptor;
+ return factory.CreateStack(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+StackLayer* StackLayer::Clone(Graph& graph) const
+{
+ return CloneBase<StackLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ const TensorShape& inputShape = m_Param.m_InputShape;
+ const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
+ const unsigned int axis = m_Param.m_Axis;
+
+ BOOST_ASSERT(axis <= inputNumDimensions);
+
+ unsigned int dimensionSizes[inputNumDimensions + 1];
+ for (unsigned int i = 0; i < axis; ++i)
+ {
+ dimensionSizes[i] = inputShape[i];
+ }
+
+ dimensionSizes[axis] = m_Param.m_NumInputs;
+
+ for (unsigned int i = axis + 1; i < inputNumDimensions + 1; ++i)
+ {
+ dimensionSizes[i] = inputShape[i-1];
+ }
+
+ TensorShape targetShape = TensorShape(inputNumDimensions + 1, dimensionSizes);
+
+ return std::vector<TensorShape>({ targetShape });
+}
+
+void StackLayer::ValidateTensorShapesFromInputs()
+{
+ // Validates Stack layer.
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "StackLayer: Num Input Slots must match Num Inputs.",
+ m_Param.m_NumInputs,
+ GetNumInputSlots());
+
+ VerifyLayerConnections(m_Param.m_NumInputs, CHECK_LOCATION());
+
+ // Constructs and validates input shapes
+ std::vector<TensorShape> inputShapes;
+ for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
+ {
+ TensorShape inputShape = GetInputSlot(i).GetConnection()->GetTensorInfo().GetShape();
+ if (inputShape != m_Param.m_InputShape)
+ {
+ throw LayerValidationException("StackLayer: TensorShape set on InputSlot[" +
+ std::to_string(i) +
+ "] does not match defined input shape");
+ }
+ inputShapes.push_back(inputShape);
+ }
+
+ auto inferredShapes = InferOutputShapes(inputShapes);
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void StackLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitStackLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
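
The dimension bookkeeping in InferOutputShapes above amounts to inserting m_NumInputs at position m_Axis of the common input shape. A standalone sketch that mirrors it (hypothetical helper, not part of the patch):

#include <cassert>
#include <vector>

// Mirror of StackLayer::InferOutputShapes: insert numInputs at index axis.
std::vector<unsigned int> InferStackOutputShape(const std::vector<unsigned int>& inputShape,
                                                unsigned int axis,
                                                unsigned int numInputs)
{
    assert(axis <= inputShape.size());
    std::vector<unsigned int> outputShape(inputShape);
    outputShape.insert(outputShape.begin() + axis, numInputs);
    return outputShape;
}

int main()
{
    // Stacking three [4, 2] tensors along axis 1 yields [4, 3, 2],
    // matching StackInferOutputShapeFromInputsMatchTest further down.
    assert((InferStackOutputShape({ 4, 2 }, 1, 3) == std::vector<unsigned int>{ 4, 3, 2 }));
    return 0;
}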
diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp
new file mode 100644
index 0000000000..6c845972d0
--- /dev/null
+++ b/src/armnn/layers/StackLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a stack operation.
+class StackLayer : public LayerWithParameters<StackDescriptor>
+{
+public:
+ /// Makes a workload for the Stack type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ StackLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref StackLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs;
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector containing the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a StackLayer.
+ /// @param [in] param StackDescriptor to configure the stack operation.
+ /// @param [in] name Optional name for the layer.
+ StackLayer(const StackDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~StackLayer() = default;
+};
+
+} // namespace
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 6ce56e9805..24ae8b263b 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -25,4 +25,10 @@ ARMNN_SIMPLE_TEST_CASE(PreluInferOutputShapeNoMatch, PreluInferOut
ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsMatch, PreluValidateTensorShapesFromInputsMatchTest)
ARMNN_SIMPLE_TEST_CASE(PreluValidateTensorShapesFromInputsNoMatch, PreluValidateTensorShapesFromInputsNoMatchTest)
+// Stack
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsMatch, StackInferOutputShapeFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackInferOutputShapeFromInputsNoMatch, StackInferOutputShapeFromInputsNoMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsMatch, StackValidateTensorShapesFromInputsMatchTest)
+ARMNN_SIMPLE_TEST_CASE(StackValidateTensorShapesFromInputsNoMatch, StackValidateTensorShapesFromInputsNoMatchTest)
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 6e5602a296..47eabd3cb0 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -13,6 +13,7 @@
#include <layers/BatchToSpaceNdLayer.hpp>
#include <layers/SpaceToDepthLayer.hpp>
#include <layers/PreluLayer.hpp>
+#include <layers/StackLayer.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/test/unit_test.hpp>
@@ -193,3 +194,156 @@ void PreluValidateTensorShapesFromInputsNoMatchTest()
// Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
}
+
+void StackInferOutputShapeImpl(const armnn::StackDescriptor descriptor,
+ const std::vector<armnn::TensorShape>& inputShapes,
+ std::vector<armnn::TensorShape>& outputShapes)
+{
+ armnn::Graph graph;
+ armnn::StackLayer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+ outputShapes = stackLayer->InferOutputShapes(inputShapes);
+}
+
+void StackInferOutputShapeFromInputsMatchTest()
+{
+ armnn::Graph graph;
+
+ armnn::StackDescriptor descriptor;
+ descriptor.m_Axis = 1;
+ descriptor.m_NumInputs = 3;
+ descriptor.m_InputShape = armnn::TensorShape
+ (
+ { 4, 2 } // Defined input shape
+ );
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 4, 2 }, // Actual input shapes
+ { 4, 2 },
+ { 4, 2 }
+ };
+
+ std::vector<armnn::TensorShape> outputShapes;
+ BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+ armnn::TensorShape expectedOutputShape
+ (
+ { 4, 3, 2 }
+ );
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void StackInferOutputShapeFromInputsNoMatchTest()
+{
+ armnn::Graph graph;
+
+ armnn::StackDescriptor descriptor;
+ descriptor.m_Axis = 1;
+ descriptor.m_NumInputs = 3;
+ descriptor.m_InputShape = armnn::TensorShape
+ (
+ { 4, 2 } // Defined input shape
+ );
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 4, 2 }, // Actual input shapes
+ { 4, 5 }, // Incorrectly shaped input tensor
+ { 4, 2 }
+ };
+
+ // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
+ std::vector<armnn::TensorShape> outputShapes;
+ BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+ armnn::TensorShape expectedOutputShape
+ (
+ { 4, 3, 2 }
+ );
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void CreateStackLayerHelper(armnn::Graph& graph,
+ const armnn::StackDescriptor& descriptor,
+ const std::vector<armnn::TensorShape>& inputShapes,
+ const armnn::TensorShape& outputShape)
+{
+ // Creates the Stack layer
+ armnn::Layer* const stackLayer = graph.AddLayer<armnn::StackLayer>(descriptor, "stack");
+
+ // Creates extra layers
+ std::vector<armnn::Layer*> inputs;
+ for (unsigned int i=0; i<inputShapes.size(); ++i)
+ {
+ inputs.push_back(graph.AddLayer<armnn::InputLayer>(static_cast<int>(i), "input"));
+ }
+ armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ // Connects up
+ std::vector<armnn::TensorInfo> inputTensorInfos;
+ for (unsigned int i=0; i<inputs.size(); ++i)
+ {
+ inputTensorInfos.push_back(armnn::TensorInfo(inputShapes[i], armnn::DataType::Float32));
+ }
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ for (unsigned int i=0; i<inputs.size(); ++i)
+ {
+ Connect(inputs[i], stackLayer, inputTensorInfos[i], 0, i);
+ }
+ Connect(stackLayer, output, outputTensorInfo, 0, 0);
+}
+
+void StackValidateTensorShapesFromInputsMatchTest()
+{
+ armnn::Graph graph;
+
+ armnn::StackDescriptor descriptor;
+ descriptor.m_Axis = 0;
+ descriptor.m_NumInputs = 3;
+ descriptor.m_InputShape = armnn::TensorShape
+ (
+ { 2, 5 } // Defined input shape
+ );
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 2, 5 }, // Actual input shapes
+ { 2, 5 },
+ { 2, 5 }
+ };
+
+ // Creates the Stack layer
+ CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+ // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+ BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+}
+
+void StackValidateTensorShapesFromInputsNoMatchTest()
+{
+ armnn::Graph graph;
+
+ armnn::StackDescriptor descriptor;
+ descriptor.m_Axis = 0;
+ descriptor.m_NumInputs = 3;
+ descriptor.m_InputShape = armnn::TensorShape
+ (
+ { 2, 5 } // Defined input shape
+ );
+
+ const std::vector<armnn::TensorShape> inputShapes
+ {
+ { 2, 5 }, // Actual input shapes
+ { 2, 2 }, // Incorrectly shaped input tensor
+ { 2, 5 }
+ };
+
+ // Creates the Stack layer
+ CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
+
+ // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
+ BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+}
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index c9719aad04..0a9e3353d7 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -925,6 +925,13 @@ void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer*
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
}
+void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StackDescriptor& stackDescriptor,
+ const char* name)
+{
+ throw UnimplementedException("SerializerVisitor::VisitStackLayer not yet implemented");
+}
+
void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 0383d10555..8404a7f093 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -202,6 +202,10 @@ public:
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name = nullptr) override;
+ void VisitStackLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StackDescriptor& stackDescriptor,
+ const char* name = nullptr) override;
+
void VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index ea22fac9ce..26b98a22b2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -415,6 +415,14 @@ bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 36b8e77c38..dad07981fd 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -257,6 +257,11 @@ public:
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 324c1debc0..878602391c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -582,6 +582,91 @@ void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
}
//---------------------------------------------------------------
+void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateNumOutputs(workloadInfo, "StackQueueDescriptor", 1);
+
+ if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Must have the defined number of input tensors.");
+ }
+
+ // All inputs must have the same shape, which is defined in parameters
+ const TensorShape& inputShape = m_Parameters.m_InputShape;
+ for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+ {
+ if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: All input tensor shapes "
+ "must match the defined shape.");
+ }
+ }
+
+ // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
+ // since the output tensor has an additional dimension.
+ if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Axis may not be greater "
+ "than the number of input dimensions.");
+ }
+
+ // Output shape must be as inferred from the input shape
+ const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
+ for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
+ {
+ if (outputShape[i] != inputShape[i])
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+ "match shape inferred from input tensor.");
+ }
+ }
+
+ if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+ "match shape inferred from input tensor.");
+ }
+
+ for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
+ {
+ if (outputShape[i] != inputShape[i-1])
+ {
+ throw InvalidArgumentException("StackQueueDescriptor: Output tensor must "
+ "match shape inferred from input tensor.");
+ }
+ }
+
+ // Check the supported data types
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::Float16,
+ DataType::Boolean,
+ DataType::Signed32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ supportedTypes,
+ "StackQueueDescriptor");
+
+ for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
+ {
+ ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_InputTensorInfos[i],
+ "StackQueueDescriptor",
+ "InputTensor[0]",
+ "InputTensor[" + std::to_string(i) + "]");
+ }
+ ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_OutputTensorInfos[0],
+ "StackQueueDescriptor",
+ "InputTensor[0]",
+ "OutputTensor[0]");
+}
+
+//---------------------------------------------------------------
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
ValidateNumInputs(workloadInfo, "FullyConnectedQueueDescriptor", 1);
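
The validation rules above can be exercised directly through the new StackQueueDescriptor. A minimal illustrative sketch (the shapes are invented, not part of the patch): two [3, 4] inputs stacked along axis 1 must produce a [3, 2, 4] output, and Validate throws InvalidArgumentException if any of the checks above fail:

#include <backendsCommon/WorkloadData.hpp>

int main()
{
    using namespace armnn;

    StackQueueDescriptor descriptor;
    descriptor.m_Parameters = StackDescriptor(1, 2, TensorShape({ 3, 4 }));

    WorkloadInfo info;
    info.m_InputTensorInfos  = { TensorInfo(TensorShape({ 3, 4 }), DataType::Float32),
                                 TensorInfo(TensorShape({ 3, 4 }), DataType::Float32) };
    info.m_OutputTensorInfos = { TensorInfo(TensorShape({ 3, 2, 4 }), DataType::Float32) };

    // Passes all the checks above; a mismatched shape, axis or data type would throw.
    descriptor.Validate(info);

    return 0;
}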
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index d241f7b24f..f3d50699e6 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -110,6 +110,12 @@ struct ConcatQueueDescriptor : QueueDescriptorWithParameters<OriginsDescriptor>
// Deprecated. Use ConcatQueueDescriptor instead
using MergerQueueDescriptor = ConcatQueueDescriptor;
+// Stack layer workload data.
+struct StackQueueDescriptor : QueueDescriptorWithParameters<StackDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Activation layer workload data.
struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescriptor>
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 1c23e1774b..a24a325b2d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -729,6 +729,33 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Stack:
+ {
+ auto cLayer = boost::polymorphic_downcast<const StackLayer*>(&layer);
+
+ // Get vector of all inputs.
+ auto getTensorInfo = [&dataType](const InputSlot& slot)
+ {
+ return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+ };
+ auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfo);
+ auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfo);
+ std::vector<TensorInfo> inputs(beginI, endI);
+
+ auto getTensorInfoPtr = [](const TensorInfo& info)
+ {
+ return &info;
+ };
+ auto beginPtr = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtr = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> inputPtrs(beginPtr, endPtr);
+
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsStackSupported(inputPtrs, output, cLayer->GetParameters(), reason);
+
+ break;
+ }
case LayerType::StridedSlice:
{
auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
@@ -1130,6 +1157,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDep
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e09640f760..749a258a9d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -189,6 +189,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index b02ab7b301..6aff7596b5 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -408,6 +408,8 @@ DECLARE_LAYER_POLICY_2_PARAM(SpaceToDepth)
DECLARE_LAYER_POLICY_2_PARAM(Splitter)
+DECLARE_LAYER_POLICY_2_PARAM(Stack)
+
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
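
Note that this patch is front-end only: LayerSupportBase::IsStackSupported reports the layer as unsupported and IWorkloadFactory::CreateStack returns an empty pointer, so every backend keeps rejecting Stack until it overrides both. A hedged sketch of what such an opt-in could look like (the class name is hypothetical and the Float32-only policy is invented for illustration):

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <backendsCommon/LayerSupportBase.hpp>

namespace armnn
{

class HypotheticalStackLayerSupport : public LayerSupportBase
{
public:
    bool IsStackSupported(const std::vector<const TensorInfo*> inputs,
                          const TensorInfo& output,
                          const StackDescriptor& /*descriptor*/,
                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
    {
        // Accept Float32 only in this sketch; a real backend would check more.
        for (const TensorInfo* input : inputs)
        {
            if (input->GetDataType() != DataType::Float32)
            {
                if (reasonIfUnsupported.has_value())
                {
                    reasonIfUnsupported.value() = "Stack: only Float32 is supported in this sketch";
                }
                return false;
            }
        }
        return output.GetDataType() == DataType::Float32;
    }
};

} // namespace armnn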