diff options
Diffstat (limited to 'src')
-rw-r--r-- | src/armnn/InternalTypes.hpp | 1 | ||||
-rw-r--r-- | src/armnn/LayersFwd.hpp | 2 | ||||
-rw-r--r-- | src/armnn/Network.cpp | 6 | ||||
-rw-r--r-- | src/armnn/Network.hpp | 3 | ||||
-rw-r--r-- | src/armnn/layers/ResizeLayer.cpp | 76 | ||||
-rw-r--r-- | src/armnn/layers/ResizeLayer.hpp | 49 | ||||
-rw-r--r-- | src/armnnSerializer/Serializer.cpp | 7 | ||||
-rw-r--r-- | src/armnnSerializer/Serializer.hpp | 4 | ||||
-rw-r--r-- | src/backends/backendsCommon/LayerSupportBase.cpp | 8 | ||||
-rw-r--r-- | src/backends/backendsCommon/LayerSupportBase.hpp | 5 | ||||
-rw-r--r-- | src/backends/backendsCommon/WorkloadData.hpp | 5 | ||||
-rw-r--r-- | src/backends/backendsCommon/WorkloadFactory.cpp | 17 | ||||
-rw-r--r-- | src/backends/backendsCommon/WorkloadFactory.hpp | 3 | ||||
-rw-r--r-- | src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 2 |
14 files changed, 188 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index dc3dc17c02..6c49eaca3a 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -53,6 +53,7 @@ enum class LayerType Quantize, Reshape, ResizeBilinear, + Resize, Rsqrt, Softmax, SpaceToBatchNd, diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index 9837cd349d..2e049ecbda 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -45,6 +45,7 @@ #include "layers/QuantizeLayer.hpp" #include "layers/ReshapeLayer.hpp" #include "layers/ResizeBilinearLayer.hpp" +#include "layers/ResizeLayer.hpp" #include "layers/RsqrtLayer.hpp" #include "layers/SoftmaxLayer.hpp" #include "layers/SpaceToBatchNdLayer.hpp" @@ -120,6 +121,7 @@ DECLARE_LAYER(PreCompiled) DECLARE_LAYER(Prelu) DECLARE_LAYER(Quantize) DECLARE_LAYER(Reshape) +DECLARE_LAYER(Resize) DECLARE_LAYER(ResizeBilinear) DECLARE_LAYER(Rsqrt) DECLARE_LAYER(Softmax) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 58ccfb7813..63432da0ff 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -1188,6 +1188,12 @@ resizeDescriptor, const char* name) return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name); } +IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor& +resizeDescriptor, const char* name) +{ + return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor,name); +} + IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc, const char* name) { diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index 8db968a3f9..f0dfb1dd07 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -137,6 +137,9 @@ public: IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc, const char* name = nullptr) override; + IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor, + const char* name = nullptr) override; + IConnectableLayer* AddL2NormalizationLayer(const 
L2NormalizationDescriptor& desc, const char* name = nullptr) override; diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp new file mode 100644 index 0000000000..44b4d9df5f --- /dev/null +++ b/src/armnn/layers/ResizeLayer.cpp @@ -0,0 +1,76 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#include "ResizeLayer.hpp" + +#include "LayerCloneBase.hpp" + +#include <armnn/TypesUtils.hpp> + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +#include <DataLayoutIndexed.hpp> + +using namespace armnnUtils; + +namespace armnn +{ + +ResizeLayer::ResizeLayer(const ResizeDescriptor& param, const char* name) + : LayerWithParameters(1, 1, LayerType::Resize, param, name) +{ +} + +std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + ResizeQueueDescriptor descriptor; + return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +ResizeLayer* ResizeLayer::Clone(Graph& graph) const +{ + return CloneBase<ResizeLayer>(graph, m_Param, GetName()); +} + +std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const +{ + BOOST_ASSERT(inputShapes.size() == 1); + + const TensorShape& inputShape = inputShapes[0]; + const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout; + + unsigned int outWidth = m_Param.m_TargetWidth; + unsigned int outHeight = m_Param.m_TargetHeight; + unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()]; + unsigned int outBatch = inputShape[0]; + + TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ? 
+ TensorShape( { outBatch, outHeight, outWidth, outChannels } ) : + TensorShape( { outBatch, outChannels, outHeight, outWidth }); + + return std::vector<TensorShape>({ tensorShape }); +} + +void ResizeLayer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +void ResizeLayer::Accept(ILayerVisitor& visitor) const +{ + visitor.VisitResizeLayer(this, GetParameters(), GetName()); +} + +} // namespace armnn diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp new file mode 100644 index 0000000000..0d309ff49b --- /dev/null +++ b/src/armnn/layers/ResizeLayer.hpp @@ -0,0 +1,49 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once + +#include "LayerWithParameters.hpp" + +namespace armnn +{ + +/// This layer represents a resize operation. +class ResizeLayer : public LayerWithParameters<ResizeDescriptor> +{ +public: + /// Makes a workload for the Resize type. + /// @param [in] graph The graph where this layer can be found. + /// @param [in] factory The workload factory which will create the workload. + /// @return A pointer to the created workload, or nullptr if not created. + virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const override; + + /// Creates a dynamically-allocated copy of this layer. + /// @param [in] graph The graph into which this layer is being cloned. + ResizeLayer* Clone(Graph& graph) const override; + + /// Check if the input tensor shape(s) + /// will lead to a valid configuration of @ref ResizeLayer. 
+ void ValidateTensorShapesFromInputs() override; + + /// By default returns inputShapes if the number of inputs are equal to number of outputs, + /// otherwise infers the output shapes from given input shapes and layer properties. + /// @param [in] inputShapes The input shapes layer has. + /// @return A vector to the inferred output shape. + std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override; + + void Accept(ILayerVisitor& visitor) const override; + +protected: + /// Constructor to create a ResizeLayer. + /// @param [in] param ResizeDescriptor to configure the resize operation. + /// @param [in] name Optional name for the layer. + ResizeLayer(const ResizeDescriptor& param, const char* name); + + /// Default destructor + ~ResizeLayer() = default; +}; + +} // namespace armnn diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 208262b699..2d5877db63 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -656,6 +656,13 @@ void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer); } +void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer, + const armnn::ResizeDescriptor& resizeDescriptor, + const char* name) +{ + throw armnn::Exception("SerializerVisitor::VisitResizeLayer is not yet implemented"); +} + void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name) { auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt); diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp index 31f7d05198..2529796b77 100644 --- a/src/armnnSerializer/Serializer.hpp +++ b/src/armnnSerializer/Serializer.hpp @@ -174,6 +174,10 @@ public: const armnn::ResizeBilinearDescriptor& resizeDescriptor, const char* name = nullptr) override; + void 
VisitResizeLayer(const armnn::IConnectableLayer* layer, + const armnn::ResizeDescriptor& resizeDescriptor, + const char* name = nullptr) override; + void VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override; diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 2eb0e4161e..6c25f87c9a 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -377,6 +377,14 @@ bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input, return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } +bool LayerSupportBase::IsResizeSupported(const TensorInfo& input, + const TensorInfo& output, + const ResizeDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); +} + bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input, const TensorInfo &output, Optional<std::string &> reasonIfUnsupported) const diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 52ba5b216f..7f63ccfbb1 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -238,6 +238,11 @@ public: const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsResizeSupported(const TensorInfo& input, + const TensorInfo& output, + const ResizeDescriptor& descriptor, + Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 744758385b..fa9e1cdf52 100644 --- 
a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -268,6 +268,11 @@ struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilin void Validate(const WorkloadInfo& workloadInfo) const; }; +struct ResizeQueueDescriptor : QueueDescriptorWithParameters<ResizeDescriptor> +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + struct FakeQuantizationQueueDescriptor : QueueDescriptorWithParameters<FakeQuantizationDescriptor> { FakeQuantizationQueueDescriptor() diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 7cda3fe884..b74b6afeb3 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -640,6 +640,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::Resize: + { + auto cLayer = boost::polymorphic_downcast<const ResizeLayer*>(&layer); + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = layerSupportObject->IsResizeSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::ResizeBilinear: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -1080,6 +1091,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBi return std::unique_ptr<IWorkload>(); } +std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + return std::unique_ptr<IWorkload>(); +} + std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const { diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp 
b/src/backends/backendsCommon/WorkloadFactory.hpp index 978d3a3a98..02a800265c 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++ b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -167,6 +167,9 @@ public: virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const; + virtual std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor, + const WorkloadInfo& info) const; + virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info) const; diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 7c9d0f52c3..6f3a9d3248 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -394,6 +394,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Prelu) DECLARE_LAYER_POLICY_1_PARAM(Division) +DECLARE_LAYER_POLICY_2_PARAM(Resize) + DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear) DECLARE_LAYER_POLICY_2_PARAM(Reshape) |