Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.hpp       |  1
-rw-r--r--  src/armnn/LayersFwd.hpp           |  2
-rw-r--r--  src/armnn/Network.cpp             |  6
-rw-r--r--  src/armnn/Network.hpp             |  3
-rw-r--r--  src/armnn/layers/ResizeLayer.cpp  | 76
-rw-r--r--  src/armnn/layers/ResizeLayer.hpp  | 49
6 files changed, 137 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index dc3dc17c02..6c49eaca3a 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -53,6 +53,7 @@ enum class LayerType
    Quantize,
    Reshape,
    ResizeBilinear,
+    Resize,
    Rsqrt,
    Softmax,
    SpaceToBatchNd,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 9837cd349d..2e049ecbda 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -45,6 +45,7 @@
#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeBilinearLayer.hpp"
+#include "layers/ResizeLayer.hpp"
#include "layers/RsqrtLayer.hpp"
#include "layers/SoftmaxLayer.hpp"
#include "layers/SpaceToBatchNdLayer.hpp"
@@ -120,6 +121,7 @@ DECLARE_LAYER(PreCompiled)
DECLARE_LAYER(Prelu)
DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
+DECLARE_LAYER(Resize)
DECLARE_LAYER(ResizeBilinear)
DECLARE_LAYER(Rsqrt)
DECLARE_LAYER(Softmax)
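For readers unfamiliar with LayersFwd.hpp, the DECLARE_LAYER line is what ties the new LayerType::Resize enum value (added in InternalTypes.hpp above) to the ResizeLayer class at compile time. The snippet below is a simplified, self-contained sketch of that pattern, not the exact ArmNN macro; the trait name LayerTypeOfImpl and the tiny enum are stand-ins used purely for illustration.

#include <type_traits>

// Simplified sketch of the forward-declaration + trait-specialisation pattern behind
// DECLARE_LAYER: given a LayerType value, templated code can recover the layer class.
enum class LayerType { Reshape, Resize, ResizeBilinear };

class ReshapeLayer;
class ResizeLayer;
class ResizeBilinearLayer;

template <LayerType Type>
struct LayerTypeOfImpl;          // primary template intentionally left undefined

#define DECLARE_LAYER(LayerName)                   \
    template <>                                    \
    struct LayerTypeOfImpl<LayerType::LayerName>   \
    {                                              \
        using Type = LayerName##Layer;             \
    };

DECLARE_LAYER(Resize)            // maps LayerType::Resize -> ResizeLayer

template <LayerType Type>
using LayerTypeOf = typename LayerTypeOfImpl<Type>::Type;

static_assert(std::is_same<LayerTypeOf<LayerType::Resize>, ResizeLayer>::value,
              "LayerType::Resize resolves to ResizeLayer");

int main() { return 0; }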
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 58ccfb7813..63432da0ff 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1188,6 +1188,12 @@ resizeDescriptor, const char* name)
    return m_Graph->AddLayer<ResizeBilinearLayer>(resizeDescriptor,name);
}

+IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
+resizeDescriptor, const char* name)
+{
+    return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor,name);
+}
+
IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                                    const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 8db968a3f9..f0dfb1dd07 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -137,6 +137,9 @@ public:
    IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
                                              const char* name = nullptr) override;

+    IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+                                      const char* name = nullptr) override;
+
    IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
                                               const char* name = nullptr) override;
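Taken together with the Network.cpp change above, this gives graph-construction code a Resize entry point alongside the existing bilinear one. The following is a hedged usage sketch, not code from this commit: it assumes the corresponding INetwork::AddResizeLayer declaration lands in the public headers (as the override here implies) and only sets the ResizeDescriptor fields the layer implementation below actually reads (m_TargetWidth, m_TargetHeight, m_DataLayout); the helper name and the 224x224 target are invented for illustration.

#include <armnn/ArmNN.hpp>

// Hypothetical helper: resize the first output of `input` to 224x224, NHWC layout.
armnn::IConnectableLayer* AddResize224(armnn::INetwork& network,
                                       armnn::IConnectableLayer& input)
{
    armnn::ResizeDescriptor descriptor;
    descriptor.m_TargetWidth  = 224;
    descriptor.m_TargetHeight = 224;
    descriptor.m_DataLayout   = armnn::DataLayout::NHWC;

    armnn::IConnectableLayer* resize = network.AddResizeLayer(descriptor, "resize_224");
    input.GetOutputSlot(0).Connect(resize->GetInputSlot(0));
    return resize;
}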
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
new file mode 100644
index 0000000000..44b4d9df5f
--- /dev/null
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -0,0 +1,76 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ResizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+ResizeLayer::ResizeLayer(const ResizeDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Resize, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ResizeLayer::CreateWorkload(const Graph& graph,
+                                                       const IWorkloadFactory& factory) const
+{
+    ResizeQueueDescriptor descriptor;
+    return factory.CreateResize(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+ResizeLayer* ResizeLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ResizeLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+
+    const TensorShape& inputShape = inputShapes[0];
+    const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
+
+    unsigned int outWidth = m_Param.m_TargetWidth;
+    unsigned int outHeight = m_Param.m_TargetHeight;
+    unsigned int outChannels = inputShape[dimensionIndices.GetChannelsIndex()];
+    unsigned int outBatch = inputShape[0];
+
+    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
+        TensorShape( { outBatch, outHeight, outWidth, outChannels } ) :
+        TensorShape( { outBatch, outChannels, outHeight, outWidth });
+
+    return std::vector<TensorShape>({ tensorShape });
+}
+
+void ResizeLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void ResizeLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitResizeLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
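To make the shape arithmetic in InferOutputShapes concrete: batch and channel counts are carried over from the input, while height and width are replaced by the descriptor targets. The sketch below mirrors the NCHW branch with plain standard-library types instead of armnn::TensorShape; the helper name and the example dimensions are invented for illustration.

#include <array>
#include <cassert>

// Mirrors the NCHW branch of ResizeLayer::InferOutputShapes.
std::array<unsigned int, 4> InferResizeOutputNchw(const std::array<unsigned int, 4>& input, // {N, C, H, W}
                                                  unsigned int targetHeight,
                                                  unsigned int targetWidth)
{
    return { input[0], input[1], targetHeight, targetWidth };
}

int main()
{
    // A 1x3x480x640 input resized to a 224x224 target becomes 1x3x224x224.
    const auto out = InferResizeOutputNchw({ 1, 3, 480, 640 }, 224, 224);
    assert((out == std::array<unsigned int, 4>{ 1, 3, 224, 224 }));
    return 0;
}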
diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp
new file mode 100644
index 0000000000..0d309ff49b
--- /dev/null
+++ b/src/armnn/layers/ResizeLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a resize operation.
+class ResizeLayer : public LayerWithParameters<ResizeDescriptor>
+{
+public:
+    /// Makes a workload for the Resize type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    ResizeLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ResizeLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+    /// otherwise infers the output shapes from the given input shapes and layer properties.
+    /// @param [in] inputShapes The input shapes the layer has.
+    /// @return A vector of the inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a ResizeLayer.
+    /// @param [in] param ResizeDescriptor to configure the resize operation.
+    /// @param [in] name Optional name for the layer.
+    ResizeLayer(const ResizeDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~ResizeLayer() = default;
+};
+
+} // namespace armnn
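The Accept override above is the layer's hook into ArmNN's visitor mechanism: whatever object walks the network receives this layer through VisitResizeLayer together with its descriptor and name. Below is a hedged sketch of a consumer, assuming the LayerVisitorBase convenience base with the no-throw default policy is available and that VisitResizeLayer has the parameter list implied by the Accept call in ResizeLayer.cpp; the class name and the logging are illustrative only.

#include <armnn/LayerVisitorBase.hpp>
#include <iostream>

// Illustrative visitor that only reacts to Resize layers; every other Visit* call
// falls through to the do-nothing defaults provided by LayerVisitorBase.
class ResizeInspector : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitResizeLayer(const armnn::IConnectableLayer* /*layer*/,
                          const armnn::ResizeDescriptor& descriptor,
                          const char* name = nullptr) override
    {
        std::cout << "Resize layer '" << (name ? name : "<unnamed>") << "': "
                  << descriptor.m_TargetWidth << "x" << descriptor.m_TargetHeight << std::endl;
    }
};

// Usage (sketch): constructing a ResizeInspector and handing it to the network's
// Accept entry point would print one line per Resize layer in the graph.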