Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.hpp                |   1
-rw-r--r--  src/armnn/LayersFwd.hpp                    |   2
-rw-r--r--  src/armnn/Network.cpp                      |   5
-rw-r--r--  src/armnn/Network.hpp                      |   2
-rw-r--r--  src/armnn/layers/PreluLayer.cpp            | 121
-rw-r--r--  src/armnn/layers/PreluLayer.hpp            |  49
-rw-r--r--  src/armnn/test/LayerValidateOutputTest.cpp |  23
7 files changed, 203 insertions, 0 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 377fb925f1..a1434eae5e 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -49,6 +49,7 @@ enum class LayerType
Permute,
Pooling2d,
PreCompiled,
+ Prelu,
Quantize,
Reshape,
ResizeBilinear,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index de9717caeb..a801431f84 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -41,6 +41,7 @@
#include "layers/PermuteLayer.hpp"
#include "layers/Pooling2dLayer.hpp"
#include "layers/PreCompiledLayer.hpp"
+#include "layers/PreluLayer.hpp"
#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeBilinearLayer.hpp"
@@ -115,6 +116,7 @@ DECLARE_LAYER(Pad)
DECLARE_LAYER(Permute)
DECLARE_LAYER(Pooling2d)
DECLARE_LAYER(PreCompiled)
+DECLARE_LAYER(Prelu)
DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(ResizeBilinear)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3e7d4d54dd..75b63e49f6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1003,6 +1003,11 @@ IConnectableLayer* Network::AddSwitchLayer(const char* name)
return m_Graph->AddLayer<SwitchLayer>(name);
}
+IConnectableLayer* Network::AddPreluLayer(const char* name)
+{
+ return m_Graph->AddLayer<PreluLayer>(name);
+}
+
void Network::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2648c3f48f..e1379d0014 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -185,6 +185,8 @@ public:
IConnectableLayer* AddSwitchLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
+
void Accept(ILayerVisitor& visitor) const override;
private:
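For orientation, here is a minimal sketch of how the new AddPreluLayer entry point could be driven through the public INetwork API. It assumes only the signature added above (plus the matching virtual on armnn::INetwork, which lives outside src/armnn and is not part of this diff); tensor-info setup is omitted for brevity.

    #include <armnn/INetwork.hpp>

    // Sketch only: builds the input -> prelu <- alpha -> output wiring.
    void BuildPreluNetwork()
    {
        armnn::INetworkPtr network = armnn::INetwork::Create();

        // PreluLayer takes two inputs: slot 0 is the data tensor,
        // slot 1 is the alpha (slope) tensor.
        armnn::IConnectableLayer* input  = network->AddInputLayer(0, "input");
        armnn::IConnectableLayer* alpha  = network->AddInputLayer(1, "alpha");
        armnn::IConnectableLayer* prelu  = network->AddPreluLayer("prelu");
        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).Connect(prelu->GetInputSlot(0));
        alpha->GetOutputSlot(0).Connect(prelu->GetInputSlot(1));
        prelu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Each output slot would also need SetTensorInfo(...) before the
        // network can be optimized; omitted here.
    }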
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
new file mode 100644
index 0000000000..6040248391
--- /dev/null
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PreluLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+PreluLayer::PreluLayer(const char* name)
+ : Layer(2, 1, LayerType::Prelu, name)
+{}
+
+std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ PreluQueueDescriptor descriptor;
+
+ return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+PreluLayer* PreluLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<PreluLayer>(graph, GetName());
+
+ return std::move(layer);
+}
+
+std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ BOOST_ASSERT(inputShapes.size() == 2);
+
+ const TensorShape& inputShape = inputShapes[0];
+ const TensorShape& alphaShape = inputShapes[1];
+
+ const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
+ const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
+
+ BOOST_ASSERT(inputShapeDimensions > 0);
+ BOOST_ASSERT(alphaShapeDimensions > 0);
+
+ // The size of the output is the maximum size along each dimension of the input operands;
+ // matching starts with the trailing dimensions and works its way forward
+
+ unsigned int outputDimensions = std::max(inputShapeDimensions, alphaShapeDimensions);
+
+ TensorShape outputShape(outputDimensions);
+
+ int inputShapeIndex = boost::numeric_cast<int>(inputShapeDimensions) - 1;
+ int alphaShapeIndex = boost::numeric_cast<int>(alphaShapeDimensions) - 1;
+ unsigned int outputShapeIndex = outputDimensions - 1;
+
+ // Loop backwards through the common part of the shapes
+ while (inputShapeIndex >= 0 && alphaShapeIndex >= 0)
+ {
+ unsigned int inputDimension = inputShape[boost::numeric_cast<unsigned int>(inputShapeIndex)];
+ unsigned int alphaDimension = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
+
+ // Check that the inputs are broadcast compatible
+ BOOST_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
+ "PreluLayer: Dimensions should either match or one should be of size 1");
+
+ outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
+
+ inputShapeIndex--;
+ alphaShapeIndex--;
+ outputShapeIndex--;
+ }
+
+ // Loop backwards through the remaining part of the input shape (if any)
+ while (inputShapeIndex >= 0)
+ {
+ outputShape[outputShapeIndex] = inputShape[boost::numeric_cast<unsigned int>(inputShapeIndex)];
+
+ inputShapeIndex--;
+ outputShapeIndex--;
+ }
+
+ // Loop backwards through the remaining part of the alpha shape (if any)
+ while (alphaShapeIndex >= 0)
+ {
+ outputShape[outputShapeIndex] = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
+
+ alphaShapeIndex--;
+ outputShapeIndex--;
+ }
+
+ return { outputShape };
+}
+
+void PreluLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(2, CHECK_LOCATION());
+
+ std::vector<TensorShape> inferredShapes = InferOutputShapes(
+ {
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
+ });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void PreluLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitPreluLayer(this, GetName());
+}
+
+} // namespace armnn
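The shape inference above implements NumPy-style trailing-dimension broadcasting: the two shapes are aligned at their last dimension, each output dimension is the maximum of the two aligned sizes, and mismatching sizes are only legal when one side is 1. A simplified standalone sketch of the same rule, without the Boost helpers (illustrative only, not the Arm NN code path):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    std::vector<unsigned int> BroadcastShapes(std::vector<unsigned int> a,
                                              std::vector<unsigned int> b)
    {
        if (a.size() < b.size()) { std::swap(a, b); } // make 'a' the longer shape

        std::vector<unsigned int> out(a);             // leading dims come from 'a'
        const size_t offset = a.size() - b.size();
        for (size_t i = 0; i < b.size(); ++i)
        {
            const unsigned int x = a[offset + i];
            const unsigned int y = b[i];
            assert(x == y || x == 1 || y == 1);       // broadcast compatibility
            out[offset + i] = std::max(x, y);
        }
        return out;
    }

    // Matches the unit test below:
    // BroadcastShapes({4, 1, 2}, {5, 4, 3, 1}) yields {5, 4, 3, 2}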
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
new file mode 100644
index 0000000000..54e57b22c1
--- /dev/null
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+// This layer represents a PReLU activation operation.
+class PreluLayer : public Layer
+{
+public:
+ /// Makes a workload for the PReLU type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ PreluLayer* Clone(Graph& graph) const override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector containing the inferred output shapes.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s) will lead to a valid configuration of @ref PreluLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a PreluLayer.
+ /// @param [in] name Optional name for the layer.
+ PreluLayer(const char* name);
+
+ /// Default destructor
+ ~PreluLayer() = default;
+};
+
+} // namespace armnn
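The header above only declares the graph-level layer; the element-wise semantics of PReLU are f(x) = x for x > 0 and f(x) = alpha * x otherwise, with alpha broadcast against the input as described earlier. A minimal reference sketch of the computation (the backend workloads themselves are not part of this patch):

    #include <cstddef>
    #include <vector>

    // Element-wise PReLU over a flat buffer. Assumes 'alpha' has already
    // been broadcast to the same length as 'input'.
    std::vector<float> PreluReference(const std::vector<float>& input,
                                      const std::vector<float>& alpha)
    {
        std::vector<float> output(input.size());
        for (std::size_t i = 0; i < input.size(); ++i)
        {
            output[i] = input[i] > 0.0f ? input[i] : alpha[i] * input[i];
        }
        return output;
    }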
diff --git a/src/armnn/test/LayerValidateOutputTest.cpp b/src/armnn/test/LayerValidateOutputTest.cpp
index acefd51110..d47959cb65 100644
--- a/src/armnn/test/LayerValidateOutputTest.cpp
+++ b/src/armnn/test/LayerValidateOutputTest.cpp
@@ -58,4 +58,27 @@ BOOST_AUTO_TEST_CASE(TestSpaceToDepthInferOutputShape)
BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
}
+BOOST_AUTO_TEST_CASE(TestPreluInferOutputShape)
+{
+ armnn::Graph graph;
+
+ armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
+
+ std::vector<armnn::TensorShape> inputShapes
+ {
+ { 4, 1, 2 }, // Input shape
+ { 5, 4, 3, 1 } // Alpha shape
+ };
+
+ const std::vector<armnn::TensorShape> expectedOutputShapes
+ {
+ { 5, 4, 3, 2 } // Output shape
+ };
+
+ const std::vector<armnn::TensorShape> outputShapes = preluLayer->InferOutputShapes(inputShapes);
+
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+}
+
BOOST_AUTO_TEST_SUITE_END()