author     Matteo Martincigh <matteo.martincigh@arm.com>  2019-06-12 15:42:18 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2019-06-17 16:15:22 +0000
commit     0e406eed386a4ea015ec703c84a74ea775d88b99 (patch)
tree       c176eab811b78ce83e86bca2db883e9770708eb2
parent     e52211e1544a30d24b29523c389116a9e4446e8c (diff)
download   armnn-0e406eed386a4ea015ec703c84a74ea775d88b99.tar.gz
IVGCVSW-3267 Add Arm NN front end support for the new Prelu Activation layer
* Added new PreluLayer class
* Made necessary changes to ILayerSupport, ILayerVisitor, etc.
* Added unit tests

Change-Id: Ifcfb78e823bb5a245ed1dad15290d2f60115c882
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
-rw-r--r--  Android.mk                                                      |   1
-rw-r--r--  CMakeLists.txt                                                  |  10
-rw-r--r--  include/armnn/ILayerSupport.hpp                                 |   5
-rw-r--r--  include/armnn/ILayerVisitor.hpp                                 |   6
-rw-r--r--  include/armnn/INetwork.hpp                                      |   5
-rw-r--r--  include/armnn/LayerSupport.hpp                                  |   8
-rw-r--r--  include/armnn/LayerVisitorBase.hpp                              |   3
-rw-r--r--  src/armnn/InternalTypes.hpp                                     |   1
-rw-r--r--  src/armnn/LayersFwd.hpp                                         |   2
-rw-r--r--  src/armnn/Network.cpp                                           |   5
-rw-r--r--  src/armnn/Network.hpp                                           |   2
-rw-r--r--  src/armnn/layers/PreluLayer.cpp                                 | 121
-rw-r--r--  src/armnn/layers/PreluLayer.hpp                                 |  49
-rw-r--r--  src/armnn/test/LayerValidateOutputTest.cpp                      |  23
-rw-r--r--  src/armnnSerializer/Serializer.cpp                              |   6
-rw-r--r--  src/armnnSerializer/Serializer.hpp                              |   3
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp                |   8
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp                |   5
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                    |  40
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                    |   5
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                 |  17
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp                 |   3
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  |   2
23 files changed, 326 insertions, 4 deletions
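For orientation before the diff: a minimal sketch of how a client could exercise the new front-end entry point once this patch lands. The shapes and layer names are illustrative only; the INetwork/IConnectableLayer plumbing is the existing Arm NN graph-building API, and AddPreluLayer is the method this change introduces.

// Sketch: PReLU computes f(x) = x for x > 0 and f(x) = alpha * x otherwise,
// with alpha broadcast against the input (see PreluLayer::InferOutputShapes).
#include <armnn/ArmNN.hpp>

void BuildPreluNetwork()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input  = network->AddInputLayer(0, "input");
    armnn::IConnectableLayer* alpha  = network->AddInputLayer(1, "alpha");
    armnn::IConnectableLayer* prelu  = network->AddPreluLayer("prelu"); // new in this patch
    armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(prelu->GetInputSlot(0));  // input slot 0: data
    alpha->GetOutputSlot(0).Connect(prelu->GetInputSlot(1));  // input slot 1: alpha
    prelu->GetOutputSlot(0).Connect(output->GetInputSlot(0));
}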
diff --git a/Android.mk b/Android.mk
index b5376acbb9..7f3080b512 100644
--- a/Android.mk
+++ b/Android.mk
@@ -123,6 +123,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/PermuteLayer.cpp \
src/armnn/layers/Pooling2dLayer.cpp \
src/armnn/layers/PreCompiledLayer.cpp \
+ src/armnn/layers/PreluLayer.cpp \
src/armnn/layers/QuantizeLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp \
src/armnn/layers/ResizeBilinearLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1b6abb6ff2..2971b0d73e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -296,18 +296,20 @@ list(APPEND armnn_sources
src/armnn/layers/DivisionLayer.hpp
src/armnn/layers/PreCompiledLayer.hpp
src/armnn/layers/PreCompiledLayer.cpp
+ src/armnn/layers/PreluLayer.hpp
+ src/armnn/layers/PreluLayer.cpp
src/armnn/layers/ReshapeLayer.hpp
src/armnn/layers/ReshapeLayer.cpp
- src/armnn/layers/SpaceToBatchNdLayer.hpp
- src/armnn/layers/SpaceToBatchNdLayer.cpp
- src/armnn/layers/SpaceToDepthLayer.hpp
- src/armnn/layers/SpaceToDepthLayer.cpp
src/armnn/layers/ResizeBilinearLayer.hpp
src/armnn/layers/ResizeBilinearLayer.cpp
src/armnn/layers/RsqrtLayer.cpp
src/armnn/layers/RsqrtLayer.hpp
src/armnn/layers/SoftmaxLayer.hpp
src/armnn/layers/SoftmaxLayer.cpp
+ src/armnn/layers/SpaceToBatchNdLayer.hpp
+ src/armnn/layers/SpaceToBatchNdLayer.cpp
+ src/armnn/layers/SpaceToDepthLayer.hpp
+ src/armnn/layers/SpaceToDepthLayer.cpp
src/armnn/layers/SplitterLayer.hpp
src/armnn/layers/SplitterLayer.cpp
src/armnn/layers/StridedSliceLayer.cpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 4c113d3427..324a9f5a2d 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -234,6 +234,11 @@ public:
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsPreluSupported(const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsQuantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index ab83dbfb05..9519c8b0c1 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -290,6 +290,12 @@ public:
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name = nullptr) = 0;
+ /// Function that a PReLU activation layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param name - Optional name for the layer.
+ virtual void VisitPreluLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) = 0;
+
/// Function a quantize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param name - Optional name for the layer.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index e5ebbc4e44..cacca33caf 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -427,6 +427,11 @@ public:
/// @return - Interface for configuring the layer.
virtual IConnectableLayer* AddSwitchLayer(const char* name = nullptr) = 0;
+ /// Adds a PReLU layer to the network.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddPreluLayer(const char* name = nullptr) = 0;
+
virtual void Accept(ILayerVisitor& visitor) const = 0;
protected:
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index d58aa8731a..673193f330 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -282,6 +282,14 @@ bool IsPreCompiledSupported(const BackendId& backend,
size_t reasonIfUnsupportedMaxLength = 1024);
/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsPreluSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
bool IsPooling2dSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
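The deprecated free-function form above could be called as sketched below; the TensorInfo shapes are placeholder values chosen to be broadcast compatible, and CpuRef is just one possible backend id.

// Sketch: query PReLU support on a backend through the free function.
#include <armnn/LayerSupport.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

bool CheckPreluSupport()
{
    armnn::TensorInfo inputInfo ({ 1, 4, 1, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo alphaInfo ({ 1, 4, 3, 1 }, armnn::DataType::Float32);
    armnn::TensorInfo outputInfo({ 1, 4, 3, 2 }, armnn::DataType::Float32);

    char reason[1024] = {};
    return armnn::IsPreluSupported(armnn::Compute::CpuRef,
                                   inputInfo, alphaInfo, outputInfo,
                                   reason, sizeof(reason));
}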
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index b4a2ac7870..48fc2bbb0b 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -151,6 +151,9 @@ public:
const Pooling2dDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitPreluLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitQuantizeLayer(const IConnectableLayer*,
const char*) override { DefaultPolicy::Apply(__func__); }
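To show how the new hook composes with the base class above, a sketch of a client visitor that reacts only to PReLU layers. It assumes VisitorNoThrowPolicy, the no-op policy declared alongside LayerVisitorBase, so every other layer type falls through to the default handling.

// Sketch: count the PReLU layers in a network, ignoring everything else.
#include <armnn/LayerVisitorBase.hpp>

class PreluCounter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    void VisitPreluLayer(const armnn::IConnectableLayer* /*layer*/,
                         const char* /*name*/) override
    {
        ++m_Count;
    }

    unsigned int m_Count = 0;
};

// Usage: PreluCounter counter; network->Accept(counter); then read counter.m_Count.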
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 377fb925f1..a1434eae5e 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -49,6 +49,7 @@ enum class LayerType
Permute,
Pooling2d,
PreCompiled,
+ Prelu,
Quantize,
Reshape,
ResizeBilinear,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index de9717caeb..a801431f84 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -41,6 +41,7 @@
#include "layers/PermuteLayer.hpp"
#include "layers/Pooling2dLayer.hpp"
#include "layers/PreCompiledLayer.hpp"
+#include "layers/PreluLayer.hpp"
#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeBilinearLayer.hpp"
@@ -115,6 +116,7 @@ DECLARE_LAYER(Pad)
DECLARE_LAYER(Permute)
DECLARE_LAYER(Pooling2d)
DECLARE_LAYER(PreCompiled)
+DECLARE_LAYER(Prelu)
DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(ResizeBilinear)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 3e7d4d54dd..75b63e49f6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1003,6 +1003,11 @@ IConnectableLayer* Network::AddSwitchLayer(const char* name)
return m_Graph->AddLayer<SwitchLayer>(name);
}
+IConnectableLayer* Network::AddPreluLayer(const char* name)
+{
+ return m_Graph->AddLayer<PreluLayer>(name);
+}
+
void Network::Accept(ILayerVisitor& visitor) const
{
for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 2648c3f48f..e1379d0014 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -185,6 +185,8 @@ public:
IConnectableLayer* AddSwitchLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
+
void Accept(ILayerVisitor& visitor) const override;
private:
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
new file mode 100644
index 0000000000..6040248391
--- /dev/null
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -0,0 +1,121 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PreluLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+PreluLayer::PreluLayer(const char* name)
+ : Layer(2, 1, LayerType::Prelu, name)
+{}
+
+std::unique_ptr<IWorkload> PreluLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ PreluQueueDescriptor descriptor;
+
+ return factory.CreatePrelu(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+PreluLayer* PreluLayer::Clone(Graph& graph) const
+{
+ auto layer = CloneBase<PreluLayer>(graph, GetName());
+
+ return std::move(layer);
+}
+
+std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ BOOST_ASSERT(inputShapes.size() == 2);
+
+ const TensorShape& inputShape = inputShapes[0];
+ const TensorShape& alphaShape = inputShapes[1];
+
+ const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
+ const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
+
+ BOOST_ASSERT(inputShapeDimensions > 0);
+ BOOST_ASSERT(alphaShapeDimensions > 0);
+
+ // The size of the output is the maximum size along each dimension of the input operands;
+ // matching starts at the trailing dimensions and works backwards towards the leading ones
+
+ unsigned int outputDimensions = std::max(inputShapeDimensions, alphaShapeDimensions);
+
+ TensorShape outputShape(outputDimensions);
+
+ int inputShapeIndex = boost::numeric_cast<int>(inputShapeDimensions) - 1;
+ int alphaShapeIndex = boost::numeric_cast<int>(alphaShapeDimensions) - 1;
+ unsigned int outputShapeIndex = outputDimensions - 1;
+
+ // Loop backwards through the common part of the shapes
+ while (inputShapeIndex >= 0 && alphaShapeIndex >= 0)
+ {
+ unsigned int inputDimension = inputShape[boost::numeric_cast<unsigned int>(inputShapeIndex)];
+ unsigned int alphaDimension = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
+
+ // Check that the inputs are broadcast compatible
+ BOOST_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
+ "PreluLayer: Dimensions should either match or one should be of size 1");
+
+ outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
+
+ inputShapeIndex--;
+ alphaShapeIndex--;
+ outputShapeIndex--;
+ }
+
+ // Loop backwards through the remaining part of the input shape (if any)
+ while (inputShapeIndex >= 0)
+ {
+ outputShape[outputShapeIndex] = inputShape[boost::numeric_cast<unsigned int>(inputShapeIndex)];
+
+ inputShapeIndex--;
+ outputShapeIndex--;
+ }
+
+ // Loop backwards through the remaining part of the alpha shape (if any)
+ while (alphaShapeIndex >= 0)
+ {
+ outputShape[outputShapeIndex] = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
+
+ alphaShapeIndex--;
+ outputShapeIndex--;
+ }
+
+ return { outputShape };
+}
+
+void PreluLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(2, CHECK_LOCATION());
+
+ std::vector<TensorShape> inferredShapes = InferOutputShapes(
+ {
+ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+ GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
+ });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void PreluLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitPreluLayer(this, GetName());
+}
+
+} // namespace armnn
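The shape inference in PreluLayer::InferOutputShapes above is NumPy-style trailing-dimension broadcasting: align the two shapes at their last dimension, take the per-dimension maximum, and require each aligned pair to match or contain a 1. A compact standalone restatement (an illustrative sketch, not code from the patch):

// Sketch of the rule InferOutputShapes implements, e.g.
// input { 4, 1, 2 } and alpha { 5, 4, 3, 1 } broadcast to { 5, 4, 3, 2 }.
#include <algorithm>
#include <vector>

std::vector<unsigned int> BroadcastShape(std::vector<unsigned int> a,
                                         std::vector<unsigned int> b)
{
    if (a.size() < b.size()) { std::swap(a, b); } // make 'a' the longer shape
    std::vector<unsigned int> out(a);             // leading dimensions come from 'a'
    for (size_t i = 0; i < b.size(); ++i)         // walk the trailing dimensions
    {
        unsigned int& o = out[out.size() - 1 - i];
        unsigned int  d = b[b.size()   - 1 - i];
        o = std::max(o, d);                       // valid when o == d, o == 1 or d == 1
    }
    return out;
}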
diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp
new file mode 100644
index 0000000000..54e57b22c1
--- /dev/null
+++ b/src/armnn/layers/PreluLayer.hpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+// This layer represents a PReLU activation operation.
+class PreluLayer : public Layer
+{
+public:
+ /// Makes a workload for the PReLU type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ PreluLayer* Clone(Graph& graph) const override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector containing the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref PreluLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a PreluLayer.
+ /// @param [in] name Optional name for the layer.
+ PreluLayer(const char* name);
+
+ /// Default destructor
+ ~PreluLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnn/test/LayerValidateOutputTest.cpp b/src/armnn/test/LayerValidateOutputTest.cpp
index acefd51110..d47959cb65 100644
--- a/src/armnn/test/LayerValidateOutputTest.cpp
+++ b/src/armnn/test/LayerValidateOutputTest.cpp
@@ -58,4 +58,27 @@ BOOST_AUTO_TEST_CASE(TestSpaceToDepthInferOutputShape)
BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
}
+BOOST_AUTO_TEST_CASE(TestPreluInferOutputShape)
+{
+ armnn::Graph graph;
+
+ armnn::PreluLayer* const preluLayer = graph.AddLayer<armnn::PreluLayer>("prelu");
+
+ std::vector<armnn::TensorShape> inputShapes
+ {
+ { 4, 1, 2 }, // Input shape
+ { 5, 4, 3, 1} // Alpha shape
+ };
+
+ const std::vector<armnn::TensorShape> expectedOutputShapes
+ {
+ { 5, 4, 3, 2 } // Output shape
+ };
+
+ const std::vector<armnn::TensorShape> outputShapes = preluLayer->InferOutputShapes(inputShapes);
+
+ BOOST_CHECK(outputShapes.size() == 1);
+ BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 131bb95608..012ed666f1 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -709,6 +709,12 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye
CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
}
+void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
+ const char* name)
+{
+ throw UnimplementedException("SerializerVisitor::VisitPreluLayer not yet implemented");
+}
+
void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
{
auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index c9416bbfd5..aae879993e 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -160,6 +160,9 @@ public:
const armnn::Pooling2dDescriptor& pooling2dDescriptor,
const char* name = nullptr) override;
+ void VisitPreluLayer(const armnn::IConnectableLayer* layer,
+ const char* name = nullptr) override;
+
void VisitQuantizeLayer(const armnn::IConnectableLayer* layer,
const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 48705c89b2..12e4ee81ae 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -348,6 +348,14 @@ bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsPreluSupported(const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
const armnn::TensorInfo& output,
armnn::Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 4921cf9b3e..d035dfcd62 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -221,6 +221,11 @@ public:
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsPreluSupported(const TensorInfo& input,
+ const TensorInfo& alpha,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsQuantizeSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7c9d4ac58c..d8c10bdea6 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1711,4 +1711,44 @@ void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons
// This is internally generated so it should not need validation.
}
+void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateNumInputs(workloadInfo, "PreluQueueDescriptor", 2);
+ ValidateNumOutputs(workloadInfo, "PreluQueueDescriptor", 1);
+
+ std::vector<DataType> supportedTypes
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ supportedTypes,
+ "PreluQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[1],
+ supportedTypes,
+ "PreluQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+ supportedTypes,
+ "PreluQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ { workloadInfo.m_InputTensorInfos[1].GetDataType() },
+ "PreluQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ { workloadInfo.m_OutputTensorInfos[0].GetDataType() },
+ "PreluQueueDescriptor");
+
+ ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_InputTensorInfos[1],
+ workloadInfo.m_OutputTensorInfos[0],
+ "PreluQueueDescriptor",
+ "input",
+ "alpha");
+}
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 501fdd8464..6a51bc3144 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -440,4 +440,9 @@ struct SwitchQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct PreluQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 678d330508..cca39198e1 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -785,6 +785,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Prelu:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType),
+ OverrideDataType(alpha, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
default:
{
BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
@@ -1015,6 +1026,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiled
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor,
+ const WorkloadInfo &info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& Info) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index cc99356b73..c9fbe71f96 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -155,6 +155,9 @@ public:
virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
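The base-class CreatePrelu above returns an empty pointer, so a backend opts in by overriding it in its own workload factory. The sketch below uses hypothetical MyBackendWorkloadFactory and MyPreluWorkload names; no backend implementation is part of this patch.

// Sketch: a backend wiring up the new CreatePrelu hook (names hypothetical).
#include <memory>

std::unique_ptr<armnn::IWorkload>
MyBackendWorkloadFactory::CreatePrelu(const armnn::PreluQueueDescriptor& descriptor,
                                      const armnn::WorkloadInfo& info) const
{
    // MyPreluWorkload would derive from armnn::IWorkload and run the actual kernel.
    return std::make_unique<MyPreluWorkload>(descriptor, info);
}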
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index ff632fc701..111cf8f3e3 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -384,6 +384,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)
DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)
+DECLARE_LAYER_POLICY_1_PARAM(Prelu)
+
DECLARE_LAYER_POLICY_1_PARAM(Division)
DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)