From 0e406eed386a4ea015ec703c84a74ea775d88b99 Mon Sep 17 00:00:00 2001 From: Matteo Martincigh Date: Wed, 12 Jun 2019 15:42:18 +0100 Subject: IVGCVSW-3267 Add Arm NN front end support for the new Prelu Activation layer * Added new PreluLayer class * Made necessary changes to ILayerSupport, ILayerVisitor, etc. * Added unit tests Change-Id: Ifcfb78e823bb5a245ed1dad15290d2f60115c882 Signed-off-by: Matteo Martincigh --- src/backends/backendsCommon/LayerSupportBase.cpp | 8 +++++ src/backends/backendsCommon/LayerSupportBase.hpp | 5 +++ src/backends/backendsCommon/WorkloadData.cpp | 40 ++++++++++++++++++++++ src/backends/backendsCommon/WorkloadData.hpp | 5 +++ src/backends/backendsCommon/WorkloadFactory.cpp | 17 +++++++++ src/backends/backendsCommon/WorkloadFactory.hpp | 3 ++ .../test/IsLayerSupportedTestImpl.hpp | 2 ++ 7 files changed, 80 insertions(+) (limited to 'src/backends') diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 48705c89b2..12e4ee81ae 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -348,6 +348,14 @@ bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input, return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } +bool LayerSupportBase::IsPreluSupported(const TensorInfo& input, + const TensorInfo& alpha, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported) const +{ + return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); +} + bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input, const armnn::TensorInfo& output, armnn::Optional<std::string&> reasonIfUnsupported) const diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index 4921cf9b3e..d035dfcd62 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -221,6 +221,11 @@
public: const PreCompiledDescriptor& descriptor, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; + bool IsPreluSupported(const TensorInfo& input, + const TensorInfo& alpha, + const TensorInfo& output, + Optional<std::string&> reasonIfUnsupported) const override; + bool IsQuantizeSupported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 7c9d4ac58c..d8c10bdea6 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -1711,4 +1711,44 @@ void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) cons // This is internally generated so it should not need validation. } +void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateNumInputs(workloadInfo, "PreluQueueDescriptor", 2); + ValidateNumOutputs(workloadInfo, "PreluQueueDescriptor", 1); + + std::vector<DataType> supportedTypes + { + DataType::Float16, + DataType::Float32, + DataType::QuantisedAsymm8 + }; + + ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], + supportedTypes, + "PreluQueueDescriptor"); + + ValidateDataTypes(workloadInfo.m_InputTensorInfos[1], + supportedTypes, + "PreluQueueDescriptor"); + + ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], + supportedTypes, + "PreluQueueDescriptor"); + + ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], + { workloadInfo.m_InputTensorInfos[1].GetDataType() }, + "PreluQueueDescriptor"); + + ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], + { workloadInfo.m_OutputTensorInfos[0].GetDataType() }, + "PreluQueueDescriptor"); + + ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0], + workloadInfo.m_InputTensorInfos[1], + workloadInfo.m_OutputTensorInfos[0], + "PreluQueueDescriptor", + "input", + "alpha"); +} + } //namespace armnn diff --git
a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp index 501fdd8464..6a51bc3144 100644 --- a/src/backends/backendsCommon/WorkloadData.hpp +++ b/src/backends/backendsCommon/WorkloadData.hpp @@ -440,4 +440,9 @@ struct SwitchQueueDescriptor : QueueDescriptor void Validate(const WorkloadInfo& workloadInfo) const; }; +struct PreluQueueDescriptor : QueueDescriptor +{ + void Validate(const WorkloadInfo& workloadInfo) const; +}; + } //namespace armnn diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 678d330508..cca39198e1 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -785,6 +785,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId, reason); break; } + case LayerType::Prelu: + { + const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& alpha = layer.GetInputSlot(1).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + result = layerSupportObject->IsPreluSupported(OverrideDataType(input, dataType), + OverrideDataType(alpha, dataType), + OverrideDataType(output, dataType), + reason); + break; + } default: { BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer."); @@ -1015,6 +1026,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiled return std::unique_ptr<IWorkload>(); } +std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor, + const WorkloadInfo &info) const +{ + return std::unique_ptr<IWorkload>(); +} + std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& Info) const { diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp index cc99356b73..c9fbe71f96 100644 --- a/src/backends/backendsCommon/WorkloadFactory.hpp +++
b/src/backends/backendsCommon/WorkloadFactory.hpp @@ -155,6 +155,9 @@ public: virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor, const WorkloadInfo& info) const; + virtual std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor, + const WorkloadInfo& info) const; + virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& Info) const; diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index ff632fc701..111cf8f3e3 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -384,6 +384,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Pooling2d) DECLARE_LAYER_POLICY_2_PARAM(PreCompiled) +DECLARE_LAYER_POLICY_1_PARAM(Prelu) + DECLARE_LAYER_POLICY_1_PARAM(Division) DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear) -- cgit v1.2.1