From ab9e52563f624d9782b97400f643d2632cc8d770 Mon Sep 17 00:00:00 2001 From: Matteo Martincigh Date: Thu, 13 Jun 2019 17:27:46 +0100 Subject: IVGCVSW-3268 Add Reference workload support for the new Prelu Activation layer * Added reference workload for the PReLU Activation layer * Added factory methods * Added validation support * Added Int16 support * Added unit tests Change-Id: Ic950d908c5e0a335dccd2960a3ffab0f8b599876 Signed-off-by: Matteo Martincigh --- src/armnn/test/CreateWorkload.hpp | 33 ++++++++ src/backends/backendsCommon/WorkloadData.cpp | 3 +- src/backends/backendsCommon/test/LayerTests.hpp | 92 ++++++++++++++++++++++ src/backends/reference/RefLayerSupport.cpp | 32 ++++++++ src/backends/reference/RefLayerSupport.hpp | 5 ++ src/backends/reference/RefWorkloadFactory.cpp | 10 +++ src/backends/reference/RefWorkloadFactory.hpp | 3 + src/backends/reference/backend.mk | 2 + .../reference/test/RefCreateWorkloadTests.cpp | 28 +++++++ src/backends/reference/test/RefLayerTests.cpp | 5 ++ src/backends/reference/workloads/CMakeLists.txt | 4 + src/backends/reference/workloads/PreluImpl.cpp | 35 ++++++++ src/backends/reference/workloads/PreluImpl.hpp | 21 +++++ .../reference/workloads/RefPreluWorkload.cpp | 35 ++++++++ .../reference/workloads/RefPreluWorkload.hpp | 22 ++++++ src/backends/reference/workloads/RefWorkloads.hpp | 1 + 16 files changed, 330 insertions(+), 1 deletion(-) create mode 100644 src/backends/reference/workloads/PreluImpl.cpp create mode 100644 src/backends/reference/workloads/PreluImpl.hpp create mode 100644 src/backends/reference/workloads/RefPreluWorkload.cpp create mode 100644 src/backends/reference/workloads/RefPreluWorkload.hpp diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp index 8863fecce3..b075744434 100644 --- a/src/armnn/test/CreateWorkload.hpp +++ b/src/armnn/test/CreateWorkload.hpp @@ -1280,4 +1280,37 @@ std::unique_ptr CreateConstantWorkloadTest(armnn::IWorkloadFac return workloadConstant; } +template 
+std::unique_ptr CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory, + armnn::Graph& graph, + const armnn::TensorShape& outputShape) +{ + // Creates the PReLU layer + Layer* const layer = graph.AddLayer("prelu"); + + // Creates extra layers + Layer* const input = graph.AddLayer (0, "input"); + Layer* const alpha = graph.AddLayer (1, "alpha"); + Layer* const output = graph.AddLayer(0, "output"); + + // Connects up + armnn::TensorInfo inputTensorInfo ({ 1, 4, 1, 2 }, DataType); + armnn::TensorInfo alphaTensorInfo ({ 5, 4, 3, 1 }, DataType); + armnn::TensorInfo outputTensorInfo(outputShape, DataType); + Connect(input, layer, inputTensorInfo, 0, 0); + Connect(alpha, layer, alphaTensorInfo, 0, 1); + Connect(layer, output, outputTensorInfo, 0, 0); + CreateTensorHandles(graph, factory); + + // Makes the workload and checks it + auto workload = MakeAndCheckWorkload(*layer, graph, factory); + + PreluQueueDescriptor queueDescriptor = workload->GetData(); + BOOST_TEST(queueDescriptor.m_Inputs.size() == 2); + BOOST_TEST(queueDescriptor.m_Outputs.size() == 1); + + // Returns so we can do extra, backend-specific tests. 
+ return workload; } + +} // Anonymous namespace diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index d8c10bdea6..b7317af9cd 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -1720,7 +1720,8 @@ void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { DataType::Float16, DataType::Float32, - DataType::QuantisedAsymm8 + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 }; ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 058d6946f6..bf0d063091 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -3158,3 +3158,95 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( return ret; } + +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +LayerTestResult<T, 4> PreluTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType); + armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 2, 2, 3 }, ArmnnType); + + if (armnn::IsQuantizedType<T>()) + { + inputTensorInfo.SetQuantizationScale(0.25f); + inputTensorInfo.SetQuantizationOffset(128); + alphaTensorInfo.SetQuantizationScale(0.25f); + alphaTensorInfo.SetQuantizationOffset(50); + outputTensorInfo.SetQuantizationScale(0.5f); + outputTensorInfo.SetQuantizationOffset(120); + } + + std::vector<float> inputData + { + // Expected quantized values: + // 128, 128, 128, 132, 132, 132, 124, 124, 124, 120, 120, 120 + 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -2.0f, -2.0f, -2.0f + }; + std::vector<float> alphaData + { + // Expected quantized values: + // 50, 54, 58 + 0.0f, 1.0f, 2.0f + }; + std::vector<float> outputExpectedData = + { + // Expected quantized values: + // 120, 120, 120, 122, 
122, 122, 120, 118, 116, 120, 116, 112 + 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f + }; + + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputData)); + auto alpha = MakeTensor(alphaTensorInfo, QuantizedVector(alphaTensorInfo.GetQuantizationScale(), + alphaTensorInfo.GetQuantizationOffset(), + alphaData)); + + LayerTestResult result(outputTensorInfo); + result.outputExpected = MakeTensor(outputTensorInfo, + QuantizedVector(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + outputExpectedData)); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr alphaHandle = workloadFactory.CreateTensorHandle(alphaTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PreluQueueDescriptor descriptor; + armnn::WorkloadInfo info; + AddInputToWorkload (descriptor, info, inputTensorInfo, inputHandle.get()); + AddInputToWorkload (descriptor, info, alphaTensorInfo, alphaHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePrelu(descriptor, info); + + inputHandle->Allocate(); + alphaHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + CopyDataToITensorHandle(alphaHandle.get(), &alpha[0][0][0][0]); + + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + + return result; +} + +template LayerTestResult, 4> +PreluTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +template LayerTestResult, 4> +PreluTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); + +template 
LayerTestResult, 4> +PreluTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 919dd5fd6c..077aa1ce3a 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -1353,4 +1353,36 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0, return supported; } +bool RefLayerSupport::IsPreluSupported(const TensorInfo& input, + const TensorInfo& alpha, + const TensorInfo& output, + Optional reasonIfUnsupported) const +{ + bool supported = true; + + std::array supportedTypes + { + DataType::Float32, + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 + }; + + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "PReLU: input is not a supported type."); + + supported &= CheckSupportRule(TypeAnyOf(alpha, supportedTypes), reasonIfUnsupported, + "PReLU: alpha is not a supported type."); + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "PReLU: output is not a supported type."); + + supported &= CheckSupportRule(TypesAreEqual(input, alpha, output), reasonIfUnsupported, + "PReLU: input, alpha and output types are mismatched"); + + supported &= CheckSupportRule(ShapesAreBroadcastCompatible(input, alpha, output), reasonIfUnsupported, + "PReLU: shapes are not suitable for implicit broadcast"); + + return supported; +} + } // namespace armnn diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 8850c6e105..041701d8e1 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -255,6 +255,11 @@ public: const TensorInfo& input1, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + + bool IsPreluSupported(const TensorInfo& input, 
+ const TensorInfo& alpha, + const TensorInfo& output, + Optional reasonIfUnsupported = EmptyOptional()) const override; }; } // namespace armnn diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 4467bd4ad6..29b2c52254 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -436,4 +436,14 @@ std::unique_ptr RefWorkloadFactory::CreateDequantize(const Dequantize return std::make_unique(descriptor, info); } +std::unique_ptr RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (IsFloat16(info)) + { + return MakeWorkload(descriptor, info); + } + return std::make_unique(descriptor, info); +} + } // namespace armnn diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 78f6bab92c..333a9ca257 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -187,6 +187,9 @@ public: std::unique_ptr CreateQuantize(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr CreatePrelu(const PreluQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + private: template diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index ecd281208a..a430f4fb68 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -25,6 +25,7 @@ BACKEND_SOURCES := \ workloads/Concatenate.cpp \ workloads/Pad.cpp \ workloads/Pooling2d.cpp \ + workloads/PreluImpl.cpp \ workloads/RefActivationWorkload.cpp \ workloads/RefBatchNormalizationWorkload.cpp \ workloads/RefBatchToSpaceNdFloat32Workload.cpp \ @@ -50,6 +51,7 @@ BACKEND_SOURCES := \ workloads/RefPadWorkload.cpp \ workloads/RefPermuteWorkload.cpp \ workloads/RefPooling2dWorkload.cpp \ + workloads/RefPreluWorkload.cpp \ 
workloads/RefQuantizeWorkload.cpp \ workloads/RefReshapeWorkload.cpp \ workloads/RefResizeBilinearWorkload.cpp \ diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index e541692654..14615f89df 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -870,4 +870,32 @@ BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload) RefCreateConstantWorkloadTest({ 2, 3, 2, 10 }); } +template +static void RefCreatePreluWorkloadTest(const armnn::TensorShape& outputShape) +{ + armnn::Graph graph; + RefWorkloadFactory factory; + auto workload = CreatePreluWorkloadTest(factory, graph, outputShape); + + // Check output is as expected + auto queueDescriptor = workload->GetData(); + auto outputHandle = boost::polymorphic_downcast(queueDescriptor.m_Outputs[0]); + BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType))); +} + +BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload) +{ + RefCreatePreluWorkloadTest({ 5, 4, 3, 2 }); +} + +BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload) +{ + RefCreatePreluWorkloadTest({ 5, 4, 3, 2 }); +} + +BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload) +{ + RefCreatePreluWorkloadTest({ 5, 4, 3, 2 }); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 95e93653bc..b540d185d3 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -859,4 +859,9 @@ ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test) ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test) ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test) +// PReLU +ARMNN_AUTO_TEST_CASE(PreluFloat32, PreluTest) +ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest) +ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest) + BOOST_AUTO_TEST_SUITE_END() diff --git 
a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 1ab38ccbcb..db0daa0310 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -36,6 +36,8 @@ list(APPEND armnnRefBackendWorkloads_sources Pad.hpp Pooling2d.cpp Pooling2d.hpp + PreluImpl.cpp + PreluImpl.hpp RefActivationWorkload.cpp RefActivationWorkload.hpp RefBatchNormalizationWorkload.cpp @@ -84,6 +86,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefPermuteWorkload.hpp RefPooling2dWorkload.cpp RefPooling2dWorkload.hpp + RefPreluWorkload.cpp + RefPreluWorkload.hpp RefQuantizeWorkload.cpp RefQuantizeWorkload.hpp RefReshapeWorkload.cpp diff --git a/src/backends/reference/workloads/PreluImpl.cpp b/src/backends/reference/workloads/PreluImpl.cpp new file mode 100644 index 0000000000..458025bb0a --- /dev/null +++ b/src/backends/reference/workloads/PreluImpl.cpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "PreluImpl.hpp" +#include "RefWorkloadUtils.hpp" +#include "Broadcast.hpp" + +namespace armnn +{ + +void PreluImpl(const PreluQueueDescriptor& data, + Decoder& inputData, + Decoder& alphaData, + Encoder& outputData) +{ + const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[0]); + const TensorInfo& alphaInfo = GetTensorInfo(data.m_Inputs[1]); + const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]); + + const TensorShape& inputShape = inputInfo.GetShape(); + const TensorShape& alphaShape = alphaInfo.GetShape(); + const TensorShape& outputShape = outputInfo.GetShape(); + + // PReLU activation: f(x) = alpha * x for x < 0, f(x) = x for x >= 0 + auto prelu = [](float x, float alpha) + { + return x < 0 ? 
alpha * x : x; + }; + + BroadcastLoop(inputShape, alphaShape, outputShape).Unroll(prelu, 0, inputData, alphaData, outputData); +} + +} // namespace armnn diff --git a/src/backends/reference/workloads/PreluImpl.hpp b/src/backends/reference/workloads/PreluImpl.hpp new file mode 100644 index 0000000000..9299b1c7f7 --- /dev/null +++ b/src/backends/reference/workloads/PreluImpl.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include "Encoders.hpp" +#include "Decoders.hpp" + +#include <backendsCommon/WorkloadData.hpp> + +namespace armnn +{ + +void PreluImpl(const PreluQueueDescriptor& data, + Decoder<float>& inputData, + Decoder<float>& alphaData, + Encoder<float>& outputData); + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefPreluWorkload.cpp b/src/backends/reference/workloads/RefPreluWorkload.cpp new file mode 100644 index 0000000000..cdc0a63711 --- /dev/null +++ b/src/backends/reference/workloads/RefPreluWorkload.cpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "RefPreluWorkload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "PreluImpl.hpp" + +#include + +namespace armnn +{ + +RefPreluWorkload::RefPreluWorkload(const PreluQueueDescriptor& descriptor, + const WorkloadInfo& info) + : BaseWorkload(descriptor, info) +{} + +void RefPreluWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefPreluWorkload_Execute"); + + std::unique_ptr> inputDecoder = MakeDecoder(GetTensorInfo(m_Data.m_Inputs[0]), + m_Data.m_Inputs[0]->Map()); + std::unique_ptr> alphaDecoder = MakeDecoder(GetTensorInfo(m_Data.m_Inputs[1]), + m_Data.m_Inputs[1]->Map()); + std::unique_ptr> outputEncoder = MakeEncoder(GetTensorInfo(m_Data.m_Outputs[0]), + m_Data.m_Outputs[0]->Map()); + + PreluImpl(m_Data, *inputDecoder, *alphaDecoder, *outputEncoder); +} + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefPreluWorkload.hpp b/src/backends/reference/workloads/RefPreluWorkload.hpp new file mode 100644 index 0000000000..72839e67dc --- /dev/null +++ b/src/backends/reference/workloads/RefPreluWorkload.hpp @@ -0,0 +1,22 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#pragma once + +#include +#include + +namespace armnn +{ + +class RefPreluWorkload : public BaseWorkload +{ +public: + explicit RefPreluWorkload(const PreluQueueDescriptor& descriptor, + const WorkloadInfo& info); + virtual void Execute() const override; +}; + +} // namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index b14129146a..41b16fa56f 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -51,3 +51,4 @@ #include "RefDequantizeWorkload.hpp" #include "RefQuantizeWorkload.hpp" #include "RefReshapeWorkload.hpp" +#include "RefPreluWorkload.hpp" -- cgit v1.2.1