From 0dd3b43e1497ab406acfdefca425143fbefb1ee4 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Date: Tue, 10 Sep 2019 13:55:09 +0100
Subject: IVGCVSW-3830 Add NEON workload for RSQRT

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Ida590b7d28dbee40e496794c42b682d740427cca
---
 src/backends/neon/NeonLayerSupport.cpp             |  8 ++++
 src/backends/neon/NeonLayerSupport.hpp             |  4 ++
 src/backends/neon/NeonWorkloadFactory.cpp          |  2 +-
 src/backends/neon/backend.mk                       |  1 +
 src/backends/neon/test/NeonCreateWorkloadTests.cpp | 30 +++++++++++++++
 src/backends/neon/test/NeonLayerTests.cpp          |  6 +++
 src/backends/neon/workloads/CMakeLists.txt         |  2 +
 src/backends/neon/workloads/NeonRsqrtWorkload.cpp  | 43 ++++++++++++++++++++++
 src/backends/neon/workloads/NeonRsqrtWorkload.hpp  | 28 ++++++++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp      |  1 +
 10 files changed, 124 insertions(+), 1 deletion(-)
 create mode 100644 src/backends/neon/workloads/NeonRsqrtWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonRsqrtWorkload.hpp

(limited to 'src')

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 67c156001d..0d4d4c3459 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -42,6 +42,7 @@
 #include "workloads/NeonQuantizeWorkload.hpp"
 #include "workloads/NeonQuantizedLstmWorkload.hpp"
 #include "workloads/NeonResizeWorkload.hpp"
+#include "workloads/NeonRsqrtWorkload.hpp"
 #include "workloads/NeonSoftmaxBaseWorkload.hpp"
 #include "workloads/NeonSpaceToDepthWorkload.hpp"
 #include "workloads/NeonSplitterWorkload.hpp"
@@ -547,6 +548,13 @@ bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
     return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
 }
 
+bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
+}
+
 bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 16a7030f5f..cc4798eea5 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -191,6 +191,10 @@ public:
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsRsqrtSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index fc1a65d712..441e27f5cf 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -401,7 +401,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDesc
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
                                                             const WorkloadInfo &info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonRsqrtWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateTransposeConvolution2d(
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 706e8b0224..5eae9e5137 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -50,6 +50,7 @@ BACKEND_SOURCES := \
         workloads/NeonQuantizeWorkload.cpp \
         workloads/NeonReshapeWorkload.cpp \
         workloads/NeonResizeWorkload.cpp \
+        workloads/NeonRsqrtWorkload.cpp \
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 643ecd5a4f..6a4f612881 100644
--- 
a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -181,6 +181,36 @@ BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
                                        DataType::QuantisedAsymm8>();
 }
 
+template <typename WorkloadType,
+          typename DescriptorType,
+          typename LayerType,
+          armnn::DataType DataType>
+static void NeonCreateElementwiseUnaryWorkloadTest()
+{
+    Graph graph;
+    NeonWorkloadFactory factory =
+        NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
+
+    auto workload = CreateElementwiseUnaryWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>
+                    (factory, graph);
+
+    DescriptorType queueDescriptor = workload->GetData();
+
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle,  TensorInfo({2, 3}, DataType)));
+    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
+}
+
+BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32Workload)
+{
+    NeonCreateElementwiseUnaryWorkloadTest<NeonRsqrtWorkload,
+                                           RsqrtQueueDescriptor,
+                                           RsqrtLayer,
+                                           DataType::Float32>();
+}
+
 template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
 static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
 {
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d9f78e8a5f..056656769f 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -859,6 +859,12 @@ ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest<DataType::Float32>)
 
 ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
 
+// Rsqrt
+ARMNN_AUTO_TEST_CASE(Rsqrt2d, Rsqrt2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Rsqrt3d, Rsqrt3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
+
 #if defined(ARMNNREF_ENABLED)
 
 // The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index e0831be814..90043911cf 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ 
-62,6 +62,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonReshapeWorkload.hpp
     NeonResizeWorkload.cpp
     NeonResizeWorkload.hpp
+    NeonRsqrtWorkload.cpp
+    NeonRsqrtWorkload.hpp
     NeonSoftmaxBaseWorkload.cpp
     NeonSoftmaxBaseWorkload.hpp
     NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonRsqrtWorkload.cpp b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
new file mode 100644
index 0000000000..b6292833dd
--- /dev/null
+++ b/src/backends/neon/workloads/NeonRsqrtWorkload.cpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonRsqrtWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NERsqrtLayer::validate(&aclInput, &aclOutput);
+}
+
+NeonRsqrtWorkload::NeonRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<RsqrtQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonRsqrtWorkload", 1, 1);
+
+    arm_compute::ITensor& input  = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_RsqrtLayer.configure(&input, &output);
+}
+
+void NeonRsqrtWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonRsqrtWorkload_Execute");
+    m_RsqrtLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonRsqrtWorkload.hpp b/src/backends/neon/workloads/NeonRsqrtWorkload.hpp
new file mode 100644
index 0000000000..6f6031ef77
--- /dev/null
+++ b/src/backends/neon/workloads/NeonRsqrtWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2019 Arm 
Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonRsqrtWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonRsqrtWorkload : public BaseWorkload<RsqrtQueueDescriptor>
+{
+public:
+    NeonRsqrtWorkload(const RsqrtQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NERsqrtLayer m_RsqrtLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index d807e80b15..4f30f33d1b 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -33,6 +33,7 @@
 #include "NeonQuantizeWorkload.hpp"
 #include "NeonReshapeWorkload.hpp"
 #include "NeonResizeWorkload.hpp"
+#include "NeonRsqrtWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
 #include "NeonSoftmaxUint8Workload.hpp"
 #include "NeonSpaceToDepthWorkload.hpp"
-- 
cgit v1.2.1