From fabc289e7a371d5a3d564bed0d373da26f718ab3 Mon Sep 17 00:00:00 2001
From: Sadik Armagan <sadik.armagan@arm.com>
Date: Fri, 31 May 2019 09:05:11 +0100
Subject: IVGCVSW-3185 Add NeonQuantizeWorkload

* Added NeonQuantizeWorkload to ArmNN

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I41f8707dda35c894841eddae2d3b78f088038c8e
---
 src/backends/neon/NeonLayerSupport.cpp        | 11 +++++
 src/backends/neon/NeonLayerSupport.hpp        |  4 ++
 src/backends/neon/NeonWorkloadFactory.cpp     |  7 ++-
 src/backends/neon/NeonWorkloadFactory.hpp     |  4 +-
 src/backends/neon/backend.mk                  |  1 +
 src/backends/neon/test/NeonLayerTests.cpp     |  4 ++
 src/backends/neon/workloads/CMakeLists.txt    |  2 +
 .../neon/workloads/NeonQuantizeWorkload.cpp   | 50 ++++++++++++++++++++++
 .../neon/workloads/NeonQuantizeWorkload.hpp   | 26 +++++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp |  1 +
 10 files changed, 104 insertions(+), 6 deletions(-)
 create mode 100644 src/backends/neon/workloads/NeonQuantizeWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonQuantizeWorkload.hpp

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index e84eb799fc..a85597bbf8 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -35,6 +35,7 @@
 #include "workloads/NeonPadWorkload.hpp"
 #include "workloads/NeonPermuteWorkload.hpp"
 #include "workloads/NeonPooling2dWorkload.hpp"
+#include "workloads/NeonQuantizeWorkload.hpp"
 #include "workloads/NeonResizeBilinearWorkload.hpp"
 #include "workloads/NeonSoftmaxBaseWorkload.hpp"
 #include "workloads/NeonSplitterWorkload.hpp"
@@ -438,6 +439,16 @@ bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
 }
 
+bool NeonLayerSupport::IsQuantizeSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
+}
+
 bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                           const ReshapeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index dd6ed79c9a..b933591a0e 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -145,6 +145,10 @@ public:
                               const Pooling2dDescriptor& descriptor,
                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsQuantizeSupported(const TensorInfo& input,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsReshapeSupported(const TensorInfo& input,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 4b6225f67b..307aaa56f3 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -219,11 +219,10 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
     return std::make_unique<NeonResizeBilinearWorkload>(descriptor, info);
 }
 
-std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
-    const FakeQuantizationQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
 {
-    return nullptr;
+    return std::make_unique<NeonQuantizeWorkload>(descriptor, info);
 }
 
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 6a28d12326..c4a2b50c4e 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -92,8 +92,8 @@ public:
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                      const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
 
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 6931bd7325..750118d51b 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -41,6 +41,7 @@ BACKEND_SOURCES := \
         workloads/NeonPadWorkload.cpp \
         workloads/NeonPermuteWorkload.cpp \
         workloads/NeonPooling2dWorkload.cpp \
+        workloads/NeonQuantizeWorkload.cpp \
         workloads/NeonReshapeWorkload.cpp \
         workloads/NeonResizeBilinearWorkload.cpp \
         workloads/NeonSoftmaxBaseWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d89011d3ae..26677f6e34 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -486,6 +486,10 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::Da
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
 
+// Quantize
+ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
+
 // ============================================================================
 // COMPARE tests
 
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 8b2ad63f45..b7638208d1 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -48,6 +48,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonPermuteWorkload.hpp
     NeonPooling2dWorkload.cpp
     NeonPooling2dWorkload.hpp
+    NeonQuantizeWorkload.cpp
+    NeonQuantizeWorkload.hpp
     NeonReshapeWorkload.cpp
     NeonReshapeWorkload.hpp
     NeonResizeBilinearWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
new file mode 100644
index 0000000000..ef24a7f40b
--- /dev/null
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
@@ -0,0 +1,50 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonQuantizeWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <neon/NeonTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <arm_compute/core/Types.h>
+
+#include <boost/polymorphic_pointer_cast.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo neonInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEQuantizationLayer::validate(&neonInputInfo, &neonOutputInfo);
+}
+
+NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor,
+                                           const WorkloadInfo& workloadInfo)
+    : BaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo)
+{
+    arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+                                                                      m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+                                                                       m_Data.m_Outputs[0])->GetTensor();
+
+    m_Layer.reset(new arm_compute::NEQuantizationLayer());
+    m_Layer->configure(&input, &output);
+    m_Layer->prepare();
+}
+
+void NeonQuantizeWorkload::Execute() const
+{
+    if (m_Layer)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonQuantizeWorkload_Execute");
+        m_Layer->run();
+    }
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.hpp b/src/backends/neon/workloads/NeonQuantizeWorkload.hpp
new file mode 100644
index 0000000000..7f5a145371
--- /dev/null
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEQuantizationLayer.h>
+
+namespace armnn {
+
+arm_compute::Status NeonQuantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonQuantizeWorkload : public BaseWorkload<QuantizeQueueDescriptor>
+{
+public:
+    NeonQuantizeWorkload(const QuantizeQueueDescriptor& descriptor, const WorkloadInfo& workloadInfo);
+    void Execute() const override;
+
+private:
+    mutable std::unique_ptr<arm_compute::NEQuantizationLayer> m_Layer;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index dc02aecfd7..8ad70d7095 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -26,6 +26,7 @@
 #include "NeonPadWorkload.hpp"
 #include "NeonPermuteWorkload.hpp"
 #include "NeonPooling2dWorkload.hpp"
+#include "NeonQuantizeWorkload.hpp"
 #include "NeonReshapeWorkload.hpp"
 #include "NeonResizeBilinearWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
-- 
cgit v1.2.1
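
For context: once this patch is applied, a Quantize layer placed on the CpuAcc backend resolves to NeonQuantizeWorkload through the IsQuantizeSupported and CreateQuantize hooks above. The sketch below is not part of the patch; it shows one way to exercise that path through the public ArmNN API of this era. The network shape, the quantization parameters (scale 0.5, offset 1), and the layer names are illustrative assumptions, not taken from this commit; DataType::QuantisedAsymm8 is the 2019-era name for what was later renamed QAsymmU8.

#include <armnn/ArmNN.hpp>

#include <cstdint>
#include <utility>
#include <vector>

int main()
{
    using namespace armnn;

    // Float32 -> QAsymm8 quantization: output = round(input / scale) + offset.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input    = network->AddInputLayer(0);
    IConnectableLayer* quantize = network->AddQuantizeLayer("quantize");
    IConnectableLayer* output   = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(quantize->GetInputSlot(0));
    quantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));
    quantize->GetOutputSlot(0).SetTensorInfo(
        TensorInfo({1, 4}, DataType::QuantisedAsymm8, 0.5f, 1)); // scale, offset

    // Optimizing for CpuAcc routes the layer through NeonLayerSupport::
    // IsQuantizeSupported and NeonWorkloadFactory::CreateQuantize.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet =
        Optimize(*network, {Compute::CpuAcc}, runtime->GetDeviceSpec());

    NetworkId netId = 0;
    runtime->LoadNetwork(netId, std::move(optNet));

    std::vector<float>   inputData{1.0f, 2.0f, 3.0f, 4.0f};
    std::vector<uint8_t> outputData(4);

    InputTensors inputTensors
        {{0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}};
    OutputTensors outputTensors
        {{0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}};

    // Runs NeonQuantizeWorkload::Execute() on the Neon backend.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
    return 0;
}

With scale 0.5 and offset 1, outputData would be expected to hold {3, 5, 7, 9}: each value is divided by the scale, rounded, and shifted by the offset, which is the behaviour the QuantizeSimpleUint8/QuantizeClampUint8 test cases added above verify against the reference implementation.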