From 0ded4c40578bc78003756d171f2bbe15f6ac72bc Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Tue, 9 Mar 2021 14:15:27 +0000
Subject: Port Arm(R) Neon(TM) Quantization to new API

Partially resolves: COMPMID-4193

Change-Id: I91dc964d4308687e76127c305a6bedca796f8ba0
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5246
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../NEON/functions/NEGenerateProposalsLayer.h    |  6 ++---
 .../runtime/NEON/functions/NEQuantizationLayer.h | 30 +++++++++++++++++-----
 2 files changed, 27 insertions(+), 9 deletions(-)

(limited to 'arm_compute/runtime/NEON')

diff --git a/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h b/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h
index 90e2307ce8..979b3ba83e 100644
--- a/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGenerateProposalsLayer.h
@@ -44,14 +44,14 @@ class NEComputeAllAnchorsKernel;
 
 /** Basic function to generate proposals for a RPN (Region Proposal Network)
  *
- * This function calls the following Neon kernels:
+ * This function calls the following Arm(R) Neon(TM) layers/kernels:
  * -# @ref NEComputeAllAnchorsKernel
  * -# @ref NEPermute x 2
  * -# @ref NEReshapeLayer x 2
  * -# @ref NEBoundingBoxTransform
  * -# @ref NEPadLayerKernel
  * -# @ref NEDequantizationLayerKernel x 2
- * -# @ref NEQuantizationLayerKernel
+ * -# @ref NEQuantizationLayer
  * And the following CPP kernels:
  * -# @ref CPPBoxWithNonMaximaSuppressionLimit
  */
@@ -113,7 +113,7 @@ private:
     // Memory group manager
     MemoryGroup _memory_group;
 
-    // Neon kernels
+    // kernels/layers
     NEPermute _permute_deltas;
     NEReshapeLayer _flatten_deltas;
     NEPermute _permute_scores;
diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 8b0532beea..54ec76b177 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -24,26 +24,37 @@
 #ifndef ARM_COMPUTE_NEQUANTIZATIONLAYER_H
 #define ARM_COMPUTE_NEQUANTIZATIONLAYER_H
 
+#include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IRuntimeContext.h"
 
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
-
-#include "arm_compute/core/Types.h"
+#include <memory>
 
 namespace arm_compute
 {
 class ITensor;
 class ITensorInfo;
 
-/** Basic function to simulate a quantization layer. This function calls the following Neon kernels:
+/** Basic function to simulate a quantization layer. This function calls the following Arm(R) Neon(TM) implementation layers:
  *
  *
- * -# @ref NEQuantizationLayerKernel
+ * -# @ref cpu::CpuQuantization
  *
  */
-class NEQuantizationLayer : public INESimpleFunctionNoBorder
+class NEQuantizationLayer : public IFunction
 {
 public:
+    NEQuantizationLayer();
+    /** Default Destructor */
+    ~NEQuantizationLayer();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEQuantizationLayer(const NEQuantizationLayer &) = delete;
+    /** Default move constructor */
+    NEQuantizationLayer(NEQuantizationLayer &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEQuantizationLayer &operator=(const NEQuantizationLayer &) = delete;
+    /** Default move assignment operator */
+    NEQuantizationLayer &operator=(NEQuantizationLayer &&) = default;
     /** Set the input and output tensors.
      *
      * @param[in]  input  Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16.
@@ -58,6 +69,13 @@ public:
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEQUANTIZATIONLAYER_H */
--
cgit v1.2.1
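
For reference, below is a minimal usage sketch of the ported function; it is not part of this patch. It relies only on the public API declared in the header above (validate(), configure(), run()) and on the fact that the function now dispatches to the cpu::CpuQuantization operator. The tensor shape and the QuantizationInfo(0.5f, 10) scale/offset are made-up illustrative values, and the input-filling step is left as a comment.

// Hypothetical, self-contained example; shapes and quantization parameters are illustrative only.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // F32 source and QASYMM8 destination; the scale/offset below are made-up values.
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::QASYMM8,
                                     QuantizationInfo(0.5f, 10)));

    // Check the configuration first, then configure the function
    ARM_COMPUTE_ERROR_THROW_ON(NEQuantizationLayer::validate(src.info(), dst.info()));
    NEQuantizationLayer quant;
    quant.configure(&src, &dst);

    // Allocate backing memory once the function is configured
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with F32 data (e.g. via a Window/Iterator) ...

    quant.run(); // runs the underlying cpu::CpuQuantization operator
    return 0;
}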