From adfb2737046028c042f0aecaff87733a442da29f Mon Sep 17 00:00:00 2001
From: John Kesapides
Date: Mon, 4 Mar 2019 16:29:22 +0000
Subject: COMPMID-1935 Add support for QASYMM8 in NEQuantizeLayer

Change-Id: I2b63a644d8e34f91c830d9ac398debcbdca3e497
Signed-off-by: John Kesapides
Reviewed-on: https://review.mlplatform.org/c/829
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
---
 .../runtime/NEON/functions/NEQuantizationLayer.h | 27 +++++++---------------
 1 file changed, 8 insertions(+), 19 deletions(-)

diff --git a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
index 9cc1666b4c..9ca199d1ee 100644
--- a/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEQuantizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,9 +26,8 @@
 
 #include "arm_compute/runtime/IFunction.h"
 
-#include "arm_compute/core/NEON/kernels/NEMinMaxLayerKernel.h"
 #include "arm_compute/core/NEON/kernels/NEQuantizationLayerKernel.h"
-#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
 
 #include "arm_compute/core/Types.h"
 
@@ -38,39 +37,29 @@ class ITensor;
 
 /** Basic function to simulate a quantization layer. This function calls the following NEON kernels:
  *
- * @note The implementation supports only 3D input tensors
  *
- * -# @ref NEMinMaxLayerKernel
  * -# @ref NEQuantizationLayerKernel
  *
  */
-class NEQuantizationLayer : public IFunction
+class NEQuantizationLayer : public INESimpleFunctionNoBorder
 {
 public:
     /** Default constructor */
-    NEQuantizationLayer();
+    NEQuantizationLayer() = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F32
-     * @param[out] output Destination tensor with the same dimensions of input. Data types supported: U8
+     * @param[in]  input  Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F32
+     * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8
      */
    void configure(const ITensor *input, ITensor *output);
    /** Static function to check if given info will lead to a valid configuration of @ref NEQuantizationLayer
     *
     * @param[in] input  Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32.
-    * @param[in] output Output tensor info. Data types supported: U8
+    * @param[in] output Output tensor info. Data types supported: QASYMM8
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run() override;
-
-private:
-    NEQuantizationLayerKernel _quantize_kernel;
-    NEMinMaxLayerKernel       _min_max_kernel;
-    Tensor                    _min_max;
 };
-}
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_NEQUANTIZATIONLAYER_H__ */
--
cgit v1.2.1
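
Usage sketch (editor's note, not part of the patch): after this change NEQuantizationLayer no longer owns a min/max kernel or an intermediate tensor; it is a plain configure/allocate/run wrapper around a single kernel. The snippet below is a minimal assumed driver, not code from the commit: the tensor shape and the QuantizationInfo(0.5f, 10) scale/offset are illustrative values, and it assumes, consistent with the QASYMM8 destination type, that the target quantization parameters are carried on the output tensor's info.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Source tensor: F32. The former "at least 3 dimensions" restriction is
        // gone; dimensions above the third are still interpreted as batches.
        Tensor src;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));

        // Destination tensor: same shape, QASYMM8. The scale/offset pair is an
        // illustrative choice, not a value taken from the patch.
        TensorInfo dst_info(TensorShape(16U, 16U, 3U), 1, DataType::QASYMM8);
        dst_info.set_quantization_info(QuantizationInfo(0.5f, 10));
        Tensor dst;
        dst.allocator()->init(dst_info);

        // Check the configuration up front, then wire up the single kernel.
        Status status = NEQuantizationLayer::validate(src.info(), dst.info());
        if(status.error_code() != ErrorCode::OK)
        {
            return 1; // unsupported data types or mismatched shapes
        }

        NEQuantizationLayer quant;
        quant.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src with F32 data here ...

        quant.run(); // run() is now inherited from INESimpleFunctionNoBorder
        return 0;
    }

Since the function derives from INESimpleFunctionNoBorder, the removed run() override and member kernels are handled by the base class, which is why the header shrinks to configure() and validate() only.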