From e03802edd37229a1868bacedd7571cc443810caf Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Mon, 11 Mar 2019 12:20:20 +0000 Subject: COMPMID-1936: Add support for QASYMM8 in CLQuantizeLayer. Change-Id: I9aa1f1f1753bcdee6a74ec15b4fb366f823788b4 Signed-off-by: Usama Arif Reviewed-on: https://review.mlplatform.org/c/850 Reviewed-by: Georgios Pinitas Tested-by: Arm Jenkins --- .../core/CL/kernels/CLQuantizationLayerKernel.h | 21 ++++++--------- .../runtime/CL/functions/CLQuantizationLayer.h | 31 ++++++---------------- 2 files changed, 16 insertions(+), 36 deletions(-) (limited to 'arm_compute') diff --git a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h index 5d78dce1c2..d16ae546ff 100644 --- a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -49,24 +49,20 @@ public: CLQuantizationLayerKernel &operator=(CLQuantizationLayerKernel &&) = default; /** Default destructor */ ~CLQuantizationLayerKernel() = default; - /** Set the input, output, min and max. + /** Set the input, output. * - * @param[in] input Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F32. - * @param[out] output Destination tensor with the same dimensions of input. Output data type must be U8. - * @param[in] min_max Pointer to the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor. - * The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32. + * @param[in] input Source tensor. Data types supported: F32/F16. + * @param[out] output Destination tensor with the same dimensions of input. Output data type must be QASYMM8. 
*/ - void configure(const ICLTensor *input, ICLTensor *output, ICLTensor *min_max); + void configure(const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayerKernel * - * @param[in] input Input tensor info. Data types supported: F32. - * @param[in] output Output tensor info. Output data type must be U8. - * @param[in] min_max Info for the tensor with shape [2, batches] which stores the minimum and maximum value for each 3D input tensor. - * The dimensions over the second must match the batched dimensions of the input tensor. Data type supported: F32. + * @param[in] input Input tensor info. Data types supported: F32/F16. + * @param[in] output Output tensor info. Output data type must be QASYMM8. * * @return a status */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max); + static Status validate(const ITensorInfo *input, const ITensorInfo *output); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -74,7 +70,6 @@ public: private: const ICLTensor *_input; ICLTensor *_output; - const ICLTensor *_min_max; }; } // namespace arm_compute #endif /*__ARM_COMPUTE_CLQUANTIZATIONLAYERKERNEL_H__ */ diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h index 738187dfe7..81dcfad515 100644 --- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h +++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,11 +24,7 @@ #ifndef __ARM_COMPUTE_CLQUANTIZATIONLAYER_H__ #define __ARM_COMPUTE_CLQUANTIZATIONLAYER_H__ -#include "arm_compute/runtime/IFunction.h" - -#include "arm_compute/core/CL/kernels/CLMinMaxLayerKernel.h" -#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h" -#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/CL/ICLSimpleFunction.h" namespace arm_compute { @@ -38,37 +34,26 @@ class ICLTensor; * * @note The implementation supports only 3D input tensors. * - * -# @ref CLMinMaxLayerKernel * -# @ref CLQuantizationLayerKernel * */ -class CLQuantizationLayer : public IFunction +class CLQuantizationLayer : public ICLSimpleFunction { public: - /** Default constructor */ - CLQuantizationLayer(); /** Set the input and output tensors. * - * @param[in] input Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F32. - * @param[out] output Destination tensor with the same dimensions of input. Output data type must be U8. + * @param[in] input Source tensor. Data types supported: F16/F32. + * @param[out] output Destination tensor with the same dimensions of input. Output data type must be QASYMM8. */ void configure(const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer * - * @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F32. - * @param[in] output Output tensor info. Output data type must be U8. + * @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F16/F32. + * @param[in] output Output tensor info. Output data type must be QASYMM8.
* * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *output); - - // Inherited methods overridden: - void run() override; - -private: - CLQuantizationLayerKernel _quantize_kernel; - CLMinMaxLayerKernel _min_max_kernel; - CLTensor _min_max; }; -} +} //namespace arm_compute #endif /* __ARM_COMPUTE_CLQUANTIZATIONLAYER_H__ */ -- cgit v1.2.1