diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-04-30 14:46:05 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-05-05 17:39:26 +0000 |
commit | ef516e8bb8eb7f55b410268587f3b88b77e2fd8e (patch) | |
tree | d2043bf6bae9c51ab0344a4e13f1c54205e28c3c /src/core | |
parent | 448cb45e2cb86f32a739c925a1ac8c688cf573bf (diff) | |
download | ComputeLibrary-ef516e8bb8eb7f55b410268587f3b88b77e2fd8e.tar.gz |
Rename Quantization/Dequantization kernels/operators to imperative mood
Renames the following kernels/functions
- [Cl|Cpu]DequantizationKernel -> [Cl|Cpu]DequantizeKernel
- [Cl|Cpu]Dequantization -> [Cl|Cpu]Dequantize
- [Cl|Cpu]QuantizationKernel -> [Cl|Cpu]QuantizeKernel
- [Cl|Cpu]Quantization -> [Cl|Cpu]Quantize
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ic3c5eb3b7fe28f807294d159830eef99c2dd6219
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5566
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core')
-rw-r--r-- | src/core/cpu/kernels/CpuDequantizeKernel.cpp (renamed from src/core/cpu/kernels/CpuDequantizationKernel.cpp) | 12 | ||||
-rw-r--r-- | src/core/cpu/kernels/CpuDequantizeKernel.h (renamed from src/core/cpu/kernels/CpuDequantizationKernel.h) | 17 | ||||
-rw-r--r-- | src/core/cpu/kernels/CpuQuantizeKernel.cpp (renamed from src/core/cpu/kernels/CpuQuantizationKernel.cpp) | 47 | ||||
-rw-r--r-- | src/core/cpu/kernels/CpuQuantizeKernel.h (renamed from src/core/cpu/kernels/CpuQuantizationKernel.h) | 26 | ||||
-rw-r--r-- | src/core/gpu/cl/kernels/ClDequantizeKernel.cpp (renamed from src/core/gpu/cl/kernels/ClDequantizationKernel.cpp) | 14 | ||||
-rw-r--r-- | src/core/gpu/cl/kernels/ClDequantizeKernel.h (renamed from src/core/gpu/cl/kernels/ClDequantizationKernel.h) | 20 | ||||
-rw-r--r-- | src/core/gpu/cl/kernels/ClQuantizeKernel.cpp (renamed from src/core/gpu/cl/kernels/ClQuantizationKernel.cpp) | 14 | ||||
-rw-r--r-- | src/core/gpu/cl/kernels/ClQuantizeKernel.h (renamed from src/core/gpu/cl/kernels/ClQuantizationKernel.h) | 21 |
8 files changed, 76 insertions, 95 deletions
diff --git a/src/core/cpu/kernels/CpuDequantizationKernel.cpp b/src/core/cpu/kernels/CpuDequantizeKernel.cpp index 2aa9fb9068..42b5439697 100644 --- a/src/core/cpu/kernels/CpuDequantizationKernel.cpp +++ b/src/core/cpu/kernels/CpuDequantizeKernel.cpp @@ -21,7 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "src/core/cpu/kernels/CpuDequantizationKernel.h" +#include "src/core/cpu/kernels/CpuDequantizeKernel.h" #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" @@ -349,7 +349,7 @@ void run_dequantization_core(const ITensor *input, ITensor *output, const Window } } // namespace -void CpuDequantizationKernel::configure(const ITensorInfo *src, ITensorInfo *dst) +void CpuDequantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst) { ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst)); @@ -362,13 +362,13 @@ void CpuDequantizationKernel::configure(const ITensorInfo *src, ITensorInfo *dst ICpuKernel::configure(win); } -Status CpuDequantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status CpuDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst)); return Status{}; } -void CpuDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) +void CpuDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) { ARM_COMPUTE_UNUSED(info); ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); @@ -391,9 +391,9 @@ void CpuDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, ARM_COMPUTE_ERROR("Unsupported data type."); } } -const char *CpuDequantizationKernel::name() const +const char *CpuDequantizeKernel::name() const { - return "CpuDequantizationKernel"; + return "CpuDequantizeKernel"; } } // namespace kernels } // namespace cpu diff --git a/src/core/cpu/kernels/CpuDequantizationKernel.h 
b/src/core/cpu/kernels/CpuDequantizeKernel.h index 8ac807097c..798f32cec7 100644 --- a/src/core/cpu/kernels/CpuDequantizationKernel.h +++ b/src/core/cpu/kernels/CpuDequantizeKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H -#define ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H +#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H +#define ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -34,22 +34,21 @@ namespace cpu namespace kernels { /** Interface for the dequantization layer kernel. */ -class CpuDequantizationKernel : public ICpuKernel +class CpuDequantizeKernel : public ICpuKernel { public: /** Default constructor */ - CpuDequantizationKernel() = default; - ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizationKernel); + CpuDequantizeKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuDequantizeKernel); /** Set input, output tensors. * * @param[in] src Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. * @param[out] dst Destination tensor info with the same dimensions of input. Data type supported: F16/F32. */ void configure(const ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuDequantizationKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. - * @param[in] dst Destination tensor info. Data types supported: F16/F32. 
+ * Similar to @ref CpuDequantizeKernel::configure() * * @return a status */ @@ -62,4 +61,4 @@ public: } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_DEQUANTIZATIONKERNEL_H */ +#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H */ diff --git a/src/core/cpu/kernels/CpuQuantizationKernel.cpp b/src/core/cpu/kernels/CpuQuantizeKernel.cpp index 9b1e017275..8ca81e8b11 100644 --- a/src/core/cpu/kernels/CpuQuantizationKernel.cpp +++ b/src/core/cpu/kernels/CpuQuantizeKernel.cpp @@ -21,7 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "src/core/cpu/kernels/CpuQuantizationKernel.h" +#include "src/core/cpu/kernels/CpuQuantizeKernel.h" #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" @@ -108,34 +108,29 @@ vector_type<int8_t> vquantize_qasymm8<int8_t>(const float32x4x4_t &qv, const Uni } // namespace -CpuQuantizationKernel::CpuQuantizationKernel() - : _func(nullptr) -{ -} - -void CpuQuantizationKernel::configure(ITensorInfo *src, ITensorInfo *dst) +void CpuQuantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst)); - static const std::map<std::string, QuantizationFunctionExecutorPtr> quant_map = + static const std::map<std::string, QuantizeFunctionExecutorPtr> quant_map = { - { "op_QASYMM8_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<uint8_t, uint8_t> }, - { "op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<uint8_t, int8_t> }, - { "op_QASYMM8_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<uint8_t> }, + { "op_QASYMM8_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, uint8_t> }, + { "op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, int8_t> }, + { "op_QASYMM8_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<uint8_t> }, - { "op_QASYMM8_SIGNED_QASYMM8", 
&CpuQuantizationKernel::run_quantize_qasymm8<int8_t, uint8_t> }, - { "op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<int8_t, int8_t> }, - { "op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<int8_t> }, + { "op_QASYMM8_SIGNED_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, uint8_t> }, + { "op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, int8_t> }, + { "op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<int8_t> }, - { "op_F32_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<float, uint8_t> }, - { "op_F32_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<float, int8_t> }, - { "op_F32_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<float> }, + { "op_F32_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float, uint8_t> }, + { "op_F32_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float, int8_t> }, + { "op_F32_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float> }, #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - { "op_F16_QASYMM8", &CpuQuantizationKernel::run_quantize_qasymm8<float16_t, uint8_t> }, - { "op_F16_QASYMM8_SIGNED", &CpuQuantizationKernel::run_quantize_qasymm8<float16_t, int8_t> }, - { "op_F16_QASYMM16", &CpuQuantizationKernel::run_quantize_qasymm16<float16_t> }, + { "op_F16_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, uint8_t> }, + { "op_F16_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, int8_t> }, + { "op_F16_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float16_t> }, #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/ }; @@ -156,14 +151,14 @@ void CpuQuantizationKernel::configure(ITensorInfo *src, ITensorInfo *dst) ICpuKernel::configure(win_config); } -Status CpuQuantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status CpuQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) { 
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst)); return Status{}; } template <typename TIn, typename TOut> -void CpuQuantizationKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window) +void CpuQuantizeKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window) { const auto window_start_x = static_cast<int>(window.x().start()); const auto window_end_x = static_cast<int>(window.x().end()); @@ -206,7 +201,7 @@ void CpuQuantizationKernel::run_quantize_qasymm8(const ITensor *src, ITensor *ds } template <typename T> -void CpuQuantizationKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window) +void CpuQuantizeKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window) { const auto window_start_x = static_cast<int>(window.x().start()); const auto window_end_x = static_cast<int>(window.x().end()); @@ -250,7 +245,7 @@ void CpuQuantizationKernel::run_quantize_qasymm16(const ITensor *src, ITensor *d input, output); } -void CpuQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) +void CpuQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) { ARM_COMPUTE_UNUSED(info); ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); @@ -262,9 +257,9 @@ void CpuQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, c (this->*_func)(src, dst, window); } -const char *CpuQuantizationKernel::name() const +const char *CpuQuantizeKernel::name() const { - return "CpuQuantizationKernel"; + return "CpuQuantizeKernel"; } } // namespace kernels } // namespace cpu diff --git a/src/core/cpu/kernels/CpuQuantizationKernel.h b/src/core/cpu/kernels/CpuQuantizeKernel.h index 51d9a4e94f..d3422d3fbd 100644 --- a/src/core/cpu/kernels/CpuQuantizationKernel.h +++ b/src/core/cpu/kernels/CpuQuantizeKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * 
SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H -#define ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H +#ifndef ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H +#define ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/cpu/ICpuKernel.h" @@ -36,14 +36,13 @@ namespace kernels /** Interface for the quantization layer kernel. * * @note The implementation supports only 3D input tensors - * */ -class CpuQuantizationKernel : public ICpuKernel +class CpuQuantizeKernel : public ICpuKernel { public: /** Default constructor */ - CpuQuantizationKernel(); - ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizationKernel); + CpuQuantizeKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuQuantizeKernel); /** Set the input, output. * * @param[in] src Source tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. @@ -51,11 +50,10 @@ public: * * @note Output auto initialization is not supported by this kernel */ - void configure(ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref CpuQuantizationKernel + void configure(const ITensorInfo *src, ITensorInfo *dst); + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. - * @param[in] dst Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16. + * Similar to @ref CpuQuantizeKernel::configure() * * @return a status */ @@ -66,11 +64,11 @@ public: const char *name() const override; private: - /** Common signature for all the specialised @ref NEQuantizationLayerKernel functions + /** Common signature for all the specialised @ref CpuQuantizeKernel functions * * @param[in] window Region on which to execute the kernel. 
*/ - using QuantizationFunctionExecutorPtr = void (CpuQuantizationKernel::*)(const ITensor *src, ITensor *dst, const Window &window); + using QuantizeFunctionExecutorPtr = void (CpuQuantizeKernel::*)(const ITensor *src, ITensor *dst, const Window &window); /** Function to apply QASYMM8 or QASYMM8_SIGNED quantization on a tensor. * * @param[in] window Region on which to execute the kernel. @@ -84,9 +82,9 @@ private: template <typename T> void run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window); - QuantizationFunctionExecutorPtr _func; + QuantizeFunctionExecutorPtr _func{ nullptr }; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /*ARM_COMPUTE_CPU_QUANTIZATIONKERNEL_H */ +#endif /* ARM_COMPUTE_CPU_QUANTIZE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClDequantizationKernel.cpp b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp index 6421a08206..f2758b759f 100644 --- a/src/core/gpu/cl/kernels/ClDequantizationKernel.cpp +++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.cpp @@ -21,7 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "src/core/gpu/cl/kernels/ClDequantizationKernel.h" +#include "src/core/gpu/cl/kernels/ClDequantizeKernel.h" #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibrary.h" @@ -29,9 +29,11 @@ #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" + #include "src/core/CL/CLValidate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" + #include "support/Cast.h" #include "support/StringSupport.h" @@ -59,11 +61,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) } } // namespace -ClDequantizationKernel::ClDequantizationKernel() -{ -} - -void ClDequantizationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) +void ClDequantizeKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); @@ -115,13 +113,13 @@ void ClDequantizationKernel::configure(const CLCompileContext &compile_context, ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info)); } -Status ClDequantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status ClDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst)); return Status{}; } -void ClDequantizationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) +void ClDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) { ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); diff --git a/src/core/gpu/cl/kernels/ClDequantizationKernel.h b/src/core/gpu/cl/kernels/ClDequantizeKernel.h index 3ccf90c204..33e0164cc9 100644 --- a/src/core/gpu/cl/kernels/ClDequantizationKernel.h +++ b/src/core/gpu/cl/kernels/ClDequantizeKernel.h @@ -21,29 +21,26 @@ * OUT 
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H -#define ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H +#ifndef ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H +#define ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H -#include "arm_compute/core/KernelDescriptors.h" #include "src/core/common/Macros.h" #include "src/core/gpu/cl/ClCompileContext.h" #include "src/core/gpu/cl/IClKernel.h" namespace arm_compute { -class ICLTensor; - namespace opencl { namespace kernels { /** Interface for the dequantization layer kernel. */ -class ClDequantizationKernel : public IClKernel +class ClDequantizeKernel : public IClKernel { public: /** Default constructor */ - ClDequantizationKernel(); - ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizationKernel); + ClDequantizeKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClDequantizeKernel); /** Initialise the kernel's input and output * * @param[in] compile_context The compile context to be used. @@ -51,10 +48,9 @@ public: * @param[out] dst Destination tensor info. Data types supported: F16/F32. */ void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClDequantizationKernel + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16. - * @param[in] dst Output tensor info. Data types supported: F16/F32. 
+ * Similar to @ref ClDequantizeKernel::configure() * * @return a status */ @@ -66,4 +62,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_DEQUANTIZATION_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_DEQUANTIZE_KERNEL_H */ diff --git a/src/core/gpu/cl/kernels/ClQuantizationKernel.cpp b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp index 9926123529..48d351d536 100644 --- a/src/core/gpu/cl/kernels/ClQuantizationKernel.cpp +++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.cpp @@ -21,7 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "src/core/gpu/cl/kernels/ClQuantizationKernel.h" +#include "src/core/gpu/cl/kernels/ClQuantizeKernel.h" #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibrary.h" @@ -31,8 +31,10 @@ #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" + #include "src/core/CL/CLValidate.h" #include "src/core/helpers/WindowHelpers.h" + #include "support/Cast.h" #include "support/StringSupport.h" @@ -59,11 +61,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst) } } // namespace -ClQuantizationKernel::ClQuantizationKernel() -{ -} - -void ClQuantizationKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst) +void ClQuantizeKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst) { ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); @@ -146,13 +144,13 @@ void ClQuantizationKernel::configure(const CLCompileContext &compile_context, IT ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info)); } -Status ClQuantizationKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) +Status ClQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst) { ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst)); return Status{}; } 
-void ClQuantizationKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) +void ClQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) { ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); diff --git a/src/core/gpu/cl/kernels/ClQuantizationKernel.h b/src/core/gpu/cl/kernels/ClQuantizeKernel.h index 20822cf9c9..8d37f33032 100644 --- a/src/core/gpu/cl/kernels/ClQuantizationKernel.h +++ b/src/core/gpu/cl/kernels/ClQuantizeKernel.h @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H -#define ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H +#ifndef ARM_COMPUTE_CL_QUANTIZE_KERNEL_H +#define ARM_COMPUTE_CL_QUANTIZE_KERNEL_H #include "src/core/common/Macros.h" #include "src/core/gpu/cl/ClCompileContext.h" @@ -30,8 +30,6 @@ namespace arm_compute { -class ICLTensor; - namespace opencl { namespace kernels @@ -40,12 +38,12 @@ namespace kernels * * @note The implementation supports only 3D input tensors. */ -class ClQuantizationKernel : public IClKernel +class ClQuantizeKernel : public IClKernel { public: /** Default constructor */ - ClQuantizationKernel(); - ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizationKernel); + ClQuantizeKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClQuantizeKernel); /** Set the input, output. * * @param[in] compile_context The compile context to be used. 
@@ -54,11 +52,10 @@ public: * * @note Output auto initialization is not supported by this kernel */ - void configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst); - /** Static function to check if given info will lead to a valid configuration of @ref ClQuantizationKernel + void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst); + /** Static function to check if given info will lead to a valid configuration * - * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. - * @param[in] dst Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16. + * Similar to @ref ClQuantizeKernel::configure() * * @return a status */ @@ -70,4 +67,4 @@ public: } // namespace kernels } // namespace opencl } // namespace arm_compute -#endif /*ARM_COMPUTE_CL_QUANTIZATION_KERNEL_H */ +#endif /* ARM_COMPUTE_CL_QUANTIZE_KERNEL_H */ |