author    | Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-27 17:46:17 +0100
committer | felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-28 12:08:05 +0000
commit    | afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      | 03bc7d5a762099989b16a656fa8d397b490ed70e /src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp
parent    | bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  | ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration
file (not part of this delivery). Version 14.0.6 is used.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow-up for formatting of .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
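
For orientation, the changes in the diff below are purely mechanical restylings of the following kind. This is a sketch distilled from the hunks that follow, using hypothetical names rather than the library's own; the actual .clang-format file is, as noted above, not part of this delivery:

    // Before: no space after control-flow keywords, long one-line signatures:
    //   int accumulate(const int *in, const int *bias, int *out, int len);
    //   if(in != nullptr) { ... }
    //
    // After: space after 'if'/'for', one parameter per line once past the
    // column limit, pointer '*' aligned to the right across the parameter list.
    int accumulate(const int *in,
                   const int *bias,
                   int       *out,
                   int        len)
    {
        if (in == nullptr || bias == nullptr || out == nullptr)
        {
            return -1;
        }
        for (int i = 0; i < len; ++i)
        {
            out[i] = in[i] + bias[i];
        }
        return 0;
    }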
Diffstat (limited to 'src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp')
-rw-r--r-- | src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp | 247
1 file changed, 129 insertions(+), 118 deletions(-)
diff --git a/src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp b/src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp
index 3023d93113..eefc294700 100644
--- a/src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp
+++ b/src/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp
@@ -28,13 +28,14 @@
 #include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
 #include "src/core/AccessWindowStatic.h"
-#include "src/core/NEON/wrapper/wrapper.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
+#include "src/core/NEON/wrapper/wrapper.h"
 
 #include <arm_neon.h>
 
@@ -46,26 +47,35 @@ namespace kernels
 {
 namespace
 {
-Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
+Status validate_arguments(const ITensorInfo             *src,
+                          const ITensorInfo             *bias,
+                          const ITensorInfo             *dst,
+                          const GEMMLowpOutputStageInfo *output_stage)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32);
-    ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
-    ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
-                                || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);
+    ARM_COMPUTE_RETURN_ERROR_ON(
+        output_stage->gemmlowp_max_bound >
+        std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
+    ARM_COMPUTE_RETURN_ERROR_ON(
+        output_stage->gemmlowp_min_bound <
+            std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)) ||
+        output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);
 
     // Check biases if exist
-    if(bias != nullptr)
+    if (bias != nullptr)
     {
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias);
         ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
         ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0));
     }
 
-    if(dst->total_size() != 0)
+    if (dst->total_size() != 0)
     {
-        if(dst->data_type() != output_stage->output_data_type && (output_stage->output_data_type == DataType::QASYMM8 || output_stage->output_data_type == DataType::QASYMM8_SIGNED))
+        if (dst->data_type() != output_stage->output_data_type &&
+            (output_stage->output_data_type == DataType::QASYMM8 ||
+             output_stage->output_data_type == DataType::QASYMM8_SIGNED))
         {
             ARM_COMPUTE_RETURN_ERROR_MSG("Mismatching data types");
         }
 
@@ -92,24 +102,26 @@ inline void scale_input(int32x4x4_t &in_s32, int32x4_t result_offset_s32, int32_
 }
 
 template <typename T>
-inline typename std::enable_if<std::is_same<T, uint8_t>::value,
-                               typename wrapper::traits::neon_vector<T, 16>::type>::type
-    convert_to_8bit(const int16x8x2_t in_s16)
+inline
+    typename std::enable_if<std::is_same<T, uint8_t>::value, typename wrapper::traits::neon_vector<T, 16>::type>::type
+    convert_to_8bit(const int16x8x2_t in_s16)
 {
     return wrapper::vcombine(wrapper::vqmovun(in_s16.val[0]), wrapper::vqmovun(in_s16.val[1]));
 }
 
 template <typename T>
-inline typename std::enable_if<std::is_same<T, int8_t>::value,
-                               typename wrapper::traits::neon_vector<T, 16>::type>::type
-    convert_to_8bit(const int16x8x2_t in_s16)
+inline typename std::enable_if<std::is_same<T, int8_t>::value, typename wrapper::traits::neon_vector<T, 16>::type>::type
+convert_to_8bit(const int16x8x2_t in_s16)
 {
     return wrapper::vcombine(wrapper::vqmovn(in_s16.val[0]), wrapper::vqmovn(in_s16.val[1]));
 }
 
 template <typename T>
-inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_shift_s32, typename wrapper::traits::neon_vector<T, 16>::type min,
-                                                                                typename wrapper::traits::neon_vector<T, 16>::type max)
+inline typename wrapper::traits::neon_vector<T, 16>::type
+finalize_quantization(int32x4x4_t                                        &in_s32,
+                      int32x4_t                                           result_shift_s32,
+                      typename wrapper::traits::neon_vector<T, 16>::type min,
+                      typename wrapper::traits::neon_vector<T, 16>::type max)
 {
     // Shift final result (negative value shift right)
     in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
@@ -118,13 +130,8 @@ inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(
     in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
 
     // Convert S32 to S16
-    const int16x8x2_t in_s16 =
-    {
-        {
-            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
-            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
-        }
-    };
+    const int16x8x2_t in_s16 = {{vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+                                 vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))}};
 
     // Convert S16 to S8 or U8
     typename wrapper::traits::neon_vector<T, 16>::type out = convert_to_8bit<T>(in_s16);
@@ -137,7 +144,10 @@ inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(
 } // namespace
 
 template <typename T>
-void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window)
+void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal(const ITensor *src,
+                                                           const ITensor *bias,
+                                                           ITensor       *dst,
+                                                           const Window  &window)
 {
     using VectorType = typename wrapper::traits::neon_vector<T, 16>::type;
 
@@ -159,107 +169,105 @@ void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal(const ITensor *src, c
     Iterator in(src, win);
     Iterator out(dst, win);
 
-    if(bias != nullptr)
+    if (bias != nullptr)
     {
         Window win_biases;
         win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
         win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
 
         Iterator bias_i(bias, win_biases);
-        execute_window_loop(win, [&](const Coordinates &)
-        {
-            // Compute 16 elements per iteration
-            int x = window_start_x;
-            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        execute_window_loop(
+            win,
+            [&](const Coordinates &)
             {
-                int32x4x4_t in_s32 =
+                // Compute 16 elements per iteration
+                int x = window_start_x;
+                for (; x <= (window_end_x - window_step_x); x += window_step_x)
                 {
-                    {
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
-                    }
-                };
-
-                const int32x4x4_t bias_s32 =
+                    int32x4x4_t in_s32 = {{vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+                                           vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+                                           vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+                                           vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)}};
+
+                    const int32x4x4_t bias_s32 = {
+                        {vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
+                         vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4),
+                         vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 8),
+                         vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 12)}};
+
+                    // Add the bias to GEMM's result
+                    in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+                    in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+                    in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+                    in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+                    // Add the offset terms to GEMM's result and multiply by result_mult_int
+                    scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);
+
+                    wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x),
+                                    finalize_quantization<T>(in_s32, result_shift_s32, min, max));
+                }
+
+                // Compute left-over elements
+                for (; x < window_end_x; ++x)
                 {
-                    {
-                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 8),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 12)
-                    }
-                };
-
-                // Add the bias to GEMM's result
-                in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
-                in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
-                in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
-                in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
-
-                // Add the offset terms to GEMM's result and multiply by result_mult_int
-                scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);
-
-                wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
-            }
-
-            // Compute left-over elements
-            for(; x < window_end_x; ++x)
-            {
-                const int bias_value = *(reinterpret_cast<const int *>(bias_i.ptr()) + x);
-                int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);
-
-                // Quantize
-                in_value = ((in_value + bias_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;
-
-                // Store the result
-                *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
-            }
-        },
-        in, bias_i, out);
+                    const int bias_value = *(reinterpret_cast<const int *>(bias_i.ptr()) + x);
+                    int       in_value   = *(reinterpret_cast<const int *>(in.ptr()) + x);
+
+                    // Quantize
+                    in_value = ((in_value + bias_value + _output_stage->gemmlowp_offset) *
+                                _output_stage->gemmlowp_multiplier) >>
+                               _output_stage->gemmlowp_shift;
+
+                    // Store the result
+                    *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
+                }
+            },
+            in, bias_i, out);
     }
     else
     {
-        execute_window_loop(win, [&](const Coordinates &)
-        {
-            // Compute 16 elements per iteration
-            int x = window_start_x;
-            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        execute_window_loop(
+            win,
+            [&](const Coordinates &)
             {
-                int32x4x4_t in_s32 =
+                // Compute 16 elements per iteration
+                int x = window_start_x;
+                for (; x <= (window_end_x - window_step_x); x += window_step_x)
                 {
-                    {
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
-                        vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
-                    }
-                };
-
-                // Add the offset terms to GEMM's result and multiply by result_mult_int
-                scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);
-
-                wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
-            }
-
-            // Compute left-over elements
-            for(; x < window_end_x; ++x)
-            {
-                int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);
+                    int32x4x4_t in_s32 = {{vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+                                           vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+                                           vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+                                           vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)}};
+
+                    // Add the offset terms to GEMM's result and multiply by result_mult_int
+                    scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);
+
+                    wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x),
+                                    finalize_quantization<T>(in_s32, result_shift_s32, min, max));
+                }
+
+                // Compute left-over elements
+                for (; x < window_end_x; ++x)
+                {
+                    int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);
 
-                // Quantize
-                in_value = ((in_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;
+                    // Quantize
+                    in_value = ((in_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >>
+                               _output_stage->gemmlowp_shift;
 
-                // Store the result
-                *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
-            }
-        },
-        in, out);
+                    // Store the result
+                    *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
+                }
+            },
+            in, out);
     }
 }
 
-void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
+void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITensorInfo                   *src,
+                                                        ITensorInfo                   *bias,
+                                                        ITensorInfo                   *dst,
+                                                        const GEMMLowpOutputStageInfo *output_stage)
 {
     ARM_COMPUTE_UNUSED(bias);
     // Perform validate step
@@ -268,10 +276,7 @@ void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITenso
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*dst, src->clone()->set_data_type(output_stage->output_data_type));
 
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src,
-                                                  bias,
-                                                  dst,
-                                                  output_stage));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, output_stage));
 
     _output_stage = output_stage;
 
@@ -281,14 +286,17 @@ void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITenso
     ICpuKernel::configure(win);
 
     // Check if we need to clamp the result using min and max
-    _is_bounded_relu = ((_output_stage->gemmlowp_min_bound != _output_stage->gemmlowp_max_bound)
-                        && !(_output_stage->gemmlowp_min_bound == std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
-                             && _output_stage->gemmlowp_max_bound == std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))));
-    if(_output_stage->output_data_type == DataType::QASYMM8)
+    _is_bounded_relu =
+        ((_output_stage->gemmlowp_min_bound != _output_stage->gemmlowp_max_bound) &&
+         !(_output_stage->gemmlowp_min_bound ==
+               std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)) &&
+           _output_stage->gemmlowp_max_bound ==
+               std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))));
+    if (_output_stage->output_data_type == DataType::QASYMM8)
     {
         _func = &CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal<uint8_t>;
     }
-    else if(_output_stage->output_data_type == DataType::QASYMM8_SIGNED)
+    else if (_output_stage->output_data_type == DataType::QASYMM8_SIGNED)
    {
         _func = &CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal<int8_t>;
     }
@@ -298,7 +306,10 @@ void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITenso
 }
 
-Status CpuGemmLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
+Status CpuGemmLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo             *src,
+                                                         const ITensorInfo             *bias,
+                                                         const ITensorInfo             *dst,
+                                                         const GEMMLowpOutputStageInfo *output_stage)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, output_stage));
     return Status{};
@@ -323,4 +334,4 @@ const char *CpuGemmLowpQuantizeDownInt32ScaleKernel::name() const
 }
 } // namespace kernels
 } // namespace cpu
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
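
The arithmetic this kernel vectorizes is easiest to read in the scalar left-over loops above. Below is a minimal, self-contained sketch of that quantize-down step, mirroring the diff's expression ((value + bias + gemmlowp_offset) * gemmlowp_multiplier) >> gemmlowp_shift followed by a clamp; the struct and function names are hypothetical stand-ins, not the library's GEMMLowpOutputStageInfo API:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical mirror of the output-stage fields the kernel reads.
    struct OutputStageParams
    {
        int32_t offset;     // gemmlowp_offset, added to each accumulator
        int32_t multiplier; // gemmlowp_multiplier, integer scale factor
        int32_t shift;      // gemmlowp_shift, right shift after the multiply
        int32_t clamp_min;  // 0 for QASYMM8, -128 for QASYMM8_SIGNED
        int32_t clamp_max;  // 255 for QASYMM8, 127 for QASYMM8_SIGNED
    };

    // Scalar equivalent of the left-over loop: one S32 GEMM accumulator in,
    // one quantized 8-bit value out.
    template <typename T>
    T quantize_down_scale(int32_t acc, int32_t bias, const OutputStageParams &p)
    {
        int32_t v = ((acc + bias + p.offset) * p.multiplier) >> p.shift;
        return static_cast<T>(std::min(std::max(v, p.clamp_min), p.clamp_max));
    }

    int main()
    {
        const OutputStageParams p{2, 3, 4, 0, 255}; // arbitrary demo values
        // (100 + 5 + 2) * 3 = 321, and 321 >> 4 = 20, already within [0, 255].
        std::printf("%d\n", quantize_down_scale<uint8_t>(100, 5, p)); // prints 20
        return 0;
    }

The NEON main loop performs the same computation 16 elements at a time: scale_input applies the offset and multiplier across four int32x4 lanes, finalize_quantization shifts and narrows S32 to S16 and then to U8/S8 with the saturating vqmovn/vqmovun intrinsics, and the min/max clamp is applied only when the requested bounds are tighter than the output type's natural range (the _is_bounded_relu check in configure).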