path: root/src/core/cpu/kernels
author    Manuel Bottini <manuel.bottini@arm.com>    2021-06-17 17:18:45 +0100
committer Manuel Bottini <manuel.bottini@arm.com>    2021-06-22 17:03:54 +0000
commit    ae58bdf3b58739e105a24e3640d0245e81cea5ee (patch)
tree      e993b8768c3eff364a7c706db411c799fa86bfe0 /src/core/cpu/kernels
parent    2db3a9955ef22be4be8ccd5a45bc0973ef80e42a (diff)
download  ComputeLibrary-ae58bdf3b58739e105a24e3640d0245e81cea5ee.tar.gz
Port NEGEMMLowp Part 1
Details:
Port NEGEMMLowpQuantizeDownInt32ScaleKernel to CpuGemmLowpQuantizeDownInt32ScaleKernel
Port NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel to CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
Port NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel to CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel
Port NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel to CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
Port NEGEMMLowpOutputStage functions to CpuGemmLowpOutputStage operators

Partially Resolves: COMPMID-4403

Change-Id: I6d5f45e43f35d731d564ed3b5c0e804d2a318fb1
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5833
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
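For orientation, a minimal sketch of how one of the ported kernels is driven after this change: configure() now takes ITensorInfo pointers only, and the actual tensors are supplied at run time through an ITensorPack. The shapes and output-stage values below are illustrative assumptions, not taken from the patch.

#include "arm_compute/runtime/Tensor.h"
#include "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h"

using namespace arm_compute;

void run_quantize_down_example()
{
    // int32 GEMM accumulators in, QASYMM8 out (shapes are illustrative)
    Tensor acc, dst;
    acc.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::S32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8));

    GEMMLowpOutputStageInfo info{};
    info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN;
    info.gemmlowp_offset     = 2;
    info.gemmlowp_multiplier = 600;
    info.gemmlowp_shift      = 16;
    info.gemmlowp_min_bound  = 0;
    info.gemmlowp_max_bound  = 255;
    info.output_data_type    = DataType::QASYMM8;

    // Kernels are now stateless with respect to tensors: configure on metadata...
    cpu::kernels::CpuGemmLowpQuantizeDownInt32ScaleKernel k;
    k.configure(acc.info(), nullptr /* no bias */, dst.info(), &info);

    acc.allocator()->allocate();
    dst.allocator()->allocate();

    // ...and pass the tensors through an ITensorPack at execution time.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, &acc);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    k.run_op(pack, k.window(), ThreadInfo{});
}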
Diffstat (limited to 'src/core/cpu/kernels')
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp                       326
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h                         107
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp    227
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h     111
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp     239
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h      114
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp    236
-rw-r--r--  src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h     108
8 files changed, 1468 insertions, 0 deletions
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp
new file mode 100644
index 0000000000..f1c797244a
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "src/core/AccessWindowStatic.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32);
+
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_max_bound > std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type)));
+ ARM_COMPUTE_RETURN_ERROR_ON(output_stage->gemmlowp_min_bound < std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
+ || output_stage->gemmlowp_min_bound > output_stage->gemmlowp_max_bound);
+
+ // Check bias if it exists
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0));
+ }
+
+ if(dst->total_size() != 0)
+ {
+ if(dst->data_type() != output_stage->output_data_type && (output_stage->output_data_type == DataType::QASYMM8 || output_stage->output_data_type == DataType::QASYMM8_SIGNED))
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Mismatching data types");
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+ }
+
+ return Status{};
+}
+
+inline void scale_input(int32x4x4_t &in_s32, int32x4_t result_offset_s32, int32_t result_mult_int)
+{
+ // Add the offset terms to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_s32);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_s32);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_s32);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_s32);
+
+ // Multiply by result_mult_int
+ in_s32.val[0] = vmulq_n_s32(in_s32.val[0], result_mult_int);
+ in_s32.val[1] = vmulq_n_s32(in_s32.val[1], result_mult_int);
+ in_s32.val[2] = vmulq_n_s32(in_s32.val[2], result_mult_int);
+ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], result_mult_int);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value,
+ typename wrapper::traits::neon_vector<T, 16>::type>::type
+ convert_to_8bit(const int16x8x2_t in_s16)
+{
+ return wrapper::vcombine(wrapper::vqmovun(in_s16.val[0]), wrapper::vqmovun(in_s16.val[1]));
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value,
+ typename wrapper::traits::neon_vector<T, 16>::type>::type
+ convert_to_8bit(const int16x8x2_t in_s16)
+{
+ return wrapper::vcombine(wrapper::vqmovn(in_s16.val[0]), wrapper::vqmovn(in_s16.val[1]));
+}
+
+template <typename T>
+inline typename wrapper::traits::neon_vector<T, 16>::type finalize_quantization(int32x4x4_t &in_s32, int32x4_t result_shift_s32, typename wrapper::traits::neon_vector<T, 16>::type min,
+ typename wrapper::traits::neon_vector<T, 16>::type max)
+{
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
+
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
+ }
+ };
+
+ // Convert S16 to S8 or U8
+ typename wrapper::traits::neon_vector<T, 16>::type out = convert_to_8bit<T>(in_s16);
+
+ out = wrapper::vmax(out, min);
+ out = wrapper::vmin(out, max);
+
+ return out;
+}
+} // namespace
+
+template <typename T>
+void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window)
+{
+ using VectorType = typename wrapper::traits::neon_vector<T, 16>::type;
+
+ const int32x4_t result_offset_s32 = vdupq_n_s32(_output_stage->gemmlowp_offset);
+ const int32x4_t result_shift_s32 = vdupq_n_s32(-_output_stage->gemmlowp_shift);
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ const int clamp_min = (_is_bounded_relu) ? _output_stage->gemmlowp_min_bound : std::numeric_limits<T>::lowest();
+ const int clamp_max = (_is_bounded_relu) ? _output_stage->gemmlowp_max_bound : std::numeric_limits<T>::max();
+
+ VectorType min = wrapper::vdup_n(static_cast<T>(clamp_min), wrapper::traits::vector_128_tag{});
+ VectorType max = wrapper::vdup_n(static_cast<T>(clamp_max), wrapper::traits::vector_128_tag{});
+
+ Window win(window);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(src, win);
+ Iterator out(dst, win);
+
+ if(bias != nullptr)
+ {
+ Window win_biases;
+ win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator bias_i(bias, win_biases);
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ const int32x4x4_t bias_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 12)
+ }
+ };
+
+ // Add the bias to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+ // Add the offset terms to GEMM's result and multiply by result_mult_int
+ scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);
+
+ wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int bias_value = *(reinterpret_cast<const int *>(bias_i.ptr()) + x);
+ int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);
+
+ // Quantize
+ in_value = ((in_value + bias_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;
+
+ // Store the result
+ *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
+ }
+ },
+ in, bias_i, out);
+ }
+ else
+ {
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ // Add the offset terms to GEMM's result and multiply by result_mult_int
+ scale_input(in_s32, result_offset_s32, _output_stage->gemmlowp_multiplier);
+
+ wrapper::vstore(reinterpret_cast<T *>(out.ptr() + x), finalize_quantization<T>(in_s32, result_shift_s32, min, max));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ int in_value = *(reinterpret_cast<const int *>(in.ptr()) + x);
+
+ // Quantize
+ in_value = ((in_value + _output_stage->gemmlowp_offset) * _output_stage->gemmlowp_multiplier) >> _output_stage->gemmlowp_shift;
+
+ // Store the result
+ *(out.ptr() + x) = static_cast<T>(utility::clamp<int>(in_value, clamp_min, clamp_max));
+ }
+ },
+ in, out);
+ }
+}
+
+void CpuGemmLowpQuantizeDownInt32ScaleKernel::configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
+{
+ ARM_COMPUTE_UNUSED(bias);
+ // Perform validate step
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, output_stage);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*dst, src->clone()->set_data_type(output_stage->output_data_type));
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src,
+ bias,
+ dst,
+ output_stage));
+
+ _output_stage = output_stage;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*src, Steps());
+
+ ICpuKernel::configure(win);
+
+ // Check if we need to clamp the result using min and max
+ _is_bounded_relu = ((_output_stage->gemmlowp_min_bound != _output_stage->gemmlowp_max_bound)
+ && !(_output_stage->gemmlowp_min_bound == std::get<0>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))
+ && _output_stage->gemmlowp_max_bound == std::get<1>(quantization::get_min_max_values_from_quantized_data_type(output_stage->output_data_type))));
+ if(_output_stage->output_data_type == DataType::QASYMM8)
+ {
+ _func = &CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal<uint8_t>;
+ }
+ else if(_output_stage->output_data_type == DataType::QASYMM8_SIGNED)
+ {
+ _func = &CpuGemmLowpQuantizeDownInt32ScaleKernel::run_internal<int8_t>;
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Data type not supported");
+ }
+}
+
+Status CpuGemmLowpQuantizeDownInt32ScaleKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, output_stage));
+ return Status{};
+}
+
+void CpuGemmLowpQuantizeDownInt32ScaleKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
+
+ auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ auto bias = tensors.get_const_tensor(TensorType::ACL_BIAS);
+ auto dst = tensors.get_tensor(TensorType::ACL_DST);
+ (this->*_func)(src, bias, dst, window);
+}
+
+const char *CpuGemmLowpQuantizeDownInt32ScaleKernel::name() const
+{
+ return "CpuGemmLowpQuantizeDownInt32ScaleKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
\ No newline at end of file
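Distilled from the leftover-element path above, the per-element arithmetic of this kernel reduces to the following scalar form (a reference sketch, not part of the patch):

#include <algorithm>
#include <cstdint>

// Scalar equivalent of one element of CpuGemmLowpQuantizeDownInt32ScaleKernel:
// add bias and offset, scale by the integer multiplier, arithmetic-shift right,
// clamp (type range, or tighter bounds for a fused bounded ReLU), then narrow.
uint8_t quantize_down_scale(int32_t acc, int32_t bias, int32_t offset,
                            int32_t mult, int32_t shift,
                            int32_t clamp_min, int32_t clamp_max)
{
    int32_t v = (acc + bias + offset) * mult;
    v = v >> shift;
    v = std::min(std::max(v, clamp_min), clamp_max);
    return static_cast<uint8_t>(v);
}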
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h
new file mode 100644
index 0000000000..646242dc7e
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ScaleKernel.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2020-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32_SCALE_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32_SCALE_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
+ * The following computations will be performed by the kernel:
+ *
+ * -# Add offset terms to final result
+ * -# Multiply each entry of result by result_mult_int
+ * -# Add bias to final result if bias tensor is not a nullptr
+ * -# Shift the int32 accumulator by result_shift
+ * -# Clamp the value between the specified min and max bounds
+ * -# Clamp the resulting int32 values:
+ *    - to the [0..255] range and cast to QASYMM8.
+ *    - to the [-128..127] range and cast to QASYMM8_SIGNED.
+ *
+ */
+class CpuGemmLowpQuantizeDownInt32ScaleKernel : public ICpuKernel
+{
+public:
+ /** Default constructor */
+ CpuGemmLowpQuantizeDownInt32ScaleKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ScaleKernel);
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] src Input tensor info. Data type supported: S32
+ * @param[in] bias Biases tensor info. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] dst Output tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED
+ * @param[in] output_stage GEMMLowp output stage metadata.
+ */
+ void configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to CpuGemmLowpQuantizeDownInt32ScaleKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, const GEMMLowpOutputStageInfo *output_stage);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+private:
+ /** Template function to run the CpuGemmLowpQuantizeDownInt32ScaleKernel
+ *
+ * @param[in] src Input tensor
+ * @param[in] bias Biases tensor
+ * @param[out] dst Output tensor
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window())
+ */
+ template <typename T>
+ void run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ /** Common signature for all the specialised CpuGemmLowpQuantizeDownInt32ScaleKernel functions
+ *
+ * @param[in] src Input tensor
+ * @param[in] bias Biases tensor
+ * @param[out] dst Output tensor
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using QuantizeDownFunctionPtr = void (CpuGemmLowpQuantizeDownInt32ScaleKernel::*)(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ QuantizeDownFunctionPtr _func{ nullptr };
+ const GEMMLowpOutputStageInfo *_output_stage{ nullptr };
+ bool _is_bounded_relu{ false };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32_SCALE_KERNEL_H */
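The static validate() above supports the usual ACL pattern of checking a configuration before committing to it; a brief sketch (reusing the acc, dst and info names assumed in the earlier example, plus <iostream> for reporting):

const Status s = cpu::kernels::CpuGemmLowpQuantizeDownInt32ScaleKernel::validate(
    acc.info(), nullptr /* no bias */, dst.info(), &info);
if(s.error_code() != ErrorCode::OK)
{
    std::cerr << s.error_description() << std::endl;
}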
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
new file mode 100644
index 0000000000..390e269cbb
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.cpp
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/NEON/NESymm.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(min > max);
+
+ // Check bias if it exists
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0));
+ }
+
+ if(dst->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QSYMM16);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, src);
+ }
+
+ return Status{};
+}
+} // namespace
+
+template <bool is_bounded_relu>
+void CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window)
+{
+ const int16x8_t min_s16 = vdupq_n_s16(static_cast<int16_t>(_min));
+ const int16x8_t max_s16 = vdupq_n_s16(static_cast<int16_t>(_max));
+
+ ARM_COMPUTE_UNUSED(min_s16);
+ ARM_COMPUTE_UNUSED(max_s16);
+
+ const int window_step_x = 8;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(src, win_collapsed);
+ Iterator out(dst, win_collapsed);
+ if(bias != nullptr)
+ {
+ Window win_biases;
+ win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator bias_i(bias, win_biases);
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute 8 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x2_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4)
+ }
+ };
+
+ const int32x4x2_t bias_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4)
+ }
+ };
+
+ // Add the bias to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+
+ vst1q_s16(reinterpret_cast<int16_t *>(out.ptr()) + x, finalize_quantization_int16<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, min_s16, max_s16));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x);
+ int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Add bias
+ in_value += bias_value;
+ // Finalize and store the result
+ *(reinterpret_cast<int16_t *>(out.ptr()) + x) = finalize_quantization_int16<is_bounded_relu>(in_value, _result_fixedpoint_multiplier, _result_shift, static_cast<int16_t>(_min),
+ static_cast<int16_t>(_max));
+ }
+ },
+ in, out, bias_i);
+ }
+ else
+ {
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute 8 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x2_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4)
+ }
+ };
+
+ vst1q_s16(reinterpret_cast<int16_t *>(out.ptr()) + x, finalize_quantization_int16<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, min_s16, max_s16));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+ ARM_COMPUTE_UNUSED(in_value);
+ // Finalize and store the result
+ *(reinterpret_cast<int16_t *>(out.ptr()) + x) = finalize_quantization_int16<is_bounded_relu>(in_value, _result_fixedpoint_multiplier, _result_shift, static_cast<int16_t>(_min),
+ static_cast<int16_t>(_max));
+ }
+ },
+ in, out);
+ }
+}
+
+void CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, int result_fixedpoint_multiplier, int result_shift,
+ int min, int max)
+{
+ // Perform validate step
+ ARM_COMPUTE_UNUSED(bias, dst);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, min, max));
+
+ _result_fixedpoint_multiplier = result_fixedpoint_multiplier;
+ _result_shift = result_shift;
+ _min = min;
+ _max = max;
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*dst, src->clone()->set_data_type(DataType::QSYMM16));
+ // Configure kernel window
+ Window win_config = calculate_max_window(*src, Steps());
+ ICpuKernel::configure(win_config);
+
+ // Check if we need to clamp the result using min and max
+ const bool is_bounded_relu = !(min <= -32768 && max >= 32767);
+ _func = is_bounded_relu ? &CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run_internal<true> :
+ &CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run_internal<false>;
+}
+
+Status CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, min, max));
+ return Status{};
+}
+
+void CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
+
+ auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ auto bias = tensors.get_const_tensor(TensorType::ACL_BIAS);
+ auto dst = tensors.get_tensor(TensorType::ACL_DST);
+
+ (this->*_func)(src, bias, dst, window);
+}
+
+const char *CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::name() const
+{
+ return "CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
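finalize_quantization_int16() (pulled in from src/core/NEON/NESymm.h) performs gemmlowp-style fixed-point rescaling. As a reference for what the vector code computes per element, a scalar sketch assuming a non-negative result_shift:

#include <algorithm>
#include <cstdint>

// Saturating rounding doubling high multiply: top 32 bits of (a * b * 2), rounded.
int32_t sat_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if(a == INT32_MIN && b == INT32_MIN) return INT32_MAX; // the only overflow case
    const int64_t ab    = static_cast<int64_t>(a) * b;
    const int64_t nudge = ab >= 0 ? (INT64_C(1) << 30) : 1 - (INT64_C(1) << 30);
    return static_cast<int32_t>((ab + nudge) / (INT64_C(1) << 31));
}

// Rounding arithmetic shift right by a power of two.
int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (INT32_C(1) << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

int16_t quantize_down_int16(int32_t acc, int32_t multiplier, int shift)
{
    int32_t v = sat_rounding_doubling_high_mul(acc, multiplier);
    v         = rounding_divide_by_pow2(v, shift);
    v         = std::min<int32_t>(INT16_MAX, std::max<int32_t>(INT16_MIN, v));
    return static_cast<int16_t>(v);
}

For a concrete feel of the parameters: a real rescale factor of 2^-11 would be encoded as multiplier = 1 << 30 with shift = 10, since the doubling high multiply contributes a factor of 2^-1 and the rounding shift the remaining 2^-10.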
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
new file mode 100644
index 0000000000..4d743e9a0a
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QSYMM16 value.
+ * The following computations will be performed by the kernel:
+ *
+ * -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
+ * -# Add bias to final result if bias tensor is not a nullptr
+ * -# Round to nearest division by a power-of-two using result_shift
+ * -# Clamp the value between the specified min and max bounds
+ * -# Clamp the resulting int32 values to the [-32768, 32767] range and cast to QSYMM16.
+ *
+ */
+class CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel : public ICpuKernel
+{
+public:
+ /** Default constructor */
+ CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel);
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] src Input tensor info. Data type supported: S32
+ * @param[in] bias Biases tensor info. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] dst Output tensor info. Data type supported: QSYMM16
+ * @param[in] result_fixedpoint_multiplier Fixed point value to be multiplied with each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift Integer value used to round to nearest division by a power-of-two the result after the fixed point multiplication
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to 0.
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to 0.
+ */
+ void configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, int result_fixedpoint_multiplier, int result_shift, int min = 0, int max = 0);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min = 0, int max = 0);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+private:
+ /** Template function to run the CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
+ *
+ * @param[in] src Input tensor
+ * @param[in] bias Bias tensor
+ * @param[out] dst Output tensor
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool is_bounded_relu>
+ void run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ /** Common signature for all the specialised CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel functions
+ *
+ * @param[in] src Input tensor
+ * @param[in] bias Bias tensor
+ * @param[out] dst Output tensor
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using QuantizeDownFunctionPtr = void (CpuGemmLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::*)(
+ const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ QuantizeDownFunctionPtr _func{ nullptr };
+ int _result_fixedpoint_multiplier{ 0 };
+ int _result_shift{ 0 };
+ int _min{ 0 };
+ int _max{ 0 };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT16_SCALEBYFIXEDPOINT_KERNEL_H */
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp
new file mode 100644
index 0000000000..318b6a06f8
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/NEON/NEAsymm.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(min > max);
+
+ // Check bias if it exists
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0));
+ }
+
+ if(dst->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8_SIGNED);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, src);
+ }
+
+ return Status{};
+}
+} // namespace
+
+template <bool is_bounded_relu>
+void CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window)
+{
+ const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(_result_offset_after_shift);
+ const int8x16_t min_s8 = vdupq_n_s8(static_cast<int8_t>(_min));
+ const int8x16_t max_s8 = vdupq_n_s8(static_cast<int8_t>(_max));
+
+ ARM_COMPUTE_UNUSED(min_s8, max_s8);
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(src, win_collapsed);
+ Iterator out(dst, win_collapsed);
+ if(bias != nullptr)
+ {
+ Window win_biases;
+ win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator bias_i(bias, win_biases);
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ const int32x4x4_t bias_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 12)
+ }
+ };
+
+ // Add the bias to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr() + x),
+ finalize_quantization(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_s8, max_s8, is_bounded_relu));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x);
+ int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Add bias
+ in_value += bias_value;
+ // Finalize and store the result
+ *reinterpret_cast<int8_t *>(out.ptr() + x) = finalize_quantization(in_value, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift,
+ static_cast<int8_t>(_min), static_cast<int8_t>(_max), is_bounded_relu);
+ }
+ },
+ in, out, bias_i);
+ }
+ else
+ {
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr() + x),
+ finalize_quantization(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_s8, max_s8, is_bounded_relu));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Finalize and store the result
+ *reinterpret_cast<int8_t *>(out.ptr() + x) = finalize_quantization(in_value, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift,
+ static_cast<int8_t>(_min), static_cast<int8_t>(_max), is_bounded_relu);
+ }
+ },
+ in, out);
+ }
+}
+
+void CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, int result_fixedpoint_multiplier, int result_shift,
+ int result_offset_after_shift, int min, int max)
+{
+ ARM_COMPUTE_UNUSED(bias);
+ // Perform validate step
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, min, max));
+
+ _result_fixedpoint_multiplier = result_fixedpoint_multiplier;
+ _result_shift = result_shift;
+ _result_offset_after_shift = result_offset_after_shift;
+ _min = min;
+ _max = max;
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*dst, src->clone()->set_data_type(DataType::QASYMM8_SIGNED));
+
+ // Configure kernel window
+ Window win_config = calculate_max_window(*src, Steps());
+ ICpuKernel::configure(win_config);
+
+ // Check if we need to clamp the result using min and max
+ const bool is_bounded_relu = !(min <= -128 && max >= 127);
+ _func = is_bounded_relu ? &CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::run_internal<true> :
+ &CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::run_internal<false>;
+}
+
+Status CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, min, max));
+ return Status{};
+}
+
+void CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
+
+ auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ auto bias = tensors.get_const_tensor(TensorType::ACL_BIAS);
+ auto dst = tensors.get_tensor(TensorType::ACL_DST);
+
+ (this->*_func)(src, bias, dst, window);
+}
+
+const char *CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::name() const
+{
+ return "CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
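The result_fixedpoint_multiplier / result_shift pair passed to these kernels encodes the real requantization factor (input scale divided by output scale). ACL provides helpers for this in arm_compute/core/utils/quantization/AsymmHelpers.h; for reference, the standard derivation looks roughly like this (a sketch, assuming a factor in (0, 1)):

#include <cmath>
#include <cstdint>

// Decompose a real multiplier m in (0, 1) as q * 2^-shift, with q a Q0.31
// fixed-point value in [2^30, 2^31).
void quantize_multiplier(double m, int32_t &quantized_multiplier, int &right_shift)
{
    int exponent = 0;
    const double significand = std::frexp(m, &exponent); // m = significand * 2^exponent
    right_shift = -exponent;
    auto q = static_cast<int64_t>(std::llround(significand * (INT64_C(1) << 31)));
    if(q == (INT64_C(1) << 31)) // significand rounded up to exactly 1.0
    {
        q /= 2;
        --right_shift;
    }
    quantized_multiplier = static_cast<int32_t>(q);
}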
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h
new file mode 100644
index 0000000000..a941f1f542
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2019-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8_SIGNED
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8_SIGNED value.
+ * The following computations will be performed by the kernel:
+ *
+ * -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
+ * -# Add bias to final result if bias tensor is not a nullptr
+ * -# Round to nearest division by a power-of-two using result_shift
+ * -# Add offset to each result
+ * -# Clamp the value between the specified min and max bounds
+ * -# Clamp the resulting int32 values to the [-128..127] range and cast to QASYMM8_SIGNED.
+ *
+ */
+class CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel : public ICpuKernel
+{
+public:
+ /** Default constructor */
+ CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel);
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] src Input tensor info. Data type supported: S32
+ * @param[in] bias Biases tensor info. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] dst Output tensor info. Data type supported: QASYMM8_SIGNED
+ * @param[in] result_fixedpoint_multiplier Fixed point value to be multiplied with each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift Integer value used to round to nearest division by a power-of-two the result after the fixed point multiplication
+ * @param[in] result_offset_after_shift Offset to be applied to result before converting it back to QASYMM8_SIGNED
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to 0.
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED. Defaults to 0.
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions.
+ */
+ void configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min = 0, int max = 0);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min = 0, int max = 0);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+private:
+ /** Template function to run the CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel
+ *
+ * @param[in] src Input tensor
+ * @param[in] bias Bias tensor
+ * @param[out] dst Output tensor
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool is_bounded_relu>
+ void run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ /** Common signature for all the specialised CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel functions
+ *
+ * @param[in] src Input tensor
+ * @param[in] bias Bias tensor
+ * @param[out] dst Output tensor
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using QuantizeDownFunctionPtr = void (CpuGemmLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::*)(
+ const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ QuantizeDownFunctionPtr _func{ nullptr };
+ int _result_fixedpoint_multiplier{ 0 };
+ int _result_shift{ 0 };
+ int _result_offset_after_shift{ 0 };
+ int _min{ 0 };
+ int _max{ 0 };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOINT8_SCALEBYFIXEDPOINT_KERNEL_H */
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
new file mode 100644
index 0000000000..6631a4fc67
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/NEON/NEAsymm.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(min > max);
+
+ // Check bias if it exists
+ if(bias != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, bias);
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != bias->dimension(0));
+ }
+
+ if(dst->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, src);
+ }
+
+ return Status{};
+}
+} // namespace
+
+template <bool is_bounded_relu>
+void CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window)
+{
+ const int32x4_t result_offset_after_shift_s32 = vdupq_n_s32(_result_offset_after_shift);
+ const uint8x16_t min_u8 = vdupq_n_u8(static_cast<uint8_t>(_min));
+ const uint8x16_t max_u8 = vdupq_n_u8(static_cast<uint8_t>(_max));
+
+ ARM_COMPUTE_UNUSED(min_u8);
+ ARM_COMPUTE_UNUSED(max_u8);
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(src, win_collapsed);
+ Iterator out(dst, win_collapsed);
+ if(bias != nullptr)
+ {
+ Window win_biases;
+ win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator bias_i(bias, win_biases);
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ const int32x4x4_t bias_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x + 12)
+ }
+ };
+
+ // Add the bias to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+ vst1q_u8(out.ptr() + x, finalize_quantization(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8, is_bounded_relu));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias_i.ptr()) + x);
+ int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Add bias
+ in_value += bias_value;
+ // Finalize and store the result
+ *(out.ptr() + x) = finalize_quantization(in_value, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max), is_bounded_relu);
+ }
+ },
+ in, out, bias_i);
+ }
+ else
+ {
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ vst1q_u8(out.ptr() + x, finalize_quantization(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8, is_bounded_relu));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Finalize and store the result
+ *(out.ptr() + x) = finalize_quantization(in_value, _result_fixedpoint_multiplier, _result_shift, _result_offset_after_shift, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max), is_bounded_relu);
+ }
+ },
+ in, out);
+ }
+}
+
+void CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, int result_fixedpoint_multiplier, int result_shift,
+ int result_offset_after_shift, int min, int max)
+{
+ ARM_COMPUTE_UNUSED(bias);
+ // Perform validate step
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, bias, dst, min, max));
+
+ _result_fixedpoint_multiplier = result_fixedpoint_multiplier;
+ _result_shift = result_shift;
+ _result_offset_after_shift = result_offset_after_shift;
+ _min = min;
+ _max = max;
+
+ // Output auto-initialization if not yet initialized
+ auto_init_if_empty(*dst, src->clone()->set_data_type(DataType::QASYMM8));
+
+ // Configure kernel window
+ auto win_config = calculate_max_window(*src, Steps());
+ ICpuKernel::configure(win_config);
+
+ // Clamp only when the [min, max] bounds are tighter than the full QASYMM8 range [0, 255]
+ const bool is_bounded_relu = !(min <= 0 && max >= 255);
+ _func = is_bounded_relu ? &CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run_internal<true> :
+ &CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run_internal<false>;
+}
+
+Status CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min, int max)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, bias, dst, min, max));
+ return Status{};
+}
+
+void CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
+
+ auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+ auto bias = tensors.get_const_tensor(TensorType::ACL_BIAS);
+ auto dst = tensors.get_tensor(TensorType::ACL_DST);
+
+ (this->*_func)(src, bias, dst, window);
+}
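+
+// A minimal usage sketch (not part of this patch): how a caller might pack tensors and
+// dispatch run_op through the scheduler. The tensor variables here are illustrative assumptions.
+//
+// ITensorPack pack;
+// pack.add_const_tensor(TensorType::ACL_SRC, gemm_result_s32); // S32 accumulators from GEMMLowp
+// pack.add_const_tensor(TensorType::ACL_BIAS, bias_s32); // optional, may be nullptr
+// pack.add_tensor(TensorType::ACL_DST, dst_qasymm8); // QASYMM8 output
+// NEScheduler::get().schedule_op(&kernel, Window::DimY, kernel.window(), pack);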
+
+const char *CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::name() const
+{
+ return "CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
new file mode 100644
index 0000000000..9b4c056419
--- /dev/null
+++ b/src/core/cpu/kernels/CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H
+#define ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
+ *
+ * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel) and processes it to obtain the final QASYMM8 value.
+ * The following computations will be performed by the kernel:
+ *
+ * -# Compute fixed point multiplication between each entry of the input and result_fixedpoint_multiplier
+ * -# Add bias to the final result if the bias tensor is not a nullptr
+ * -# Round to nearest and divide by a power of two using result_shift
+ * -# Add offset to each result
+ * -# Clamp the value between the specified min and max bounds
+ * -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8.
+ *
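+ * A scalar model of the per-element computation (a minimal sketch with illustrative helper names, not the kernel's actual code):
+ *
+ * @code
+ * int32_t acc = src + bias; // bias addition only if a bias tensor is provided
+ * acc = rounding_doubling_high_mul(acc, result_fixedpoint_multiplier); // fixed point multiply
+ * acc = rounding_divide_by_pow2(acc, result_shift); // round-to-nearest division
+ * acc += result_offset_after_shift; // re-quantization offset
+ * acc = clamp(acc, min, max); // only when the bounds are tighter than [0, 255]
+ * uint8_t out = saturate_cast<uint8_t>(acc); // saturate to [0, 255] and cast
+ * @endcode
+ *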
+ */
+class CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel : public ICpuKernel
+{
+public:
+ /** Default constructor */
+ CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel);
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] src Input tensor info. Data type supported: S32
+ * @param[in] bias Biases tensor info. Only shared biases are supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are a 1D tensor with dimensions [OFM]. Data type supported: same as @p src.
+ * @param[out] dst Output tensor info. Data type supported: QASYMM8
+ * @param[in] result_fixedpoint_multiplier Fixed point value to be multiplied with each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift Number of bits to shift right, with round-to-nearest, after the fixed point multiplication
+ * @param[in] result_offset_after_shift Offset to be applied to the result before converting it back to QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
+ void configure(ITensorInfo *src, ITensorInfo *bias, ITensorInfo *dst, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift, int min = 0, int max = 0);
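+ // Example with illustrative values: requantize with an effective scale of 0.5
+ // (multiplier 1 << 30 in Q0.31, shift 0), offset 128, clamped to [10, 200]:
+ // kernel.configure(&src_info, &bias_info, &dst_info, 1 << 30, 0, 128, 10, 200);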
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src, const ITensorInfo *bias, const ITensorInfo *dst, int min = 0, int max = 0);
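+ // Example (illustrative): check a configuration before configuring the kernel:
+ // Status s = CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(&src_info, &bias_info, &dst_info, 10, 200);
+ // ARM_COMPUTE_ERROR_THROW_ON(s);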
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+private:
+ /** Template function to run the CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
+ *
+ * @param[in] src Input tensor of S32 accumulators
+ * @param[in] bias Bias tensor; can be nullptr when no bias addition is required
+ * @param[out] dst Output tensor of QASYMM8 values
+ * @param[in] window Region on which to execute the kernel (must be a valid region of the window returned by window()).
+ */
+ template <bool is_bounded_relu>
+ void run_internal(const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ /** Common signature for all the specialised CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel functions
+ *
+ * @param[in] window Region on which to execute the kernel.
+ */
+ using QuantizeDownFunctionPtr = void (CpuGemmLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::*)(
+ const ITensor *src, const ITensor *bias, ITensor *dst, const Window &window);
+
+ QuantizeDownFunctionPtr _func{ nullptr };
+ int _result_fixedpoint_multiplier{ 0 };
+ int _result_shift{ 0 };
+ int _result_offset_after_shift{ 0 };
+ int _min{ 0 };
+ int _max{ 0 };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_GEMMLOWP_QUANTIZEDOWNINT32TOUINT8_SCALEBYFIXEDPOINT_KERNEL_H */