path: root/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
author     Gian Marco <gianmarco.iodice@arm.com>        2017-12-07 09:26:56 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:42:17 +0000
commit     7f0f790ae7f5dd044a5d7564492583b8df974a11 (patch)
tree       b3d6b7bdb0c6efb1dc4b9547dcc0f1fbcf1b4ee3 /src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
parent     f2ad401d1db79373f1c6d1167ae10c94d706cce8 (diff)
download   ComputeLibrary-7f0f790ae7f5dd044a5d7564492583b8df974a11.tar.gz
COMPMID-731 - Remove padding requirements for NEGEMMLowpOutputStage

Used a left-over for loop in:
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
- NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
in order to remove the padding requirements for AndroidNN.

Change-Id: I8ef529fc3d1adecf15fbe42002d99bc0030f131f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112266
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
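For context, the sketch below illustrates the main-loop/left-over-loop pattern this change introduces. It is a minimal sketch, not the library's kernel code: process_row and the placeholder per-element operation are our own names for illustration. Whole blocks of 16 elements are handled first, then a scalar tail loop consumes the remaining elements, so the kernel never reads or writes past the end of a row and therefore needs no tensor padding.

#include <cstdint>

// Illustrative only: process_row and the per-element cast are placeholders;
// the real kernel applies the NEON quantization shown in the diff below.
void process_row(const int32_t *in, uint8_t *out, int width)
{
    const int window_step_x = 16; // block size of the vectorized main loop
    int       x             = 0;

    // Main loop: whole blocks of 16 elements (NEON-vectorized in the kernel)
    for(; x <= width - window_step_x; x += window_step_x)
    {
        for(int i = 0; i < window_step_x; ++i)
        {
            out[x + i] = static_cast<uint8_t>(in[x + i]); // placeholder per-element op
        }
    }

    // Left-over loop: scalar tail, one element at a time, never past 'width'
    for(; x < width; ++x)
    {
        out[x] = static_cast<uint8_t>(in[x]); // placeholder per-element op
    }
}

Because the tail is handled element by element, the kernel window can advance with a step of 1 and no access window padding has to be requested, which is exactly what the changes to validate_and_configure_window() below implement.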
Diffstat (limited to 'src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp')
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp | 154
1 file changed, 114 insertions(+), 40 deletions(-)
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index 1e369ab03a..8b3f2383ab 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -66,7 +66,10 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
{
- constexpr unsigned int num_elems_processed_per_iteration = 16;
+ // Note: This kernel processes 16 elements per iteration.
+ // However, since the tail is handled by a left-over for loop, no out-of-bounds read or write is allowed.
+ // For this reason num_elems_processed_per_iteration is set to 1
+ constexpr unsigned int num_elems_processed_per_iteration = 1;
// Configure kernel window
Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
@@ -86,7 +89,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
if(bias != nullptr)
{
- AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
+ AccessWindowStatic bias_access(bias, 0, 0, bias->dimension(0), bias->dimension(1));
window_changed = window_changed || update_window_and_padding(win, bias_access);
}
@@ -144,6 +147,37 @@ inline uint8x16_t finalize_quantization(int32x4x4_t &in_s32, int result_fixedpoi
return out_u8;
}
+
+/* Function used by the left-over for loop to perform the quantization */
+template <bool is_bounded_relu>
+inline uint8_t finalize_quantization(int32x4_t in_s32, int result_fixedpoint_multiplier, int32_t result_shift, int32x4_t result_offset_after_shift_s32, uint8_t min_u8, uint8_t max_u8)
+{
+ const static int32x4_t zero_s32 = vdupq_n_s32(0);
+ const static int32x4_t sat_value_s32 = vdupq_n_s32(255);
+
+ // Fixed point multiplication with vector saturating rounding doubling multiply high with scalar
+ in_s32 = vqrdmulhq_n_s32(in_s32, result_fixedpoint_multiplier);
+
+ // Round to the nearest division by a power-of-two using result_shift_s32
+ in_s32 = rounding_divide_by_pow2(in_s32, result_shift);
+
+ // Add the offset terms
+ in_s32 = vaddq_s32(in_s32, result_offset_after_shift_s32);
+
+ // Saturate negative values
+ in_s32 = vmaxq_s32(in_s32, zero_s32);
+ in_s32 = vminq_s32(in_s32, sat_value_s32);
+
+ auto out_u8 = static_cast<uint8_t>(vgetq_lane_s32(in_s32, 0));
+
+ if(is_bounded_relu)
+ {
+ out_u8 = std::max(out_u8, min_u8);
+ out_u8 = std::min(out_u8, max_u8);
+ }
+
+ return out_u8;
+}
} // namespace
namespace arm_compute
@@ -161,63 +195,103 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window
ARM_COMPUTE_UNUSED(min_u8);
ARM_COMPUTE_UNUSED(max_u8);
- Iterator in(_input, window);
- Iterator out(_output, window);
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win(window);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator in(_input, win);
+ Iterator out(_output, win);
if(_bias != nullptr)
{
Window win_biases;
- win_biases.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().step()));
+ win_biases.set(Window::DimX, Window::Dimension(0, 1, 1));
win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
Iterator bias(_bias, win_biases);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates & id)
{
- int32x4x4_t in_s32 =
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
+ int32x4x4_t in_s32 =
{
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
- }
- };
-
- const int32x4x4_t bias_s32 =
- {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ const int32x4x4_t bias_s32 =
{
- vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 0),
- vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 4),
- vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 8),
- vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + 12)
- }
- };
-
- // Add the bias to GEMM's result
- in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
- in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
- in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
- in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
-
- vst1q_u8(out.ptr(), finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(bias.ptr()) + x + 12)
+ }
+ };
+
+ // Add the bias to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], bias_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], bias_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], bias_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], bias_s32.val[3]);
+
+ vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32_t bias_value = *(reinterpret_cast<const int32_t *>(bias.ptr()) + x);
+ int32_t in_value = *(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Add bias
+ in_value += bias_value;
+
+ // Finalize and store the result
+ *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(vdupq_n_s32(in_value), _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min),
+ static_cast<uint8_t>(_max));
+ }
},
in, bias, out);
}
else
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates & id)
{
- int32x4x4_t in_s32 =
+ // Compute 16 elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
+ int32x4x4_t in_s32 =
{
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
- vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
- }
- };
-
- vst1q_u8(out.ptr(), finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x + 12)
+ }
+ };
+
+ vst1q_u8(out.ptr() + x, finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, min_u8, max_u8));
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const int32x4_t in_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(in.ptr()) + x);
+
+ // Finalize and store the result
+ *(out.ptr() + x) = finalize_quantization<is_bounded_relu>(in_s32, _result_fixedpoint_multiplier, _result_shift, result_offset_after_shift_s32, static_cast<uint8_t>(_min), static_cast<uint8_t>(_max));
+ }
},
in, out);
}
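
As a closing note on the arithmetic: the scalar finalize_quantization() overload added above applies, per element, a saturating rounding doubling multiply-high, a rounding right shift, an offset addition and a clamp to [0, 255], plus the optional bounded-ReLU clamp. The plain-C++ sketch below shows that sequence without NEON intrinsics; quantize_down_scalar is our own helper name, the multiply follows a gemmlowp-style formulation, and the simple rounding shift is an assumption that does not reproduce the NEON rounding_divide_by_pow2 fixup for negative values exactly.

#include <algorithm>
#include <cstdint>
#include <limits>

// Hedged scalar sketch of the per-element quantization; not part of the patch.
inline uint8_t quantize_down_scalar(int32_t v, int32_t result_fixedpoint_multiplier,
                                    int32_t result_shift, int32_t result_offset_after_shift,
                                    uint8_t min_u8, uint8_t max_u8, bool is_bounded_relu)
{
    // Saturating rounding doubling multiply returning the high half
    // (scalar counterpart of vqrdmulhq_n_s32, gemmlowp-style formulation)
    const bool    overflow = (v == std::numeric_limits<int32_t>::min()) &&
                             (result_fixedpoint_multiplier == std::numeric_limits<int32_t>::min());
    const int64_t ab       = static_cast<int64_t>(v) * static_cast<int64_t>(result_fixedpoint_multiplier);
    const int64_t nudge    = (ab >= 0) ? (int64_t{1} << 30) : (1 - (int64_t{1} << 30));
    int32_t       fixed    = overflow ? std::numeric_limits<int32_t>::max()
                                      : static_cast<int32_t>((ab + nudge) / (int64_t{1} << 31));

    // Round-to-nearest division by 2^result_shift (simplified for this sketch)
    if(result_shift > 0)
    {
        fixed = (fixed + (1 << (result_shift - 1))) >> result_shift;
    }

    // Add the offset after the shift and saturate to the unsigned 8-bit range
    fixed = std::min(std::max(fixed + result_offset_after_shift, 0), 255);

    uint8_t out = static_cast<uint8_t>(fixed);
    if(is_bounded_relu)
    {
        // Optional bounded ReLU: clamp to the requested [min, max] interval
        out = std::min(std::max(out, min_u8), max_u8);
    }
    return out;
}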