From e75a02b60736f37c34388c23c0ccee230f65da59 Mon Sep 17 00:00:00 2001 From: Gian Marco Date: Wed, 8 Nov 2017 12:24:09 +0000 Subject: COMPMID-675 - Reworked NEGEMMLowp interface/function The new interface makes NEGEMMLowp able to work with ASYMM8 data types. Implemented 2 new functions: - NEGEMMLowpMatrixMultiplyCore - NEGEMMLowpOutputStage These functions should make the integration in android NN doable For more information about GEMMLowp: https://github.com/google/gemmlowp/blob/master/doc/low-precision.md Change-Id: Ie2c775f45234f68ca53dba644b3a912b997fd890 Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95504 Tested-by: Kaizen Reviewed-by: Pablo Tello --- .../NEON/kernels/NEGEMMInterleave4x4Kernel.cpp | 3 +- src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp | 509 --------------------- .../kernels/NEGEMMLowpMatrixMultiplyKernel.cpp | 128 +++--- .../kernels/NEGEMMLowpOffsetContributionKernel.cpp | 338 ++++++++++++++ ...GEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp | 141 ++++++ .../NEON/kernels/NEGEMMLowpReductionKernel.cpp | 176 +++---- src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp | 3 +- src/runtime/NEON/functions/NEGEMMLowp.cpp | 134 ------ .../functions/NEGEMMLowpMatrixMultiplyCore.cpp | 84 +++- .../NEON/functions/NEGEMMLowpOutputStage.cpp | 37 ++ 10 files changed, 751 insertions(+), 802 deletions(-) delete mode 100644 src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp create mode 100644 src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp create mode 100644 src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp delete mode 100644 src/runtime/NEON/functions/NEGEMMLowp.cpp create mode 100644 src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp (limited to 'src') diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp index ae5d456141..a29b661a00 100644 --- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp +++ b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp @@ -132,7 +132,8 @@ NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel() void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, + DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_NULLPTR(output); diff --git a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp deleted file mode 100644 index 255e486365..0000000000 --- a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h" - -#include "arm_compute/core/AccessWindowStatic.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/core/Window.h" - -#include -#include -#include - -using namespace arm_compute; - -namespace arm_compute -{ -class Coordinates; -} // namespace arm_compute - -template -void NEGEMMLowpFinalizeKernel::finalize(const Window &window) -{ - const int32x4_t c_offset_s32 = vdupq_n_s32(_c_offset); - const int32x4_t shift_s32 = vdupq_n_s32(-_shift); - - Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimZ); - - if(add_a_offset && add_b_offset) // true, true - { - // Set window for vector_sum_col - Window win_vector_sum_col(collapsed_window); - win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0)); - if(!_slide_vector_sum_col) - { - win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0)); - } - - // Set window for vector_sum_row - Window win_vector_sum_row(collapsed_window); - win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0)); - win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0)); - - Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col); - Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row); - Iterator mm_result(_mm_result, window); - Iterator out(_output, window); - - execute_window_loop(window, [&](const Coordinates & id) - { - // Compute the leftover term due to a_offset. - int32x4x4_t a_offset_term_s32 = - { - { - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 0), - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 4), - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 8), - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 12) - } - }; - - a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset); - a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset); - a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset); - a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset); - - // Compute the leftover term due to b_offset. 
- int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast(vector_sum_row.ptr()) + id.y()); - b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset); - - // Add a_offset_term_s32 and b_offset_term_s32 - int32x4x4_t offset_term_s32 = - { - { - vdupq_n_s32(_k_offset), - vdupq_n_s32(_k_offset), - vdupq_n_s32(_k_offset), - vdupq_n_s32(_k_offset) - } - }; - - offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32)); - offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32)); - offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32)); - offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32)); - - // Add c_offset - offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], c_offset_s32); - offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], c_offset_s32); - offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], c_offset_s32); - offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], c_offset_s32); - - int32x4x4_t in_s32 = - { - { - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) - } - }; - - // Add the offset terms to GEMM's result - in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]); - in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]); - in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]); - in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]); - - // Multiply by c_mult_int - in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int); - in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int); - in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int); - in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int); - - // Shift final result (negative value shift right) - in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32); - in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32); - in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32); - in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32); - - // Convert S32 to U16 - const int16x8x2_t in_s16 = - { - { - vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])), - vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])), - } - }; - - // Convert S16 to S8 - const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1])); - - vst1q_s8(reinterpret_cast(out.ptr()), out_s8); - }, - vector_sum_col, vector_sum_row, mm_result, out); - } - else if(!add_a_offset && add_b_offset) // false, true - { - // Set window for vector_sum_row - Window win_vector_sum_row(collapsed_window); - win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0)); - win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0)); - - Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row); - Iterator mm_result(_mm_result, window); - Iterator out(_output, window); - - execute_window_loop(window, [&](const Coordinates & id) - { - // Compute the leftover term due to b_offset. 
- int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast(vector_sum_row.ptr()) + id.y()); - b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset); - - // Add b_offset_term_s32 and c_offset_term_s32 - int32x4_t offset_term_s32 = vaddq_s32(b_offset_term_s32, c_offset_s32); - - int32x4x4_t in_s32 = - { - { - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) - } - }; - - // Add the offset terms to GEMM's result - in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32); - in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32); - in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32); - in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32); - - // Multiply by c_mult_int - in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int); - in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int); - in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int); - in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int); - - // Shift final result (negative value shift right) - in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32); - in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32); - in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32); - in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32); - - // Convert S32 to U16 - const int16x8x2_t in_s16 = - { - { - vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])), - vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])), - } - }; - - // Convert S16 to S8 - const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1])); - - vst1q_s8(reinterpret_cast(out.ptr()), out_s8); - }, - vector_sum_row, mm_result, out); - } - else if(add_a_offset && !add_b_offset) // true, false - { - // Set window for vector_sum_col - Window win_vector_sum_col(collapsed_window); - win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0)); - if(!_slide_vector_sum_col) - { - win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0)); - } - - Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col); - Iterator mm_result(_mm_result, window); - Iterator out(_output, window); - - execute_window_loop(window, [&](const Coordinates & id) - { - // Compute the leftover term due to a_offset. 
- int32x4x4_t a_offset_term_s32 = - { - { - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 0), - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 4), - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 8), - vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 12) - } - }; - - a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset); - a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset); - a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset); - a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset); - - // Add a_offset_term_s32 and b_offset_term_s32 - int32x4x4_t offset_term_s32 = - { - { - vaddq_s32(c_offset_s32, a_offset_term_s32.val[0]), - vaddq_s32(c_offset_s32, a_offset_term_s32.val[1]), - vaddq_s32(c_offset_s32, a_offset_term_s32.val[2]), - vaddq_s32(c_offset_s32, a_offset_term_s32.val[3]) - } - }; - - int32x4x4_t in_s32 = - { - { - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) - } - }; - - // Add the offset terms to GEMM's result - in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]); - in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]); - in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]); - in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]); - - // Multiply by c_mult_int - in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int); - in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int); - in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int); - in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int); - - // Shift final result (negative value shift right) - in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32); - in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32); - in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32); - in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32); - - // Convert S32 to S16 - const int16x8x2_t in_s16 = - { - { - vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])), - vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])) - } - }; - - // Convert S16 to S8 - const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1])); - - vst1q_s8(reinterpret_cast(out.ptr()), out_s8); - }, - vector_sum_col, mm_result, out); - } - else // false, false - { - Iterator mm_result(_mm_result, window); - Iterator out(_output, window); - - execute_window_loop(window, [&](const Coordinates & id) - { - int32x4x4_t in_s32 = - { - { - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), - vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) - } - }; - - // Add the offset terms to GEMM's result - in_s32.val[0] = vaddq_s32(in_s32.val[0], c_offset_s32); - in_s32.val[1] = vaddq_s32(in_s32.val[1], c_offset_s32); - in_s32.val[2] = vaddq_s32(in_s32.val[2], c_offset_s32); - in_s32.val[3] = vaddq_s32(in_s32.val[3], c_offset_s32); - - // Multiply by c_mult_int - in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int); - in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int); - in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int); - in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int); - - // Shift final result (negative value shift right) - in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32); - in_s32.val[1] = vshlq_s32(in_s32.val[1], 
shift_s32); - in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32); - in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32); - - // Convert S32 to S16 - const int16x8x2_t in_s16 = - { - { - vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])), - vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])) - } - }; - - // Convert U16 to S8 - const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1])); - - vst1q_s8(reinterpret_cast(out.ptr()), out_s8); - }, - mm_result, out); - } -} - -NEGEMMLowpFinalizeKernel::NEGEMMLowpFinalizeKernel() - : _func(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _c_offset(0), _k_offset(0), _c_mult_int(0), _shift(0), - _slide_vector_sum_col(true) -{ -} - -void NEGEMMLowpFinalizeKernel::configure(const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *mm_result, ITensor *output, int32_t num_mtx_a_cols, int32_t a_offset, - int32_t b_offset, - int32_t c_offset, int32_t c_mult_int, int32_t shift) -{ - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S8); - - TensorShape mm_result_shape = mm_result->info()->tensor_shape(); - TensorShape output_shape = output->info()->tensor_shape(); - - mm_result_shape.collapse(2); - output_shape.collapse(2); - - ARM_COMPUTE_ERROR_ON_MSG(mm_result_shape[2] != output_shape[2], "mm_result tensor must have the same number of batches of output tensor"); - - // If a_offset == 0, vector_sum_col can be a nullptr - if(a_offset != 0) - { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); - ARM_COMPUTE_ERROR_ON(vector_sum_col->info()->dimension(0) != mm_result->info()->dimension(0)); - - TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape(); - vector_sum_col_shape.collapse(1); - - // Check if vector_sum_col_shape should be slidden or not - // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1 - // This scenario can happen when the the matrix multiplication is used to perform a convolution operation - _slide_vector_sum_col = vector_sum_col_shape[1] != 1; - } - - // If b_offset == 0, vector_sum_row can be a nullptr - if(b_offset != 0) - { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); - ARM_COMPUTE_ERROR_ON(vector_sum_row->info()->dimension(0) != mm_result->info()->dimension(1)); - - TensorShape vector_sum_row_shape = vector_sum_row->info()->tensor_shape(); - vector_sum_row_shape.collapse(1); - - ARM_COMPUTE_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[2], "mm_result tensor must have the same number of batches of output tensor"); - - if(a_offset != 0) - { - TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape(); - vector_sum_col_shape.collapse(1); - - ARM_COMPUTE_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 - && vector_sum_col_shape[1] != vector_sum_row_shape[1], - "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1"); - } - } - - _vector_sum_col = vector_sum_col; - _vector_sum_row = vector_sum_row; - _mm_result = mm_result; - _output = output; - _a_offset = a_offset; - _b_offset = b_offset; - _k_offset = a_offset * b_offset * num_mtx_a_cols; - _c_offset = c_offset; - _c_mult_int = c_mult_int; - _shift = shift; - - constexpr unsigned 
int num_elems_processed_per_iteration = 16; - - // Configure kernel window - Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration)); - - AccessWindowHorizontal mm_result_access(mm_result->info(), 0, num_elems_processed_per_iteration); - AccessWindowHorizontal output_result_access(output->info(), 0, num_elems_processed_per_iteration); - - // Accordingly with a_offset and b_offset, we can have 4 cases: - // a_offset != 0 && b_offset != 0 - // a_offset = 0 && b_offset != 0 - // a_offset != 0 && b_offset = 0 - // a_offset = 0 && b_offset = 0 - if(a_offset != 0 && b_offset != 0) - { - // Set the function to use - _func = &NEGEMMLowpFinalizeKernel::finalize; - - AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0); - AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration); - - update_window_and_padding(win, - vector_sum_col_access, - vector_sum_row_access, - mm_result_access, - output_result_access); - } - else if(a_offset == 0 && b_offset != 0) - { - // Set the function to use - _func = &NEGEMMLowpFinalizeKernel::finalize; - - AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0); - - update_window_and_padding(win, - vector_sum_row_access, - mm_result_access, - output_result_access); - } - else if(a_offset != 0 && b_offset == 0) - { - // Set the function to use - _func = &NEGEMMLowpFinalizeKernel::finalize; - - AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration); - - update_window_and_padding(win, - vector_sum_col_access, - mm_result_access, - output_result_access); - } - else - { - // Set the function to use - _func = &NEGEMMLowpFinalizeKernel::finalize; - - update_window_and_padding(win, - mm_result_access, - output_result_access); - } - - output_result_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape())); - - INEKernel::configure(win); -} - -void NEGEMMLowpFinalizeKernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - - (this->*_func)(window); -} diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp index 4b9c9f3e64..1352f34e3c 100644 --- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp +++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp @@ -52,7 +52,7 @@ NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel() void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); @@ -127,115 +127,115 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo // All the values needed for computing a single 4x4 block will be read from consecutive memory positions execute_window_loop(window, [&](const Coordinates & id) { - auto *mtx_a0 = reinterpret_cast(ina.ptr()); - auto *mtx_b0 = reinterpret_cast(inb.ptr()); + const uint8_t *mtx_a0 = ina.ptr(); + const uint8_t *mtx_b0 = inb.ptr(); // Note: 
Since the input are all positives, we can use uint32_t // Accumulators for the block 0 - int32x4x4_t c0 = + uint32x4x4_t c0 = { { - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0) + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0) } }; // Accumulators for the block 1 - int32x4x4_t c1 = + uint32x4x4_t c1 = { { - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0) + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0) } }; // Accumulators for the block 2 - int32x4x4_t c2 = + uint32x4x4_t c2 = { { - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0) + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0) } }; // Accumulators for the block 3 - int32x4x4_t c3 = + uint32x4x4_t c3 = { { - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0) + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0) } }; for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16) { - const int8x8_t a00_s8 = vld1_s8(mtx_a0); - const int8x16_t b00_s8 = vld1q_s8(mtx_b0); + const uint8x8_t a00_u8 = vld1_u8(mtx_a0); + const uint8x16_t b00_u8 = vld1q_u8(mtx_b0); // Convert a00_s8 to uint16_t and get the lower part - const int16x4_t a00_s16 = vget_low_s16(vmovl_s8(a00_s8)); + const uint16x4_t a00_u16 = vget_low_u16(vmovl_u8(a00_u8)); - // Convert b00_s8 to int16_t - const int16x4x4_t b00_s16 = + // Convert b00_s8 to uint16_t + const uint16x4x4_t b00_u16 = { { - vget_low_s16(vmovl_s8(vget_low_s8(b00_s8))), - vget_high_s16(vmovl_s8(vget_low_s8(b00_s8))), - vget_low_s16(vmovl_s8(vget_high_s8(b00_s8))), - vget_high_s16(vmovl_s8(vget_high_s8(b00_s8))) + vget_low_u16(vmovl_u8(vget_low_u8(b00_u8))), + vget_high_u16(vmovl_u8(vget_low_u8(b00_u8))), + vget_low_u16(vmovl_u8(vget_high_u8(b00_u8))), + vget_high_u16(vmovl_u8(vget_high_u8(b00_u8))) } }; // 4x4 block 0 - c0.val[0] = vmlal_lane_s16(c0.val[0], b00_s16.val[0], a00_s16, 0); - c0.val[1] = vmlal_lane_s16(c0.val[1], b00_s16.val[1], a00_s16, 0); - c0.val[2] = vmlal_lane_s16(c0.val[2], b00_s16.val[2], a00_s16, 0); - c0.val[3] = vmlal_lane_s16(c0.val[3], b00_s16.val[3], a00_s16, 0); + c0.val[0] = vmlal_lane_u16(c0.val[0], b00_u16.val[0], a00_u16, 0); + c0.val[1] = vmlal_lane_u16(c0.val[1], b00_u16.val[1], a00_u16, 0); + c0.val[2] = vmlal_lane_u16(c0.val[2], b00_u16.val[2], a00_u16, 0); + c0.val[3] = vmlal_lane_u16(c0.val[3], b00_u16.val[3], a00_u16, 0); // 4x4 block 1 - c1.val[0] = vmlal_lane_s16(c1.val[0], b00_s16.val[0], a00_s16, 1); - c1.val[1] = vmlal_lane_s16(c1.val[1], b00_s16.val[1], a00_s16, 1); - c1.val[2] = vmlal_lane_s16(c1.val[2], b00_s16.val[2], a00_s16, 1); - c1.val[3] = vmlal_lane_s16(c1.val[3], b00_s16.val[3], a00_s16, 1); + c1.val[0] = vmlal_lane_u16(c1.val[0], b00_u16.val[0], a00_u16, 1); + c1.val[1] = vmlal_lane_u16(c1.val[1], b00_u16.val[1], a00_u16, 1); + c1.val[2] = vmlal_lane_u16(c1.val[2], b00_u16.val[2], a00_u16, 1); + c1.val[3] = vmlal_lane_u16(c1.val[3], b00_u16.val[3], a00_u16, 1); // 4x4 block 2 - c2.val[0] = vmlal_lane_s16(c2.val[0], b00_s16.val[0], a00_s16, 2); - c2.val[1] = vmlal_lane_s16(c2.val[1], b00_s16.val[1], a00_s16, 2); - c2.val[2] = vmlal_lane_s16(c2.val[2], b00_s16.val[2], a00_s16, 2); - c2.val[3] = vmlal_lane_s16(c2.val[3], b00_s16.val[3], a00_s16, 2); + c2.val[0] = vmlal_lane_u16(c2.val[0], b00_u16.val[0], a00_u16, 2); + c2.val[1] = vmlal_lane_u16(c2.val[1], b00_u16.val[1], a00_u16, 2); + c2.val[2] = vmlal_lane_u16(c2.val[2], b00_u16.val[2], a00_u16, 2); + c2.val[3] = vmlal_lane_u16(c2.val[3], 
b00_u16.val[3], a00_u16, 2); // 4x4 block 3 - c3.val[0] = vmlal_lane_s16(c3.val[0], b00_s16.val[0], a00_s16, 3); - c3.val[1] = vmlal_lane_s16(c3.val[1], b00_s16.val[1], a00_s16, 3); - c3.val[2] = vmlal_lane_s16(c3.val[2], b00_s16.val[2], a00_s16, 3); - c3.val[3] = vmlal_lane_s16(c3.val[3], b00_s16.val[3], a00_s16, 3); + c3.val[0] = vmlal_lane_u16(c3.val[0], b00_u16.val[0], a00_u16, 3); + c3.val[1] = vmlal_lane_u16(c3.val[1], b00_u16.val[1], a00_u16, 3); + c3.val[2] = vmlal_lane_u16(c3.val[2], b00_u16.val[2], a00_u16, 3); + c3.val[3] = vmlal_lane_u16(c3.val[3], b00_u16.val[3], a00_u16, 3); } auto mtx_out = reinterpret_cast(out.ptr()); - vst1q_s32(mtx_out + 0 * out_stride + 0, c0.val[0]); - vst1q_s32(mtx_out + 0 * out_stride + 4, c0.val[1]); - vst1q_s32(mtx_out + 0 * out_stride + 8, c0.val[2]); - vst1q_s32(mtx_out + 0 * out_stride + 12, c0.val[3]); - vst1q_s32(mtx_out + 1 * out_stride + 0, c1.val[0]); - vst1q_s32(mtx_out + 1 * out_stride + 4, c1.val[1]); - vst1q_s32(mtx_out + 1 * out_stride + 8, c1.val[2]); - vst1q_s32(mtx_out + 1 * out_stride + 12, c1.val[3]); - vst1q_s32(mtx_out + 2 * out_stride + 0, c2.val[0]); - vst1q_s32(mtx_out + 2 * out_stride + 4, c2.val[1]); - vst1q_s32(mtx_out + 2 * out_stride + 8, c2.val[2]); - vst1q_s32(mtx_out + 2 * out_stride + 12, c2.val[3]); - vst1q_s32(mtx_out + 3 * out_stride + 0, c3.val[0]); - vst1q_s32(mtx_out + 3 * out_stride + 4, c3.val[1]); - vst1q_s32(mtx_out + 3 * out_stride + 8, c3.val[2]); - vst1q_s32(mtx_out + 3 * out_stride + 12, c3.val[3]); + vst1q_s32(mtx_out + 0 * out_stride + 0, vreinterpretq_s32_u32(c0.val[0])); + vst1q_s32(mtx_out + 0 * out_stride + 4, vreinterpretq_s32_u32(c0.val[1])); + vst1q_s32(mtx_out + 0 * out_stride + 8, vreinterpretq_s32_u32(c0.val[2])); + vst1q_s32(mtx_out + 0 * out_stride + 12, vreinterpretq_s32_u32(c0.val[3])); + vst1q_s32(mtx_out + 1 * out_stride + 0, vreinterpretq_s32_u32(c1.val[0])); + vst1q_s32(mtx_out + 1 * out_stride + 4, vreinterpretq_s32_u32(c1.val[1])); + vst1q_s32(mtx_out + 1 * out_stride + 8, vreinterpretq_s32_u32(c1.val[2])); + vst1q_s32(mtx_out + 1 * out_stride + 12, vreinterpretq_s32_u32(c1.val[3])); + vst1q_s32(mtx_out + 2 * out_stride + 0, vreinterpretq_s32_u32(c2.val[0])); + vst1q_s32(mtx_out + 2 * out_stride + 4, vreinterpretq_s32_u32(c2.val[1])); + vst1q_s32(mtx_out + 2 * out_stride + 8, vreinterpretq_s32_u32(c2.val[2])); + vst1q_s32(mtx_out + 2 * out_stride + 12, vreinterpretq_s32_u32(c2.val[3])); + vst1q_s32(mtx_out + 3 * out_stride + 0, vreinterpretq_s32_u32(c3.val[0])); + vst1q_s32(mtx_out + 3 * out_stride + 4, vreinterpretq_s32_u32(c3.val[1])); + vst1q_s32(mtx_out + 3 * out_stride + 8, vreinterpretq_s32_u32(c3.val[2])); + vst1q_s32(mtx_out + 3 * out_stride + 12, vreinterpretq_s32_u32(c3.val[3])); }, ina, inb, out); } diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp new file mode 100644 index 0000000000..bd550db54c --- /dev/null +++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h" + +#include "arm_compute/core/AccessWindowStatic.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" + +#include +#include +#include + +using namespace arm_compute; + +namespace arm_compute +{ +class Coordinates; +} // namespace arm_compute + +NEGEMMLowpOffsetContributionKernel::NEGEMMLowpOffsetContributionKernel() + : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true) +{ +} + +void NEGEMMLowpOffsetContributionKernel::configure(ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, int32_t k, int32_t a_offset, int32_t b_offset) +{ + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32); + + // If a_offset == 0, vector_sum_col can be a nullptr + if(a_offset != 0) + { + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON(vector_sum_col->info()->dimension(0) != mm_result->info()->dimension(0)); + + TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape(); + vector_sum_col_shape.collapse(1); + + // Check if vector_sum_col_shape should be slidden or not + // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1 + // This scenario can happen when the the matrix multiplication is used to perform a convolution operation + _slide_vector_sum_col = vector_sum_col_shape[1] != 1; + } + + // If b_offset == 0, vector_sum_row can be a nullptr + if(b_offset != 0) + { + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON(vector_sum_row->info()->dimension(0) != mm_result->info()->dimension(1)); + + TensorShape output_shape = mm_result->info()->tensor_shape(); + TensorShape vector_sum_row_shape = vector_sum_row->info()->tensor_shape(); + vector_sum_row_shape.collapse(1); + output_shape.collapse(2); + + ARM_COMPUTE_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[2], "mm_result tensor must have the same number of batches of 
output tensor"); + + if(a_offset != 0) + { + TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape(); + vector_sum_col_shape.collapse(1); + + ARM_COMPUTE_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 + && vector_sum_col_shape[1] != vector_sum_row_shape[1], + "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1"); + } + } + + _vector_sum_col = vector_sum_col; + _vector_sum_row = vector_sum_row; + _mm_result = mm_result; + _a_offset = a_offset; + _b_offset = b_offset; + _k_offset = a_offset * b_offset * k; + + constexpr unsigned int num_elems_processed_per_iteration = 16; + + // Configure kernel window + Window win = calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration)); + + AccessWindowHorizontal mm_result_access(mm_result->info(), 0, num_elems_processed_per_iteration); + + // Accordingly with a_offset and b_offset, we can have 4 cases: + // a_offset != 0 && b_offset != 0 + // a_offset = 0 && b_offset != 0 + // a_offset != 0 && b_offset = 0 + // a_offset = 0 && b_offset = 0 + if(a_offset != 0 && b_offset != 0) + { + AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0); + AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration); + + update_window_and_padding(win, + vector_sum_col_access, + vector_sum_row_access, + mm_result_access); + } + else if(a_offset == 0 && b_offset != 0) + { + AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0); + + update_window_and_padding(win, + vector_sum_row_access, + mm_result_access); + } + else if(a_offset != 0 && b_offset == 0) + { + AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration); + + update_window_and_padding(win, + vector_sum_col_access, + mm_result_access); + } + else + { + update_window_and_padding(win, + mm_result_access); + } + + INEKernel::configure(win); +} + +void NEGEMMLowpOffsetContributionKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimZ); + + if(_a_offset != 0 && _b_offset != 0) // true, true + { + // Set window for vector_sum_col + Window win_vector_sum_col(collapsed_window); + win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0)); + if(!_slide_vector_sum_col) + { + win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0)); + } + + // Set window for vector_sum_row + Window win_vector_sum_row(collapsed_window); + win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0)); + win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0)); + + Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col); + Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row); + Iterator mm_result(_mm_result, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + // Compute the leftover term due to a_offset. 
+ int32x4x4_t a_offset_term_s32 = + { + { + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 0), + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 4), + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 8), + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 12) + } + }; + + a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset); + a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset); + a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset); + a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset); + + // Compute the leftover term due to b_offset. + int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast(vector_sum_row.ptr()) + id.y()); + b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset); + + // Add a_offset_term_s32 and b_offset_term_s32 + int32x4x4_t offset_term_s32 = + { + { + vdupq_n_s32(_k_offset), + vdupq_n_s32(_k_offset), + vdupq_n_s32(_k_offset), + vdupq_n_s32(_k_offset) + } + }; + + offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32)); + offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32)); + offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32)); + offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32)); + + int32x4x4_t in_s32 = + { + { + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) + } + }; + + // Add the offset terms to GEMM's result + in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]); + in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]); + in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]); + in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]); + + // Store the result with the offset contribution + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 0, in_s32.val[0]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 4, in_s32.val[1]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 8, in_s32.val[2]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 12, in_s32.val[3]); + }, + vector_sum_col, vector_sum_row, mm_result); + } + else if((_a_offset == 0) && (_b_offset != 0)) // false, true + { + // Set window for vector_sum_row + Window win_vector_sum_row(collapsed_window); + win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0)); + win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0)); + + Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row); + Iterator mm_result(_mm_result, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + // Compute the leftover term due to b_offset. 
+ int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast(vector_sum_row.ptr()) + id.y()); + b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset); + + int32x4x4_t in_s32 = + { + { + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) + } + }; + + // Add the offset terms to GEMM's result + in_s32.val[0] = vaddq_s32(in_s32.val[0], b_offset_term_s32); + in_s32.val[1] = vaddq_s32(in_s32.val[1], b_offset_term_s32); + in_s32.val[2] = vaddq_s32(in_s32.val[2], b_offset_term_s32); + in_s32.val[3] = vaddq_s32(in_s32.val[3], b_offset_term_s32); + + // Store the result with the offset contribution + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 0, in_s32.val[0]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 4, in_s32.val[1]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 8, in_s32.val[2]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 12, in_s32.val[3]); + }, + vector_sum_row, mm_result); + } + else if((_a_offset != 0) && (_b_offset == 0)) // true, false + { + // Set window for vector_sum_col + Window win_vector_sum_col(collapsed_window); + win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0)); + if(!_slide_vector_sum_col) + { + win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0)); + } + + Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col); + Iterator mm_result(_mm_result, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + // Compute the leftover term due to a_offset. + int32x4x4_t a_offset_term_s32 = + { + { + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 0), + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 4), + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 8), + vld1q_s32(reinterpret_cast(vector_sum_col.ptr()) + 12) + } + }; + + a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset); + a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset); + a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset); + a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset); + + int32x4x4_t in_s32 = + { + { + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 0), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 4), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 8), + vld1q_s32(reinterpret_cast(mm_result.ptr()) + 12) + } + }; + + // Add the offset terms to GEMM's result + in_s32.val[0] = vaddq_s32(in_s32.val[0], a_offset_term_s32.val[0]); + in_s32.val[1] = vaddq_s32(in_s32.val[1], a_offset_term_s32.val[1]); + in_s32.val[2] = vaddq_s32(in_s32.val[2], a_offset_term_s32.val[2]); + in_s32.val[3] = vaddq_s32(in_s32.val[3], a_offset_term_s32.val[3]); + + // Store the result with the offset contribution + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 0, in_s32.val[0]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 4, in_s32.val[1]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 8, in_s32.val[2]); + vst1q_s32(reinterpret_cast(mm_result.ptr()) + 12, in_s32.val[3]); + }, + vector_sum_col, mm_result); + } + else // false, false + { + // No offset contribution from matrix A and matrix B + return; + } +} diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp new file mode 100644 index 0000000000..aa3c280788 --- /dev/null +++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp @@ -0,0 
+1,141 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" + +#include +#include +#include + +using namespace arm_compute; + +namespace arm_compute +{ +class Coordinates; +} // namespace arm_compute + +NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel() + : _input(nullptr), _output(nullptr), _result_offset(0), _result_mult_int(0), _result_shift(0) +{ +} + +void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::configure(const ITensor *input, ITensor *output, int result_offset, int result_mult_int, int result_shift) +{ + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8); + + _input = input; + _output = output; + _result_offset = result_offset; + _result_mult_int = result_mult_int; + _result_shift = result_shift; + + constexpr unsigned int num_elems_processed_per_iteration = 16; + + // Configure kernel window + Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration)); + + AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration); + AccessWindowHorizontal output_result_access(output->info(), 0, num_elems_processed_per_iteration); + + update_window_and_padding(win, + input_access, + output_result_access); + + output_result_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape())); + + INEKernel::configure(win); +} + +void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + const int32x4_t result_offset_s32 = vdupq_n_s32(_result_offset); + const int32x4_t result_shift_s32 = vdupq_n_s32(-_result_shift); + const int32x4_t zero_s32 = vdupq_n_s32(0); + + Iterator in(_input, window); + Iterator out(_output, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + int32x4x4_t in_s32 = + { + { + 
vld1q_s32(reinterpret_cast(in.ptr()) + 0), + vld1q_s32(reinterpret_cast(in.ptr()) + 4), + vld1q_s32(reinterpret_cast(in.ptr()) + 8), + vld1q_s32(reinterpret_cast(in.ptr()) + 12) + } + }; + + // Add the offset terms to GEMM's result + in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_s32); + in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_s32); + in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_s32); + in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_s32); + + // Multiply by c_mult_int + in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _result_mult_int); + in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _result_mult_int); + in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _result_mult_int); + in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _result_mult_int); + + // Shift final result (negative value shift right) + in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32); + in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32); + in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32); + in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32); + + // Saturate negative values + in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32); + in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32); + in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32); + in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32); + + // Convert S32 to S16 + const int16x8x2_t in_s16 = + { + { + vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])), + vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])) + } + }; + + // Convert S16 to U8 + const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1])); + + vst1q_u8(out.ptr(), out_u8); + }, + in, out); +} \ No newline at end of file diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp index 9df13ce0e3..81d9b5bb81 100644 --- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp +++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp @@ -49,12 +49,12 @@ INEGEMMLowpReductionKernel::INEGEMMLowpReductionKernel() { } -void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4) +void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a, 1, DataType::QASYMM8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); - _input = mtx_a_interleaved4x4; + _input = mtx_a; _output = vector_sum_row; _k = num_mtx_a_cols; _is_reshaped = is_interleaved4x4; @@ -97,9 +97,9 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf execute_window_loop(collapsed_window, [&](const Coordinates & id) { // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation - int32x4_t sum_row = vdupq_n_s32(0); + uint32x4_t sum_row = vdupq_n_u32(0); - auto matrix_a = reinterpret_cast(in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]); + const uint8_t *matrix_a = (in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]); #if __arm__ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a)); @@ -109,43 +109,43 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, 
const ThreadInf // This for loop performs 4 accumulations for(; i <= (_k - 4); i += 4) { - const int8x16_t a0_s8 = vld1q_s8(matrix_a + i * 4); + const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i * 4); // Convert U8 to U16 - int16x4x4_t a0_s16 = + uint16x4x4_t a0_u16 = { { - vget_low_s16(vmovl_s8(vget_low_s8(a0_s8))), - vget_high_s16(vmovl_s8(vget_low_s8(a0_s8))), - vget_low_s16(vmovl_s8(vget_high_s8(a0_s8))), - vget_high_s16(vmovl_s8(vget_high_s8(a0_s8))) + vget_low_u16(vmovl_u8(vget_low_u8(a0_u8))), + vget_high_u16(vmovl_u8(vget_low_u8(a0_u8))), + vget_low_u16(vmovl_u8(vget_high_u8(a0_u8))), + vget_high_u16(vmovl_u8(vget_high_u8(a0_u8))) } }; // Accumulate to U16 - a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[1]); - a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[2]); - a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[3]); + a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[1]); + a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[2]); + a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[3]); // Accumulate to U32 - sum_row = vaddw_s16(sum_row, a0_s16.val[0]); + sum_row = vaddw_u16(sum_row, a0_u16.val[0]); } // This for loop performs the leftover accumulations for(; i < _k; ++i) { - const int8x8_t a0_s8 = vld1_s8(matrix_a + i * 4); + const uint8x8_t a0_u8 = vld1_u8(matrix_a + i * 4); // Convert U8 to U16 - const int16x4_t a0_s16 = vget_low_s16(vmovl_s8(a0_s8)); + const uint16x4_t a0_u16 = vget_low_u16(vmovl_u8(a0_u8)); // Accumulate to U32 - sum_row = vaddw_s16(sum_row, a0_s16); + sum_row = vaddw_u16(sum_row, a0_u16); } auto vector_sum_row = reinterpret_cast(out.ptr()); - vst1q_s32(vector_sum_row, sum_row); + vst1q_s32(vector_sum_row, vreinterpretq_s32_u32(sum_row)); }, in, out); } @@ -154,10 +154,10 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf execute_window_loop(collapsed_window, [&](const Coordinates & id) { // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation - int32x4_t sum_row_s32 = vdupq_n_s32(0); - int32_t sum_row = 0; + uint32x4_t sum_row_u32 = vdupq_n_u32(0); + uint32_t sum_row = 0; - auto matrix_a = reinterpret_cast(in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2]); + const uint8_t *matrix_a = (in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2]); #if __arm__ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a)); @@ -167,29 +167,29 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf // This for loop performs 16 accumulations for(; i <= (_k - 16); i += 16) { - const int8x16_t a0_s8 = vld1q_s8(matrix_a + i); + const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i); // Partial accumulations in U16 - const int16x8_t tmp_sum0 = vaddl_s8(vget_low_s8(a0_s8), vget_high_s8(a0_s8)); + const uint16x8_t tmp_sum0 = vaddl_u8(vget_low_u8(a0_u8), vget_high_u8(a0_u8)); // Accumulate to U32 - sum_row_s32 = vaddq_s32(sum_row_s32, vpaddlq_s16(tmp_sum0)); + sum_row_u32 = vaddq_u32(sum_row_u32, vpaddlq_u16(tmp_sum0)); } // This for loop performs the leftover accumulations for(; i < _k; ++i) { - sum_row += static_cast(matrix_a[i]); + sum_row += static_cast(matrix_a[i]); } #if defined(__aarch64__) // Reduction operation available on 64 bit architectures only - sum_row += vaddvq_s32(sum_row_s32); + sum_row += vaddvq_u32(sum_row_u32); #else // __aarch64__ - int32x2_t tmp = vpadd_s32(vget_high_s32(sum_row_s32), vget_low_s32(sum_row_s32)); - tmp = vpadd_s32(tmp, tmp); + uint32x2_t tmp 
= vpadd_u32(vget_high_u32(sum_row_u32), vget_low_u32(sum_row_u32)); + tmp = vpadd_u32(tmp, tmp); - sum_row += vget_lane_s32(tmp, 0); + sum_row += vget_lane_u32(tmp, 0); #endif // __aarch64__ *(reinterpret_cast(out.ptr())) = static_cast(sum_row); @@ -198,12 +198,12 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf } } -void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW) +void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b, 1, DataType::QASYMM8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); - _input = mtx_b_transposed1xW; + _input = mtx_b; _output = vector_sum_col; _k = num_mtx_b_rows; _is_reshaped = is_transposed1xW; @@ -246,17 +246,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf execute_window_loop(collapsed_window, [&](const Coordinates & id) { // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation - int32x4x4_t sum_col = + uint32x4x4_t sum_col = { { - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0) + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0) } }; - auto matrix_b = reinterpret_cast(in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]); + const uint8_t *matrix_b = in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]; #if __arm__ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b)); @@ -265,14 +265,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf int i = 0; for(; i < _k; ++i) { - const int8x16_t b0_s8 = vld1q_s8(matrix_b + i * 16); + const uint8x16_t b0_u8 = vld1q_u8(matrix_b + i * 16); // Convert S8 to U16 - const int16x8x2_t b0_s16 = + const uint16x8x2_t b0_u16 = { { - vmovl_s8(vget_low_s8(b0_s8)), - vmovl_s8(vget_high_s8(b0_s8)) + vmovl_u8(vget_low_u8(b0_u8)), + vmovl_u8(vget_high_u8(b0_u8)) } }; @@ -280,20 +280,20 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf sum_col = { { - vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])), - vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])), - vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])), - vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1])) + vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])), + vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])), + vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])), + vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1])) } }; } auto vector_sum_col = reinterpret_cast(out.ptr()); - vst1q_s32(vector_sum_col + 0, sum_col.val[0]); - vst1q_s32(vector_sum_col + 4, sum_col.val[1]); - vst1q_s32(vector_sum_col + 8, sum_col.val[2]); - vst1q_s32(vector_sum_col + 12, sum_col.val[3]); + vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0])); + vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1])); + vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2])); + vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3])); }, in, out); } @@ -326,17 +326,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const 
ThreadInf } // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation - int32x4x4_t sum_col = + uint32x4x4_t sum_col = { { - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0), - vdupq_n_s32(0) + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0), + vdupq_n_u32(0) } }; - auto matrix_b = reinterpret_cast(inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2]); + const uint8_t *matrix_b = inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2]; #if __arm__ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b)); @@ -347,10 +347,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf // This for loop performs 4 accumulations for(; i <= (_k - 4); i += 4) { - const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride); - const int8x16_t b1_s8 = vld1q_s8(matrix_b + 1 * in_b_stride); - const int8x16_t b2_s8 = vld1q_s8(matrix_b + 2 * in_b_stride); - const int8x16_t b3_s8 = vld1q_s8(matrix_b + 3 * in_b_stride); + const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride); + const uint8x16_t b1_u8 = vld1q_u8(matrix_b + 1 * in_b_stride); + const uint8x16_t b2_u8 = vld1q_u8(matrix_b + 2 * in_b_stride); + const uint8x16_t b3_u8 = vld1q_u8(matrix_b + 3 * in_b_stride); #if __arm__ asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 1 * in_b_stride)); @@ -360,31 +360,31 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf #endif /* __arm__ */ // Partial accumulation in u16 - int16x8x2_t tmp_sum = + uint16x8x2_t tmp_sum = { { - vdupq_n_s16(0), - vdupq_n_s16(0) + vdupq_n_u16(0), + vdupq_n_u16(0) } }; - tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b0_s8)); - tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b1_s8)); - tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b2_s8)); - tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b3_s8)); - tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b0_s8)); - tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b1_s8)); - tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b2_s8)); - tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b3_s8)); + tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b0_u8)); + tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b1_u8)); + tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b2_u8)); + tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b3_u8)); + tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b0_u8)); + tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b1_u8)); + tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b2_u8)); + tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b3_u8)); // Accumulate to U32 sum_col = { { - vaddw_s16(sum_col.val[0], vget_low_s16(tmp_sum.val[0])), - vaddw_s16(sum_col.val[1], vget_high_s16(tmp_sum.val[0])), - vaddw_s16(sum_col.val[2], vget_low_s16(tmp_sum.val[1])), - vaddw_s16(sum_col.val[3], vget_high_s16(tmp_sum.val[1])) + vaddw_u16(sum_col.val[0], vget_low_u16(tmp_sum.val[0])), + vaddw_u16(sum_col.val[1], vget_high_u16(tmp_sum.val[0])), + vaddw_u16(sum_col.val[2], vget_low_u16(tmp_sum.val[1])), + vaddw_u16(sum_col.val[3], vget_high_u16(tmp_sum.val[1])) } }; @@ -394,14 +394,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf // This for loop perfoms the leftover accumulations for(; i < _k; ++i) { - const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride); + const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride); // Convert S8 to S16 - const int16x8x2_t b0_s16 = + const uint16x8x2_t b0_u16 = 
{ { - vmovl_s8(vget_low_s8(b0_s8)), - vmovl_s8(vget_high_s8(b0_s8)) + vmovl_u8(vget_low_u8(b0_u8)), + vmovl_u8(vget_high_u8(b0_u8)) } }; @@ -409,10 +409,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf sum_col = { { - vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])), - vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])), - vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])), - vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1])) + vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])), + vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])), + vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])), + vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1])) } }; @@ -421,10 +421,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf auto vector_sum_col = reinterpret_cast(out.ptr()); - vst1q_s32(vector_sum_col + 0, sum_col.val[0]); - vst1q_s32(vector_sum_col + 4, sum_col.val[1]); - vst1q_s32(vector_sum_col + 8, sum_col.val[2]); - vst1q_s32(vector_sum_col + 12, sum_col.val[3]); + vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0])); + vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1])); + vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2])); + vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3])); }, inb, out); } diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp index 7f4ee1ec49..7f83144e12 100644 --- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp +++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp @@ -43,7 +43,8 @@ using namespace arm_compute; void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, + DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_NULLPTR(output); diff --git a/src/runtime/NEON/functions/NEGEMMLowp.cpp b/src/runtime/NEON/functions/NEGEMMLowp.cpp deleted file mode 100644 index 90bc6a205b..0000000000 --- a/src/runtime/NEON/functions/NEGEMMLowp.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2017 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Validate.h" -#include "arm_compute/runtime/NEON/NEScheduler.h" -#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h" -#include "arm_compute/runtime/TensorAllocator.h" -#include "support/ToolchainSupport.h" - -using namespace arm_compute; - -NEGEMMLowp::NEGEMMLowp(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _mm_func(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _finalize_kernel(), _vector_sum_col(), _vector_sum_row(), _mm_output(), _a_offset(0), - _b_offset(0) -{ -} - -void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t output_mult_int, int32_t shift) -{ - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::S8); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output); - ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); - ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A"); - ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B"); - - _a_offset = a_offset; - _b_offset = b_offset; - - // Initialize matrix multiply output tensor - const TensorShape &shape_mm_output = output->info()->tensor_shape(); - TensorInfo info_mm_output(shape_mm_output, 1, DataType::S32); - _mm_output.allocator()->init(info_mm_output); - _memory_group.manage(&_mm_output); - - // Initialize Matrix B reduction kernel only if _a_offset is not equal to 0 - if(_a_offset != 0) - { - TensorShape shape_vector_sum_col = b->info()->tensor_shape(); - shape_vector_sum_col.remove_dimension(1); - TensorInfo info_vector_sum_col(shape_vector_sum_col, 1, DataType::S32); - _vector_sum_col.allocator()->init(info_vector_sum_col); - _memory_group.manage(&_vector_sum_col); - - // Configure Matrix B reduction kernel - _mtx_b_reduction_kernel.configure(b, &_vector_sum_col, a->info()->dimension(0), false); - } - - // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0 - if(_b_offset != 0) - { - TensorShape shape_vector_sum_row = a->info()->tensor_shape(); - shape_vector_sum_row.set(Window::DimX, a->info()->dimension(1)); - shape_vector_sum_row.remove_dimension(1); - TensorInfo info_vector_sum_row(shape_vector_sum_row, 1, DataType::S32); - _vector_sum_row.allocator()->init(info_vector_sum_row); - _memory_group.manage(&_vector_sum_row); - - // Configure Matrix A reduction kernel - _mtx_a_reduction_kernel.configure(a, &_vector_sum_row, a->info()->dimension(0), false); - } - - // Configure matrix multiply function - _mm_func.configure(a, b, &_mm_output); - - // Configure finalize kernel - 
_finalize_kernel.configure(_a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, &_mm_output, output, a->info()->dimension(0), a_offset, b_offset, c_offset, - output_mult_int, shift); - - // Allocate tensors - _mm_output.allocator()->allocate(); - - if(_a_offset != 0) - { - _vector_sum_col.allocator()->allocate(); - } - - if(_b_offset != 0) - { - _vector_sum_row.allocator()->allocate(); - } -} - -void NEGEMMLowp::run() -{ - _memory_group.acquire(); - - // Run matrix A reduction kernel only if _b_offset is not equal to 0 - if(_b_offset != 0) - { - NEScheduler::get().schedule(&_mtx_a_reduction_kernel, Window::DimX); - } - - // Run matrix B reduction kernel only if _a_offset is not equal to 0 - if(_a_offset != 0) - { - NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX); - } - - // Run matrix multiply core function - _mm_func.run(); - - // Run finalise kernel - NEScheduler::get().schedule(&_finalize_kernel, Window::DimY); - - _memory_group.release(); -} diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp index 29104cc378..929ee41220 100644 --- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp +++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp @@ -47,19 +47,25 @@ namespace arm_compute using namespace arm_compute; NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b(), _workspace() + : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), + _offset_contribution_kernel(), _vector_sum_col(), _vector_sum_row(), _tmp_a(), _tmp_b(), _workspace(), _a_offset(0), _b_offset(0) { } void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b); ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A"); ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B"); + bool dot_product_path = false; + + _a_offset = a->info()->quantization_info().offset; + _b_offset = b->info()->quantization_info().offset; + #ifdef ARM_COMPUTE_AARCH64_V8_2 // Check for DOT product instruction const struct CPUInfo ci = NEScheduler::get().cpu_info(); @@ -67,6 +73,13 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, if(cpu_has_dotprod != 0) { + dot_product_path = true; + + // If the DOT product instruction is available, the computation will be performed in int8_t + // In order to take into account this, we need to subtract -128 from a_offset and b_offset + _a_offset -= 128; + _b_offset -= 128; + // Configure matrix multiply kernel struct CPUInfo ci = 
NEScheduler::get().cpu_info(); const int M = output->info()->tensor_shape().y(); @@ -77,12 +90,11 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, constexpr size_t alignment = 4096; _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8)); _memory_group.manage(&_workspace); + // Configure matrix multiplication kernel auto k = arm_compute::support::cpp14::make_unique(); k->configure(a, b, output, &_workspace, 1.f, 1.f); _mm_kernel = std::move(k); - - _workspace.allocator()->allocate(); } else #endif /* ARM_COMPUTE_AARCH64_V8_2 */ @@ -124,11 +136,58 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, k->configure(&_tmp_a, &_tmp_b, output); _mm_kernel = std::move(k); } + } - // Allocate tensors + // Initialize matrix B reduction kernel only if _a_offset is not equal to 0 + if(_a_offset != 0) + { + TensorShape shape_vector_sum_col = b->info()->tensor_shape(); + shape_vector_sum_col.remove_dimension(1); + TensorInfo info_vector_sum_col(shape_vector_sum_col, 1, DataType::S32); + _vector_sum_col.allocator()->init(info_vector_sum_col); + _memory_group.manage(&_vector_sum_col); + + // Configure Matrix B reduction kernel + _mtx_b_reduction_kernel.configure(b, &_vector_sum_col, a->info()->dimension(0), false); + } + + // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0 + if(_b_offset != 0) + { + TensorShape shape_vector_sum_row = a->info()->tensor_shape(); + shape_vector_sum_row.set(Window::DimX, a->info()->dimension(1)); + shape_vector_sum_row.remove_dimension(1); + TensorInfo info_vector_sum_row(shape_vector_sum_row, 1, DataType::S32); + _vector_sum_row.allocator()->init(info_vector_sum_row); + _memory_group.manage(&_vector_sum_row); + + // Configure matrix A reduction kernel + _mtx_a_reduction_kernel.configure(a, &_vector_sum_row, a->info()->dimension(0), false); + } + + // Configure offset contribution kernel + _offset_contribution_kernel.configure(output, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, a->info()->dimension(0), _a_offset, _b_offset); + + // Allocate tensors + if(!dot_product_path) + { _tmp_a.allocator()->allocate(); _tmp_b.allocator()->allocate(); } + else + { + _workspace.allocator()->allocate(); + } + + if(_a_offset != 0) + { + _vector_sum_col.allocator()->allocate(); + } + + if(_b_offset != 0) + { + _vector_sum_row.allocator()->allocate(); + } } void NEGEMMLowpMatrixMultiplyCore::run() @@ -147,5 +206,20 @@ void NEGEMMLowpMatrixMultiplyCore::run() NEScheduler::get().schedule(_mm_kernel.get(), Window::DimY); + // Run matrix A reduction kernel only if _b_offset is not equal to 0 + if(_b_offset != 0) + { + NEScheduler::get().schedule(&_mtx_a_reduction_kernel, Window::DimX); + } + + // Run matrix B reduction kernel only if _a_offset is not equal to 0 + if(_a_offset != 0) + { + NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX); + } + + // Run offset contribution kernel + NEScheduler::get().schedule(&_offset_contribution_kernel, Window::DimY); + _memory_group.release(); } diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp new file mode 100644 index 0000000000..d09827f908 --- /dev/null +++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h" + +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h" +#include "support/ToolchainSupport.h" + +using namespace arm_compute; + +void NEGEMMLowpQuantizeDownInt32ToUint8Scale::configure(const ITensor *input, ITensor *output, int result_offset, int result_mult_int, int result_shift) +{ + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(input, output, result_offset, result_mult_int, result_shift); + _kernel = std::move(k); +} \ No newline at end of file -- cgit v1.2.1
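
A minimal usage sketch of how the two new functions could be chained, based only on the configure()/run() signatures and data-type requirements visible in this patch (QASYMM8 inputs, S32 intermediate result for NEGEMMLowpMatrixMultiplyCore, then NEGEMMLowpQuantizeDownInt32ToUint8Scale for the output stage). The Tensor header path, the TensorInfo constructor taking a QuantizationInfo, the QuantizationInfo values, the matrix sizes and the result_offset/result_mult_int/result_shift values are assumptions for illustration, not part of the patch.

    // Hypothetical sketch: wiring NEGEMMLowpMatrixMultiplyCore and the new
    // output stage together. Tensor/TensorInfo setup details are assumed.
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h" // assumed header for the Tensor class

    using namespace arm_compute;

    int main()
    {
        // Assumed sizes: A is M x K, B is K x N, output is M x N.
        constexpr unsigned int M = 4, N = 8, K = 16;

        Tensor a, b, mm_result, output;

        // QASYMM8 inputs; the offsets held in the QuantizationInfo are what the
        // core function reads back as a_offset/b_offset (assumed constructors).
        a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8, QuantizationInfo(1.f, 10)));
        b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8, QuantizationInfo(1.f, 5)));

        // Intermediate result must be S32, as enforced by the core function.
        mm_result.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));
        output.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::QASYMM8));

        // Stage 1: low-precision matrix multiply (offset contributions included).
        NEGEMMLowpMatrixMultiplyCore mm_core;
        mm_core.configure(&a, &b, &mm_result);

        // Stage 2: requantize the S32 accumulators down to uint8.
        // result_offset / result_mult_int / result_shift are placeholder values.
        NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
        output_stage.configure(&mm_result, &output, /*result_offset=*/0, /*result_mult_int=*/1, /*result_shift=*/8);

        a.allocator()->allocate();
        b.allocator()->allocate();
        mm_result.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill a and b with quantized data here ...

        mm_core.run();
        output_stage.run();

        return 0;
    }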