From 181e65145d153210ec5587a42d2938e27e1d5b01 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Wed, 15 Nov 2017 13:28:27 +0000
Subject: COMPMID-675: NEGEMMLowp Assembly Integration

Added support for S8 input in NEGEMMLowp Matrix Multiply Kernel.
Added a new function to run assembly kernels such that A*B=C (no offsets involved)
Added new tests for the assembly gemmlowp kernels (no offsets)
Integrated the assembly kernel for the A57

Change-Id: Ib3e39c1f3f7f1baa0d39be69485f61cd18e3c9b3
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95864
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 .../kernels/NEGEMMLowpMatrixMultiplyKernel.cpp     | 209 +++++++++++++++++----
 .../NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp | 129 +++++++++++++
 2 files changed, 303 insertions(+), 35 deletions(-)
 create mode 100644 src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp

(limited to 'src/core')

diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index 1352f34e3c..5f052f797d 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -52,7 +52,7 @@ NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel()
 
 void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::S8);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
 
@@ -90,41 +90,8 @@ void NEGEMMLowpMatrixMultiplyKernel::configure(const ITen
     INEKernel::configure(win);
 }
 
-void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo &info)
+void inline matrix_multiply_u8(Iterator &ina, Iterator &inb, Iterator &out, int width_b, size_t out_stride, const Window &window)
 {
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
-    const size_t in_b_stride = _input1->info()->strides_in_bytes()[1];
-    const size_t out_stride = _output->info()->strides_in_bytes()[1] / _output->info()->element_size();
-
-    // Set step_x and step_y for matrix A. Scale by a factor of 4 the Y range as the input interleaved matrix A has 4 times less the rows of the output matrix
-    Window win_a(window);
-    win_a.set(Window::DimX, Window::Dimension(0, 0, 0));
-    win_a.set(Window::DimY, Window::Dimension(window.y().start() / 4, window.y().end() / 4, 1));
-
-    // Set step_x and step_y for matrix B. Scale by a factor of 16 the X range as the input transposed matrix A has 16 times less the columns of the output matrix
-    Window win_b;
-    // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
-    // This scenario can happen when the the matrix multiplication is used to perform a convolution operation
-    if(_slide_matrix_b)
-    {
-        win_b = window;
-    }
-    win_b.set(Window::DimX, Window::Dimension(window.x().start() / 16, window.x().end() / 16, in_b_stride));
-    win_b.set(Window::DimY, Window::Dimension(0, 0, 0));
-
-    // The step x and step y for the output matrix has been already set using in configure()
-    Iterator ina(_input0, win_a);
-    Iterator inb(_input1, win_b);
-    Iterator out(_output, window);
-
-    const int width_b = _input1->info()->dimension(0);
-
-    // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
-    // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
-    // All the values needed for computing a single 4x4 block will be read from consecutive memory positions
     execute_window_loop(window, [&](const Coordinates & id)
     {
         const uint8_t *mtx_a0 = ina.ptr();
@@ -239,3 +206,175 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo
     },
     ina, inb, out);
 }
+
+void inline matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &out, int width_b, size_t out_stride, const Window &window)
+{
+    // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
+    // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
+    // All the values needed for computing a single 4x4 block will be read from consecutive memory positions
+    execute_window_loop(window, [&](const Coordinates & id)
+    {
+        auto *mtx_a0 = reinterpret_cast<const int8_t *>(ina.ptr());
+        auto *mtx_b0 = reinterpret_cast<const int8_t *>(inb.ptr());
+
+        // Note: accumulation is performed in 32-bit signed integers
+        // Accumulators for the block 0
+        int32x4x4_t c0 =
+        {
+            {
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0)
+            }
+        };
+
+        // Accumulators for the block 1
+        int32x4x4_t c1 =
+        {
+            {
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0)
+            }
+        };
+
+        // Accumulators for the block 2
+        int32x4x4_t c2 =
+        {
+            {
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0)
+            }
+        };
+
+        // Accumulators for the block 3
+        int32x4x4_t c3 =
+        {
+            {
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0),
+                vdupq_n_s32(0)
+            }
+        };
+
+        for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
+        {
+            const int8x8_t  a00_s8 = vld1_s8(mtx_a0);
+            const int8x16_t b00_s8 = vld1q_s8(mtx_b0);
+
+            // Convert a00_s8 to int16_t and get the lower part
+            const int16x4_t a00_s16 = vget_low_s16(vmovl_s8(a00_s8));
+
+            // Convert b00_s8 to int16_t
+            const int16x4x4_t b00_s16 =
+            {
+                {
+                    vget_low_s16(vmovl_s8(vget_low_s8(b00_s8))),
+                    vget_high_s16(vmovl_s8(vget_low_s8(b00_s8))),
+                    vget_low_s16(vmovl_s8(vget_high_s8(b00_s8))),
+                    vget_high_s16(vmovl_s8(vget_high_s8(b00_s8)))
+                }
+            };
+
+            // 4x4 block 0
+            c0.val[0] = vmlal_lane_s16(c0.val[0], b00_s16.val[0], a00_s16, 0);
+            c0.val[1] = vmlal_lane_s16(c0.val[1], b00_s16.val[1], a00_s16, 0);
+            c0.val[2] = vmlal_lane_s16(c0.val[2], b00_s16.val[2], a00_s16, 0);
+            c0.val[3] = vmlal_lane_s16(c0.val[3], b00_s16.val[3], a00_s16, 0);
+
+            // 4x4 block 1
+            c1.val[0] = vmlal_lane_s16(c1.val[0], b00_s16.val[0], a00_s16, 1);
+            c1.val[1] = vmlal_lane_s16(c1.val[1], b00_s16.val[1], a00_s16, 1);
+            c1.val[2] = vmlal_lane_s16(c1.val[2], b00_s16.val[2], a00_s16, 1);
+            c1.val[3] = vmlal_lane_s16(c1.val[3], b00_s16.val[3], a00_s16, 1);
+
+            // 4x4 block 2
+            c2.val[0] = vmlal_lane_s16(c2.val[0], b00_s16.val[0], a00_s16, 2);
+            c2.val[1] = vmlal_lane_s16(c2.val[1], b00_s16.val[1], a00_s16, 2);
+            c2.val[2] = vmlal_lane_s16(c2.val[2], b00_s16.val[2], a00_s16, 2);
+            c2.val[3] = vmlal_lane_s16(c2.val[3], b00_s16.val[3], a00_s16, 2);
+
+            // 4x4 block 3
+            c3.val[0] = vmlal_lane_s16(c3.val[0], b00_s16.val[0], a00_s16, 3);
+            c3.val[1] = vmlal_lane_s16(c3.val[1], b00_s16.val[1], a00_s16, 3);
+            c3.val[2] = vmlal_lane_s16(c3.val[2], b00_s16.val[2], a00_s16, 3);
+            c3.val[3] = vmlal_lane_s16(c3.val[3], b00_s16.val[3], a00_s16, 3);
+        }
+
+        auto mtx_out = reinterpret_cast<int32_t *>(out.ptr());
+        vst1q_s32(mtx_out + 0 * out_stride + 0, c0.val[0]);
+        vst1q_s32(mtx_out + 0 * out_stride + 4, c0.val[1]);
+        vst1q_s32(mtx_out + 0 * out_stride + 8, c0.val[2]);
+        vst1q_s32(mtx_out + 0 * out_stride + 12, c0.val[3]);
+        vst1q_s32(mtx_out + 1 * out_stride + 0, c1.val[0]);
+        vst1q_s32(mtx_out + 1 * out_stride + 4, c1.val[1]);
+        vst1q_s32(mtx_out + 1 * out_stride + 8, c1.val[2]);
+        vst1q_s32(mtx_out + 1 * out_stride + 12, c1.val[3]);
+        vst1q_s32(mtx_out + 2 * out_stride + 0, c2.val[0]);
+        vst1q_s32(mtx_out + 2 * out_stride + 4, c2.val[1]);
+        vst1q_s32(mtx_out + 2 * out_stride + 8, c2.val[2]);
+        vst1q_s32(mtx_out + 2 * out_stride + 12, c2.val[3]);
+        vst1q_s32(mtx_out + 3 * out_stride + 0, c3.val[0]);
+        vst1q_s32(mtx_out + 3 * out_stride + 4, c3.val[1]);
+        vst1q_s32(mtx_out + 3 * out_stride + 8, c3.val[2]);
+        vst1q_s32(mtx_out + 3 * out_stride + 12, c3.val[3]);
+    },
+    ina, inb, out);
+}
+
+void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+    const size_t in_b_stride = _input1->info()->strides_in_bytes()[1];
+    const size_t out_stride = _output->info()->strides_in_bytes()[1] / _output->info()->element_size();
+
+    // Set step_x and step_y for matrix A. Scale by a factor of 4 the Y range as the input interleaved matrix A has 4 times less the rows of the output matrix
+    Window win_a(window);
+    win_a.set(Window::DimX, Window::Dimension(0, 0, 0));
+    win_a.set(Window::DimY, Window::Dimension(window.y().start() / 4, window.y().end() / 4, 1));
+
+    // Set step_x and step_y for matrix B. Scale by a factor of 16 the X range as the input transposed matrix A has 16 times less the columns of the output matrix
+    Window win_b;
+    // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
+    // This scenario can happen when the the matrix multiplication is used to perform a convolution operation
+    if(_slide_matrix_b)
+    {
+        win_b = window;
+    }
+    win_b.set(Window::DimX, Window::Dimension(window.x().start() / 16, window.x().end() / 16, in_b_stride));
+    win_b.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+    // The step x and step y for the output matrix has been already set using in configure()
+    Iterator ina(_input0, win_a);
+    Iterator inb(_input1, win_b);
+    Iterator out(_output, window);
+
+    const int width_b = _input1->info()->dimension(0);
+    switch(_input0->info()->data_type())
+    {
+        case DataType::S8:
+        {
+            matrix_multiply_s8(ina, inb, out, width_b, out_stride, window);
+            break;
+        }
+        case DataType::U8:
+        case DataType::QASYMM8:
+        {
+            matrix_multiply_u8(ina, inb, out, width_b, out_stride, window);
+            break;
+        }
+        default:
+        {
+            ARM_COMPUTE_ERROR("Not supported");
+            break;
+        }
+    }
+}
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp
new file mode 100644
index 0000000000..b75a8ab251
--- /dev/null
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp"
+} // namespace arm_compute
+
+#include
+#include
+#include
+
+// Enable only if compiled for AArch64-V8A targets
+#ifdef ARM_COMPUTE_AARCH64_V8A
+
+namespace arm_compute
+{
+void NEGEMMLowpAArch64Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1)
+{
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
+
+    _input0      = input0;
+    _input1      = input1;
+    _output      = output;
+    _workspace   = workspace;
+    _alpha       = alpha;
+    _beta        = beta;
+    _transform_0 = transform_0;
+    _transform_1 = transform_1;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info());
+
+    AccessWindowRectangle output_access(output->info(), 0, 0, 4, 4);
+
+    const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 4);
+    const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 4);
+
+    update_window_and_padding(win,
+                              AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()),
+                              AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()),
+                              output_access);
+
+    INEKernel::configure(win);
+}
+
+void NEGEMMLowpAArch64Kernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+    const int lda = _input0->info()->strides_in_bytes().y();
+    const int ldb = _input1->info()->strides_in_bytes().y();
+    const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t);
+
+    const auto in1_ptr = reinterpret_cast<const int8_t *>(_input1->buffer());
+
+    const int M = std::min(_output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
+    const int N = _output->info()->tensor_shape().x();
+    const int K = _input0->info()->tensor_shape().x();
+
+    // Only iterate over batches
+    Window win(window);
+    win.set(0, Window::Dimension(0, 1, 1));
+    win.set(1, Window::Dimension(0, 1, 1));
+
+    Iterator in0(_input0, window);
+    Iterator out(_output, window);
+
+    GemmInterleaved<gemm_s8_4x4, int8_t, int32_t> gemm(&info.cpu_info, M, N, K, !_transform_1, !_transform_1);
+
+    constexpr size_t alignment      = 4096;
+    const size_t     offset         = (gemm.get_working_size() + alignment - 1) * info.thread_id;
+    void            *workspace      = _workspace->buffer() + offset;
+    size_t           workspace_size = _workspace->info()->total_size();
+
+    if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr)
+    {
+        ARM_COMPUTE_ERROR("Not enough space to align buffer!");
+    }
+
+    execute_window_loop(win, [&](const Coordinates & id)
+    {
+        gemm.execute(reinterpret_cast<const int8_t *>(in0.ptr()), lda,
+                     reinterpret_cast<const int8_t *>(in1_ptr), ldb,
+                     reinterpret_cast<int32_t *>(out.ptr()), ldc,
+                     _alpha, _beta, workspace);
+    },
+    in0, out);
+}
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_AARCH64_V8A */
--
cgit v1.2.1
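
The short C++ sketch below is not part of the patch above; it only illustrates one way the S8 path added to NEGEMMLowpMatrixMultiplyKernel might be exercised at kernel level. It assumes matrix A has already been reshaped with NEGEMMInterleave4x4 and matrix B with NEGEMMTranspose1xW (as the kernel's own comments require), that no offsets are involved so the result stays in S32 (the A*B=C case from the commit message), and that the tensor shapes, sizes and use of NEScheduler shown here are illustrative assumptions rather than code taken from this commit.

#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative sizes: C (16x16) = A (16x64) * B (64x16), S8 inputs, S32 accumulation, no offsets.
    // a_interleaved is assumed to hold A reshaped by NEGEMMInterleave4x4 (shape K*4 x M/4);
    // b_transposed is assumed to hold B reshaped by NEGEMMTranspose1xW (shape K*16 x N/16).
    Tensor a_interleaved;
    Tensor b_transposed;
    Tensor c;
    a_interleaved.allocator()->init(TensorInfo(TensorShape(256U, 4U), 1, DataType::S8));
    b_transposed.allocator()->init(TensorInfo(TensorShape(1024U, 1U), 1, DataType::S8));
    c.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::S32));

    // Configure before allocating so the kernel can register any padding it requires.
    NEGEMMLowpMatrixMultiplyKernel mm_kernel;
    mm_kernel.configure(&a_interleaved, &b_transposed, &c);

    a_interleaved.allocator()->allocate();
    b_transposed.allocator()->allocate();
    c.allocator()->allocate();

    // ... fill a_interleaved and b_transposed with the reshaped S8 data ...

    // With S8 inputs, run() now dispatches to matrix_multiply_s8().
    NEScheduler::get().schedule(&mm_kernel, Window::DimY);
    return 0;
}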