From 181e65145d153210ec5587a42d2938e27e1d5b01 Mon Sep 17 00:00:00 2001 From: Pablo Tello Date: Wed, 15 Nov 2017 13:28:27 +0000 Subject: COMPMID-675: NEGEMMLowp Assembly Integration Added support for S8 input in NEGEMMLowp Matrix Multiply Kernel. Added a new function to run assembly kernels such that A*B=C (no offsets involved) Added new tests for the assembly gemmlowp kernels (no offsets) Integrated the assembly kernel for the A57 Change-Id: Ib3e39c1f3f7f1baa0d39be69485f61cd18e3c9b3 Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95864 Tested-by: Kaizen Reviewed-by: Anthony Barbier --- SConstruct | 2 +- arm_compute/core/NEON/NEKernels.h | 1 + .../NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h | 48 +++ .../kernels/assembly/kernels/a64_gemm_s8_4x4.hpp | 61 +++ .../assembly/kernels/a64_gemm_s8_4x4/generic.hpp | 465 +++++++++++++++++++++ arm_compute/runtime/NEON/NEFunctions.h | 1 + .../NEGEMMLowpAssemblyMatrixMultiplyCore.h | 69 +++ .../kernels/NEGEMMLowpMatrixMultiplyKernel.cpp | 209 +++++++-- .../NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp | 129 ++++++ .../NEGEMMLowpAssemblyMatrixMultiplyCore.cpp | 168 ++++++++ tests/validation/CPP/GEMMLowp.cpp | 6 + tests/validation/CPP/GEMMLowp.h | 3 + tests/validation/NEON/GEMMLowp.cpp | 17 +- .../validation/fixtures/GEMMLowpAssemblyFixture.h | 121 ++++++ 14 files changed, 1262 insertions(+), 38 deletions(-) create mode 100644 arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h create mode 100644 arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp create mode 100644 arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4/generic.hpp create mode 100644 arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h create mode 100644 src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp create mode 100644 src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp create mode 100644 tests/validation/fixtures/GEMMLowpAssemblyFixture.h diff --git a/SConstruct b/SConstruct index 355449edc7..dffe4975b3 100644 --- a/SConstruct +++ b/SConstruct @@ -119,7 +119,7 @@ if env['arch'] == 'armv7a': env.Append(CXXFLAGS = ['-mfloat-abi=softfp']) elif env['arch'] == 'arm64-v8a': env.Append(CXXFLAGS = ['-march=armv8-a']) - + env.Append(CPPDEFINES = ['ARM_COMPUTE_AARCH64_V8A']) if env['os'] == 'linux': prefix = "aarch64-linux-gnu-" elif env['os'] == 'bare_metal': diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h index d78cec2a62..80fdaec809 100644 --- a/arm_compute/core/NEON/NEKernels.h +++ b/arm_compute/core/NEON/NEKernels.h @@ -109,6 +109,7 @@ #include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h" #include "arm_compute/core/NEON/kernels/arm32/NEGEMMAArch32Kernel.h" #include "arm_compute/core/NEON/kernels/arm64/NEGEMMAArch64Kernel.h" +#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h" #include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h" #endif /* __ARM_COMPUTE_NEKERNELS_H__ */ diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h new file mode 100644 index 0000000000..e8e71cf3d7 --- /dev/null +++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_NEGEMMLOWPAARCH64KERNEL_H__ +#define __ARM_COMPUTE_NEGEMMLOWPAARCH64KERNEL_H__ + +#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h" + +// Enable only if compiled for AArch64-V8A targets +#ifdef ARM_COMPUTE_AARCH64_V8A + +namespace arm_compute +{ +class ITensor; + +/** AArch64 NEON kernel to multiply two input matrices "A" and "B". */ +class NEGEMMLowpAArch64Kernel : public NEGEMMAssemblyBaseKernel +{ +public: + // Inherited methods overridden: + void run(const Window &window, const ThreadInfo &info) override; + +protected: + void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) override; +}; +} // namespace arm_compute +#endif /* ARM_COMPUTE_AARCH64_V8A */ +#endif /*__ARM_COMPUTE_NEGEMMLOWPAARCH64KERNEL_H__*/ diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp new file mode 100644 index 0000000000..1588f049f4 --- /dev/null +++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#pragma once + +#ifdef __aarch64__ + +// Load the actual kernel +#include "a64_gemm_s8_4x4/generic.hpp" + +class gemm_s8_4x4 { +public: + typedef int8_t operand_type; + typedef int32_t result_type; + + typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int); + + /* Describes the data layout for A input */ + static const int A_interleave = 4; + static const int A_block = 16; + static const bool A_transpose = false; + + /* Same for B input */ + static const int B_interleave = 4; + static const int B_block = 16; + static const bool B_transpose = true; + + /* Kernel blocking parameters */ + static const int out_width = 4; + static const int out_height = 4; + static const int k_unroll = 16; + + kern_type kernel = nullptr; + + gemm_s8_4x4(const CPUInfo *ci) { + kernel = a64_gemm_s8_4x4; + } +}; + +#endif // __aarch64__ + diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4/generic.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4/generic.hpp new file mode 100644 index 0000000000..0ec435b33b --- /dev/null +++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4/generic.hpp @@ -0,0 +1,465 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once + +#ifdef __aarch64__ + +#include + +inline void a64_gemm_s8_4x4(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K) { + const int8_t *a_ptr = Apanel; + int32_t *c_ptr = Cpanel; + K /= 16; + int oddk = (K & 1); + + for (int yb=0; yb + +namespace arm_compute +{ +class ITensor; + +/** Basic function to execute matrix multiply assembly kernels. + * +*/ +class NEGEMMLowpAssemblyMatrixMultiplyCore : public IFunction +{ +public: + /** Constructor */ + NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr memory_manager = nullptr); + /** Initialise the kernel's inputs, output + * + * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8. + * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a + * @param[out] output Output tensor. 
Data type supported: Data type supported: S32 + */ + void configure(const ITensor *a, const ITensor *b, ITensor *output); + + // Inherited methods overridden: + void run() override; + +private: + MemoryGroup _memory_group; + std::unique_ptr _mm_kernel; + std::unique_ptr _mtx_a_reshape_kernel; + std::unique_ptr _mtx_b_reshape_kernel; + Tensor _tmp_a; + Tensor _tmp_b; + Tensor _workspace; +}; +} +#endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */ diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp index 1352f34e3c..5f052f797d 100644 --- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp +++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp @@ -52,7 +52,7 @@ NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel() void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::S8); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); @@ -90,41 +90,8 @@ void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITen INEKernel::configure(win); } -void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo &info) +void inline matrix_multiply_u8(Iterator &ina, Iterator &inb, Iterator &out, int width_b, size_t out_stride, const Window &window) { - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - - const size_t in_b_stride = _input1->info()->strides_in_bytes()[1]; - const size_t out_stride = _output->info()->strides_in_bytes()[1] / _output->info()->element_size(); - - // Set step_x and step_y for matrix A. Scale by a factor of 4 the Y range as the input interleaved matrix A has 4 times less the rows of the output matrix - Window win_a(window); - win_a.set(Window::DimX, Window::Dimension(0, 0, 0)); - win_a.set(Window::DimY, Window::Dimension(window.y().start() / 4, window.y().end() / 4, 1)); - - // Set step_x and step_y for matrix B. 
Scale by a factor of 16 the X range as the input transposed matrix A has 16 times less the columns of the output matrix - Window win_b; - // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2 - // This scenario can happen when the the matrix multiplication is used to perform a convolution operation - if(_slide_matrix_b) - { - win_b = window; - } - win_b.set(Window::DimX, Window::Dimension(window.x().start() / 16, window.x().end() / 16, in_b_stride)); - win_b.set(Window::DimY, Window::Dimension(0, 0, 0)); - - // The step x and step y for the output matrix has been already set using in configure() - Iterator ina(_input0, win_a); - Iterator inb(_input1, win_b); - Iterator out(_output, window); - - const int width_b = _input1->info()->dimension(0); - - // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW - // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration - // All the values needed for computing a single 4x4 block will be read from consecutive memory positions execute_window_loop(window, [&](const Coordinates & id) { const uint8_t *mtx_a0 = ina.ptr(); @@ -239,3 +206,175 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo }, ina, inb, out); } + +void inline matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &out, int width_b, size_t out_stride, const Window &window) +{ + // The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW + // The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration + // All the values needed for computing a single 4x4 block will be read from consecutive memory positions + execute_window_loop(window, [&](const Coordinates & id) + { + auto *mtx_a0 = reinterpret_cast(ina.ptr()); + auto *mtx_b0 = reinterpret_cast(inb.ptr()); + + // Note: Since the input are all positives, we can use uint32_t + // Accumulators for the block 0 + int32x4x4_t c0 = + { + { + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0) + } + }; + + // Accumulators for the block 1 + int32x4x4_t c1 = + { + { + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0) + } + }; + + // Accumulators for the block 2 + int32x4x4_t c2 = + { + { + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0) + } + }; + + // Accumulators for the block 3 + int32x4x4_t c3 = + { + { + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0), + vdupq_n_s32(0) + } + }; + + for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16) + { + const int8x8_t a00_s8 = vld1_s8(mtx_a0); + const int8x16_t b00_s8 = vld1q_s8(mtx_b0); + + // Convert a00_s8 to uint16_t and get the lower part + const int16x4_t a00_s16 = vget_low_s16(vmovl_s8(a00_s8)); + + // Convert b00_s8 to int16_t + const int16x4x4_t b00_s16 = + { + { + vget_low_s16(vmovl_s8(vget_low_s8(b00_s8))), + vget_high_s16(vmovl_s8(vget_low_s8(b00_s8))), + vget_low_s16(vmovl_s8(vget_high_s8(b00_s8))), + vget_high_s16(vmovl_s8(vget_high_s8(b00_s8))) + } + }; + + // 4x4 block 0 + c0.val[0] = vmlal_lane_s16(c0.val[0], b00_s16.val[0], a00_s16, 0); + c0.val[1] = vmlal_lane_s16(c0.val[1], b00_s16.val[1], a00_s16, 0); + c0.val[2] = vmlal_lane_s16(c0.val[2], 
b00_s16.val[2], a00_s16, 0); + c0.val[3] = vmlal_lane_s16(c0.val[3], b00_s16.val[3], a00_s16, 0); + + // 4x4 block 1 + c1.val[0] = vmlal_lane_s16(c1.val[0], b00_s16.val[0], a00_s16, 1); + c1.val[1] = vmlal_lane_s16(c1.val[1], b00_s16.val[1], a00_s16, 1); + c1.val[2] = vmlal_lane_s16(c1.val[2], b00_s16.val[2], a00_s16, 1); + c1.val[3] = vmlal_lane_s16(c1.val[3], b00_s16.val[3], a00_s16, 1); + + // 4x4 block 2 + c2.val[0] = vmlal_lane_s16(c2.val[0], b00_s16.val[0], a00_s16, 2); + c2.val[1] = vmlal_lane_s16(c2.val[1], b00_s16.val[1], a00_s16, 2); + c2.val[2] = vmlal_lane_s16(c2.val[2], b00_s16.val[2], a00_s16, 2); + c2.val[3] = vmlal_lane_s16(c2.val[3], b00_s16.val[3], a00_s16, 2); + + // 4x4 block 3 + c3.val[0] = vmlal_lane_s16(c3.val[0], b00_s16.val[0], a00_s16, 3); + c3.val[1] = vmlal_lane_s16(c3.val[1], b00_s16.val[1], a00_s16, 3); + c3.val[2] = vmlal_lane_s16(c3.val[2], b00_s16.val[2], a00_s16, 3); + c3.val[3] = vmlal_lane_s16(c3.val[3], b00_s16.val[3], a00_s16, 3); + } + + auto mtx_out = reinterpret_cast(out.ptr()); + vst1q_s32(mtx_out + 0 * out_stride + 0, c0.val[0]); + vst1q_s32(mtx_out + 0 * out_stride + 4, c0.val[1]); + vst1q_s32(mtx_out + 0 * out_stride + 8, c0.val[2]); + vst1q_s32(mtx_out + 0 * out_stride + 12, c0.val[3]); + vst1q_s32(mtx_out + 1 * out_stride + 0, c1.val[0]); + vst1q_s32(mtx_out + 1 * out_stride + 4, c1.val[1]); + vst1q_s32(mtx_out + 1 * out_stride + 8, c1.val[2]); + vst1q_s32(mtx_out + 1 * out_stride + 12, c1.val[3]); + vst1q_s32(mtx_out + 2 * out_stride + 0, c2.val[0]); + vst1q_s32(mtx_out + 2 * out_stride + 4, c2.val[1]); + vst1q_s32(mtx_out + 2 * out_stride + 8, c2.val[2]); + vst1q_s32(mtx_out + 2 * out_stride + 12, c2.val[3]); + vst1q_s32(mtx_out + 3 * out_stride + 0, c3.val[0]); + vst1q_s32(mtx_out + 3 * out_stride + 4, c3.val[1]); + vst1q_s32(mtx_out + 3 * out_stride + 8, c3.val[2]); + vst1q_s32(mtx_out + 3 * out_stride + 12, c3.val[3]); + }, + ina, inb, out); +} + +void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + const size_t in_b_stride = _input1->info()->strides_in_bytes()[1]; + const size_t out_stride = _output->info()->strides_in_bytes()[1] / _output->info()->element_size(); + + // Set step_x and step_y for matrix A. Scale by a factor of 4 the Y range as the input interleaved matrix A has 4 times less the rows of the output matrix + Window win_a(window); + win_a.set(Window::DimX, Window::Dimension(0, 0, 0)); + win_a.set(Window::DimY, Window::Dimension(window.y().start() / 4, window.y().end() / 4, 1)); + + // Set step_x and step_y for matrix B. 
Scale by a factor of 16 the X range as the input transposed matrix A has 16 times less the columns of the output matrix + Window win_b; + // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2 + // This scenario can happen when the the matrix multiplication is used to perform a convolution operation + if(_slide_matrix_b) + { + win_b = window; + } + win_b.set(Window::DimX, Window::Dimension(window.x().start() / 16, window.x().end() / 16, in_b_stride)); + win_b.set(Window::DimY, Window::Dimension(0, 0, 0)); + + // The step x and step y for the output matrix has been already set using in configure() + Iterator ina(_input0, win_a); + Iterator inb(_input1, win_b); + Iterator out(_output, window); + + const int width_b = _input1->info()->dimension(0); + switch(_input0->info()->data_type()) + { + case DataType::S8: + { + matrix_multiply_s8(ina, inb, out, width_b, out_stride, window); + break; + } + case DataType::U8: + case DataType::QASYMM8: + { + matrix_multiply_u8(ina, inb, out, width_b, out_stride, window); + break; + } + default: + { + ARM_COMPUTE_ERROR("Not supported"); + break; + } + } +} diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp new file mode 100644 index 0000000000..b75a8ab251 --- /dev/null +++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h" + +#include "arm_compute/core/AccessWindowStatic.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/IAccessWindow.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp" +#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp" +} // namespace arm_compute + +#include +#include +#include + +// Enable only if compiled for AArch64-V8A targets +#ifdef ARM_COMPUTE_AARCH64_V8A + +namespace arm_compute +{ +void NEGEMMLowpAArch64Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) +{ + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); + + _input0 = input0; + _input1 = input1; + _output = output; + _workspace = workspace; + _alpha = alpha; + _beta = beta; + _transform_0 = transform_0; + _transform_1 = transform_1; + + // Configure kernel window + Window win = calculate_max_window(*output->info()); + + AccessWindowRectangle output_access(output->info(), 0, 0, 4, 4); + + const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 4); + const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 4); + + update_window_and_padding(win, + AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()), + AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()), + output_access); + + INEKernel::configure(win); +} + +void NEGEMMLowpAArch64Kernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + const int lda = _input0->info()->strides_in_bytes().y(); + const int ldb = _input1->info()->strides_in_bytes().y(); + const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t); + + const auto in1_ptr = reinterpret_cast(_input1->buffer()); + + const int M = std::min(_output->info()->tensor_shape().y(), static_cast(window.y().end())) - window.y().start(); + const int N = _output->info()->tensor_shape().x(); + const int K = _input0->info()->tensor_shape().x(); + + // Only iterate over batches + Window win(window); + win.set(0, Window::Dimension(0, 1, 1)); + win.set(1, Window::Dimension(0, 1, 1)); + + Iterator in0(_input0, window); + Iterator out(_output, window); + + GemmInterleaved gemm(&info.cpu_info, M, N, K, !_transform_1, !_transform_1); + + constexpr size_t alignment = 4096; + const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id; + void *workspace = _workspace->buffer() + offset; + size_t workspace_size = _workspace->info()->total_size(); + + if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr) + { + ARM_COMPUTE_ERROR("Not enough space to align buffer!"); + } + + execute_window_loop(win, [&](const Coordinates & id) + { + 
gemm.execute(reinterpret_cast(in0.ptr()), lda, + reinterpret_cast(in1_ptr), ldb, + reinterpret_cast(out.ptr()), ldc, + _alpha, _beta, workspace); + }, + in0, out); +} +} // namespace arm_compute +#endif /* ARM_COMPUTE_AARCH64_V8A */ diff --git a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp new file mode 100644 index 0000000000..708daeb265 --- /dev/null +++ b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp @@ -0,0 +1,168 @@ +/* Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h" +#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" +#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h" +#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" +#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h" +#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/runtime/NEON/NEScheduler.h" +#include "arm_compute/runtime/TensorAllocator.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp" +#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp" +#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp" + +} // namespace arm_compute + +using namespace arm_compute; + +NEGEMMLowpAssemblyMatrixMultiplyCore::NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr memory_manager) + : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b(), _workspace() +{ +} + +void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output) +{ + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b); + 
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); + ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A"); + ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B"); + +#ifdef __aarch64__ + const int M = output->info()->tensor_shape().y(); + const int N = output->info()->tensor_shape().x(); + const int K = a->info()->tensor_shape().x(); + constexpr size_t workspace_alignment = 4096; + const struct CPUInfo ci = NEScheduler::get().cpu_info(); +#endif /* __aarch64__ */ + +#ifdef ARM_COMPUTE_AARCH64_V8_2 + if(ci.CPU == CPUTarget::A75_DOT) + { + // Configure matrix multiply kernel + GemmInterleaved gemm(&ci, M, N, K, false, false); + _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8)); + _memory_group.manage(&_workspace); + + // Configure matrix multiplication kernel + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(a, b, output, &_workspace, 1.f, 1.f); + _mm_kernel = std::move(k); + _workspace.allocator()->allocate(); + } + else if(ci.CPU == CPUTarget::A55_DOT) + { + ARM_COMPUTE_ERROR_ON("WIP"); + } + else +#elif defined(ARM_COMPUTE_AARCH64_V8A) + if(1) + { + // Configure matrix multiply kernel + GemmInterleaved gemm(&ci, M, N, K, false, false); + _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8)); + _memory_group.manage(&_workspace); + // Configure matrix multiplication kernel + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(a, b, output, &_workspace, 1.f, 1.f); + _mm_kernel = std::move(k); + _workspace.allocator()->allocate(); + } + else +#endif /* ARM_COMPUTE_AARCH64_V8_2 */ + { + // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ] + TensorShape shape_tmp_a = a->info()->tensor_shape(); + shape_tmp_a.set(0, a->info()->dimension(0) * 4); + shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.f)); + + // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ] + TensorShape shape_tmp_b = b->info()->tensor_shape(); + shape_tmp_b.set(0, b->info()->dimension(1) * 16); + shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 16.f)); + + TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type()); + TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type()); + _tmp_a.allocator()->init(info_a); + _tmp_b.allocator()->init(info_b); + _memory_group.manage(&_tmp_a); + _memory_group.manage(&_tmp_b); + + // Configure interleave kernel + { + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(a, &_tmp_a); + _mtx_a_reshape_kernel = std::move(k); + } + + // Configure transpose kernel + { + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(b, &_tmp_b); + _mtx_b_reshape_kernel = std::move(k); + } + + // Configure matrix multiply kernel + { + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(&_tmp_a, &_tmp_b, output); + _mm_kernel = std::move(k); + } + + // Allocate tensors + _tmp_a.allocator()->allocate(); + _tmp_b.allocator()->allocate(); + } +} + +void 
NEGEMMLowpAssemblyMatrixMultiplyCore::run() +{ + _memory_group.acquire(); + if(_mtx_a_reshape_kernel) + { + NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY); + } + + if(_mtx_b_reshape_kernel) + { + NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY); + } + + NEScheduler::get().schedule(_mm_kernel.get(), Window::DimY); + + _memory_group.release(); +} diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp index bac3a20c8e..8670a22a66 100644 --- a/tests/validation/CPP/GEMMLowp.cpp +++ b/tests/validation/CPP/GEMMLowp.cpp @@ -73,6 +73,12 @@ SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, co return c; } +// used to validate assembly kernels which don't know anything about offsets +SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b) +{ + return gemmlowp_matrix_multiply_core(a, b, 0, 0); +} + template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift) { diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h index c09d8f6176..cbed2206e3 100644 --- a/tests/validation/CPP/GEMMLowp.h +++ b/tests/validation/CPP/GEMMLowp.h @@ -40,6 +40,9 @@ SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, co template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift); + +SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b); + } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp index 4407eff060..ba91ced443 100644 --- a/tests/validation/NEON/GEMMLowp.cpp +++ b/tests/validation/NEON/GEMMLowp.cpp @@ -22,7 +22,9 @@ * SOFTWARE. 
*/ #include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h" +#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h" #include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h" #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h" #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h" #include "arm_compute/runtime/Tensor.h" @@ -38,6 +40,7 @@ #include "tests/framework/datasets/Datasets.h" #include "tests/validation/Validation.h" #include "tests/validation/fixtures/GEMMInterleaveBlockedFixture.h" +#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h" #include "tests/validation/fixtures/GEMMLowpFixture.h" namespace arm_compute @@ -48,11 +51,21 @@ namespace validation { namespace { -const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9); -const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4); +const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9); +const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4); +const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 16); } // namespace TEST_SUITE(NEON) +TEST_SUITE(ASSEMBLY_MATRIX_MULTIPLY) +using NEGEMMAssemblyFixture = GEMMLowpAssemblyFixture; +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture, framework::DatasetMode::PRECOMMIT, data_matrix_multiply) +{ + // Validate output + validate(Accessor(_target), _reference); +} +TEST_SUITE_END() + TEST_SUITE(GEMMLowp) TEST_SUITE(INTERLEAVE_BLOCKED) diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h new file mode 100644 index 0000000000..a2587440fb --- /dev/null +++ b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2017 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE +#define ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/CPP/GEMMLowp.h" +#include "tests/validation/Helpers.h" + +#include + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template +class GEMMLowpAssemblyFixture : public framework::Fixture +{ +public: + template + void setup(size_t m, size_t n, size_t k) + { + const TensorShape shape_a(k, m); + const TensorShape shape_b(n, k); + const TensorShape shape_c(n, m); + _target = compute_target(shape_a, shape_b, shape_c); + _reference = compute_reference(shape_a, shape_b, shape_c); + } + +protected: + template + void fill(U &&tensor, int i, int lo, int hi) + { + std::uniform_int_distribution<> distribution(lo, hi); + library->fill(tensor, distribution, i); + } + + TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) + { + // Create tensors + TensorType a = create_tensor(shape_a, DataType::S8, 1); + TensorType b = create_tensor(shape_b, DataType::S8, 1); + TensorType c = create_tensor(shape_c, DataType::S32, 1); + + // Create and configure function + FunctionType gemmlowp; + gemmlowp.configure(&a, &b, &c); + + ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate tensors + a.allocator()->allocate(); + b.allocator()->allocate(); + c.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensors + fill(AccessorType(a), 0, -128, 127); + fill(AccessorType(b), 1, -128, 127); + fill(AccessorType(c), 2, 0, 0); + + // Compute GEMM function + gemmlowp.run(); + return c; + } + + SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) + { + // Create reference + SimpleTensor a{ shape_a, DataType::S8, 1 }; + SimpleTensor b{ shape_b, DataType::S8, 1 }; + + // Fill reference + fill(a, 0, -128, 127); + fill(b, 1, -128, 127); + + return reference::gemmlowp(a, b); + } + + TensorType _target{}; + SimpleTensor _reference{}; +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */ -- cgit v1.2.1
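
A minimal usage sketch for the NEGEMMLowpAssemblyMatrixMultiplyCore function introduced by this change, following the flow of the GEMMLowpAssemblyFixture above: S8 inputs A (M x K) and B (K x N), an S32 output C (M x N), and a plain C = A * B with no quantization offsets, which is what the new reference::gemmlowp overload validates against. The shapes, the wrapper function name and the fill step are illustrative assumptions, not part of the patch; the configure-before-allocate ordering mirrors the test fixture.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

// Computes C = A * B, where A is M x K (S8), B is K x N (S8) and C is M x N (S32).
// No offsets are applied, matching the assembly kernels integrated in this patch.
void run_assembly_gemmlowp(size_t m, size_t n, size_t k)
{
    Tensor a, b, c;
    a.allocator()->init(TensorInfo(TensorShape(k, m), 1, DataType::S8));
    b.allocator()->init(TensorInfo(TensorShape(n, k), 1, DataType::S8));
    c.allocator()->init(TensorInfo(TensorShape(n, m), 1, DataType::S32));

    // Configure while the tensors are still resizable, as the fixture does.
    NEGEMMLowpAssemblyMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, &c);

    // Allocate backing memory, then fill a and b with int8 data before running.
    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();

    gemmlowp.run();
}

On an AArch64 build this core dispatches either to the V8.2 dot-product kernel (on supporting CPUs) or to the new NEGEMMLowpAArch64Kernel added here; otherwise it falls back to the interleave/transpose path using NEGEMMLowpMatrixMultiplyKernel, which this patch extends to accept S8 input.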