From f3dfa279d536906dac3e618244b2c1d33e5ff28a Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Tue, 21 Nov 2017 17:52:12 +0000
Subject: COMPMID-632 Assembly: Integrate gemmlowp assembly version

Integrate generic gemmlowp assembly version for u8.

Change-Id: I17ed4494c25a132b2bac581febe1544e49b4f352
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110114
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com
Reviewed-by: Pablo Tello
---
 .../NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h   |   8 +
 .../kernels/assembly/kernels/a64_gemm_u8_4x4.hpp   |  61 +++
 .../assembly/kernels/a64_gemm_u8_4x4/generic.hpp   | 465 +++++++++++++++++++++
 .../kernels/NEGEMMLowpMatrixMultiplyKernel.cpp     |   2 +-
 .../NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp | 149 +++++--
 .../NEGEMMLowpAssemblyMatrixMultiplyCore.cpp       |  27 +-
 tests/validation/CPP/GEMMLowp.cpp                  |  28 +-
 tests/validation/CPP/GEMMLowp.h                    |  11 +-
 tests/validation/NEON/GEMMLowp.cpp                 |  17 +-
 .../validation/fixtures/GEMMLowpAssemblyFixture.h  |  42 +-
 tests/validation/fixtures/GEMMLowpFixture.h        |   2 +-
 11 files changed, 737 insertions(+), 75 deletions(-)
 create mode 100644 arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp
 create mode 100644 arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp

diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h
index e8e71cf3d7..a93df033de 100644
--- a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h
@@ -37,11 +37,19 @@ class ITensor;
 class NEGEMMLowpAArch64Kernel : public NEGEMMAssemblyBaseKernel
 {
 public:
+    /** Default constructor */
+    NEGEMMLowpAArch64Kernel();
+
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
 protected:
     void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) override;
+
+private:
+    using NEGEMMLowpAArch64 = void(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1, const Window &window,
+                                   const ThreadInfo &info);
+    NEGEMMLowpAArch64 *_func;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_AARCH64_V8A */
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp
new file mode 100644
index 0000000000..3561bfec96
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+// Load the actual kernel
+#include "a64_gemm_u8_4x4/generic.hpp"
+
+class gemm_u8_4x4 {
+public:
+    typedef uint8_t operand_type;
+    typedef uint32_t result_type;
+
+    typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+    /* Describes the data layout for A input */
+    static const int A_interleave = 4;
+    static const int A_block = 16;
+    static const bool A_transpose = false;
+
+    /* Same for B input */
+    static const int B_interleave = 4;
+    static const int B_block = 16;
+    static const bool B_transpose = true;
+
+    /* Kernel blocking parameters */
+    static const int out_width = 4;
+    static const int out_height = 4;
+    static const int k_unroll = 16;
+
+    kern_type kernel = nullptr;
+
+    gemm_u8_4x4(const CPUInfo *ci) {
+        kernel = a64_gemm_u8_4x4;
+    }
+};
+
+#endif // __aarch64__
+
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp
new file mode 100644
index 0000000000..e48c373f21
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ +#pragma once + +#ifdef __aarch64__ + +#include + +inline void a64_gemm_u8_4x4(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpanel, int ablocks, int bblocks, int K) { + const uint8_t *a_ptr = Apanel; + uint32_t *c_ptr = Cpanel; + K /= 16; + int oddk = (K & 1); + + for (int yb=0; yb @@ -50,10 +51,101 @@ namespace arm_compute namespace arm_compute { +NEGEMMLowpAArch64Kernel::NEGEMMLowpAArch64Kernel() + : _func(nullptr) +{ +} + +void gemm_interleaved_s8(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1, const Window &window, + const ThreadInfo &info) +{ + const int lda = input0->info()->strides_in_bytes().y(); + const int ldb = input1->info()->strides_in_bytes().y(); + const int ldc = output->info()->strides_in_bytes().y() / sizeof(int32_t); + + const auto in1_ptr = reinterpret_cast(input1->buffer()); + + const int M = std::min(output->info()->tensor_shape().y(), static_cast(window.y().end())) - window.y().start(); + const int N = output->info()->tensor_shape().x(); + const int K = input0->info()->tensor_shape().x(); + + // Only iterate over batches + Window win(window); + win.set(0, Window::Dimension(0, 1, 1)); + win.set(1, Window::Dimension(0, 1, 1)); + + Iterator in0(input0, window); + Iterator out(output, window); + + GemmInterleaved gemm(&info.cpu_info, M, N, K, !transform_1, !transform_1); + + constexpr size_t alignment = 4096; + const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id; + void *_workspace = workspace->buffer() + offset; + size_t workspace_size = workspace->info()->total_size(); + + if(support::cpp11::align(alignment, gemm.get_working_size(), _workspace, workspace_size) == nullptr) + { + ARM_COMPUTE_ERROR("Not enough space to align buffer!"); + } + + execute_window_loop(win, [&](const Coordinates & id) + { + gemm.execute(reinterpret_cast(in0.ptr()), lda, + reinterpret_cast(in1_ptr), ldb, + reinterpret_cast(out.ptr()), ldc, + alpha, beta, _workspace); + }, + in0, out); +} + +void gemm_interleaved_u8(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1, const Window &window, + const ThreadInfo &info) +{ + const int lda = input0->info()->strides_in_bytes().y(); + const int ldb = input1->info()->strides_in_bytes().y(); + const int ldc = output->info()->strides_in_bytes().y() / sizeof(uint32_t); + + const auto in1_ptr = reinterpret_cast(input1->buffer()); + + const int M = std::min(output->info()->tensor_shape().y(), static_cast(window.y().end())) - window.y().start(); + const int N = output->info()->tensor_shape().x(); + const int K = input0->info()->tensor_shape().x(); + + // Only iterate over batches + Window win(window); + win.set(0, Window::Dimension(0, 1, 1)); + win.set(1, Window::Dimension(0, 1, 1)); + + Iterator in0(input0, window); + Iterator out(output, window); + + GemmInterleaved gemm(&info.cpu_info, M, N, K, !transform_1, !transform_1); + + constexpr size_t alignment = 4096; + const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id; + void *_workspace = workspace->buffer() + offset; + size_t workspace_size = workspace->info()->total_size(); + + if(support::cpp11::align(alignment, gemm.get_working_size(), _workspace, workspace_size) == nullptr) + { + ARM_COMPUTE_ERROR("Not enough space to align buffer!"); + } + + execute_window_loop(win, [&](const Coordinates & id) + { + gemm.execute(reinterpret_cast(in0.ptr()), lda, + 
reinterpret_cast(in1_ptr), ldb, + reinterpret_cast(out.ptr()), ldc, + alpha, beta, _workspace); + }, + in0, out); +} + void NEGEMMLowpAArch64Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8, DataType::U8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::U32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1); _input0 = input0; @@ -65,6 +157,19 @@ void NEGEMMLowpAArch64Kernel::internal_configure(const ITensor *input0, const IT _transform_0 = transform_0; _transform_1 = transform_1; + switch(input0->info()->data_type()) + { + case DataType::S8: + _func = &gemm_interleaved_s8; + break; + case DataType::U8: + _func = &gemm_interleaved_u8; + break; + default: + ARM_COMPUTE_ERROR("Element size not supported"); + break; + } + // Configure kernel window Window win = calculate_max_window(*output->info()); @@ -85,45 +190,9 @@ void NEGEMMLowpAArch64Kernel::run(const Window &window, const ThreadInfo &info) { ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + ARM_COMPUTE_ERROR_ON(_func == nullptr); - const int lda = _input0->info()->strides_in_bytes().y(); - const int ldb = _input1->info()->strides_in_bytes().y(); - const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t); - - const auto in1_ptr = reinterpret_cast(_input1->buffer()); - - const int M = std::min(_output->info()->tensor_shape().y(), static_cast(window.y().end())) - window.y().start(); - const int N = _output->info()->tensor_shape().x(); - const int K = _input0->info()->tensor_shape().x(); - - // Only iterate over batches - Window win(window); - win.set(0, Window::Dimension(0, 1, 1)); - win.set(1, Window::Dimension(0, 1, 1)); - - Iterator in0(_input0, window); - Iterator out(_output, window); - - GemmInterleaved gemm(&info.cpu_info, M, N, K, !_transform_1, !_transform_1); - - constexpr size_t alignment = 4096; - const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id; - void *workspace = _workspace->buffer() + offset; - size_t workspace_size = _workspace->info()->total_size(); - - if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr) - { - ARM_COMPUTE_ERROR("Not enough space to align buffer!"); - } - - execute_window_loop(win, [&](const Coordinates & id) - { - gemm.execute(reinterpret_cast(in0.ptr()), lda, - reinterpret_cast(in1_ptr), ldb, - reinterpret_cast(out.ptr()), ldc, - _alpha, _beta, workspace); - }, - in0, out); + (*_func)(_input0, _input1, _output, _workspace, _alpha, _beta, _transform_0, _transform_1, window, info); } } // namespace arm_compute #endif /* ARM_COMPUTE_AARCH64_V8A */ diff --git a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp index 708daeb265..1bf437eb5f 100644 --- a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp +++ b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp @@ -43,6 +43,7 @@ namespace arm_compute #include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp" #include 
"arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp" #include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp" +#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp" } // namespace arm_compute @@ -55,8 +56,8 @@ NEGEMMLowpAssemblyMatrixMultiplyCore::NEGEMMLowpAssemblyMatrixMultiplyCore(std:: void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::S8); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b); ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A"); @@ -92,9 +93,25 @@ void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITe #elif defined(ARM_COMPUTE_AARCH64_V8A) if(1) { - // Configure matrix multiply kernel - GemmInterleaved gemm(&ci, M, N, K, false, false); - _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8)); + switch(a->info()->data_type()) + { + case DataType::S8: + { + // Configure matrix multiply kernel + GemmInterleaved gemm(&ci, M, N, K, false, false); + _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8)); + } + break; + case DataType::U8: + { + // Configure matrix multiply kernel + GemmInterleaved gemm(&ci, M, N, K, false, false); + _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8)); + } + break; + default: + ARM_COMPUTE_ERROR("Datatype not supported"); + } _memory_group.manage(&_workspace); // Configure matrix multiplication kernel auto k = arm_compute::support::cpp14::make_unique(); diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp index bf002cf2b5..35b8a6486e 100644 --- a/tests/validation/CPP/GEMMLowp.cpp +++ b/tests/validation/CPP/GEMMLowp.cpp @@ -63,19 +63,21 @@ void quantize_down_int32_to_uint8_scale(const SimpleTensor *in, const SimpleT } } // namespace -template -SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset) +template +SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset) { - TensorShape shape(b.shape()[0], a.shape()[1]); + static_assert(std::is_same::type, int32_t>::value, "Only int32_t is allowed for the output"); - SimpleTensor c(shape, DataType::S32); + TensorShape shape(b.shape()[0], a.shape()[1]); + DataType dt = std::is_same::value ? 
DataType::S32 : DataType::U32; + SimpleTensor c(shape, dt); const int K = a.shape().x(); const int b_width = b.shape().x(); const int rows = c.shape().y(); //M const int cols = c.shape().x(); //N - std::vector acc; + std::vector acc; acc.resize(cols); for(int i = 0; i < rows; ++i) @@ -86,11 +88,11 @@ SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, co } for(int k = 0; k < K; ++k) { - const int32_t tmp_a = a_offset + static_cast(a[k + i * K]); + const T_out tmp_a = a_offset + static_cast(a[k + i * K]); for(int j = 0; j < b_width; ++j) { - const int32_t tmp_b = b_offset + static_cast(b[j + k * b_width]); - const int32_t mult_as_int = tmp_a * tmp_b; + const T_out tmp_b = b_offset + static_cast(b[j + k * b_width]); + const T_out mult_as_int = tmp_a * tmp_b; acc[j] += mult_as_int; } } @@ -104,9 +106,10 @@ SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, co } // used to validate assembly kernels which don't know anything about offsets -SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b) +template +SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b) { - return gemmlowp_matrix_multiply_core(a, b, 0, 0); + return gemmlowp_matrix_multiply_core(a, b, 0, 0); } template @@ -130,11 +133,14 @@ SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTe return dst; } -template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset); template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max); template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &a, const SimpleTensor &b, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max); +template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset); +template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset); +template SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b); +template SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h index ee33d8e0c0..6c72b56e7a 100644 --- a/tests/validation/CPP/GEMMLowp.h +++ b/tests/validation/CPP/GEMMLowp.h @@ -35,13 +35,16 @@ namespace validation { namespace reference { -SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b); - template -SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset); +SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min = 0, int32_t max = 0); +template +SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset); template -SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min = 0, int32_t max = 0); +SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift); + +template +SimpleTensor 
gemmlowp(const SimpleTensor &a, const SimpleTensor &b); template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, const SimpleTensor &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp index 1418578a51..6366223820 100644 --- a/tests/validation/NEON/GEMMLowp.cpp +++ b/tests/validation/NEON/GEMMLowp.cpp @@ -58,14 +58,27 @@ const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framew TEST_SUITE(NEON) TEST_SUITE(ASSEMBLY_MATRIX_MULTIPLY) -using NEGEMMAssemblyFixture = GEMMLowpAssemblyFixture; -FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture, framework::DatasetMode::PRECOMMIT, data_matrix_multiply) + +using NEGEMMAssemblyFixture_S8 = GEMMLowpAssemblyFixture; +using NEGEMMAssemblyFixture_U8 = GEMMLowpAssemblyFixture; + +TEST_SUITE(S8) +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_S8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply) { // Validate output validate(Accessor(_target), _reference); } TEST_SUITE_END() +TEST_SUITE(U8) +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_U8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply) +{ + // Validate output + validate(Accessor(_target), _reference); +} +TEST_SUITE_END() +TEST_SUITE_END() + TEST_SUITE(GEMMLowp) TEST_SUITE(INTERLEAVE_BLOCKED) diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h index a2587440fb..38e08f7992 100644 --- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h +++ b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h @@ -42,7 +42,7 @@ namespace test { namespace validation { -template +template class GEMMLowpAssemblyFixture : public framework::Fixture { public: @@ -66,9 +66,11 @@ protected: TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) { + DataType dt_in = std::is_same::value ? DataType::S8 : DataType::U8; + // Create tensors - TensorType a = create_tensor(shape_a, DataType::S8, 1); - TensorType b = create_tensor(shape_b, DataType::S8, 1); + TensorType a = create_tensor(shape_a, dt_in, 1); + TensorType b = create_tensor(shape_b, dt_in, 1); TensorType c = create_tensor(shape_c, DataType::S32, 1); // Create and configure function @@ -89,8 +91,16 @@ protected: ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors - fill(AccessorType(a), 0, -128, 127); - fill(AccessorType(b), 1, -128, 127); + if(dt_in == DataType::S8) + { + fill(AccessorType(a), 0, -128, 127); + fill(AccessorType(b), 1, -128, 127); + } + else + { + fill(AccessorType(a), 0, 0, 128); + fill(AccessorType(b), 1, 0, 128); + } fill(AccessorType(c), 2, 0, 0); // Compute GEMM function @@ -100,15 +110,25 @@ protected: SimpleTensor compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c) { + DataType dt = std::is_same::value ? 
DataType::S8 : DataType::U8; + // Create reference - SimpleTensor a{ shape_a, DataType::S8, 1 }; - SimpleTensor b{ shape_b, DataType::S8, 1 }; + SimpleTensor a{ shape_a, dt, 1 }; + SimpleTensor b{ shape_b, dt, 1 }; // Fill reference - fill(a, 0, -128, 127); - fill(b, 1, -128, 127); - - return reference::gemmlowp(a, b); + if(dt == DataType::S8) + { + fill(a, 0, -128, 127); + fill(b, 1, -128, 127); + } + else + { + fill(a, 0, 0, 128); + fill(b, 1, 0, 128); + } + + return reference::gemmlowp(a, b); } TensorType _target{}; diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h index a99e9323c8..60b89bc653 100644 --- a/tests/validation/fixtures/GEMMLowpFixture.h +++ b/tests/validation/fixtures/GEMMLowpFixture.h @@ -110,7 +110,7 @@ protected: fill(a, 0); fill(b, 1); - return reference::gemmlowp_matrix_multiply_core(a, b, a_offset, b_offset); + return reference::gemmlowp_matrix_multiply_core(a, b, a_offset, b_offset); } TensorType _target{}; -- cgit v1.2.1
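
The new U8 path is validated against the reference gemmlowp_matrix_multiply_core template from tests/validation/CPP/GEMMLowp.cpp, which just adds the per-operand offsets, multiplies, and accumulates in 32 bits (the assembly tests call it with both offsets at zero). Below is a minimal standalone sketch of that computation for the uint8_t/uint32_t case; the function and variable names are illustrative and are not part of the library.

// Standalone sketch of the reference low-precision GEMM used for validation.
// ref_gemmlowp_u8 and its layout conventions are illustrative, not arm_compute APIs.
#include <cstdint>
#include <cstdio>
#include <vector>

// C[M x N] = sum_k (A[m][k] + a_offset) * (B[k][n] + b_offset), accumulated in 32 bits.
// The assembly tests in this patch use a_offset == b_offset == 0.
std::vector<uint32_t> ref_gemmlowp_u8(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                                      int M, int N, int K, int32_t a_offset = 0, int32_t b_offset = 0)
{
    std::vector<uint32_t> c(static_cast<size_t>(M) * N, 0);
    for(int m = 0; m < M; ++m)
    {
        for(int k = 0; k < K; ++k)
        {
            const uint32_t tmp_a = static_cast<uint32_t>(a_offset + a[m * K + k]);
            for(int n = 0; n < N; ++n)
            {
                const uint32_t tmp_b = static_cast<uint32_t>(b_offset + b[k * N + n]);
                c[m * N + n] += tmp_a * tmp_b;
            }
        }
    }
    return c;
}

int main()
{
    // 2x2 example: A = [[1,2],[3,4]], B = [[5,6],[7,8]] -> C = [[19,22],[43,50]]
    const std::vector<uint8_t> a{1, 2, 3, 4};
    const std::vector<uint8_t> b{5, 6, 7, 8};
    const auto c = ref_gemmlowp_u8(a, b, 2, 2, 2);
    for(uint32_t v : c)
    {
        std::printf("%u ", v);
    }
    std::printf("\n");
    return 0;
}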
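
On the kernel side, internal_configure now stores a function pointer to either gemm_interleaved_s8 or gemm_interleaved_u8, and each of those carves a 4096-byte-aligned, per-thread slice out of the shared workspace tensor before driving the interleaved GEMM. The sketch below isolates that dispatch-plus-alignment pattern with stand-in types; it is not the arm_compute implementation, and the working size and thread count are made up for illustration.

// Sketch of the data-type dispatch plus per-thread aligned-workspace pattern.
// DataType, GemmFunc, select_kernel and the stub gemm_* functions are illustrative stand-ins.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <stdexcept>
#include <vector>

enum class DataType { S8, U8 };

using GemmFunc = void (*)(void *workspace, int thread_id);

void gemm_interleaved_s8(void *, int) { /* would run the signed 8-bit blocked GEMM */ }
void gemm_interleaved_u8(void *, int) { /* would run the unsigned 8-bit blocked GEMM */ }

GemmFunc select_kernel(DataType dt)
{
    // Mirrors the switch added in internal_configure: one wrapper per element type.
    switch(dt)
    {
        case DataType::S8:
            return &gemm_interleaved_s8;
        case DataType::U8:
            return &gemm_interleaved_u8;
        default:
            throw std::runtime_error("Element size not supported");
    }
}

int main()
{
    constexpr size_t alignment    = 4096;      // same alignment the kernel requests
    constexpr size_t working_size = 32 * 1024; // per-thread requirement (illustrative value)
    constexpr int    num_threads  = 4;

    // One slice of (working_size + alignment - 1) bytes per thread, so every slice can be aligned,
    // matching how the workspace tensor is sized in configure().
    std::vector<uint8_t> buffer((working_size + alignment - 1) * num_threads);

    GemmFunc func = select_kernel(DataType::U8);

    for(int t = 0; t < num_threads; ++t)
    {
        const size_t offset = (working_size + alignment - 1) * static_cast<size_t>(t);
        void        *ptr    = buffer.data() + offset;
        size_t       space  = buffer.size() - offset;
        if(std::align(alignment, working_size, ptr, space) == nullptr)
        {
            throw std::runtime_error("Not enough space to align buffer!");
        }
        func(ptr, t); // each thread works on its own aligned workspace slice
    }
    return 0;
}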
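
The reference header touched by this patch also declares gemmlowp_quantize_down_int32_to_uint8_scale, the stage that maps int32 accumulators like the ones above back to uint8. A rough sketch of that requantization step follows; the exact formula (offset, multiplier, shift, optional clamp, saturation) is my reading of the declared parameters rather than code taken from the patch.

// Sketch of the int32 -> uint8 "quantize down" stage declared in the reference header.
// Assumed formula: ((acc + result_offset) * result_mult_int) >> result_shift,
// optionally clamped to [min, max], then saturated to the uint8 range. Illustrative only.
#include <algorithm>
#include <cstdint>

uint8_t quantize_down_int32_to_uint8_scale(int32_t acc, int32_t result_offset, int32_t result_mult_int,
                                           int32_t result_shift, int32_t min = 0, int32_t max = 0)
{
    int32_t value = (acc + result_offset) * result_mult_int;
    value >>= result_shift;
    if(min != 0 || max != 0)
    {
        value = std::min(std::max(value, min), max); // clamp only when bounds are given, as the min/max defaults suggest
    }
    return static_cast<uint8_t>(std::min(std::max(value, 0), 255)); // saturate to uint8
}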