From ebf6b8a00b77ea796d877bc1d0e6850c055318a6 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Mon, 24 Sep 2018 16:31:08 +0100
Subject: COMPMID-1518: Add support for GEMM3D in CLGEMMLowpMatrixMultiplyCore

Change-Id: Ib14ac821ee5d4aff80bd602cd3e76e7018abb5e6
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/150268
Tested-by: bsgcomp
Reviewed-by: Isabella Gottardi
Reviewed-by: Michele DiGiorgio
---
 tests/validation/reference/GEMMLowp.cpp | 71 +++++++++++++++++++--------------
 tests/validation/reference/GEMMLowp.h   |  6 +--
 2 files changed, 44 insertions(+), 33 deletions(-)

(limited to 'tests/validation/reference')

diff --git a/tests/validation/reference/GEMMLowp.cpp b/tests/validation/reference/GEMMLowp.cpp
index 8e41aef46a..9a7e409e8a 100644
--- a/tests/validation/reference/GEMMLowp.cpp
+++ b/tests/validation/reference/GEMMLowp.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -98,41 +98,52 @@ void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor *in,
 } // namespace
 
 template
-SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset)
+SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
 {
     static_assert(std::is_same::type, int32_t>::value, "Only int32_t is allowed for the output");
 
-    TensorShape shape(b.shape()[0], a.shape()[1]);
     DataType dt = std::is_same::value ? DataType::S32 : DataType::U32;
-    SimpleTensor c(shape, dt);
+    SimpleTensor c(shape_c, dt);
 
-    const int K       = a.shape().x();
-    const int b_width = b.shape().x();
-    const int rows    = c.shape().y(); //M
-    const int cols    = c.shape().x(); //N
+    const int K = a.shape().x();
+    const int M = a.shape().y();
+    const int N = b.shape().x();
+    const int D = a.shape().z(); // Number of matrices in a batch
+
+    const int a_stride_z = K * M;
+    // Do not slide the matrix B along the 3rd dimension in case matrix B has less than 3 dimensions
+    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;
+    const int c_stride_z = N * M;
 
     std::vector acc;
-    acc.resize(cols);
+    acc.resize(N);
 
-    for(int i = 0; i < rows; ++i)
+    for(int depth = 0; depth < D; ++depth)
     {
-        for(int j = 0; j < cols; ++j)
-        {
-            acc[j] = 0;
-        }
-        for(int k = 0; k < K; ++k)
+        const int base_addr_a = depth * a_stride_z;
+        const int base_addr_b = depth * b_stride_z;
+        const int base_addr_c = depth * c_stride_z;
+
+        for(int i = 0; i < M; ++i)
         {
-            const T_out tmp_a = a_offset + static_cast(a[k + i * K]);
-            for(int j = 0; j < b_width; ++j)
+            for(int j = 0; j < N; ++j)
             {
-                const T_out tmp_b       = b_offset + static_cast(b[j + k * b_width]);
-                const T_out mult_as_int = tmp_a * tmp_b;
-                acc[j] += mult_as_int;
+                acc[j] = 0;
+            }
+            for(int k = 0; k < K; ++k)
+            {
+                const T_out tmp_a = a_offset + static_cast(a[base_addr_a + k + i * K]);
+                for(int j = 0; j < N; ++j)
+                {
+                    const T_out tmp_b       = b_offset + static_cast(b[base_addr_b + j + k * N]);
+                    const T_out mult_as_int = tmp_a * tmp_b;
+                    acc[j] += mult_as_int;
+                }
+            }
+            for(int j = 0; j < N; ++j)
+            {
+                c[base_addr_c + j + i * N] = acc[j];
             }
-        }
-        for(int j = 0; j < cols; ++j)
-        {
-            c[j + i * cols] = acc[j];
         }
     }
 
@@ -141,9 +152,9 @@ SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, c
 
 // used to validate assembly kernels which don't know anything about offsets
 template
-SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b)
+SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c)
 {
-    return gemmlowp_matrix_multiply_core(a, b, 0, 0);
+    return gemmlowp_matrix_multiply_core(a, b, shape_c, 0, 0);
 }
 
 template
@@ -198,10 +209,10 @@ template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const
                                                                    int32_t max);
 template SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &a, const SimpleTensor &b, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max);
-template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset);
-template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset);
-template SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b);
-template SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b);
+template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
+template SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
+template SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c);
+template SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/GEMMLowp.h b/tests/validation/reference/GEMMLowp.h
index a3d0bebe3f..4396155b96 100644
--- a/tests/validation/reference/GEMMLowp.h
+++ b/tests/validation/reference/GEMMLowp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,13 +38,13 @@ namespace reference
 template
 SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min = 0, int32_t max = 0);
 template
-SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, int32_t a_offset, int32_t b_offset);
+SimpleTensor gemmlowp_matrix_multiply_core(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
 template
 SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift);
 template
-SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b);
+SimpleTensor gemmlowp(const SimpleTensor &a, const SimpleTensor &b, TensorShape shape_c);
 template
 SimpleTensor gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor &in, const SimpleTensor &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
-- 
cgit v1.2.1
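
For readers unfamiliar with the GEMM3D layout used by the patch, the sketch below restates the batched low-precision matrix multiply that the new reference loop performs. It is a minimal standalone illustration, assuming row-major std::vector buffers and a hypothetical function name (batched_gemmlowp) rather than the library's SimpleTensor API; the zero b_stride_z mirrors the patch's choice of not sliding a 2D matrix B along the batch dimension, so the same weights are reused for every batch.

// Standalone sketch (assumed names, not the library's API) of the batched
// low-precision reference computation: C[d] = (A[d] + a_offset) * (B[d or 0] + b_offset),
// accumulated in int32_t.
#include <cstdint>
#include <cstdio>
#include <vector>

// A: D batches of MxK (row-major). B: one KxN matrix shared by all batches
// (b_is_3d == false) or D batches of KxN. Returns D batches of MxN.
std::vector<int32_t> batched_gemmlowp(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                                      int M, int N, int K, int D, bool b_is_3d,
                                      int32_t a_offset, int32_t b_offset)
{
    const int a_stride_z = M * K;
    const int b_stride_z = b_is_3d ? K * N : 0; // do not slide B along z if it is 2D
    const int c_stride_z = M * N;

    std::vector<int32_t> c(static_cast<size_t>(D) * c_stride_z, 0);

    for(int d = 0; d < D; ++d)
    {
        for(int i = 0; i < M; ++i)
        {
            for(int j = 0; j < N; ++j)
            {
                int32_t acc = 0;
                for(int k = 0; k < K; ++k)
                {
                    const int32_t va = a_offset + a[d * a_stride_z + i * K + k];
                    const int32_t vb = b_offset + b[d * b_stride_z + k * N + j];
                    acc += va * vb;
                }
                c[d * c_stride_z + i * N + j] = acc;
            }
        }
    }
    return c;
}

int main()
{
    // Two batches of a 2x3 * 3x2 product, with B shared (2D) across batches.
    const std::vector<uint8_t> a = {1, 2, 3, 4, 5, 6, /* batch 1 */ 6, 5, 4, 3, 2, 1};
    const std::vector<uint8_t> b = {1, 0, 0, 1, 1, 1};
    const auto c = batched_gemmlowp(a, b, /*M=*/2, /*N=*/2, /*K=*/3, /*D=*/2,
                                    /*b_is_3d=*/false, /*a_offset=*/0, /*b_offset=*/0);
    for(int32_t v : c)
    {
        std::printf("%d ", static_cast<int>(v));
    }
    std::printf("\n");
    return 0;
}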