author | Georgios Pinitas <georgios.pinitas@arm.com> | 2018-09-24 16:31:08 +0100
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:55:19 +0000
commit | ebf6b8a00b77ea796d877bc1d0e6850c055318a6 (patch)
tree | a8c2bb26d951dd0d25c5e223358d6695ad5f0468 /tests
parent | 96e922e8ee4187906211ee0d1dd0f3e27667c170 (diff)
download | ComputeLibrary-ebf6b8a00b77ea796d877bc1d0e6850c055318a6.tar.gz
COMPMID-1518: Add support for GEMM3D in CLGEMMLowpMatrixMultiplyCore
Change-Id: Ib14ac821ee5d4aff80bd602cd3e76e7018abb5e6
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/150268
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
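
The GEMM3D behaviour exercised by these tests is requested through the GEMMInfo argument that the updated fixture passes to CLGEMMLowpMatrixMultiplyCore::configure(). Below is a minimal usage sketch of that call pattern, not part of the patch itself: the tensor shapes are borrowed from the new SmallGEMMLowpOutput3DDataset, while the function name, setup order and the omitted tensor filling are illustrative assumptions.

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

using namespace arm_compute;

void run_gemmlowp_3d_output()
{
    CLScheduler::get().default_init();

    // A: 21x14 (K=21, M=14), B: 34x21 (N=34, K=21), C reinterpreted as 34x7x2 (N x M' x depth).
    CLTensor a, b, c;
    a.allocator()->init(TensorInfo(TensorShape(21U, 14U), 1, DataType::QASYMM8));
    b.allocator()->init(TensorInfo(TensorShape(34U, 21U), 1, DataType::QASYMM8));
    c.allocator()->init(TensorInfo(TensorShape(34U, 7U, 2U), 1, DataType::S32));

    // depth_output_gemm3d = 2 asks the function to write the 2D GEMM result back as a 3D tensor;
    // the last flag would additionally reinterpret a 3D input A as 2D.
    CLGEMMLowpMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, &c, GEMMInfo(false, false, false, 2 /* depth_output_gemm3d */, false /* reinterpret_input_as_3d */));

    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();

    // ... fill a and b with quantized data, then:
    gemmlowp.run();
    CLScheduler::get().sync();
}
```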
Diffstat (limited to 'tests')
-rw-r--r-- | tests/datasets/LargeGEMMLowpDataset.h | 22
-rw-r--r-- | tests/datasets/SmallGEMMLowpDataset.h | 28
-rw-r--r-- | tests/validate_examples/cl_gemm.cpp | 2
-rw-r--r-- | tests/validation/CL/GEMMLowp.cpp | 30
-rw-r--r-- | tests/validation/fixtures/GEMMLowpAssemblyFixture.h | 2
-rw-r--r-- | tests/validation/fixtures/GEMMLowpFixture.h | 24
-rw-r--r-- | tests/validation/reference/GEMMLowp.cpp | 71
-rw-r--r-- | tests/validation/reference/GEMMLowp.h | 6
8 files changed, 138 insertions, 47 deletions
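
As the diff below shows, the reference implementation in tests/validation/reference/GEMMLowp.cpp now takes the output shape explicitly and loops over the third dimension, sliding matrices A and C batch by batch while broadcasting B when it is only 2D. The following standalone snippet is a sketch of that batched indexing scheme; the function name, the flat std::vector storage and the b_is_3d flag are illustrative stand-ins for the library's SimpleTensor plumbing, not part of the patch.

```cpp
#include <cstdint>
#include <vector>

// Batched low-precision GEMM: C[d] = (A[d] + a_offset) * (B[d or 0] + b_offset),
// with row-major MxK A slices, KxN B slices and MxN C slices.
std::vector<int32_t> gemmlowp_batched(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                                      int M, int N, int K, int D, bool b_is_3d,
                                      int32_t a_offset, int32_t b_offset)
{
    std::vector<int32_t> c(static_cast<size_t>(M) * N * D, 0);

    const int a_stride_z = K * M;               // one MxK slice of A per batch
    const int b_stride_z = b_is_3d ? N * K : 0; // do not slide B when it is only 2D
    const int c_stride_z = N * M;

    for(int d = 0; d < D; ++d)
    {
        for(int i = 0; i < M; ++i)
        {
            for(int j = 0; j < N; ++j)
            {
                int32_t acc = 0;
                for(int k = 0; k < K; ++k)
                {
                    const int32_t va = a_offset + a[d * a_stride_z + i * K + k];
                    const int32_t vb = b_offset + b[d * b_stride_z + k * N + j];
                    acc += va * vb;
                }
                c[d * c_stride_z + i * N + j] = acc;
            }
        }
    }
    return c;
}
```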
```diff
diff --git a/tests/datasets/LargeGEMMLowpDataset.h b/tests/datasets/LargeGEMMLowpDataset.h
index 5c0230e262..65cb742ead 100644
--- a/tests/datasets/LargeGEMMLowpDataset.h
+++ b/tests/datasets/LargeGEMMLowpDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -52,6 +52,26 @@ public:
         add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1);
     }
 };
+class LargeGEMMLowpOutput3DDataset final : public GEMMLowpDataset
+{
+public:
+    LargeGEMMLowpOutput3DDataset()
+    {
+        add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 143U, 3U), 0, 0);
+        add_config(TensorShape(681U, 1025U), TensorShape(213U, 681U), TensorShape(213U, 205U, 5U), -3, 2);
+        add_config(TensorShape(364U, 3025U), TensorShape(96U, 364U), TensorShape(96U, 605U, 5U), 2, 3);
+    }
+};
+class LargeGEMMLowpInputOutput3DDataset final : public GEMMLowpDataset
+{
+public:
+    LargeGEMMLowpInputOutput3DDataset()
+    {
+        add_config(TensorShape(923U, 143U, 3U), TensorShape(871U, 923U), TensorShape(871U, 143U, 3U), 0, 0);
+        add_config(TensorShape(681U, 205U, 5U), TensorShape(213U, 681U), TensorShape(213U, 205U, 5U), -2, 5);
+        add_config(TensorShape(364U, 605U, 5U), TensorShape(96U, 364U), TensorShape(96U, 605U, 5U), 2, 4);
+    }
+};
 } // namespace datasets
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/datasets/SmallGEMMLowpDataset.h b/tests/datasets/SmallGEMMLowpDataset.h
index b6651bdb42..40f0c718c6 100644
--- a/tests/datasets/SmallGEMMLowpDataset.h
+++ b/tests/datasets/SmallGEMMLowpDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -52,6 +52,32 @@ public:
         add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1);
     }
 };
+class SmallGEMMLowpOutput3DDataset final : public GEMMLowpDataset
+{
+public:
+    SmallGEMMLowpOutput3DDataset()
+    {
+        add_config(TensorShape(21U, 14U), TensorShape(34U, 21U), TensorShape(34U, 7U, 2U), 0, 0);
+        add_config(TensorShape(31U, 1U), TensorShape(23U, 31U), TensorShape(23U, 1U, 1U), -2, 13);
+        add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 4U, 3U), 0, 4);
+        add_config(TensorShape(32U, 1U), TensorShape(17U, 32U), TensorShape(17U, 1U, 1U), -2, 1);
+        add_config(TensorShape(16U, 16U), TensorShape(8U, 16U), TensorShape(8U, 8U, 2U), 5, 9);
+        add_config(TensorShape(16U, 16U, 5U), TensorShape(8U, 16U, 5U), TensorShape(8U, 8U, 2U, 5U), -7, 2);
+    }
+};
+class SmallGEMMLowpInputOutput3DDataset final : public GEMMLowpDataset
+{
+public:
+    SmallGEMMLowpInputOutput3DDataset()
+    {
+        add_config(TensorShape(21U, 14U, 13U), TensorShape(34U, 21U), TensorShape(34U, 14U, 13U), 0, 0);
+        add_config(TensorShape(31U, 1U, 3U), TensorShape(23U, 31U), TensorShape(23U, 1U, 3U), 0, 0);
+        add_config(TensorShape(38U, 12U, 2U), TensorShape(21U, 38U), TensorShape(21U, 12U, 2U), -2, 13);
+        add_config(TensorShape(32U, 1U, 4U, 3U), TensorShape(17U, 32U), TensorShape(17U, 1U, 4U, 3U), 0, 4);
+        add_config(TensorShape(16U, 16U, 3U, 2U), TensorShape(8U, 16U), TensorShape(8U, 16U, 3U, 2U), -2, 0);
+        add_config(TensorShape(16U, 16U, 5U, 3U), TensorShape(8U, 16U), TensorShape(8U, 16U, 5U, 3U), -9, 1);
+    }
+};
 } // namespace datasets
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validate_examples/cl_gemm.cpp b/tests/validate_examples/cl_gemm.cpp
index e0aefbf359..cdaa33f31a 100644
--- a/tests/validate_examples/cl_gemm.cpp
+++ b/tests/validate_examples/cl_gemm.cpp
@@ -285,7 +285,7 @@ public:
         fill(ref_src0, 0);
         fill(ref_src1, 1);

-        SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, offset_src0, offset_src1);
+        SimpleTensor<int32_t> ref_tmp_dst = reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(ref_src0, ref_src1, TensorShape(N, M), offset_src0, offset_src1);

         if(add_bias)
         {
diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 5148a31936..42bb2123bf 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,6 +47,7 @@
 TEST_SUITE(CL)
 TEST_SUITE(GEMMLowp)
 TEST_SUITE(MatrixMultiplyCore)
+
 using CLGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore>;

 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()),
@@ -81,6 +82,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreFixture, framework:
     validate(CLAccessor(_target), _reference);
 }

+TEST_SUITE(Output3D)
+using CLGEMMLowpMatrixMultiplyCoreOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, false, true>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpOutput3DDataset())
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpOutput3DDataset())
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // Output3D
+
+TEST_SUITE(InputOutput3D)
+using CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture = GEMMLowpMatrixMultiplyCoreValidationFixture<CLTensor, CLAccessor, CLGEMMLowpMatrixMultiplyCore, true, true>;
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::PRECOMMIT, datasets::SmallGEMMLowpInputOutput3DDataset())
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMLowpMatrixMultiplyCoreInputOutput3DFixture, framework::DatasetMode::NIGHTLY, datasets::LargeGEMMLowpInputOutput3DDataset())
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // InputOutput3D
 TEST_SUITE_END() // MatrixMultiplyCore

 TEST_SUITE(OutputStage)
diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
index d6b94a197d..519932f3b2 100644
--- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
+++ b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
@@ -128,7 +128,7 @@ protected:
             fill(b, 1, 0, 255);
         }

-        return reference::gemmlowp<int32_t, T2>(a, b);
+        return reference::gemmlowp<int32_t, T2>(a, b, shape_c);
     }

     TensorType _target{};
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 06d6be3fa4..73cb8328ea 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -42,7 +42,7 @@ namespace test
 {
 namespace validation
 {
-template <typename TensorType, typename AccessorType, typename FunctionType>
+template <typename TensorType, typename AccessorType, typename FunctionType, bool reinterpret_input_as_3d = false, bool reinterpret_output_as_3d = false>
 class GEMMLowpMatrixMultiplyCoreValidationFixture : public framework::Fixture
 {
 public:
@@ -62,8 +62,7 @@ protected:
         library->fill(tensor, distribution, i);
     }

-    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
-                              int32_t a_offset, int32_t b_offset)
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
     {
         // Create tensors
         TensorType a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1);
@@ -74,8 +73,9 @@ protected:
         b.info()->set_quantization_info(QuantizationInfo(1.0f / 255, b_offset));

         // Create and configure function
+        // The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
         FunctionType gemmlowp;
-        gemmlowp.configure(&a, &b, &c);
+        gemmlowp.configure(&a, &b, &c, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_c[2] : 1), reinterpret_input_as_3d));

         ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -99,18 +99,24 @@ protected:
         return c;
     }

-    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
-                                            int32_t a_offset, int32_t b_offset)
+    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c, int32_t a_offset, int32_t b_offset)
     {
+        TensorShape shape_a_to_use = shape_a;
+        if(reinterpret_input_as_3d)
+        {
+            // Collapse the second and third dimension if the input is 3D
+            shape_a_to_use.collapse(2U, 1U);
+        }
+
         // Create reference
-        SimpleTensor<uint8_t> a{ shape_a, DataType::QASYMM8, 1 };
+        SimpleTensor<uint8_t> a{ shape_a_to_use, DataType::QASYMM8, 1 };
         SimpleTensor<uint8_t> b{ shape_b, DataType::QASYMM8, 1 };

         // Fill reference
         fill(a, 0);
         fill(b, 1);

-        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, a_offset, b_offset);
+        return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, shape_c, a_offset, b_offset);
     }

     TensorType _target{};
diff --git a/tests/validation/reference/GEMMLowp.cpp b/tests/validation/reference/GEMMLowp.cpp
index 8e41aef46a..9a7e409e8a 100644
--- a/tests/validation/reference/GEMMLowp.cpp
+++ b/tests/validation/reference/GEMMLowp.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -98,41 +98,52 @@ void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in,
 } // namespace

 template <typename T_out, typename T_in>
-SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in> &b, int32_t a_offset, int32_t b_offset)
+SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
 {
     static_assert(std::is_same<typename std::decay<T_out>::type, int32_t>::value, "Only int32_t is allowed for the output");

-    TensorShape shape(b.shape()[0], a.shape()[1]);
     DataType dt = std::is_same<T_out, int32_t>::value ? DataType::S32 : DataType::U32;
-    SimpleTensor<T_out> c(shape, dt);
+    SimpleTensor<T_out> c(shape_c, dt);

-    const int K       = a.shape().x();
-    const int b_width = b.shape().x();
-    const int rows    = c.shape().y(); //M
-    const int cols    = c.shape().x(); //N
+    const int K = a.shape().x();
+    const int M = a.shape().y();
+    const int N = b.shape().x();
+    const int D = a.shape().z(); // Number of matrices in a batch
+
+    const int a_stride_z = K * M;
+    // Do not slide the matrix B along the 3rd dimension in case matrix B has less than 3 dimensions
+    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;
+    const int c_stride_z = N * M;

     std::vector<T_out> acc;
-    acc.resize(cols);
+    acc.resize(N);

-    for(int i = 0; i < rows; ++i)
+    for(int depth = 0; depth < D; ++depth)
     {
-        for(int j = 0; j < cols; ++j)
-        {
-            acc[j] = 0;
-        }
-        for(int k = 0; k < K; ++k)
+        const int base_addr_a = depth * a_stride_z;
+        const int base_addr_b = depth * b_stride_z;
+        const int base_addr_c = depth * c_stride_z;
+
+        for(int i = 0; i < M; ++i)
         {
-            const T_out tmp_a = a_offset + static_cast<T_out>(a[k + i * K]);
-            for(int j = 0; j < b_width; ++j)
+            for(int j = 0; j < N; ++j)
             {
-                const T_out tmp_b       = b_offset + static_cast<T_out>(b[j + k * b_width]);
-                const T_out mult_as_int = tmp_a * tmp_b;
-                acc[j] += mult_as_int;
+                acc[j] = 0;
+            }
+            for(int k = 0; k < K; ++k)
+            {
+                const T_out tmp_a = a_offset + static_cast<T_out>(a[base_addr_a + k + i * K]);
+                for(int j = 0; j < N; ++j)
+                {
+                    const T_out tmp_b       = b_offset + static_cast<T_out>(b[base_addr_b + j + k * N]);
+                    const T_out mult_as_int = tmp_a * tmp_b;
+                    acc[j] += mult_as_int;
+                }
+            }
+            for(int j = 0; j < N; ++j)
+            {
+                c[base_addr_c + j + i * N] = acc[j];
             }
-        }
-        for(int j = 0; j < cols; ++j)
-        {
-            c[j + i * cols] = acc[j];
         }
     }

@@ -141,9 +152,9 @@ SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, c

 // used to validate assembly kernels which don't know anything about offsets
 template <typename T1, typename T2>
-SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b)
+SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, TensorShape shape_c)
 {
-    return gemmlowp_matrix_multiply_core<T1, T2>(a, b, 0, 0);
+    return gemmlowp_matrix_multiply_core<T1, T2>(a, b, shape_c, 0, 0);
 }

 template <typename T>
@@ -198,10 +209,10 @@ template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const
                                                                            int32_t max);
 template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
                                                                            int32_t result_shift, int32_t min, int32_t max);
-template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, int32_t a_offset, int32_t b_offset);
-template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, int32_t a_offset, int32_t b_offset);
-template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b);
-template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b);
+template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
+template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/GEMMLowp.h b/tests/validation/reference/GEMMLowp.h
index a3d0bebe3f..4396155b96 100644
--- a/tests/validation/reference/GEMMLowp.h
+++ b/tests/validation/reference/GEMMLowp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,13 +38,13 @@ namespace reference
 template <typename T>
 SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min = 0, int32_t max = 0);
 template <typename T1, typename T2>
-SimpleTensor<T1> gemmlowp_matrix_multiply_core(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, int32_t a_offset, int32_t b_offset);
+SimpleTensor<T1> gemmlowp_matrix_multiply_core(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
 template <typename T>
 SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift);
 template <typename T1, typename T2>
-SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b);
+SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, TensorShape shape_c);
 template <typename T>
 SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
```