From eaa759165231cd8b9486fe446128e0b054ac9e8d Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Wed, 4 Aug 2021 15:22:28 +0100
Subject: Add test for validating batched-GEMM on CPU

- Add new dataset for batched-GEMM
- Add test for running batched-GEMM without bias. Currently bias is not
  supported in batched-GEMM
- Fix reference implementation to slide correctly the RHS tensor

Resolves COMPMID-4588

Change-Id: I20fcb5d9160f44292b7cc34570add911b1d732f6
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6040
Reviewed-by: SiCong Li
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 tests/datasets/SmallGEMMDataset.h   | 12 +++++++++++-
 tests/validation/NEON/GEMM.cpp      | 11 +++++++++++
 tests/validation/reference/GEMM.cpp | 30 +++++++++++++++++++++++++-----
 3 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/tests/datasets/SmallGEMMDataset.h b/tests/datasets/SmallGEMMDataset.h
index 7d2b42a0d6..23b46a6547 100644
--- a/tests/datasets/SmallGEMMDataset.h
+++ b/tests/datasets/SmallGEMMDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -50,6 +50,16 @@ public:
         add_config(TensorShape(32U, 1U), TensorShape(17U, 32U), TensorShape(17U, 1U), TensorShape(17U, 1U), 0.4f, 0.7f);
     }
 };
+
+class SmallBatchedGEMMDataset final : public GEMMDataset
+{
+public:
+    SmallBatchedGEMMDataset()
+    {
+        add_config(TensorShape(2U, 4U, 1U, 3U), TensorShape(5U, 2U, 3U), TensorShape(5U), TensorShape(5U, 4U, 1U, 3U), 1.0f, 0.0f);
+    }
+};
+
 class SmallGEMMOutput3DDataset final : public GEMMDataset
 {
 public:
diff --git a/tests/validation/NEON/GEMM.cpp b/tests/validation/NEON/GEMM.cpp
index 27f0109590..7b207cc3f2 100644
--- a/tests/validation/NEON/GEMM.cpp
+++ b/tests/validation/NEON/GEMM.cpp
@@ -356,6 +356,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixtureDisabledC<float>, framework::Datas
 }
 TEST_SUITE_END()
 
+TEST_SUITE(BatchedGEMMDisabledC)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMFixtureDisabledC<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallBatchedGEMMDataset(),
+                                                                                                                    framework::dataset::make("ReshapeWeights", { true, false })),
+
+                                                                                                                    framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f);
+}
+TEST_SUITE_END()
+
 TEST_SUITE_END()
 TEST_SUITE_END()
 
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index 6b3aa390f0..f7e97e47b8 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -51,12 +51,22 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
     const int a_stride_w = K * M * D;
 
     const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;     // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    const int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+    int       b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+
+    // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
+    // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
+    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+
+    // Batched-GEMM
+    if(is_batched_gemm)
+    {
+        b_stride_w = b_stride_z;
+    }
 
     const int c_stride_z = N * M;
     const int c_stride_w = N * M * D;
 
-#if defined(_OPENMP) && !( defined(__arm__) && defined(__ANDROID__))
+#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
     #pragma omp parallel for collapse(2)
 #endif /* _OPENMP */
     for(int w = 0; w < W; ++w)
@@ -106,12 +116,22 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
     const int a_stride_w = K * M * D;
 
     const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;     // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    const int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+    int       b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+
+    // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
+    // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
+    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+
+    // Batched-GEMM
+    if(is_batched_gemm)
+    {
+        b_stride_w = b_stride_z;
+    }
 
     const int c_stride_z = N * M;
     const int c_stride_w = N * M * D;
 
-#if defined(_OPENMP) && !( defined(__arm__) && defined(__ANDROID__))
+#if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
     #pragma omp parallel for collapse(2)
 #endif /* _OPENMP */
     for(int w = 0; w < W; ++w)
-- 
cgit v1.2.1
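
Editor's note: for readers following the stride fix outside the diff context, the following is a minimal, self-contained C++ sketch of the rule the reference change introduces. It is illustrative only; the arrays, variable names and shapes below are assumptions mirroring the new SmallBatchedGEMMDataset entry, not ComputeLibrary code. The idea: with a 3D RHS and a 4D LHS/output whose third dimension is 1, the RHS must be slid once per batch, i.e. b_stride_w takes the value of b_stride_z.

    // Hypothetical illustration of the batched-GEMM stride rule (not library code).
    #include <cstdio>

    int main()
    {
        // Shapes follow the new SmallBatchedGEMMDataset entry, laid out as {x, y, z, w}.
        const int a[4]   = { 2, 4, 1, 3 }; // LHS: K = 2, M = 4, z = 1, 3 batches
        const int b[3]   = { 5, 2, 3 };    // RHS: N = 5, K = 2, 3 batches
        const int dst[4] = { 5, 4, 1, 3 }; // Output: N = 5, M = 4, z = 1, 3 batches

        const int N = b[0];
        const int K = b[1];

        const int b_stride_z = N * K; // RHS slides along its own 3rd dimension
        int       b_stride_w = 0;     // a 3D RHS does not slide along a 4th dimension by default

        // Batched-GEMM case: RHS is 3D while LHS/output are 4D with their third dimension equal to 1
        const bool is_batched_gemm = (a[2] == 1) && (dst[2] == 1);
        if(is_batched_gemm)
        {
            b_stride_w = b_stride_z; // the fix: each batch w reads its own RHS slice
        }

        for(int w = 0; w < a[3]; ++w)
        {
            std::printf("batch %d: RHS base offset = %d\n", w, w * b_stride_w);
        }
        return 0;
    }

Compiled and run, the sketch prints a distinct RHS base offset (0, 10, 20) for each of the three batches; without the fix, b_stride_w stays 0 for a 3D RHS, so every batch would read the same RHS matrix.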