Diffstat (limited to 'tests/validation/reference/GEMM.cpp')
-rw-r--r-- | tests/validation/reference/GEMM.cpp | 79
1 file changed, 52 insertions, 27 deletions
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f7e97e47b8..20f1139a02 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021,2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,10 +35,11 @@ namespace validation
 namespace reference
 {
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T>
+gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+    SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
 
     // Compute reference
     const int M = a.shape().y();
@@ -50,15 +51,22 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
     const int a_stride_z = K * M;
     const int a_stride_w = K * M * D;
 
-    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+    const int b_stride_z =
+        b.shape().num_dimensions() > 2
+            ? N * K
+            : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
+    int b_stride_w =
+        b.shape().num_dimensions() > 3
+            ? K * N * D
+            : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
 
     // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
     // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
-    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+                                 c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
 
     // Batched-GEMM
-    if(is_batched_gemm)
+    if (is_batched_gemm)
     {
         b_stride_w = b_stride_z;
     }
@@ -69,21 +77,21 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
 #if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
 #pragma omp parallel for collapse(2)
 #endif /* _OPENMP */
-    for(int w = 0; w < W; ++w)
+    for (int w = 0; w < W; ++w)
     {
-        for(int depth = 0; depth < D; ++depth)
+        for (int depth = 0; depth < D; ++depth)
         {
             const int base_addr_a = depth * a_stride_z + w * a_stride_w;
             const int base_addr_b = depth * b_stride_z + w * b_stride_w;
             const int base_addr_c = depth * c_stride_z + w * c_stride_w;
 
-            for(int row = 0; row < M; ++row)
+            for (int row = 0; row < M; ++row)
             {
-                for(int col = 0; col < N; ++col)
+                for (int col = 0; col < N; ++col)
                 {
                     T acc(0);
 
-                    for(int k = 0; k < K; ++k)
+                    for (int k = 0; k < K; ++k)
                     {
                         acc += a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N];
                     }
@@ -99,11 +107,12 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
 }
 
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T> gemm_mixed_precision(
+    const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // GEMM mixed-precision combines F32 accumulators with F16 multiplications
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+    SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
 
     // Compute reference
     const int M = a.shape().y();
@@ -115,15 +124,22 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTenso
     const int a_stride_z = K * M;
     const int a_stride_w = K * M * D;
 
-    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+    const int b_stride_z =
+        b.shape().num_dimensions() > 2
+            ? N * K
+            : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
+    int b_stride_w =
+        b.shape().num_dimensions() > 3
+            ? K * N * D
+            : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
 
     // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
     // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
-    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+                                 c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
 
     // Batched-GEMM
-    if(is_batched_gemm)
+    if (is_batched_gemm)
     {
         b_stride_w = b_stride_z;
     }
@@ -134,27 +150,28 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTenso
 #if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
 #pragma omp parallel for collapse(2)
 #endif /* _OPENMP */
-    for(int w = 0; w < W; ++w)
+    for (int w = 0; w < W; ++w)
     {
-        for(int depth = 0; depth < D; ++depth)
+        for (int depth = 0; depth < D; ++depth)
         {
             const int base_addr_a = depth * a_stride_z + w * a_stride_w;
             const int base_addr_b = depth * b_stride_z + w * b_stride_w;
             const int base_addr_c = depth * c_stride_z + w * c_stride_w;
 
-            for(int row = 0; row < M; ++row)
+            for (int row = 0; row < M; ++row)
             {
-                for(int col = 0; col < N; ++col)
+                for (int col = 0; col < N; ++col)
                 {
                     float acc(0);
 
-                    for(int k = 0; k < K; ++k)
+                    for (int k = 0; k < K; ++k)
                     {
                         acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
                     }
 
                     // Finalize the result: alpha * A * B + beta * C
-                    dst[base_addr_c + col + row * N] = static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
+                    dst[base_addr_c + col + row * N] =
+                        static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
                 }
             }
         }
@@ -163,9 +180,17 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTenso
     return dst;
 }
 
-template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
-template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<half> gemm_mixed_precision(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<float>
+gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
+template SimpleTensor<bfloat16> gemm(const SimpleTensor<bfloat16> &a,
+                                     const SimpleTensor<bfloat16> &b,
+                                     const SimpleTensor<bfloat16> &c,
+                                     float alpha,
+                                     float beta);
+template SimpleTensor<half>
+gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<half> gemm_mixed_precision(
+    const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
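The substantive logic behind the reformatted stride block is that matrix B is only slid along a dimension it actually has, and that a 3D B next to 4D "a"/"c" tensors with a singleton third dimension means B's third dimension holds batches, so b_stride_w is collapsed onto b_stride_z. A minimal standalone sketch of that rule, using a plain std::vector<int> as a hypothetical stand-in for the library's TensorShape:

    #include <cstdio>
    #include <vector>

    struct BStrides
    {
        int z;
        int w;
    };

    // Shapes are {x, y, z, w}, i.e. a = {K, M, D, W}, b = {N, K, ...}, c = {N, M, D, W}.
    static BStrides b_strides(const std::vector<int> &a_shape,
                              const std::vector<int> &b_shape,
                              const std::vector<int> &c_shape)
    {
        const int N = b_shape[0];
        const int K = b_shape[1];
        const int D = a_shape.size() > 2 ? a_shape[2] : 1;

        // Do not slide B along dimensions it does not have.
        int b_stride_z = b_shape.size() > 2 ? N * K : 0;
        int b_stride_w = b_shape.size() > 3 ? K * N * D : 0;

        // Batched-GEMM: a 3D B next to 4D a/c with a singleton third dimension
        // means B's third dimension holds batches, so advance B once per batch.
        const bool is_batched_gemm = b_shape.size() == 3 && a_shape.size() == 4 &&
                                     c_shape.size() == 4 && a_shape[2] == 1 && c_shape[2] == 1;
        if (is_batched_gemm)
        {
            b_stride_w = b_stride_z;
        }
        return {b_stride_z, b_stride_w};
    }

    int main()
    {
        // Batched-GEMM: a = {K=8, M=4, 1, W=2}, b = {N=16, K=8, 2}, c = {16, 4, 1, 2}.
        const BStrides s = b_strides({8, 4, 1, 2}, {16, 8, 2}, {16, 4, 1, 2});
        std::printf("b_stride_z=%d b_stride_w=%d\n", s.z, s.w); // both 128
        return 0;
    }

This prints b_stride_z=128 b_stride_w=128: each batch advances B by one full N*K slice, which is exactly what b_stride_w = b_stride_z achieves in the reference code.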
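The kernel itself addresses every matrix as a flattened row-major buffer: element (row, k) of A lives at base_addr_a + k + row * K, and element (k, col) of B at base_addr_b + col + k * N. A single-slice (D = W = 1) sketch of the same computation, assuming plain std::vector<float> storage instead of SimpleTensor:

    #include <cstdio>
    #include <vector>

    // dst = alpha * A * B + beta * C for one M x N slice, with the same
    // row-major flattening as the reference kernel: a[k + row * K], b[col + k * N].
    static std::vector<float> gemm_ref(const std::vector<float> &a, // M x K
                                       const std::vector<float> &b, // K x N
                                       const std::vector<float> &c, // M x N
                                       int M, int N, int K, float alpha, float beta)
    {
        std::vector<float> dst(M * N);
        for (int row = 0; row < M; ++row)
        {
            for (int col = 0; col < N; ++col)
            {
                float acc = 0.0f;
                for (int k = 0; k < K; ++k)
                {
                    acc += a[k + row * K] * b[col + k * N];
                }
                dst[col + row * N] = alpha * acc + beta * c[col + row * N];
            }
        }
        return dst;
    }

    int main()
    {
        // M=1, N=1, K=2: [1 2] * [3 4]^T = 11; with alpha=2, beta=1, c=5 -> 2*11 + 5 = 27.
        const std::vector<float> out = gemm_ref({1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f}, 1, 1, 2, 2.0f, 1.0f);
        std::printf("%g\n", out[0]); // prints 27
        return 0;
    }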
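gemm_mixed_precision differs from the plain kernel in one line: each F16 product is widened with static_cast<float> before being accumulated, so rounding to F16 happens once per product rather than once per partial sum. The sketch below illustrates why that matters, using a hypothetical round_to_f16_precision() helper that keeps only F16's 11 significand bits (it ignores F16's exponent range and subnormals, so it is an illustration, not a real F16 conversion):

    #include <cmath>
    #include <cstdio>

    // Hypothetical helper: round x to F16's 11 significand bits.
    static float round_to_f16_precision(float x)
    {
        int exp = 0;
        const float m = std::frexp(x, &exp); // x = m * 2^exp, with 0.5 <= |m| < 1 (or 0)
        return std::ldexp(std::nearbyint(m * 2048.0f) / 2048.0f, exp);
    }

    int main()
    {
        const int   K    = 100000;
        const float prod = round_to_f16_precision(0.01f); // F16-rounded product, ~0.0100021

        float acc_f32 = 0.0f; // mixed precision: F32 accumulator over F16 products
        float acc_f16 = 0.0f; // pure F16: the running sum is re-rounded after every add

        for (int k = 0; k < K; ++k)
        {
            acc_f32 += prod;
            acc_f16 = round_to_f16_precision(acc_f16 + prod);
        }

        // acc_f32 lands near 1000.2; acc_f16 stalls at 32.0, because once the
        // sum reaches 32 the addend is below half the F16 grid spacing there.
        std::printf("f32 accumulator: %.2f, f16-style accumulator: %.2f\n", acc_f32, acc_f16);
        return 0;
    }

The F32-style accumulator stays close to the true sum, while the F16-style one stops growing entirely; this is the failure mode the F32 accumulator in gemm_mixed_precision avoids before the single final cast back to T.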