Diffstat (limited to 'tests/validation/reference/GEMM.cpp')
-rw-r--r-- tests/validation/reference/GEMM.cpp | 55 ++++++++++++++++++++-
1 file changed, 54 insertions(+), 1 deletion(-)
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index 2feab89950..3c72b94143 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -84,8 +84,61 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
return dst;
}
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+{
+ // Mixed-precision GEMM: multiplications are performed in F16, accumulation in F32
+ // Create reference
+ SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+
+ // Compute reference
+ const int M = a.shape().y();
+ const int N = b.shape().x();
+ const int K = a.shape().x();
+ const int D = a.shape().z(); // Number of matrices in a batch
+ const int W = a.shape()[3]; // Number of batched GEMMs (Winograd case)
+
+ const int a_stride_z = K * M;
+ const int a_stride_w = K * M * D;
+
+ const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide matrix B along the 3rd dimension if it has fewer than 3 dimensions
+ const int b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide matrix B along the 4th dimension if it has fewer than 4 dimensions
+
+ const int c_stride_z = N * M;
+ const int c_stride_w = N * M * D;
+
+ for(int w = 0; w < W; ++w)
+ {
+ for(int depth = 0; depth < D; ++depth)
+ {
+ const int base_addr_a = depth * a_stride_z + w * a_stride_w;
+ const int base_addr_b = depth * b_stride_z + w * b_stride_w;
+ const int base_addr_c = depth * c_stride_z + w * c_stride_w;
+
+ for(int row = 0; row < M; ++row)
+ {
+ for(int col = 0; col < N; ++col)
+ {
+ float acc(0);
+
+ for(int k = 0; k < K; ++k)
+ {
+ acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
+ }
+
+ // Finalize the result: alpha * A * B + beta * C
+ dst[base_addr_c + col + row * N] = static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
+ }
+ }
+ }
+ }
+
+ return dst;
+}
+
template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<half> gemm_mixed_precision(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
} // namespace reference
} // namespace validation
} // namespace test
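
For context, the core of the new reference is a plain triple loop in which the operands stay in the storage type T while the accumulator is a plain float, so only the per-element products are rounded to the narrow type. Below is a minimal standalone sketch of that accumulation pattern; gemm_mixed_ref, the flat row-major layout, and the use of float operands (so the sketch compiles without ACL's half type) are illustrative assumptions, not the library's API.

#include <cstddef>
#include <iostream>
#include <vector>

// Sketch of the mixed-precision GEMM pattern from the patch:
// operands stay in the storage type T, the accumulator is float.
// In the library T would be half; T = float here only so the
// sketch builds without ACL.
template <typename T>
std::vector<T> gemm_mixed_ref(const std::vector<T> &a, // M x K, row-major
                              const std::vector<T> &b, // K x N, row-major
                              const std::vector<T> &c, // M x N, row-major
                              std::size_t M, std::size_t N, std::size_t K,
                              float alpha, float beta)
{
    std::vector<T> dst(M * N);
    for(std::size_t row = 0; row < M; ++row)
    {
        for(std::size_t col = 0; col < N; ++col)
        {
            float acc = 0.f; // F32 accumulator regardless of T
            for(std::size_t k = 0; k < K; ++k)
            {
                // Multiply in T, accumulate in float, as in the patch
                acc += static_cast<float>(a[row * K + k] * b[k * N + col]);
            }
            // Finalize in float, then round once back to T
            dst[row * N + col] = static_cast<T>(alpha * acc + beta * static_cast<float>(c[row * N + col]));
        }
    }
    return dst;
}

int main()
{
    // 2x2 identity times a 2x2 matrix, alpha = 1, beta = 0
    std::vector<float> a{ 1.f, 0.f, 0.f, 1.f };
    std::vector<float> b{ 1.f, 2.f, 3.f, 4.f };
    std::vector<float> c(4, 0.f);
    auto d = gemm_mixed_ref(a, b, c, 2, 2, 2, 1.f, 0.f);
    for(float v : d)
    {
        std::cout << v << ' '; // prints: 1 2 3 4
    }
    std::cout << '\n';
}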