From 3bb72b69566f18ad5c9446d318d2fc2b5f6dba42 Mon Sep 17 00:00:00 2001
From: Adnan AlSinan
Date: Fri, 6 May 2022 12:10:11 +0100
Subject: Add a note to clarify pretranspose option in GEMM and GEMMLowp
 fixtures

Signed-off-by: Adnan AlSinan
Change-Id: Ie65f9096a75610dc20ffbb25dc43fd2f632f2f03
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7530
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
Comments-Addressed: Arm Jenkins
---
 tests/validation/fixtures/GEMMFixture.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'tests/validation/fixtures/GEMMFixture.h')

diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index be3a3cd735..884b13da80 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -169,7 +169,12 @@ protected:
                 memcpy(c.data() + i * n, c.data(), n * sizeof(T));
             }
         }
-
+
+        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K), B = (B x K x N), if pretranspose_A is set to true, then A is assumed to be (B x K x M),
+           therefore, A must be pre-transposed before passing it to the fixture. And, we transpose A again in the fixture to make it (B x M x K)
+           in order to be able to call reference implementation that works with (B x M x K) input.
+           Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K), B must be pre-transposed before passing it to the fixture. */
+
         // Define transposed shapes
         TensorShape a_transposed_shape(a.shape().y(), a.shape().x());
         TensorShape b_transposed_shape(b.shape().y(), b.shape().x());
-- 
cgit v1.2.1