author    Adnan AlSinan <adnan.alsinan@arm.com>    2022-05-06 12:10:11 +0100
committer Adnan AlSinan <adnan.alsinan@arm.com>    2022-05-06 14:31:45 +0000
commit    3bb72b69566f18ad5c9446d318d2fc2b5f6dba42 (patch)
tree      c11abd954bd8dce3a755312dd6747a48cf4a937a
parent    9d187387ebc00f9b34f8071e647cab1025a4ba7a (diff)
download  ComputeLibrary-3bb72b69566f18ad5c9446d318d2fc2b5f6dba42.tar.gz
Add a note to clarify pretranspose option in GEMM and GEMMLowp fixtures
Signed-off-by: Adnan AlSinan <adnan.alsinan@arm.com>
Change-Id: Ie65f9096a75610dc20ffbb25dc43fd2f632f2f03
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7530
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  tests/validation/fixtures/GEMMFixture.h     | 7
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h | 4
2 files changed, 10 insertions, 1 deletion
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index be3a3cd735..884b13da80 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -169,7 +169,12 @@ protected:
memcpy(c.data() + i * n, c.data(), n * sizeof(T));
}
}
-
+
+ /* Note: Assuming the usual batched matmul dimensions A = (B x M x K) and B = (B x K x N): if pretranspose_A is set to true, then A is assumed to be (B x K x M).
+    A must therefore be pre-transposed before being passed to the fixture, and the fixture transposes it back to (B x M x K)
+    so that the reference implementation, which expects a (B x M x K) input, can be called.
+    Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K) and must be pre-transposed before being passed to the fixture. */
+
// Define transposed shapes
TensorShape a_transposed_shape(a.shape().y(), a.shape().x());
TensorShape b_transposed_shape(b.shape().y(), b.shape().x());
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 3f83cc92f1..3da4c02f6d 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -191,6 +191,10 @@ SimpleTensor<int32_t> compute_gemmlowp_reference(const TensorShape &shape_a, con
fill(b, 1);
// Transpose reference if required
+ /* Note: Assuming the usual batched matmul dimensions A = (B x M x K) and B = (B x K x N): if pretranspose_A is set to true, then A is assumed to be (B x K x M).
+    A must therefore be pre-transposed before being passed to the fixture, and the fixture transposes it back to (B x M x K)
+    so that the reference implementation, which expects a (B x M x K) input, can be called.
+    Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K) and must be pre-transposed before being passed to the fixture. */
if(pretranspose_A)
{
transpose_matrix<TI>(a, a_transposed);
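
To make the shape convention described in the notes above concrete, below is a minimal standalone C++ sketch. It is not ComputeLibrary code: Tensor3D and transpose_last_two_dims are hypothetical helpers invented for illustration, not the fixtures' API. The sketch shows a caller-pretransposed LHS stored as (B x K x M) being transposed back to (B x M x K), i.e. the layout the reference implementation expects.

// Minimal standalone sketch (not ComputeLibrary code) of the pretranspose convention
// described in the fixture notes. Tensor3D and transpose_last_two_dims are hypothetical
// helpers for illustration only.
#include <cstddef>
#include <iostream>
#include <vector>

// A tiny batched matrix: shape is (batches x rows x cols), stored row-major per batch.
struct Tensor3D
{
    std::size_t        batches;
    std::size_t        rows;
    std::size_t        cols;
    std::vector<float> data; // size == batches * rows * cols
};

// Swap the last two dimensions of every batch: (B x R x C) -> (B x C x R).
Tensor3D transpose_last_two_dims(const Tensor3D &in)
{
    Tensor3D out{ in.batches, in.cols, in.rows, std::vector<float>(in.data.size()) };
    for(std::size_t b = 0; b < in.batches; ++b)
    {
        for(std::size_t r = 0; r < in.rows; ++r)
        {
            for(std::size_t c = 0; c < in.cols; ++c)
            {
                // out[b][c][r] = in[b][r][c]
                out.data[(b * out.rows + c) * out.cols + r] = in.data[(b * in.rows + r) * in.cols + c];
            }
        }
    }
    return out;
}

int main()
{
    const std::size_t B = 2, M = 3, K = 4;

    // With pretranspose_A == true the caller hands the fixture an LHS that is
    // already stored as (B x K x M) instead of the natural (B x M x K).
    Tensor3D a_pretransposed{ B, K, M, std::vector<float>(B * K * M, 1.0f) };

    // The fixture then transposes it back to (B x M x K) before invoking the
    // reference GEMM, which only understands (B x M x K) x (B x K x N) inputs.
    Tensor3D a_for_reference = transpose_last_two_dims(a_pretransposed);

    std::cout << "reference LHS shape: " << a_for_reference.batches << " x "
              << a_for_reference.rows << " x " << a_for_reference.cols << std::endl; // 2 x 3 x 4
    return 0;
}

The same round trip applies to the RHS when pretranspose_B is set: the caller supplies (B x N x K) and the fixture restores (B x K x N) before calling the reference.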