diff options
author | Gunes Bayir <gunes.bayir@arm.com> | 2024-04-26 16:51:54 +0100 |
---|---|---|
committer | Gunes Bayir <gunes.bayir@arm.com> | 2024-04-29 09:31:44 +0000 |
commit | e5ef8c159a14872dda5e36e320f07b0963858d8c (patch) | |
tree | 2d359eeff83b54b3e7e65484abc37e531aedbdf2 /tests/validation/fixtures/GEMMLowpFixture.h | |
parent | 499b5bca1a897461d4105ba52e4c766ddb5f564a (diff) | |
download | ComputeLibrary-e5ef8c159a14872dda5e36e320f07b0963858d8c.tar.gz |
Disable SME2 Gemmlowp s8f32 kernel selection in case results need to be accumulated
Similar to https://review.mlplatform.org/c/ml/ComputeLibrary/+/11500, s8f32 kernels do not support accumulate mode. This patch modifies the kernel selection and also adds more tests to stress these test cases better.
Partially Resolves: COMPMID-6995
Change-Id: I40e19446c012eb7334e4511e254cce0d635aa234
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11503
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Radu Salavat <radu.salavat@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/fixtures/GEMMLowpFixture.h')
-rw-r--r-- | tests/validation/fixtures/GEMMLowpFixture.h | 10 |
1 file changed, 2 insertions, 8 deletions
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h index 6b7cbba92e..aa4eedb75d 100644 --- a/tests/validation/fixtures/GEMMLowpFixture.h +++ b/tests/validation/fixtures/GEMMLowpFixture.h @@ -472,15 +472,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, boo class GEMMLowpDequantizedMatrixMultiplyValidationFixture : public framework::Fixture { public: - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset) + void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_output, int32_t a_offset, int32_t b_offset, bool accumulate) { - // Accumulation is supported for Int8/UInt8 only in aarch64 - bool accumulate = true; - // Accumulation is not supported for Int8/UInt8 in aarch32 -#ifdef __arm__ - accumulate = false; -#endif //__arm__ - bool dynamic_qinfo = false; + const bool dynamic_qinfo = false; const auto a_qinfo = QuantizationInfo(1.0f / 255, a_offset); const auto b_qinfo = QuantizationInfo(5.0f / 255, b_offset); TensorFillInfo finfo; |