From 62d600fe8c0afba81cc5f5dd315eb6dcc04f90b8 Mon Sep 17 00:00:00 2001 From: Radu Salavat <radu.salavat@arm.com> Date: Mon, 15 Apr 2024 14:42:07 +0000 Subject: Move s32 to f32 conversion in reference layers from quantization to dequantization Signed-off-by: Radu Salavat <radu.salavat@arm.com> Change-Id: Ib17946b526d35deeca94b5d2f163b92101e313c4 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11420 Benchmark: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Gunes Bayir <gunes.bayir@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> --- tests/validation/fixtures/GEMMLowpFixture.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'tests/validation/fixtures') diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h index 11a491faa7..6b7cbba92e 100644 --- a/tests/validation/fixtures/GEMMLowpFixture.h +++ b/tests/validation/fixtures/GEMMLowpFixture.h @@ -31,7 +31,7 @@ #include "tests/validation/Validation.h" #include "tests/validation/reference/GEMMLowp.h" #include "tests/validation/reference/ArithmeticOperations.h" -#include "tests/validation/reference/QuantizationLayer.h" +#include "tests/validation/reference/DequantizationLayer.h" #include <cstdint> #include <vector> @@ -485,7 +485,7 @@ public: const auto b_qinfo = QuantizationInfo(5.0f / 255, b_offset); TensorFillInfo finfo; _target = compute_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo); - _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate); + _reference = compute_reference(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, finfo, accumulate, dynamic_qinfo); } protected: @@ -495,14 +495,16 @@ protected: return compute_gemmlowp_target(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, output_qinfo, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, GEMMLowpOutputStageInfo(), false, finfo, accumulate, dynamic_qinfo, DataType::F32); } - SimpleTensor<float> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const 
TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate) + SimpleTensor<float> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_output, const QuantizationInfo& a_qinfo, const QuantizationInfo& b_qinfo, const TensorFillInfo& finfo, bool accumulate, const bool dynamic_qinfo) { + QuantizationInfo s32_ref_output_quant_info = QuantizationInfo(a_qinfo.uniform().scale * b_qinfo.uniform().scale, 0, dynamic_qinfo); + SimpleTensor<int32_t> s32_ref_output = compute_gemmlowp_reference<int8_t, int8_t>(shape_a, shape_b, shape_output, a_qinfo, b_qinfo, DataType::QASYMM8_SIGNED, DataType::QASYMM8_SIGNED, finfo); + s32_ref_output.quantization_info(s32_ref_output_quant_info); SimpleTensor<float> f32_ref_output(s32_ref_output.shape(), DataType::F32); - QuantizationInfo dst_quant_info = QuantizationInfo(a_qinfo.uniform().scale * b_qinfo.uniform().scale, 0); - f32_ref_output = reference::quantization_layer<int32_t, float>(s32_ref_output, DataType::F32, dst_quant_info); + f32_ref_output = reference::dequantization_layer<float>(s32_ref_output); if (accumulate) { -- cgit v1.2.1