From 448a81fcec04333364a1e3266d5081596d3a0477 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 21 Nov 2019 14:10:25 +0000
Subject: COMPMID-2805: Add QASYMM8_SIGNED support in NEGEMMLowpOutputStage

Add support for requantizing down from S32 to Int8 with fixed-point
requantization. This involves the following:
- Compute the fixed-point multiplication of each entry of the input by
  result_fixedpoint_multiplier
- Add bias to the final result if the bias tensor is not a nullptr
- Round to nearest division by a power-of-two using result_shift
- Add offset to each result
- Clamp the value between the specified min and max bounds
- Cast to int8 data type

Change-Id: I641b3fac0833c568d8565ccb859bbc561a24c17d
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/2340
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 tests/validation/reference/GEMMLowp.h | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

(limited to 'tests/validation/reference/GEMMLowp.h')

diff --git a/tests/validation/reference/GEMMLowp.h b/tests/validation/reference/GEMMLowp.h
index 815527e1b7..7ff01ef611 100644
--- a/tests/validation/reference/GEMMLowp.h
+++ b/tests/validation/reference/GEMMLowp.h
@@ -52,20 +52,13 @@ template <typename T>
 SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, std::vector<int32_t> result_mult_int,
                                                                   std::vector<int32_t> result_shift, int32_t min = 0, int32_t max = 0);
 
-template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift,
-                                                                                int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
-
-template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, std::vector<int32_t> result_fixedpoint_multiplier,
-                                                                                std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
+template <typename TIn, typename TOut>
+SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> &in, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift,
+                                                              int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
 
-template <typename T>
-SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
-                                                                                int32_t min, int32_t max);
-template <typename T>
-SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier,
-                                                                                int32_t result_shift, int32_t min, int32_t max);
+template <typename TIn, typename TOut>
+SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> &in, const SimpleTensor<TIn> &bias, std::vector<int32_t> result_fixedpoint_multiplier,
+                                                              std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
 } // namespace reference
 } // namespace validation
 } // namespace test
--
cgit v1.2.1
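
For illustration, below is a minimal, self-contained C++ sketch of the requantization steps listed in the commit message (add bias, fixed-point multiply, round-to-nearest divide by a power of two, add offset, clamp, cast to int8). It assumes gemmlowp-style fixed-point arithmetic; the helper names (saturating_rounding_doubling_high_mul, rounding_divide_by_pot, requantize_s32_to_s8) and the parameter values in main are hypothetical and chosen for this sketch only, not part of the Compute Library API. The actual reference implementation lives in tests/validation/reference/GEMMLowp.cpp.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// High 32 bits of (a * b * 2), rounded to nearest, saturating the single
// overflow case (both operands equal to INT32_MIN).
int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    if(a == std::numeric_limits<int32_t>::min() && b == std::numeric_limits<int32_t>::min())
    {
        return std::numeric_limits<int32_t>::max();
    }
    const int64_t ab    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int64_t nudge = (ab >= 0) ? (1ll << 30) : (1 - (1ll << 30));
    return static_cast<int32_t>((ab + nudge) / (1ll << 31));
}

// Round-to-nearest division by 2^exponent (gemmlowp-style rounding).
int32_t rounding_divide_by_pot(int32_t x, int exponent)
{
    const int32_t mask      = (1 << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
    return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

// Requantize a single S32 accumulator to int8 following the steps in the
// commit message: add bias, fixed-point multiply, rounding right shift,
// add the output offset, clamp to [min, max], cast to int8.
int8_t requantize_s32_to_s8(int32_t acc, int32_t bias, int32_t result_fixedpoint_multiplier,
                            int32_t result_shift, int32_t result_offset_after_shift,
                            int32_t min, int32_t max)
{
    int32_t v = acc + bias;
    v = saturating_rounding_doubling_high_mul(v, result_fixedpoint_multiplier);
    v = rounding_divide_by_pot(v, result_shift);
    v += result_offset_after_shift;
    v = std::max(min, std::min(max, v));
    return static_cast<int8_t>(v);
}

int main()
{
    // Example parameters only; in practice they come from the layer's quantization info.
    const std::vector<int32_t> accumulators{ -32000, -1, 0, 1, 32000 };
    for(int32_t acc : accumulators)
    {
        std::cout << static_cast<int>(requantize_s32_to_s8(acc, /*bias=*/50,
                                                           /*result_fixedpoint_multiplier=*/1073741824,
                                                           /*result_shift=*/8,
                                                           /*result_offset_after_shift=*/10,
                                                           /*min=*/-128, /*max=*/127))
                  << '\n';
    }
    return 0;
}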