path: root/tests/validation/reference/GEMMLowp.h
author: Georgios Pinitas <georgios.pinitas@arm.com> 2019-11-21 14:10:25 +0000
committer: Georgios Pinitas <georgios.pinitas@arm.com> 2019-11-27 10:56:10 +0000
commit: 448a81fcec04333364a1e3266d5081596d3a0477 (patch)
tree: bd5382a58fae39a8014157423a8ff339d39e14b9 /tests/validation/reference/GEMMLowp.h
parent: 449cbf9c20287fca9a56898cdc5821c061a66ce3 (diff)
COMPMID-2805: Add QASYMM8_SIGNED support in NEGEMMLowpOutputStage
Add support for requantizing down from S32 to Int8 with fixed point requantization. This involves the following:
- Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
- Add bias to final result if bias tensor is not a nullptr
- Round to nearest division by a power-of-two using result_shift
- Add offset to each result
- Clamp the value between the specified min and max bounds
- Cast to int8 data type

Change-Id: I641b3fac0833c568d8565ccb859bbc561a24c17d
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2340
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
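As a minimal per-element sketch of the requantization pipeline described in the commit message (the helper and function names below are illustrative, not the library's internal API; the bias is added before the fixed-point multiply, as in the reference implementation):

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Fixed point multiplication: high 32 bits of the doubled 64-bit product,
    // rounded, saturating on the single overflow case (INT32_MIN * INT32_MIN).
    inline int32_t sat_rounding_doubling_high_mul(int32_t a, int32_t b)
    {
        if(a == b && a == std::numeric_limits<int32_t>::min())
        {
            return std::numeric_limits<int32_t>::max();
        }
        const int64_t ab    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        const int64_t nudge = ab >= 0 ? (1ll << 30) : (1 - (1ll << 30));
        return static_cast<int32_t>((ab + nudge) / (1ll << 31));
    }

    // Round-to-nearest division by 2^exponent.
    inline int32_t rounding_divide_by_pot(int32_t x, int exponent)
    {
        const int32_t mask      = (int32_t(1) << exponent) - 1;
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
        return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
    }

    // One output element: bias add, fixed point multiply, rounding shift,
    // offset, clamp, and final cast to int8.
    inline int8_t requantize_s32_to_int8(int32_t acc, int32_t bias,
                                         int32_t multiplier, int32_t shift,
                                         int32_t offset, int32_t min, int32_t max)
    {
        acc += bias; // skipped when the bias tensor is a nullptr
        acc  = sat_rounding_doubling_high_mul(acc, multiplier);
        acc  = rounding_divide_by_pot(acc, shift);
        acc += offset;
        acc  = std::max(min, std::min(max, acc));
        return static_cast<int8_t>(acc);
    }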
Diffstat (limited to 'tests/validation/reference/GEMMLowp.h')
-rw-r--r-- tests/validation/reference/GEMMLowp.h | 19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/tests/validation/reference/GEMMLowp.h b/tests/validation/reference/GEMMLowp.h
index 815527e1b7..7ff01ef611 100644
--- a/tests/validation/reference/GEMMLowp.h
+++ b/tests/validation/reference/GEMMLowp.h
@@ -52,20 +52,13 @@ template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, std::vector<int32_t> result_mult_int,
std::vector<int32_t> result_shift, int32_t min = 0, int32_t max = 0);
-template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift,
- int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
-
-template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, std::vector<int32_t> result_fixedpoint_multiplier,
- std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
+template <typename TIn, typename TOut>
+SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> &in, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift,
+ int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
-template <typename T>
-SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
- int32_t min, int32_t max);
-template <typename T>
-SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier,
- int32_t result_shift, int32_t min, int32_t max);
+template <typename TIn, typename TOut>
+SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> &in, const SimpleTensor<TIn> &bias, std::vector<int32_t> result_fixedpoint_multiplier,
+ std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min = 0, int32_t max = 0);
} // namespace reference
} // namespace validation
} // namespace test
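
For illustration, a test might call the generalized declaration above as follows; this is a hedged sketch, where the variables acc, bias, and the scalar parameters are assumptions, not code from this patch:

    // Requantize an int32 accumulator tensor down to int8 via the new
    // two-parameter template (TIn = int32_t, TOut = int8_t):
    SimpleTensor<int8_t> out = reference::gemmlowp_quantize_down_scale_by_fixedpoint<int32_t, int8_t>(
        acc, bias, { result_fixedpoint_multiplier }, { result_shift },
        result_offset_after_shift, -128, 127);

The design choice visible in the diff is to fold the uint8- and int16-specific overloads into a single pair of declarations parameterized on both input and output type, so QASYMM8_SIGNED support needs only a new TOut instantiation rather than another set of overloads.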