diff options
author | Sheri Zhang <sheri.zhang@arm.com> | 2020-03-09 14:29:52 +0000 |
---|---|---|
committer | Sheri Zhang <sheri.zhang@arm.com> | 2020-03-25 15:58:42 +0000 |
commit | 1b14c75c0d591c4abe4d2d41b7e4e165fbf58382 (patch) | |
tree | 41e671befde3f61247d0728d16907ff281d6294d /tests/validation/reference/GEMMLowp.cpp | |
parent | 2e5fd637205770ec5e11096e6e19b8efc67d544e (diff) | |
download | ComputeLibrary-1b14c75c0d591c4abe4d2d41b7e4e165fbf58382.tar.gz |
COMPMID-2968: Add support for QASYMM8_SIGNED in CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel
Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I37e6e76dbd5546c0eaedfacd01ea905c37148e8a
Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2861
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/reference/GEMMLowp.cpp')
-rw-r--r-- | tests/validation/reference/GEMMLowp.cpp | 63 |
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/tests/validation/reference/GEMMLowp.cpp b/tests/validation/reference/GEMMLowp.cpp index 99d08e34f1..61617c8aae 100644 --- a/tests/validation/reference/GEMMLowp.cpp +++ b/tests/validation/reference/GEMMLowp.cpp @@ -131,6 +131,39 @@ void quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> *in, const Simple std::min<TIn>(std::numeric_limits<TOut>::max(), result))); } } + +template <typename TIn, typename TOut> +void quantize_down_scale_by_float(const SimpleTensor<TIn> *in, const SimpleTensor<TIn> *bias, SimpleTensor<TOut> *dst, std::vector<float_t> result_real_multiplier, + int32_t result_offset, int32_t min, int32_t max) +{ + const int cols_in = in->shape().x(); + const bool is_per_channel = result_real_multiplier.size() > 1; + + for(int i = 0; i < in->num_elements(); ++i) + { + TIn result = (*in)[i]; + + if(bias != nullptr) + { + result += (*bias)[i % cols_in]; + } + + // Float multiplication + const float_t multiplier = (is_per_channel) ? result_real_multiplier[i % cols_in] : result_real_multiplier[0]; + + float_t result_f = static_cast<float_t>(result) * multiplier + static_cast<float_t>(result_offset); + result = static_cast<TIn>(std::round(result_f)); + + // Bounded ReLu + if(min != max) + { + result = std::max(min, std::min(max, result)); + } + + (*dst)[i] = static_cast<TOut>(std::max<TIn>(std::numeric_limits<TOut>::lowest(), + std::min<TIn>(std::numeric_limits<TOut>::max(), result))); + } +} } // namespace template <typename T_out, typename T_in, typename T_in_1> @@ -237,6 +270,36 @@ SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor return dst; } +template <typename TIn, typename TOut> +SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<TIn> &in, const SimpleTensor<TIn> &bias, + std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max) +{ + SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); + + quantize_down_scale_by_float<TIn, 
TOut>(&in, &bias, &dst, result_real_multiplier, result_offset, min, max); + + return dst; +} + +template <typename TIn, typename TOut> +SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<TIn> &in, + std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max) +{ + SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); + + quantize_down_scale_by_float<TIn, TOut>(&in, nullptr, &dst, result_real_multiplier, result_offset, min, max); + + return dst; +} + +template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, + std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max); +template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a, + std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max); +template SimpleTensor<int8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, + std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max); +template SimpleTensor<int8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a, + std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max); template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max); template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, |