From 951b8a4c01de2810349b6f16cf9bbba7578484fa Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Mon, 4 Nov 2019 14:42:08 +0000
Subject: COMPMID-2309 : CLConvolutionLayer: support QUANT8_SYMM_PER_CHANNEL
 filters

Change-Id: I16f6758b768ede404a064db057302ded706e1e8a
Signed-off-by: Vidhya Sudhan Loganathan
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/2215
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
---
 tests/validation/reference/GEMMLowp.cpp | 71 ++++++++++++++++++---------------
 1 file changed, 39 insertions(+), 32 deletions(-)

diff --git a/tests/validation/reference/GEMMLowp.cpp b/tests/validation/reference/GEMMLowp.cpp
index 4283cb5bac..08be4a5182 100644
--- a/tests/validation/reference/GEMMLowp.cpp
+++ b/tests/validation/reference/GEMMLowp.cpp
@@ -39,10 +39,11 @@ namespace reference
 namespace
 {
 template <typename T>
-void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
-                                        int32_t min, int32_t max)
+void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_offset, std::vector<int32_t> result_mult_int,
+                                        std::vector<int32_t> result_shift, int32_t min, int32_t max)
 {
-    const int cols_in = in->shape().x();
+    const int  cols_in        = in->shape().x();
+    const bool is_per_channel = result_mult_int.size() > 1;
 
     for(int i = 0; i < in->num_elements(); ++i)
     {
@@ -53,9 +54,9 @@ void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleT
             result += (*bias)[i % cols_in];
         }
 
-        result *= result_mult_int;
+        result *= (is_per_channel) ? result_mult_int[i % cols_in] : result_mult_int[0];
 
-        result >>= result_shift;
+        result >>= (is_per_channel) ? result_shift[i % cols_in] : result_shift[0];
 
         // Bounded ReLu
         if(min != max)
@@ -68,10 +69,11 @@ void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleT
 }
 
 template <typename T>
-void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_fixedpoint_multiplier, int32_t result_shift,
-                                                      int32_t result_offset_after_shift, int32_t min, int32_t max)
+void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, std::vector<int32_t> result_fixedpoint_multiplier,
+                                                      std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max)
 {
-    const int cols_in = in->shape().x();
+    const int  cols_in        = in->shape().x();
+    const bool is_per_channel = result_fixedpoint_multiplier.size() > 1;
 
     for(int i = 0; i < in->num_elements(); ++i)
     {
@@ -83,7 +85,10 @@ void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in,
         }
 
         // Fixed point multiplication
-        result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, result_fixedpoint_multiplier), result_shift);
+        const int32_t multiplier = (is_per_channel) ? result_fixedpoint_multiplier[i % cols_in] : result_fixedpoint_multiplier[0];
+        const int32_t shift      = (is_per_channel) ? result_shift[i % cols_in] : result_shift[0];
+
+        result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, multiplier), shift);
         result += result_offset_after_shift;
 
         // Bounded ReLu
@@ -132,8 +137,8 @@ void quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> *in,
 }
 } // namespace
 
-template <typename T_out, typename T_in>
-SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
+template <typename T_out, typename T_in, typename T_in_1>
+SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in_1> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
 {
     static_assert(std::is_same<typename std::decay<T_out>::type, int32_t>::value, "Only int32_t is allowed for the output");
 
@@ -186,14 +191,15 @@ SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, c
 }
 
 // used to validate assembly kernels which don't know anything about offsets
-template <typename T1, typename T2>
-SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, TensorShape shape_c)
+template <typename T1, typename T2, typename T3>
+SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T3> &b, TensorShape shape_c)
 {
-    return gemmlowp_matrix_multiply_core<T1, T2>(a, b, shape_c, 0, 0);
+    return gemmlowp_matrix_multiply_core<T1, T2, T3>(a, b, shape_c, 0, 0);
 }
 
 template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max)
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, std::vector<int32_t> result_mult_int, std::vector<int32_t> result_shift,
+                                                                  int32_t min, int32_t max)
 {
     SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
 
@@ -203,8 +209,8 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTe
 }
 
 template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
-                                                                  int32_t min, int32_t max)
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, std::vector<int32_t> result_mult_int,
+                                                                  std::vector<int32_t> result_shift, int32_t min, int32_t max)
 {
     SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
 
@@ -214,9 +220,8 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTe
 }
 
 template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
-                                                                                int32_t result_offset_after_shift, int32_t min,
-                                                                                int32_t max)
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift,
+                                                                                int32_t result_offset_after_shift, int32_t min, int32_t max)
 {
     SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
 
@@ -226,8 +231,8 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(
 }
 
 template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
-                                                                                int32_t result_offset_after_shift, int32_t min, int32_t max)
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, std::vector<int32_t> result_fixedpoint_multiplier,
+                                                                                std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max)
 {
     SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
 
@@ -258,22 +263,24 @@ SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(
     return dst;
 }
 
-template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
-                                                                                         int32_t result_offset_after_shift, int32_t min, int32_t max);
-template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
-                                                                                         int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
+template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, std::vector<int32_t> result_fixedpoint_multiplier,
+                                                                                         std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
+template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b,
+                                                                                         std::vector<int32_t> result_fixedpoint_multiplier,
+                                                                                         std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
 template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                                                          int32_t min, int32_t max);
 template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
                                                                                          int32_t result_shift, int32_t min, int32_t max);
-template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min,
-                                                                           int32_t max);
-template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
-                                                                           int32_t result_shift, int32_t min, int32_t max);
+template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, std::vector<int32_t> result_mult_int,
+                                                                           std::vector<int32_t> result_shift, int32_t min, int32_t max);
+template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, std::vector<int32_t> result_mult_int,
+                                                                           std::vector<int32_t> result_shift, int32_t min, int32_t max);
 template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
 template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
-template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
-template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
 } // namespace reference
 } // namespace validation
 } // namespace test
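Note on the change: the patch generalises the reference quantize-down helpers from one (multiplier, shift) pair per tensor to one pair per output channel, selected by column index (i % cols_in); a single-element vector preserves the old per-tensor behaviour. For readers who want the requantization step in isolation, here is a minimal standalone C++ sketch of that logic. It is illustrative only, not library code: saturating_rounding_doubling_high_mul and rounding_divide_by_pow2 below are local stand-ins for the asymm_int_mult and asymm_rounding_divide_by_pow2 helpers the diff calls, assumed here to follow the usual gemmlowp rounding conventions.

// Standalone sketch: per-channel requantization of int32 GEMM accumulators
// to uint8, mirroring the reference logic in the patch above.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

// Saturating rounding doubling high multiply, gemmlowp-style
// (stand-in for the asymm_int_mult helper used in the diff).
static int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    const bool    overflow = (a == b) && (a == std::numeric_limits<int32_t>::min());
    const int64_t ab_64    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int32_t nudge    = (ab_64 >= 0) ? (1 << 30) : (1 - (1 << 30));
    const int32_t high32   = static_cast<int32_t>((ab_64 + nudge) / (INT64_C(1) << 31));
    return overflow ? std::numeric_limits<int32_t>::max() : high32;
}

// Rounding arithmetic right shift (stand-in for asymm_rounding_divide_by_pow2).
// Assumes 0 <= exponent < 31.
static int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (int32_t(1) << exponent) - 1;
    const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
    return (x >> exponent) + (((x & mask) > threshold) ? 1 : 0);
}

// Requantize a row-major (rows x cols) accumulator matrix. As in the patch,
// a single-element multiplier/shift vector means per-tensor scaling;
// otherwise entry (i % cols) is used, i.e. one pair per output channel.
static std::vector<uint8_t> quantize_down_per_channel(const std::vector<int32_t> &acc, int cols,
                                                      const std::vector<int32_t> &multipliers,
                                                      const std::vector<int32_t> &shifts,
                                                      int32_t offset_after_shift)
{
    const bool           is_per_channel = multipliers.size() > 1;
    std::vector<uint8_t> dst(acc.size());
    for(size_t i = 0; i < acc.size(); ++i)
    {
        const size_t  c     = i % static_cast<size_t>(cols);
        const int32_t mult  = is_per_channel ? multipliers[c] : multipliers[0];
        const int32_t shift = is_per_channel ? shifts[c] : shifts[0];
        int32_t       v     = rounding_divide_by_pow2(saturating_rounding_doubling_high_mul(acc[i], mult), shift);
        v += offset_after_shift;
        // Saturate to the QASYMM8 range [0, 255]
        dst[i] = static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, v)));
    }
    return dst;
}

int main()
{
    // One 2x3 accumulator block, one (multiplier, shift) pair per column.
    const std::vector<int32_t> acc         = { 16384, -8192, 40000, 1000, 0, -40000 };
    const std::vector<int32_t> multipliers = { 1073741824, 1431655765, 1073741824 }; // ~0.5, ~2/3, ~0.5 in Q31
    const std::vector<int32_t> shifts      = { 1, 2, 3 };
    const std::vector<uint8_t> out         = quantize_down_per_channel(acc, 3, multipliers, shifts, 128);
    for(uint8_t v : out)
    {
        std::printf("%u ", v);
    }
    std::printf("\n");
    return 0;
}

Per-channel pairs matter for QSYMM8_PER_CHANNEL filters because each output channel carries its own weight scale, so no single requantization factor can be exact for all channels; the non-fixed-point overload (result_mult_int/result_shift) applies the same per-column selection with a plain multiply and arithmetic shift instead.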