From 02c452fe1ec17c3941272a07b5cae1f32d614c56 Mon Sep 17 00:00:00 2001
From: Mohammed Suhail Munshi
Date: Thu, 26 Oct 2023 00:14:36 +0100
Subject: Add Dynamic Quantization tests to Fully Connected Layer

This patch calculates the output quantization info based on the inputs'
quantization information. The previous approach was using the same
quantization information for input, weights and output.

This implementation does not cover the cases where we have a fused
activation function.

Resolves: [COMPMID-6484]
Signed-off-by: Mohammed Suhail Munshi
Change-Id: Ib58143165191e82ae8547e661ac7c8d077bda200
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10539
Comments-Addressed: Arm Jenkins
Reviewed-by: SiCong Li
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/validation/Helpers.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'tests/validation/Helpers.h')

diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 5a1e69afbd..647adcdb69 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -286,12 +286,13 @@ QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_
  * @param[in] k             number of accumulations taking place in the sum, i.e. c_k = sum_k(a_k * b_k)
  * @param[in] data_type     data type, only QASYMM8, QASYMM8_SIGNED are supported
  * @param[in] bias_fraction the fraction of bias amplitude compared to integer accummulation.
+ * @param[in] num_sd        (Optional) number of standard deviations we allow from the mean. Default value is 2.
  *
  * @return QuantizationHint object containing the suggested output quantization info and min/max bias range
  */
 QuantizationHint suggest_mac_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
-                                                 const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type,
-                                                 float bias_fraction);
+                                                 const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type, float bias_fraction,
+                                                 int num_sd = 2);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
--
cgit v1.2.1
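
For illustration, the sketch below shows one way a destination quantization for
c = sum_k(a_k * b_k) can be derived from the operands' quantization info by
modelling the accumulator statistically and keeping mean +/- num_sd standard
deviations, which is the role the new num_sd parameter plays. It is a
standalone, hypothetical C++ example: the QInfo struct and suggest_dst_q_info
function are placeholders and do not reproduce the actual
suggest_mac_dst_q_info_and_bias implementation in ComputeLibrary, and it
assumes int8 operands roughly uniform over their full range.

    // Standalone sketch (not the ComputeLibrary implementation): estimate a
    // destination quantization for c = sum_k(a_k * b_k) from the operands'
    // quantization, keeping mean +/- num_sd standard deviations of the sum.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    struct QInfo
    {
        float   scale;
        int32_t offset;
    };

    QInfo suggest_dst_q_info(const QInfo &lhs, const QInfo &rhs, int32_t k, int num_sd = 2)
    {
        // Assume each quantized operand is roughly uniform over the int8 range
        // and convert to real-valued mean/std via its scale and offset.
        const float qmin = -128.0f, qmax = 127.0f;

        const float mean_l = lhs.scale * ((qmin + qmax) / 2.0f - lhs.offset);
        const float std_l  = lhs.scale * (qmax - qmin) / std::sqrt(12.0f);

        const float mean_r = rhs.scale * ((qmin + qmax) / 2.0f - rhs.offset);
        const float std_r  = rhs.scale * (qmax - qmin) / std::sqrt(12.0f);

        // Independent product: E[XY] = E[X]E[Y],
        // Var(XY) = Var(X)Var(Y) + Var(X)E[Y]^2 + Var(Y)E[X]^2.
        const float mean_p = mean_l * mean_r;
        const float var_p  = std_l * std_l * std_r * std_r +
                             std_l * std_l * mean_r * mean_r +
                             std_r * std_r * mean_l * mean_l;

        // Summing k such products scales mean and variance by k.
        const float mean_acc = k * mean_p;
        const float std_acc  = std::sqrt(k * var_p);

        // Keep mean +/- num_sd standard deviations and map that interval onto
        // the 8-bit output range (guarding against a degenerate interval).
        const float min_val = mean_acc - num_sd * std_acc;
        const float max_val = mean_acc + num_sd * std_acc;

        QInfo dst{};
        dst.scale  = std::max((max_val - min_val) / (qmax - qmin), 1e-8f);
        dst.offset = static_cast<int32_t>(std::lround(qmin - min_val / dst.scale));
        return dst;
    }

    int main()
    {
        // Example: lhs scale 0.5/offset 10, rhs scale 0.25/offset -5, 64 accumulations.
        const QInfo dst = suggest_dst_q_info({0.5f, 10}, {0.25f, -5}, /*k=*/64, /*num_sd=*/2);
        std::cout << "scale=" << dst.scale << " offset=" << dst.offset << "\n";
        return 0;
    }

Widening the kept interval (larger num_sd) makes saturation of the quantized
output less likely at the cost of a coarser output scale, which is why the
parameter is exposed with a conservative default of 2.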