aboutsummaryrefslogtreecommitdiff
path: root/tests/validation/Helpers.cpp
diff options
context:
space:
mode:
authorMohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>2023-10-26 00:14:36 +0100
committerMohmun02 <MohammedSuhail.Munshi@arm.com>2023-11-03 14:01:37 +0000
commit02c452fe1ec17c3941272a07b5cae1f32d614c56 (patch)
tree3fc26399885bfefe6b9b086ea2b7fefc7eff54e2 /tests/validation/Helpers.cpp
parentc259aa5e04714bb3a8d23a6903161c240c279743 (diff)
downloadComputeLibrary-02c452fe1ec17c3941272a07b5cae1f32d614c56.tar.gz
Add Dynamic Quantization tests to Fully Connected Layer
This patch calculates the output quantization info based on the inputs' quantization information. The previous approach was using the same quantization information for input, weights and output. This implementation does not cover the cases where we have a fused activation function. Resolves: [COMPMID-6484] Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com> Change-Id: Ib58143165191e82ae8547e661ac7c8d077bda200 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10539 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: SiCong Li <sicong.li@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/Helpers.cpp')
-rw-r--r-- tests/validation/Helpers.cpp | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index cb4d87601c..560460fd33 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -426,7 +426,7 @@ QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_
}
QuantizationHint suggest_mac_dst_q_info_and_bias(
- const QuantizationInfo &a_q_info, const QuantizationInfo &b_q_info, int32_t K, DataType data_type, float bias_fraction)
+ const QuantizationInfo &a_q_info, const QuantizationInfo &b_q_info, int32_t K, DataType data_type, float bias_fraction, int num_sd)
{
QuantizationInfo c_q_info;
@@ -554,8 +554,8 @@ QuantizationHint suggest_mac_dst_q_info_and_bias(
const float var_d = std_d * std_d;
// Also calculate the suggested bias range
- const int32_t min_bias = mean_d_int - 2 * std_d_int;
- const int32_t max_bias = mean_d_int + 2 * std_d_int;
+ const int32_t min_bias = mean_d_int - (num_sd * std_d_int);
+ const int32_t max_bias = mean_d_int + (num_sd * std_d_int);
// Output/C stats
const float mean_out = K * mean_a * mean_b + mean_d;
@@ -563,8 +563,8 @@ QuantizationHint suggest_mac_dst_q_info_and_bias(
const float std_out = sqrt(var_out);
// Output quantization setup
- const float scale_out = 4 * std_out / 255;
- const int32_t offset_out = static_cast<int32_t>(t_min - (mean_out - 2.f * std_out) / scale_out);
+ const float scale_out = (2 * num_sd) * std_out / 255;
+ const int32_t offset_out = static_cast<int32_t>(t_min - (mean_out - (num_sd * std_out)) / scale_out);
c_q_info = QuantizationInfo(scale_out, offset_out);