From 0afe61f88ce3d2f445c5f01ae5567cb1b0b7f303 Mon Sep 17 00:00:00 2001
From: Eric Kunze <eric.kunze@arm.com>
Date: Wed, 14 Feb 2024 16:33:31 -0800
Subject: Modify convolution operators to improve bias handling

Accumulator size moves to an enumerated attribute. out_t for
floating-point changes to be the size of the input. Bias for
floating-point also becomes the bit width of the input type.

Signed-off-by: Eric Kunze <eric.kunze@arm.com>
Change-Id: I7369417adbb1106ce34a1978e7f511a30272c318
---
 chapters/introduction.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/chapters/introduction.adoc b/chapters/introduction.adoc
index c34bf7b..c6764d3 100644
--- a/chapters/introduction.adoc
+++ b/chapters/introduction.adoc
@@ -397,7 +397,7 @@ if (!local_bound) {
 output_bnd = operation_fp64(input_abs, weight_abs, bias_abs);
 
 size_t T = tensor_size(output_shape) // number dot product results
-size_t ksb = (max_value(bias_abs) > 0) ? (KS + 1) : KS; // kernel size and bias
+size_t ksb = ceil(KS / exp2(normal_frac<acc_t>() - normal_frac<out_t>())) + ((max_value(bias_abs) > 0) ? 1 : 0);
 fp64_t out_err_sum = 0.0;
 fp64_t out_err_sumsq = 0.0;
 for_each(index in output_shape) {
@@ -412,7 +412,7 @@ for_each(index in output_shape) {
         REQUIRE(out_ref == 0.0 && out_imp == 0.0);
         out_err = 0.0;
     } else { // 0.0 < out_bnd < infinity
-        fp64_t out_err_bnd = max(out_bnd * exp2(-1-normal_frac<acc_t>()), normal_min<acc_t>());
+        fp64_t out_err_bnd = max(out_bnd * exp2(-1-normal_frac<out_t>()), normal_min<out_t>());
         out_err = (static_cast<fp64_t>(out_imp) - out_ref) / out_err_bnd;
         REQUIRE(abs(out_err) <= ksb);
     }
-- 
cgit v1.2.1
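
For illustration, a minimal standalone C++ sketch of the two bounds this patch
touches. This is a reading of the spec pseudocode under stated assumptions, not
spec text: normal_frac<T>() and normal_min<T>() are taken to be the mantissa
bit count and smallest positive normal value of T (supplied here via
std::numeric_limits), and ksb and check_output_element are hypothetical helper
names introduced for this sketch.

----
#include <algorithm>
#include <cmath>
#include <limits>

// Fractional (mantissa) bits of a normal value of type T, mirroring the
// spec pseudocode helper normal_frac<T>(): 23 for float, 52 for double.
template <typename T> constexpr int normal_frac() {
    return std::numeric_limits<T>::digits - 1;
}

// Smallest positive normal value of type T, mirroring normal_min<T>().
template <typename T> constexpr double normal_min() {
    return std::numeric_limits<T>::min();
}

// Revised kernel-size-and-bias budget from the first hunk: the kernel size
// is divided by the precision gap between accumulator and output, and one
// extra unit is allowed when a bias term contributes to the result.
template <typename acc_t, typename out_t>
double ksb(double KS, bool has_bias) {
    double gap = std::exp2(normal_frac<acc_t>() - normal_frac<out_t>());
    return std::ceil(KS / gap) + (has_bias ? 1.0 : 0.0);
}

// Per-element check from the second hunk: the error is expressed in units
// of half a ULP of the bound magnitude at out_t precision (floored at the
// smallest normal value) and must stay within the ksb budget.
template <typename out_t>
bool check_output_element(double out_imp, double out_ref,
                          double out_bnd, double ksb_value) {
    double out_err_bnd = std::max(out_bnd * std::exp2(-1 - normal_frac<out_t>()),
                                  normal_min<out_t>());
    double out_err = (out_imp - out_ref) / out_err_bnd;
    return std::abs(out_err) <= ksb_value;
}
----

When acc_t and out_t have the same precision, the gap is 1 and the budget
reduces to the previous KS (plus 1 with bias), so the revised expression only
differs when the accumulator is wider than the output; in that case the smaller
budget is matched by a correspondingly coarser out_t error unit in the second
hunk.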