author    Fredrik Svedberg <fredrik.svedberg@arm.com>  2022-09-20 16:32:52 +0200
committer Fredrik Svedberg <fredrik.svedberg@arm.com>  2022-09-21 13:58:40 +0200
commit    cc219be4ec175645e8457da80d5effbf4324943b (patch)
tree      e556c5ef568bef059b3367a92928ffbf9a1df8a6 /ethosu/vela/weight_compressor.py
parent    36424312fcc7c279a929073160ca7191a926e77b (diff)
download  ethos-u-vela-cc219be4ec175645e8457da80d5effbf4324943b.tar.gz
MLBEDSW-4338 Randomized int16 PAD output diff
The issue was that the AveragePool in these test cases was translated to DepthwiseConv2DBias, and int16 convolutions always run with reduced scale. Fixed so that reduced scale is not used in this case.

Signed-off-by: Fredrik Svedberg <fredrik.svedberg@arm.com>
Change-Id: Ice956eabbb37c8aa1991464870006971c6ecec43
Diffstat (limited to 'ethosu/vela/weight_compressor.py')
-rw-r--r-- ethosu/vela/weight_compressor.py | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ethosu/vela/weight_compressor.py b/ethosu/vela/weight_compressor.py
index 78c43511..db225fb6 100644
--- a/ethosu/vela/weight_compressor.py
+++ b/ethosu/vela/weight_compressor.py
@@ -275,7 +275,8 @@ def _prepare_scale_and_bias(arch, tens, rescale_for_faf, explicit_scaling):
quantised_scales = [(int(m), int(s)) for s, m in zip(explicit_scaling.shift, explicit_scaling.multiplier)]
else:
# quantise all of the weight scales into (scale_factor, shift)
- if ifm_dtype == DataType.int16:
+ if ifm_dtype == DataType.int16 and bias_tens.dtype == DataType.int64:
+ # Reference uses reduced scaling for int16 with int64 bias
quantised_scales = [reduced_quantise_scale(scale) for scale in scales]
else:
quantised_scales = [quantise_scale(scale) for scale in scales]
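For readers unfamiliar with the (scale_factor, shift) encoding that the patch chooses between, the sketch below illustrates the general idea of quantising a floating-point rescale factor into a fixed-point multiplier and shift, at full versus reduced significand precision. The function name, bit widths and rounding here are assumptions made for illustration only; they are not Vela's actual quantise_scale / reduced_quantise_scale implementations.

```python
import math

def quantise_scale_sketch(scale: float, significand_bits: int = 31):
    """Illustrative sketch only (not Vela's implementation).

    Encodes `scale` as an integer multiplier and a right shift so that
    scale ~= multiplier * 2**-shift, keeping `significand_bits` bits of
    precision in the multiplier.
    """
    if scale == 0.0:
        return 0, 0
    # scale = significand * 2**exponent, with significand in [0.5, 1.0)
    significand, exponent = math.frexp(scale)
    multiplier = int(round(significand * (1 << significand_bits)))
    shift = significand_bits - exponent
    return multiplier, shift

# Full-precision vs reduced-precision encoding of the same scale
# (bit widths chosen for illustration).
full = quantise_scale_sketch(0.003921568, significand_bits=31)
reduced = quantise_scale_sketch(0.003921568, significand_bits=15)
print(full, reduced)
```

The reduced encoding trades precision of the multiplier for a smaller representation; the patch restricts that trade-off to the int16-input, int64-bias case that the reference implementation actually uses it for.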