From 7519d50c993d60faa1ea09e56abfbf17cef23b49 Mon Sep 17 00:00:00 2001
From: James Peet
Date: Mon, 19 Jul 2021 16:47:58 +0100
Subject: MLBEDSW-4892: Fix crash affecting biases without quantization.

Remove quant_values attribute from Tensor class. It only needs a single
values attribute, holding either quantized or unquantized values as
appropriate.

Change-Id: Ie96f80ac58061b6077e0f7048dc60209fdfbcafa
Signed-off-by: James Peet
---
 ethosu/vela/supported_operators.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'ethosu/vela/supported_operators.py')

diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index c993da13..663c78f8 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -532,7 +532,7 @@ class SupportedOperators:
     def constraint_weights_limit(cls, op):
         "The sum of the weights cannot exceed {}"
         weights = op.weights
-        values = weights.quant_values.astype(np.int64) - weights.quantization.zero_point
+        values = weights.values.astype(np.int64) - weights.quantization.zero_point
         limit = np.amax(np.sum(np.absolute(values), axis=(0, 1, 2)))
         valid = limit <= cls.weights_limit
         return valid, f"Tensor '{weights.name}' has the sum of weights: {limit}"
@@ -551,8 +551,8 @@ class SupportedOperators:
     def constraint_bias_40bit(op):
         "Optional Bias tensor values must fit within 40-bits"
         bias = op.bias
-        if bias and bias.dtype == DataType.int64 and bias.quant_values is not None:
-            valid = all(len(bin(quant_value)[2:]) <= 40 for quant_value in bias.quant_values)
+        if bias and bias.dtype == DataType.int64 and bias.values is not None:
+            valid = all(len(bin(quant_value)[2:]) <= 40 for quant_value in bias.values)
             return valid, f"Tensor '{bias.name}' has values larger than 40-bits"
         return True, "Op has no bias tensor, or it fits in 40-bit"
 
--
cgit v1.2.1
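
Note: the 40-bit constraint updated above checks the bit length of each bias value
through the renamed values attribute. Below is a minimal, self-contained sketch of
the same check under that assumption; FakeBiasTensor and bias_fits_in_40_bits are
hypothetical stand-ins for illustration only, not Vela's real Tensor/Operation API.

# Illustrative sketch of the 40-bit bias check (hypothetical names, not Vela code).
import numpy as np


class FakeBiasTensor:
    def __init__(self, name, values):
        self.name = name
        # Single 'values' attribute, as introduced by this patch
        # (the old 'quant_values' attribute is removed).
        self.values = np.array(values, dtype=np.int64)


def bias_fits_in_40_bits(bias):
    # Mirrors constraint_bias_40bit: every bias value must fit within 40 bits.
    # For non-negative values, int(v).bit_length() equals the patched code's
    # len(bin(v)[2:]) expression.
    if bias is not None and bias.values is not None:
        valid = all(int(v).bit_length() <= 40 for v in bias.values)
        return valid, f"Tensor '{bias.name}' has values larger than 40-bits"
    return True, "Op has no bias tensor, or it fits in 40-bit"


print(bias_fits_in_40_bits(FakeBiasTensor("bias_ok", [1, 2, 3])))   # (True, ...)
print(bias_fits_in_40_bits(FakeBiasTensor("bias_big", [1 << 45])))  # (False, ...)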