author    | Tim Hall <tim.hall@arm.com>  | 2020-07-28 17:40:50 +0100
committer | tim.hall <tim.hall@arm.com>  | 2020-08-21 15:34:27 +0000
commit    | e3786ac6bee55c41b69c0cfb94344f3372f8c991 (patch)
tree      | 9c313936fd886a0996e5df9a5f25011b6f5f3936 /ethosu
parent    | 67e0d8f24fcb86115e834acd19dc57027b03ea4f (diff)
download  | ethos-u-vela-e3786ac6bee55c41b69c0cfb94344f3372f8c991.tar.gz
MLBEDSW-2679: Tensor quant comparison is incorrect
- Fixed a bug where the supported operator check rejected operators
  based on an incorrect comparison of the tensor quantisations
  (see the sketch below)
Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: Ibd0eb50077465d2c515c6ee10394d9b43cdf730c
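For context on the fix: the removed `__eq__` in `ethosu/vela/tensor.py` compared every slot of `QuantizationParameters`, including the float `min`/`max` range, so two tensors that agreed on `scale_f32` and `zero_point` could still compare unequal and get a binary elementwise op rejected as unsupported. The following is a minimal, runnable sketch of that difference; the class here is a simplified stand-in (the real one carries more fields), and `old_style_equal` is a hypothetical helper mimicking the deleted method, not part of Vela.

```python
import numpy as np


class QuantizationParameters:
    """Simplified stand-in for the class in ethosu/vela/tensor.py.

    Only the fields relevant to the comparison bug are modelled here.
    """

    __slots__ = ("min", "max", "scale_f32", "zero_point")

    def __init__(self, min=None, max=None, scale_f32=None, zero_point=None):
        self.min = min
        self.max = max
        self.scale_f32 = scale_f32
        self.zero_point = zero_point

    def is_scaling_equal(self, other):
        # The new comparison: only the fields that determine the quantized
        # arithmetic are checked.
        if other is None or not isinstance(other, QuantizationParameters):
            return False
        return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point


def old_style_equal(a, b):
    # Mimics the removed __eq__: every slot must match, including min/max.
    pairs = ((getattr(a, s), getattr(b, s)) for s in QuantizationParameters.__slots__)
    return all(np.array_equal(x, y) for x, y in pairs)


ifm = QuantizationParameters(min=-1.0, max=1.0, scale_f32=0.0078125, zero_point=0)
ofm = QuantizationParameters(min=-0.5, max=0.5, scale_f32=0.0078125, zero_point=0)

print(old_style_equal(ifm, ofm))  # False: min/max differ, so the op was rejected
print(ifm.is_scaling_equal(ofm))  # True: scale and zero point agree
```

Restricting the check to `scale_f32` and `zero_point`, as `is_scaling_equal` does, keeps the restriction focused on the fields that actually affect the quantized computation rather than on bookkeeping metadata.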
Diffstat (limited to 'ethosu')
-rw-r--r-- | ethosu/vela/supported_operators.py | 12
-rw-r--r-- | ethosu/vela/tensor.py              | 19

2 files changed, 14 insertions, 17 deletions
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index e6aaca31..65588bf4 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -115,7 +115,7 @@ class SupportedOperators:
             {op: self.check_memory_only_restrictions for op in self.memory_only_ops}
         )
         self.supported_operator_restrictions.update(
-            {op: self.check_quantization_restrictions for op in self.binary_elem_wise_min_max_ops}
+            {op: self.check_quantization_restrictions_binary_elem_wise for op in self.binary_elem_wise_min_max_ops}
         )
         self.supported_operator_restrictions.update({op: self.check_activation_ops for op in self.activation_ops})
 
@@ -364,16 +364,20 @@ class SupportedOperators:
             return False
         return True
 
-    def check_quantization_restrictions(self, op):
+    def check_quantization_restrictions_binary_elem_wise(self, op):
         # makes sure IFM1, IFM2 and OFM quantization are equal for binary ops
+        assert len(op.inputs) >= 2 and len(op.outputs) == 1
+
         if (
-            len(op.inputs) == 2
-            and not op.inputs[0].quantization == op.inputs[1].quantization == op.outputs[0].quantization
+            op.inputs[0].quantization is None
+            or not op.inputs[0].quantization.is_scaling_equal(op.inputs[1].quantization)
+            or not op.inputs[0].quantization.is_scaling_equal(op.outputs[0].quantization)
         ):
             print(
                 "Warning: Input/output tensors with different quantization is unsupported for the", op.type, "operator"
             )
             return False
+
         return True
 
     def check_activation_ops(self, op):
diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index 66bed59d..5fdea979 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -184,19 +184,6 @@ class QuantizationParameters:
 
     __repr__ = __str__
 
-    def __eq__(self, other):
-        if other is None:
-            return False
-        if not isinstance(other, QuantizationParameters):
-            return False
-
-        pairs = ((getattr(self, s), getattr(other, s)) for s in QuantizationParameters.__slots__)
-
-        return all(np.array_equal(a, b) for a, b in pairs)
-
-    def __ne__(self, other):
-        return not self == other
-
     def clone(self):
         res = QuantizationParameters()
         res.min = self.min
@@ -232,6 +219,12 @@ class QuantizationParameters:
 
         return res
 
+    def is_scaling_equal(self, other):
+        if other is None or not isinstance(other, QuantizationParameters):
+            return False
+
+        return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point
+
 
 def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=TensorPurpose.Unknown, quantization=None):
     # Tensor
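And a small end-to-end sketch of how the reworked operator check behaves. `FakeTensor` and `FakeOp` are hypothetical stand-ins for Vela's internal tensor and operation classes (only the attributes the check reads are modelled), and the check body is paraphrased from the hunk above rather than imported from the real module.

```python
class QuantizationParameters:
    # Minimal stand-in carrying only the fields the new check compares.
    def __init__(self, scale_f32, zero_point):
        self.scale_f32 = scale_f32
        self.zero_point = zero_point

    def is_scaling_equal(self, other):
        if other is None or not isinstance(other, QuantizationParameters):
            return False
        return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point


class FakeTensor:
    # Hypothetical stand-in for Vela's Tensor; only .quantization is needed.
    def __init__(self, quantization):
        self.quantization = quantization


class FakeOp:
    # Hypothetical stand-in for Vela's Operation.
    def __init__(self, op_type, inputs, outputs):
        self.type = op_type
        self.inputs = inputs
        self.outputs = outputs


def check_quantization_restrictions_binary_elem_wise(op):
    # Paraphrased from the new check: IFM1, IFM2 and OFM must share scaling.
    assert len(op.inputs) >= 2 and len(op.outputs) == 1
    if (
        op.inputs[0].quantization is None
        or not op.inputs[0].quantization.is_scaling_equal(op.inputs[1].quantization)
        or not op.inputs[0].quantization.is_scaling_equal(op.outputs[0].quantization)
    ):
        print("Warning: Input/output tensors with different quantization is unsupported for the", op.type, "operator")
        return False
    return True


q = QuantizationParameters(scale_f32=0.05, zero_point=128)
same = FakeOp("Minimum", [FakeTensor(q), FakeTensor(q)], [FakeTensor(q)])
print(check_quantization_restrictions_binary_elem_wise(same))  # True

# The explicit None guard avoids an AttributeError when IFM quantization
# is absent; the op is reported and rejected instead.
broken = FakeOp("Maximum", [FakeTensor(None), FakeTensor(q)], [FakeTensor(q)])
print(check_quantization_restrictions_binary_elem_wise(broken))  # False
```

Note also the added assert: the old guard `len(op.inputs) == 2 and ...` silently skipped the check (returning True) when an op did not have exactly two inputs, whereas the new code makes the two-inputs/one-output precondition for these binary min/max ops explicit.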