diff options
author | Dwight Lidman <dwight.lidman@arm.com> | 2020-06-09 11:40:54 +0200 |
---|---|---|
committer | Tim Hall <tim.hall@arm.com> | 2020-07-14 15:46:10 +0100 |
commit | ebe26c7af49ad277df347dac7c4c05959d83f387 (patch) | |
tree | 08bd76b7509ab093f730e31d316b138a0fdcc753 /ethosu/vela | |
parent | 68a04b129c8c341d4c1a621c04cfe5306390300c (diff) | |
download | ethos-u-vela-ebe26c7af49ad277df347dac7c4c05959d83f387.tar.gz |
MLBEDSW-1538: Output diff for elementwise min/max
This commit adds a quantization restriction check
for supported operators, so that operators that
do not support different quantization between
their IFM (1/2) and OFM tensors are correctly
placed on the CPU.
The quantization between two tensors is compared
using a new equality function implemented for
the QuantizationParameters class.
Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I70ff36b4ab4955f328d6e6e699f00dbc43c0404a
Diffstat (limited to 'ethosu/vela')
-rw-r--r-- | ethosu/vela/supported_operators.py | 12 | ||||
-rw-r--r-- | ethosu/vela/tensor.py | 13 |
2 files changed, 25 insertions, 0 deletions
def check_quantization_restrictions(self, op):
    # Binary elementwise ops (min/max) require IFM1, IFM2 and OFM to share
    # identical quantization; otherwise the op must fall back to the CPU.
    if len(op.inputs) != 2:
        # Not a binary op from this restriction's point of view; nothing to check.
        return True
    ifm1_quant = op.inputs[0].quantization
    ifm2_quant = op.inputs[1].quantization
    ofm_quant = op.outputs[0].quantization
    if ifm1_quant == ifm2_quant == ofm_quant:
        return True
    print("Warning: Input/output tensors with different quantization is unsupported for the", op.type,
          "operator")
    return False
def __eq__(self, other):
    # Two QuantizationParameters compare equal when every slot matches.
    # np.array_equal covers scalar, array-valued and None fields alike
    # (None fails the isinstance check, so no separate None guard is needed).
    if not isinstance(other, QuantizationParameters):
        return False
    for attr in QuantizationParameters.__slots__:
        if not np.array_equal(getattr(self, attr), getattr(other, attr)):
            return False
    return True

def __ne__(self, other):
    # Defined explicitly to mirror __eq__; delegates to it.
    return not self == other