diff options
author | William Isaksson <william.isaksson@arm.com> | 2024-01-10 12:28:04 +0100 |
---|---|---|
committer | Rickard Bolin <rickard.bolin@arm.com> | 2024-01-18 13:24:26 +0000 |
commit | 56e5f0c22ebc995dae13c6b72b08b28934a7871a (patch) | |
tree | c0e7a25770d6d3dc8f15782a0e4529aff081ef3c /ethosu/vela/tflite_supported_operators.py | |
parent | 84fe2f60d5c6a25fa73d081cc90ee858ebca821d (diff) | |
download | ethos-u-vela-56e5f0c22ebc995dae13c6b72b08b28934a7871a.tar.gz |
CONV ops int16 tests failed after TensorFlow update
Adds support for setting the accumulator type using the quantized_bias_type attribute
Change-Id: Ibde1149143b510a1c650a5a037d3ab92d878d7cd
Signed-off-by: William Isaksson <william.isaksson@arm.com>
Diffstat (limited to 'ethosu/vela/tflite_supported_operators.py')
-rw-r--r-- | ethosu/vela/tflite_supported_operators.py | 11 |
1 file changed, 1 insertion, 10 deletions
diff --git a/ethosu/vela/tflite_supported_operators.py b/ethosu/vela/tflite_supported_operators.py index ada2136a..48813feb 100644 --- a/ethosu/vela/tflite_supported_operators.py +++ b/ethosu/vela/tflite_supported_operators.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com> +# SPDX-FileCopyrightText: Copyright 2020-2024 Arm Limited and/or its affiliates <open-source-office@arm.com> # # SPDX-License-Identifier: Apache-2.0 # @@ -58,7 +58,6 @@ class TFLiteSupportedOperators: depthwise_convolution_ops = set((Op.DepthwiseConv2DBias,)) transpose_convolution_ops = set((Op.Conv2DBackpropInput,)) convolution_like_ops = convolution_ops | depthwise_convolution_ops | transpose_convolution_ops - conv_depth_fc_op = convolution_ops | depthwise_convolution_ops | set((Op.FullyConnected,)) max_pooling_ops = Op.op_set(Op.is_maxpool_op) avg_pooling_ops = Op.op_set(Op.is_avgpool_op) pooling_ops = set((Op.ReduceSum,)) | max_pooling_ops | avg_pooling_ops @@ -239,8 +238,6 @@ class TFLiteSupportedOperators: self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bias_shape) self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bias_type) self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bias_40bit) - for op_type in TFLiteSupportedOperators.conv_depth_fc_op: - self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_no_quantized_bias_type) # Transpose Conv specific checks: for op_type in TFLiteSupportedOperators.transpose_convolution_ops: self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_tconv_stride) @@ -534,12 +531,6 @@ class TFLiteSupportedOperators: return valid, f"Tensor '{bias.name}' has values larger than 40-bits" return True, "Op has no bias tensor, or it fits in 40-bit" - def constraint_no_quantized_bias_type(op): - "Attribute quantized_bias_type must not be set" - quantized_bias_type = op.attrs.get("quantized_bias_type", False) - valid = quantized_bias_type == 0 - return valid, f"Op has quantized_bias_type={quantized_bias_type}" - @staticmethod def constraint_batch_size(op): "IFM Tensor batch size must be 1" |