From a3fe665803c0f72000f9dda249446d5a0d03240f Mon Sep 17 00:00:00 2001
From: Tim Hall
Date: Thu, 3 Mar 2022 17:43:16 +0000
Subject: vela: Minor refactor

- Changed comments to docstring on QuantizationParams
- Simplified op type to op name conversion

Signed-off-by: Tim Hall
Change-Id: I2fdf5922cc17944c9bd37917a85fdfe50a1e651d
---
 ethosu/vela/tensor.py         | 10 ++++++----
 ethosu/vela/tflite_mapping.py |  8 +++-----
 ethosu/vela/tosa_mapping.py   |  8 +++-----
 ethosu/vela/vela.py           |  4 ++--
 4 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/ethosu/vela/tensor.py b/ethosu/vela/tensor.py
index 783f459..38b0e43 100644
--- a/ethosu/vela/tensor.py
+++ b/ethosu/vela/tensor.py
@@ -269,9 +269,10 @@ class QuantizationParameters:
         return np.subtract(values, self.zero_point) * self.scale_f32
 
     def is_scaling_equal(self, other: Optional["QuantizationParameters"]) -> bool:
-        # quantisation parameter scaling is not equal if 'other' is None because
-        # it implies that the tensor it belongs to is not quantised. otherwise,
-        # it depends upon whether the scale and zero point are equal
+        """
+        Returns True if the scale and zero point of self and other are equal. If other is None then the scaling is
+        not considered equal because the tensor is assumed to not be quantised and False will be returned
+        """
         if not isinstance(other, QuantizationParameters):
             return False
 
@@ -279,12 +280,13 @@ class QuantizationParameters:
         return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point
 
     def is_valid(self) -> bool:
-        # quantisation parameters are consider valid if they have a scale and zero point
+        """Return True if the quantisation parameters have a scale and zero point"""
         return self.scale_f32 is not None and self.zero_point is not None
 
     def is_per_axis(self) -> bool:
         """Returns True if either the scale, zero point, minimum or maximum values have more than one value"""
+
         for attr in ("scale_f32", "zero_point", "min", "max"):
             if np.size(getattr(self, attr)) > 1:
                 return True
diff --git a/ethosu/vela/tflite_mapping.py b/ethosu/vela/tflite_mapping.py
index a5f7fa2..e12267f 100644
--- a/ethosu/vela/tflite_mapping.py
+++ b/ethosu/vela/tflite_mapping.py
@@ -950,13 +950,11 @@ builtin_operator_inv_map[Op.CustomNpuOp] = (BuiltinOperator.CUSTOM, CustomOption
 
 BUILTIN_OPERATOR_UNKNOWN = "UNKNOWN"
 
+builtin_operator_name_map = {v: k for k, v in vars(BuiltinOperator).items()}
-
-def builtin_type_name(builtin):
-    return next(k for k, v in vars(BuiltinOperator).items() if v == builtin)
 
 
-def optype_to_builtintype(op_type):
+def optype_to_builtintype(op_type: Op):
     if op_type in builtin_operator_inv_map:
-        return builtin_type_name(builtin_operator_inv_map[op_type][0])
+        return builtin_operator_name_map[builtin_operator_inv_map[op_type][0]]
     else:
         return BUILTIN_OPERATOR_UNKNOWN
diff --git a/ethosu/vela/tosa_mapping.py b/ethosu/vela/tosa_mapping.py
index 6710787..f635e4a 100644
--- a/ethosu/vela/tosa_mapping.py
+++ b/ethosu/vela/tosa_mapping.py
@@ -323,14 +323,12 @@ tosa_operator_map = {
 
 tosa_operator_inv_map = {v[0]: (k, v[1]) for k, v in tosa_operator_map.items()}
 
-
-def tosa_type_name(builtin):
-    return next(k for k, v in vars(TosaOp).items() if v == builtin)
+tosa_operator_name_map = {v: k for k, v in vars(TosaOp).items()}
 
 
 # TODO will return UNKNOWN for the once that have not yet been defined in tosa_operator_map
-def optype_to_tosa_op_type(op_type):
+def optype_to_tosa_op_type(op_type: Op):
     if op_type in tosa_operator_inv_map:
-        return tosa_type_name(tosa_operator_inv_map[op_type][0])
+        return tosa_operator_name_map[tosa_operator_inv_map[op_type][0]]
     else:
         return TosaOp.UNKNOWN
diff --git a/ethosu/vela/vela.py b/ethosu/vela/vela.py
index 63cccc5..efe8edb 100644
--- a/ethosu/vela/vela.py
+++ b/ethosu/vela/vela.py
@@ -42,7 +42,7 @@ from .tensor import MemArea
 from .tensor import Tensor
 from .tflite.Model import Model
 from .tflite_mapping import builtin_operator_map
-from .tflite_mapping import builtin_type_name
+from .tflite_mapping import builtin_operator_name_map
 from .tflite_model_semantic import TFLiteSemantic
 from .tflite_supported_operators import TFLiteSupportedOperators
 from .tosa_model_semantic import TosaSemantic
@@ -230,7 +230,7 @@ def generate_supported_ops():
             raise ValueError
 
     op_constraint_links = []
-    op_list = sorted(((op, builtin_type_name(op)) for op in builtin_operator_map), key=lambda x: x[1])
+    op_list = sorted(((op, builtin_operator_name_map[op]) for op in builtin_operator_map), key=lambda x: x[1])
     for op, name in op_list:
         internal_op = builtin_operator_map[op][0]
         if internal_op in TFLiteSupportedOperators.supported_operators:
-- 
cgit v1.2.1
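
Illustrative sketch (not part of the patch above): the refactor replaces a linear scan of the generated operator class on every name lookup with a reverse name map built once at import time. DemoOp below is a hypothetical stand-in for the flatbuffer-generated BuiltinOperator/TosaOp classes, which expose operator names as integer class attributes.

    # Hypothetical stand-in for a flatbuffer-generated operator class.
    class DemoOp:
        ADD = 0
        CONV_2D = 3
        CUSTOM = 32

    # Before: scan the class namespace on every call.
    def type_name_scan(code):
        return next(k for k, v in vars(DemoOp).items() if v == code)

    # After: build the reverse name map once, then index into it.
    demo_operator_name_map = {v: k for k, v in vars(DemoOp).items()}

    assert type_name_scan(3) == demo_operator_name_map[3] == "CONV_2D"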