diff options
author | Dwight Lidman <dwight.lidman@arm.com> | 2020-11-16 17:40:46 +0100 |
---|---|---|
committer | patrik.gustavsson <patrik.gustavsson@arm.com> | 2020-11-20 09:51:15 +0000 |
commit | c7187434c11151a6a03f252c8718f3bf6445ef5b (patch) | |
tree | c17655e6a888f567aa5dacc38eff54b5a348c00b /ethosu/vela/supported_operators.py | |
parent | 8956761a84f413e6f4c9c7d6e4409b145f81c289 (diff) | |
download | ethos-u-vela-c7187434c11151a6a03f252c8718f3bf6445ef5b.tar.gz |
MLBEDSW-3302: Reject per-channel scaling for unsupported ops
Vela only supports per-channel scaling for
convolution ops. This commit adds a check that
puts ops with per-channel scaling on the CPU.
A caveat worth mentioning is that neither
TensorFlow Lite nor TensorFlow Lite Micro support
per-channel scaling for the CPU placed op,
however the problem is moved away from Vela.
This commit also changes a small utility function
in supported_operators.py used for docstring
formatting.
Signed-off-by: Dwight Lidman <dwight.lidman@arm.com>
Change-Id: I9ed090592f1d05dd4566d3e54dba1ef405299383
Diffstat (limited to 'ethosu/vela/supported_operators.py')
-rw-r--r-- | ethosu/vela/supported_operators.py | 21 |
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/ethosu/vela/supported_operators.py b/ethosu/vela/supported_operators.py
index 91fcb5ad..6dcb27d0 100644
--- a/ethosu/vela/supported_operators.py
+++ b/ethosu/vela/supported_operators.py
@@ -43,8 +43,8 @@ def _optype_formatter(op_list):
     output = map(optype_to_builtintype, op_list)
     # Remove UNKNOWNs
     output = (x for x in output if x is not BUILTIN_OPERATOR_UNKNOWN)
-    # Order alphabetically
-    return sorted(output)
+    # Order alphabetically and join into a string representation
+    return ", ".join(str(op) for op in sorted(output))
 
 
 class SupportedOperators:
@@ -94,6 +94,7 @@ class SupportedOperators:
     concat_ops = set((Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack,))
     memory_only_ops = set((Op.Squeeze, Op.Reshape, Op.QuantizedReshape,)) | concat_ops | split_ops
     shapeless_input_ops = binary_elem_wise_main_ops | set((Op.Split, Op.SplitV,))
+    per_axis_quant_ops = convolution_like_ops  # per-axis/channel quantization only currently supported for conv ops
     supported_fused_activations = relu_ops | set((Op.Tanh, Op.Sigmoid, Op.LUT,))
     supported_operators = npu_pre_ops | mac_main_ops | elem_wise_main_ops | npu_post_ops | memory_only_ops
     # Supported data types
@@ -113,6 +114,7 @@ class SupportedOperators:
     docstring_shapeless_input_ops = _optype_formatter(shapeless_input_ops)
     docstring_supported_int32_tensor_ops = _optype_formatter(supported_int32_tensor_ops)
     docstring_supported_fused_activations = _optype_formatter(supported_fused_activations)
+    docstring_per_axis_quant_ops = _optype_formatter(per_axis_quant_ops)
 
     def __init__(self):
         # Setup the generic constraints. Note: the order matters
@@ -127,6 +129,7 @@ class SupportedOperators:
         self.generic_constraints.append(SupportedOperators.constraint_tens_dimension)
         self.generic_constraints.append(SupportedOperators.constraint_tens_quant_none_check)
         self.generic_constraints.append(SupportedOperators.constraint_tens_quant_scale)
+        self.generic_constraints.append(SupportedOperators.constraint_tens_quant_per_axis)
         self.generic_constraints.append(SupportedOperators.constraint_faf)
 
         # Setup specific constraints. Note: the order matters
@@ -391,6 +394,20 @@ class SupportedOperators:
         return valid, ", ".join(extra)
 
     @classmethod
+    @docstring_format_args([docstring_per_axis_quant_ops])
+    def constraint_tens_quant_per_axis(cls, op):
+        "Per-axis quantization is only supported for the following op types: {}"
+        valid = True
+        extra = []
+        if op.type not in cls.per_axis_quant_ops:
+            tensors = [tens for tens in op.get_ifm_ifm2_weights_ofm() if tens]
+            for tens in tensors:
+                if tens.quantization.is_per_axis():
+                    valid = False
+                    extra.append(tens.name)
+        return valid, "The following tensor(s) have per-axis quantization parameters: " + ", ".join(extra)
+
+    @classmethod
     @docstring_format_args([docstring_supported_fused_activations])
     def constraint_faf(cls, op):
         "The fused activation function (if present) must be one of type: {}"