From c7187434c11151a6a03f252c8718f3bf6445ef5b Mon Sep 17 00:00:00 2001
From: Dwight Lidman
Date: Mon, 16 Nov 2020 17:40:46 +0100
Subject: MLBEDSW-3302: Reject per-channel scaling for unsupported ops

Vela only supports per-channel scaling for convolution ops. This commit
adds a check that puts ops with per-channel scaling on the CPU. A caveat
worth mentioning is that neither TensorFlow Lite nor TensorFlow Lite Micro
supports per-channel scaling for the CPU-placed op; however, the problem
is moved away from Vela.
This commit also changes a small utility function in supported_operators.py
used for docstring formatting.

Signed-off-by: Dwight Lidman
Change-Id: I9ed090592f1d05dd4566d3e54dba1ef405299383
---
 ethosu/vela/test/test_supported_operators.py | 22 ++++++++++++++++++++++
 ethosu/vela/test/testutil.py                 | 10 +++++++++-
 2 files changed, 31 insertions(+), 1 deletion(-)
(limited to 'ethosu/vela/test')

diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 62de0d1d..86d24757 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -100,6 +100,28 @@ def test_constraint_tens_quant_scale():
     assert not support.is_operator_supported(op)
 
 
+def test_constraint_tens_quant_per_axis_not_supp():
+    # Quantization scale cannot be array-valued for elemwise ops
+    qp = QuantizationParameters()
+    qp.zero_point = np.zeros((1, 3))
+    qp.scale_f32 = np.ones((1, 3))
+    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_tens_quant_per_axis_is_supp():
+    op = testutil.create_op_with_quant_tensors(
+        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
+    )
+    op.attrs = {"stride_w": 1, "stride_h": 1}
+    assert support.is_operator_supported(op)
+    qp = QuantizationParameters()
+    qp.zero_point = np.zeros((1, 3))
+    qp.scale_f32 = np.ones((1, 3))
+    op.bias.quantization = qp
+    assert support.is_operator_supported(op)
+
+
 def test_constraint_faf():
     # Fused activation functions, if set, must be a valid op type
     op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index b06008af..82588278 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -80,7 +80,9 @@ def create_elemwise_op(
     return op
 
 
-def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=None, datatype=DataType.uint8):
+def create_op_with_quant_tensors(
+    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
+):
     ifm = Tensor(ifm_shape, datatype, "in")
     ifm.quantization = default_quant_params()
     ofm = Tensor(ofm_shape, datatype, "out")
@@ -102,6 +104,12 @@ def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=No
             "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
         )
         op.add_input_tensor(weights)
+    # Optional bias tensor
+    if bias_shape is not None:
+        qp = default_quant_params()
+        qp.zero_point = np.zeros(bias_shape)
+        bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
+        op.add_input_tensor(bias)
     return op
 
 
--
cgit v1.2.1
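
The constraint that performs the actual rejection lives in ethosu/vela/supported_operators.py, which the path filter above keeps out of this view. Below is a standalone sketch of the idea the two new tests exercise; the stub classes, the PER_AXIS_OPS whitelist and the constraint name are illustrative assumptions, not the real Vela API.

# Illustrative sketch only, not the Vela implementation.
# Idea: an array-valued quantization scale means one scale per channel
# (per-axis); only convolution-style ops may keep such scaling on the NPU.
from dataclasses import dataclass, field
from typing import List, Optional

import numpy as np

PER_AXIS_OPS = {"Conv2DBias", "DepthwiseConv2DBias"}  # assumed whitelist for illustration


@dataclass
class Quant:
    scale_f32: np.ndarray = field(default_factory=lambda: np.float32(1.0))
    zero_point: np.ndarray = field(default_factory=lambda: np.int32(0))


@dataclass
class Tens:
    name: str
    quantization: Optional[Quant] = None


@dataclass
class OpStub:
    type: str
    inputs: List[Tens] = field(default_factory=list)
    outputs: List[Tens] = field(default_factory=list)


def is_per_axis(q: Optional[Quant]) -> bool:
    # An array-valued scale_f32 holds one scale per channel.
    return q is not None and np.size(q.scale_f32) > 1


def constraint_quant_per_axis(op: OpStub) -> bool:
    # Reject per-axis scaling on any tensor unless the op type allows it.
    if op.type in PER_AXIS_OPS:
        return True
    return not any(is_per_axis(t.quantization) for t in op.inputs + op.outputs)


# Mirrors the spirit of the two new tests:
mul = OpStub("Mul", inputs=[Tens("ifm", Quant(np.ones((1, 3))))])
conv = OpStub("Conv2DBias", inputs=[Tens("bias", Quant(np.ones((1, 3))))])
assert not constraint_quant_per_axis(mul)   # per-axis on an elementwise op: not supported
assert constraint_quant_per_axis(conv)      # per-axis on a convolution op: still supported

Treating any array-valued scale_f32 as per-axis mirrors the tests above, which attach a (1, 3) scale array to the IFM of an elementwise Mul (expected to be rejected) and to the bias of a Conv2DBias (expected to remain supported).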