+ # Shapeless input is allowed if it's of a certain type:
+ # Quantization scale cannot be infinite