author      Michael McGeagh <michael.mcgeagh@arm.com>  2020-10-09 17:19:52 +0100
committer   patrik.gustavsson <patrik.gustavsson@arm.com>  2020-10-13 09:35:59 +0000
commit      184b2502fd359ff106acf4230d5f77beac0d22ab (patch)
tree        05d09cdc84c2da6e8a5e9b33e15e3deda1a61219 /ethosu/vela/test
parent      5554bbe54fe7c8bc32f6024c1b2a417e3c5845fa (diff)
download    ethos-u-vela-184b2502fd359ff106acf4230d5f77beac0d22ab.tar.gz
vela: Improve extra info in constraint checks
Keeping the constraint functions consistent with each other
Added specific tensor names in the extra info
Added operator name to the warning generated

This should help easily identify specific problematic nodes in a graph
and give a good enough explanation as to why they are placed on the CPU

Signed-off-by: Michael McGeagh <michael.mcgeagh@arm.com>
Change-Id: Ie5bbdd31e5e75fe37e3d8bb8fee1d260080bce83
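As a rough illustration of the pattern the commit message describes, the sketch below shows one way a constraint check could surface specific tensor names in its extra info, and how a caller could include the operator name in the generated warning. This is a hypothetical Python sketch, not the actual vela implementation: the function names, the Tensor/Operation stubs, their attributes, and the warning format are assumptions made only for illustration.

# Hypothetical sketch only, not the vela source: a constraint check that names
# the offending tensors in its extra info, plus a caller that names the operator
# in the warning, mirroring the intent of this change.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Tensor:
    name: str
    quantization: Optional[object] = None


@dataclass
class Operation:
    name: str
    inputs: List[Tensor] = field(default_factory=list)
    outputs: List[Tensor] = field(default_factory=list)


def constraint_tens_quant_none(op: Operation):
    "Input and output tensors must have quantization parameters"
    missing = [t.name for t in op.inputs + op.outputs if t.quantization is None]
    extra = ", ".join(f"Tensor '{n}' has no quantization parameters" for n in missing)
    return not missing, extra


def report_unsupported(op: Operation, checks) -> bool:
    # Run each constraint and explain why the operator would be placed on the CPU
    for check in checks:
        valid, extra = check(op)
        if not valid:
            print(f"Warning: operator '{op.name}' placed on CPU: {extra}")
            return False
    return True


if __name__ == "__main__":
    op = Operation("scalar_mul", inputs=[Tensor("ifm"), Tensor("ifm2")], outputs=[Tensor("ofm")])
    report_unsupported(op, [constraint_tens_quant_none])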
Diffstat (limited to 'ethosu/vela/test')
-rw-r--r--  ethosu/vela/test/test_supported_operators.py  38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 1fb452cf..6e640b51 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -101,13 +101,16 @@ def test_constraint_tens_defined_shape():
assert not support.is_operator_supported(op)
-def test_constraint_tens_shapeless():
- # Shapeless input is allowed if its of a certain type:
- op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
- assert support.is_operator_supported(op)
+def test_constraint_tens_output_shapeless():
# Shapeless output is not allowed at all:
op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [1, 8, 8, 8], [])
assert not support.is_operator_supported(op)
+
+
+def test_constraint_tens_input_shapeless():
+ # Shapeless input is allowed if its of a certain type:
+ op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
+ assert support.is_operator_supported(op)
# Invalid shapeless input due to op type:
inp = Tensor([], DataType.uint8, "in")
out = Tensor([1, 8, 8, 8], DataType.uint8, "out")
@@ -124,11 +127,14 @@ def test_constraint_tens_shape_size():
def test_constraint_tens_dtype():
- # Tensors can only be of type uint8, int8, int16 (and int32)
+ # Tensors can only be of type uint8, int8, int16 and int32
inp = Tensor([1, 8, 8, 8], DataType.float32, "in")
out = Tensor([1, 8, 8, 8], DataType.float32, "out")
op = testutil.create_op(Op.Relu, [inp], out)
assert not support.is_operator_supported(op)
+
+
+def test_constraint_tens_int32_ops():
# For int32, only select op types are allowed:
op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8], DataType.int32)
assert support.is_operator_supported(op)
@@ -150,6 +156,20 @@ def test_constraint_tens_dimension():
assert not support.is_operator_supported(op)
+def test_constraint_tens_quant_none_check():
+ # Tensors must have quantization parameters
+ op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm2_quant=None)
+ assert not support.is_operator_supported(op)
+
+
+def test_constraint_tens_quant_scale():
+ # Quantization scale cannot be infinit
+ qp = QuantizationParameters()
+ qp.scale_f32 = np.inf
+ op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
+ assert not support.is_operator_supported(op)
+
+
def test_constraint_faf():
# Fused activation functions, if set, must be a valid op type
inp = Tensor([1, 8, 8, 8], DataType.uint8, "in")
@@ -157,11 +177,3 @@ def test_constraint_faf():
op = testutil.create_op(Op.Relu, [inp], out)
op.activation = Op.Conv2D
assert not support.is_operator_supported(op)
-
-
-def test_constraint_tens_quant_scale():
- # Quantization scale cannot be infinit
- op = testutil.create_elemwise_op(Op.Mul, "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
- op.inputs[0].quantization = QuantizationParameters()
- op.inputs[0].quantization.scale_f32 = np.inf
- assert not support.is_operator_supported(op)