Diffstat (limited to 'ethosu/vela/test')
-rw-r--r--  ethosu/vela/test/test_supported_operators.py  22
-rw-r--r--  ethosu/vela/test/testutil.py                  10
2 files changed, 31 insertions(+), 1 deletion(-)
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 62de0d1d..86d24757 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -100,6 +100,28 @@ def test_constraint_tens_quant_scale():
     assert not support.is_operator_supported(op)
 
 
+def test_constraint_tens_quant_per_axis_not_supp():
+    # Quantization scale cannot be array-valued for elemwise ops
+    qp = QuantizationParameters()
+    qp.zero_point = np.zeros((1, 3))
+    qp.scale_f32 = np.ones((1, 3))
+    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_tens_quant_per_axis_is_supp():
+    op = testutil.create_op_with_quant_tensors(
+        Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
+    )
+    op.attrs = {"stride_w": 1, "stride_h": 1}
+    assert support.is_operator_supported(op)
+    qp = QuantizationParameters()
+    qp.zero_point = np.zeros((1, 3))
+    qp.scale_f32 = np.ones((1, 3))
+    op.bias.quantization = qp
+    assert support.is_operator_supported(op)
+
+
 def test_constraint_faf():
     # Fused activation functions, if set, must be a valid op type
     op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
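
The two tests added above pin down the per-axis quantization rule: an array-valued scale_f32/zero_point is rejected on the inputs of an elementwise op such as Op.Mul, but accepted on a convolution's bias tensor. Below is a rough, self-contained sketch of that kind of check; QuantParams and elemwise_inputs_supported are illustrative stand-ins, not Vela's actual SupportedOperators code.

import numpy as np


class QuantParams:
    """Per-tensor or per-axis quantization parameters (illustrative)."""

    def __init__(self, scale_f32, zero_point):
        self.scale_f32 = scale_f32
        self.zero_point = zero_point

    def is_per_axis(self):
        # More than one element in either field marks per-axis quantization
        return any(np.size(v) > 1 for v in (self.scale_f32, self.zero_point))


def elemwise_inputs_supported(*quants):
    # Elementwise ops require plain per-tensor quantization on every input
    return not any(q.is_per_axis() for q in quants)


per_tensor = QuantParams(scale_f32=1.0, zero_point=0)
per_axis = QuantParams(scale_f32=np.ones((1, 3)), zero_point=np.zeros((1, 3)))

assert elemwise_inputs_supported(per_tensor, per_tensor)
assert not elemwise_inputs_supported(per_axis, per_tensor)  # mirrors the first test

The only signal needed is whether the quantization fields hold more than one value; the tests drive exactly that distinction through is_operator_supported.
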
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index b06008af..82588278 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -80,7 +80,9 @@ def create_elemwise_op(
     return op
 
 
-def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=None, datatype=DataType.uint8):
+def create_op_with_quant_tensors(
+    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
+):
     ifm = Tensor(ifm_shape, datatype, "in")
     ifm.quantization = default_quant_params()
     ofm = Tensor(ofm_shape, datatype, "out")
@@ -102,6 +104,12 @@ def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=None, datatype=DataType.uint8):
         "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
     )
     op.add_input_tensor(weights)
+    # Optional bias tensor
+    if bias_shape is not None:
+        qp = default_quant_params()
+        qp.zero_point = np.zeros(bias_shape)
+        bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
+        op.add_input_tensor(bias)
     return op
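
For completeness, a short usage sketch of the extended helper, mirroring the new test above. The import paths follow the Vela source layout, and the input-ordering assertion assumes ifm, weights, and bias are appended in that order, as the helper does; treat both as assumptions rather than guarantees.

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import Op
from ethosu.vela.test import testutil

# Build a Conv2DBias op that now carries an optional bias input
op = testutil.create_op_with_quant_tensors(
    Op.Conv2DBias, [1, 1, 1, 3], [1, 1, 1, 3], weights_shape=[1, 1, 1, 3], bias_shape=[1, 1, 1, 3]
)
op.attrs = {"stride_w": 1, "stride_h": 1}
assert len(op.inputs) == 3                    # ifm, weights, and the optional bias
assert op.inputs[-1].dtype == DataType.int32  # the bias tensor is always created as int32
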