diff options
Diffstat (limited to 'ethosu/vela/test')
-rw-r--r--  ethosu/vela/test/test_graph_optimiser.py     | 5
-rw-r--r--  ethosu/vela/test/test_supported_operators.py | 2
-rw-r--r--  ethosu/vela/test/testutil.py                 | 6
3 files changed, 8 insertions, 5 deletions
diff --git a/ethosu/vela/test/test_graph_optimiser.py b/ethosu/vela/test/test_graph_optimiser.py
index 45377417..7fdc4bd8 100644
--- a/ethosu/vela/test/test_graph_optimiser.py
+++ b/ethosu/vela/test/test_graph_optimiser.py
@@ -21,6 +21,7 @@ import numpy as np
 from ethosu.vela.graph_optimiser import convert_batched_fc_shape
 from ethosu.vela.operation import Op
 from ethosu.vela.tensor import create_const_tensor
+from ethosu.vela.tensor import Shape4D
 from ethosu.vela.tensor import Tensor
 from ethosu.vela.test import testutil
@@ -35,8 +36,8 @@ def test_convert_batched_fc():
     ifm.consumer_list.append(op)
-    op.ifm_shapes.append([4, 1, 1, 8])
-    op.ofm_shapes.append([4, 1, 1, 8])
+    op.ifm_shapes.append(Shape4D([4, 1, 1, 8]))
+    op.ofm_shapes.append(Shape4D([4, 1, 1, 8]))
     prev_op = op.clone()
     prev_op.ifm_shapes = op.ifm_shapes
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 583821a2..973b820d 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -62,7 +62,7 @@ def test_constraint_tens_input_scalar():
 def test_constraint_tens_shape_size():
     # Tensors cannot be > 4D
-    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8])
+    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 1, 8, 8, 8], [1, 1, 8, 8, 8], set_ifm_ofm_shapes=False)
     assert not support.is_operator_supported(op)
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index 63f841b4..c3459501 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -75,7 +75,7 @@ def create_elemwise_op(
 def create_op_with_quant_tensors(
-    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8
+    op_type, ifm_shape, ofm_shape, weights_shape=None, bias_shape=None, datatype=DataType.uint8, set_ifm_ofm_shapes=True
 ):
     ifm = Tensor(ifm_shape, datatype, "in")
     ifm.quantization = default_quant_params()
@@ -107,7 +107,9 @@ def create_op_with_quant_tensors(
         bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
         op.add_input_tensor(bias)
-    op.set_ifm_ofm_shapes()
+    if set_ifm_ofm_shapes:
+        op.set_ifm_ofm_shapes()
+
     return op