From 65fd99830a762b2c59aaa446b55cbfa43a92f8ba Mon Sep 17 00:00:00 2001
From: Michael McGeagh
Date: Tue, 20 Oct 2020 11:49:28 +0100
Subject: MLBEDSW-2412 All constraints have been refactored

All existing constraints have now been refactored using the new framework.

Signed-off-by: Michael McGeagh
Change-Id: Ic9ba0d7040cb9f114b959a949bfdf777f86752c7
---
 ethosu/vela/test/test_supported_operators.py | 528 +++++++++++++++++++++++----
 ethosu/vela/test/testutil.py                 |  32 +-
 2 files changed, 481 insertions(+), 79 deletions(-)

(limited to 'ethosu/vela/test')

diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 665ebc2c..595ea590 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -29,67 +29,9 @@ from ethosu.vela.test import testutil
 support = SupportedOperators()
 
 
-def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
-    qp = QuantizationParameters()
-    in0 = Tensor(in_shape, DataType.uint8, "in")
-    in0.quantization = qp
-    in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp)
-    in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp)
-    in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp)
-    out = Tensor(out_shape, DataType.uint8, "out")
-    out.quantization = qp
-    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
-    return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs)
-
-
-def create_strided_slice():
-    # Creates a valid strided slice operator with some valid inputs/outputs
-    op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
-    op.attrs["begin_mask"] = 1
-    op.attrs["end_mask"] = 9
-    assert support.is_operator_supported(op)
-    return op
-
-
-def test_strided_slice():
-    # Tests support for StridedSlice operator
-    op = create_strided_slice()
-    # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok
-    op.attrs["new_axis_mask"] = 2
-    assert support.is_operator_supported(op)
-    op = create_strided_slice()
-    op.attrs["shrink_axis_mask"] = 3
-    assert support.is_operator_supported(op)
-    # But setting both to non-zero is not supported
-    op.attrs["new_axis_mask"] = 2
-    assert not support.is_operator_supported(op)
-    # begin values must not be None
-    op.inputs[1].values = None
-    assert not support.is_operator_supported(op)
-    # Unsupported strides
-    op = create_strided_slice()
-    op.inputs[3].values = [1, 1, 2, 1]
-    assert not support.is_operator_supported(op)
-    # Wrong number of input tensors
-    op = create_strided_slice()
-    op.add_input_tensor(op.inputs[0].clone())
-    assert not support.is_operator_supported(op)
-    # Unsupported ellipsis mask
-    op = create_strided_slice()
-    op.attrs["ellipsis_mask"] = 1
-    assert not support.is_operator_supported(op)
-    # Examples where end offset <= begin offset
-    op = create_strided_slice()
-    op.inputs[1].values = [0, 7, 2, 0]
-    assert not support.is_operator_supported(op)
-    op = create_strided_slice()
-    op.inputs[2].values = [0, 7, 2, 0]
-    assert not support.is_operator_supported(op)
-    op = create_strided_slice()
-    op.attrs["begin_mask"] = 0
-    assert not support.is_operator_supported(op)
-    op = create_strided_slice()
-    op.attrs["end_mask"] = 0
+def test_constraint_tens_no_dynamic():
+    # Tensors cannot be dynamic (no shape, not a scalar)
+    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [])
     assert not support.is_operator_supported(op)
 
 
@@ -99,18 +41,20 @@ def test_constraint_tens_defined_shape():
     assert not support.is_operator_supported(op)
 
 
-def test_constraint_tens_output_shapeless():
-    # Shapeless output is not allowed at all:
+def test_constraint_tens_output_scalar():
+    # Scalar output is not allowed at all:
     op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [])
+    op.ofm.values = 0.5
     assert not support.is_operator_supported(op)
 
 
-def test_constraint_tens_input_shapeless():
+def test_constraint_tens_input_scalar():
     # Shapeless input is allowed if it's of a certain type:
     op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8])
     assert support.is_operator_supported(op)
     # Invalid shapeless input due to op type:
     op = testutil.create_op_with_quant_tensors(Op.Relu, [], [1, 8, 8, 8])
+    op.ifm.values = 0.5
     assert not support.is_operator_supported(op)
 
 
@@ -149,6 +93,7 @@ def test_constraint_tens_quant_none_check():
 def test_constraint_tens_quant_scale():
     # Quantization scale cannot be infinite
     qp = QuantizationParameters()
+    qp.zero_point = 0
     qp.scale_f32 = np.inf
     op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [], [1, 8, 8, 8], ifm_quant=qp)
     assert not support.is_operator_supported(op)
 
 
@@ -219,12 +164,12 @@ def test_constraint_weights_type():
     assert not support.is_operator_supported(op)
 
 
-def test_constraint_weights_nonconst():
+def test_constraint_weights_const():
     # Weight tensors cannot be non-const
     op = testutil.create_op_with_quant_tensors(Op.Conv2D, [1, 8, 8, 8], [1, 8, 8, 8])
     op.attrs = {"stride_w": 1, "stride_h": 1}
     weights = Tensor([64, 64, 1, 1], DataType.uint8, "weights")
-    weights.quantization = QuantizationParameters()
+    weights.quantization = testutil.default_quant_params()
     op.add_input_tensor(weights)
     assert not support.is_operator_supported(op)
 
 
@@ -251,7 +196,7 @@ def test_constraint_bias_40bit():
     op = testutil.create_op_with_quant_tensors(Op.Conv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
     op.attrs = {"stride_w": 1, "stride_h": 1}
     bias = Tensor([1, 1, 1, 1], DataType.int64, "bias")
-    bias.quant_values = np.array([0x1FF_FFFF_FFFF])
+    bias.quant_values = np.array([0x01FF_FFFF_FFFF])
     op.add_input_tensor(bias)
     assert not support.is_operator_supported(op)
 
 
@@ -260,3 +205,452 @@ def test_constraint_batch_size():
     op = testutil.create_op_with_quant_tensors(Op.Conv2D, [2, 8, 8, 8], [1, 8, 8, 8], weights_shape=[1, 1, 1, 1])
     op.attrs = {"stride_w": 1, "stride_h": 1}
     assert not support.is_operator_supported(op)
+
+
+def test_constraint_quant_scale_inf():
+    op = testutil.create_op_with_quant_tensors(Op.Relu, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.ofm.quantization.scale_f32 = np.float32(1e-39)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_depth_multiplier():
+    # Valid. Depth multiplier is 1 so no further constraints
+    op = testutil.create_op_with_quant_tensors(
+        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
+    )
+    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 1}
+    assert support.is_operator_supported(op)
+    # Invalid. Depth multiplier doesn't equal ofm channel
+    op = testutil.create_op_with_quant_tensors(
+        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1]
+    )
+    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
+    assert not support.is_operator_supported(op)
+    # Valid. Depth multiplier is equal to ofm channel
+    op = testutil.create_op_with_quant_tensors(
+        Op.DepthwiseConv2DBias, [1, 1, 1, 1], [1, 1, 1, 2], weights_shape=[1, 1, 1, 1]
+    )
+    op.attrs = {"stride_w": 1, "stride_h": 1, "depth_multiplier": 2}
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_tconv_stride():
+    # Strides must be 2
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 1, "stride_h": 1, "padding": b"SAME"}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_tconv_same():
+    # Valid
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert support.is_operator_supported(op)
+    # Invalid
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"SAME"}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_tconv_valid():
+    # Valid
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[4, 4, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert support.is_operator_supported(op)
+    # Invalid
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 4, 4, 1], weights_shape=[2, 2, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": b"VALID"}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_matching_in_out_types():
+    # Valid
+    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 2, "padding": b"SAME"}
+    assert support.is_operator_supported(op)
+    # Invalid. Datatypes for ifm and ofm must match (default uint8)
+    op.ifm.dtype = DataType.int8
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_filter_type():
+    # Filter width/height must be integers
+    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2.5, "filter_height": "2", "padding": b"SAME"}
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_filter_range():
+    # Avg pool restrictions are dependent on padding:
+    # SAME padding restricts both W and H to max 8
+    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 20, "filter_height": 20, "padding": b"SAME"}
+    assert not support.is_operator_supported(op)
+    # VALID padding limits are much larger
+    op.attrs["padding"] = b"VALID"
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_filter_height_range_valid_pad():
+    # Avg pool restrictions are dependent on padding:
+    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"VALID"}
+    assert support.is_operator_supported(op)
+    # VALID padding restricts to 256 in filter height
+    op.attrs["filter_height"] = 257
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_filter_product_height_range_valid_pad():
+    # Avg pool restrictions are dependent on padding:
+    op = testutil.create_op_with_quant_tensors(Op.AvgPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"VALID"}
+    assert support.is_operator_supported(op)
+    # VALID padding restricts filter W x H to 256x256
+    op.attrs["filter_width"] = 257
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_filter_height_range():
+    # Max pool restrictions aren't dependent on padding
+    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 2, "filter_height": 256, "padding": b"SAME"}
+    assert support.is_operator_supported(op)
+    # Restricts to 256 in filter height
+    op.attrs["filter_height"] = 257
+    assert not support.is_operator_supported(op)
+    # Doesn't matter if SAME or VALID
+    op.attrs["padding"] = b"VALID"
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_filter_product_height_range():
+    # Max pool restrictions aren't dependent on padding
+    op = testutil.create_op_with_quant_tensors(Op.MaxPool, [1, 8, 8, 8], [1, 8, 8, 8])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "filter_width": 256, "filter_height": 256, "padding": b"SAME"}
+    assert support.is_operator_supported(op)
+    # Restricts filter W x H to 256x256
+    op.attrs["filter_width"] = 257
+    assert not support.is_operator_supported(op)
+    # Doesn't matter if SAME or VALID
+    op.attrs["padding"] = b"VALID"
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_resize():
+    # IFM W and H == 1
+    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
+    assert support.is_operator_supported(op)
+    # IFM == OFM
+    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
+    assert support.is_operator_supported(op)
+    # IFM x2 == OFM ; align_corners = False
+    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
+    assert support.is_operator_supported(op)
+    # IFM x2 -1 == OFM ; align_corners = True
+    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
+    op.attrs["align_corners"] = True
+    assert support.is_operator_supported(op)
+    # Invalid cases
+    op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
+    assert not support.is_operator_supported(op)
+    op.attrs["align_corners"] = True
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_matching_shapes():
+    # Softmax requires the ifm and ofm shapes to match
+    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 2, 2, 4])
+    assert not support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.Softmax, [1, 1, 1, 8], [1, 1, 1, 8])
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_splitv_inferred():
+    # SplitV requires a maximum of one inferred shape (-1)
+    qp = testutil.default_quant_params()
+    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
+    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
+    op.add_input_tensor(sizes)
+    assert not support.is_operator_supported(op)
+    op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
+    sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
+    op.add_input_tensor(sizes)
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_concat_pass():
+    # A working concat
+    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
+    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
+    ifm2.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm2)
+    op.attrs["axis"] = 3
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_axis_exists():
+    # Missing axis attribute
+    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
+    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
+    ifm2.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm2)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_axis_valid():
+    # Invalid axis attribute
+    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
+    ifm2 = Tensor([1, 1, 1, 4], DataType.uint8, "in2")
+    ifm2.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm2)
+    op.attrs["axis"] = 7
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_matching_dimensionality():
+    # Mismatching dimensionality: 4D+2D=4D
+    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
+    ifm2 = Tensor([1, 4], DataType.uint8, "in2")
+    ifm2.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm2)
+    op.attrs["axis"] = 3
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_valid_dimensions():
+    # Mismatching dimension value:
+    # ifm2 has w and h as 2, which is not the axis to concat and doesn't match ifm1 or ofm
+    op = testutil.create_op_with_quant_tensors(Op.Concat, [1, 1, 1, 4], [1, 1, 1, 8])
+    ifm2 = Tensor([1, 2, 2, 4], DataType.uint8, "in2")
+    ifm2.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm2)
+    op.attrs["axis"] = 3
+    assert not support.is_operator_supported(op)
+
+
+def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
+    qp = testutil.default_quant_params()
+    in0 = Tensor(in_shape, DataType.uint8, "in")
DataType.uint8, "in") + in0.quantization = qp + in1 = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets, quantization=qp) + in2 = create_const_tensor("end", [len(end_offsets)], DataType.uint8, end_offsets, quantization=qp) + in3 = create_const_tensor("strides", [len(end_offsets)], DataType.uint8, len(end_offsets) * [1], quantization=qp) + out = Tensor(out_shape, DataType.uint8, "out") + out.quantization = qp + attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0} + return testutil.create_op(Op.StridedSlice, [in0, in1, in2, in3], out, attrs=attrs) + + +def create_strided_slice(): + # Creates a valid strided slice operator with some valid inputs/outputs + op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0]) + op.attrs["begin_mask"] = 1 + op.attrs["end_mask"] = 9 + assert support.is_operator_supported(op) + return op + + +def test_constraint_stridedslice_input_count(): + # Wrong number of input tensors + op = create_strided_slice() + op.add_input_tensor(op.inputs[0].clone()) + assert not support.is_operator_supported(op) + + +def test_constraint_stridedslice_inputs_const(): + # begin, end, stride values must not be None + op = create_strided_slice() + op.inputs[1].values = None + assert not support.is_operator_supported(op) + op = create_strided_slice() + op.inputs[2].values = None + assert not support.is_operator_supported(op) + op = create_strided_slice() + op.inputs[3].values = None + assert not support.is_operator_supported(op) + + +def test_constraint_stridedslice_tens_size_matches(): + op = create_strided_slice() + op.inputs[1].values = [1, 1, 1, 1, 1, 1, 1, 1] + assert not support.is_operator_supported(op) + + +def test_constraint_stridedslice_stride_values(): + # Unsupported strides + op = create_strided_slice() + op.inputs[3].values = [1, 1, 2, 1] + assert not support.is_operator_supported(op) + + +def test_constraint_ellipsis_mask(): + # Unsupported ellipsis mask + op = create_strided_slice() + op.attrs["ellipsis_mask"] = 1 + assert not support.is_operator_supported(op) + + +def test_constraint_axis_masks(): + op = create_strided_slice() + # Setting one of new_axis_mask/shrink_axis_mask to non-zero is ok + op.attrs["new_axis_mask"] = 2 + assert support.is_operator_supported(op) + op = create_strided_slice() + op.attrs["shrink_axis_mask"] = 3 + assert support.is_operator_supported(op) + # But setting both to non-zero is not supported + op.attrs["new_axis_mask"] = 2 + assert not support.is_operator_supported(op) + + +def test_constraint_slice_ranges(): + # Examples where end offset <= begin offset + op = create_strided_slice() + op.inputs[1].values = [0, 7, 2, 0] + assert not support.is_operator_supported(op) + op = create_strided_slice() + op.inputs[2].values = [0, 7, 2, 0] + assert not support.is_operator_supported(op) + op = create_strided_slice() + op.attrs["begin_mask"] = 0 + assert not support.is_operator_supported(op) + op = create_strided_slice() + op.attrs["end_mask"] = 0 + assert not support.is_operator_supported(op) + + +def test_constraint_matching_inputs_types(): + # input data types must match (default is uint8) + op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8]) + op.ifm2.dtype = DataType.int8 + assert not support.is_operator_supported(op) + + +def test_constraint_matching_signed(): + # signed inputs require output to also be signed + op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 
+    op.ofm.dtype = DataType.uint8
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_unsigned_valid():
+    # unsigned inputs require output to be either:
+    op = testutil.create_elemwise_op(Op.Mul, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
+    # the same (default uint8)
+    assert support.is_operator_supported(op)
+    op.ofm.dtype = DataType.int8
+    assert not support.is_operator_supported(op)
+    op.ofm.dtype = DataType.int16
+    assert not support.is_operator_supported(op)
+    # or int32
+    op.ofm.dtype = DataType.int32
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_inputs_int32():
+    # both inputs must be type int32
+    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
+    assert not support.is_operator_supported(op)
+    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
+    assert support.is_operator_supported(op)
+    op.ifm2.dtype = DataType.int16
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_output_int32():
+    # output must be type int32
+    op = testutil.create_elemwise_op(Op.SHL, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8], datatype=DataType.int32)
+    assert support.is_operator_supported(op)
+    op.ofm.dtype = DataType.int16
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_matching_quantization_parameters():
+    qp = QuantizationParameters()
+    qp.scale_f32 = np.float32(1.5)
+    qp.zero_point = 128
+    # valid - all matching (uses default quant params)
+    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
+    assert support.is_operator_supported(op)
+    # invalid - ifm mismatch ofm
+    op.ifm.quantization = qp
+    assert not support.is_operator_supported(op)
+    # invalid - ifm2 mismatch ofm
+    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
+    op.ifm2.quantization = qp
+    assert not support.is_operator_supported(op)
+    # invalid - both ifm and ifm2 mismatch ofm
+    op = testutil.create_elemwise_op(Op.Minimum, "op", [1, 8, 8, 8], [1, 8, 8, 8], [1, 8, 8, 8])
+    op.ifm.quantization = qp
+    op.ifm2.quantization = qp
+    assert not support.is_operator_supported(op)
+    # valid - all matching
+    op.ofm.quantization = qp
+    assert support.is_operator_supported(op)
+
+
+def test_constraint_elemwise_batch_size():
+    # BINARY CASE
+    # Batch can be >1 if dims is <=2D
+    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [2, 2], [2, 2])
+    assert support.is_operator_supported(op)
+    # For dims >2D, batch must be 1
+    op = testutil.create_elemwise_op(Op.Add, "op", [1, 2, 2], [1, 2, 2], [1, 2, 2])
+    assert support.is_operator_supported(op)
+    # invalid case
+    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2, 2], [2, 2, 2], [2, 2, 2])
+    assert not support.is_operator_supported(op)
+
+    # UNARY CASE
+    # Batch can be >1 if dims is <=2D
+    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
+    assert support.is_operator_supported(op)
+    # For dims >2D, batch must be 1
+    op = testutil.create_elemwise_op(Op.CLZ, "op", [1, 2, 2], None, [1, 2, 2], datatype=DataType.int32)
+    assert support.is_operator_supported(op)
+    # invalid case
+    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2, 2], None, [2, 2, 2], datatype=DataType.int32)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_matching_either_shapes():
+    # BINARY CASE
+    # At least one ifm shape must match ofm's shape
+    op = testutil.create_elemwise_op(Op.Add, "op", [2, 2], [4, 4], [2, 2])
+    assert support.is_operator_supported(op)
+    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [2, 2], [2, 2])
+    assert support.is_operator_supported(op)
+    op = testutil.create_elemwise_op(Op.Add, "op", [4, 4], [4, 4], [2, 2])
+    assert not support.is_operator_supported(op)
+
+    # UNARY CASE
+    # No second input so this is treated the same as requiring ifm shape to match ofm shape
+    op = testutil.create_elemwise_op(Op.CLZ, "op", [2, 2], None, [2, 2], datatype=DataType.int32)
+    assert support.is_operator_supported(op)
+    op = testutil.create_elemwise_op(Op.CLZ, "op", [4, 4], None, [2, 2], datatype=DataType.int32)
+    assert not support.is_operator_supported(op)
+
+
+def test_constraint_alpha_valid():
+    # Alpha cannot be negative
+    op = testutil.create_elemwise_op(Op.LeakyRelu, "op", [2, 2], None, [2, 2])
+    op.attrs["alpha"] = 0
+    assert support.is_operator_supported(op)
+    op.attrs["alpha"] = -1
+    assert not support.is_operator_supported(op)
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index 92bf53dc..b06008af 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -39,16 +39,23 @@ def create_arch():
     )
 
 
+def default_quant_params():
+    qp = QuantizationParameters()
+    qp.scale_f32 = np.float32(1)
+    qp.zero_point = 0
+    return qp
+
+
 def create_elemwise_op(
-    type,
+    op_type,
     name,
     ifm_shape,
     ifm2_shape,
     ofm_shape,
     datatype=DataType.uint8,
-    ifm_quant=QuantizationParameters(),
-    ifm2_quant=QuantizationParameters(),
-    ofm_quant=QuantizationParameters(),
+    ifm_quant=default_quant_params(),
+    ifm2_quant=default_quant_params(),
+    ofm_quant=default_quant_params(),
 ):
     # Creates elementwise operation with constant IFM/IFM2
     if datatype.size_in_bytes() == 1:
@@ -57,15 +64,16 @@ def create_elemwise_op(
         np_type = np.int16
     else:
         np_type = np.int32
-    op = Operation(type, name)
+    op = Operation(op_type, name)
     op.add_input_tensor(
         create_const_tensor(name + "_ifm", ifm_shape, datatype, np.zeros(ifm_shape), np_type, quantization=ifm_quant)
     )
-    op.add_input_tensor(
-        create_const_tensor(
-            name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), np_type, quantization=ifm2_quant
+    if ifm2_shape is not None:
+        op.add_input_tensor(
+            create_const_tensor(
+                name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), np_type, quantization=ifm2_quant
+            )
         )
-    )
     ofm = Tensor(ofm_shape, datatype, name + "_ofm")
     ofm.quantization = ofm_quant
     op.set_output_tensor(ofm)
@@ -73,11 +81,10 @@ def create_elemwise_op(
 
 
 def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=None, datatype=DataType.uint8):
-    qp = QuantizationParameters()
     ifm = Tensor(ifm_shape, datatype, "in")
-    ifm.quantization = qp
+    ifm.quantization = default_quant_params()
    
     ofm = Tensor(ofm_shape, datatype, "out")
-    ofm.quantization = qp
+    ofm.quantization = default_quant_params()
     op = Operation(op_type, "op")
     op.add_input_tensor(ifm)
     op.set_output_tensor(ofm)
@@ -89,6 +96,7 @@ def create_op_with_quant_tensors(op_type, ifm_shape, ofm_shape, weights_shape=No
         np_type = np.int16
     else:
         np_type = np.int32
+    qp = default_quant_params()
     qp.zero_point = np.zeros(weights_shape)
     weights = create_const_tensor(
         "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
-- 
cgit v1.2.1
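
One behavioural detail of the new testutil defaults is worth flagging for reviewers: Python evaluates default argument expressions once, when the def statement runs, so the three default_quant_params() calls in the create_elemwise_op signature produce exactly three QuantizationParameters objects that are shared by every call relying on the defaults. The sketch below is a self-contained illustration of that general pitfall and of the usual None-sentinel alternative; QuantParams is a local stand-in, not vela's QuantizationParameters class.

    # Self-contained sketch of the shared-mutable-default pitfall
    # (QuantParams is a hypothetical stand-in; this is not vela code).
    class QuantParams:
        def __init__(self):
            self.scale_f32 = 1.0
            self.zero_point = 0


    def make_shared(quant=QuantParams()):
        # Default evaluated once, at definition time: all calls share one object
        return quant


    def make_fresh(quant=None):
        # None sentinel: a fresh object is created on every call using the default
        return quant if quant is not None else QuantParams()


    a, b = make_shared(), make_shared()
    assert a is b                  # same object returned by both calls
    a.zero_point = 128
    assert b.zero_point == 128     # mutation via one handle is visible via the other

    c, d = make_fresh(), make_fresh()
    assert c is not d              # independent objects, no leakage between calls

The sharing appears benign for the tests in this patch, since they replace tensor quantization with fresh objects rather than mutating the shared defaults, and create_op_with_quant_tensors calls default_quant_params() inside the function body, giving each tensor it builds its own instance.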