Diffstat (limited to 'ethosu/vela')
-rw-r--r--  ethosu/vela/test/test_tflite_supported_operators.py   37
-rw-r--r--  ethosu/vela/tflite_graph_optimiser.py                  29
-rw-r--r--  ethosu/vela/tflite_supported_operators.py              23
3 files changed, 74 insertions, 15 deletions
diff --git a/ethosu/vela/test/test_tflite_supported_operators.py b/ethosu/vela/test/test_tflite_supported_operators.py
index f54211f0..e5cc280b 100644
--- a/ethosu/vela/test/test_tflite_supported_operators.py
+++ b/ethosu/vela/test/test_tflite_supported_operators.py
@@ -218,12 +218,47 @@ def test_constraint_depth_multiplier():
 def test_constraint_tconv_stride():
-    # Strides must be 2
+    # Valid 2x2
     op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 2, 2, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 2, "padding": Padding.SAME}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert support.is_operator_supported(op)
+    # Valid 1x1
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
     op.attrs = {"stride_w": 1, "stride_h": 1, "padding": Padding.SAME}
     ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
     ifm.quantization = testutil.default_quant_params()
     op.add_input_tensor(ifm)
+    assert support.is_operator_supported(op)
+    # Valid 2x1 (WxH), ifm h = 1 and kernel h = 1
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 1, 2, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 1, "padding": Padding.SAME}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert support.is_operator_supported(op)
+    # Invalid 2x1 (WxH), ifm h = 2 and kernel h = 1
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 1, 2, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 1, "padding": Padding.SAME}
+    ifm = Tensor([1, 2, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert not support.is_operator_supported(op)
+    # Invalid 2x1 (WxH), ifm h = 1 and kernel h = 2
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 1, 1, 1], weights_shape=[1, 2, 1, 1])
+    op.attrs = {"stride_w": 2, "stride_h": 1, "padding": Padding.SAME}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
+    assert not support.is_operator_supported(op)
+    # Invalid 1x2 (WxH)
+    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], [1, 1, 1, 1], weights_shape=[1, 1, 1, 1])
+    op.attrs = {"stride_w": 1, "stride_h": 2, "padding": Padding.SAME}
+    ifm = Tensor([1, 1, 1, 1], DataType.uint8, "ifm")
+    ifm.quantization = testutil.default_quant_params()
+    op.add_input_tensor(ifm)
     assert not support.is_operator_supported(op)
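
The six cases above follow an identical arrange/act/assert pattern, so they could also be written as a table-driven test. A minimal sketch, assuming the same imports and the module-level TFLiteSupportedOperators() instance that the existing test file constructs as support; the test name is illustrative only:

import pytest

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

support = TFLiteSupportedOperators()


@pytest.mark.parametrize(
    "ofm_shape, weights_shape, ifm_shape, stride_w, stride_h, expected",
    [
        ([1, 2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], 2, 2, True),   # valid 2x2
        ([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 1, 1, True),   # valid 1x1
        ([1, 1, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], 2, 1, True),   # valid 2x1, ifm h = kernel h = 1
        ([1, 1, 2, 1], [1, 1, 1, 1], [1, 2, 1, 1], 2, 1, False),  # invalid 2x1, ifm h = 2
        ([1, 1, 1, 1], [1, 2, 1, 1], [1, 1, 1, 1], 2, 1, False),  # invalid 2x1, kernel h = 2
        ([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 1, 2, False),  # invalid 1x2
    ],
)
def test_constraint_tconv_stride_parametrized(ofm_shape, weights_shape, ifm_shape, stride_w, stride_h, expected):
    op = testutil.create_op_with_quant_tensors(Op.Conv2DBackpropInput, [0], ofm_shape, weights_shape=weights_shape)
    op.attrs = {"stride_w": stride_w, "stride_h": stride_h, "padding": Padding.SAME}
    ifm = Tensor(ifm_shape, DataType.uint8, "ifm")
    ifm.quantization = testutil.default_quant_params()
    op.add_input_tensor(ifm)
    assert support.is_operator_supported(op) == expected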
diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index 9b98b8fa..51078718 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -243,13 +243,15 @@ def calc_padding_and_skirt(padding_type, kernel, input_shape, explicit_padding):
     return padding, skirt
-def calc_upscaled_padding_and_skirt(padding_type, kernel_size, stride, input_shape, upscaling_factor):
+def calc_upscaled_padding_and_skirt(
+    padding_type, kernel_size, stride, input_shape, upscaling_factor_y, upscaling_factor_x
+):
     kernel_height, kernel_width = kernel_size[0], kernel_size[1]
     if padding_type == Padding.SAME:
-        ypad = needed_total_padding(int(input_shape.height) * upscaling_factor, int(stride[1]), int(kernel_height))
-        xpad = needed_total_padding(int(input_shape.width) * upscaling_factor, int(stride[2]), int(kernel_width))
-        right_pad = max(((xpad + 1) // upscaling_factor) - 1, 0)
-        bottom_pad = max(((ypad + 1) // upscaling_factor) - 1, 0)
+        ypad = needed_total_padding(int(input_shape.height) * upscaling_factor_y, int(stride[1]), int(kernel_height))
+        xpad = needed_total_padding(int(input_shape.width) * upscaling_factor_x, int(stride[2]), int(kernel_width))
+        right_pad = max(((xpad + 1) // upscaling_factor_x) - 1, 0)
+        bottom_pad = max(((ypad + 1) // upscaling_factor_y) - 1, 0)
         left_pad = max(kernel_width - 1 - right_pad, 0)
         top_pad = max(kernel_height - 1 - bottom_pad, 0)
     elif padding_type == Padding.VALID:
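
For the Padding.SAME branch, the effect of separate x/y factors can be seen with a small worked example. The sketch below is illustrative only: needed_total_padding is re-implemented locally under the usual SAME-padding assumption (out_size = ceil(input_size / stride)), and the stride is taken as 1 because fixup_conv2d_backprop (next hunk) rewrites the op's strides to 1 before padding is computed:

def needed_total_padding(input_size: int, stride: int, filter_size: int) -> int:
    # Local stand-in, assuming the usual SAME rule: out_size = ceil(input_size / stride).
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    return max(0, needed_input - input_size)


def upscaled_same_padding(ifm_h, ifm_w, k_h, k_w, up_y, up_x):
    # Mirrors the SAME branch above with per-axis upscaling factors and stride 1.
    ypad = needed_total_padding(ifm_h * up_y, 1, k_h)
    xpad = needed_total_padding(ifm_w * up_x, 1, k_w)
    right_pad = max(((xpad + 1) // up_x) - 1, 0)
    bottom_pad = max(((ypad + 1) // up_y) - 1, 0)
    left_pad = max(k_w - 1 - right_pad, 0)
    top_pad = max(k_h - 1 - bottom_pad, 0)
    return top_pad, left_pad, bottom_pad, right_pad


# 2x1 (WxH) transpose conv: only the width is upscaled, so up_y=1 and up_x=2.
# A 1x8 IFM with a 1x3 kernel then pads the width only: prints (0, 2, 0, 0).
print(upscaled_same_padding(ifm_h=1, ifm_w=8, k_h=1, k_w=3, up_y=1, up_x=2))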
@@ -269,7 +271,11 @@ def fixup_conv2d_backprop(op: Operation, arch, nng) -> Operation:
         # flip the inputs
         op.inputs[0], op.inputs[2] = op.inputs[2], op.inputs[0]
         op.type = Op.Conv2DBackpropInputSwitchedBias
-        op.ifm_resampling_mode = resampling_mode.TRANSPOSE
+        stride_w = op.kernel.stride.x
+        stride_h = op.kernel.stride.y
+        if stride_w > 1 or stride_h > 1:
+            # Transpose conv2d with upscaling
+            op.ifm_resampling_mode = resampling_mode.TRANSPOSE
         # Update strides
         op.attrs.update({"stride_w": 1, "stride_h": 1, "strides": (1, 1, 1, 1)})
@@ -924,10 +930,15 @@ def add_padding_fields(op, arch, nng):
             else:
                 raise UnsupportedFeatureError(f"Unknown operation that uses padding: {optype_to_builtintype(op.type)}")
-            if op.type == Op.Conv2DBackpropInputSwitchedBias:
-                upscaling_factor = output_shape.height // input_shape.height
+            if op.type == Op.Conv2DBackpropInputSwitchedBias and op.ifm_resampling_mode == resampling_mode.TRANSPOSE:
+                # Transpose with upscale
                 padding, skirt = calc_upscaled_padding_and_skirt(
-                    op.attrs["padding"], kernel_size, op.attrs["strides"], input_shape, upscaling_factor
+                    op.attrs["padding"],
+                    kernel_size,
+                    op.attrs["strides"],
+                    input_shape,
+                    output_shape.height // input_shape.height,
+                    output_shape.width // input_shape.width,
                 )
             else:
                 padding, skirt = calc_padding_and_skirt(
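
The call site derives the two factors as the integer ratio of OFM to IFM extent per axis; for the 2x1 (WxH) case only the width ratio is 2. A tiny sketch with hypothetical NHWC list shapes (the real code reads .height and .width from shape objects):

# Hypothetical shapes for a 2x1 (WxH) transpose convolution, NHWC layout.
ifm_shape = [1, 1, 8, 1]
ofm_shape = [1, 1, 16, 1]

upscaling_factor_y = ofm_shape[1] // ifm_shape[1]  # 1: no vertical upscaling
upscaling_factor_x = ofm_shape[2] // ifm_shape[2]  # 2: width is doubled
assert (upscaling_factor_y, upscaling_factor_x) == (1, 2)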
diff --git a/ethosu/vela/tflite_supported_operators.py b/ethosu/vela/tflite_supported_operators.py
index 723c5f22..52b04857 100644
--- a/ethosu/vela/tflite_supported_operators.py
+++ b/ethosu/vela/tflite_supported_operators.py
@@ -590,11 +590,24 @@ class TFLiteSupportedOperators:
     @staticmethod
     def constraint_tconv_stride(op):
-        "Stride values for both width and height must be 2"
-        w = op.kernel.stride.x
-        h = op.kernel.stride.y
-        valid = (w == 2) and (h == 2)
-        return valid, f"Op has stride WxH as: {w}x{h}"
+        """Stride values for width and height must match one of the following criteria:
+        Stride values WxH must be 1x1 or 2x2
+        Stride WxH 2x1 supported if ifm height and kernel height = 1"""
+        s_w = op.kernel.stride.x
+        s_h = op.kernel.stride.y
+        k_h = op.kernel.height
+        i_h = op.ifm.shape[1]
+        valid = False
+        if s_w == 1 and s_h == 1:
+            valid = True
+
+        if s_w == 2 and s_h == 2:
+            valid = True
+
+        if s_w == 2 and s_h == 1 and i_h == 1 and k_h == 1:
+            valid = True
+
+        return valid, f"Op has ifm_height={i_h}, kernel_height={k_h} and stride WxH as {s_w}x{s_h}"
     @staticmethod
     def constraint_tconv_same(op):
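
Viewed on its own, the new constraint is a small predicate over the stride pair, the kernel height and the IFM height. A standalone sketch (a hypothetical helper, not the class method itself), exercised with the same cases as test_constraint_tconv_stride:

def tconv_stride_ok(s_w: int, s_h: int, kernel_h: int, ifm_h: int) -> bool:
    # 1x1 and 2x2 are always accepted; 2x1 (WxH) only when both the IFM and the kernel are one row high.
    if (s_w, s_h) in ((1, 1), (2, 2)):
        return True
    return (s_w, s_h) == (2, 1) and ifm_h == 1 and kernel_h == 1


# Mirrors the cases in test_constraint_tconv_stride above.
assert tconv_stride_ok(2, 2, kernel_h=1, ifm_h=1)       # valid 2x2
assert tconv_stride_ok(1, 1, kernel_h=1, ifm_h=1)       # valid 1x1
assert tconv_stride_ok(2, 1, kernel_h=1, ifm_h=1)       # valid 2x1
assert not tconv_stride_ok(2, 1, kernel_h=1, ifm_h=2)   # ifm height 2
assert not tconv_stride_ok(2, 1, kernel_h=2, ifm_h=1)   # kernel height 2
assert not tconv_stride_ok(1, 2, kernel_h=1, ifm_h=1)   # 1x2 never allowed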