author    Tim Hall <tim.hall@arm.com>  2022-07-18 21:26:47 +0100
committer tim.hall <tim.hall@arm.com>  2022-07-23 16:56:07 +0000
commit    47c7636586be265eed9e352e6ad4c090a02fb31f (patch)
tree      9ab0472d909b75c8718fd4c476adb94f095946a7
parent    e178f387c56705e7c46ccf57d3676086fb22f05a (diff)
download  ethos-u-vela-47c7636586be265eed9e352e6ad4c090a02fb31f.tar.gz
MLBEDSW-6616: ResizeBilinear align corners is incorrect
- Fixed align corners support when converting into upscale and average pool. The problem was due to the wrong IFM to OFM size ratio, causing a scaling factor that was not 2x/4x/8x. Works for uint8, int8 and int16.
- Fixed the checking of align corners in the supported operators check
- Added an additional supported operators check for the size tensor
- Updated and added more supported operators unit tests

Signed-off-by: Tim Hall <tim.hall@arm.com>
Change-Id: Idb78fa9e76ede2c37e8ac6cb1c322154bd156898
-rw-r--r--  ethosu/vela/test/test_tflite_supported_operators.py  64
-rw-r--r--  ethosu/vela/tflite_graph_optimiser.py                25
-rw-r--r--  ethosu/vela/tflite_supported_operators.py            57
3 files changed, 118 insertions(+), 28 deletions(-)
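
The crux of the fix is how the upscale factor is derived: with align_corners the corner pixels are pinned, so the ratio must be taken over the spans between them, i.e. (size - 1), rather than over the raw sizes. A minimal standalone sketch of that calculation (illustrative only; the real check is constraint_bilinear_resize in the diff below):

# Illustrative sketch of the corrected upscale-factor calculation; the actual
# check is constraint_bilinear_resize in tflite_supported_operators.py below.
def bilinear_upscale_factor(ifm_hw, ofm_hw, align_corners):
    ifm_h, ifm_w = ifm_hw
    ofm_h, ofm_w = ofm_hw
    if align_corners:
        # Corner pixels are pinned, so the ratio is over the (size - 1) spans
        h_factor = (ofm_h - 1) / (ifm_h - 1)
        w_factor = (ofm_w - 1) / (ifm_w - 1)
    else:
        h_factor = ofm_h / ifm_h
        w_factor = ofm_w / ifm_w
    # Only equal 2x/4x/8x factors can be lowered to nearest-neighbour upscaling
    supported = h_factor == w_factor and h_factor in (2.0, 4.0, 8.0)
    return int(h_factor), supported

# 4x4 -> 13x13 with align_corners is a clean 4x: (13 - 1) / (4 - 1) = 4.0;
# the old formula, (ofm + 1) / ifm, gave 3.5 here and rejected the op.
assert bilinear_upscale_factor((4, 4), (13, 13), True) == (4, True)
assert bilinear_upscale_factor((4, 4), (32, 32), False) == (8, True)
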
diff --git a/ethosu/vela/test/test_tflite_supported_operators.py b/ethosu/vela/test/test_tflite_supported_operators.py
index 04d3cba1..ab12e417 100644
--- a/ethosu/vela/test/test_tflite_supported_operators.py
+++ b/ethosu/vela/test/test_tflite_supported_operators.py
@@ -306,30 +306,82 @@ def test_constraint_filter_product_height_range():
assert not support.is_operator_supported(op)
-def test_constraint_resize():
+def test_constraint_bilinear_resize():
# IFM W and H == 1
op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
assert support.is_operator_supported(op)
+
# IFM == OFM
op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 8, 8, 8], [1, 8, 8, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
assert support.is_operator_supported(op)
+
# IFM x2 == OFM ; align_corners = False
op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
+ assert support.is_operator_supported(op)
+
+ # IFM x4 == OFM ; align_corners = False
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 16, 16, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16], np.int32))
assert support.is_operator_supported(op)
- # IFM x2 -1 == OFM ; align_corners = True
+
+ # IFM x8 == OFM ; align_corners = False
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 32, 32, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32], np.int32))
+ assert support.is_operator_supported(op)
+
+ # IFM -1 x2 == OFM -1 ; align_corners = True
op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 7, 7, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
op.attrs["align_corners"] = True
assert support.is_operator_supported(op)
- # Invalid cases
- op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 20, 20, 8])
+
+ # IFM -1 x4 == OFM -1 ; align_corners = True
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 13, 13, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
+ op.attrs["align_corners"] = True
+ assert support.is_operator_supported(op)
+
+ # IFM -1 x8 == OFM -1 ; align_corners = True
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 25, 25, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25], np.int32))
+ op.attrs["align_corners"] = True
+ assert support.is_operator_supported(op)
+
+ # Invalid case - upscale size
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 17, 17, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17], np.int32))
assert not support.is_operator_supported(op)
+
+ # Invalid case - upscale size with align corners
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 15, 15, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15], np.int32))
op.attrs["align_corners"] = True
assert not support.is_operator_supported(op)
+def test_constraint_bilinear_resize_size():
+ # Invalid case - size != ofm size
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
+ assert not support.is_operator_supported(op)
+
+
def test_constraint_bilinear_resize_attrs():
- op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 1, 1, 8], [1, 8, 8, 8])
- assert support.is_operator_supported(op)
+ # Invalid case - both align corners and half-pixel centers
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
+ op.attrs["align_corners"] = True
+ op.attrs["half_pixel_centers"] = True
+ assert not support.is_operator_supported(op)
+
+
+def test_constraint_bilinear_resize_hpc():
+ # Invalid case - half-pixel centers (not supported)
+ op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
op.attrs["half_pixel_centers"] = True
assert not support.is_operator_supported(op)
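
To exercise just the resize constraints locally, something like the following should work (a suggested invocation only, assuming pytest is available; the -k expression matches the test names added above):

# Suggested invocation, not part of the change; assumes pytest is installed.
# The -k expression selects the bilinear resize constraint tests added above.
import pytest

pytest.main(["ethosu/vela/test/test_tflite_supported_operators.py", "-k", "bilinear_resize"])
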
diff --git a/ethosu/vela/tflite_graph_optimiser.py b/ethosu/vela/tflite_graph_optimiser.py
index b1a56605..d2899c4c 100644
--- a/ethosu/vela/tflite_graph_optimiser.py
+++ b/ethosu/vela/tflite_graph_optimiser.py
@@ -303,26 +303,22 @@ def convert_resizebilinear_1x1_to_add(op):
# Convert ResizeBilinear to a number of 2x2 nearest neighbor upscaling and one avgpool op with kernel size dependent
# on the upscaling factor. Avgpool kernel limit of 8x8 when padding is applied limits upscaling to 8x8.
-def convert_resizebilinear_to_nearest_neighbor_upscaling_and_pool(op):
+def convert_resizebilinear_to_upscale_and_average_pool(op):
pre_op = op
outputs = op.outputs
dtype = op.ifm.dtype
op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 1, 1, 1)})
- if op.attrs["align_corners"]:
- shape_modifier = 1
- op.attrs["padding"] = Padding.VALID
- else:
- shape_modifier = 0
- op.attrs["padding"] = Padding.SAME
+ op.attrs["padding"] = Padding.SAME # doesn't really matter as the kernel is 1x1
op.ifm_resampling_mode = resampling_mode.NEAREST
upscaled_shape = np.array(op.ifm_shapes[0].get_hw_as_list())
- out_shape = np.array(op.ofm_shapes[0].get_hw_as_list())
+
+ # Get upscale factor that was calculated in the supported operators check
+ upscale_factor = op.attrs["upscale_factor"]
# Calculate how many times 2x2 upscaling needs to be performed
# Force the result of round to be an integer. This is because the behaviour of rounding numpy.float64 values changed
# between different versions of numpy. This consistency ensures that the kernel dimensions are kept integral
- upscale_factor = int(round(out_shape[1] / upscaled_shape[1]))
n = int(np.log2(upscale_factor))
# Perform 2x2 upscaling n-1 times
@@ -333,7 +329,7 @@ def convert_resizebilinear_to_nearest_neighbor_upscaling_and_pool(op):
scaled_op.inputs[0] = pre_op.outputs[0]
# Nearest neighbor 2x2 upscaling
- upscaled_shape = upscaled_shape * 2 - shape_modifier
+ upscaled_shape = upscaled_shape * 2
shape = op.ofm_shapes[0].as_list()
shape[1:3] = upscaled_shape
out_tens = Tensor(shape, dtype, f"{op.outputs[0].name}_{count}")
@@ -348,8 +344,11 @@ def convert_resizebilinear_to_nearest_neighbor_upscaling_and_pool(op):
if n > 1:
scaled_op = op.clone(f"_{n-1}")
scaled_op.inputs[0] = pre_op.outputs[0]
- scaled_op.attrs["padding"] = Padding.EXPLICIT
- scaled_op.attrs["explicit_padding"] = [0, 0, upscale_factor - 1, upscale_factor - 1]
+ if op.attrs["align_corners"]:
+ scaled_op.attrs["padding"] = Padding.VALID
+ else:
+ scaled_op.attrs["padding"] = Padding.EXPLICIT
+ scaled_op.attrs["explicit_padding"] = [0, 0, upscale_factor - 1, upscale_factor - 1]
scaled_op.attrs.update({"ksize": (1, upscale_factor, upscale_factor, 1)})
scaled_op.outputs = outputs
scaled_op.outputs[0].ops = [scaled_op]
@@ -367,7 +366,7 @@ def fixup_resizebilinear(op, arch, nng):
elif op.ifm_shapes[0].height == 1 and op.ifm_shapes[0].width == 1:
convert_resizebilinear_1x1_to_add(op)
else:
- convert_resizebilinear_to_nearest_neighbor_upscaling_and_pool(op)
+ convert_resizebilinear_to_upscale_and_average_pool(op)
return op
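
After the fix, each upscaling stage is a plain 2x doubling; the per-stage -1 adjustment for align_corners is gone, and the trimming instead comes from the VALID padding of the final average pool. A standalone sketch of the resulting H/W progression (illustrative only, not the Vela implementation):

import math

# Illustrative sketch of the H/W progression produced by
# convert_resizebilinear_to_upscale_and_average_pool; not the Vela code itself.
def resize_shape_progression(ifm_hw, upscale_factor, align_corners):
    h, w = ifm_hw
    n = int(math.log2(upscale_factor))  # number of 2x2 nearest-neighbour upscales
    stages = []
    for _ in range(n):
        h, w = h * 2, w * 2  # each stage doubles H and W
        stages.append((h, w))
    # The final average pool uses an upscale_factor x upscale_factor kernel:
    # VALID padding (align_corners=True) trims the output to size - factor + 1,
    # while the explicit padding (align_corners=False) keeps the upscaled size.
    if align_corners:
        ofm_hw = (h - upscale_factor + 1, w - upscale_factor + 1)
    else:
        ofm_hw = (h, w)
    return stages, ofm_hw

# 4x4 IFM, 8x upscale, align_corners=True: 8 -> 16 -> 32, OFM 25x25 (matches the unit test)
print(resize_shape_progression((4, 4), 8, True))   # ([(8, 8), (16, 16), (32, 32)], (25, 25))
# 4x4 IFM, 8x upscale, align_corners=False: OFM stays at 32x32
print(resize_shape_progression((4, 4), 8, False))  # ([(8, 8), (16, 16), (32, 32)], (32, 32))
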
diff --git a/ethosu/vela/tflite_supported_operators.py b/ethosu/vela/tflite_supported_operators.py
index 25a34e82..01d2e61f 100644
--- a/ethosu/vela/tflite_supported_operators.py
+++ b/ethosu/vela/tflite_supported_operators.py
@@ -242,8 +242,10 @@ class TFLiteSupportedOperators:
# Resizing specific checks:
for op_type in TFLiteSupportedOperators.resizing_ops:
- self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_resize)
+ self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bilinear_resize)
+ self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bilinear_resize_size)
self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bilinear_resize_attrs)
+ self.specific_constraints[op_type].append(TFLiteSupportedOperators.constraint_bilinear_resize_hpc)
# Vector Product specific checks:
for op_type in TFLiteSupportedOperators.fc_vector_products:
@@ -587,35 +589,72 @@ class TFLiteSupportedOperators:
return True, "Op has padding=SAME"
@staticmethod
- def constraint_resize(op):
+ def constraint_bilinear_resize(op):
"""The width and height of the IFM and OFM must match one of the following criteria:
IFM W and H must both be 1
IFM must match OFM
- OFM W and H must be equal and 2/4/8x IFM -1, if align_corners is True
- OFM W and H must be equal and 2/4/8x IFM, if align_corners is False"""
+ OFM W and H must be equal and OFM W-1 and H-1 must be 2x/4x/8x IFM W-1 and H-1, if align_corners is True
+ OFM W and H must be equal and OFM W and H must be 2x/4x/8x IFM W and H, if align_corners is False"""
# Easier to start with False condition as very few cases result in a supported resize
valid = False
ifm_shape = op.ifm.shape
+ ifm_shape_h = ifm_shape[1]
+ ifm_shape_w = ifm_shape[2]
ofm_shape = op.ofm.shape
+ ofm_shape_h = ofm_shape[1]
+ ofm_shape_w = ofm_shape[2]
+
align_corners = op.attrs.get("align_corners", False)
if len(ifm_shape) == 4:
# Valid if IFM W and H are both 1, or IFM and OFM shape are the same
- if ((ifm_shape[1] == 1) and (ifm_shape[2] == 1)) or (ifm_shape == ofm_shape):
+ if ((ifm_shape_h == 1) and (ifm_shape_w == 1)) or (ifm_shape == ofm_shape):
valid = True
else:
# Valid if OFM is 2/4/8x IFM (-1 for align corners)
- w_upscale_factor = (ofm_shape[1] + 1) / ifm_shape[1] if align_corners else ofm_shape[1] / ifm_shape[1]
- h_upscale_factor = (ofm_shape[2] + 1) / ifm_shape[2] if align_corners else ofm_shape[2] / ifm_shape[2]
+ if align_corners:
+ h_upscale_factor = (ofm_shape_h - 1) / (ifm_shape_h - 1)
+ w_upscale_factor = (ofm_shape_w - 1) / (ifm_shape_w - 1)
+ else:
+ h_upscale_factor = ofm_shape_h / ifm_shape_h
+ w_upscale_factor = ofm_shape_w / ifm_shape_w
- valid = w_upscale_factor == h_upscale_factor and w_upscale_factor in [2, 4, 8]
+ # could use either height or width. save as int because it is more usable later in graph optimiser
+ op.attrs["upscale_factor"] = int(h_upscale_factor)
+ valid = h_upscale_factor == w_upscale_factor and h_upscale_factor in (2.0, 4.0, 8.0)
return valid, f"Op has ifm_shape={ifm_shape}, ofm_shape={ofm_shape} and align_corners={align_corners}"
@staticmethod
+ def constraint_bilinear_resize_size(op):
+ "The size tensor must match the output tensor shape"
+ valid = False
+ ofm_shape = op.ofm.shape
+ size_h, size_w = None, None
+ # check that the size tensor (the second input) exists, is not none, and has the correct values
+ if len(op.inputs) == 2 and op.inputs[1] is not None and len(op.inputs[1].values) == 2:
+ size_h, size_w = op.inputs[1].values
+ # check size and output size match
+ if size_h == ofm_shape[1] and size_w == ofm_shape[2]:
+ valid = True
+
+ return valid, f"Op has size={size_h}x{size_w} and ofm_shape={ofm_shape}."
+
+ @staticmethod
def constraint_bilinear_resize_attrs(op):
+ "Both align_corners and half_pixel_centers can't be True"
+ valid = True
+ align_corners = op.attrs.get("align_corners", False)
+ half_pixel_centers = op.attrs.get("half_pixel_centers", False)
+
+ if align_corners and half_pixel_centers:
+ valid = False
+ return valid, "Op has both align_corners and half_pixel_centers set to True."
+
+ @staticmethod
+ def constraint_bilinear_resize_hpc(op):
"half_pixel_centers are not supported"
valid = True
- if op.attrs.get("half_pixel_centers"):
+ if op.attrs.get("half_pixel_centers", False):
valid = False
return valid, f"Op has half_pixel_centers set to {not valid}."
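
For completeness, a usage sketch of the new checks on a hand-built ResizeBilinear op, mirroring the unit tests above. The import paths and the TFLiteSupportedOperators() construction are assumptions based on the existing test module, not verified against a specific revision:

# Usage sketch only; imports and setup are assumed to match the test module.
import numpy as np

from ethosu.vela.data_type import DataType
from ethosu.vela.operation import Op
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.test import testutil
from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators

support = TFLiteSupportedOperators()

# 4x4 -> 13x13 with align_corners is an exact 4x over the (size - 1) spans
op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 13, 13, 8])
op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
op.attrs["align_corners"] = True
assert support.is_operator_supported(op)

# A size tensor that disagrees with the OFM shape is rejected by the new size check
op = testutil.create_op_with_quant_tensors(Op.ResizeBilinear, [1, 4, 4, 8], [1, 8, 8, 8])
op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
assert not support.is_operator_supported(op)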