From 42fed9484c9aa2a43bdd1b07f9b66bdecabd821d Mon Sep 17 00:00:00 2001
From: Dwight Lidman
Date: Fri, 29 May 2020 09:37:03 +0200
Subject: MLBEDSW-2372: Failing assert for ResizeBilinear with upscale != 2x

This commit fixes the failing assert by removing it and instead placing
unsupported ResizeBilinear operators on the CPU. It introduces a new
graph optimisation function which adds the necessary attributes as well
as new operator restrictions for ResizeBilinear.

Signed-off-by: Dwight Lidman
Change-Id: I2feffd0b5a2169ebffbe4f165e450b3f2d140380
---
 ethosu/vela/graph_optimiser.py | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

(limited to 'ethosu/vela/graph_optimiser.py')

diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index 72bb486c..758b51a2 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -23,6 +23,7 @@ import numpy as np
 from . import rewrite_graph
 from .data_type import DataType
 from .errors import UnsupportedFeatureError
+from .ethos_u55_regs.ethos_u55_regs import resampling_mode
 from .operation import NpuBlockType
 from .operation import Operation
 from .tensor import Tensor
@@ -483,6 +484,30 @@ def convert_mul_max_to_abs_or_lrelu(op, arch):
     return op


+def add_attrs_to_resizebilinear(op, arch):
+    if op.type == 'ResizeBilinear' and op.run_on_npu:
+        input_tensor = op.inputs[0]
+        upscaled_shape = [input_tensor.shape[1] * 2, input_tensor.shape[2] * 2]
+        out_shape = op.outputs[0].shape[1:3]
+        if not op.attrs["align_corners"] and out_shape == upscaled_shape:
+            # this means the output is supposed to be a x2 upscale,
+            # so we need to do SAME padding
+            op.attrs["padding"] = b"SAME"
+        elif op.attrs["align_corners"] and out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]:
+            # here we can just run the avg pool without padding and
+            # produce a (M * 2 - 1, N * 2 - 1) sized output
+            op.attrs["padding"] = b"VALID"
+        else:
+            # If this exception is raised, something is wrong with the supported op check
+            raise UnsupportedFeatureError("Unsupported upscaling factor")
+        input_tensor.resampling_mode = resampling_mode.NEAREST
+        op.attrs.update({
+            'strides': (1, 1, 1, 1),
+            'ksize': (1, 2, 2, 1),
+        })
+    return op
+
+
 def supported_operator_check(op, arch):
     op.run_on_npu = arch.supported_operators.is_operator_supported(op)
     return op
@@ -503,6 +528,7 @@ def optimise_graph_a(nng, arch, verbose_graph=False):
         fixup_pack_input,
         fixup_conv2d_backprop,
         fixup_act_reorder,
+        add_attrs_to_resizebilinear,
         add_padding_fields,
         mark_npu_block_type,
         fixup_elementwise_with_scalars,
-- 
cgit v1.2.1
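
The diff above is limited to ethosu/vela/graph_optimiser.py, so the "new
operator restrictions for ResizeBilinear" mentioned in the commit message
(the supported-operators check that decides run_on_npu) are not shown. A
minimal sketch of the constraint that add_attrs_to_resizebilinear relies on
follows; the function name, its standalone form, and the assumed NHWC shape
layout are illustrative assumptions, not code from this commit. It simply
mirrors the two cases the pass accepts: an exact 2x upscale without
align_corners, or a (2H - 1, 2W - 1) output with align_corners.

    # Sketch only (hypothetical helper, not from the patch): accept a
    # ResizeBilinear op on the NPU only when the output matches one of the
    # two upscaling patterns handled by add_attrs_to_resizebilinear.
    def resizebilinear_is_supported(op):
        input_shape = op.inputs[0].shape           # assumed NHWC: [N, H, W, C]
        out_shape = op.outputs[0].shape[1:3]
        upscaled_shape = [input_shape[1] * 2, input_shape[2] * 2]
        if op.attrs["align_corners"]:
            # align_corners=True: output is (2H - 1, 2W - 1)
            return out_shape == [upscaled_shape[0] - 1, upscaled_shape[1] - 1]
        # align_corners=False: output is an exact 2x upscale
        return out_shape == upscaled_shape

Any ResizeBilinear failing this check would be left with run_on_npu=False and
fall back to the CPU, which is why the UnsupportedFeatureError branch in the
pass should be unreachable in practice.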