From 9a03fdff316662be69a1adc4e391e43bc6519b08 Mon Sep 17 00:00:00 2001
From: Charles Xu
Date: Thu, 2 Jul 2020 15:12:40 +0200
Subject: MLBEDSW-2569:Support 1x1 IFM ResizeBilinear

Signed-off-by: Charles Xu
Change-Id: I44428d77b2e8e44a477e5c4dfe28ab8dd1792838
---
 ethosu/vela/graph_optimiser.py | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/ethosu/vela/graph_optimiser.py b/ethosu/vela/graph_optimiser.py
index c805be50..355b16ff 100644
--- a/ethosu/vela/graph_optimiser.py
+++ b/ethosu/vela/graph_optimiser.py
@@ -27,6 +27,7 @@ from .ethos_u55_regs.ethos_u55_regs import resampling_mode
 from .numeric_util import full_shape
 from .operation import NpuBlockType
 from .operation import Operation
+from .tensor import QuantizationParameters
 from .tensor import Tensor
 
 passthrough_nodes = set(("Identity",))
@@ -181,6 +182,39 @@ def fixup_conv2d_backprop(op, arch):
     return op
 
 
+# Convert the op to an elementwise add
+def convert_resizebilinear_1x1_to_add(op):
+    op.type = "AddAct"
+    op.name = op.name + "_add"
+    op.attrs.update({"npu_block_type": NpuBlockType.ElementWise})
+    op.attrs["resizebilinear"] = True
+    # Create an input tensor filled with zeros
+    shape = op.outputs[0].shape
+    tens = Tensor(shape, op.inputs[0].dtype, op.inputs[1].name + "_add")
+    tens.values = np.zeros(shape)
+    tens.quant_values = np.zeros(shape, np.uint8)
+    tens.quantization = QuantizationParameters(0.0, 255.0)
+    tens.quantization.scale_f32 = 1.0
+    tens.quantization.zero_point = 0
+    tens.consumer_list = [op]
+    tens_op = op.inputs[1].ops[0]
+    tens_op.outputs = [tens]
+    tens.ops = [tens_op]
+    # Set the add inputs
+    op.inputs[1] = op.inputs[0]
+    op.inputs[0] = tens
+
+    return op
+
+
+def fixup_resizebilinear(op, arch):
+    if op.type == "ResizeBilinear":
+        if op.inputs[0].shape[1] == 1 and op.inputs[0].shape[2] == 1:
+            convert_resizebilinear_1x1_to_add(op)
+
+    return op
+
+
 def fixup_fully_connected_input(op, arch):
     if op.type == "FullyConnectedAct":
         inp = op.inputs[0]
@@ -614,8 +648,7 @@ def add_attrs_to_resizebilinear(op, arch):
             # produce a (M * 2 - 1, N * 2 - 1) sized output
             op.attrs["padding"] = b"VALID"
         else:
-            # If this exception is raised, something is wrong with the supported op check
-            raise UnsupportedFeatureError("Unsupported upscaling factor")
+            return op
         input_tensor.resampling_mode = resampling_mode.NEAREST
         op.attrs.update({"strides": (1, 1, 1, 1), "ksize": (1, 2, 2, 1)})
     return op
@@ -647,6 +680,7 @@ def optimise_graph_a(nng, arch, verbose_graph=False):
         mark_npu_block_type,
         fixup_elementwise_with_scalars,
         reorder_depthwise_weights,
+        fixup_resizebilinear,
         # convert_mul_max_to_abs_or_lrelu # TODO: enable optimisation once quantisation issues are resolved
     ]
 
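
Note on the rewrite (not part of the patch): a bilinear resize of a 1x1 IFM puts the single input value at every output spatial position, so the op reduces to broadcasting the IFM across the output shape. The patch expresses this as an elementwise add with an all-zero tensor of the output shape, quantised with scale 1.0 and zero point 0 so the quantised values pass through unchanged. The sketch below uses only NumPy, with function names invented for the example, to check that equivalence; it is an illustration of the idea, not Vela code.

import numpy as np

def resize_bilinear_1x1(ifm, out_h, out_w):
    # With a 1x1 spatial input every interpolated sample equals the single
    # input value, so bilinear resizing degenerates to a broadcast.
    n, _, _, c = ifm.shape
    return np.broadcast_to(ifm, (n, out_h, out_w, c)).copy()

def resize_as_add(ifm, out_h, out_w):
    # The rewrite the patch relies on: add the 1x1 IFM to an all-zero tensor
    # of the output shape and let broadcasting perform the upscaling, which
    # maps onto the NPU's elementwise engine.
    n, _, _, c = ifm.shape
    zeros = np.zeros((n, out_h, out_w, c), dtype=ifm.dtype)
    return zeros + ifm

# Quantised uint8 IFM in NHWC layout with 1x1 spatial resolution.
ifm = np.random.randint(0, 256, size=(1, 1, 1, 8), dtype=np.uint8)
assert np.array_equal(resize_bilinear_1x1(ifm, 4, 4), resize_as_add(ifm, 4, 4))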