From ae2d553c4f3dd71a1df6c0e8c9cb920ae584b59e Mon Sep 17 00:00:00 2001
From: Louis Verhaard
Date: Fri, 11 Dec 2020 17:19:54 +0100
Subject: MLBEDSW-3499: Support for PAD operator

Replaces the PAD operator by hardware padding when possible.

Change-Id: I9dce0885e51a4a73715824d7368637222e39b2b3
Signed-off-by: Louis Verhaard
---
 ethosu/vela/test/test_graph_optimiser.py | 45 ++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

(limited to 'ethosu/vela/test/test_graph_optimiser.py')

diff --git a/ethosu/vela/test/test_graph_optimiser.py b/ethosu/vela/test/test_graph_optimiser.py
index 7fdc4bd8..b3938bcc 100644
--- a/ethosu/vela/test/test_graph_optimiser.py
+++ b/ethosu/vela/test/test_graph_optimiser.py
@@ -18,8 +18,12 @@
 # Unit tests for graph_optimiser
 import numpy as np
 
+from ethosu.vela.data_type import DataType
 from ethosu.vela.graph_optimiser import convert_batched_fc_shape
+from ethosu.vela.graph_optimiser import optimise_pad
+from ethosu.vela.nn_graph import Graph
 from ethosu.vela.operation import Op
+from ethosu.vela.operation import Padding
 from ethosu.vela.tensor import create_const_tensor
 from ethosu.vela.tensor import Shape4D
 from ethosu.vela.tensor import Tensor
@@ -73,3 +77,44 @@ def test_convert_batched_fc():
     assert conv_op.type == Op.FullyConnected
     assert len(conv_op.ifm.shape) == 2
     assert conv_op.ifm.shape == conv_op.ofm.shape
+
+
+def test_optimise_pad():
+    """
+    Tests that the PAD operator is bypassed when followed by a convolution operator,
+    and that the padding of the convolution operation is correctly updated
+    """
+    # Create Pad operation followed by Conv2D
+    quant = testutil.default_quant_params()
+    in_tens = Tensor([1, 76, 75, 64], DataType.uint8, "input")
+    in_tens.quantization = quant
+    pad_input = create_const_tensor("pad_input", [4, 2], DataType.int32, [[0, 0], [2, 1], [1, 1], [0, 0]])
+    temp_tens = Tensor([1, 79, 77, 64], DataType.uint8, "pad_out")
+    temp_tens.quantization = quant.clone()
+    out_tens = Tensor([1, 76, 75, 64], DataType.uint8, "output")
+    out_tens.quantization = quant.clone()
+    weight_tens = Tensor([5, 3, 64, 64], DataType.uint8, "weights")
+    weight_tens.values = np.zeros(weight_tens.shape)
+    weight_tens.quant_values = np.zeros(weight_tens.shape, np.uint8)
+    weight_tens.quantization = quant.clone()
+
+    bias_tens = Tensor([64], DataType.int32, "biases")
+    pad_op = testutil.create_op(Op.Pad, [in_tens, pad_input], temp_tens)
+    attrs = {"padding": Padding.VALID, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
+    attrs["strides"] = (1, attrs["stride_h"], attrs["stride_w"], 1)
+    pad_op.run_on_npu = True
+    conv2d_op = testutil.create_op(Op.Conv2D, [temp_tens, weight_tens, bias_tens], out_tens, attrs)
+    conv2d_op.run_on_npu = True
+    nng = Graph()
+    sg = testutil.create_subgraph([pad_op, conv2d_op])
+    nng.subgraphs.append(sg)
+    arch = testutil.create_arch()
+
+    optimise_pad(conv2d_op, nng, arch)
+
+    op = sg.output_tensors[0].ops[0]
+    assert op.type == Op.Conv2D
+    assert op.attrs["padding"] == Padding.EXPLICIT
+    assert op.attrs["explicit_padding"] == (2, 1, 1, 1)
+    assert op.ifm.shape == [1, 76, 75, 64]
+    assert pad_op not in op.ifm.ops
--
cgit v1.2.1
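For context (not part of the patch): the explicit_padding value the test asserts follows directly from the PAD operator's paddings tensor. Below is a minimal standalone sketch of that mapping, assuming the (top, left, bottom, right) ordering implied by the test's assertion; the helper name is hypothetical and not a vela API.

import numpy as np

def explicit_padding_from_pad_values(paddings):
    # TFLite-style NHWC paddings tensor of shape [4, 2]:
    # rows are (batch, height, width, channel), columns are (before, after).
    top, bottom = paddings[1]  # height padding: before = top, after = bottom
    left, right = paddings[2]  # width padding: before = left, after = right
    return (int(top), int(left), int(bottom), int(right))

# Same pad values as the test's "pad_input" const tensor:
pads = np.array([[0, 0], [2, 1], [1, 1], [0, 0]])
assert explicit_padding_from_pad_values(pads) == (2, 1, 1, 1)

# Consistency check against the test's tensor shapes: IFM height 76 + 2 + 1 = 79
# and width 75 + 1 + 1 = 77, matching the padded "pad_out" shape [1, 79, 77, 64].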