aboutsummaryrefslogtreecommitdiff
path: root/ethosu/vela/test
diff options
context:
space:
mode:
authorLouis Verhaard <louis.verhaard@arm.com>2021-01-27 15:57:57 +0100
committerpatrik.gustavsson <patrik.gustavsson@arm.com>2021-02-01 16:44:39 +0000
commitebf4af6a45c60d3f75ccd6019612a7f8b6552d72 (patch)
tree79a84e13a59ee8c0c4e11aa7bb0fe008f4a4ab29 /ethosu/vela/test
parent189f748e1a79ed88044efbe7137963bca830cbb5 (diff)
downloadethos-u-vela-ebf4af6a45c60d3f75ccd6019612a7f8b6552d72.tar.gz
MLBEDSW-3903: Bug fix PAD operator
- Added checks for unsupported pad sizes in the PAD operator. - Bug fix: right-pad/bottom-pad calculation when replacing the PAD operator by hardware padding. Change-Id: Ib84be711277d987052f14352ab386e0e0b774987 Signed-off-by: Louis Verhaard <louis.verhaard@arm.com>
Diffstat (limited to 'ethosu/vela/test')
-rw-r--r--ethosu/vela/test/test_graph_optimiser.py34
-rw-r--r--ethosu/vela/test/test_supported_operators.py40
2 files changed, 72 insertions, 2 deletions
diff --git a/ethosu/vela/test/test_graph_optimiser.py b/ethosu/vela/test/test_graph_optimiser.py
index 55980e3d..4281d314 100644
--- a/ethosu/vela/test/test_graph_optimiser.py
+++ b/ethosu/vela/test/test_graph_optimiser.py
@@ -17,8 +17,10 @@
# Description:
# Unit tests for graph_optimiser
import numpy as np
+import pytest
from ethosu.vela.data_type import DataType
+from ethosu.vela.graph_optimiser import calc_explicit_padding
from ethosu.vela.graph_optimiser import convert_batched_fc_shape
from ethosu.vela.graph_optimiser import optimise_graph_a
from ethosu.vela.graph_optimiser import optimise_pad
@@ -82,6 +84,38 @@ def test_convert_batched_fc():
assert conv_op.ifm.shape == conv_op.ofm.shape
+explicit_padding_test_data = [
+ # Kernel size 2
+ [(17, 1, 2, 1, 1), (1, 1)],
+ [(18, 1, 2, 0, 1), (0, 1)],
+ [(18, 1, 2, 1, 0), (1, 0)],
+ # Kernel size 3
+ [(18, 2, 3, 1, 1), (1, 0)],
+ [(25, 2, 3, 1, 1), (1, 1)],
+ # Kernel size 4
+ [(18, 1, 4, 1, 2), (1, 2)],
+ [(18, 1, 4, 2, 1), (2, 1)],
+ [(19, 1, 4, 2, 2), (2, 2)],
+ # Kernel size 5
+ [(19, 1, 5, 1, 2), (1, 2)],
+ [(19, 1, 5, 0, 2), (0, 2)],
+ [(19, 1, 5, 1, 0), (1, 0)],
+ # Kernel size 21
+ [(41, 2, 21, 8, 10), (8, 10)],
+ [(41, 3, 21, 10, 10), (10, 9)],
+ [(42, 3, 21, 10, 10), (10, 8)],
+ [(42, 3, 21, 9, 10), (9, 9)],
+ [(41, 3, 21, 10, 6), (10, 6)],
+]
+
+
+@pytest.mark.parametrize("test_input, expected_result", explicit_padding_test_data)
+def test_calc_explicit_padding(test_input, expected_result):
+ input_size, stride, filter_size, explicit_pad_before, explicit_pad_after = test_input
+ before, after = calc_explicit_padding(input_size, stride, filter_size, explicit_pad_before, explicit_pad_after)
+ assert (before, after) == expected_result
+
+
def test_optimise_pad():
"""
Tests that the PAD operator is bypassed when followed by a convolution operator,
diff --git a/ethosu/vela/test/test_supported_operators.py b/ethosu/vela/test/test_supported_operators.py
index 5c01027d..5f64dd9d 100644
--- a/ethosu/vela/test/test_supported_operators.py
+++ b/ethosu/vela/test/test_supported_operators.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
+# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -17,6 +17,7 @@
# Description:
# Unit tests for support_operators
import numpy as np
+import pytest
from ethosu.vela.data_type import DataType
from ethosu.vela.operation import ActivationFunction
@@ -525,6 +526,7 @@ def create_pad_op(
out_dtype=DataType.int8,
pad_dtype=DataType.int32,
pad_setting=Padding.VALID,
+ kernel_size=3,
):
qp = testutil.default_quant_params()
in0 = Tensor(in_shape, in_dtype, "in")
@@ -535,7 +537,7 @@ def create_pad_op(
op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
conv_out_tens = Tensor(in_shape, in_dtype, "output")
conv_out_tens.quantization = qp.clone()
- weight_tens = Tensor(in_shape, in_dtype, "weights")
+ weight_tens = Tensor([kernel_size, kernel_size, in_shape[-1], out_shape[-1]], in_dtype, "weights")
weight_tens.values = np.zeros(weight_tens.shape)
weight_tens.quant_values = np.zeros(weight_tens.shape, np.int8)
weight_tens.quantization = qp.clone()
@@ -609,6 +611,40 @@ def test_constraint_pad_consumer():
assert not support.is_operator_supported(op)
+pad_invalid_size_test_data = [
+ (2, 1, 1, 1),
+ (1, 2, 1, 1),
+ (1, 1, 2, 1),
+ (1, 1, 1, 2),
+]
+
+
+@pytest.mark.parametrize("top, left, bottom, right", pad_invalid_size_test_data)
+def test_constraint_pad_size(top, left, bottom, right):
+ # Tests PAD operator with a padding that is too high to be handled by the NPU
+ out_shape = [1, 11 + left + right, 11 + top + bottom, 1]
+ padding = [[0, 0], [top, bottom], [left, right], [0, 0]]
+ op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding,)
+ assert not support.is_operator_supported(op)
+
+
+leading_pad_test_data = [
+ (2, 2, 11, True),
+ (1, 2, 11, False),
+ (2, 1, 11, False),
+ (5, 2, 11, True),
+]
+
+
+@pytest.mark.parametrize("top, left, kernel_size, expected", leading_pad_test_data)
+def test_constraint_leading_pad_size(top, left, kernel_size, expected):
+ # Tests PAD operator with big kernel size; top and left pad must be multiple of stride
+ out_shape = [1, 11 + left, 11 + top, 1]
+ padding = [[0, 0], [top, 0], [left, 0], [0, 0]]
+ op = create_pad_op(in_shape=[1, 11, 11, 1], out_shape=out_shape, padding=padding, kernel_size=kernel_size)
+ assert support.is_operator_supported(op) == expected
+
+
def create_strided_slice():
# Creates a valid strided slice operator with some valid inputs/outputs
op = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])