author     Tim Hall <tim.hall@arm.com>   2023-01-13 17:57:25 +0000
committer  tim.hall <tim.hall@arm.com>   2023-01-20 14:07:21 +0000
commit     3b1578e44b4c6a8c8c9a8e0891d3866a89bd66af (patch)
tree       491c337bc854d435b80f0a535496084ea9ebc9ac /ethosu/vela/test
parent     f34904717f643499f3ea6210322bbe1b635db088 (diff)
download   ethos-u-vela-3b1578e44b4c6a8c8c9a8e0891d3866a89bd66af.tar.gz
MLBEDSW-7151: MLCE: Difference in model output between x86 & aarch64
- The issue is due to undefined behaviour when casting a NumPy float
  to a NumPy unsigned integer, which occurs in create_const_tensor()
- The fix is to make sure that the values are first cast to a Python
  float
- In addition, the values datatype argument has been removed from
  create_const_tensor() to stop the tensor and values datatypes
  getting out of sync

Change-Id: I134b9be8c941b361929a5ae7db8cb35f2e9728f2
Signed-off-by: Tim Hall <tim.hall@arm.com>
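Below is a minimal standalone sketch (not part of the patch) of the class of undefined behaviour the commit describes; the per-host results in the comments are illustrative, since C leaves the negative-float-to-unsigned conversion undefined:

import numpy as np

v = np.float32(-1.0)

# Undefined behaviour: C does not define converting a negative float to
# an unsigned integer. In practice this tends to wrap to 255 on x86 and
# saturate to 0 on AArch64, the kind of host-dependent difference
# reported in MLBEDSW-7151.
host_dependent = np.array(v).astype(np.uint8)

# Deterministic alternative: round-trip through a Python float/int first
# and wrap explicitly, so every host computes the same value (here 255).
deterministic = np.uint8(int(float(v)) & 0xFF)

print(host_dependent, deterministic)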
Diffstat (limited to 'ethosu/vela/test')
-rw-r--r--  ethosu/vela/test/test_graph_optimiser.py            | 21
-rw-r--r--  ethosu/vela/test/test_lut.py                        | 16
-rw-r--r--  ethosu/vela/test/test_tflite_model_semantic.py      | 13
-rw-r--r--  ethosu/vela/test/test_tflite_supported_operators.py | 33
-rw-r--r--  ethosu/vela/test/testutil.py                        | 26
5 files changed, 41 insertions, 68 deletions
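Every hunk below follows the same call-site change. Condensed from the set_256_lut() hunk in test_lut.py, the before/after pattern is (argument order as it appears in the diff):

# Before: a separate NumPy values dtype (np.uint8) was passed alongside
# the tensor DataType, and the two could drift out of sync.
lut_tensor = create_const_tensor(
    op.name + "_lut", [1, 1, 1, 256], DataType.int8, values, np.uint8, TensorPurpose.LUT
)

# After: only the tensor DataType is passed; create_const_tensor()
# derives the matching NumPy dtype itself.
lut_tensor = create_const_tensor(op.name + "_lut", [1, 1, 1, 256], DataType.int8, values, TensorPurpose.LUT)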
diff --git a/ethosu/vela/test/test_graph_optimiser.py b/ethosu/vela/test/test_graph_optimiser.py
index 152669f7..54dd70f6 100644
--- a/ethosu/vela/test/test_graph_optimiser.py
+++ b/ethosu/vela/test/test_graph_optimiser.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
+# SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -40,9 +40,9 @@ from ethosu.vela.tflite_graph_optimiser import rewrite_fully_connected_input
def test_convert_batched_fc():
"""Tests shape conversion of batched fully connected"""
ifm_shape = [4, 8]
- ifm = create_const_tensor("test_in", ifm_shape, np.uint8, np.zeros(ifm_shape))
+ ifm = create_const_tensor("test_in", ifm_shape, DataType.uint8, np.zeros(ifm_shape))
w_shape = [8, 4]
- weights = create_const_tensor("weight_in", w_shape, np.uint8, np.zeros(w_shape))
+ weights = create_const_tensor("weight_in", w_shape, DataType.uint8, np.zeros(w_shape))
ofm = Tensor(ifm.shape, np.uint8, "test_out")
op = testutil.create_op(Op.FullyConnected, [ifm, weights], ofm)
@@ -132,7 +132,8 @@ def create_pad_and_conv2d(
qp = testutil.default_quant_params()
in0 = Tensor(in_shape, in_dtype, "in")
in0.quantization = qp
- pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
+ shape = [] if padding == [] else list(np.shape(padding))
+ pad_tensor = create_const_tensor(name="pad", shape=shape, values=padding, dtype=pad_dtype)
out = Tensor(out_shape, out_dtype, "out")
out.quantization = qp.clone()
op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
@@ -543,9 +544,7 @@ def test_quant_static_optimisations():
Tests if the quant value at vela compile time is calculated correctly
"""
- quant_ifm = create_const_tensor(
- "const_quant_ifm", values=np.array(127), value_dtype=np.int8, shape=[], dtype=DataType.int8
- )
+ quant_ifm = create_const_tensor("const_quant_ifm", values=np.array(127), shape=[], dtype=DataType.int8)
quant_ifm.quantization = testutil.default_quant_params()
quant_ifm.quantization.scale_f32 = 0.15748031
quant_ifm.quantization.quant_min = -128
@@ -568,9 +567,7 @@ def test_quant_static_optimisations():
assert op.ofm.values == 127
- quant_ifm = create_const_tensor(
- "const_quant_ifm", values=np.array(127), value_dtype=np.int8, shape=[], dtype=DataType.int8
- )
+ quant_ifm = create_const_tensor("const_quant_ifm", values=np.array(127), shape=[], dtype=DataType.int8)
quant_ifm.quantization = testutil.default_quant_params()
quant_ifm.quantization.scale_f32 = 0.15748031
quant_ifm.quantization.quant_min = -128
@@ -600,9 +597,7 @@ def test_optimise_quantize_multiple_values():
when passing multiple values to quantize node
"""
- quant_ifm = create_const_tensor(
- "const_quant_ifm", values=np.array([127, 127]), value_dtype=np.int8, shape=[], dtype=DataType.int8
- )
+ quant_ifm = create_const_tensor("const_quant_ifm", values=np.array([127, 127]), shape=[], dtype=DataType.int8)
quant_ifm.quantization = testutil.default_quant_params()
quant_ifm.quantization.scale_f32 = 0.15748031
quant_ifm.quantization.quant_min = -128
diff --git a/ethosu/vela/test/test_lut.py b/ethosu/vela/test/test_lut.py
index 90732707..712be7a2 100644
--- a/ethosu/vela/test/test_lut.py
+++ b/ethosu/vela/test/test_lut.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2020-2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
+# SPDX-FileCopyrightText: Copyright 2020-2021, 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -18,8 +18,6 @@
# Unit tests for LUT support
import random
-import numpy as np
-
from ethosu.vela import lut
from ethosu.vela import mark_tensors
from ethosu.vela import pass_packing
@@ -37,9 +35,7 @@ from ethosu.vela.test import testutil
def set_256_lut(op, key, arch):
random.seed(key)
values = random.choices(range(256), k=256)
- lut_tensor = create_const_tensor(
- op.name + "_lut", [1, 1, 1, 256], DataType.int8, values, np.uint8, TensorPurpose.LUT
- )
+ lut_tensor = create_const_tensor(op.name + "_lut", [1, 1, 1, 256], DataType.int8, values, TensorPurpose.LUT)
scratch_lut_tensor = lut_tensor.clone_into_fast_storage(arch)
op.set_activation_lut(scratch_lut_tensor)
@@ -47,9 +43,7 @@ def set_256_lut(op, key, arch):
def set_1K_lut(op, key, arch):
random.seed(key)
values = random.choices(range(256), k=256)
- lut_tensor = create_const_tensor(
- op.name + "_lut", [1, 1, 1, 256], DataType.int32, values, np.uint32, TensorPurpose.LUT
- )
+ lut_tensor = create_const_tensor(op.name + "_lut", [1, 1, 1, 256], DataType.int32, values, TensorPurpose.LUT)
scratch_lut_tensor = lut_tensor.clone_into_fast_storage(arch)
op.set_activation_lut(scratch_lut_tensor)
@@ -57,9 +51,7 @@ def set_1K_lut(op, key, arch):
def set_2K_lut(op, key, arch):
random.seed(key)
values = random.choices(range(512), k=512)
- lut_tensor = create_const_tensor(
- op.name + "_lut", [1, 1, 1, 512], DataType.int32, values, np.uint32, TensorPurpose.LUT
- )
+ lut_tensor = create_const_tensor(op.name + "_lut", [1, 1, 1, 512], DataType.int32, values, TensorPurpose.LUT)
scratch_lut_tensor = lut_tensor.clone_into_fast_storage(arch)
op.set_activation_lut(scratch_lut_tensor)
diff --git a/ethosu/vela/test/test_tflite_model_semantic.py b/ethosu/vela/test/test_tflite_model_semantic.py
index c242063d..2e0936d0 100644
--- a/ethosu/vela/test/test_tflite_model_semantic.py
+++ b/ethosu/vela/test/test_tflite_model_semantic.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
+# SPDX-FileCopyrightText: Copyright 2021-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -195,11 +195,11 @@ def test_constraint_splitv_inferred():
# SplitV requires a maximum of one inferred shape (-1)
qp = testutil.default_quant_params()
op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
- sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], np.int16, quantization=qp)
+ sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, -1, 2, -1]]]], quantization=qp)
op.add_input_tensor(sizes)
assert not semantic_checker.is_operator_semantic_valid(op)
op = testutil.create_op_with_quant_tensors(Op.SplitV, [1, 1, 1, 8], [1, 1, 1, 8])
- sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], np.int16, quantization=qp)
+ sizes = create_const_tensor("sizes", [1, 1, 1, 4], DataType.int16, [[[[0, 1, 2, -1]]]], quantization=qp)
op.add_input_tensor(sizes)
assert semantic_checker.is_operator_semantic_valid(op)
@@ -278,7 +278,8 @@ def create_pad_op(
qp = testutil.default_quant_params()
in0 = Tensor(in_shape, in_dtype, "in")
in0.quantization = qp
- pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
+ shape = [] if padding == [] else list(np.shape(padding))
+ pad_tensor = create_const_tensor(name="pad", shape=shape, values=padding, dtype=pad_dtype)
out = Tensor(out_shape, out_dtype, "out")
out.quantization = qp.clone()
op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
@@ -449,9 +450,9 @@ def create_mean(input_shape, output_shape, axis, datatype, attrs):
ofm = Tensor(output_shape, datatype, "out")
ofm.quantization = testutil.default_quant_params()
if type(axis) is list:
- indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
+ indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis)
elif type(axis) is int:
- indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
+ indices = create_const_tensor("indices", [], DataType.int32, axis)
op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
return op
diff --git a/ethosu/vela/test/test_tflite_supported_operators.py b/ethosu/vela/test/test_tflite_supported_operators.py
index d091531d..6a0b58e3 100644
--- a/ethosu/vela/test/test_tflite_supported_operators.py
+++ b/ethosu/vela/test/test_tflite_supported_operators.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
+# SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -303,55 +303,55 @@ def test_constraint_resize():
for resize_op in Op.op_set(Op.is_resize_op):
# IFM W and H == 1
op = testutil.create_op_with_quant_tensors(resize_op, [1, 1, 1, 8], [1, 8, 8, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
assert support.is_operator_supported(op)
# IFM == OFM
op = testutil.create_op_with_quant_tensors(resize_op, [1, 8, 8, 8], [1, 8, 8, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
assert support.is_operator_supported(op)
# IFM x2 == OFM ; align_corners = False
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
assert support.is_operator_supported(op)
# IFM x4 == OFM ; align_corners = False
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 16, 16, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [16, 16]))
assert support.is_operator_supported(op)
# IFM x8 == OFM ; align_corners = False
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 32, 32, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [32, 32]))
assert support.is_operator_supported(op)
# IFM -1 x2 == OFM -1 ; align_corners = True
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 7, 7, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7]))
op.attrs["align_corners"] = True
assert support.is_operator_supported(op)
# IFM -1 x4 == OFM -1 ; align_corners = True
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 13, 13, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [13, 13]))
op.attrs["align_corners"] = True
assert support.is_operator_supported(op)
# IFM -1 x8 == OFM -1 ; align_corners = True
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 25, 25, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [25, 25]))
op.attrs["align_corners"] = True
assert support.is_operator_supported(op)
# Invalid case - upscale size
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 17, 17, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [17, 17]))
assert not support.is_operator_supported(op)
# Invalid case - upscale size with align corners
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 15, 15, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [15, 15]))
op.attrs["align_corners"] = True
assert not support.is_operator_supported(op)
@@ -360,7 +360,7 @@ def test_constraint_resize_size():
for resize_op in Op.op_set(Op.is_resize_op):
# Invalid case - size != ofm size
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [7, 7]))
assert not support.is_operator_supported(op)
@@ -368,7 +368,7 @@ def test_constraint_resize_attrs():
for resize_op in Op.op_set(Op.is_resize_op):
# Invalid case - both align corners and half-pixel centers
op = testutil.create_op_with_quant_tensors(resize_op, [1, 4, 4, 8], [1, 8, 8, 8])
- op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8], np.int32))
+ op.add_input_tensor(create_const_tensor("size", [2], DataType.int32, [8, 8]))
op.attrs["align_corners"] = True
op.attrs["half_pixel_centers"] = True
assert not support.is_operator_supported(op)
@@ -395,7 +395,8 @@ def create_pad_op(
qp = testutil.default_quant_params()
in0 = Tensor(in_shape, in_dtype, "in")
in0.quantization = qp
- pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
+ shape = [] if padding == [] else list(np.shape(padding))
+ pad_tensor = create_const_tensor(name="pad", shape=shape, values=padding, dtype=pad_dtype)
out = Tensor(out_shape, out_dtype, "out")
out.quantization = qp.clone()
op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
@@ -587,9 +588,9 @@ def create_mean(input_shape, output_shape, axis, datatype, attrs):
ofm = Tensor(output_shape, datatype, "out")
ofm.quantization = testutil.default_quant_params()
if type(axis) is list:
- indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis, np.uint8)
+ indices = create_const_tensor("indices", [len(axis)], DataType.int32, axis)
elif type(axis) is int:
- indices = create_const_tensor("indices", [], DataType.int32, axis, np.uint8)
+ indices = create_const_tensor("indices", [], DataType.int32, axis)
op = testutil.create_op(Op.Mean, [ifm, indices], ofm, attrs)
return op
diff --git a/ethosu/vela/test/testutil.py b/ethosu/vela/test/testutil.py
index acf35fe3..88fc8747 100644
--- a/ethosu/vela/test/testutil.py
+++ b/ethosu/vela/test/testutil.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2020-2021 Arm Limited and/or its affiliates <open-source-office@arm.com>
+# SPDX-FileCopyrightText: Copyright 2020-2021, 2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -53,21 +53,13 @@ def create_elemwise_op(
ofm_quant=default_quant_params(),
):
# Creates elementwise operation with constant IFM/IFM2
- if datatype.size_in_bytes() == 1:
- np_type = np.uint8
- elif datatype.size_in_bytes() == 2:
- np_type = np.int16
- else:
- np_type = np.int32
op = Operation(op_type, name)
op.add_input_tensor(
- create_const_tensor(name + "_ifm", ifm_shape, datatype, np.zeros(ifm_shape), np_type, quantization=ifm_quant)
+ create_const_tensor(name + "_ifm", ifm_shape, datatype, np.zeros(ifm_shape), quantization=ifm_quant)
)
if ifm2_shape is not None:
op.add_input_tensor(
- create_const_tensor(
- name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), np_type, quantization=ifm2_quant
- )
+ create_const_tensor(name + "_ifm2", ifm2_shape, datatype, np.zeros(ifm2_shape), quantization=ifm2_quant)
)
ofm = Tensor(ofm_shape, datatype, name + "_ofm")
ofm.quantization = ofm_quant
@@ -89,25 +81,17 @@ def create_op_with_quant_tensors(
op.set_output_tensor(ofm)
# Optional weight tensor
if weights_shape is not None:
- if datatype.size_in_bytes() == 1:
- np_type = np.uint8
- elif datatype.size_in_bytes() == 2:
- np_type = np.int16
- else:
- np_type = np.int32
qp = default_quant_params()
if op.type is not Op.FullyConnected:
qp.zero_point = np.zeros(weights_shape)
- weights = create_const_tensor(
- "weights", weights_shape, datatype, np.zeros(weights_shape), np_type, quantization=qp
- )
+ weights = create_const_tensor("weights", weights_shape, datatype, np.zeros(weights_shape), quantization=qp)
op.add_input_tensor(weights)
# Optional bias tensor
if bias_shape is not None:
qp = default_quant_params()
if op.type is not Op.FullyConnected:
qp.zero_point = np.zeros(bias_shape)
- bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), np.int32, quantization=qp)
+ bias = create_const_tensor("bias", bias_shape, DataType.int32, np.zeros(bias_shape), quantization=qp)
op.add_input_tensor(bias)
if set_ifm_ofm_shapes: