about summary refs log tree commit diff
path: root/ethosu/vela/test/test_graph_optimiser.py
diff options
context:
space:
mode:
author: Tim Hall <tim.hall@arm.com> 2023-01-13 17:57:25 +0000
committer: tim.hall <tim.hall@arm.com> 2023-01-20 14:07:21 +0000
commit3b1578e44b4c6a8c8c9a8e0891d3866a89bd66af (patch)
tree491c337bc854d435b80f0a535496084ea9ebc9ac /ethosu/vela/test/test_graph_optimiser.py
parentf34904717f643499f3ea6210322bbe1b635db088 (diff)
downloadethos-u-vela-3b1578e44b4c6a8c8c9a8e0891d3866a89bd66af.tar.gz
MLBEDSW-7151: MLCE: Difference in model output between x86 & aarch64
- The issue is due to undefined behaviour when casting a NumPy float to a NumPy unsigned integer, which occurs in create_const_tensor()
- The fix is to make sure that the values are first cast to a Python float
- In addition, the values datatype argument has been removed from create_const_tensor() to stop the tensor and values datatypes getting out of sync

Change-Id: I134b9be8c941b361929a5ae7db8cb35f2e9728f2
Signed-off-by: Tim Hall <tim.hall@arm.com>
Diffstat (limited to 'ethosu/vela/test/test_graph_optimiser.py')
-rw-r--r--ethosu/vela/test/test_graph_optimiser.py21
1 file changed, 8 insertions, 13 deletions
diff --git a/ethosu/vela/test/test_graph_optimiser.py b/ethosu/vela/test/test_graph_optimiser.py
index 152669f7..54dd70f6 100644
--- a/ethosu/vela/test/test_graph_optimiser.py
+++ b/ethosu/vela/test/test_graph_optimiser.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
+# SPDX-FileCopyrightText: Copyright 2020-2023 Arm Limited and/or its affiliates <open-source-office@arm.com>
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -40,9 +40,9 @@ from ethosu.vela.tflite_graph_optimiser import rewrite_fully_connected_input
def test_convert_batched_fc():
"""Tests shape conversion of batched fully connected"""
ifm_shape = [4, 8]
- ifm = create_const_tensor("test_in", ifm_shape, np.uint8, np.zeros(ifm_shape))
+ ifm = create_const_tensor("test_in", ifm_shape, DataType.uint8, np.zeros(ifm_shape))
w_shape = [8, 4]
- weights = create_const_tensor("weight_in", w_shape, np.uint8, np.zeros(w_shape))
+ weights = create_const_tensor("weight_in", w_shape, DataType.uint8, np.zeros(w_shape))
ofm = Tensor(ifm.shape, np.uint8, "test_out")
op = testutil.create_op(Op.FullyConnected, [ifm, weights], ofm)
@@ -132,7 +132,8 @@ def create_pad_and_conv2d(
qp = testutil.default_quant_params()
in0 = Tensor(in_shape, in_dtype, "in")
in0.quantization = qp
- pad_tensor = create_const_tensor(name="pad", shape=list(np.shape(padding)), values=padding, dtype=pad_dtype)
+ shape = [] if padding == [] else list(np.shape(padding))
+ pad_tensor = create_const_tensor(name="pad", shape=shape, values=padding, dtype=pad_dtype)
out = Tensor(out_shape, out_dtype, "out")
out.quantization = qp.clone()
op = testutil.create_op(Op.Pad, [in0, pad_tensor], out)
@@ -543,9 +544,7 @@ def test_quant_static_optimisations():
Tests if the quant value at vela compile time is calculated correctly
"""
- quant_ifm = create_const_tensor(
- "const_quant_ifm", values=np.array(127), value_dtype=np.int8, shape=[], dtype=DataType.int8
- )
+ quant_ifm = create_const_tensor("const_quant_ifm", values=np.array(127), shape=[], dtype=DataType.int8)
quant_ifm.quantization = testutil.default_quant_params()
quant_ifm.quantization.scale_f32 = 0.15748031
quant_ifm.quantization.quant_min = -128
@@ -568,9 +567,7 @@ def test_quant_static_optimisations():
assert op.ofm.values == 127
- quant_ifm = create_const_tensor(
- "const_quant_ifm", values=np.array(127), value_dtype=np.int8, shape=[], dtype=DataType.int8
- )
+ quant_ifm = create_const_tensor("const_quant_ifm", values=np.array(127), shape=[], dtype=DataType.int8)
quant_ifm.quantization = testutil.default_quant_params()
quant_ifm.quantization.scale_f32 = 0.15748031
quant_ifm.quantization.quant_min = -128
@@ -600,9 +597,7 @@ def test_optimise_quantize_multiple_values():
when passing multiple values to quantize node
"""
- quant_ifm = create_const_tensor(
- "const_quant_ifm", values=np.array([127, 127]), value_dtype=np.int8, shape=[], dtype=DataType.int8
- )
+ quant_ifm = create_const_tensor("const_quant_ifm", values=np.array([127, 127]), shape=[], dtype=DataType.int8)
quant_ifm.quantization = testutil.default_quant_params()
quant_ifm.quantization.scale_f32 = 0.15748031
quant_ifm.quantization.quant_min = -128