path: root/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
author     Viet-Hoa Do <viet-hoa.do@arm.com>  2023-03-15 14:05:06 +0000
committer  Viet-Hoa Do <viet-hoa.do@arm.com>  2023-03-29 14:03:30 +0000
commit     fd472f05dc73005a89a5e6275940ab5c9a609485 (patch)
tree       4a00f42f64f4bea72c489961aaa376665d324c60 /tests/validation/NEON/ElementwiseAbsoluteValue.cpp
parent     5a7d1571a2de24eefc6f1d8d22deeef9f47521ee (diff)
download   ComputeLibrary-fd472f05dc73005a89a5e6275940ab5c9a609485.tar.gz
Add quantized support for unary elementwise in CPU
* Add quantized unary elementwise in CPU using LUT.
* Widen the input data range of the test suite.
  - Fix CPU exponential function overflow/underflow range.
  - Fix saturation issue of CL round operator.

Resolves: COMPMID-5763
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I41445de2b4a33ec6b01e0ab701516c240c852d0b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9367
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
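Note on the LUT approach mentioned in the commit message: for an 8-bit quantized tensor, a unary operator such as abs can be precomputed for all 256 possible input values by dequantizing, applying the operator, and requantizing with saturation, after which the kernel reduces to one table lookup per element. The sketch below is an illustration only, not the ComputeLibrary kernel; the helper names and the QInfo struct are assumptions.

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>

struct QInfo { float scale; int32_t offset; };  // hypothetical stand-in for QuantizationInfo

// Build a 256-entry table mapping every possible QASYMM8 input to its output.
std::array<uint8_t, 256> make_abs_lut(QInfo in, QInfo out)
{
    std::array<uint8_t, 256> lut{};
    for(int q = 0; q < 256; ++q)
    {
        const float x = (q - in.offset) * in.scale;                                 // dequantize
        const float y = std::fabs(x);                                               // unary op (abs)
        const int   r = static_cast<int>(std::lround(y / out.scale)) + out.offset;  // requantize
        lut[q] = static_cast<uint8_t>(std::min(255, std::max(0, r)));               // saturate to [0, 255]
    }
    return lut;
}

// Element-wise execution is then a single lookup per element, no float math in the loop.
void abs_qasymm8(const uint8_t *src, uint8_t *dst, std::size_t n, const std::array<uint8_t, 256> &lut)
{
    for(std::size_t i = 0; i < n; ++i)
    {
        dst[i] = lut[src[i]];
    }
}

If the reference path applies the same dequantize/abs/requantize rounding, an exact match is expected, which would be consistent with the zero tolerances used in the new tests below.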
Diffstat (limited to 'tests/validation/NEON/ElementwiseAbsoluteValue.cpp')
-rw-r--r--  tests/validation/NEON/ElementwiseAbsoluteValue.cpp  33
1 file changed, 32 insertions, 1 deletion
diff --git a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
index ccde670034..7f6a6a5bb2 100644
--- a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
+++ b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
@@ -53,6 +55,9 @@ TEST_SUITE(AbsLayer)
template <typename T>
using NEAbsLayerFixture = AbsValidationFixture<Tensor, Accessor, NEAbsLayer, T>;
+template <typename T>
+using NEAbsLayerQuantizedFixture = AbsQuantizedValidationFixture<Tensor, Accessor, NEAbsLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -107,6 +112,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEAbsLayerFixture<int32_t>, framework::DatasetM
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // AbsLayer
TEST_SUITE_END() // Neon
} // namespace validation
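For context on the quantization parameters in the QASYMM8 test above (input QuantizationInfo(0.2, -3), output QuantizationInfo(0.5, 10)): an 8-bit value maps to a real value as x = (q - offset) * scale and back as q = round(x / scale) + offset. The standalone worked example below is not part of the patch; the element value is arbitrary.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main()
{
    const float in_scale  = 0.2f;  const int in_offset  = -3;   // input QuantizationInfo from the test
    const float out_scale = 0.5f;  const int out_offset = 10;   // output QuantizationInfo from the test

    const float   x     = 10.0f;                                                            // real input value
    const uint8_t q_in  = static_cast<uint8_t>(std::lround(x / in_scale) + in_offset);      // 47
    const float   deq   = (q_in - in_offset) * in_scale;                                    // back to 10.0
    const uint8_t q_out = static_cast<uint8_t>(std::lround(std::fabs(deq) / out_scale) + out_offset); // 30

    std::printf("q_in=%u q_out=%u\n", static_cast<unsigned>(q_in), static_cast<unsigned>(q_out));
    return 0;
}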