author     Viet-Hoa Do <viet-hoa.do@arm.com>    2023-03-15 14:05:06 +0000
committer  Viet-Hoa Do <viet-hoa.do@arm.com>    2023-03-29 14:03:30 +0000
commit     fd472f05dc73005a89a5e6275940ab5c9a609485 (patch)
tree       4a00f42f64f4bea72c489961aaa376665d324c60 /tests/validation
parent     5a7d1571a2de24eefc6f1d8d22deeef9f47521ee (diff)
download   ComputeLibrary-fd472f05dc73005a89a5e6275940ab5c9a609485.tar.gz
Add quantized support for unary elementwise in CPU
* Add quantized unary elementwise in CPU using LUT.
* Widen the input data range of the test suite.
  - Fix CPU exponential function overflow/underflow range.
  - Fix saturation issue of CL round operator.

Resolves: COMPMID-5763
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I41445de2b4a33ec6b01e0ab701516c240c852d0b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9367
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
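[Editor's note] The LUT approach mentioned above works because an 8-bit quantized input can only take 256 distinct values, so the float function can be evaluated once per possible input value and the results requantized into a table; the per-element work at run time then reduces to a single lookup. The sketch below is a hypothetical stand-alone illustration of that idea, not the library's internal kernel code: the UniformQuantization struct and make_qasymm8_lut helper are made up for this note, and the quantization parameters are borrowed from the QASYMM8 Exp test case added in this patch.

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <vector>

// Uniform quantization parameters (scale, zero-point offset), mirroring
// QuantizationInfo(scale, offset) as used by the test cases below.
struct UniformQuantization
{
    float   scale;
    int32_t offset;
};

// Build a 256-entry table for a unary float function f: dequantize each
// possible 8-bit input value, apply f in float, then requantize and saturate.
template <typename F>
std::array<uint8_t, 256> make_qasymm8_lut(F f, UniformQuantization in_q, UniformQuantization out_q)
{
    std::array<uint8_t, 256> lut{};
    for(int q = 0; q < 256; ++q)
    {
        const float x = (q - in_q.offset) * in_q.scale;                                // dequantize
        const float y = f(x);                                                          // apply op in float
        const int   r = static_cast<int>(std::lround(y / out_q.scale)) + out_q.offset; // requantize
        lut[q]        = static_cast<uint8_t>(std::min(255, std::max(0, r)));           // saturate to [0, 255]
    }
    return lut;
}

int main()
{
    // Quantization parameters taken from the QASYMM8 Exp test case in this patch.
    const UniformQuantization in_q{ 0.01f, 0 };
    const UniformQuantization out_q{ 0.003f, 10 };
    const auto                lut = make_qasymm8_lut([](float v) { return std::exp(v); }, in_q, out_q);

    // Run time: one table lookup per element.
    std::vector<uint8_t> src{ 0, 50, 100, 200 };
    std::vector<uint8_t> dst(src.size());
    std::transform(src.begin(), src.end(), dst.begin(), [&lut](uint8_t v) { return lut[v]; });
    return 0;
}

A QASYMM8_SIGNED table would be built the same way, with the lookup index biased by 128 and the result clamped to [-128, 127].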
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/NEON/ElementwiseAbsoluteValue.cpp    33
-rw-r--r--  tests/validation/NEON/ElementwiseExpLayer.cpp         34
-rw-r--r--  tests/validation/NEON/ElementwiseLog.cpp              34
-rw-r--r--  tests/validation/NEON/ElementwiseNegation.cpp         33
-rw-r--r--  tests/validation/NEON/ElementwiseRound.cpp            37
-rw-r--r--  tests/validation/NEON/ElementwiseRsqrtLayer.cpp       33
-rw-r--r--  tests/validation/NEON/ElementwiseSin.cpp              34
-rw-r--r--  tests/validation/fixtures/ElementwiseUnaryFixture.h  212
-rw-r--r--  tests/validation/reference/ElementwiseUnary.cpp       30
9 files changed, 440 insertions(+), 40 deletions(-)
diff --git a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
index ccde670034..7f6a6a5bb2 100644
--- a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
+++ b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
@@ -53,6 +55,9 @@ TEST_SUITE(AbsLayer)
template <typename T>
using NEAbsLayerFixture = AbsValidationFixture<Tensor, Accessor, NEAbsLayer, T>;
+template <typename T>
+using NEAbsLayerQuantizedFixture = AbsQuantizedValidationFixture<Tensor, Accessor, NEAbsLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -107,6 +112,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEAbsLayerFixture<int32_t>, framework::DatasetM
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // AbsLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseExpLayer.cpp b/tests/validation/NEON/ElementwiseExpLayer.cpp
index f9e5f39989..e8940c5385 100644
--- a/tests/validation/NEON/ElementwiseExpLayer.cpp
+++ b/tests/validation/NEON/ElementwiseExpLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(ExpLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(ExpLayer)
template <typename T>
using NEExpLayerFixture = ExpValidationFixture<Tensor, Accessor, NEExpLayer, T>;
+template <typename T>
+using NEExpLayerQuantizedFixture = ExpQuantizedValidationFixture<Tensor, Accessor, NEExpLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -82,6 +87,33 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerFixture<float>, framework::DatasetMod
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.01, 0) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.003, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.02, -1) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.002, -2) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
+
TEST_SUITE_END() // ExpLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseLog.cpp b/tests/validation/NEON/ElementwiseLog.cpp
index 3aa7fb3665..49a88ced1c 100644
--- a/tests/validation/NEON/ElementwiseLog.cpp
+++ b/tests/validation/NEON/ElementwiseLog.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(LogLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(LogLayer)
template <typename T>
using NELogLayerFixture = LogValidationFixture<Tensor, Accessor, NELogLayer, T>;
+template <typename T>
+using NELogLayerQuantizedFixture = LogQuantizedValidationFixture<Tensor, Accessor, NELogLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -88,6 +93,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NELogLayerFixture<float>, framework::DatasetMod
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NELogLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(10.5, 0) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NELogLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.75, -128) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(12.5, -2) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // LogLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseNegation.cpp b/tests/validation/NEON/ElementwiseNegation.cpp
index 0b63588d8a..038058c70c 100644
--- a/tests/validation/NEON/ElementwiseNegation.cpp
+++ b/tests/validation/NEON/ElementwiseNegation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(NegLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(NegLayer)
template <typename T>
using NENegLayerFixture = NegValidationInPlaceFixture<Tensor, Accessor, NENegLayer, T>;
+template <typename T>
+using NENegLayerQuantizedFixture = NegQuantizedValidationFixture<Tensor, Accessor, NENegLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -113,6 +118,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NENegLayerFixture<int32_t>, framework::DatasetM
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // NegLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseRound.cpp b/tests/validation/NEON/ElementwiseRound.cpp
index d2f0b456a0..a6ff47c830 100644
--- a/tests/validation/NEON/ElementwiseRound.cpp
+++ b/tests/validation/NEON/ElementwiseRound.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,12 +40,20 @@ namespace test
{
namespace validation
{
+namespace
+{
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
+} // namespace
TEST_SUITE(NEON)
TEST_SUITE(RoundLayer)
template <typename T>
using NERoundLayerFixture = RoundValidationFixture<Tensor, Accessor, NERoundLayer, T>;
+template <typename T>
+using NERoundLayerQuantizedFixture = RoundQuantizedValidationFixture<Tensor, Accessor, NERoundLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -81,6 +89,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NERoundLayerFixture<float>, framework::DatasetM
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERoundLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERoundLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // RoundLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp
index 2d52183b15..1d291ac6dc 100644
--- a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp
+++ b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(RsqrtLayer)
@@ -72,6 +74,9 @@ TEST_SUITE_END() // DynamicShape
template <typename T>
using NERsqrtLayerFixture = RsqrtValidationFixture<Tensor, Accessor, NERsqrtLayer, T>;
+template <typename T>
+using NERsqrtLayerQuantizedFixture = RsqrtQuantizedValidationFixture<Tensor, Accessor, NERsqrtLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -102,6 +107,32 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerFixture<float>, framework::DatasetM
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(20, 0) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(25, -128) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // RsqrtLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseSin.cpp b/tests/validation/NEON/ElementwiseSin.cpp
index 06775c0690..76f4c50b46 100644
--- a/tests/validation/NEON/ElementwiseSin.cpp
+++ b/tests/validation/NEON/ElementwiseSin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ AbsoluteTolerance<float> tolerance_fp32(0.00001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
AbsoluteTolerance<float> tolerance_fp16(0.0005f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(SinLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(SinLayer)
template <typename T>
using NESinLayerFixture = SinValidationFixture<Tensor, Accessor, NESinLayer, T>;
+template <typename T>
+using NESinLayerQuantizedFixture = SinQuantizedValidationFixture<Tensor, Accessor, NESinLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -89,6 +94,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture<float>, framework::DatasetMod
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(200, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.07, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(123, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
+
TEST_SUITE_END() // SinLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h
index 1dc4f03e99..9b40d34d2b 100644
--- a/tests/validation/fixtures/ElementwiseUnaryFixture.h
+++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h
@@ -24,8 +24,10 @@
#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -33,6 +35,11 @@
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ElementwiseUnary.h"
+#include <tuple>
+#include <limits>
+#include <type_traits>
+#include <vector>
+
namespace arm_compute
{
namespace test
@@ -64,67 +71,131 @@ protected:
{
case ElementWiseUnary::EXP:
{
- FloatDistributionType distribution{ FloatType(-1.0f), FloatType(1.0f) };
- library->fill(tensor, distribution, i);
+ switch(data_type)
+ {
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ FloatType(-86.63f), FloatType(88.36f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-9.00f), FloatType(10.73f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
break;
}
case ElementWiseUnary::RSQRT:
+ case ElementWiseUnary::LOG:
{
- if(data_type == DataType::F32 || data_type == DataType::F16)
+ // For floating-point data type, the chosen input range is all positive numbers
+ // (i.e. positive and negative zeros are excluded).
+ switch(data_type)
{
- FloatDistributionType distribution{ FloatType(1.0f), FloatType(2.0f) };
- library->fill(tensor, distribution, i);
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ std::numeric_limits<float>::min(), std::numeric_limits<float>::max() };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(0.00006103515625f), FloatType(65504.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
}
- else
+
+ break;
+ }
+ case ElementWiseUnary::SIN:
+ {
+ switch(data_type)
{
- library->fill_tensor_uniform(tensor, i);
+ case DataType::F32:
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-100.0f), FloatType(100.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
}
+
break;
}
case ElementWiseUnary::ABS:
case ElementWiseUnary::NEG:
+ case ElementWiseUnary::ROUND:
{
switch(data_type)
{
- case DataType::F16:
+ case DataType::F32:
{
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -2.0f, 2.0f };
+ FloatDistributionType distribution{ std::numeric_limits<float>::lowest() / 2, std::numeric_limits<float>::max() / 2 };
library->fill(tensor, distribution, i);
break;
}
- case DataType::F32:
+
+ case DataType::F16:
{
- FloatDistributionType distribution{ FloatType(-2.0f), FloatType(2.0f) };
+ FloatDistributionType distribution{ FloatType(-65504.0f), FloatType(65504.0f) };
library->fill(tensor, distribution, i);
break;
}
+
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
library->fill(tensor, distribution, i);
break;
}
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
default:
- ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented");
+ ARM_COMPUTE_ERROR("Not implemented");
}
- break;
- }
- case ElementWiseUnary::LOG:
- {
- FloatDistributionType distribution{ FloatType(0.0000001f), FloatType(100.0f) };
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::SIN:
- {
- FloatDistributionType distribution{ FloatType(-100.00f), FloatType(100.00f) };
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::ROUND:
- {
- FloatDistributionType distribution{ FloatType(100.0f), FloatType(-100.0f) };
- library->fill(tensor, distribution, i);
+
break;
}
default:
@@ -199,6 +270,8 @@ protected:
SimpleTensor<T> _reference{};
ElementWiseUnary _op{};
bool _use_dynamic_shape{ false };
+ QuantizationInfo _input_qinfo{};
+ QuantizationInfo _output_qinfo{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class RsqrtQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
@@ -245,6 +318,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ExpQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -256,6 +340,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -267,6 +362,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -278,6 +384,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -289,6 +406,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AbsQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -300,6 +428,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SinQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -309,6 +448,17 @@ public:
ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND);
}
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RoundQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND, false, iq, oq);
+ }
+};
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/reference/ElementwiseUnary.cpp b/tests/validation/reference/ElementwiseUnary.cpp
index d5218d772d..558f9d24fc 100644
--- a/tests/validation/reference/ElementwiseUnary.cpp
+++ b/tests/validation/reference/ElementwiseUnary.cpp
@@ -88,8 +88,21 @@ SimpleTensor<int8_t> elementwise_unary(const SimpleTensor<int8_t> &src, SimpleTe
dst_tmp[i] = (127.0f - dst.quantization_info().uniform().offset) * dst.quantization_info().uniform().scale ;
}
break;
+
+ case ElementWiseUnary::LOG:
+ if(src_tmp[i] != 0)
+ {
+ dst_tmp[i] = std::log(src_tmp[i]);
+ }
+ else
+ {
+ dst_tmp[i] = (-128.0f - dst.quantization_info().uniform().offset) * dst.quantization_info().uniform().scale ;
+ }
+ break;
+
default:
- ARM_COMPUTE_ERROR("Not implemented");
+ elementwise_unary(src_tmp, dst_tmp, op);
+ break;
}
}
dst = convert_to_asymmetric<int8_t>(dst_tmp, dst.quantization_info());
@@ -122,8 +135,21 @@ SimpleTensor<uint8_t> elementwise_unary(const SimpleTensor<uint8_t> &src, Simple
dst_tmp[i] = (255.0f - dst.quantization_info().uniform().offset)* dst.quantization_info().uniform().scale;
}
break;
+
+ case ElementWiseUnary::LOG:
+ if(src_tmp[i] != 0)
+ {
+ dst_tmp[i] = std::log(src_tmp[i]);
+ }
+ else
+ {
+ dst_tmp[i] = -dst.quantization_info().uniform().offset * dst.quantization_info().uniform().scale;
+ }
+ break;
+
default:
- ARM_COMPUTE_ERROR("Not implemented");
+ elementwise_unary(src_tmp, dst_tmp, op);
+ break;
}
}
dst = convert_to_asymmetric<uint8_t>(dst_tmp, dst.quantization_info());