author    Manuel Bottini <manuel.bottini@arm.com>    2020-01-30 17:30:32 +0000
committer Manuel Bottini <manuel.bottini@arm.com>    2020-02-07 15:59:28 +0000
commit    2f60221e60b69852918581b4eb450a0f81455a46 (patch)
tree      25bed812a94b1dca4ec58e22f8d6a287b003106f /tests/validation/CL/QuantizationLayer.cpp
parent    0b18d9740f04cc4e9cb6000a76b9c1dcd8327e24 (diff)
download  ComputeLibrary-2f60221e60b69852918581b4eb450a0f81455a46.tar.gz
COMPMID-3046: Add CLRequantizationLayerKernel
Change-Id: I034f5aa023642f2323372495ddd14fc62b4c12e0
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2681
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/CL/QuantizationLayer.cpp')
-rw-r--r--  tests/validation/CL/QuantizationLayer.cpp | 73
1 file changed, 67 insertions(+), 6 deletions(-)
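The new Quantized test suite in the diff below exercises the requantization path added by this patch (quantized input to quantized output, e.g. QASYMM8 to QASYMM8_SIGNED). As a rough illustration only, the sketch below shows how the same operation might be driven through the public CLQuantizationLayer API; the shape and QuantizationInfo values are illustrative, loosely mirroring the test datasets, and are not taken from the patch itself.

```cpp
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLQuantizationLayer.h"

using namespace arm_compute;

int main()
{
    // Initialise the default OpenCL context/queue before running any CL function.
    CLScheduler::get().default_init();

    // Illustrative tensors: requantize QASYMM8 (scale 2.0, offset 15) to
    // QASYMM8_SIGNED (scale 1.0, offset 10). Values mirror the test datasets, not the patch.
    CLTensor src;
    CLTensor dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(2.0f, 15)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 16U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.0f, 10)));

    // With a quantized input, configure() is expected to pick the requantization path added by this patch.
    CLQuantizationLayer quantize;
    quantize.configure(&src, &dst);

    // Allocate the backing CL buffers and execute.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    quantize.run();

    return 0;
}
```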
diff --git a/tests/validation/CL/QuantizationLayer.cpp b/tests/validation/CL/QuantizationLayer.cpp
index e9544fdb8a..e3f47f98a8 100644
--- a/tests/validation/CL/QuantizationLayer.cpp
+++ b/tests/validation/CL/QuantizationLayer.cpp
@@ -42,9 +42,12 @@ namespace validation
{
namespace
{
-constexpr AbsoluteTolerance<float> tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
-const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
+constexpr AbsoluteTolerance<float> tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1); /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8 data types */
+constexpr AbsoluteTolerance<int8_t> tolerance_s8(1); /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8_SIGNED data types */
+constexpr AbsoluteTolerance<uint16_t> tolerance_u16(1); /**< Tolerance value for comparing reference's output against implementation's output for QASYMM16 data types */
+const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
+const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
} // namespace
TEST_SUITE(CL)
@@ -53,7 +56,7 @@ TEST_SUITE(QuantizationLayer)
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong input data type
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong output data type
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong output data type
TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32), // Mismatching shapes
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Valid
@@ -125,7 +128,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16Fixture<floa
framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
+ validate(CLAccessor(_target), _reference, tolerance_u16);
}
FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, CLQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
framework::dataset::make("DataTypeIn", DataType::F32)),
@@ -141,7 +144,7 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, CLQuantizationLayerQASYMM16Fixture<floa
framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_f32);
+ validate(CLAccessor(_target), _reference, tolerance_u16);
}
TEST_SUITE_END() // FP32
@@ -165,6 +168,64 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, CLQuantizationLayerQASYMM8Fixture<half>,
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+template <typename T>
+using CLQuantizationLayerQASYMM8GenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM8_SIGNEDGenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, int8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM16GenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint16_t>;
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_u8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_s8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16GenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(4.0f, 23) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_u16);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+ framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 5) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_s8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.0f, 10) })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.0f, 30) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_u8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+
TEST_SUITE_END() // QuantizationLayer
TEST_SUITE_END() // CL
} // namespace validation