Diffstat (limited to 'tests/validation/NEON/QuantizationLayer.cpp')
-rw-r--r--  tests/validation/NEON/QuantizationLayer.cpp  41
1 file changed, 37 insertions(+), 4 deletions(-)
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index 8d19c93761..49118f7dc5 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -43,7 +43,8 @@ namespace validation
namespace
{
/** Tolerance for quantization */
-constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1);
+constexpr AbsoluteTolerance<uint8_t>  tolerance_u8(1);
+constexpr AbsoluteTolerance<uint16_t> tolerance_u16(1);
const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
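
The new tolerance_u16 bounds the difference between the NEON kernel's QASYMM16 output and the reference quantization computed with the test's QuantizationInfo(0.5f, 10). A minimal sketch of that mapping, assuming the usual asymmetric rule q = clamp(round(x / scale) + offset, 0, 65535); the helper name below is hypothetical (the library ships its own quantization utilities), and rounding-policy details may differ, which is why an absolute tolerance of 1 is allowed:

// Sketch only: reference QASYMM16 quantization assumed by the new tests.
#include <algorithm>
#include <cmath>
#include <cstdint>

uint16_t quantize_qasymm16_ref(float x, float scale, int32_t offset)
{
    // Scale, round, shift by the zero-point offset, then saturate to [0, 65535].
    const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + offset;
    return static_cast<uint16_t>(std::min<int32_t>(std::max<int32_t>(q, 0), 65535));
}

// With QuantizationInfo(0.5f, 10): 1.0f -> 12, 100.0f -> 210, -6.0f -> 0 (saturated).
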
@@ -98,6 +99,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
template <typename T>
using NEQuantizationLayerQASYMM8Fixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, uint8_t>;
+template <typename T>
+using NEQuantizationLayerQASYMM16Fixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, uint16_t>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
@@ -109,6 +112,14 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<float>
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u16);
+}
FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
@@ -117,10 +128,16 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<float>
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, NEQuantizationLayerQASYMM16Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u16);
+}
TEST_SUITE_END() // FP32
-TEST_SUITE_END() // Float
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-TEST_SUITE(Half)
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
framework::dataset::make("DataType", DataType::F16)),
@@ -130,6 +147,14 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>,
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u16);
+}
FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
@@ -138,9 +163,17 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>,
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, NEQuantizationLayerQASYMM16Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u16);
+}
TEST_SUITE_END() // FP16
-TEST_SUITE_END() // Half
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE_END() // Float
TEST_SUITE_END() // QuantizationLayer
TEST_SUITE_END() // NEON
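
Outside the test framework, the path exercised by the new fixtures boils down to quantizing an F32 tensor into a QASYMM16 destination. A hedged usage sketch follows; the shape is illustrative and the constructors shown are the usual ComputeLibrary ones, so verify them against the headers of the release you build against.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor src, dst;

    // F32 source; QASYMM16 destination carrying the scale/offset used by the tests.
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::QASYMM16, QuantizationInfo(0.5f, 10)));

    NEQuantizationLayer quant;
    quant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with float data ...

    quant.run(); // dst now holds uint16_t values quantized with scale 0.5 and offset 10
    return 0;
}
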