author      John Kesapides <john.kesapides@arm.com>    2019-03-04 16:29:22 +0000
committer   Pablo Marquez <pablo.tello@arm.com>        2019-03-13 13:34:48 +0000
commit      adfb2737046028c042f0aecaff87733a442da29f (patch)
tree        23b08fb9529075277e51dc1ae7e6489f690c9698 /tests/validation/NEON/QuantizationLayer.cpp
parent      381fcf20c3ee028e14c154ff4b75bc7410f91168 (diff)
download    ComputeLibrary-adfb2737046028c042f0aecaff87733a442da29f.tar.gz
COMPMID-1935 Add support for QASYMM8 in NEQuantizeLayer
Change-Id: I2b63a644d8e34f91c830d9ac398debcbdca3e497
Signed-off-by: John Kesapides <john.kesapides@arm.com>
Reviewed-on: https://review.mlplatform.org/c/829
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/NEON/QuantizationLayer.cpp')
-rw-r--r--    tests/validation/NEON/QuantizationLayer.cpp    53
1 file changed, 35 insertions(+), 18 deletions(-)
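
For context, a minimal usage sketch of the QASYMM8 path this patch enables in NEQuantizationLayer. It is not part of the commit; the tensor shape, the QuantizationInfo(0.5f, 10) pair (borrowed from the test fixtures below), and the main() scaffolding are illustrative assumptions.

// Illustrative sketch only (not part of this patch): quantize an F32 tensor
// to QASYMM8 with NEQuantizationLayer, supplying scale/offset on the output.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor src;
    Tensor dst;

    // F32 source; the QASYMM8 destination carries the quantization parameters
    // (scale = 0.5, offset = 10, the same pair the fixtures below use).
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));

    NEQuantizationLayer quant;
    quant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with F32 data, then run the quantization ...
    quant.run();
    return 0;
}

NEQuantizationLayer::validate(src.info(), dst.info()) can be called before configure() to pre-check a configuration, which is what the Validate data test case in the diff below exercises.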
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index 6526539d1a..487eb70120 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -55,21 +55,17 @@ TEST_SUITE(QuantizationLayer)
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8), // Wrong input data type
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8), // Invalid shape
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong input data type
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong output data type
- TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::U8), // Missmatching shapes
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::U8), // Shrink window
+ TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32), // Missmatching shapes
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Valid
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U16),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, true})),
+ framework::dataset::make("Expected", { false, false, false, true})),
input_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(NEQuantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -81,7 +77,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
{
// Create tensors
Tensor src = create_tensor<Tensor>(shape, data_type);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -96,30 +92,51 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
validate(dst.info()->valid_region(), valid_region);
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 8).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
+ validate(src.info()->padding(), PaddingSize());
+ validate(dst.info()->padding(), PaddingSize());
}
template <typename T>
-using NEQuantizationLayerFixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T>;
+using NEQuantizationLayerFixture = QAsymm8QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(Half)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Half
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE_END() // QuantizationLayer
TEST_SUITE_END() // NEON