From e03802edd37229a1868bacedd7571cc443810caf Mon Sep 17 00:00:00 2001
From: Usama Arif
Date: Mon, 11 Mar 2019 12:20:20 +0000
Subject: COMPMID-1936: Add support for QASYMM8 in CLQuantizeLayer.

Change-Id: I9aa1f1f1753bcdee6a74ec15b4fb366f823788b4
Signed-off-by: Usama Arif
Reviewed-on: https://review.mlplatform.org/c/850
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
---
 tests/validation/CL/QuantizationLayer.cpp | 48 ++++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 17 deletions(-)

(limited to 'tests/validation/CL/QuantizationLayer.cpp')

diff --git a/tests/validation/CL/QuantizationLayer.cpp b/tests/validation/CL/QuantizationLayer.cpp
index f0cc4ccafa..26e030489c 100644
--- a/tests/validation/CL/QuantizationLayer.cpp
+++ b/tests/validation/CL/QuantizationLayer.cpp
@@ -53,21 +53,17 @@ TEST_SUITE(QuantizationLayer)
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),      // Wrong input data type
-                                            TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8),           // Invalid shape
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong input data type
                                             TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),     // Wrong output data type
-                                            TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::U8),       // Mismatching shapes
-                                            TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::U8),      // Shrink window
+                                            TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32),      // Mismatching shapes
                                             TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),     // Valid
                                           }),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8),
                                             TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U16),
-                                            TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
+                                            TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),
+                                            TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),
                                           })),
-    framework::dataset::make("Expected", { false, false, false, false, false, true})),
+    framework::dataset::make("Expected", { false, false, false, true})),
     input_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLQuantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -79,7 +75,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
 {
     // Create tensors
     CLTensor src = create_tensor<CLTensor>(shape, data_type);
-    CLTensor dst = create_tensor<CLTensor>(shape, DataType::U8);
+    CLTensor dst = create_tensor<CLTensor>(shape, DataType::QASYMM8);
 
     ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -94,9 +90,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
     validate(dst.info()->valid_region(), valid_region);
 
     // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
+    validate(src.info()->padding(), PaddingSize());
+    validate(dst.info()->padding(), PaddingSize());
 }
 
 template <typename T>
@@ -104,19 +99,38 @@ using CLQuantizationLayerFixture = QuantizationValidationFixture<CLTensor, CLAccessor, CLQuantizationLayer, T>;
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
-                                                                                                               framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+                                                                                                               framework::dataset::make("DataType", DataType::F32)),
+                                                                                                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
-                                                                                                             framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+                                                                                                             framework::dataset::make("DataType", DataType::F32)),
+                                                                                                             framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+                                                                                                              framework::dataset::make("DataType", DataType::F16)),
+                                                                                                              framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+                                                                                                            framework::dataset::make("DataType", DataType::F16)),
+                                                                                                            framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 TEST_SUITE_END() // QuantizationLayer
-- 
cgit v1.2.1
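
For reference, a minimal standalone C++ sketch of the affine QASYMM8 mapping these tests exercise, q = clamp(round(x / scale) + offset, 0, 255), parameterised with the QuantizationInfo(0.5f, 10) used in the fixtures above. This is not ACL code: the helper name quantize_qasymm8 is illustrative, and the library's CL kernels may apply a different rounding policy than the round-to-nearest assumed here.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Illustrative only: affine quantization to QASYMM8 (uint8 with scale/offset).
// Saturates to the [0, 255] range of uint8.
static uint8_t quantize_qasymm8(float x, float scale, int32_t offset)
{
    const long q = std::lround(x / scale) + offset;
    return static_cast<uint8_t>(std::min(255L, std::max(0L, q)));
}

int main()
{
    const float   scale  = 0.5f; // matches QuantizationInfo(0.5f, 10) in the fixtures
    const int32_t offset = 10;
    for(float x : { -10.f, 0.f, 1.f, 100.f, 200.f })
    {
        // e.g. 1.0f -> 12; -10.0f saturates to 0; 200.0f saturates to 255
        std::printf("%8.2f -> %3d\n", x, static_cast<int>(quantize_qasymm8(x, scale, offset)));
    }
    return 0;
}

Since the quantized outputs are integers regardless of whether the input is F32 or F16, a single absolute tolerance suffices, which is consistent with the FP16 suite above reusing tolerance_f32.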