From 9fef38ac706aac6ff194fb76e92dcc774e12e115 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Fri, 6 Jul 2018 18:06:58 +0100
Subject: COMPMID-1376: Add support for QASYMM8 in CLDeconvolutionLayer

Change-Id: I13ec79b6668e2b9559d3fa789ae0b51ab6975289
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/139126
Reviewed-by: Michalis Spyrou
Tested-by: Jenkins
---
 tests/validation/CL/DeconvolutionLayer.cpp | 53 +++++++++++++++++++++++++----
 1 file changed, 45 insertions(+), 8 deletions(-)

(limited to 'tests/validation/CL/DeconvolutionLayer.cpp')

diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 0fd7ed4ddc..5d10073641 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -45,6 +45,7 @@ namespace
 {
 constexpr AbsoluteTolerance<float>  tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
 RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's for DataType::F16 */
+constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 constexpr float                     tolerance_num = 0.07f; /**< Tolerance number */
 
 const auto data4x4 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 3)
@@ -105,54 +106,48 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, (combine(datasets::Sm
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
     framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type
                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights shape
-                                            TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Non supported data type
                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid bias shape
                                             TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink
                                             TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
                                           }),
     framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
                                               TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
-                                              TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::QASYMM8),
                                               TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32),
                                               TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32),
                                               TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32),
                                             })),
     framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16),
-                                           TensorInfo(TensorShape(1U), 1, DataType::F32),
                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
                                            TensorInfo(TensorShape(25U, 11U), 1, DataType::F32),
                                            TensorInfo(TensorShape(1U), 1, DataType::F32),
                                            TensorInfo(TensorShape(4U), 1, DataType::F32),
+                                           TensorInfo(TensorShape(4U), 1, DataType::S32),
                                          })),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
                                             TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
                                           })),
     framework::dataset::make("PadStrideInfo", { PadStrideInfo(1, 1, 0, 0),
-                                                PadStrideInfo(1, 1, 0, 0),
                                                 PadStrideInfo(1, 1, 0, 0),
                                                 PadStrideInfo(1, 1, 0, 0),
                                                 PadStrideInfo(1, 1, 1, 1),
                                                 PadStrideInfo(1, 1, 0, 0),
                                               })),
framework::dataset::make("ax", { 1U, - 1U, 1U, 1U, 0U, 0U, })), framework::dataset::make("ay", { 1U, - 1U, 1U, 1U, 0U, 0U, })), - framework::dataset::make("Expected", { false, false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, false, false, true })), input_info, weights_info, bias_info, output_info, pad_info, ax, ay, expected) { bool is_valid = bool(CLDeconvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pad_info, ax, ay)); @@ -228,6 +223,48 @@ TEST_SUITE_END() TEST_SUITE_END() TEST_SUITE_END() +template +using CLDeconvolutionLayerQuantizedFixture4x4 = DeconvolutionValidationQuantizedFixture; + +template +using CLDeconvolutionLayerQuantizedFixture3x3 = DeconvolutionValidationQuantizedFixture; + +template +using CLDeconvolutionLayerQuantizedFixture1x1 = DeconvolutionValidationQuantizedFixture; + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) + +TEST_SUITE(W4x4) +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4, framework::DatasetMode::ALL, combine(combine(data4x4, framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 127)))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); +} +TEST_SUITE_END() + +TEST_SUITE(W3x3) +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture3x3, framework::DatasetMode::ALL, combine(combine(data3x3, framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 127)))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); +} +TEST_SUITE_END() + +TEST_SUITE(W1x1) +FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1, framework::DatasetMode::ALL, combine(combine(data1x1, framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255.f, 127)))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num); +} +TEST_SUITE_END() + +TEST_SUITE_END() +TEST_SUITE_END() + TEST_SUITE_END() TEST_SUITE_END() } // namespace validation -- cgit v1.2.1