From e92b0458a0432254f8e49bc306aebfe172bb4d0e Mon Sep 17 00:00:00 2001
From: Freddie Liardet
Date: Thu, 22 Apr 2021 14:55:17 +0100
Subject: Add per-channel quantization support for CLDeconvolutionLayer

Add QSYMM8_PER_CHANNEL support on the weight input for
CLDeconvolutionLayer. When the weights are of a per-channel quantized
type, the "Direct" method is always used.

Also reduce the number of QSYMM8_PER_CHANNEL tests for
NEDeconvolutionLayer.

Resolves: COMPMID-3438

Signed-off-by: Freddie Liardet
Change-Id: I1330cac5142e19d21e322574fb8d912558745b02
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5484
Reviewed-by: Michele Di Giorgio
Reviewed-by: Giorgio Arena
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 .../runtime/CL/functions/CLDeconvolutionLayer.h   |  14 +-
 .../CL/functions/CLDirectDeconvolutionLayer.h     |   8 +-
 src/runtime/CL/functions/CLDeconvolutionLayer.cpp |   7 +-
 .../CL/functions/CLDirectDeconvolutionLayer.cpp   |   8 +-
 tests/validation/CL/DeconvolutionLayer.cpp        | 179 +++++++++++++++++----
 tests/validation/NEON/DeconvolutionLayer.cpp      |  29 ++--
 .../fixtures/DeconvolutionLayerFixture.h          |   3 +-
 tests/validation/reference/DeconvolutionLayer.cpp |   1 +
 8 files changed, 192 insertions(+), 57 deletions(-)

diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index df3cad6632..4be8c17835 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -47,8 +47,8 @@ public:
 /** Set the input, weights, biases and output tensors.
 *
 * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
 * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
 * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
 * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
@@ -59,8 +59,8 @@ public:
 *
 * @param[in] compile_context The compile context to be used.
 * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
 * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
 * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
 * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
@@ -71,8 +71,8 @@ public:
 /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
 *
 * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
- * @param[in] bias (Optional) The biases have one dimension. Data type supported: Same as @p input.
+ * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
+ * @param[in] bias (Optional) The biases have one dimension. Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
 * @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
 * @param[in] deconv_info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
 * @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
diff --git a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
index 232b9f59b6..a23500e16b 100644
--- a/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDirectDeconvolutionLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -89,7 +89,7 @@ public:
 *
 * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs.
 * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
 * @param[in] bias (Optional) The biases have one dimension.
 * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
 * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
@@ -103,7 +103,7 @@ public:
 * @param[in] compile_context The compile context to be used.
 * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs.
 * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] weights The 4d weights with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
 * @param[in] bias (Optional) The biases have one dimension.
 * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
 * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
@@ -117,7 +117,7 @@ public:
 *
 * @param[in] input Input tensor info. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs.
 * Data types supported: QASYMM8_SIGNED/QASYMM8/F16/F32.
- * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input.
+ * @param[in] weights The 4d weights info with dimensions [width, height, IFM, OFM]. Data type supported: Same as @p input or QSYMM8_PER_CHANNEL if @p input is QASYMM8/QASYMM8_SIGNED.
 * @param[in] bias (Optional) The biases have one dimension.
 * Data type supported: Should match @p input data type, except for input of QASYMM8 and QASYMM8_SIGNED type where biases should be of S32 type
 * @param[in] output Output tensor info. The output has the same number of dimensions as the @p input.
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
index 75f34cc5ee..918848745e 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -105,6 +105,11 @@ DeconvolutionMethod CLDeconvolutionLayer::get_deconvolution_method(const ITensor
 {
     ARM_COMPUTE_UNUSED(output, bias, weights_info);
 
+    if(is_data_type_quantized_per_channel(weights->data_type()))
+    {
+        return DeconvolutionMethod::DIRECT;
+    }
+
     const DataLayout data_layout = input->data_layout();
 
     const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
diff --git a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
index d802ef2dc9..00d9a9ec89 100644
--- a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
@@ -74,7 +74,12 @@ Status CLDirectDeconvolutionLayer::validate(const ITensorInfo *input, const ITen
 
     const TensorShape output_shape = compute_deconvolution_output_shape(out_dims, *input, *weights);
 
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+
+    if(input->data_type() != weights->data_type())
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON(weights->data_type() != DataType::QSYMM8_PER_CHANNEL || !is_data_type_quantized_asymmetric(input->data_type()));
+    }
 
     if(bias != nullptr)
     {
@@ -227,7 +232,6 @@ void CLDirectDeconvolutionLayer::prepare()
     {
         _weights_flipped.allocator()->free();
     }
-    _is_prepared = true;
 }
 }
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index c284cdcee3..15962b588d 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -87,34 +87,34 @@ TEST_SUITE(DeconvolutionLayer)
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type
-        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights shape
-        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), // Non supported data type
-        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid bias shape
-        TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink
-        TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
-    }),
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type
+        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid weights shape
+        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), // Non supported data type
+        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid bias shape
+        TensorInfo(TensorShape(13U, 11U, 4U, 3U), 1, DataType::F32), // Window shrink
+        TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32),
+    }),
     framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
-        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
-        TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
-        TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32),
-        TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32),
+        TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32),
+        TensorInfo(TensorShape(3U, 3U, 2U, 2U), 1, DataType::F16),
+        TensorInfo(TensorShape(3U, 2U, 2U, 2U), 1, DataType::F32),
+        TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32),
        TensorInfo(TensorShape(1U, 1U, 2U, 4U), 1, DataType::F32),
-    })),
-    framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16),
-        TensorInfo(TensorShape(1U), 1, DataType::F32),
-        TensorInfo(TensorShape(1U), 1, DataType::F32),
-        TensorInfo(TensorShape(25U, 11U), 1, DataType::F32),
-        TensorInfo(TensorShape(1U), 1, DataType::F32),
-        TensorInfo(TensorShape(4U), 1, DataType::F32),
-    })),
-    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
-        TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
-        TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
-        TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32),
-        TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32),
-        TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
-    })),
+    })),
+    framework::dataset::make("BiasInfo", { TensorInfo(TensorShape(1U), 1, DataType::F16),
+        TensorInfo(TensorShape(1U), 1, DataType::F32),
+        TensorInfo(TensorShape(1U), 1, DataType::F32),
+        TensorInfo(TensorShape(25U, 11U), 1, DataType::F32),
+        TensorInfo(TensorShape(1U), 1, DataType::F32),
+        TensorInfo(TensorShape(4U), 1, DataType::F32),
+    })),
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
+        TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
+        TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+        TensorInfo(TensorShape(13U, 13U, 2U), 1, DataType::F32),
+        TensorInfo(TensorShape(11U, 9U, 1U, 3U), 1, DataType::F32),
+        TensorInfo(TensorShape(32U, 16U, 4U), 1, DataType::F32),
+    })),
     framework::dataset::make("PadStrideInfo", { PadStrideInfo(1, 1, 0, 0),
        PadStrideInfo(1, 1, 0, 0),
        PadStrideInfo(1, 1, 0, 0),
@@ -286,6 +286,18 @@ using CLDeconvolutionLayerQuantizedFixture2x2 = DeconvolutionValidationQuantized
 template <typename T>
 using CLDeconvolutionLayerQuantizedFixture1x1 = DeconvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, 1, 1>;
 
+template <typename T>
+using CLDeconvolutionLayerQuantizedPerChannelFixture4x4 = DeconvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, int8_t, 4, 4>;
+
+template <typename T>
+using CLDeconvolutionLayerQuantizedPerChannelFixture3x3 = DeconvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, int8_t, 3, 3>;
+
+template <typename T>
+using CLDeconvolutionLayerQuantizedPerChannelFixture2x2 = DeconvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, int8_t, 2, 2>;
+
+template <typename T>
+using CLDeconvolutionLayerQuantizedPerChannelFixture1x1 = DeconvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLDeconvolutionLayer, T, int8_t, 1, 1>;
+
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
@@ -414,8 +426,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture2x2
 TEST_SUITE_END() // W2x2
 
 TEST_SUITE(W1x1) // DirectDeconvolution and GEMMDeconvolution
-FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(data1x1, framework::dataset::make("DataType",
-                       DataType::QASYMM8_SIGNED)),
+FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(data1x1,
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                        data_layouts_dataset),
                        framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f / 255.f, 0) })),
                        framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(4.f / 255.f, 0) })),
@@ -428,6 +440,115 @@ TEST_SUITE_END() // W1x1
 
 TEST_SUITE_END() // QASYMM8_SIGNED
 
+const auto input_qinfo_dataset = framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 10) });
+const auto output_qinfo_dataset = framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0) });
+const auto input_signed_qinfo_dataset = framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, -10) });
+const auto output_signed_qinfo_dataset = framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 10) });
+
+TEST_SUITE(QSYMM8_PER_CHANNEL)
+
+TEST_SUITE(W4x4)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedPerChannelFixture4x4<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data4x4,
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       data_layouts_dataset),
+                       input_qinfo_dataset),
+                       output_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLDeconvolutionLayerQuantizedPerChannelFixture4x4<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data4x4,
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                       data_layouts_dataset),
+                       input_signed_qinfo_dataset),
+                       output_signed_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+TEST_SUITE_END() // W4x4
+
+TEST_SUITE(W3x3)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedPerChannelFixture3x3<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data3x3,
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       data_layouts_dataset),
+                       input_qinfo_dataset),
+                       output_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLDeconvolutionLayerQuantizedPerChannelFixture3x3<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data3x3,
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                       data_layouts_dataset),
+                       input_signed_qinfo_dataset),
+                       output_signed_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+TEST_SUITE_END() // W3x3
+
+TEST_SUITE(W2x2)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedPerChannelFixture2x2<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data2x2_precommit,
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       data_layouts_dataset),
+                       input_qinfo_dataset),
+                       output_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLDeconvolutionLayerQuantizedPerChannelFixture2x2<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data2x2_precommit,
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                       data_layouts_dataset),
+                       input_signed_qinfo_dataset),
+                       output_signed_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+TEST_SUITE_END() // W2x2
+
+TEST_SUITE(W1x1)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedPerChannelFixture1x1<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data1x1,
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       data_layouts_dataset),
+                       input_qinfo_dataset),
+                       output_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLDeconvolutionLayerQuantizedPerChannelFixture1x1<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data1x1,
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                       data_layouts_dataset),
+                       input_signed_qinfo_dataset),
+                       output_signed_qinfo_dataset),
+                       add_bias_dataset),
+                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
+}
+TEST_SUITE_END() // W1x1
+
+TEST_SUITE_END() // QSYMM8_PER_CHANNEL
+
 TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // DeconvolutionLayer
diff --git a/tests/validation/NEON/DeconvolutionLayer.cpp b/tests/validation/NEON/DeconvolutionLayer.cpp
index 4c6ee2615d..19bd742a61 100644
--- a/tests/validation/NEON/DeconvolutionLayer.cpp
+++ b/tests/validation/NEON/DeconvolutionLayer.cpp
@@ -395,14 +395,19 @@ TEST_SUITE_END() // W1x1
 
 TEST_SUITE_END() // QASYMM8_SIGNED
 
+const auto input_qinfo_per_channel_dataset = framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 10) });
+const auto output_qinfo_per_channel_dataset = framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0) });
+const auto input_signed_qinfo_per_channel_dataset = framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, -10) });
+const auto output_signed_qinfo_per_channel_dataset = framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 10) });
+
 TEST_SUITE(QSYMM8_PER_CHANNEL)
 
 TEST_SUITE(W4x4)
 FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedPerChannelFixture4x4<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data4x4,
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                        data_layouts_dataset),
-                       input_qinfo_dataset),
-                       output_qinfo_dataset),
+                       input_qinfo_per_channel_dataset),
+                       output_qinfo_per_channel_dataset),
                        add_bias_dataset),
                        framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
@@ -412,8 +417,8 @@ FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedPerChannelFixture4x4<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data4x4,
                        framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                        data_layouts_dataset),
-                       input_qinfo_dataset),
-                       output_qinfo_dataset),
+                       input_signed_qinfo_per_channel_dataset),
+                       output_signed_qinfo_per_channel_dataset),
                        add_bias_dataset),
                        framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
@@ -426,8 +431,8 @@ TEST_SUITE(W3x3)
 FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedPerChannelFixture3x3<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data3x3,
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                        data_layouts_dataset),
-                       input_qinfo_dataset),
-                       output_qinfo_dataset),
+                       input_qinfo_per_channel_dataset),
+                       output_qinfo_per_channel_dataset),
                        add_bias_dataset),
                        framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
@@ -437,8 +442,8 @@ FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedPerChannelFixture3x3<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data3x3,
                        framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                        data_layouts_dataset),
-                       input_qinfo_dataset),
-                       output_qinfo_dataset),
+                       input_signed_qinfo_per_channel_dataset),
+                       output_signed_qinfo_per_channel_dataset),
                        add_bias_dataset),
                        framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
@@ -451,8 +456,8 @@ TEST_SUITE(W1x1)
 FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedPerChannelFixture1x1<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data1x1,
                        framework::dataset::make("DataType", DataType::QASYMM8)),
                        data_layouts_dataset),
-                       input_qinfo_dataset),
-                       output_qinfo_dataset),
+                       input_qinfo_per_channel_dataset),
+                       output_qinfo_per_channel_dataset),
                        add_bias_dataset),
                        framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
@@ -462,8 +467,8 @@ FIXTURE_DATA_TEST_CASE(Run, NEDeconvolutionLayerQuantizedPerChannelFixture1x1<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(data1x1,
                        framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                        data_layouts_dataset),
-                       input_qinfo_dataset),
-                       output_qinfo_dataset),
+                       input_signed_qinfo_per_channel_dataset),
+                       output_signed_qinfo_per_channel_dataset),
                        add_bias_dataset),
                        framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index 4bc1d3bb45..7c06635bcd 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -136,8 +136,7 @@ protected:
         {
             case DataType::S32:
             {
-                const int32_t value = static_cast<int32_t>(tensor.quantization_info().uniform().offset);
-                library->fill_tensor_value(tensor, value);
+                library->fill_tensor_value(tensor, 0);
                 break;
             }
             case DataType::F16:
diff --git a/tests/validation/reference/DeconvolutionLayer.cpp b/tests/validation/reference/DeconvolutionLayer.cpp
index afbf063e63..eeb25fcbe3 100644
--- a/tests/validation/reference/DeconvolutionLayer.cpp
+++ b/tests/validation/reference/DeconvolutionLayer.cpp
@@ -24,6 +24,7 @@
 #include "ConvolutionLayer.h"
 
 #include "tests/validation/Helpers.h"
+
 namespace arm_compute
 {
 namespace test
-- cgit v1.2.1
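
Usage illustration (not part of the commit): the sketch below shows how a caller might exercise the per-channel path this patch enables, written against the public CLDeconvolutionLayer API documented above. The shapes, scales, and offsets are invented for the example, and the per-channel QuantizationInfo is assumed to carry one scale per output channel (OFM).

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"

#include <vector>

using namespace arm_compute;

int main()
{
    // Initialise the OpenCL runtime before configuring any CL function.
    CLScheduler::get().default_init();

    CLTensor input, weights, bias, output;

    // QASYMM8 input: 13x11 spatial, 4 input channels, uniform quantization (example values).
    input.allocator()->init(TensorInfo(TensorShape(13U, 11U, 4U), 1, DataType::QASYMM8, QuantizationInfo(1.f / 255.f, 10)));

    // QSYMM8_PER_CHANNEL weights: 3x3 kernel, IFM = 4, OFM = 2, one scale per output channel.
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 4U, 2U), 1, DataType::QSYMM8_PER_CHANNEL, QuantizationInfo(std::vector<float>{ 0.01f, 0.02f })));

    // With a quantized input the bias must be S32, as the updated documentation states.
    bias.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::S32));

    // Stride 1, no padding: output is (13 - 1) * 1 + 3 = 15 wide and (11 - 1) * 1 + 3 = 13 high.
    output.allocator()->init(TensorInfo(TensorShape(15U, 13U, 2U), 1, DataType::QASYMM8, QuantizationInfo(3.f / 255.f, 0)));

    const PadStrideInfo deconv_info(1, 1, 0, 0);

    // The static validate() exercises the relaxed type check added by this patch: weights may
    // differ from the input type only if they are QSYMM8_PER_CHANNEL and the input is QASYMM8/QASYMM8_SIGNED.
    const Status status = CLDeconvolutionLayer::validate(input.info(), weights.info(), bias.info(), output.info(), deconv_info);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    // Per-channel quantized weights make get_deconvolution_method() select DeconvolutionMethod::DIRECT.
    CLDeconvolutionLayer deconv;
    deconv.configure(&input, &weights, &bias, &output, deconv_info);

    input.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    output.allocator()->allocate();

    // Fill input/weights/bias here, then execute.
    deconv.run();
    CLScheduler::get().sync();
    return 0;
}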