author    Giuseppe Rossini <giuseppe.rossini@arm.com>  2020-01-16 16:38:56 +0000
committer Giuseppe Rossini <giuseppe.rossini@arm.com>  2020-01-16 19:34:52 +0000
commit    0a958cbb48816a074303ae218a90414fb619efed (patch)
tree      7ddd35a9d197ae20c9202ca68a47233f61771c8d
parent    74a16960f3cf1b8a93a70c349a8fc05fe45d4b97 (diff)
COMPMID-3013: Transpose Convolution 2D VTS/CTS failures on CL
Change-Id: If19eec7f7a09e1008cc83ddcc673de145fca471d
Signed-off-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2599
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp  | 13
-rw-r--r--  tests/validation/CL/DeconvolutionLayer.cpp              | 26
2 files changed, 25 insertions, 14 deletions
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
index 604147a37a..14bda11f5f 100644
--- a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -239,7 +239,18 @@ void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor
// Configure output stage for asymmetric quantized types
if(_is_quantized)
{
+ // gemmlowp adds the offsets (instead of subtracting them). Thus, we need to negate the original
+ // offsets before configuring the GEMM and restore them afterwards.
+ QuantizationInfo iq_info = input->info()->quantization_info();
+ QuantizationInfo wq_info = weights->info()->quantization_info();
+
+ input_to_use->info()->set_quantization_info(QuantizationInfo(iq_info.uniform().scale, -iq_info.uniform().offset));
+ _reshaped_weights_t.info()->set_quantization_info(QuantizationInfo(wq_info.uniform().scale, -wq_info.uniform().offset));
+
_mm_gemmlowp.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);
+
+ input_to_use->info()->set_quantization_info(iq_info);
+ _reshaped_weights_t.info()->set_quantization_info(wq_info);
}
else
{
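The change above follows a save/negate/configure/restore pattern: because gemmlowp adds the quantization offsets instead of subtracting them, the input and reshaped-weights offsets are negated only for the duration of the gemmlowp configure() call, and the original QuantizationInfo objects are restored immediately afterwards so the rest of the pipeline still sees the real offsets. The standalone C++ sketch below illustrates that pattern in isolation; QuantInfo and configure_gemmlowp are simplified stand-ins chosen for illustration, not the ComputeLibrary API.

#include <cstdio>

// Simplified stand-in for the library's QuantizationInfo (scale and zero-point offset).
struct QuantInfo
{
    float scale;
    int   offset;
};

// Illustrative stand-in for the gemmlowp configure step: it consumes the offsets
// by *adding* them, which is why the caller passes negated values.
void configure_gemmlowp(const QuantInfo &input_q, const QuantInfo &weights_q)
{
    std::printf("configured with offsets %d and %d\n", input_q.offset, weights_q.offset);
}

int main()
{
    QuantInfo input_q   = { 1.f / 255.f, 10 };
    QuantInfo weights_q = { 2.f / 255.f, 5 };

    // Save the original quantization info.
    const QuantInfo saved_input   = input_q;
    const QuantInfo saved_weights = weights_q;

    // Temporarily negate the offsets so that the addition performed inside the
    // GEMM configuration has the effect of a subtraction.
    input_q.offset   = -input_q.offset;
    weights_q.offset = -weights_q.offset;

    configure_gemmlowp(input_q, weights_q);

    // Restore the original quantization info so later stages see the real offsets.
    input_q   = saved_input;
    weights_q = saved_weights;

    std::printf("restored offsets %d and %d\n", input_q.offset, weights_q.offset);
    return 0;
}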
diff --git a/tests/validation/CL/DeconvolutionLayer.cpp b/tests/validation/CL/DeconvolutionLayer.cpp
index 090db1c2c1..4e90e68ebd 100644
--- a/tests/validation/CL/DeconvolutionLayer.cpp
+++ b/tests/validation/CL/DeconvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,7 @@ namespace
{
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(1.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
const auto data4x4 = datasets::SmallDeconvolutionShapes() * framework::dataset::make("StrideX", 1, 4) * framework::dataset::make("StrideY", 1, 4) * framework::dataset::make("PadX", 0, 3)
@@ -272,8 +272,8 @@ TEST_SUITE(W4x4)
FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture4x4<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(data4x4, framework::dataset::make("DataType",
DataType::QASYMM8)),
data_layouts_dataset),
- framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f / 255.f, 0) })),
- framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(4.f / 255.f, 0) })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 10), QuantizationInfo(2.f / 255.f, 5) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 5), QuantizationInfo(4.f / 255.f, 10) })),
add_bias_dataset))
{
// Validate output
@@ -286,8 +286,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture3x3<uint8_t
framework::dataset::make("DataType",
DataType::QASYMM8)),
data_layouts_dataset),
- framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f, 0) })),
- framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(3.f, 0) })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 10), QuantizationInfo(2.f / 255.f, 4) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 10), QuantizationInfo(4.f / 255.f, 5) })),
add_bias_dataset))
{
// Validate output
@@ -297,8 +297,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDeconvolutionLayerQuantizedFixture3x3<uint8_t
framework::dataset::make("DataType",
DataType::QASYMM8)),
data_layouts_dataset),
- framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f, 0) })),
- framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(3.f, 0) })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 10), QuantizationInfo(2.f / 255.f, 128) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 128), QuantizationInfo(4.f / 255.f, 128) })),
add_bias_dataset))
{
// Validate output
@@ -310,12 +310,12 @@ TEST_SUITE(W2x2)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDeconvolutionLayerQuantizedFixture2x2<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(data2x2_precommit,
framework::dataset::make("DataType", DataType::QASYMM8)),
data_layouts_dataset),
- framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f, 0) })),
- framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(3.f, 0) })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 128), QuantizationInfo(2.f / 255.f, 128) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 64), QuantizationInfo(4.f / 255.f, 128) })),
add_bias_dataset))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_fp32);
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8, tolerance_num);
}
TEST_SUITE_END() // W2x2
@@ -323,8 +323,8 @@ TEST_SUITE(W1x1)
FIXTURE_DATA_TEST_CASE(Run, CLDeconvolutionLayerQuantizedFixture1x1<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(data1x1, framework::dataset::make("DataType",
DataType::QASYMM8)),
data_layouts_dataset),
- framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f, 0) })),
- framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(3.f, 0) })),
+ framework::dataset::make("InputQuantizationInfo", { QuantizationInfo(1.f / 255.f, 0), QuantizationInfo(2.f / 255.f, 0) })),
+ framework::dataset::make("OutputQuantizationInfo", { QuantizationInfo(3.f / 255.f, 0), QuantizationInfo(4.f / 255.f, 0) })),
add_bias_dataset))
{
// Validate output
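
On the test side, tolerance_qasymm8 is widened from 0 to 1 and the quantized fixtures are now validated against it: reference and CL outputs may differ by at most one quantized step (one 8-bit level), which absorbs the rounding differences introduced by the negated-offset path. The small standalone C++ sketch below shows what such an element-wise check amounts to; compare_q8 and the sample vectors are illustrative, not the validation framework's API.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Returns true when every element differs from the reference by at most `tolerance` quantized levels.
bool compare_q8(const std::vector<uint8_t> &target, const std::vector<uint8_t> &reference, int tolerance)
{
    if(target.size() != reference.size())
    {
        return false;
    }
    for(size_t i = 0; i < target.size(); ++i)
    {
        if(std::abs(int(target[i]) - int(reference[i])) > tolerance)
        {
            return false;
        }
    }
    return true;
}

int main()
{
    const std::vector<uint8_t> reference = { 10, 128, 200, 255 };
    const std::vector<uint8_t> target    = { 11, 128, 199, 255 }; // each element is off by at most one level

    std::printf("within tolerance 1: %s\n", compare_q8(target, reference, 1) ? "yes" : "no");
    return 0;
}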