From 93c70b8d7c69ddcee58162c2618aa0e898b4bd1e Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio <michele.digiorgio@arm.com>
Date: Thu, 8 Aug 2019 11:59:14 +0100
Subject: COMPMID-2547: CLSpaceToBatchLayer causes NN Test Failures on QUANT8_ASYMM Data Type

Change-Id: I47c9d057e50fa624f9b9e3fd79724e4fa7d0fd82
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1713
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
---
 tests/validation/CL/SpaceToBatchLayer.cpp       | 39 ++++++++++++----
 tests/validation/NEON/SpaceToBatchLayer.cpp     | 31 +++++++++++--
 tests/validation/fixtures/SpaceToBatchFixture.h | 59 ++++++++++++++++++-------
 tests/validation/reference/SpaceToBatch.cpp     | 10 +++--
 4 files changed, 107 insertions(+), 32 deletions(-)

diff --git a/tests/validation/CL/SpaceToBatchLayer.cpp b/tests/validation/CL/SpaceToBatchLayer.cpp
index 7fca9ec1f1..3ddbcd8afd 100644
--- a/tests/validation/CL/SpaceToBatchLayer.cpp
+++ b/tests/validation/CL/SpaceToBatchLayer.cpp
@@ -105,15 +105,15 @@ DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                       DataType::F32)),
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                       DataType::F32)),
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::F32)),
                        framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
@@ -122,15 +122,15 @@ FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::Data
 TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                       DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                       DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
@@ -139,6 +139,29 @@ FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::Datas
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 
+template <typename T>
+using CLSpaceToBatchLayerQuantizedFixture = SpaceToBatchLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLSpaceToBatchLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // SpaceToBatch
 TEST_SUITE_END() // CL
 } // namespace validation
diff --git a/tests/validation/NEON/SpaceToBatchLayer.cpp b/tests/validation/NEON/SpaceToBatchLayer.cpp
index 1d5ef06af2..fc8a800b48 100644
--- a/tests/validation/NEON/SpaceToBatchLayer.cpp
+++ b/tests/validation/NEON/SpaceToBatchLayer.cpp
@@ -121,15 +121,15 @@ FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<float>, framework::Data
 TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                       DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
-                       DataType::F16)),
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::F16)),
                        framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
@@ -138,6 +138,29 @@ FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerFixture<half>, framework::Datas
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float
 
+template <typename T>
+using NESpaceToBatchLayerQuantizedFixture = SpaceToBatchLayerValidationQuantizedFixture<Tensor, Accessor, NESpaceToBatchLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToBatchLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeSpaceToBatchLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       framework::dataset::make("QuantizationInfo", { 1.f / 255.f, 9.f })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // SpaceToBatch
 TEST_SUITE_END() // NEON
 } // namespace validation
diff --git a/tests/validation/fixtures/SpaceToBatchFixture.h b/tests/validation/fixtures/SpaceToBatchFixture.h
index a304162998..d88ecb934a 100644
--- a/tests/validation/fixtures/SpaceToBatchFixture.h
+++ b/tests/validation/fixtures/SpaceToBatchFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,31 +36,32 @@ namespace test
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class SpaceToBatchLayerValidationFixture : public framework::Fixture
+class SpaceToBatchLayerValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
+    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
+               DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
-        _target    = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout);
-        _reference = compute_reference(input_shape, block_shape_shape, paddings_shape, output_shape, data_type);
+        _target    = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout, quantization_info);
+        _reference = compute_reference(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, quantization_info);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor, int i)
     {
-        std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
-        library->fill(tensor, distribution, i);
+        library->fill_tensor_uniform(tensor, i);
     }
+
     template <typename U>
-    void fill_pad(U &&tensor, int i)
+    void fill_pad(U &&tensor)
     {
-        std::uniform_int_distribution<> distribution(0, 0);
-        library->fill(tensor, distribution, i);
+        library->fill_tensor_value(tensor, 0);
     }
+
     TensorType compute_target(TensorShape input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape, TensorShape output_shape,
-                              DataType data_type, DataLayout data_layout)
+                              DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
         if(data_layout == DataLayout::NHWC)
         {
@@ -69,10 +70,10 @@ protected:
         }
 
         // Create tensors
-        TensorType input       = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType input       = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout);
         TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
         TensorType paddings    = create_tensor<TensorType>(paddings_shape, DataType::S32);
-        TensorType output      = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType output      = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout);
 
         // Create and configure function
         FunctionType space_to_batch;
@@ -96,7 +97,7 @@ protected:
 
         // Fill tensors
         fill(AccessorType(input), 0);
-        fill_pad(AccessorType(paddings), 0);
+        fill_pad(AccessorType(paddings));
         {
             auto block_shape_data = AccessorType(block_shape);
            const int idx_width   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
@@ -112,16 +113,16 @@ protected:
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape,
-                                      const TensorShape &output_shape, DataType data_type)
+                                      const TensorShape &output_shape, DataType data_type, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T>       input{ input_shape, data_type };
+        SimpleTensor<T>       input{ input_shape, data_type, 1, quantization_info };
         SimpleTensor<int32_t> block_shape{ block_shape_shape, DataType::S32 };
         SimpleTensor<int32_t> paddings{ paddings_shape, DataType::S32 };
 
         // Fill reference
         fill(input, 0);
-        fill_pad(paddings, 0);
+        fill_pad(paddings);
         for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
         {
             block_shape[i] = input_shape[i] / output_shape[i];
@@ -134,6 +135,30 @@ protected:
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpaceToBatchLayerValidationFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
+               DataType data_type, DataLayout data_layout)
+    {
+        SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout, QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpaceToBatchLayerValidationQuantizedFixture : public SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape,
+               DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
+    {
+        SpaceToBatchLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout, quantization_info);
+    }
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/SpaceToBatch.cpp b/tests/validation/reference/SpaceToBatch.cpp
index c635d4abfd..8c25bb7d91 100644
--- a/tests/validation/reference/SpaceToBatch.cpp
+++ b/tests/validation/reference/SpaceToBatch.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,7 +37,7 @@ namespace reference
 template <typename T>
 SimpleTensor<T> space_to_batch(const SimpleTensor<T> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape)
 {
-    SimpleTensor<T> result(dst_shape, src.data_type());
+    SimpleTensor<T> result(dst_shape, src.data_type(), 1, src.quantization_info());
 
     const auto width_out  = static_cast<int>(dst_shape[0]);
     const auto height_out = static_cast<int>(dst_shape[1]);
@@ -55,6 +55,9 @@ SimpleTensor<T> space_to_batch(const SimpleTensor<T> &src, const SimpleTensor<in
     const auto padding_left = paddings[0];
     const auto padding_top  = paddings[2];
 
+    // Pad value must be the quantized representation of zero (i.e. the offset for QASYMM8)
+    const auto pad_value = is_data_type_quantized_asymmetric(src.data_type()) ? src.quantization_info().uniform().offset : 0;
+
     int out_pos = 0;
     for(int outB = 0; outB < batch_out; ++outB)
     {
@@ -69,7 +72,7 @@ SimpleTensor<T> space_to_batch(const SimpleTensor<T> &src, const SimpleTensor<in
                 if(outH * block_height + shift_h < padding_top || outH * block_height + shift_h >= padding_top + height_in || outW * block_width + shift_w < padding_left || outW * block_width + shift_w >= padding_left + width_in)
                 {
-                    result[out_pos] = 0;
+                    result[out_pos] = pad_value;
                 }
                 else
                 {
@@ -90,6 +93,7 @@ SimpleTensor<T> space_to_batch(const SimpleTensor<T> &src, const SimpleTensor<in
 
 template SimpleTensor<float> space_to_batch(const SimpleTensor<float> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape);
 template SimpleTensor<half> space_to_batch(const SimpleTensor<half> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape);
+template SimpleTensor<uint8_t> space_to_batch(const SimpleTensor<uint8_t> &src, const SimpleTensor<int32_t> &block_shape, const SimpleTensor<int32_t> &paddings, const TensorShape &dst_shape);
 } // namespace reference
 } // namespace validation
 } // namespace test
-- 
cgit v1.2.1
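Why the reference now writes pad_value instead of a literal 0: in QASYMM8 the real value 0.0f maps to the quantization offset, so padded positions filled with a raw 0 would dequantize to a non-zero value and diverge from the backend output. The standalone sketch below illustrates this with the scale and offset used by the test datasets in this patch (1/255 and 9). The helpers quantize_qasymm8 and dequantize_qasymm8 here are local stand-ins written for this note, not the Arm Compute Library functions of the same name.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Local stand-in for 8-bit asymmetric quantization: q = round(f / scale) + offset, clamped to [0, 255].
static uint8_t quantize_qasymm8(float value, float scale, int32_t offset)
{
    const int32_t q = static_cast<int32_t>(std::lround(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
}

// Local stand-in for the matching dequantization: f = (q - offset) * scale.
static float dequantize_qasymm8(uint8_t value, float scale, int32_t offset)
{
    return (static_cast<int32_t>(value) - offset) * scale;
}

int main()
{
    // Quantization parameters matching the test datasets above.
    const float   scale  = 1.0f / 255.0f;
    const int32_t offset = 9;

    // "Logical zero" in the padded region is the offset (9), not raw 0...
    std::cout << "quantize(0.0f)     = " << static_cast<int>(quantize_qasymm8(0.0f, scale, offset)) << '\n'; // 9
    // ...because a raw 0 dequantizes to a non-zero real value, while the offset dequantizes to 0.
    std::cout << "dequantize(0)      = " << dequantize_qasymm8(0, scale, offset) << '\n';
    std::cout << "dequantize(offset) = " << dequantize_qasymm8(static_cast<uint8_t>(offset), scale, offset) << '\n';
    return 0;
}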