Diffstat (limited to 'tests/validation/CL/PoolingLayer.cpp')
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp  187
1 file changed, 172 insertions(+), 15 deletions(-)
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 611c01b37a..9fe28c7acf 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,6 +71,12 @@ framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(5, 7) })),
framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
framework::dataset::make("ExcludePadding", { true }));
+const auto PoolingLayerDatasetFPIndicesSmall = combine(combine(combine(framework::dataset::make("PoolingType",
+{ PoolingType::MAX }),
+framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 2, 0, 0) })),
+framework::dataset::make("ExcludePadding", { true, false }));
+
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */
@@ -88,17 +94,16 @@ TEST_SUITE(PoolingLayer)
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Invalid pad/size combination
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid parameters
TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32), // Non-rectangular Global Pooling
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling
- TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8), // Invalid exclude_padding = false with quantized type, no actual padding and NHWC
+ TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
@@ -106,9 +111,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
- PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 2, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 2)),
PoolingLayerInfo(PoolingType::L2, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
@@ -116,17 +121,21 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
+ PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, true, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, false, true, true , false})),
input_info, output_info, pool_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
}
+
// clang-format on
// *INDENT-ON*
template <typename T>
using CLPoolingLayerFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+template <typename T>
+using CLPoolingLayerMixedDataLayoutFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T, true>;
template <typename T>
using CLSpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
@@ -134,6 +143,9 @@ using CLSpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<CLTens
template <typename T>
using CLMixedPrecesionPoolingLayerFixture = PoolingLayerValidationMixedPrecisionFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+template <typename T>
+using CLPoolingLayerIndicesFixture = PoolingLayerIndicesValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSpecial, CLSpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
@@ -141,7 +153,7 @@ FIXTURE_DATA_TEST_CASE(RunSpecial, CLSpecialPoolingLayerFixture<float>, framewor
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFPSmall,
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(), combine(PoolingLayerDatasetFPSmall,
framework::dataset::make("DataType",
DataType::F32))),
pool_data_layout_dataset))
@@ -149,6 +161,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::Datase
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(combine(combine(combine(datasets::PoolingTypes(),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })),
+ framework::dataset::make("ExcludePadding", { false })),
+ framework::dataset::make("DataType", DataType::F32))),
+ pool_data_layout_dataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
framework::dataset::make("DataType",
DataType::F32))),
@@ -157,11 +180,62 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::Datase
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
+
+FIXTURE_DATA_TEST_CASE(RunSmallIndices, CLPoolingLayerIndicesFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(PoolingLayerDatasetFPIndicesSmall,
+ framework::dataset::make("DataType",
+ DataType::F32))),
+ pool_data_layout_dataset), framework::dataset::make("UseKernelIndices", { false })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+ validate(CLAccessor(_target_indices), _ref_indices);
+}
+
+TEST_SUITE(GlobalPooling)
+// *INDENT-OFF*
+// clang-format off
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+ TensorShape(27U, 13U, 2U, 4U)
+ }),
+ framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+ framework::dataset::make("PoolingSize", { Size2D(27, 13) })),
+ framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+ framework::dataset::make("ExcludePadding", false)),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("InputShape", { TensorShape(79U, 37U, 11U),
+ TensorShape(79U, 37U, 11U, 4U)
+ }),
+ framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+ framework::dataset::make("PoolingSize", { Size2D(79, 37) })),
+ framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+ framework::dataset::make("ExcludePadding", false)),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+// clang-format on
+// *INDENT-ON*
+TEST_SUITE_END() // GlobalPooling
+
TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLMixedPrecesionPoolingLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFPSmall,
- framework::dataset::make("DataType", DataType::F16))),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLMixedPrecesionPoolingLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(PoolingLayerDatasetFPSmall,
+ framework::dataset::make("DataType", DataType::F16))),
pool_data_layout_dataset),
pool_fp_mixed_precision_dataset))
{
@@ -176,6 +250,55 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLMixedPrecesionPoolingLayerFixture<half>, fram
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
}
+FIXTURE_DATA_TEST_CASE(RunSmallIndices, CLPoolingLayerIndicesFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(PoolingLayerDatasetFPIndicesSmall,
+ framework::dataset::make("DataType",
+ DataType::F16))),
+ pool_data_layout_dataset), framework::dataset::make("UseKernelIndices", { false })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+ validate(CLAccessor(_target_indices), _ref_indices);
+}
+
+TEST_SUITE(GlobalPooling)
+// *INDENT-OFF*
+// clang-format off
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<half>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+ TensorShape(27U, 13U, 2U, 4U)
+ }),
+ framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+ framework::dataset::make("PoolingSize", { Size2D(27, 13) })),
+ framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+ framework::dataset::make("ExcludePadding", false)),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("InputShape", { TensorShape(79U, 37U, 11U),
+ TensorShape(79U, 37U, 11U, 4U)
+ }),
+ framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+ framework::dataset::make("PoolingSize", { Size2D(79, 37) })),
+ framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+ framework::dataset::make("ExcludePadding", false)),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+// clang-format on
+// *INDENT-ON*
+TEST_SUITE_END() // GlobalPooling
+
TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
@@ -183,11 +306,29 @@ TEST_SUITE(Quantized)
template <typename T>
using CLPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+template <typename T>
+using CLPoolingLayerQuantizedMixedDataLayoutFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T, true>;
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQASYMM8Small,
- framework::dataset::make("DataType", DataType::QASYMM8))),
- pool_data_layout_dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(PoolingLayerDatasetQASYMM8Small,
+ framework::dataset::make("DataType", DataType::QASYMM8))),
+ pool_data_layout_dataset),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10), QuantizationInfo(1.f / 255.f, 10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5), QuantizationInfo(1.f / 255.f, 10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+ framework::dataset::make("ExcludePadding", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8))),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -195,9 +336,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framew
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQASYMM8Small,
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
- pool_data_layout_dataset))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(PoolingLayerDatasetQASYMM8Small,
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
+ pool_data_layout_dataset),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10), QuantizationInfo(1.f / 127.f, -10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -5), QuantizationInfo(1.f / 127.f, -10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
+ combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+ framework::dataset::make("ExcludePadding", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
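
For orientation only (not part of the patch): the fixtures above drive CLPoolingLayer through its public configure()/run() interface. The following is a minimal sketch of that flow, assuming standard arm_compute CL runtime usage; the shapes, the 2x2 max-pooling parameters and the optional U32 indices output (as exercised by the new *Indices* fixtures) are illustrative assumptions, while the PoolingLayerInfo constructor mirrors the form used in the Validate dataset above.

// Minimal sketch, assuming standard arm_compute CL runtime usage; shapes are illustrative.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

using namespace arm_compute;

int main()
{
    // Initialise the default OpenCL context and queue used by CL functions.
    CLScheduler::get().default_init();

    // Illustrative 27x13 FP32 input with 2 channels (default NCHW layout).
    CLTensor src;
    src.allocator()->init(TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32));

    // 2x2 max pooling, stride 1, no padding: 27x13 -> 26x12.
    CLTensor dst;
    dst.allocator()->init(TensorInfo(TensorShape(26U, 12U, 2U), 1, DataType::F32));

    // Optional indices output (U32, same spatial shape as the pooled output),
    // assumed to be passed via configure()'s optional indices argument.
    CLTensor indices;
    indices.allocator()->init(TensorInfo(TensorShape(26U, 12U, 2U), 1, DataType::U32));

    // Same PoolingLayerInfo constructor form as in the Validate dataset above.
    const PoolingLayerInfo pool_info(PoolingType::MAX, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0));

    CLPoolingLayer pool;
    pool.configure(&src, &dst, pool_info, &indices);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    indices.allocator()->allocate();

    // ... fill src with input data (e.g. via map()/unmap()) ...

    pool.run();
    CLScheduler::get().sync();
    return 0;
}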