author     Freddie Liardet <frederick.liardet@arm.com>        2021-05-04 12:41:16 +0100
committer  frederick.liardet <frederick.liardet@arm.com>      2021-05-13 13:13:06 +0000
commit     afcbb8f47427405a35be508425376286f0fd7a70 (patch)
tree       b373f2d2a6a94b53116c5a53da7c4b4181753486  /tests/validation/NEON/PoolingLayer.cpp
parent     fd83bc8894007c2c9591896ba4229c99d8236a7a (diff)
download   ComputeLibrary-afcbb8f47427405a35be508425376286f0fd7a70.tar.gz
Fix Pooling Layer Bug when input is 1xN size
Return error in pooling layer when any calculated output dimension is less than 1.
Simplify use of pooling layer output dimension values in CpuPoolingKernel.cpp.
Remove some invalid tests in cpu/gpu pooling layers.

Resolves COMPMID-4358.

Signed-off-by: Freddie Liardet <frederick.liardet@arm.com>
Change-Id: If8f8ffec579d3eca1c27a45e5b0b684a77103cff
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5559
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
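The check the commit describes can be illustrated with a minimal, self-contained C++ sketch (this is not the ComputeLibrary implementation; pooled_dim is a hypothetical helper). With the 1U x 16U input and 2x2 MAX pool that the patch adds to the Validate dataset below, the computed output width is 0, so validate() is expected to reject the configuration (the last "Expected" entry is false).

    // Minimal sketch of the rejected case, assuming the usual pooling
    // output-size formula; not the ComputeLibrary API.
    #include <cstdio>

    // Hypothetical helper: output size of one spatial dimension for a
    // pooling window of size `kernel` with the given stride and padding.
    static int pooled_dim(int in, int kernel, int stride, int pad)
    {
        return (in + 2 * pad - kernel) / stride + 1;
    }

    int main()
    {
        // The 1x16 case added to the dataset: 2x2 MAX pool, stride 1, no padding.
        const int out_w = pooled_dim(1, 2, 1, 0);  // (1 - 2) / 1 + 1 = 0
        const int out_h = pooled_dim(16, 2, 1, 0); // 15

        // The fix described above: any output dimension < 1 is an error,
        // so validate() should return false for this configuration.
        if(out_w < 1 || out_h < 1)
        {
            std::printf("invalid pooling config: output %dx%d\n", out_w, out_h);
            return 1;
        }
        return 0;
    }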
Diffstat (limited to 'tests/validation/NEON/PoolingLayer.cpp')
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp  |  10
1 file changed, 5 insertions, 5 deletions
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index acc9c3e516..b70a18907f 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -97,6 +97,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8), // Invalid exclude_padding = false with quantized type, no actual padding and NHWC
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
@@ -106,6 +107,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
@@ -115,8 +117,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
+ PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
})),
- framework::dataset::make("Expected", { false, false, false, false, true, false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, false, true, false, false})),
input_info, output_info, pool_info, expected)
{
bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info));
@@ -145,15 +148,12 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunIndices, NEPoolingLayerIndicesFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerIndicesDatasetFPSmall,
framework::dataset::make("DataType",
DataType::F32))),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })
-
- ))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
validate(Accessor(_target_indices), _ref_indices);
}
-
FIXTURE_DATA_TEST_CASE(RunSpecial, NESpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
{
// Validate output