author     Freddie Liardet <frederick.liardet@arm.com>   2021-05-04 12:41:16 +0100
committer  frederick.liardet <frederick.liardet@arm.com> 2021-05-13 13:13:06 +0000
commit     afcbb8f47427405a35be508425376286f0fd7a70 (patch)
tree       b373f2d2a6a94b53116c5a53da7c4b4181753486 /tests/validation/CL/PoolingLayer.cpp
parent     fd83bc8894007c2c9591896ba4229c99d8236a7a (diff)
download   ComputeLibrary-afcbb8f47427405a35be508425376286f0fd7a70.tar.gz
Fix Pooling Layer Bug when input is 1xN size
Return error in pooling layer when any calculated output dimension is less than 1.
Simplify use of pooling layer output dimension values in CpuPoolingKernel.cpp.
Remove some invalid tests in cpu/gpu pooling layers.

Resolves COMPMID-4358.

Signed-off-by: Freddie Liardet <frederick.liardet@arm.com>
Change-Id: If8f8ffec579d3eca1c27a45e5b0b684a77103cff
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5559
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
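For context, the rule described in the commit message amounts to computing the pooled output dimensions from the input size, pool size, stride and padding, and rejecting the configuration when either dimension comes out below 1. The following is a minimal standalone sketch of that idea, not the library's actual validate() code; the helper names compute_pooled_size and pooling_config_is_valid are hypothetical, and floor-mode output rounding with the test case's parameters is assumed.

```cpp
#include <cstdio>

// Hypothetical helper: pooled output size for one spatial dimension,
// out = (in + pad_before + pad_after - pool) / stride + 1 (floor-style rounding assumed).
static int compute_pooled_size(int in, int pool, int stride, int pad_before, int pad_after)
{
    return (in + pad_before + pad_after - pool) / stride + 1;
}

// Sketch of the rule added by this patch: any pooled dimension < 1 is a validation error.
static bool pooling_config_is_valid(int in_w, int in_h, int pool_w, int pool_h,
                                    int stride_x, int stride_y,
                                    int pad_left, int pad_right, int pad_top, int pad_bottom)
{
    const int out_w = compute_pooled_size(in_w, pool_w, stride_x, pad_left, pad_right);
    const int out_h = compute_pooled_size(in_h, pool_h, stride_y, pad_top, pad_bottom);
    return out_w >= 1 && out_h >= 1;
}

int main()
{
    // The new test case added below: NHWC input of shape (C=1, W=16, H=1),
    // 2x2 max pool, stride 1, no padding. Pooled height = (1 - 2) / 1 + 1 = 0,
    // so validate() is expected to fail (Expected = false).
    std::printf("valid: %d\n", pooling_config_is_valid(16, 1, 2, 2, 1, 1, 0, 0, 0, 0));
    return 0;
}
```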
Diffstat (limited to 'tests/validation/CL/PoolingLayer.cpp')
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 0153e659ae..63dec3910f 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -101,6 +101,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
@@ -110,6 +111,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 2, 0)),
@@ -119,8 +121,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
+ PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
})),
- framework::dataset::make("Expected", { false, false, false, false, true, false, true, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, false, true, true , false})),
input_info, output_info, pool_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);