From afcbb8f47427405a35be508425376286f0fd7a70 Mon Sep 17 00:00:00 2001
From: Freddie Liardet
Date: Tue, 4 May 2021 12:41:16 +0100
Subject: Fix Pooling Layer Bug when input is 1xN size

Return error in pooling layer when any calculated output dimension is less than 1.
Simplify use of pooling layer output dimension values in CpuPoolingKernel.cpp.
Remove some invalid tests in cpu/gpu pooling layers.

Resolves COMPMID-4358.

Signed-off-by: Freddie Liardet
Change-Id: If8f8ffec579d3eca1c27a45e5b0b684a77103cff
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5559
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 tests/validation/NEON/PoolingLayer.cpp | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index acc9c3e516..b70a18907f 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -97,6 +97,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),     // Invalid output Global Pooling
                                             TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8), // Invalid exclude_padding = false with quantized type, no actual padding and NHWC
                                             TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
                                           }),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
                                             TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
@@ -106,6 +107,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
                                             TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
                                             TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+                                            TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
                                           })),
     framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
                                            PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
@@ -115,8 +117,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                            PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
                                            PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
                                            PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
+                                           PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
                                           })),
-    framework::dataset::make("Expected", { false, false, false, false, true, false, false, false, true })),
+    framework::dataset::make("Expected", { false, false, false, false, true, false, true, false, false})),
     input_info, output_info, pool_info, expected)
 {
     bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info));
@@ -145,15 +148,12 @@ TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunIndices, NEPoolingLayerIndicesFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
                                                                                                                    combine(PoolingLayerIndicesDatasetFPSmall,
                                                                                                                            framework::dataset::make("DataType", DataType::F32))),
-                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })
-
-                                                                                                           ))
+                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
     validate(Accessor(_target_indices), _ref_indices);
 }
-
 FIXTURE_DATA_TEST_CASE(RunSpecial, NESpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
 {
     // Validate output
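
For context on why the new 1x16 Validate case must be rejected: with the usual floor-style pooling arithmetic, out = (in + pad_before + pad_after - pool_size) / stride + 1, so a pool of size 2 with stride 1 and no padding over a dimension of size 1 yields (1 - 2) / 1 + 1 = 0, an output dimension below 1. A minimal standalone sketch of that check follows; pooled_dim() is an illustrative helper written for this note, not a ComputeLibrary API:

#include <cstdio>

// Illustrative helper (not a ComputeLibrary API): floor-style pooled size,
// out = (in + pad_before + pad_after - pool_size) / stride + 1.
static int pooled_dim(int in, int pool_size, int stride, int pad_before, int pad_after)
{
    return (in + pad_before + pad_after - pool_size) / stride + 1;
}

int main()
{
    // The new Validate entry: TensorShape(1U, 16U, 1U) in NHWC reads as
    // C=1, W=16, H=1, pooled with PoolingType::MAX, pool size 2, and
    // PadStrideInfo(1, 1, 0, 0), i.e. stride 1x1 with no padding.
    const int out_w = pooled_dim(16, 2, 1, 0, 0); // (16 - 2) / 1 + 1 = 15
    const int out_h = pooled_dim(1, 2, 1, 0, 0);  // (1 - 2) / 1 + 1  = 0

    // After this patch, NEPoolingLayer::validate() returns an error for any
    // computed output dimension below 1, hence "Expected" is false above.
    if (out_w < 1 || out_h < 1)
    {
        std::printf("invalid pooled output %dx%d\n", out_w, out_h);
        return 1;
    }
    return 0;
}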