From afcbb8f47427405a35be508425376286f0fd7a70 Mon Sep 17 00:00:00 2001
From: Freddie Liardet
Date: Tue, 4 May 2021 12:41:16 +0100
Subject: Fix Pooling Layer Bug when input is 1xN size

Return error in pooling layer when any calculated output dimension is less than 1.
Simplify use of pooling layer output dimension values in CpuPoolingKernel.cpp.
Remove some invalid tests in cpu/gpu pooling layers.

Resolves COMPMID-4358.

Signed-off-by: Freddie Liardet
Change-Id: If8f8ffec579d3eca1c27a45e5b0b684a77103cff
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5559
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 arm_compute/core/utils/misc/ShapeCalculator.h | 30 ++++++++++++++++++----------------
 1 file changed, 16 insertions(+), 14 deletions(-)

(limited to 'arm_compute/core/utils')

diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 8e49c068af..d0dc202f91 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -759,25 +759,27 @@ inline TensorShape compute_min_max_shape(const ITensorInfo *input)
  */
 inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
 {
-    unsigned int pooled_w = 0;
-    unsigned int pooled_h = 0;
+    int pooled_w = 0;
+    int pooled_h = 0;
 
     TensorShape output_shape{ input.tensor_shape() };
 
-    const bool         is_global_pooling = pool_info.is_global_pooling;
-    const unsigned int idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
-    const unsigned int idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
-    const unsigned int pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
-    const unsigned int pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;
+    const bool is_global_pooling = pool_info.is_global_pooling;
+    const int  idx_width         = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
+    const int  idx_height        = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
+    const int  input_width       = input.tensor_shape()[idx_width];
+    const int  input_height      = input.tensor_shape()[idx_height];
+    const int  pool_size_x       = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
+    const int  pool_size_y       = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;
 
-    std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
-                                                     output_shape[idx_height],
-                                                     pool_size_x,
-                                                     pool_size_y,
-                                                     pool_info.pad_stride_info);
+    std::tie(pooled_w, pooled_h) = scaled_dimensions_signed(input_width, input_height,
+                                                            pool_size_x, pool_size_y,
+                                                            pool_info.pad_stride_info);
 
-    output_shape.set(idx_width, pooled_w);
-    output_shape.set(idx_height, pooled_h);
+    ARM_COMPUTE_ERROR_ON_MSG((pooled_w < 1 || pooled_h < 1), "Calculated output dimension size is invalid");
+
+    output_shape.set(idx_width, static_cast<size_t>(pooled_w));
+    output_shape.set(idx_height, static_cast<size_t>(pooled_h));
 
     return output_shape;
 }
--
cgit v1.2.1
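
Editor's note: the sketch below is not part of this patch or of ComputeLibrary. It is a minimal standalone illustration, assuming FLOOR rounding and no padding, of why a 1xN input can yield a pooled dimension smaller than 1 (the case the new ARM_COMPUTE_ERROR_ON_MSG check rejects) and why signed arithmetic is needed to detect it. The helper name pooled_dim and its signature are hypothetical, not the library's scaled_dimensions_signed API.

    #include <cmath>
    #include <cstdio>

    // Signed pooled-size formula with floor rounding. With the previous unsigned
    // arithmetic, (input + pad_total - pool_size) would wrap around to a huge
    // value instead of going negative, hiding the invalid result.
    static int pooled_dim(int input, int pool_size, int pad_total, int stride)
    {
        const float scaled = static_cast<float>(input + pad_total - pool_size) / static_cast<float>(stride);
        return static_cast<int>(std::floor(scaled)) + 1;
    }

    int main()
    {
        // Height-1 input with a 2x2 pool, stride 2, no padding: pooled height is 0,
        // which the patched compute_pool_shape() now reports as invalid.
        std::printf("pooled height = %d\n", pooled_dim(/*input*/ 1, /*pool_size*/ 2, /*pad_total*/ 0, /*stride*/ 2));
        return 0;
    }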