From 6e464c37b5335e362ac3f988cc4b0beed5205ff4 Mon Sep 17 00:00:00 2001 From: Isabella Gottardi Date: Fri, 26 Jan 2018 12:32:45 +0000 Subject: COMPMID-828 - Add support for non square pool size - Part1 Change-Id: Ib8100e7c659c49694c746fa3f36ce20f44f6929f Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/117804 Reviewed-by: Michele DiGiorgio Reviewed-by: Georgios Pinitas Tested-by: Jenkins --- src/core/CL/kernels/CLPoolingLayerKernel.cpp | 7 ++++--- .../GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp | 9 +++++---- src/core/NEON/kernels/NEPoolingLayerKernel.cpp | 19 ++++++++++--------- src/runtime/NEON/functions/NEPoolingLayer.cpp | 2 +- 4 files changed, 20 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp index 860cc92266..043a4bde04 100644 --- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp +++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp @@ -63,12 +63,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c "Unsupported combination of parameters!"); const bool is_global_pooling = pool_info.is_global_pooling(); - const unsigned int pool_size = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size(); + const unsigned int pool_size = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size().width; ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_global_pooling && (input->tensor_shape().x() != input->tensor_shape().y()), "Global pooling is supported only with rectangular inputs!"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_global_pooling && ((pool_info.pad_stride_info().pad().first >= pool_size) || (pool_info.pad_stride_info().pad().second >= pool_size)), "Invalid pool size and pool pad combination!"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_info.pool_size().width != pool_info.pool_size().height, "Invalid Pool size, width not equal to height!"); // Checks performed when output is configured if(output->total_size() != 0) @@ -98,7 +99,7 @@ std::tuple validate_and_configure_window(ITenso int pool_stride_y = 0; unsigned int pooled_w = 0; unsigned int pooled_h = 0; - int pool_size = pool_info.pool_size(); + int pool_size = pool_info.pool_size().width; const PadStrideInfo pad_stride_info = pool_info.pad_stride_info(); std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad(); std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride(); @@ -171,7 +172,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int pooled_w = 0; unsigned int pooled_h = 0; const PoolingType pool_type = pool_info.pool_type(); - int pool_size = pool_info.pool_size(); + int pool_size = pool_info.pool_size().width; const PadStrideInfo pad_stride_info = pool_info.pad_stride_info(); const bool exclude_padding = pool_info.exclude_padding(); std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad(); diff --git a/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp index 6451db741d..64b94c0334 100644 --- a/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp +++ b/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -62,12 +62,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c "Unsupported combination of parameters!"); const bool is_global_pooling = pool_info.is_global_pooling(); - const unsigned int pool_size = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size(); + const unsigned int pool_size = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size().width; ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_global_pooling && (input->tensor_shape().x() != input->tensor_shape().y()), "Global pooling is supported only with rectangular inputs!"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_global_pooling && ((pool_info.pad_stride_info().pad().first >= pool_size) || (pool_info.pad_stride_info().pad().second >= pool_size)), "Invalid pool size and pool pad combination!"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(pool_info.pool_size().width != pool_info.pool_size().height, "Invalid Pool size, width not equal to height!"); // Checks performed when output is configured if(output->total_size() != 0) @@ -97,7 +98,7 @@ std::tuple validate_and_configure_window(ITenso int pool_stride_y = 0; unsigned int pooled_w = 0; unsigned int pooled_h = 0; - int pool_size = pool_info.pool_size(); + int pool_size = pool_info.pool_size().width; const PadStrideInfo pad_stride_info = pool_info.pad_stride_info(); std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad(); std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride(); @@ -229,7 +230,7 @@ void GCPoolingLayerKernel::configure(const IGCTensor *input, IGCTensor *output, unsigned int pooled_w = 0; unsigned int pooled_h = 0; const PoolingType pool_type = pool_info.pool_type(); - int pool_size = pool_info.pool_size(); + int pool_size = pool_info.pool_size().width; const PadStrideInfo pad_stride_info = pool_info.pad_stride_info(); const bool exclude_padding = pool_info.exclude_padding(); std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad(); diff --git a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp index be5fa4cc4c..a3ab8a361f 100644 --- a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp +++ b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp @@ -151,7 +151,7 @@ inline void scale_vector_s16x8(uint16x8_t &v, const Coordinates &id, int id_offs v = vsetq_lane_u16(elems[7], v, 7); } -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, unsigned int &pooled_w, unsigned int pooled_h, int pool_size) +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, unsigned int &pooled_w, unsigned int pooled_h, int pool_size_x, int pool_size_y) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); @@ -166,10 +166,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(pool_type == PoolingType::L2 && is_data_type_quantized(input->data_type())); - ARM_COMPUTE_RETURN_ERROR_ON((supported_pool_sizes.find(pool_size) == supported_pool_sizes.end()) && ((input->data_type() != DataType::F32) && (input->data_type() != DataType::QASYMM8))); + ARM_COMPUTE_RETURN_ERROR_ON((supported_pool_sizes.find(pool_size_x) == supported_pool_sizes.end()) && ((input->data_type() != DataType::F32) && (input->data_type() != DataType::QASYMM8))); 
ARM_COMPUTE_RETURN_ERROR_ON(is_global_pooling && (input->tensor_shape().x() != input->tensor_shape().y())); ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_fixed_point(input->data_type()) && pool_stride_x > 2); ARM_COMPUTE_RETURN_ERROR_ON(exclude_padding && is_data_type_fixed_point(input->data_type())); + ARM_COMPUTE_RETURN_ERROR_ON(pool_size_x != pool_size_y); if(output->total_size() != 0) { @@ -370,7 +371,7 @@ void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, cons const int pool_stride_x = pad_stride_info.stride().first; // Update pool size in case of global pooling - const int pool_size = is_global_pooling ? input->info()->dimension(0) : pool_info.pool_size(); + const int pool_size = is_global_pooling ? input->info()->dimension(0) : pool_info.pool_size().width; // Validate pool info before calling scaled_dimensions ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_pool_info(input->info(), pool_info, pool_size)); @@ -387,7 +388,7 @@ void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, cons auto_init(input->info(), output->info(), pooled_w, pooled_h); // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info, pooled_w, pooled_h, pool_size)); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), pool_info, pooled_w, pooled_h, pool_size, pool_size)); // Set instance variables _input = input; @@ -1491,7 +1492,7 @@ void NEPoolingLayerKernel::poolingN_f32(const Window &window_input, const Window Iterator input(_input, window_input); Iterator output(_output, window); - const int pool_size = _pool_info.is_global_pooling() ? _input->info()->tensor_shape().x() : _pool_info.pool_size(); + const int pool_size = _pool_info.is_global_pooling() ? _input->info()->tensor_shape().x() : _pool_info.pool_size().width; const int pool_pad_right = _pool_info.pad_stride_info().pad_right(); const int pool_pad_top = _pool_info.pad_stride_info().pad_top(); const int pool_pad_left = _pool_info.pad_stride_info().pad_left(); @@ -1613,7 +1614,7 @@ void NEPoolingLayerKernel::poolingN_qasymm8(const Window &window_input, const Wi Iterator input(_input, window_input); Iterator output(_output, window); - const int pool_size = _pool_info.is_global_pooling() ? _input->info()->tensor_shape().x() : _pool_info.pool_size(); + const int pool_size = _pool_info.is_global_pooling() ? _input->info()->tensor_shape().x() : _pool_info.pool_size().width; const int pool_pad_right = _pool_info.pad_stride_info().pad_right(); const int pool_pad_top = _pool_info.pad_stride_info().pad_top(); const int pool_pad_left = _pool_info.pad_stride_info().pad_left(); @@ -1712,7 +1713,7 @@ Status NEPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInf BorderSize border_size(0); const bool is_global_pooling = pool_info.is_global_pooling(); - const unsigned int pool_size = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size(); + const unsigned int pool_size = is_global_pooling ? 
input->tensor_shape().x() : pool_info.pool_size().width; // Validate pool info befor calling scaled_dimensions ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_pool_info(input, pool_info, pool_size)); @@ -1724,7 +1725,7 @@ Status NEPoolingLayerKernel::validate(const ITensorInfo *input, const ITensorInf pool_size, pool_info.pad_stride_info()); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, pool_info, pooled_w, pooled_h, pool_size)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, pool_info, pooled_w, pooled_h, pool_size, pool_size)); ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), pool_info, num_elems_processed_per_iteration, border_size, pooled_w, pooled_h, pool_size).first); return Status{}; @@ -1739,7 +1740,7 @@ void NEPoolingLayerKernel::run(const Window &window, const ThreadInfo &info) const unsigned int pool_stride_x = _pool_info.pad_stride_info().stride().first; const unsigned int pool_stride_y = _pool_info.pad_stride_info().stride().second; - const unsigned int pool_size = _pool_info.pool_size(); + const unsigned int pool_size = _pool_info.pool_size().width; // Set step for input in x and y direction for the input Window window_input(window); diff --git a/src/runtime/NEON/functions/NEPoolingLayer.cpp b/src/runtime/NEON/functions/NEPoolingLayer.cpp index 8a32507a73..bc0b6f86d3 100644 --- a/src/runtime/NEON/functions/NEPoolingLayer.cpp +++ b/src/runtime/NEON/functions/NEPoolingLayer.cpp @@ -38,7 +38,7 @@ NEPoolingLayer::NEPoolingLayer() void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info) { // Check if we have Global Pooling Layer - _is_global_pooling_layer = (input->info()->dimension(0) == pool_info.pool_size()) && (input->info()->dimension(1) == pool_info.pool_size()); + _is_global_pooling_layer = (input->info()->dimension(0) == pool_info.pool_size().width) && (input->info()->dimension(1) == pool_info.pool_size().height); // Configure pooling kernel _pooling_layer_kernel.configure(input, output, pool_info); -- cgit v1.2.1
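
For context on what this Part 1 plumbing enables, below is a minimal standalone C++ sketch, not taken from the patch and not the library's actual `PoolingLayerInfo` / `scaled_dimensions()` API: it shows pooled output dimensions computed from a separately carried window width and height (the role of the new `pool_size().width` / `pool_size().height`), while still asserting a square window, mirroring the `width != height` checks added above. All names in the snippet (`PoolWindow`, `pooled_dims`) are hypothetical.

```cpp
// Standalone illustration only; assumes symmetric padding and floor rounding
// for simplicity. Not the Compute Library API.
#include <cassert>
#include <cstdio>
#include <utility>

struct PoolWindow
{
    unsigned int width;
    unsigned int height;
};

// Hypothetical helper: derive pooled output width/height from separate
// window dimensions, strides and pads.
std::pair<unsigned int, unsigned int> pooled_dims(unsigned int in_w, unsigned int in_h,
                                                  const PoolWindow &pool,
                                                  unsigned int pad_x, unsigned int pad_y,
                                                  unsigned int stride_x, unsigned int stride_y)
{
    const unsigned int out_w = (in_w + 2 * pad_x - pool.width) / stride_x + 1;
    const unsigned int out_h = (in_h + 2 * pad_y - pool.height) / stride_y + 1;
    return { out_w, out_h };
}

int main()
{
    const PoolWindow pool{ 3, 3 };

    // Part 1 only threads the two dimensions through the kernels; non-square
    // windows are still rejected, as the new width != height checks do above.
    assert(pool.width == pool.height);

    const auto out = pooled_dims(/*in_w=*/32, /*in_h=*/32, pool,
                                 /*pad_x=*/0, /*pad_y=*/0,
                                 /*stride_x=*/2, /*stride_y=*/2);
    std::printf("pooled output: %ux%u\n", out.first, out.second); // 15x15
    return 0;
}
```

Once later parts relax the square-window checks in the kernels, the same two-dimension plumbing shown here lets callers pass distinct width and height without further interface changes.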