From 15997879873b374ea297197fc4aafb15e38b938b Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Mon, 19 Feb 2018 13:58:22 +0000
Subject: COMPMID-934: Asymmetric padding support.

Change-Id: Ibe7a679e4c053a088b8c893e495c97cb24bf7272
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/121298
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 .../CLDepthwiseConvolutionLayer3x3Kernel.cpp      |  8 ++--
 .../CL/kernels/CLDirectConvolutionLayerKernel.cpp | 25 +++++------
 src/core/CL/kernels/CLPoolingLayerKernel.cpp      | 48 ++++++++++------------
 .../kernels/NEDirectConvolutionLayerKernel.cpp    | 31 +++++++-------
 4 files changed, 51 insertions(+), 61 deletions(-)

(limited to 'src/core')

diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
index c7cee4c387..c24420a7e3 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -121,10 +121,10 @@ void CLDepthwiseConvolutionLayer3x3Kernel::configure(const ICLTensor *input, con
     const GPUTarget gpu_target = get_arch_from_target(get_target());
 
     // Configure kernel window
-    const unsigned int conv_pad_left   = std::max(conv_info.pad_left(), 1U);
-    const unsigned int conv_pad_top    = std::max(conv_info.pad_top(), 1U);
-    const unsigned int conv_pad_right  = std::max(conv_info.pad_right(), 1U);
-    const unsigned int conv_pad_bottom = std::max(conv_info.pad_bottom(), 1U);
+    const unsigned int conv_pad_left   = conv_info.pad_left();
+    const unsigned int conv_pad_top    = conv_info.pad_top();
+    const unsigned int conv_pad_right  = conv_info.pad_right();
+    const unsigned int conv_pad_bottom = conv_info.pad_bottom();
 
     unsigned int num_elems_read_per_iteration_x = 0;
     unsigned int num_elems_read_per_iteration_y = 0;
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index 4b141f7ecd..ac3c9ac4a6 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -126,10 +126,10 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
     unsigned int conv_stride_x = std::get<0>(conv_info.stride());
     unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-    unsigned int conv_pad_left   = std::max(conv_info.pad_left(), kernel_size / 2);
-    unsigned int conv_pad_top    = std::max(conv_info.pad_top(), kernel_size / 2);
-    unsigned int conv_pad_right  = std::max(conv_info.pad_right(), kernel_size / 2);
-    unsigned int conv_pad_bottom = std::max(conv_info.pad_bottom(), kernel_size / 2);
+    unsigned int conv_pad_left   = conv_info.pad_left();
+    unsigned int conv_pad_top    = conv_info.pad_top();
+    unsigned int conv_pad_right  = conv_info.pad_right();
+    unsigned int conv_pad_bottom = conv_info.pad_bottom();
 
     unsigned int num_elems_read_per_iteration_x = 0;
     unsigned int num_elems_read_per_iteration_y = 0;
@@ -302,18 +302,13 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
 
     _conv_stride_x = std::get<0>(conv_info.stride());
     _conv_stride_y = std::get<1>(conv_info.stride());
+    _border_size   = BorderSize(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
 
     _input   = input;
     _weights = weights;
     _output  = output;
     _biases  = biases;
 
-    int conv_pad_left   = std::min(conv_info.pad_left(), kernel_size / 2);
-    int conv_pad_top    = std::min(conv_info.pad_top(), kernel_size / 2);
-    int conv_pad_right  = std::min(conv_info.pad_right(), kernel_size / 2);
-    int conv_pad_bottom = std::min(conv_info.pad_bottom(), kernel_size / 2);
-    _border_size        = BorderSize(conv_pad_top, conv_pad_right, conv_pad_bottom, conv_pad_left);
-
     const GPUTarget gpu_target = get_arch_from_target(get_target());
 
     std::stringstream kernel_name;
@@ -450,13 +445,13 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
     _config_id += "_";
     _config_id += support::cpp11::to_string(kernel_size);
     _config_id += "_";
-    _config_id += support::cpp11::to_string(conv_pad_left);
+    _config_id += support::cpp11::to_string(border_size().left);
     _config_id += "_";
-    _config_id += support::cpp11::to_string(conv_pad_top);
+    _config_id += support::cpp11::to_string(border_size().top);
     _config_id += "_";
-    _config_id += support::cpp11::to_string(conv_pad_right);
+    _config_id += support::cpp11::to_string(border_size().right);
     _config_id += "_";
-    _config_id += support::cpp11::to_string(conv_pad_bottom);
+    _config_id += support::cpp11::to_string(border_size().bottom);
     _config_id += "_";
     _config_id += support::cpp11::to_string(_conv_stride_x);
     _config_id += "_";
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index c515ed68e7..b3034e10cc 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -61,15 +61,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(input->data_type()) && pool_info.pool_type() == PoolingType::L2),
                                     "Unsupported combination of parameters!");
-    ARM_COMPUTE_RETURN_ERROR_ON(!pool_info.pad_stride_info().padding_is_symmetric());
 
     const bool is_global_pooling = pool_info.is_global_pooling();
     const unsigned int pool_size_x = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size().width;
     const unsigned int pool_size_y = is_global_pooling ? input->tensor_shape().y() : pool_info.pool_size().height;
 
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_global_pooling && ((pool_info.pad_stride_info().pad().first >= pool_size_x) || (pool_info.pad_stride_info().pad().second >= pool_size_y)),
-                                    "Invalid pool size and pool pad combination!");
-
     // Checks performed when output is configured
     if(output->total_size() != 0)
     {
@@ -92,8 +88,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
 
 std::tuple validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &pool_info)
 {
-    int pool_pad_x = 0;
-    int pool_pad_y = 0;
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     unsigned int pooled_w = 0;
@@ -101,8 +95,11 @@ std::tuple validate_and_configure_window(ITenso
     int pool_size_x = pool_info.is_global_pooling() ? input->dimension(0) : pool_info.pool_size().width;
     int pool_size_y = pool_info.is_global_pooling() ? input->dimension(1) : pool_info.pool_size().height;
     const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
-    std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
     std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
+    const int pool_pad_right  = pad_stride_info.pad_right();
+    const int pool_pad_top    = pad_stride_info.pad_top();
+    const int pool_pad_left   = pad_stride_info.pad_left();
+    const int pool_pad_bottom = pad_stride_info.pad_bottom();
 
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
@@ -115,7 +112,7 @@ std::tuple validate_and_configure_window(ITenso
 
     auto_init(input, output, pooled_w, pooled_h);
 
-    BorderSize border_size = BorderSize(pool_pad_y, pool_pad_x);
+    BorderSize border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
     const DataType data_type = input->data_type();
 
     const int input_width = input->dimension(0);
@@ -131,15 +128,15 @@ std::tuple validate_and_configure_window(ITenso
     const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
 
     // Upper limit for the number of right/bottom border elements that are accessed
-    const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_x + num_elems_read_per_iteration) - input_width;
-    const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_y + pool_size_y) - input_height;
+    const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - input_width;
+    const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - input_height;
 
-    border_size.right  = std::max(upper_bound_w, pool_pad_x);
-    border_size.bottom = std::max(upper_bound_h, pool_pad_y);
+    border_size.right  = std::max(upper_bound_w, pool_pad_right);
+    border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
 
     Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
 
-    AccessWindowRectangle input_access(input, -pool_pad_x, -pool_pad_y, num_elems_read_per_iteration, pool_size_y,
+    AccessWindowRectangle input_access(input, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
                                        pool_stride_x * num_elems_processed_per_iteration, pool_stride_y);
     AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
     bool window_changed = update_window_and_padding(win, input_access, output_access);
@@ -162,8 +159,6 @@ BorderSize CLPoolingLayerKernel::border_size() const
 
 void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info)
 {
-    int pool_pad_x = 0;
-    int pool_pad_y = 0;
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     unsigned int pooled_w = 0;
@@ -173,8 +168,9 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
     const int pool_size_x = pool_info.is_global_pooling() ? input->info()->dimension(0) : pool_info.pool_size().width;
     const int pool_size_y = pool_info.is_global_pooling() ? input->info()->dimension(1) : pool_info.pool_size().height;
     const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
     const bool exclude_padding = pool_info.exclude_padding();
-    std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
     std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
+    const int pool_pad_top  = pad_stride_info.pad_top();
+    const int pool_pad_left = pad_stride_info.pad_left();
 
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
@@ -207,11 +203,11 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
     if(pool_type != PoolingType::MAX)
     {
         build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
-        build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0) + (exclude_padding ? 0 : pool_pad_x)));
-        build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1) + (exclude_padding ? 0 : pool_pad_y)));
+        build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0) + (exclude_padding ? 0 : pool_pad_left)));
+        build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1) + (exclude_padding ? 0 : pool_pad_top)));
         build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
-        build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_x));
-        build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_y));
+        build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
+        build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
     }
 
     // Create kernel
@@ -278,8 +274,8 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue)
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
 
-    unsigned int pool_pad_x, pool_pad_y, pool_stride_x, pool_stride_y = 0;
-    std::tie(pool_pad_x, pool_pad_y) = _pool_info.pad_stride_info().pad();
+    unsigned int pool_stride_x = 0;
+    unsigned int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info().stride();
 
     Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
@@ -289,11 +285,11 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue)
     {
         // Upsample input by pool size
         Window in_slice(slice);
-        in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - pool_pad_x,
-                                                     (in_slice.x().end() - pool_pad_x) * pool_stride_x,
+        in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info().pad_left(),
+                                                     (in_slice.x().end() - _pool_info.pad_stride_info().pad_left()) * pool_stride_x,
                                                      pool_stride_x * _num_elems_processed_per_iteration));
-        in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - pool_pad_y,
-                                                     (in_slice.y().end() - pool_pad_y) * pool_stride_y,
+        in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info().pad_top(),
+                                                     (in_slice.y().end() - _pool_info.pad_stride_info().pad_top()) * pool_stride_y,
                                                      pool_stride_y));
 
         // Set inputs
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
index 536a667799..4dc186a8a7 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
@@ -274,6 +274,7 @@ public:
         ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimX) > small_tensor_size_optim);
         ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimY) > small_tensor_size_optim);
 
+        const int input_stride_x = input->info()->strides_in_bytes().x();
         const int input_stride_y = input->info()->strides_in_bytes().y();
         const int input_stride_z = input->info()->strides_in_bytes().z();
         const int output_stride_y = output->info()->strides_in_bytes().y();
@@ -284,6 +285,8 @@ public:
         const int range_z = window.z().end() - window.z().start();
         const int kernel_depth = weights->info()->dimension(Window::DimZ);
         const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
+        const unsigned int conv_pad_left = conv_info.pad_left();
+        const unsigned int conv_pad_top  = conv_info.pad_top();
 
         // setup output window for the iterator
         Window window_out = window;
@@ -307,7 +310,7 @@ public:
 
         execute_window_loop(window_out, [&](const Coordinates & id)
         {
-            const uint8_t *input_ptr = in.ptr();
+            const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
             uint8_t *out_ptr = out.ptr();
             int ih = 0;
             int oh = 0;
@@ -351,6 +354,7 @@ public:
     static void convolve(const Window &window, unsigned int num_elems_read_per_iteration, unsigned int num_elems_written_per_iteration,
                          const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
     {
+        const int input_stride_x = input->info()->strides_in_bytes().x();
         const int input_stride_y = input->info()->strides_in_bytes().y();
         const int input_stride_z = input->info()->strides_in_bytes().z();
         const int output_stride_y = output->info()->strides_in_bytes().y();
@@ -362,6 +366,8 @@ public:
         const int range_z = window.z().end() - window.z().start();
         const int kernel_depth = weights->info()->dimension(Window::DimZ);
         const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
+        const unsigned int conv_pad_left = conv_info.pad_left();
+        const unsigned int conv_pad_top  = conv_info.pad_top();
         const int fixed_point_position = input->info()->fixed_point_position();
 
         // setup output window for the iterator
@@ -389,7 +395,7 @@ public:
 
             /* For a detailed explanation on how the algorithm works refer to template <> class convolver_3x3<1> */
-            const uint8_t *input_ptr = in.ptr();
+            const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
             uint8_t *out_ptr = out.ptr();
             int ih = 0;
             int oh = 0;
@@ -680,8 +686,8 @@ public:
        const int delta_input = get_input_num_elems_processed(num_elems_written_per_iteration);
        const int kernel_depth = weights->info()->dimension(Window::DimZ);
        const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-       const unsigned int conv_pad_x = std::get<0>(conv_info.pad());
-       const unsigned int conv_pad_y = std::get<1>(conv_info.pad());
+       const unsigned int conv_pad_left = conv_info.pad_left();
+       const unsigned int conv_pad_top  = conv_info.pad_top();
        const int fixed_point_position = input->info()->fixed_point_position();
 
        // setup output window for the iterator
@@ -707,7 +713,7 @@ public:
 
        execute_window_loop(window_out, [&](const Coordinates & id)
        {
-           const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
+           const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
            uint8_t *out_ptr = out.ptr();
            int ih = 0;
            int oh = 0;
@@ -804,8 +810,8 @@ public:
        const int delta_input = get_input_num_elems_processed(num_elems_written_per_iteration);
        const int kernel_depth = weights->info()->dimension(Window::DimZ);
        const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-       const unsigned int conv_pad_x = std::get<0>(conv_info.pad());
-       const unsigned int conv_pad_y = std::get<1>(conv_info.pad());
+       const unsigned int conv_pad_left = conv_info.pad_left();
+       const unsigned int conv_pad_top  = conv_info.pad_top();
        const int fixed_point_position = input->info()->fixed_point_position();
 
        // setup output window for the iterator
@@ -831,7 +837,7 @@ public:
 
        execute_window_loop(window_out, [&](const Coordinates & id)
        {
-           const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
+           const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
            uint8_t *out_ptr = out.ptr();
            int ih = 0;
            int oh = 0;
@@ -1016,13 +1022,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
-    ARM_COMPUTE_RETURN_ERROR_ON(!conv_info.padding_is_symmetric());
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) == 1 && (std::get<0>(conv_info.pad()) || std::get<1>(conv_info.pad())),
-                                    "Pad > 0 not supported for 1x1 weights");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) == 3 && (std::get<0>(conv_info.pad()) > 1 || std::get<1>(conv_info.pad()) > 1),
-                                    "Pad > 1 not supported for 3x3 weights");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) == 5 && (std::get<0>(conv_info.pad()) > 2 || std::get<1>(conv_info.pad()) > 2),
-                                    "Pad > 2 not supported for 5x5 weights");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported.");
     ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != input->dimension(2));
@@ -1204,7 +1203,7 @@ Status NEDirectConvolutionLayerKernel::validate(const ITensorInfo *input, const
     unsigned int num_weight_elems_read_per_row = 0;
     unsigned int num_elems_read_per_iteration = 0;
     unsigned int num_elems_written_per_iteration = 0;
-    BorderSize border_size(conv_info.pad().first, conv_info.pad().second);
+    BorderSize border_size = {};
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(),
--
cgit v1.2.1
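
Usage note (not part of the patch): with the symmetric-padding checks removed above, callers can now pass distinct left/right/top/bottom values through the PadStrideInfo constructor and the kernels read them via pad_left()/pad_right()/pad_top()/pad_bottom(). The sketch below is illustrative only; the tensor shapes, the CLDirectConvolutionLayer wiring and the main() scaffolding are assumptions for demonstration, not code from this change.

    // Minimal sketch: 3x3 CL direct convolution with asymmetric padding
    // (pad_left = pad_top = 1, pad_right = pad_bottom = 0). Shapes are examples.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        CLTensor src, weights, biases, dst;
        src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32));
        biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
        // Output width/height: (8 + pad_left + pad_right - 3) / stride + 1 = 7
        dst.allocator()->init(TensorInfo(TensorShape(7U, 7U, 16U), 1, DataType::F32));

        // stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom
        const PadStrideInfo conv_info(1, 1, 1, 0, 1, 0, DimensionRoundingType::FLOOR);

        CLDirectConvolutionLayer conv;
        conv.configure(&src, &weights, &biases, &dst, conv_info);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // Fill src/weights/biases (e.g. via map()/unmap()) before running.
        conv.run();
        CLScheduler::get().sync();
        return 0;
    }

The same PadStrideInfo would be accepted by the NEON direct convolution path, since NEDirectConvolutionLayerKernel now reads pad_left()/pad_top() directly instead of the symmetric pad() pair.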