From 881f2ded860fc1db23810076b699c4492556c376 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Fri, 12 Apr 2019 10:29:17 +0100 Subject: COMPMID-2048: Add support for dilation in NEDepthwiseConvolution. Change-Id: If9941e770779fbf918ba5ff0573da9378078b969 Signed-off-by: Usama Arif Reviewed-on: https://review.mlplatform.org/c/999 Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Reviewed-by: Pablo Marquez --- .../NEDepthwiseConvolutionLayer3x3Kernel.cpp | 106 +++++++++++++-------- src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp | 27 +++--- .../CL/functions/CLDepthwiseConvolutionLayer.cpp | 11 +++ .../NEON/functions/NEDepthwiseConvolutionLayer.cpp | 55 +++++++---- .../NEDepthwiseConvolutionAssemblyDispatch.cpp | 6 +- 5 files changed, 134 insertions(+), 71 deletions(-) (limited to 'src') diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp index ec672e0fc4..fdafc2da90 100644 --- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp +++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp @@ -49,7 +49,7 @@ class convolver_3x3 { public: static void convolve(const Window &window, unsigned int num_elems_written_per_iteration, - const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier) + const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation) { const int input_offset = -input->info()->quantization_info().offset; const int weights_offset = -weights->info()->quantization_info().offset; @@ -57,6 +57,7 @@ public: const int input_stride_x = input->info()->strides_in_bytes().x(); const int input_stride_y = input->info()->strides_in_bytes().y(); const int input_stride_z = input->info()->strides_in_bytes().z(); + const int input_stride_w = input->info()->strides_in_bytes()[3]; const int output_stride_y = output->info()->strides_in_bytes().y(); const int kernel_stride_y = weights->info()->strides_in_bytes().y(); const int kernel_stride_z = weights->info()->strides_in_bytes().z(); @@ -74,9 +75,10 @@ public: // setup input window for the iterator Window window_in = window; - // we just want execute_window_loop to iterate over the dimensions > 2, so we set the first 2 dimensions to 0 + // Iteration of input is taken care of in execute_window_loop window_in.set(Window::DimX, Window::Dimension(0, 0, 0)); window_in.set(Window::DimY, Window::Dimension(0, 0, 0)); + window_in.set(Window::DimZ, Window::Dimension(0, 0, 0)); Window window_k = calculate_max_window(*weights->info(), Steps(1u)); @@ -91,7 +93,7 @@ public: int ih = 0; int oh = 0; - const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y - (id.z() - id.z() / depth_multiplier) * input_stride_z; + const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y + (id.z() / depth_multiplier) * input_stride_z + input_stride_w * id[3]; const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z; const auto ptr_weights_r0 = reinterpret_cast(ptr_weights_base); @@ -104,45 +106,54 @@ public: for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y) { auto in_top = reinterpret_cast(input_ptr + (ih + 0) * input_stride_y); - auto in_mid = reinterpret_cast(input_ptr + (ih + 1) * input_stride_y); - auto in_low = reinterpret_cast(input_ptr + (ih + 2) * input_stride_y); - auto p_out = reinterpret_cast(out.ptr() + 
oh * output_stride_y); + auto in_mid = reinterpret_cast(input_ptr + (ih + dilation.y()) * input_stride_y); + auto in_low = reinterpret_cast(input_ptr + (ih + 2 * dilation.y()) * input_stride_y); //uint8 + auto p_out = reinterpret_cast(out.ptr() + oh * output_stride_y); //int32 for(int ow = 0; ow < output_w; ow += num_elems_written_per_iteration, in_top += delta_input, in_mid += delta_input, in_low += delta_input, p_out += num_elems_written_per_iteration) { - auto vres = detail::convolve_3x3(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, input_offset); - detail::store_results(p_out, vres); + if(dilation == Size2D(1U, 1U)) + { + auto vres = detail::convolve_3x3(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, input_offset); + detail::store_results(p_out, vres); + } + else + { + auto vres = detail::convolve_3x3_dilation(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, dilation.x(), input_offset); + detail::store_results(p_out, vres); + } } } }, - in, out); + out); } }; template inline void convolve_3x3(const Window &window, unsigned int num_elems_written_per_iteration, - const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier) + const ITensor *input, const ITensor *weights, ITensor *output, + const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation) { const unsigned int conv_stride_x = std::get<0>(conv_info.stride()); switch(conv_stride_x) { case 1: - convolver_3x3::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier); + convolver_3x3::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation); break; case 2: - convolver_3x3::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier); + convolver_3x3::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation); break; case 3: - convolver_3x3::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier); + convolver_3x3::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation); break; default: ARM_COMPUTE_ERROR("Not implemented"); } } -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier) +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation) { ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); @@ -157,7 +168,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, if(output->total_size() != 0) { - const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier); + const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); if(is_data_type_quantized_asymmetric(input->data_type())) @@ -173,13 +184,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, return Status{}; } -std::pair 
validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier) +std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, + const Size2D &dilation) { Window win; bool window_changed = false; // Get convolved dimensions - const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier); + const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); const DataType output_dt = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type(); // Output auto inizialitation if not yet initialized @@ -197,15 +209,16 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen switch(input->data_type()) { case DataType::QASYMM8: - num_elems_read_per_iteration = 16; + num_elems_read_per_iteration = 16 + 15 * (dilation.x() - 1); break; #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: - num_elems_read_per_iteration = 24; + num_elems_written_per_iteration = 32 >> conv_stride_x; + num_elems_read_per_iteration = 24 + 23 * (dilation.x() - 1); break; #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F32: - num_elems_read_per_iteration = 12; + num_elems_read_per_iteration = 12 + 11 * (dilation.x() - 1); break; default: ARM_COMPUTE_ERROR("Data type not supported."); @@ -214,7 +227,7 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen // Configure kernel window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration)); - AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3, conv_stride_x, conv_stride_y); + AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3 + 2 * (dilation.y() - 1), conv_stride_x, conv_stride_y); AccessWindowStatic weights_access(weights, 0, 0, 3, 3); AccessWindowHorizontal output_access(output, 0, num_elems_written_per_iteration); @@ -227,7 +240,7 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen } // namespace NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel() - : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0), _depth_multiplier(1) + : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0), _depth_multiplier(1), _dilation() { } @@ -236,28 +249,41 @@ BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const return _border_size; } -void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier) +void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, + const Size2D &dilation) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), output->info(), conv_info, depth_multiplier)); - - _input = input; - _output = output; - _weights = weights; - _conv_info = conv_info; - _depth_multiplier = depth_multiplier; - _num_elems_written_per_iteration = 16 >> _conv_info.stride().first; - _border_size = 
BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left()); - - auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, dilation)); + + _input = input; + _output = output; + _weights = weights; + _conv_info = conv_info; + _depth_multiplier = depth_multiplier; + switch(input->info()->data_type()) + { + case DataType::QASYMM8: + case DataType::F32: + _num_elems_written_per_iteration = 16 >> _conv_info.stride().first; + break; + case DataType::F16: + _num_elems_written_per_iteration = 32 >> _conv_info.stride().first; + break; + default: + ARM_COMPUTE_ERROR("Data type not supported."); + } + _border_size = BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left()); + _dilation = dilation; + auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, dilation); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); INEKernel::configure(win_config.second); } -Status NEDepthwiseConvolutionLayer3x3Kernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier) +Status NEDepthwiseConvolutionLayer3x3Kernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, + const Size2D &dilation) { - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier).first); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier, dilation)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier, dilation).first); return Status{}; } @@ -272,14 +298,14 @@ void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const Threa { #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: - convolve_3x3(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier); + convolve_3x3(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation); break; #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F32: - convolve_3x3(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier); + convolve_3x3(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation); break; case DataType::QASYMM8: - convolve_3x3(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier); + convolve_3x3(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation); break; default: ARM_COMPUTE_ERROR("Not implemented"); diff --git a/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp b/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp index 62373e348b..88f8b31a35 100644 --- a/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp +++ b/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp @@ -38,7 
+38,8 @@ using namespace arm_compute; namespace { -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier) +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, + const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, const Size2D &dilation) { ARM_COMPUTE_UNUSED(conv_info); //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions. @@ -48,6 +49,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(2) * depth_multiplier) != output->dimension(2)); ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (kernel_dims.width * kernel_dims.height + ((has_bias) ? 1 : 0))); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); + ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || dilation.y() < 1); return Status{}; } @@ -84,7 +86,7 @@ void NEDepthwiseIm2ColKernel::run_generic(const Window &window) Iterator out(_output, window_out); const int full_length = input_w + pad_left + pad_right; - const int max_initial_x = stride_x * (((full_length - _kernel_dims.width) / stride_x) + 1); + const int max_initial_x = stride_x * (((full_length - (_kernel_dims.width + (_kernel_dims.width - 1) * (_dilation.x() - 1))) / stride_x) + 1); // Define pad value auto zero = static_cast(0); @@ -103,12 +105,12 @@ void NEDepthwiseIm2ColKernel::run_generic(const Window &window) // Get pointers const uint8_t *const input_ptr = in.ptr() + id.z() / _depth_multiplier * input_stride_z; auto output_ptr = reinterpret_cast(out.ptr()); - const int height = src_y + _kernel_dims.height; - const int width = src_x + _kernel_dims.width; + const int height = src_y + (_kernel_dims.height + (_kernel_dims.height - 1) * (_dilation.y() - 1)); + const int width = src_x + (_kernel_dims.width + (_kernel_dims.width - 1) * (_dilation.x() - 1)); - for(int y = src_y; y < height; ++y) + for(int y = src_y; y < height; y += _dilation.y()) { - for(int x = src_x; x < width; ++x, ++output_ptr) + for(int x = src_x; x < width; x += _dilation.x(), ++output_ptr) { if(x < 0 || x >= input_w || y < 0 || y >= input_h) { @@ -130,15 +132,16 @@ void NEDepthwiseIm2ColKernel::run_generic(const Window &window) } NEDepthwiseIm2ColKernel::NEDepthwiseIm2ColKernel() - : _func(nullptr), _input(nullptr), _output(nullptr), _kernel_dims(), _conv_info(), _has_bias(), _depth_multiplier(1) + : _func(nullptr), _input(nullptr), _output(nullptr), _kernel_dims(), _conv_info(), _has_bias(), _depth_multiplier(1), _dilation() { } -void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier) +void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, + const Size2D &dilation) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier)); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier, dilation)); _input = input; _output = output; @@ -146,6 +149,7 @@ void 
NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, c _conv_info = conv_info; _has_bias = has_bias; _depth_multiplier = depth_multiplier; + _dilation = dilation; // Configure kernel window Window win = calculate_max_window(*input->info(), Steps()); @@ -172,10 +176,11 @@ void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, c INEKernel::configure(win); } -Status NEDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier) +Status NEDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, + const Size2D &dilation) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier, dilation)); return Status{}; } diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp index 23c3f81edc..8211104bda 100644 --- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp @@ -49,6 +49,14 @@ void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); + // idx_w and idx_h only used for validation + const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); + ARM_COMPUTE_UNUSED(idx_w); + ARM_COMPUTE_UNUSED(idx_h); + + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_w) + (weights->info()->dimension(idx_w) - 1) * (dilation.x() - 1) > input->info()->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right()); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_h) + (weights->info()->dimension(idx_h) - 1) * (dilation.y() - 1) > input->info()->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom()); const bool is_nhwc = input->info()->data_layout() == DataLayout::NHWC; @@ -239,6 +247,9 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_w) + (weights->info()->dimension(idx_w) - 1) * (dilation.x() - 1) > input->info()->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right()); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_h) + (weights->info()->dimension(idx_h) - 1) * (dilation.y() - 1) > input->info()->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom()); + const bool can_run_optimised_3x3_kernel = (weights->info()->dimension(idx_w) == 3) && (weights->info()->dimension(idx_h) == 3); if(bool(can_run_optimised_3x3_kernel)) diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp 
index 4c602b3640..5133756993 100644 --- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp @@ -51,7 +51,8 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, + const Size2D &dilation) { ARM_COMPUTE_UNUSED(act_info); @@ -87,8 +88,8 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor _permute_weights.configure(weights, &_permuted_weights, PermutationVector(1U, 2U, 0U)); _permuted_weights.info()->set_data_layout(DataLayout::NCHW); - // Configure optimized depthwise - _dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier); + // Configure depthwise + _dwc_kernel.configure(&_permuted_input, &_permuted_weights, (_is_quantized) ? &_accumulator : &_permuted_output, conv_info, depth_multiplier, dilation); // Configure border handler _border_handler.configure(&_permuted_input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value); @@ -99,7 +100,7 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor else { // Configure depthwise convolution kernel - _dwc_kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info, depth_multiplier); + _dwc_kernel.configure(input, weights, (_is_quantized) ? &_accumulator : output, conv_info, depth_multiplier, dilation); // Configure border handler _border_handler.configure(input, _dwc_kernel.border_size(), BorderMode::CONSTANT, zero_value); @@ -185,18 +186,25 @@ void NEDepthwiseConvolutionLayer3x3::configure(ITensor *input, const ActivationLayerInfo &act_info, const Size2D &dilation) { - ARM_COMPUTE_ERROR_ON(dilation.x() != 1 || dilation.y() != 1); - ARM_COMPUTE_UNUSED(dilation); ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); + // idx_w and idx_h only used for validation + const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); + ARM_COMPUTE_UNUSED(idx_w); + ARM_COMPUTE_UNUSED(idx_h); + + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_w) + (weights->info()->dimension(idx_w) - 1) * (dilation.x() - 1) > input->info()->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right()); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_h) + (weights->info()->dimension(idx_h) - 1) * (dilation.y() - 1) > input->info()->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom()); + _original_weights = weights; _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type()); _has_bias = biases != nullptr; _is_optimized = NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input->info(), weights->info(), conv_info, - depth_multiplier); + depth_multiplier, dilation); _is_nchw = input->info()->data_layout() == DataLayout::NCHW; _permute = _is_optimized == _is_nchw; _is_prepared = false; @@ -209,7 +217,7 @@ void NEDepthwiseConvolutionLayer3x3::configure(ITensor *input, } else { - configure_generic(input, weights, biases, output, conv_info, depth_multiplier, act_info); + configure_generic(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation); } // 
Configure activation @@ -230,7 +238,11 @@ Status NEDepthwiseConvolutionLayer3x3::validate(const ITensorInfo *input { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); - ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() != 1 || dilation.y() != 1); + ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() < 1 || dilation.y() < 1); + const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) + (weights->dimension(idx_w) - 1) * (dilation.x() - 1) > input->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right()); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) + (weights->dimension(idx_h) - 1) * (dilation.y() - 1) > input->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom()); if(biases != nullptr) { @@ -239,7 +251,7 @@ Status NEDepthwiseConvolutionLayer3x3::validate(const ITensorInfo *input ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(channel_idx)); } - if(!NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input, weights, conv_info, depth_multiplier)) + if(!NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(input, weights, conv_info, depth_multiplier, dilation)) { const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type()); TensorInfo accumulator = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32)); @@ -356,12 +368,17 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh { const unsigned int channel_idx = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL); ARM_COMPUTE_UNUSED(channel_idx); - ARM_COMPUTE_ERROR_ON(dilation.x() != 1 || dilation.y() != 1); - ARM_COMPUTE_UNUSED(dilation); - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); ARM_COMPUTE_ERROR_ON((input->info()->dimension(channel_idx) * depth_multiplier) != weights->info()->dimension(channel_idx)); + // idx_w and idx_h only used for validation + const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); + ARM_COMPUTE_UNUSED(idx_w); + ARM_COMPUTE_UNUSED(idx_h); + + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_w) + (weights->info()->dimension(idx_w) - 1) * (dilation.x() - 1) > input->info()->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right()); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_h) + (weights->info()->dimension(idx_h) - 1) * (dilation.y() - 1) > input->info()->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom()); _is_nhwc = input->info()->data_layout() == DataLayout::NHWC; @@ -392,7 +409,7 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh bool append_bias = (biases != nullptr) && !_is_quantized; // Calculate output shape - TensorShape output_shape = shape_calculator::compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier); + TensorShape output_shape = shape_calculator::compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, 
depth_multiplier, dilation); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape)); @@ -420,7 +437,7 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh shape_im2col.set(1, conv_size); shape_im2col.set(2, weights_z); _input_reshaped.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col).set_data_layout(DataLayout::NCHW)); - _im2col_kernel.configure(input_to_use, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier); + _im2col_kernel.configure(input_to_use, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier, dilation); // Weights reshape configuration const TensorShape shape_weights_reshape(patch_size, weights_z); @@ -491,11 +508,13 @@ Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); - ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() != 1 || dilation.y() != 1); + ARM_COMPUTE_RETURN_ERROR_ON(dilation.x() < 1 || dilation.y() < 1); const unsigned int width_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); const unsigned int height_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(width_idx) + (weights->dimension(width_idx) - 1) * (dilation.x() - 1) > input->dimension(width_idx) + conv_info.pad_left() + conv_info.pad_right()); + ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(height_idx) + (weights->dimension(height_idx) - 1) * (dilation.y() - 1) > input->dimension(height_idx) + conv_info.pad_top() + conv_info.pad_bottom()); // Clone output to use auto init auto output_clone = output->clone(); @@ -522,7 +541,7 @@ Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type()); const bool append_bias = (biases != nullptr) && !is_quantized; - TensorShape output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier); + TensorShape output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); const size_t weights_w = weights_to_use->dimension(0); const size_t weights_h = weights_to_use->dimension(1); const size_t weights_z = weights_to_use->dimension(2); @@ -549,7 +568,7 @@ Status NEDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe shape_im2col.set(1, conv_size); shape_im2col.set(2, weights_z); TensorInfo input_reshaped(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col).set_data_layout(DataLayout::NCHW)); - ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseIm2ColKernel::validate(input_to_use, &input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier)); + ARM_COMPUTE_RETURN_ON_ERROR(NEDepthwiseIm2ColKernel::validate(input_to_use, &input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier, dilation)); // Weights reshape configuration const TensorShape shape_weights_reshape(patch_size, weights_z); diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp 
b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
index d9b2bff810..049bf66689 100644
--- a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -254,7 +254,8 @@ Status NEDepthwiseConvolutionAssemblyDispatch::validate(const ITensorInfo
 bool NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(const ITensorInfo *input,
                                                                     const ITensorInfo *weights,
                                                                     PadStrideInfo      conv_info,
-                                                                    unsigned int       depth_multiplier)
+                                                                    unsigned int       depth_multiplier,
+                                                                    const Size2D      &dilation)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights);
 
@@ -290,8 +291,9 @@ bool NEDepthwiseConvolutionAssemblyDispatch::is_optimized_supported(const ITenso
     bool is_same_padding   = (pad_top == same_pad.pad_top()) && (pad_right == same_pad.pad_right()) && (pad_bottom == same_pad.pad_bottom()) && (pad_left == same_pad.pad_left());
     bool is_valid_padding  = (pad_top == 0) && (pad_right == 0) && (pad_bottom == 0) && (pad_left == 0);
     bool supported_padding = is_same_padding || is_valid_padding;
+    bool is_dilation_1     = dilation.x() == 1 && dilation.y() == 1;
 
-    return is_data_type_valid && weights_supported && supported_strides && supported_padding && (depth_multiplier == 1);
+    return is_data_type_valid && weights_supported && supported_strides && supported_padding && (depth_multiplier == 1) && is_dilation_1;
 }
 
 void NEDepthwiseConvolutionAssemblyDispatch::run()
--
cgit v1.2.1
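
Editor's note: below is a minimal usage sketch (not part of the patch) of the dilation argument this change adds to NEDepthwiseConvolutionLayer::configure(). Tensor shapes, the F32 data type and the Size2D(2U, 2U) dilation are illustrative assumptions; only the configure() signature with the trailing dilation parameter comes from the patch itself.

// Sketch, assuming the Compute Library is built with NEON support.
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative NCHW shapes: 32x32 input with 16 channels, 3x3 depthwise weights.
    Tensor input, weights, biases, output;
    input.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));

    // Effective kernel extent with dilation d is k + (k - 1) * (d - 1) = 3 + 2 * 1 = 5,
    // so an unpadded, stride-1 output is 32 - 5 + 1 = 28 per spatial dimension.
    output.allocator()->init(TensorInfo(TensorShape(28U, 28U, 16U), 1, DataType::F32));

    NEDepthwiseConvolutionLayer dwc;
    dwc.configure(&input, &weights, &biases, &output,
                  PadStrideInfo(1, 1, 0, 0), // stride 1, no padding
                  1,                         // depth multiplier
                  ActivationLayerInfo(),     // no fused activation
                  Size2D(2U, 2U));           // dilation: the parameter added by this patch

    input.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    output.allocator()->allocate();

    // Fill input/weights/biases here before running; omitted in this sketch.
    dwc.run();
    return 0;
}

Note that with any dilation other than (1, 1), is_optimized_supported() above returns false, so a call like this falls back to the generic im2col-based path rather than the assembly-dispatch kernels.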