author    Usama Arif <usama.arif@arm.com>    2019-04-12 10:29:17 +0100
committer Pablo Marquez <pablo.tello@arm.com>    2019-04-24 12:31:21 +0000
commit    881f2ded860fc1db23810076b699c4492556c376 (patch)
tree      6237444ceaf2d5098e0b546693b8c18955963012 /src/core
parent    557d4aece64b2ed422ec853dbc2b7a4949ea56ca (diff)
download  ComputeLibrary-881f2ded860fc1db23810076b699c4492556c376.tar.gz
COMPMID-2048: Add support for dilation in NEDepthwiseConvolution.
Change-Id: If9941e770779fbf918ba5ff0573da9378078b969
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/999
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Diffstat (limited to 'src/core')
-rw-r--r--   src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp   106
-rw-r--r--   src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp                 27
2 files changed, 82 insertions, 51 deletions
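The patch threads a Size2D dilation argument through the 3x3 kernel and the im2col kernel below. As context for the shape checks in the diff, here is a minimal standalone sketch, with an illustrative helper name, of how dilation enters the convolved-dimension arithmetic that compute_depthwise_convolution_shape relies on (the library's own helper additionally handles the rounding policy):

#include <cstddef>

// Illustrative helper (not the library's API): output extent of one spatial dimension.
// A k-tap kernel with dilation d covers an effective extent of k + (k - 1) * (d - 1).
static std::size_t convolved_dim(std::size_t in, std::size_t kernel, std::size_t stride,
                                 std::size_t pad_before, std::size_t pad_after, std::size_t dilation)
{
    const std::size_t effective_kernel = kernel + (kernel - 1) * (dilation - 1);
    return (in + pad_before + pad_after - effective_kernel) / stride + 1; // FLOOR rounding assumed
}

// Example: a 3-tap kernel with dilation 2 spans 5 pixels, so a 10-wide input padded by 1
// on each side at stride 1 gives (10 + 1 + 1 - 5) / 1 + 1 = 8 output columns.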
diff --git a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
index ec672e0fc4..fdafc2da90 100644
--- a/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -49,7 +49,7 @@ class convolver_3x3
{
public:
static void convolve(const Window &window, unsigned int num_elems_written_per_iteration,
- const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
+ const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
{
const int input_offset = -input->info()->quantization_info().offset;
const int weights_offset = -weights->info()->quantization_info().offset;
@@ -57,6 +57,7 @@ public:
const int input_stride_x = input->info()->strides_in_bytes().x();
const int input_stride_y = input->info()->strides_in_bytes().y();
const int input_stride_z = input->info()->strides_in_bytes().z();
+ const int input_stride_w = input->info()->strides_in_bytes()[3];
const int output_stride_y = output->info()->strides_in_bytes().y();
const int kernel_stride_y = weights->info()->strides_in_bytes().y();
const int kernel_stride_z = weights->info()->strides_in_bytes().z();
@@ -74,9 +75,10 @@ public:
// setup input window for the iterator
Window window_in = window;
- // we just want execute_window_loop to iterate over the dimensions > 2, so we set the first 2 dimensions to 0
+ // Iteration of input is taken care of in execute_window_loop
window_in.set(Window::DimX, Window::Dimension(0, 0, 0));
window_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+ window_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
Window window_k = calculate_max_window(*weights->info(), Steps(1u));
@@ -91,7 +93,7 @@ public:
int ih = 0;
int oh = 0;
- const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y - (id.z() - id.z() / depth_multiplier) * input_stride_z;
+ const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y + (id.z() / depth_multiplier) * input_stride_z + input_stride_w * id[3];
const uint8_t *ptr_weights_base = weights_ptr + id.z() * kernel_stride_z;
const auto ptr_weights_r0 = reinterpret_cast<const T1 *>(ptr_weights_base);
@@ -104,45 +106,54 @@ public:
for(ih = 0, oh = 0; oh < output_h; ++oh, ih += conv_stride_y)
{
auto in_top = reinterpret_cast<const T1 *>(input_ptr + (ih + 0) * input_stride_y);
- auto in_mid = reinterpret_cast<const T1 *>(input_ptr + (ih + 1) * input_stride_y);
- auto in_low = reinterpret_cast<const T1 *>(input_ptr + (ih + 2) * input_stride_y);
- auto p_out = reinterpret_cast<T2 *>(out.ptr() + oh * output_stride_y);
+ auto in_mid = reinterpret_cast<const T1 *>(input_ptr + (ih + dilation.y()) * input_stride_y);
+ auto in_low = reinterpret_cast<const T1 *>(input_ptr + (ih + 2 * dilation.y()) * input_stride_y); //uint8
+ auto p_out = reinterpret_cast<T2 *>(out.ptr() + oh * output_stride_y); //int32
for(int ow = 0; ow < output_w; ow += num_elems_written_per_iteration,
in_top += delta_input, in_mid += delta_input, in_low += delta_input,
p_out += num_elems_written_per_iteration)
{
- auto vres = detail::convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, input_offset);
- detail::store_results<stridex>(p_out, vres);
+ if(dilation == Size2D(1U, 1U))
+ {
+ auto vres = detail::convolve_3x3<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, input_offset);
+ detail::store_results<stridex>(p_out, vres);
+ }
+ else
+ {
+ auto vres = detail::convolve_3x3_dilation<stridex>(in_top, in_mid, in_low, vw_r0, vw_r1, vw_r2, dilation.x(), input_offset);
+ detail::store_results<stridex>(p_out, vres);
+ }
}
}
},
- in, out);
+ out);
}
};
template <typename T1, typename T2>
inline void convolve_3x3(const Window &window, unsigned int num_elems_written_per_iteration,
- const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
+ const ITensor *input, const ITensor *weights, ITensor *output,
+ const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
{
const unsigned int conv_stride_x = std::get<0>(conv_info.stride());
switch(conv_stride_x)
{
case 1:
- convolver_3x3<T1, T2, 1>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier);
+ convolver_3x3<T1, T2, 1>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation);
break;
case 2:
- convolver_3x3<T1, T2, 2>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier);
+ convolver_3x3<T1, T2, 2>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation);
break;
case 3:
- convolver_3x3<T1, T2, 3>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier);
+ convolver_3x3<T1, T2, 3>::convolve(window, num_elems_written_per_iteration, input, weights, output, conv_info, depth_multiplier, dilation);
break;
default:
ARM_COMPUTE_ERROR("Not implemented");
}
}
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation)
{
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
@@ -157,7 +168,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
if(output->total_size() != 0)
{
- const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
+ const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
if(is_data_type_quantized_asymmetric(input->data_type()))
@@ -173,13 +184,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
+ const Size2D &dilation)
{
Window win;
bool window_changed = false;
// Get convolved dimensions
- const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
+ const TensorShape output_shape = misc::shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation);
const DataType output_dt = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type();
// Output auto inizialitation if not yet initialized
@@ -197,15 +209,16 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
switch(input->data_type())
{
case DataType::QASYMM8:
- num_elems_read_per_iteration = 16;
+ num_elems_read_per_iteration = 16 + 15 * (dilation.x() - 1);
break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
- num_elems_read_per_iteration = 24;
+ num_elems_written_per_iteration = 32 >> conv_stride_x;
+ num_elems_read_per_iteration = 24 + 23 * (dilation.x() - 1);
break;
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F32:
- num_elems_read_per_iteration = 12;
+ num_elems_read_per_iteration = 12 + 11 * (dilation.x() - 1);
break;
default:
ARM_COMPUTE_ERROR("Data type not supported.");
@@ -214,7 +227,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
// Configure kernel window
win = calculate_max_window(*output, Steps(num_elems_written_per_iteration));
- AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3, conv_stride_x, conv_stride_y);
+ AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration, 3 + 2 * (dilation.y() - 1), conv_stride_x, conv_stride_y);
AccessWindowStatic weights_access(weights, 0, 0, 3, 3);
AccessWindowHorizontal output_access(output, 0, num_elems_written_per_iteration);
@@ -227,7 +240,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
} // namespace
NEDepthwiseConvolutionLayer3x3Kernel::NEDepthwiseConvolutionLayer3x3Kernel()
- : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0), _depth_multiplier(1)
+ : _border_size(0), _input(), _output(), _weights(), _conv_info(), _num_elems_written_per_iteration(0), _depth_multiplier(1), _dilation()
{
}
@@ -236,28 +249,41 @@ BorderSize NEDepthwiseConvolutionLayer3x3Kernel::border_size() const
return _border_size;
}
-void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
+void NEDepthwiseConvolutionLayer3x3Kernel::configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
+ const Size2D &dilation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), output->info(), conv_info, depth_multiplier));
-
- _input = input;
- _output = output;
- _weights = weights;
- _conv_info = conv_info;
- _depth_multiplier = depth_multiplier;
- _num_elems_written_per_iteration = 16 >> _conv_info.stride().first;
- _border_size = BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left());
-
- auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, dilation));
+
+ _input = input;
+ _output = output;
+ _weights = weights;
+ _conv_info = conv_info;
+ _depth_multiplier = depth_multiplier;
+ switch(input->info()->data_type())
+ {
+ case DataType::QASYMM8:
+ case DataType::F32:
+ _num_elems_written_per_iteration = 16 >> _conv_info.stride().first;
+ break;
+ case DataType::F16:
+ _num_elems_written_per_iteration = 32 >> _conv_info.stride().first;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data type not supported.");
+ }
+ _border_size = BorderSize(_conv_info.pad_top(), _conv_info.pad_right(), _conv_info.pad_bottom(), _conv_info.pad_left());
+ _dilation = dilation;
+ auto win_config = validate_and_configure_window(_input->info(), _weights->info(), _output->info(), _conv_info, _depth_multiplier, dilation);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
INEKernel::configure(win_config.second);
}
-Status NEDepthwiseConvolutionLayer3x3Kernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier)
+Status NEDepthwiseConvolutionLayer3x3Kernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
+ const Size2D &dilation)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier).first);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info, depth_multiplier, dilation));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier, dilation).first);
return Status{};
}
@@ -272,14 +298,14 @@ void NEDepthwiseConvolutionLayer3x3Kernel::run(const Window &window, const Threa
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
- convolve_3x3<float16_t, float16_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier);
+ convolve_3x3<float16_t, float16_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
break;
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F32:
- convolve_3x3<float, float>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier);
+ convolve_3x3<float, float>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
break;
case DataType::QASYMM8:
- convolve_3x3<uint8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier);
+ convolve_3x3<uint8_t, int32_t>(window, _num_elems_written_per_iteration, _input, _weights, _output, _conv_info, _depth_multiplier, _dilation);
break;
default:
ARM_COMPUTE_ERROR("Not implemented");
diff --git a/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp b/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp
index 62373e348b..88f8b31a35 100644
--- a/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthwiseIm2ColKernel.cpp
@@ -38,7 +38,8 @@ using namespace arm_compute;
namespace
{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
+ const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, const Size2D &dilation)
{
ARM_COMPUTE_UNUSED(conv_info);
//Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
@@ -48,6 +49,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(2) * depth_multiplier) != output->dimension(2));
ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (kernel_dims.width * kernel_dims.height + ((has_bias) ? 1 : 0)));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || dilation.y() < 1);
return Status{};
}
@@ -84,7 +86,7 @@ void NEDepthwiseIm2ColKernel::run_generic(const Window &window)
Iterator out(_output, window_out);
const int full_length = input_w + pad_left + pad_right;
- const int max_initial_x = stride_x * (((full_length - _kernel_dims.width) / stride_x) + 1);
+ const int max_initial_x = stride_x * (((full_length - (_kernel_dims.width + (_kernel_dims.width - 1) * (_dilation.x() - 1))) / stride_x) + 1);
// Define pad value
auto zero = static_cast<T>(0);
@@ -103,12 +105,12 @@ void NEDepthwiseIm2ColKernel::run_generic(const Window &window)
// Get pointers
const uint8_t *const input_ptr = in.ptr() + id.z() / _depth_multiplier * input_stride_z;
auto output_ptr = reinterpret_cast<T *>(out.ptr());
- const int height = src_y + _kernel_dims.height;
- const int width = src_x + _kernel_dims.width;
+ const int height = src_y + (_kernel_dims.height + (_kernel_dims.height - 1) * (_dilation.y() - 1));
+ const int width = src_x + (_kernel_dims.width + (_kernel_dims.width - 1) * (_dilation.x() - 1));
- for(int y = src_y; y < height; ++y)
+ for(int y = src_y; y < height; y += _dilation.y())
{
- for(int x = src_x; x < width; ++x, ++output_ptr)
+ for(int x = src_x; x < width; x += _dilation.x(), ++output_ptr)
{
if(x < 0 || x >= input_w || y < 0 || y >= input_h)
{
@@ -130,15 +132,16 @@ void NEDepthwiseIm2ColKernel::run_generic(const Window &window)
}
NEDepthwiseIm2ColKernel::NEDepthwiseIm2ColKernel()
- : _func(nullptr), _input(nullptr), _output(nullptr), _kernel_dims(), _conv_info(), _has_bias(), _depth_multiplier(1)
+ : _func(nullptr), _input(nullptr), _output(nullptr), _kernel_dims(), _conv_info(), _has_bias(), _depth_multiplier(1), _dilation()
{
}
-void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier)
+void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier,
+ const Size2D &dilation)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier, dilation));
_input = input;
_output = output;
@@ -146,6 +149,7 @@ void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, c
_conv_info = conv_info;
_has_bias = has_bias;
_depth_multiplier = depth_multiplier;
+ _dilation = dilation;
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
@@ -172,10 +176,11 @@ void NEDepthwiseIm2ColKernel::configure(const ITensor *input, ITensor *output, c
INEKernel::configure(win);
}
-Status NEDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier)
+Status NEDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier,
+ const Size2D &dilation)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier, dilation));
return Status{};
}
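For reference, a hedged standalone sketch of the dilated im2col walk added above: each output patch now samples the input every dilation pixels across an effective kernel extent of k + (k - 1) * (d - 1), and out-of-bounds taps take the pad value (the buffer layout and names below are illustrative, not the kernel's actual iterators):

// Copy one dilated kernel footprint into a contiguous im2col patch.
static void im2col_patch(const float *input, float *output, int input_w, int input_h,
                         int src_x, int src_y, int kernel_w, int kernel_h,
                         int dilation_x, int dilation_y, float pad_value)
{
    const int height = src_y + kernel_h + (kernel_h - 1) * (dilation_y - 1);
    const int width  = src_x + kernel_w + (kernel_w - 1) * (dilation_x - 1);
    for(int y = src_y; y < height; y += dilation_y)
    {
        for(int x = src_x; x < width; x += dilation_x, ++output)
        {
            *output = (x < 0 || x >= input_w || y < 0 || y >= input_h)
                      ? pad_value
                      : input[y * input_w + x];
        }
    }
}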