-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp |  8
-rw-r--r--  src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp       | 25
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp                 | 48
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp     | 31
-rw-r--r--  tests/datasets/DepthwiseConvolutionLayerDataset.h            |  3
-rw-r--r--  tests/datasets/PoolingLayerDataset.h                         | 15
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp               |  4
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp                         | 12
-rw-r--r--  tests/validation/NEON/DirectConvolutionLayer.cpp             |  4
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp                       |  9
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h              | 13
-rw-r--r--  tests/validation/reference/DepthwiseConvolutionLayer.cpp     |  8
12 files changed, 103 insertions(+), 77 deletions(-)
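
This patch switches the kernels and the reference code from the symmetric pad() pair (and the ad-hoc clamping against the kernel radius) to the per-side PadStrideInfo accessors, so asymmetric padding is honoured end to end. A minimal sketch of the API the patch relies on; the constructor ordering and accessor names are taken from the hunks below, the header location is assumed:

#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;

    // PadStrideInfo(stride_x, stride_y, pad_left, pad_right, pad_top, pad_bottom, rounding),
    // the asymmetric constructor used by the new dataset entries in this patch.
    const PadStrideInfo info(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR);

    // The kernels now read each side explicitly instead of the symmetric pad() pair,
    // and no longer clamp the values against the kernel radius.
    const unsigned int pad_left   = info.pad_left();   // 0
    const unsigned int pad_right  = info.pad_right();  // 1
    const unsigned int pad_top    = info.pad_top();    // 0
    const unsigned int pad_bottom = info.pad_bottom(); // 1

    return static_cast<int>(pad_left + pad_right + pad_top + pad_bottom);
}
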
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
index c7cee4c387..c24420a7e3 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -121,10 +121,10 @@ void CLDepthwiseConvolutionLayer3x3Kernel::configure(const ICLTensor *input, con
const GPUTarget gpu_target = get_arch_from_target(get_target());
// Configure kernel window
- const unsigned int conv_pad_left = std::max(conv_info.pad_left(), 1U);
- const unsigned int conv_pad_top = std::max(conv_info.pad_top(), 1U);
- const unsigned int conv_pad_right = std::max(conv_info.pad_right(), 1U);
- const unsigned int conv_pad_bottom = std::max(conv_info.pad_bottom(), 1U);
+ const unsigned int conv_pad_left = conv_info.pad_left();
+ const unsigned int conv_pad_top = conv_info.pad_top();
+ const unsigned int conv_pad_right = conv_info.pad_right();
+ const unsigned int conv_pad_bottom = conv_info.pad_bottom();
unsigned int num_elems_read_per_iteration_x = 0;
unsigned int num_elems_read_per_iteration_y = 0;
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index 4b141f7ecd..ac3c9ac4a6 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -126,10 +126,10 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
unsigned int conv_stride_x = std::get<0>(conv_info.stride());
unsigned int conv_stride_y = std::get<1>(conv_info.stride());
- unsigned int conv_pad_left = std::max(conv_info.pad_left(), kernel_size / 2);
- unsigned int conv_pad_top = std::max(conv_info.pad_top(), kernel_size / 2);
- unsigned int conv_pad_right = std::max(conv_info.pad_right(), kernel_size / 2);
- unsigned int conv_pad_bottom = std::max(conv_info.pad_bottom(), kernel_size / 2);
+ unsigned int conv_pad_left = conv_info.pad_left();
+ unsigned int conv_pad_top = conv_info.pad_top();
+ unsigned int conv_pad_right = conv_info.pad_right();
+ unsigned int conv_pad_bottom = conv_info.pad_bottom();
unsigned int num_elems_read_per_iteration_x = 0;
unsigned int num_elems_read_per_iteration_y = 0;
@@ -302,18 +302,13 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
_conv_stride_x = std::get<0>(conv_info.stride());
_conv_stride_y = std::get<1>(conv_info.stride());
+ _border_size = BorderSize(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
_input = input;
_weights = weights;
_output = output;
_biases = biases;
- int conv_pad_left = std::min(conv_info.pad_left(), kernel_size / 2);
- int conv_pad_top = std::min(conv_info.pad_top(), kernel_size / 2);
- int conv_pad_right = std::min(conv_info.pad_right(), kernel_size / 2);
- int conv_pad_bottom = std::min(conv_info.pad_bottom(), kernel_size / 2);
- _border_size = BorderSize(conv_pad_top, conv_pad_right, conv_pad_bottom, conv_pad_left);
-
const GPUTarget gpu_target = get_arch_from_target(get_target());
std::stringstream kernel_name;
@@ -450,13 +445,13 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
_config_id += "_";
_config_id += support::cpp11::to_string(kernel_size);
_config_id += "_";
- _config_id += support::cpp11::to_string(conv_pad_left);
+ _config_id += support::cpp11::to_string(border_size().left);
_config_id += "_";
- _config_id += support::cpp11::to_string(conv_pad_top);
+ _config_id += support::cpp11::to_string(border_size().top);
_config_id += "_";
- _config_id += support::cpp11::to_string(conv_pad_right);
+ _config_id += support::cpp11::to_string(border_size().right);
_config_id += "_";
- _config_id += support::cpp11::to_string(conv_pad_bottom);
+ _config_id += support::cpp11::to_string(border_size().bottom);
_config_id += "_";
_config_id += support::cpp11::to_string(_conv_stride_x);
_config_id += "_";
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index c515ed68e7..b3034e10cc 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -61,15 +61,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(input->data_type()) && pool_info.pool_type() == PoolingType::L2),
"Unsupported combination of parameters!");
- ARM_COMPUTE_RETURN_ERROR_ON(!pool_info.pad_stride_info().padding_is_symmetric());
const bool is_global_pooling = pool_info.is_global_pooling();
const unsigned int pool_size_x = is_global_pooling ? input->tensor_shape().x() : pool_info.pool_size().width;
const unsigned int pool_size_y = is_global_pooling ? input->tensor_shape().y() : pool_info.pool_size().height;
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_global_pooling && ((pool_info.pad_stride_info().pad().first >= pool_size_x) || (pool_info.pad_stride_info().pad().second >= pool_size_y)),
- "Invalid pool size and pool pad combination!");
-
// Checks performed when output is configured
if(output->total_size() != 0)
{
@@ -92,8 +88,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
int pool_stride_x = 0;
int pool_stride_y = 0;
unsigned int pooled_w = 0;
@@ -101,8 +95,11 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso
int pool_size_x = pool_info.is_global_pooling() ? input->dimension(0) : pool_info.pool_size().width;
int pool_size_y = pool_info.is_global_pooling() ? input->dimension(1) : pool_info.pool_size().height;
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
- std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
+ const int pool_pad_right = pad_stride_info.pad_right();
+ const int pool_pad_top = pad_stride_info.pad_top();
+ const int pool_pad_left = pad_stride_info.pad_left();
+ const int pool_pad_bottom = pad_stride_info.pad_bottom();
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -115,7 +112,7 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso
auto_init(input, output, pooled_w, pooled_h);
- BorderSize border_size = BorderSize(pool_pad_y, pool_pad_x);
+ BorderSize border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
const DataType data_type = input->data_type();
const int input_width = input->dimension(0);
@@ -131,15 +128,15 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso
const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
// Upper limit for the number of right/bottom border elements that are accessed
- const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_x + num_elems_read_per_iteration) - input_width;
- const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_y + pool_size_y) - input_height;
+ const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - input_width;
+ const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - input_height;
- border_size.right = std::max(upper_bound_w, pool_pad_x);
- border_size.bottom = std::max(upper_bound_h, pool_pad_y);
+ border_size.right = std::max(upper_bound_w, pool_pad_right);
+ border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
- AccessWindowRectangle input_access(input, -pool_pad_x, -pool_pad_y, num_elems_read_per_iteration, pool_size_y,
+ AccessWindowRectangle input_access(input, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
pool_stride_x * num_elems_processed_per_iteration, pool_stride_y);
AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
bool window_changed = update_window_and_padding(win, input_access, output_access);
@@ -162,8 +159,6 @@ BorderSize CLPoolingLayerKernel::border_size() const
void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info)
{
- int pool_pad_x = 0;
- int pool_pad_y = 0;
int pool_stride_x = 0;
int pool_stride_y = 0;
unsigned int pooled_w = 0;
@@ -173,8 +168,9 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
const int pool_size_y = pool_info.is_global_pooling() ? input->info()->dimension(1) : pool_info.pool_size().height;
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info();
const bool exclude_padding = pool_info.exclude_padding();
- std::tie(pool_pad_x, pool_pad_y) = pad_stride_info.pad();
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
+ const int pool_pad_top = pad_stride_info.pad_top();
+ const int pool_pad_left = pad_stride_info.pad_left();
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -207,11 +203,11 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
if(pool_type != PoolingType::MAX)
{
build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
- build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0) + (exclude_padding ? 0 : pool_pad_x)));
- build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1) + (exclude_padding ? 0 : pool_pad_y)));
+ build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(input->info()->dimension(0) + (exclude_padding ? 0 : pool_pad_left)));
+ build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1) + (exclude_padding ? 0 : pool_pad_top)));
build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
- build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_x));
- build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_y));
+ build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
+ build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
}
// Create kernel
@@ -278,8 +274,8 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
- unsigned int pool_pad_x, pool_pad_y, pool_stride_x, pool_stride_y = 0;
- std::tie(pool_pad_x, pool_pad_y) = _pool_info.pad_stride_info().pad();
+ unsigned int pool_stride_x = 0;
+ unsigned int pool_stride_y = 0;
std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info().stride();
Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
@@ -289,11 +285,11 @@ void CLPoolingLayerKernel::run(const Window &window, cl::CommandQueue &queue)
{
// Upsample input by pool size
Window in_slice(slice);
- in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - pool_pad_x,
- (in_slice.x().end() - pool_pad_x) * pool_stride_x,
+ in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info().pad_left(),
+ (in_slice.x().end() - _pool_info.pad_stride_info().pad_left()) * pool_stride_x,
pool_stride_x * _num_elems_processed_per_iteration));
- in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - pool_pad_y,
- (in_slice.y().end() - pool_pad_y) * pool_stride_y,
+ in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info().pad_top(),
+ (in_slice.y().end() - _pool_info.pad_stride_info().pad_top()) * pool_stride_y,
pool_stride_y));
// Set inputs
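
The pooling kernel above sizes its right/bottom borders from the larger of the read overshoot and the declared right/bottom padding. An illustrative calculation only, using the 112x112 -> 56x56 asymmetric max-pool case from the test dataset and assumed per-iteration element counts (the real values depend on data type and pool size):

#include <algorithm>
#include <iostream>

int main()
{
    // 112x112 input, 3x3 max pool, stride 2, pads (left 0, right 1, top 0, bottom 1) -> 56 wide.
    const int input_width    = 112;
    const int pooled_w       = 56;
    const int pool_stride_x  = 2;
    const int pool_pad_left  = 0;
    const int pool_pad_right = 1;

    // Assumed vectorization factors for this sketch.
    const int num_elems_processed_per_iteration = 8;
    const int num_elems_read_per_iteration      = 16;

    const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
    const int upper_bound_w    = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x
                                  - pool_pad_left + num_elems_read_per_iteration)
                                 - input_width; // 0 with these numbers

    // The right border must cover both the read overshoot and the declared right padding.
    std::cout << "border.right = " << std::max(upper_bound_w, pool_pad_right) << "\n"; // 1
    return 0;
}
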
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
index 536a667799..4dc186a8a7 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
@@ -274,6 +274,7 @@ public:
ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimX) > small_tensor_size_optim);
ARM_COMPUTE_ERROR_ON(input->info()->dimension(Window::DimY) > small_tensor_size_optim);
+ const int input_stride_x = input->info()->strides_in_bytes().x();
const int input_stride_y = input->info()->strides_in_bytes().y();
const int input_stride_z = input->info()->strides_in_bytes().z();
const int output_stride_y = output->info()->strides_in_bytes().y();
@@ -284,6 +285,8 @@ public:
const int range_z = window.z().end() - window.z().start();
const int kernel_depth = weights->info()->dimension(Window::DimZ);
const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
+ const unsigned int conv_pad_left = conv_info.pad_left();
+ const unsigned int conv_pad_top = conv_info.pad_top();
// setup output window for the iterator
Window window_out = window;
@@ -307,7 +310,7 @@ public:
execute_window_loop(window_out, [&](const Coordinates & id)
{
- const uint8_t *input_ptr = in.ptr();
+ const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
uint8_t *out_ptr = out.ptr();
int ih = 0;
int oh = 0;
@@ -351,6 +354,7 @@ public:
static void convolve(const Window &window, unsigned int num_elems_read_per_iteration, unsigned int num_elems_written_per_iteration,
const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info)
{
+ const int input_stride_x = input->info()->strides_in_bytes().x();
const int input_stride_y = input->info()->strides_in_bytes().y();
const int input_stride_z = input->info()->strides_in_bytes().z();
const int output_stride_y = output->info()->strides_in_bytes().y();
@@ -362,6 +366,8 @@ public:
const int range_z = window.z().end() - window.z().start();
const int kernel_depth = weights->info()->dimension(Window::DimZ);
const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
+ const unsigned int conv_pad_left = conv_info.pad_left();
+ const unsigned int conv_pad_top = conv_info.pad_top();
const int fixed_point_position = input->info()->fixed_point_position();
// setup output window for the iterator
@@ -389,7 +395,7 @@ public:
/*
For a detailed explanation on how the algorithm works refer to template <> class convolver_3x3<1>
*/
- const uint8_t *input_ptr = in.ptr();
+ const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
uint8_t *out_ptr = out.ptr();
int ih = 0;
int oh = 0;
@@ -680,8 +686,8 @@ public:
const int delta_input = get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
const int kernel_depth = weights->info()->dimension(Window::DimZ);
const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
- const unsigned int conv_pad_x = std::get<0>(conv_info.pad());
- const unsigned int conv_pad_y = std::get<1>(conv_info.pad());
+ const unsigned int conv_pad_left = conv_info.pad_left();
+ const unsigned int conv_pad_top = conv_info.pad_top();
const int fixed_point_position = input->info()->fixed_point_position();
// setup output window for the iterator
@@ -707,7 +713,7 @@ public:
execute_window_loop(window_out, [&](const Coordinates & id)
{
- const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
+ const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
uint8_t *out_ptr = out.ptr();
int ih = 0;
int oh = 0;
@@ -804,8 +810,8 @@ public:
const int delta_input = get_input_num_elems_processed<stridex>(num_elems_written_per_iteration);
const int kernel_depth = weights->info()->dimension(Window::DimZ);
const unsigned int conv_stride_y = std::get<1>(conv_info.stride());
- const unsigned int conv_pad_x = std::get<0>(conv_info.pad());
- const unsigned int conv_pad_y = std::get<1>(conv_info.pad());
+ const unsigned int conv_pad_left = conv_info.pad_left();
+ const unsigned int conv_pad_top = conv_info.pad_top();
const int fixed_point_position = input->info()->fixed_point_position();
// setup output window for the iterator
@@ -831,7 +837,7 @@ public:
execute_window_loop(window_out, [&](const Coordinates & id)
{
- const uint8_t *input_ptr = in.ptr() - conv_pad_x * input_stride_x - conv_pad_y * input_stride_y;
+ const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
uint8_t *out_ptr = out.ptr();
int ih = 0;
int oh = 0;
@@ -1016,13 +1022,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
- ARM_COMPUTE_RETURN_ERROR_ON(!conv_info.padding_is_symmetric());
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) == 1 && (std::get<0>(conv_info.pad()) || std::get<1>(conv_info.pad())),
- "Pad > 0 not supported for 1x1 weights");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) == 3 && (std::get<0>(conv_info.pad()) > 1 || std::get<1>(conv_info.pad()) > 1),
- "Pad > 1 not supported for 3x3 weights");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) == 5 && (std::get<0>(conv_info.pad()) > 2 || std::get<1>(conv_info.pad()) > 2),
- "Pad > 2 not supported for 5x5 weights");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported.");
ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(2) != input->dimension(2));
@@ -1204,7 +1203,7 @@ Status NEDirectConvolutionLayerKernel::validate(const ITensorInfo *input, const
unsigned int num_weight_elems_read_per_row = 0;
unsigned int num_elems_read_per_iteration = 0;
unsigned int num_elems_written_per_iteration = 0;
- BorderSize border_size(conv_info.pad().first, conv_info.pad().second);
+ BorderSize border_size = {};
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, output, conv_info));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(),
weights->clone().get(),
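
In the NEON hunks above, the convolvers that previously read from in.ptr() directly, or used the clamped symmetric pad, now all rebase the input pointer by the left/top padding in bytes. A minimal sketch of that pointer arithmetic; the helper name is hypothetical, the kernels inline the expression:

#include <cstdint>

inline const uint8_t *rebase_input_ptr(const uint8_t *in_ptr,
                                       unsigned int conv_pad_left,
                                       unsigned int conv_pad_top,
                                       int          input_stride_x, // bytes per element
                                       int          input_stride_y) // bytes per row
{
    // Step back by pad_left columns and pad_top rows so that the window origin
    // addresses the (virtual) top-left padded element.
    return in_ptr - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
}
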
diff --git a/tests/datasets/DepthwiseConvolutionLayerDataset.h b/tests/datasets/DepthwiseConvolutionLayerDataset.h
index 629217a8a8..b8a16a7c3d 100644
--- a/tests/datasets/DepthwiseConvolutionLayerDataset.h
+++ b/tests/datasets/DepthwiseConvolutionLayerDataset.h
@@ -159,6 +159,9 @@ public:
add_config(TensorShape(33U, 27U, 11U), TensorShape(3U, 3U, 11U), TensorShape(11U, 14U, 11U), PadStrideInfo(3, 2, 1, 1));
add_config(TensorShape(21U, 31U, 9U, 4U), TensorShape(3U, 3U, 9U), TensorShape(21U, 15U, 9U, 4U), PadStrideInfo(1, 2, 1, 0));
add_config(TensorShape(33U, 27U, 11U, 3U), TensorShape(3U, 3U, 11U), TensorShape(31U, 14U, 11U, 3U), PadStrideInfo(1, 2, 0, 1));
+ // Asymmetric padding
+ add_config(TensorShape(33U, 27U, 11U), TensorShape(3U, 3U, 11U), TensorShape(16U, 13U, 11U), PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR));
+ add_config(TensorShape(33U, 27U, 11U), TensorShape(3U, 3U, 11U), TensorShape(18U, 14U, 11U), PadStrideInfo(2, 2, 3, 1, 2, 1, DimensionRoundingType::FLOOR));
}
};
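
The two new depthwise configs above use asymmetric padding with FLOOR rounding, and their expected shapes follow from the usual output-size formula. An illustrative check with the formula written out explicitly (not taken from the library):

#include <cassert>

// Standard output-dimension formula with FLOOR rounding (integer division floors
// here because all terms are non-negative).
constexpr int out_dim_floor(int in, int kernel, int stride, int pad_before, int pad_after)
{
    return (in + pad_before + pad_after - kernel) / stride + 1;
}

int main()
{
    // PadStrideInfo(2, 2, 0, 1, 0, 1, FLOOR): 33x27 input, 3x3 kernel -> 16x13 output
    assert(out_dim_floor(33, 3, 2, 0, 1) == 16);
    assert(out_dim_floor(27, 3, 2, 0, 1) == 13);

    // PadStrideInfo(2, 2, 3, 1, 2, 1, FLOOR): 33x27 input, 3x3 kernel -> 18x14 output
    assert(out_dim_floor(33, 3, 2, 3, 1) == 18);
    assert(out_dim_floor(27, 3, 2, 2, 1) == 14);
    return 0;
}
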
diff --git a/tests/datasets/PoolingLayerDataset.h b/tests/datasets/PoolingLayerDataset.h
index 56ec3b87d8..53e392fe69 100644
--- a/tests/datasets/PoolingLayerDataset.h
+++ b/tests/datasets/PoolingLayerDataset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -105,6 +105,19 @@ private:
std::vector<TensorShape> _dst_shapes{};
std::vector<PoolingLayerInfo> _infos{};
};
+
+// Special pooling dataset
+class PoolingLayerDatasetSpecial final : public PoolingLayerDataset
+{
+public:
+ PoolingLayerDatasetSpecial()
+ {
+ // Special cases
+ add_config(TensorShape(60U, 52U, 3U, 2U), TensorShape(13U, 11U, 32U), PoolingLayerInfo(PoolingType::AVG, Size2D(100, 100), PadStrideInfo(5, 5, 50, 50), true));
+ // Asymmetric padding
+ add_config(TensorShape(112U, 112U, 32U), TensorShape(56U, 56U, 32U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR)));
+ }
+};
} // namespace datasets
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index 4af825e526..bf8b4057ee 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -56,8 +56,8 @@ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); /**< Tolerance for qu
const auto data = combine(datasets::SmallDirectConvolutionShapes(),
combine(framework::dataset::make("StrideX", 1, 3),
combine(framework::dataset::make("StrideY", 1, 3),
- combine(concat(combine(framework::dataset::make("PadX", 0),
- combine(framework::dataset::make("PadY", 0),
+ combine(concat(combine(framework::dataset::make("PadX", 0, 1),
+ combine(framework::dataset::make("PadY", 0, 1),
framework::dataset::make("KernelSize", 1))),
combine(framework::dataset::make("PadX", 0, 2),
combine(framework::dataset::make("PadY", 0, 2),
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index dc9604423f..9da4c55c78 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
+#include "tests/datasets/PoolingLayerDataset.h"
#include "tests/datasets/PoolingTypesDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
@@ -43,12 +44,6 @@ namespace validation
{
namespace
{
-/** Failing data set */
-const auto PoolingLayerDatasetSpecial = ((((framework::dataset::make("Shape", TensorShape{ 60U, 52U, 3U, 5U })
- * framework::dataset::make("PoolType", PoolingType::AVG))
- * framework::dataset::make("PoolingSize", Size2D(100, 100)))
- * framework::dataset::make("PadStride", PadStrideInfo(5, 5, 50, 50)))
- * framework::dataset::make("ExcludePadding", true));
/** Input data set for floating-point data types */
const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(7, 7), Size2D(9, 9), Size2D(5, 7), Size2D(7, 9) })),
framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
@@ -121,9 +116,12 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
template <typename T>
using CLPoolingLayerFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+template <typename T>
+using CLSpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSpecial, CLPoolingLayerFixture<float>, framework::DatasetMode::ALL, PoolingLayerDatasetSpecial * framework::dataset::make("DataType", DataType::F32))
+FIXTURE_DATA_TEST_CASE(RunSpecial, CLSpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index f51752d946..57e030c349 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -49,8 +49,8 @@ constexpr AbsoluteTolerance<float> tolerance_fp16(0.01f); /**< Tolerance for ha
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
/** Direct convolution data set. */
-const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", 0),
- combine(framework::dataset::make("PadY", 0),
+const auto data_pad_f32 = concat(concat(combine(framework::dataset::make("PadX", 0, 1),
+ combine(framework::dataset::make("PadY", 0, 1),
framework::dataset::make("KernelSize", 1))),
combine(framework::dataset::make("PadX", 0, 2),
combine(framework::dataset::make("PadY", 0, 2),
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 4697d4db01..350a7b883b 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
+#include "tests/datasets/PoolingLayerDataset.h"
#include "tests/datasets/PoolingTypesDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
@@ -119,8 +120,16 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
template <typename T>
using NEPoolingLayerFixture = PoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T>;
+template <typename T>
+using NESpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T>;
+
TEST_SUITE(Float)
TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSpecial, NESpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType",
DataType::F32))))
{
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index f101199365..3bbb403ae7 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -164,6 +164,18 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpecialPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape src_shape, TensorShape dst_shape, PoolingLayerInfo pool_info, DataType data_type)
+ {
+ ARM_COMPUTE_UNUSED(dst_shape);
+ PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(src_shape, pool_info, data_type, 0, QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -173,6 +185,7 @@ public:
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type), data_type, 0, QuantizationInfo());
}
};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/reference/DepthwiseConvolutionLayer.cpp b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
index ffea1bcf89..b2a7067709 100644
--- a/tests/validation/reference/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/reference/DepthwiseConvolutionLayer.cpp
@@ -143,10 +143,10 @@ SimpleTensor<uint8_t> depthwise_convolution(const SimpleTensor<uint8_t> &src, co
const int filter_half_width = filter_width / 2;
const int filter_half_height = filter_height / 2;
- const int pad_left = std::min(static_cast<int>(conv_info.pad_left()), filter_half_width);
- const int pad_top = std::min(static_cast<int>(conv_info.pad_top()), filter_half_height);
- const int pad_right = std::min(static_cast<int>(conv_info.pad_right()), filter_half_width);
- const int pad_bottom = std::min(static_cast<int>(conv_info.pad_bottom()), filter_half_height);
+ const int pad_left = conv_info.pad_left();
+ const int pad_top = conv_info.pad_top();
+ const int pad_right = conv_info.pad_right();
+ const int pad_bottom = conv_info.pad_bottom();
const int minimum_x = -pad_left + filter_half_width;
const int minimum_y = -pad_top + filter_half_height;