 arm_compute/core/Utils.h                      | 14
 arm_compute/core/utils/misc/ShapeCalculator.h | 30
 src/core/Utils.cpp                            | 29
 src/core/cpu/kernels/CpuPoolingKernel.cpp     | 91
 src/core/gpu/cl/kernels/ClPoolingKernel.cpp   | 12
 tests/datasets/PoolingLayerDataset.h          |  4
 tests/validation/CL/PoolingLayer.cpp          |  5
 tests/validation/NEON/PoolingLayer.cpp        | 10
 8 files changed, 113 insertions(+), 82 deletions(-)
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index d5c365e6ab..af9a777a0c 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -779,6 +779,20 @@ std::pair<unsigned int, unsigned int> scaled_dimensions(int width, int height,
const PadStrideInfo &pad_stride_info,
const Size2D &dilation = Size2D(1U, 1U));
+/** Returns the calculated width and height of the output scaled tensor, depending on the dimensions rounding mode.
+ *
+ * @param[in] width Width of input tensor (Number of columns)
+ * @param[in] height Height of input tensor (Number of rows)
+ * @param[in] kernel_width Kernel width.
+ * @param[in] kernel_height Kernel height.
+ * @param[in] pad_stride_info Pad and stride information.
+ *
+ * @return A pair with the new width in the first position and the new height in the second; the returned values can be < 1
+ */
+std::pair<int, int> scaled_dimensions_signed(int width, int height,
+ int kernel_width, int kernel_height,
+ const PadStrideInfo &pad_stride_info);
+
/** Check if the given reduction operation should be handled in a serial way.
*
* @param[in] op Reduction operation to perform
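[Editor's note] For orientation, here is a minimal usage sketch of the new declaration (hypothetical driver code; assumes an Arm Compute Library build with these headers on the include path). A 2x2 kernel sliding over a 16x1 plane at stride 1 with no padding yields a height of 0, which the signed return type can now represent:

```cpp
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include <cstdio>

int main()
{
    // Stride 1x1, zero padding, FLOOR rounding (the PadStrideInfo defaults)
    const arm_compute::PadStrideInfo info(1, 1, 0, 0);
    const auto dims = arm_compute::scaled_dimensions_signed(16, 1, 2, 2, info);
    // dims == {15, 0}: the width survives, but the height collapses below 1,
    // which callers are now expected to detect and reject.
    std::printf("w=%d h=%d\n", dims.first, dims.second);
    return (dims.second < 1) ? 0 : 1;
}
```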
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 8e49c068af..d0dc202f91 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -759,25 +759,27 @@ inline TensorShape compute_min_max_shape(const ITensorInfo *input)
*/
inline TensorShape compute_pool_shape(const ITensorInfo &input, PoolingLayerInfo pool_info)
{
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
+ int pooled_w = 0;
+ int pooled_h = 0;
TensorShape output_shape{ input.tensor_shape() };
- const bool is_global_pooling = pool_info.is_global_pooling;
- const unsigned int idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
- const unsigned int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
- const unsigned int pool_size_x = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
- const unsigned int pool_size_y = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;
+ const bool is_global_pooling = pool_info.is_global_pooling;
+ const int idx_width = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
+ const int input_width = input.tensor_shape()[idx_width];
+ const int input_height = input.tensor_shape()[idx_height];
+ const int pool_size_x = is_global_pooling ? output_shape[idx_width] : pool_info.pool_size.width;
+ const int pool_size_y = is_global_pooling ? output_shape[idx_height] : pool_info.pool_size.height;
- std::tie(pooled_w, pooled_h) = scaled_dimensions(output_shape[idx_width],
- output_shape[idx_height],
- pool_size_x,
- pool_size_y,
- pool_info.pad_stride_info);
+ std::tie(pooled_w, pooled_h) = scaled_dimensions_signed(input_width, input_height,
+ pool_size_x, pool_size_y,
+ pool_info.pad_stride_info);
- output_shape.set(idx_width, pooled_w);
- output_shape.set(idx_height, pooled_h);
+ ARM_COMPUTE_ERROR_ON_MSG((pooled_w < 1 || pooled_h < 1), "Calculated output dimension size is invalid");
+
+ output_shape.set(idx_width, static_cast<size_t>(pooled_w));
+ output_shape.set(idx_height, static_cast<size_t>(pooled_h));
return output_shape;
}
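[Editor's note] A hedged sketch of the new contract: compute_pool_shape() now computes the pooled size in signed arithmetic and asserts that both dimensions are >= 1, instead of letting an invalid configuration wrap around in unsigned arithmetic. Shapes and constructors below follow the test files in this patch; a valid configuration passes the assert unchanged:

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;

int main()
{
    // NCHW: W = 13, H = 13; 3x3 average pool, stride 1, no padding
    const TensorInfo       src(TensorShape(13U, 13U, 5U), 1, DataType::F32);
    const PoolingLayerInfo pool(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0));
    // floor((13 - 3) / 1) + 1 = 11 on both axes, so the new assert is satisfied
    const TensorShape out = misc::shape_calculator::compute_pool_shape(src, pool);
    return (out[0] == 11 && out[1] == 11) ? 0 : 1;
}
```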
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index e44c86db88..b81b498ae5 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -426,6 +426,35 @@ std::pair<unsigned int, unsigned int> scaled_dimensions(int width, int height,
return std::make_pair(static_cast<unsigned int>(w), static_cast<unsigned int>(h));
}
+std::pair<int, int> scaled_dimensions_signed(int width, int height,
+ int kernel_width, int kernel_height,
+ const PadStrideInfo &pad_stride_info)
+{
+ const int pad_left = pad_stride_info.pad_left();
+ const int pad_top = pad_stride_info.pad_top();
+ const int pad_right = pad_stride_info.pad_right();
+ const int pad_bottom = pad_stride_info.pad_bottom();
+ const int stride_x = pad_stride_info.stride().first;
+ const int stride_y = pad_stride_info.stride().second;
+ int w = 0;
+ int h = 0;
+ switch(pad_stride_info.round())
+ {
+ case DimensionRoundingType::FLOOR:
+ w = static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
+ h = static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
+ break;
+ case DimensionRoundingType::CEIL:
+ w = static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - kernel_width) / stride_x) + 1));
+ h = static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - kernel_height) / stride_y) + 1));
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported rounding type");
+ }
+
+ return std::make_pair(w, h);
+}
+
bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
{
const bool is_min_max = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
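[Editor's note] A note on the arithmetic: the implementation deliberately rounds through float and std::floor rather than using plain integer division, because C++ integer division truncates toward zero and would round a negative intermediate the wrong way. A standalone sketch of the difference:

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    const int width = 1, kernel = 2, stride = 2; // no padding
    // -1 / 2 truncates toward zero to 0, so +1 gives 1 (an output that doesn't exist)
    const int truncated = (width - kernel) / stride + 1;
    // floor(-0.5) = -1, so +1 gives 0, correctly flagging the invalid configuration
    const int floored   = static_cast<int>(std::floor(static_cast<float>(width - kernel) / stride)) + 1;
    std::printf("truncating: %d, flooring: %d\n", truncated, floored);
    return 0;
}
```

The same reasoning applies to the CEIL branch via std::ceil.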
diff --git a/src/core/cpu/kernels/CpuPoolingKernel.cpp b/src/core/cpu/kernels/CpuPoolingKernel.cpp
index 115a3a4c67..a55f60d7ad 100644
--- a/src/core/cpu/kernels/CpuPoolingKernel.cpp
+++ b/src/core/cpu/kernels/CpuPoolingKernel.cpp
@@ -183,14 +183,27 @@ const PoolingKernel *get_implementation(DataType dt, DataLayout dl, int pool_str
}
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info,
- unsigned int &pooled_w, unsigned int pooled_h, const ITensorInfo *indices, Size2D pool_size)
+ const ITensorInfo *indices, Size2D pool_size)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(pool_size.x() == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(pool_size.y() == 0);
int pool_stride_x = 0;
int pool_stride_y = 0;
+ int output_width = 0;
+ int output_height = 0;
PoolingType pool_type = pool_info.pool_type;
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
+ const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+ std::tie(output_width, output_height) = scaled_dimensions_signed(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height],
+ pool_size.x(), pool_size.y(), pool_info.pad_stride_info);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1), "Calculated output dimension size is invalid");
+
+ TensorInfo out_info(TensorInfo(compute_pool_shape(*src, pool_info), 1, dst->data_type()));
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
@@ -210,14 +223,11 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(src, dst);
- ARM_COMPUTE_RETURN_ERROR_ON((dst->dimension(get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::WIDTH)) != pooled_w)
- || (dst->dimension(get_data_layout_dimension_index(src->data_layout(), DataLayoutDimension::HEIGHT)) != pooled_h));
-
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &out_info);
if(indices)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG((pool_size != Size2D(2, 2)), "Pooling indices only supported for pool size 2x2");
- ARM_COMPUTE_RETURN_ERROR_ON((indices->dimension(get_data_layout_dimension_index(indices->data_layout(), DataLayoutDimension::WIDTH)) != pooled_w)
- || (indices->dimension(get_data_layout_dimension_index(indices->data_layout(), DataLayoutDimension::HEIGHT)) != pooled_h));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(indices, &out_info);
}
}
@@ -227,18 +237,10 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
return Status{};
}
-Status validate_arguments_pool_info(const unsigned int pool_size_x, const unsigned int pool_size_y)
-{
- ARM_COMPUTE_RETURN_ERROR_ON(pool_size_x == 0);
- ARM_COMPUTE_RETURN_ERROR_ON(pool_size_y == 0);
-
- return Status{};
-}
-
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, const PoolingLayerInfo &pool_info,
unsigned int &num_elems_processed_per_iteration,
BorderSize &border_size,
- unsigned int pooled_w, unsigned int pooled_h, int pool_size_x, int pool_size_y)
+ int pool_size_x, int pool_size_y)
{
// dst auto-initialization if not yet initialized
auto_init_if_empty(*dst, src->clone()->set_tensor_shape(compute_pool_shape(*src, pool_info)));
@@ -260,18 +262,13 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
const int src_height = src->dimension(idx_height);
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_pad_right = pad_stride_info.pad_right();
- const int pool_pad_top = pad_stride_info.pad_top();
- const int pool_pad_left = pad_stride_info.pad_left();
- const int pool_pad_bottom = pad_stride_info.pad_bottom();
- const bool is_square = pool_size_x == pool_size_y;
-
- // Check dst dimensions
- std::tie(pooled_w, pooled_h) = scaled_dimensions(src->dimension(idx_width),
- src->dimension(idx_height),
- pool_size_x,
- pool_size_y,
- pad_stride_info);
+ const int pool_pad_right = pad_stride_info.pad_right();
+ const int pool_pad_top = pad_stride_info.pad_top();
+ const int pool_pad_left = pad_stride_info.pad_left();
+ const int pool_pad_bottom = pad_stride_info.pad_bottom();
+ const bool is_square = pool_size_x == pool_size_y;
+ const unsigned int pooled_w = dst->dimension(idx_width);
+ const unsigned int pooled_h = dst->dimension(idx_height);
// If the pool size is not square, the generic (non-optimized) MxN kernel is executed
num_elems_read_per_iteration = 1;
@@ -398,20 +395,8 @@ void CpuPoolingKernel::configure(ITensorInfo *src, ITensorInfo *dst, const Pooli
is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width,
is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height);
- // Validate pool info before calling scaled_dimensions
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_pool_info(pool_size.x(), pool_size.y()));
-
- // Check dst dimensions
- unsigned int pooled_w;
- unsigned int pooled_h;
- std::tie(pooled_w, pooled_h) = scaled_dimensions(src->dimension(idx_width),
- src->dimension(idx_height),
- pool_size.x(),
- pool_size.y(),
- pad_stride_info);
-
// Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, pooled_w, pooled_h, indices, pool_size));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices, pool_size));
// Set instance variables
_pool_info = pool_info;
@@ -429,7 +414,7 @@ void CpuPoolingKernel::configure(ITensorInfo *src, ITensorInfo *dst, const Pooli
{
// Configure kernel window
auto win_config = validate_and_configure_window(src, dst, indices, pool_info, _num_elems_processed_per_iteration,
- _border_size, pooled_w, pooled_h, pool_size.x(), pool_size.y());
+ _border_size, pool_size.x(), pool_size.y());
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICpuKernel::configure(win_config.second);
}
@@ -439,36 +424,22 @@ Status CpuPoolingKernel::validate(const ITensorInfo *src, const ITensorInfo *dst
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
unsigned int num_elems_processed_per_iteration = 0;
BorderSize border_size(0);
- const bool is_global_pooling = pool_info.is_global_pooling;
- unsigned int pool_size_x = 0;
- unsigned int pool_size_y = 0;
+ const bool is_global_pooling = pool_info.is_global_pooling;
// Get data layout
const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- pool_size_x = is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
- pool_size_y = is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
-
- // Validate pool info before calling scaled_dimensions
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_pool_info(pool_size_x, pool_size_y));
-
- // Check dst dimensions
- std::tie(pooled_w, pooled_h) = scaled_dimensions(src->dimension(idx_width),
- src->dimension(idx_height),
- pool_size_x,
- pool_size_y,
- pool_info.pad_stride_info);
+ unsigned int pool_size_x = is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
+ unsigned int pool_size_y = is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, pooled_w, pooled_h, indices, Size2D(pool_size_x, pool_size_y)));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices, Size2D(pool_size_x, pool_size_y)));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(),
- (indices) ? indices->clone().get() : nullptr, pool_info, num_elems_processed_per_iteration, border_size, pooled_w, pooled_h,
+ (indices) ? indices->clone().get() : nullptr, pool_info, num_elems_processed_per_iteration, border_size,
pool_size_x, pool_size_y)
.first);
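[Editor's note] At the public API level, the refactor means a configuration whose pooled height would collapse to 0 is now rejected by validate() instead of producing a wrapped unsigned size. A sketch mirroring the negative test case added to tests/validation/NEON/PoolingLayer.cpp later in this patch (assumes a Neon-enabled ACL build):

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"

using namespace arm_compute;

int main()
{
    // NHWC: C = 1, W = 16, H = 1; a 2x2 max pool at stride 1 needs H >= 2
    const TensorInfo src(TensorShape(1U, 16U, 1U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(1U, 15U, 1U), 1, DataType::F32);
    const PoolingLayerInfo pool(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false);
    const Status status = NEPoolingLayer::validate(&src, &dst, pool);
    return bool(status) ? 1 : 0; // expected to fail validation, as in the test
}
```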
diff --git a/src/core/gpu/cl/kernels/ClPoolingKernel.cpp b/src/core/gpu/cl/kernels/ClPoolingKernel.cpp
index a432877a1d..08a3ce3784 100644
--- a/src/core/gpu/cl/kernels/ClPoolingKernel.cpp
+++ b/src/core/gpu/cl/kernels/ClPoolingKernel.cpp
@@ -67,6 +67,18 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
ARM_COMPUTE_RETURN_ERROR_ON_MSG((is_data_type_quantized_asymmetric(src->data_type()) && pool_info.pool_type == PoolingType::L2),
"Unsupported combination of parameters!");
+ const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const bool is_global_pooling = pool_info.is_global_pooling;
+ unsigned int pool_size_x = is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
+ unsigned int pool_size_y = is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
+ int output_width = 0;
+ int output_height = 0;
+ std::tie(output_width, output_height) = scaled_dimensions_signed(src->tensor_shape()[idx_width], src->tensor_shape()[idx_height],
+ pool_size_x, pool_size_y, pool_info.pad_stride_info);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((output_width < 1 || output_height < 1), "Calculated output dimension size is invalid");
+
// Check indices
if(indices)
{
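[Editor's note] The CL path resolves the pool size from is_global_pooling before running the same signed-dimension check; global pooling always passes it, since floor((W - W) / stride) + 1 == 1 on both axes. A sketch mirroring the existing positive CL test case (13x13x5 to 1x1x5 global average pool); it assumes an OpenCL-capable device and that CLScheduler::default_init() is sufficient bring-up for a standalone program:

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // bring up the CL backend before validating
    const TensorInfo src(TensorShape(13U, 13U, 5U), 1, DataType::F32);
    const TensorInfo dst(TensorShape(1U, 1U, 5U), 1, DataType::F32);
    const PoolingLayerInfo pool(PoolingType::AVG, DataLayout::NCHW); // global pooling
    return bool(CLPoolingLayer::validate(&src, &dst, pool)) ? 0 : 1; // expected to pass
}
```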
diff --git a/tests/datasets/PoolingLayerDataset.h b/tests/datasets/PoolingLayerDataset.h
index 01b2491eb2..1557240fd2 100644
--- a/tests/datasets/PoolingLayerDataset.h
+++ b/tests/datasets/PoolingLayerDataset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -106,7 +106,7 @@ public:
PoolingLayerDatasetSpecial()
{
// Special cases
- add_config(TensorShape(2U, 3U, 4U, 1U), PoolingLayerInfo(PoolingType::AVG, Size2D(3, 3), DataLayout::NCHW, PadStrideInfo(3, 3, 0, 0), true));
+ add_config(TensorShape(2U, 3U, 4U, 1U), PoolingLayerInfo(PoolingType::AVG, Size2D(2, 2), DataLayout::NCHW, PadStrideInfo(3, 3, 0, 0), true));
add_config(TensorShape(60U, 52U, 3U, 2U), PoolingLayerInfo(PoolingType::AVG, Size2D(100, 100), DataLayout::NCHW, PadStrideInfo(5, 5, 50, 50), true));
// Asymmetric padding
add_config(TensorShape(112U, 112U, 32U), PoolingLayerInfo(PoolingType::MAX, 3, DataLayout::NCHW, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR)));
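[Editor's note] The arithmetic behind the Size2D(3, 3) to Size2D(2, 2) change above: on the 2(W) x 3(H) NCHW shape with stride 3 and no padding, a 3x3 pool computes a zero-width output, which the new assert rejects. A standalone check (the pooled() helper is a hypothetical stand-in for the FLOOR branch of scaled_dimensions_signed):

```cpp
#include <cassert>
#include <cmath>

// FLOOR rounding, zero padding
int pooled(int dim, int kernel, int stride)
{
    return static_cast<int>(std::floor(static_cast<float>(dim - kernel) / stride)) + 1;
}

int main()
{
    assert(pooled(2, 3, 3) == 0); // old Size2D(3, 3) on the 2-wide axis: now rejected
    assert(pooled(2, 2, 3) == 1); // new Size2D(2, 2): valid 1x1 output
    assert(pooled(3, 2, 3) == 1); // the height axis is fine either way
    return 0;
}
```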
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 0153e659ae..63dec3910f 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -101,6 +101,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
@@ -110,6 +111,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 2, 0)),
@@ -119,8 +121,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
+ PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
})),
- framework::dataset::make("Expected", { false, false, false, false, true, false, true, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, false, true, true , false})),
input_info, output_info, pool_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index acc9c3e516..b70a18907f 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -97,6 +97,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32), // Invalid output Global Pooling
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8), // Invalid exclude_padding = false with quantized type, no actual padding and NHWC
TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32),
@@ -106,6 +107,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
})),
framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
@@ -115,8 +117,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
+ PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
})),
- framework::dataset::make("Expected", { false, false, false, false, true, false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true, false, true, false, false})),
input_info, output_info, pool_info, expected)
{
bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info));
@@ -145,15 +148,12 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunIndices, NEPoolingLayerIndicesFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerIndicesDatasetFPSmall,
framework::dataset::make("DataType",
DataType::F32))),
- framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })
-
- ))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
validate(Accessor(_target_indices), _ref_indices);
}
-
FIXTURE_DATA_TEST_CASE(RunSpecial, NESpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
{
// Validate output