diff options
Diffstat (limited to 'tests')
 tests/validation/CL/PoolingLayer.cpp            |  8 ++++----
 tests/validation/GLES_COMPUTE/PoolingLayer.cpp  |  4 ++--
 tests/validation/NEON/PoolingLayer.cpp          |  6 +++---
 tests/validation/fixtures/PoolingLayerFixture.h |  8 ++++----
 tests/validation/reference/PoolingLayer.cpp     | 47 +++++++++++++++++++++++++----------------------
 tests/validation/reference/PoolingLayer.h       |  6 +++---
 6 files changed, 41 insertions(+), 38 deletions(-)
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp index fdb2a8dd20..a830e0841f 100644 --- a/tests/validation/CL/PoolingLayer.cpp +++ b/tests/validation/CL/PoolingLayer.cpp @@ -46,21 +46,21 @@ namespace /** Failing data set */ const auto PoolingLayerDatasetSpecial = ((((framework::dataset::make("Shape", TensorShape{ 60U, 52U, 3U, 5U }) * framework::dataset::make("PoolType", PoolingType::AVG)) - * framework::dataset::make("PoolingSize", 100)) + * framework::dataset::make("PoolingSize", Size2D(100, 100))) * framework::dataset::make("PadStride", PadStrideInfo(5, 5, 50, 50))) * framework::dataset::make("ExcludePadding", true)); /** Input data set for floating-point data types */ -const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 4, 7, 9 })), +const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(7, 7), Size2D(9, 9) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); /** Input data set for fixed-point data types */ -const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })), +const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); /** Input data set for asymmetric data type */ -const 
auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })), +const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); diff --git a/tests/validation/GLES_COMPUTE/PoolingLayer.cpp b/tests/validation/GLES_COMPUTE/PoolingLayer.cpp index e789dbab07..1496ceec1c 100644 --- a/tests/validation/GLES_COMPUTE/PoolingLayer.cpp +++ b/tests/validation/GLES_COMPUTE/PoolingLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -44,7 +44,7 @@ namespace validation namespace { /** Input data set for floating-point data types */ -const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 4, 7, 9 })), +const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(4, 4), Size2D(7, 7), Size2D(9, 9) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp index e526613bdd..eace8d7148 100644 --- a/tests/validation/NEON/PoolingLayer.cpp +++ b/tests/validation/NEON/PoolingLayer.cpp @@ -44,17 +44,17 @@ namespace validation namespace { /** Input data set for float data types */ -const auto 
PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 7, 9 })), +const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(7, 7), Size2D(9, 9) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); /** Input data set for quantized data types */ -const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })), +const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { false })); /** Input data set for asymmetric data type */ -const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3, 9 })), +const auto PoolingLayerDatasetQASYMM8 = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(9, 9) })), framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })), framework::dataset::make("ExcludePadding", { true, false })); diff --git a/tests/validation/fixtures/PoolingLayerFixture.h 
b/tests/validation/fixtures/PoolingLayerFixture.h index 890eef2d4b..f101199365 100644 --- a/tests/validation/fixtures/PoolingLayerFixture.h +++ b/tests/validation/fixtures/PoolingLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -132,7 +132,7 @@ class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixtur { public: template <typename...> - void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type) + void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding), data_type, 0, QuantizationInfo()); @@ -144,7 +144,7 @@ class PoolingLayerValidationFixedPointFixture : public PoolingLayerValidationGen { public: template <typename...> - void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits) + void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding), data_type, fractional_bits, QuantizationInfo()); @@ -156,7 +156,7 @@ class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGene { public: template <typename...> - void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, QuantizationInfo quantization_info) + void setup(TensorShape shape, PoolingType 
pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, QuantizationInfo quantization_info) { PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding), data_type, 0, quantization_info); diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp index d05c0403ff..c14ab98c28 100644 --- a/tests/validation/reference/PoolingLayer.cpp +++ b/tests/validation/reference/PoolingLayer.cpp @@ -37,14 +37,15 @@ namespace reference { namespace { -TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info) +TensorShape calculate_output_shape(TensorShape shape, const PoolingLayerInfo &info) { - TensorShape dst_shape = shape; - const int pool_size = info.is_global_pooling() ? shape.x() : info.pool_size(); + TensorShape dst_shape = shape; + const int pool_size_x = info.is_global_pooling() ? shape.x() : info.pool_size().width; + const int pool_size_y = info.is_global_pooling() ? shape.y() : info.pool_size().height; const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(shape.x(), shape.y(), - pool_size, - pool_size, + pool_size_x, + pool_size_y, info.pad_stride_info()); dst_shape.set(0, scaled_dims.first); dst_shape.set(1, scaled_dims.second); @@ -54,11 +55,12 @@ TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info) } // namespace template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type> -SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) +SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info) { ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y())); - const int pool_size = info.is_global_pooling() ? src.shape().x() : info.pool_size(); + const int pool_size_x = info.is_global_pooling() ? 
src.shape().x() : info.pool_size().width; + const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height; PoolingType type = info.pool_type(); int pool_stride_x = info.pad_stride_info().stride().first; int pool_stride_y = info.pad_stride_info().stride().second; @@ -88,8 +90,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) { int wstart = w * pool_stride_x - pad_left; int hstart = h * pool_stride_y - pad_top; - int wend = std::min(wstart + pool_size, w_src); - int hend = std::min(hstart + pool_size, h_src); + int wend = std::min(wstart + pool_size_x, w_src); + int hend = std::min(hstart + pool_size_y, h_src); wstart = std::max(wstart, 0); hstart = std::max(hstart, 0); @@ -122,8 +124,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) T avg_val(0); int wstart = w * pool_stride_x - pad_left; int hstart = h * pool_stride_y - pad_top; - int wend = std::min(wstart + pool_size, w_src + pad_right); - int hend = std::min(hstart + pool_size, h_src + pad_bottom); + int wend = std::min(wstart + pool_size_x, w_src + pad_right); + int hend = std::min(hstart + pool_size_y, h_src + pad_bottom); int pool = (hend - hstart) * (wend - wstart); wstart = std::max(wstart, 0); hstart = std::max(hstart, 0); @@ -167,11 +169,12 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) } template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type> -SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) +SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info) { ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y())); - const int pool_size = info.is_global_pooling() ? src.shape().x() : info.pool_size(); + const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width; + const int pool_size_y = info.is_global_pooling() ? 
src.shape().y() : info.pool_size().height; PoolingType type = info.pool_type(); int pool_stride_x = info.pad_stride_info().stride().first; int pool_stride_y = info.pad_stride_info().stride().second; @@ -201,8 +204,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) { int wstart = w * pool_stride_x - pad_left; int hstart = h * pool_stride_y - pad_top; - int wend = std::min(wstart + pool_size, w_src); - int hend = std::min(hstart + pool_size, h_src); + int wend = std::min(wstart + pool_size_x, w_src); + int hend = std::min(hstart + pool_size_y, h_src); wstart = std::max(wstart, 0); hstart = std::max(hstart, 0); @@ -234,8 +237,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) { int wstart = w * pool_stride_x - pad_left; int hstart = h * pool_stride_y - pad_top; - int wend = std::min(wstart + pool_size, w_src + pad_right); - int hend = std::min(hstart + pool_size, h_src + pad_bottom); + int wend = std::min(wstart + pool_size_x, w_src + pad_right); + int hend = std::min(hstart + pool_size_y, h_src + pad_bottom); int pool = (hend - hstart) * (wend - wstart); wstart = std::max(wstart, 0); hstart = std::max(hstart, 0); @@ -288,7 +291,7 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info) } template <> -SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, PoolingLayerInfo info) +SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info) { SimpleTensor<float> src_tmp = convert_from_asymmetric(src); SimpleTensor<float> dst_tmp = pooling_layer<float>(src_tmp, info); @@ -296,10 +299,10 @@ SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, P return dst; } -template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, PoolingLayerInfo info); -template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, PoolingLayerInfo info); -template 
SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, PoolingLayerInfo info); -template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, PoolingLayerInfo info); +template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info); +template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info); +template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, const PoolingLayerInfo &info); +template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, const PoolingLayerInfo &info); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/PoolingLayer.h b/tests/validation/reference/PoolingLayer.h index 334054a0eb..b0d30af32a 100644 --- a/tests/validation/reference/PoolingLayer.h +++ b/tests/validation/reference/PoolingLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -36,10 +36,10 @@ namespace validation namespace reference { template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0> -SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info); +SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info); template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> -SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info); +SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info); } // namespace reference } // namespace validation } // namespace test |