author     Georgios Pinitas <georgios.pinitas@arm.com>  2017-10-30 15:56:32 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit     adaae7e453cc4cc07905daca68fa7b938555d581 (patch)
tree       712003aa080cf577b2b2af318f219367897dc0e4 /tests
parent     cf3935ffd4c67d9396c2435a3a28d3a159753105 (diff)
COMPMID-647: Exclude padding pixels from averaging factor.
Adds support for excluding the padding pixels from the average scaling factor calculation.

Change-Id: Ia13fbfeae235aff564db74191613921848231a01
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/93715
Reviewed-by: Robert Hughes <robert.hughes@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
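For context, a minimal standalone sketch of what the flag changes (illustrative code, not the library's API): with exclude-padding enabled the averaging divisor is the number of valid input pixels actually covered by the window, instead of the full pool_size * pool_size area that includes padding.

// Minimal sketch of average pooling over one window, with and without the
// padding pixels in the divisor. Names and layout here are assumptions for
// illustration only; they are not taken from ComputeLibrary.
#include <algorithm>
#include <vector>

float average_pool_window(const std::vector<float> &src, int w_src, int h_src,
                          int wstart, int hstart, int pool_size, bool exclude_padding)
{
    // wstart/hstart may be negative when the window overlaps the padding region.
    const int wend     = std::min(wstart + pool_size, w_src);
    const int hend     = std::min(hstart + pool_size, h_src);
    const int wclamped = std::max(wstart, 0);
    const int hclamped = std::max(hstart, 0);

    // Full window area counts the padding pixels; the clamped area does not.
    int pool = pool_size * pool_size;
    if(exclude_padding)
    {
        pool = (hend - hclamped) * (wend - wclamped);
    }

    float sum = 0.f;
    for(int y = hclamped; y < hend; ++y)
    {
        for(int x = wclamped; x < wend; ++x)
        {
            sum += src[y * w_src + x];
        }
    }
    return sum / static_cast<float>(pool);
}

The reference hunks below implement the same idea by overwriting the precomputed pool area with (hend - hstart) * (wend - wstart) after the window bounds have been clamped to the input.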
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp              11
-rw-r--r--  tests/validation/CPP/PoolingLayer.cpp             36
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp            10
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h   10
4 files changed, 41 insertions, 26 deletions
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index 809c80f28c..87b86fedf2 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -44,13 +44,14 @@ namespace validation
namespace
{
/** Input data set for float data types */
-const auto PoolingLayerDatasetFP = combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 4, 7, 9 })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 4, 7, 9 })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
+ framework::dataset::make("ExcludePadding", { true, false }));
/** Input data set for quantized data types */
-const auto PoolingLayerDatasetQS = combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
-
+const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
+ framework::dataset::make("ExcludePadding", { true, false }));
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
constexpr AbsoluteTolerance<float> tolerance_qs8(3); /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
diff --git a/tests/validation/CPP/PoolingLayer.cpp b/tests/validation/CPP/PoolingLayer.cpp
index 85a8343d87..4f755ce2c4 100644
--- a/tests/validation/CPP/PoolingLayer.cpp
+++ b/tests/validation/CPP/PoolingLayer.cpp
@@ -54,12 +54,13 @@ TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
- const int pool_size = info.pool_size();
- PoolingType type = info.pool_type();
- int pool_stride_x = info.pad_stride_info().stride().first;
- int pool_stride_y = info.pad_stride_info().stride().second;
- int pad_x = info.pad_stride_info().pad().first;
- int pad_y = info.pad_stride_info().pad().second;
+ const int pool_size = info.pool_size();
+ PoolingType type = info.pool_type();
+ int pool_stride_x = info.pad_stride_info().stride().first;
+ int pool_stride_y = info.pad_stride_info().stride().second;
+ int pad_x = info.pad_stride_info().pad().first;
+ int pad_y = info.pad_stride_info().pad().second;
+ bool exclude_padding = info.exclude_padding();
const auto w_src = static_cast<int>(src.shape()[0]);
const auto h_src = static_cast<int>(src.shape()[1]);
@@ -122,6 +123,11 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
hstart = std::max(hstart, 0);
wend = std::min(wend, w_src);
hend = std::min(hend, h_src);
+ // Exclude padding pixels from the average
+ if(exclude_padding)
+ {
+ pool = (hend - hstart) * (wend - wstart);
+ }
if(type == PoolingType::AVG)
{
@@ -157,12 +163,13 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
- const int pool_size = info.pool_size();
- PoolingType type = info.pool_type();
- int pool_stride_x = info.pad_stride_info().stride().first;
- int pool_stride_y = info.pad_stride_info().stride().second;
- int pad_x = info.pad_stride_info().pad().first;
- int pad_y = info.pad_stride_info().pad().second;
+ const int pool_size = info.pool_size();
+ PoolingType type = info.pool_type();
+ int pool_stride_x = info.pad_stride_info().stride().first;
+ int pool_stride_y = info.pad_stride_info().stride().second;
+ int pad_x = info.pad_stride_info().pad().first;
+ int pad_y = info.pad_stride_info().pad().second;
+ bool exclude_padding = info.exclude_padding();
const auto w_src = static_cast<int>(src.shape()[0]);
const auto h_src = static_cast<int>(src.shape()[1]);
@@ -224,6 +231,11 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
hstart = std::max(hstart, 0);
wend = std::min(wend, w_src);
hend = std::min(hend, h_src);
+ // Exclude padding pixels from the average
+ if(exclude_padding)
+ {
+ pool = (hend - hstart) * (wend - wstart);
+ }
using namespace fixed_point_arithmetic;
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index a721fb9d15..13384620bd 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -44,12 +44,14 @@ namespace validation
namespace
{
/** Input data set for float data types */
-const auto PoolingLayerDatasetFP = combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 7, 9 })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 7, 9 })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
+ framework::dataset::make("ExcludePadding", { true, false }));
/** Input data set for quantized data types */
-const auto PoolingLayerDatasetQS = combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })),
- framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+const auto PoolingLayerDatasetQS = combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), framework::dataset::make("PoolingSize", { 2, 3 })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
+ framework::dataset::make("ExcludePadding", { false }));
constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
#ifdef ARM_COMPUTE_AARCH64_V8_2
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index 775c4125fc..09b9e0ef1a 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -47,10 +47,10 @@ class PoolingLayerValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, DataType data_type, int fractional_bits)
+ void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits)
{
_fractional_bits = fractional_bits;
- PoolingLayerInfo info(pool_type, pool_size, pad_stride_info);
+ PoolingLayerInfo info(pool_type, pool_size, pad_stride_info, exclude_padding);
_target = compute_target(shape, info, data_type, fractional_bits);
_reference = compute_reference(shape, info, data_type, fractional_bits);
@@ -123,9 +123,9 @@ class PoolingLayerValidationFixture : public PoolingLayerValidationFixedPointFix
{
public:
template <typename...>
- void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, DataType data_type)
+ void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type)
{
- PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, data_type, 0);
+ PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding, data_type, 0);
}
};
@@ -136,7 +136,7 @@ public:
template <typename...>
void setup(TensorShape shape, PoolingType pool_type, DataType data_type)
{
- PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, shape.x(), PadStrideInfo(1, 1, 0, 0), data_type, 0);
+ PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, shape.x(), PadStrideInfo(1, 1, 0, 0), true, data_type, 0);
}
};
} // namespace validation
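As a closing usage note, the new fourth constructor argument seen in the fixture change above can be passed directly when building the pooling descriptor. A minimal sketch, assuming the descriptor types are declared in the library's core Types.h header (the constructor order mirrors the PoolingLayerInfo(pool_type, pool_size, pad_stride_info, exclude_padding) call in the fixture):

// Sketch only: building a pooling descriptor with the exclude-padding flag.
#include "arm_compute/core/Types.h" // assumed header for PoolingLayerInfo / PadStrideInfo

arm_compute::PoolingLayerInfo make_avg_pool_info(int pool_size, bool exclude_padding)
{
    // Stride 2x2 with 1x0 padding, one of the PadStride combinations used in the datasets above.
    const arm_compute::PadStrideInfo pad_stride_info(2, 2, 1, 0);
    return arm_compute::PoolingLayerInfo(arm_compute::PoolingType::AVG, pool_size,
                                         pad_stride_info, exclude_padding);
}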