path: root/tests/validation/reference/PoolingLayer.cpp
author    Isabella Gottardi <isabella.gottardi@arm.com>  2018-01-26 12:32:45 +0000
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:46:07 +0000
commit    6e464c37b5335e362ac3f988cc4b0beed5205ff4 (patch)
tree      24f758be47f1dd439a4ab3f8f3631a1ed7ef4566 /tests/validation/reference/PoolingLayer.cpp
parent    3364c4ca658d44f449a8d3d6e9eee31d90254f15 (diff)
COMPMID-828 - Add support for non square pool size - Part1
Change-Id: Ib8100e7c659c49694c746fa3f36ce20f44f6929f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/117804
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/reference/PoolingLayer.cpp')
-rw-r--r--  tests/validation/reference/PoolingLayer.cpp  47
1 file changed, 25 insertions(+), 22 deletions(-)
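
Note: the patch replaces the single square pool_size with separate pool_size_x / pool_size_y values taken from info.pool_size().width and info.pool_size().height, and passes both to arm_compute::scaled_dimensions() when computing the destination shape. As a rough illustration of the output-shape arithmetic involved, the standalone sketch below assumes floor rounding and symmetric padding; scaled_dims_floor is a hypothetical stand-in for the library helper, not its actual implementation.

#include <cstdio>
#include <utility>

// Hypothetical helper mirroring the shape math used by the reference:
// one pool size per axis, floor rounding, symmetric padding per axis.
std::pair<unsigned int, unsigned int> scaled_dims_floor(unsigned int width, unsigned int height,
                                                        unsigned int pool_x, unsigned int pool_y,
                                                        unsigned int stride_x, unsigned int stride_y,
                                                        unsigned int pad_x, unsigned int pad_y)
{
    const unsigned int out_w = (width + 2 * pad_x - pool_x) / stride_x + 1;
    const unsigned int out_h = (height + 2 * pad_y - pool_y) / stride_y + 1;
    return { out_w, out_h };
}

int main()
{
    // 16x16 input, non-square 3x5 pool, stride 2, no padding:
    // width  -> (16 - 3) / 2 + 1 = 7
    // height -> (16 - 5) / 2 + 1 = 6
    const auto dims = scaled_dims_floor(16, 16, 3, 5, 2, 2, 0, 0);
    std::printf("output: %u x %u\n", dims.first, dims.second);
    return 0;
}

With a square pool the two axes always agreed; splitting the size per axis is what lets the width and height of the output diverge, as in the 7 x 6 result above.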
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index d05c0403ff..c14ab98c28 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -37,14 +37,15 @@ namespace reference
{
namespace
{
-TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
+TensorShape calculate_output_shape(TensorShape shape, const PoolingLayerInfo &info)
{
- TensorShape dst_shape = shape;
- const int pool_size = info.is_global_pooling() ? shape.x() : info.pool_size();
+ TensorShape dst_shape = shape;
+ const int pool_size_x = info.is_global_pooling() ? shape.x() : info.pool_size().width;
+ const int pool_size_y = info.is_global_pooling() ? shape.y() : info.pool_size().height;
const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(shape.x(),
shape.y(),
- pool_size,
- pool_size,
+ pool_size_x,
+ pool_size_y,
info.pad_stride_info());
dst_shape.set(0, scaled_dims.first);
dst_shape.set(1, scaled_dims.second);
@@ -54,11 +55,12 @@ TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
} // namespace
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info)
{
ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
- const int pool_size = info.is_global_pooling() ? src.shape().x() : info.pool_size();
+ const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
+ const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
PoolingType type = info.pool_type();
int pool_stride_x = info.pad_stride_info().stride().first;
int pool_stride_y = info.pad_stride_info().stride().second;
@@ -88,8 +90,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
int wstart = w * pool_stride_x - pad_left;
int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size, w_src);
- int hend = std::min(hstart + pool_size, h_src);
+ int wend = std::min(wstart + pool_size_x, w_src);
+ int hend = std::min(hstart + pool_size_y, h_src);
wstart = std::max(wstart, 0);
hstart = std::max(hstart, 0);
@@ -122,8 +124,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
T avg_val(0);
int wstart = w * pool_stride_x - pad_left;
int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size, w_src + pad_right);
- int hend = std::min(hstart + pool_size, h_src + pad_bottom);
+ int wend = std::min(wstart + pool_size_x, w_src + pad_right);
+ int hend = std::min(hstart + pool_size_y, h_src + pad_bottom);
int pool = (hend - hstart) * (wend - wstart);
wstart = std::max(wstart, 0);
hstart = std::max(hstart, 0);
@@ -167,11 +169,12 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
}
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info)
{
ARM_COMPUTE_ERROR_ON(info.is_global_pooling() && (src.shape().x() != src.shape().y()));
- const int pool_size = info.is_global_pooling() ? src.shape().x() : info.pool_size();
+ const int pool_size_x = info.is_global_pooling() ? src.shape().x() : info.pool_size().width;
+ const int pool_size_y = info.is_global_pooling() ? src.shape().y() : info.pool_size().height;
PoolingType type = info.pool_type();
int pool_stride_x = info.pad_stride_info().stride().first;
int pool_stride_y = info.pad_stride_info().stride().second;
@@ -201,8 +204,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
int wstart = w * pool_stride_x - pad_left;
int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size, w_src);
- int hend = std::min(hstart + pool_size, h_src);
+ int wend = std::min(wstart + pool_size_x, w_src);
+ int hend = std::min(hstart + pool_size_y, h_src);
wstart = std::max(wstart, 0);
hstart = std::max(hstart, 0);
@@ -234,8 +237,8 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
int wstart = w * pool_stride_x - pad_left;
int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size, w_src + pad_right);
- int hend = std::min(hstart + pool_size, h_src + pad_bottom);
+ int wend = std::min(wstart + pool_size_x, w_src + pad_right);
+ int hend = std::min(hstart + pool_size_y, h_src + pad_bottom);
int pool = (hend - hstart) * (wend - wstart);
wstart = std::max(wstart, 0);
hstart = std::max(hstart, 0);
@@ -288,7 +291,7 @@ SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
}
template <>
-SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, PoolingLayerInfo info)
+SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info)
{
SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
SimpleTensor<float> dst_tmp = pooling_layer<float>(src_tmp, info);
@@ -296,10 +299,10 @@ SimpleTensor<uint8_t> pooling_layer<uint8_t>(const SimpleTensor<uint8_t> &src, P
return dst;
}
-template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, PoolingLayerInfo info);
-template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, PoolingLayerInfo info);
-template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, PoolingLayerInfo info);
-template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, PoolingLayerInfo info);
+template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info);
+template SimpleTensor<half> pooling_layer(const SimpleTensor<half> &src, const PoolingLayerInfo &info);
+template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, const PoolingLayerInfo &info);
+template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, const PoolingLayerInfo &info);
} // namespace reference
} // namespace validation
} // namespace test
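
For reference, the window clamping this patch touches (wend clamped with pool_size_x on the width axis, hend with pool_size_y on the height axis) can be exercised with a small standalone max-pooling loop. This is an illustrative sketch that follows the diff's variable names; it is not the library's or the test reference's actual code.

#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

// Minimal single-channel max pooling over a row-major W x H buffer with a
// non-square pool window, mirroring the clamping in the patched reference.
std::vector<float> max_pool_2d(const std::vector<float> &src, int w_src, int h_src,
                               int pool_size_x, int pool_size_y,
                               int pool_stride_x, int pool_stride_y,
                               int pad_left, int pad_top,
                               int w_dst, int h_dst)
{
    std::vector<float> dst(static_cast<size_t>(w_dst) * h_dst);
    for(int h = 0; h < h_dst; ++h)
    {
        for(int w = 0; w < w_dst; ++w)
        {
            // Window bounds in source coordinates: the end is clamped with the
            // x pool size on the width axis and the y pool size on the height axis.
            int       wstart = w * pool_stride_x - pad_left;
            int       hstart = h * pool_stride_y - pad_top;
            const int wend   = std::min(wstart + pool_size_x, w_src);
            const int hend   = std::min(hstart + pool_size_y, h_src);
            wstart           = std::max(wstart, 0);
            hstart           = std::max(hstart, 0);

            float max_val = -std::numeric_limits<float>::infinity();
            for(int y = hstart; y < hend; ++y)
            {
                for(int x = wstart; x < wend; ++x)
                {
                    max_val = std::max(max_val, src[static_cast<size_t>(y) * w_src + x]);
                }
            }
            dst[static_cast<size_t>(h) * w_dst + w] = max_val;
        }
    }
    return dst;
}

int main()
{
    // 4x4 ramp input (0..15), 2x3 pool, stride 2x1, no padding -> 2x2 output.
    std::vector<float> src(16);
    for(int i = 0; i < 16; ++i)
    {
        src[i] = static_cast<float>(i);
    }
    const auto dst = max_pool_2d(src, 4, 4, /*pool_x*/ 2, /*pool_y*/ 3,
                                 /*stride_x*/ 2, /*stride_y*/ 1, 0, 0,
                                 /*w_dst*/ 2, /*h_dst*/ 2);
    for(float v : dst)
    {
        std::printf("%.0f ", v); // expected: 9 11 13 15
    }
    std::printf("\n");
    return 0;
}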