author    Michalis Spyrou <michalis.spyrou@arm.com>    2017-06-22 12:57:51 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    bbd9fb95daa08d6da67c567b40ca2cd032f7a2d3 (patch)
tree      c1401585f64396d6f22bb790442d8183f3a17a9e /tests
parent    2eac5bd444d16e4e81c427d5a99e1534b387e211 (diff)
COMPMID-412: Port PoolingLayer to use fixed point 16.
Change-Id: I2005de4c7c14526996309826d33a0ec8e732d2d5
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78720
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Steven Niu <steven.niu@arm.com>
Diffstat (limited to 'tests')
 tests/Utils.h                           |   7
 tests/dataset/PoolingLayerDataset.h     |   4
 tests/validation/NEON/PoolingLayer.cpp  |  20
 tests/validation/Reference.cpp          |   3
 tests/validation/ReferenceCPP.cpp       |   4
 tests/validation/ReferenceCPP.h         |   9
 tests/validation/TensorOperations.h     | 135
 tests/validation/TensorVisitors.h       |   8
 8 files changed, 156 insertions, 34 deletions
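
For readers skimming the patch below: the new integral overload of pooling_layer in tests/validation/TensorOperations.h replaces the hand-coded QS8 scale table with generic fixed-point arithmetic, accumulating the window in a fixed_point<T> and multiplying by 1/pool held in the same Q format. A minimal standalone sketch of that arithmetic in plain C++ (it uses raw int16_t values and a hypothetical fp_mul helper rather than the library's fixed_point<T> class, and the Q7.8 format is an illustrative assumption):

// Standalone illustration (not the library API): average pooling of one window
// in Qm.n fixed point, mirroring what the new QS16 reference path computes.
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical helper: multiply two values held in the same Qm.n format.
int16_t fp_mul(int16_t a, int16_t b, int fixed_point_position)
{
    const int32_t tmp = static_cast<int32_t>(a) * static_cast<int32_t>(b);
    return static_cast<int16_t>(tmp >> fixed_point_position);
}

int main()
{
    const int fixed_point_position = 8; // Q7.8, chosen only for the example
    // 1.0, 2.0, 3.0, 4.0 encoded in Q7.8
    const std::vector<int16_t> window = { 256, 512, 768, 1024 };
    const int pool = static_cast<int>(window.size());

    // 1/pool expressed in the same Q format, like invpool_fp in the reference code.
    const int16_t invpool = static_cast<int16_t>((1 << fixed_point_position) / pool);

    int32_t acc = 0; // widen while accumulating to avoid overflow
    for(int16_t v : window)
    {
        acc += v;
    }
    const int16_t avg = fp_mul(static_cast<int16_t>(acc), invpool, fixed_point_position);

    std::cout << "average = " << avg / static_cast<float>(1 << fixed_point_position)
              << std::endl; // prints 2.5
    return 0;
}

The sketch intentionally skips rounding and saturation handling; the patch itself delegates that to the library's fixed_point<T> add/mul helpers.
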
diff --git a/tests/Utils.h b/tests/Utils.h
index 389c9806bb..06d27b87fa 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -198,8 +198,11 @@ inline ValidRegion shape_to_valid_region(TensorShape shape, bool border_undefine
ARM_COMPUTE_ERROR_ON(shape.num_dimensions() < 2);
anchor.set(0, border_size.left);
anchor.set(1, border_size.top);
- shape.set(0, shape.x() - border_size.left - border_size.right);
- shape.set(1, shape.y() - border_size.top - border_size.bottom);
+ const int x_dim_shape = shape.x() - border_size.left - border_size.right;
+ const int y_dim_shape = shape.y() - border_size.top - border_size.bottom;
+ ARM_COMPUTE_ERROR_ON(x_dim_shape < 0 || y_dim_shape < 0);
+ shape.set(0, x_dim_shape);
+ shape.set(1, y_dim_shape);
}
return ValidRegion(std::move(anchor), std::move(shape));
}
diff --git a/tests/dataset/PoolingLayerDataset.h b/tests/dataset/PoolingLayerDataset.h
index 5cdece4f66..1496cad379 100644
--- a/tests/dataset/PoolingLayerDataset.h
+++ b/tests/dataset/PoolingLayerDataset.h
@@ -134,7 +134,7 @@ public:
~GoogLeNetPoolingLayerDataset() = default;
};
-class RandomPoolingLayerDataset final : public PoolingLayerDataset<8>
+class RandomPoolingLayerDataset final : public PoolingLayerDataset<10>
{
public:
RandomPoolingLayerDataset()
@@ -148,6 +148,8 @@ public:
PoolingLayerDataObject{ TensorShape(13U, 13U, 32U), TensorShape(6U, 6U, 32U), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(2, 2, 0, 0)) },
PoolingLayerDataObject{ TensorShape(24U, 24U, 10U), TensorShape(12U, 12U, 10U), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0)) },
PoolingLayerDataObject{ TensorShape(8U, 8U, 30U), TensorShape(4U, 4U, 30U), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0)) },
+ PoolingLayerDataObject{ TensorShape(7U, 7U, 10U), TensorShape(7U, 7U, 10U), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1)) },
+ PoolingLayerDataObject{ TensorShape(7U, 7U, 10U), TensorShape(7U, 7U, 10U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1)) },
}
{
}
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 0d2f285dff..8b4ff18f8c 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -81,6 +81,7 @@ Tensor compute_pooling_layer(const TensorShape &shape_in, const TensorShape &sha
max = 1;
break;
case DataType::QS8:
+ case DataType::QS16:
min = -(1 << fixed_point_position);
max = (1 << fixed_point_position);
break;
@@ -168,6 +169,7 @@ BOOST_AUTO_TEST_SUITE_END()
#endif /* ARM_COMPUTE_ENABLE_FP16 */
BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
BOOST_DATA_TEST_CASE(RandomDataset,
RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 5),
@@ -184,6 +186,24 @@ BOOST_DATA_TEST_CASE(RandomDataset,
}
BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RandomDataset,
+ RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 13),
+ obj, dt, fixed_point_position)
+{
+ // Compute function
+ Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
+
+ // Validate output
+ validate(Accessor(dst), ref_dst, tolerance_q, 0);
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 0fca661dc4..9cdd2d74d4 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -525,6 +525,7 @@ RawTensor Reference::compute_reference_pooling_layer(const TensorShape &shape_in
max = 1;
break;
case DataType::QS8:
+ case DataType::QS16:
min = -(1 << fixed_point_position);
max = (1 << fixed_point_position);
break;
@@ -535,7 +536,7 @@ RawTensor Reference::compute_reference_pooling_layer(const TensorShape &shape_in
library->fill(ref_src, distribution, 0.0);
// Compute reference
- ReferenceCPP::pooling_layer(ref_src, ref_dst, pool_info, fixed_point_position);
+ ReferenceCPP::pooling_layer(ref_src, ref_dst, pool_info);
return ref_dst;
}
diff --git a/tests/validation/ReferenceCPP.cpp b/tests/validation/ReferenceCPP.cpp
index 069cc1d871..4a2d7bebba 100644
--- a/tests/validation/ReferenceCPP.cpp
+++ b/tests/validation/ReferenceCPP.cpp
@@ -286,11 +286,11 @@ void ReferenceCPP::fully_connected_layer(const RawTensor &src, const RawTensor &
}
// Pooling Layer
-void ReferenceCPP::pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info, int fixed_point_position)
+void ReferenceCPP::pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info)
{
const TensorVariant s = TensorFactory::get_tensor(src);
TensorVariant d = TensorFactory::get_tensor(dst);
- boost::apply_visitor(tensor_visitors::pooling_layer_visitor(s, pool_info, fixed_point_position), d);
+ boost::apply_visitor(tensor_visitors::pooling_layer_visitor(s, pool_info), d);
}
// ROI Pooling Layer
diff --git a/tests/validation/ReferenceCPP.h b/tests/validation/ReferenceCPP.h
index 2d35fa9590..cc886aefc9 100644
--- a/tests/validation/ReferenceCPP.h
+++ b/tests/validation/ReferenceCPP.h
@@ -263,12 +263,11 @@ public:
static void fully_connected_layer(const RawTensor &src, const RawTensor &weights, const RawTensor &bias, RawTensor &dst);
/** Pooling layer of @p src based on the information from @p pool_info.
*
- * @param[in] src Input tensor.
- * @param[out] dst Result tensor.
- * @param[in] pool_info Pooling Layer information.
- * @param[in] fixed_point_position Fixed point position. (Optional)
+ * @param[in] src Input tensor.
+ * @param[out] dst Result tensor.
+ * @param[in] pool_info Pooling Layer information.
*/
- static void pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info, int fixed_point_position = 0);
+ static void pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info);
/** ROI Pooling layer of @p src based on the information from @p pool_info and @p rois.
*
* @param[in] src Input tensor.
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 3220d80a04..887d52887d 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -24,7 +24,6 @@
#ifndef __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__
#define __ARM_COMPUTE_TEST_TENSOR_OPERATIONS_H__
-#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Types.h"
#include "support/ToolchainSupport.h"
#include "tests/Types.h"
@@ -961,8 +960,8 @@ void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const
}
// Pooling layer
-template <typename T>
-void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info, int fixed_point_position)
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
+void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
{
const int pool_size = pool_info.pool_size();
PoolingType type = pool_info.pool_type();
@@ -1054,38 +1053,136 @@ void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_in
hstart = std::max(hstart, 0);
wend = std::min(wend, w_in);
hend = std::min(hend, h_in);
- if(is_floating_point<T>::value)
+
+ for(int y = hstart; y < hend; ++y)
{
- for(int y = hstart; y < hend; ++y)
+ for(int x = wstart; x < wend; ++x)
{
- for(int x = wstart; x < wend; ++x)
- {
- avg_val += in[r * h_in * w_in + y * w_in + x];
- }
+ avg_val += in[r * h_in * w_in + y * w_in + x];
}
- out[r * h_out * w_out + h * pooled_w + w] = avg_val / pool;
}
- else
- {
- static std::array<qint8_t, 10> scale_values_q8 =
- { { 0x0, 0x0, 0x40, 0x2A, 0x20, 0x19, 0x15, 0x12, 0x10, 0xE } };
+ out[r * h_out * w_out + h * pooled_w + w] = avg_val / pool;
+ }
+ }
+ }
+ }
+}
- for(int y = hstart; y < hend; ++y)
+// Pooling layer
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
+void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
+{
+ const int pool_size = pool_info.pool_size();
+ PoolingType type = pool_info.pool_type();
+ int pool_stride_x = 0;
+ int pool_stride_y = 0;
+ int pad_x = 0;
+ int pad_y = 0;
+ std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
+ std::tie(pad_x, pad_y) = pool_info.pad_stride_info().pad();
+
+ const int w_in = static_cast<int>(in.shape()[0]);
+ const int h_in = static_cast<int>(in.shape()[1]);
+
+ const int w_out = static_cast<int>(out.shape()[0]);
+ const int h_out = static_cast<int>(out.shape()[1]);
+
+ int upper_dims = in.shape().total_size() / (w_in * h_in);
+
+ int pooled_w = 0;
+ int pooled_h = 0;
+ if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
+ {
+ pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
+ pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
+ }
+ else
+ {
+ pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
+ pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
+ }
+
+ if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
+ {
+ --pooled_w;
+ }
+ if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
+ {
+ --pooled_h;
+ }
+
+ if(type == PoolingType::MAX)
+ {
+ for(int r = 0; r < upper_dims; ++r)
+ {
+ for(int h = 0; h < pooled_h; ++h)
+ {
+ for(int w = 0; w < pooled_w; ++w)
+ {
+ int wstart = w * pool_stride_x - pad_x;
+ int hstart = h * pool_stride_y - pad_y;
+ int wend = std::min(wstart + pool_size, w_in);
+ int hend = std::min(hstart + pool_size, h_in);
+ wstart = std::max(wstart, 0);
+ hstart = std::max(hstart, 0);
+
+ T max_val = std::numeric_limits<T>::lowest();
+ for(int y = hstart; y < hend; ++y)
+ {
+ for(int x = wstart; x < wend; ++x)
{
- for(int x = wstart; x < wend; ++x)
+ T val = in[r * h_in * w_in + y * w_in + x];
+ if(val > max_val)
{
- avg_val = sqadd_qs8(avg_val, in[r * h_in * w_in + y * w_in + x]);
+ max_val = val;
}
}
- out[r * h_out * w_out + h * pooled_w + w] = sqmul_qs8(avg_val, (scale_values_q8[pool] >> (7 - fixed_point_position)), fixed_point_position);
}
+
+ out[r * h_out * w_out + h * pooled_w + w] = max_val;
+ }
+ }
+ }
+ }
+ else // Average pooling
+ {
+ for(int r = 0; r < upper_dims; ++r)
+ {
+ for(int h = 0; h < pooled_h; ++h)
+ {
+ for(int w = 0; w < pooled_w; ++w)
+ {
+ int wstart = w * pool_stride_x - pad_x;
+ int hstart = h * pool_stride_y - pad_y;
+ int wend = std::min(wstart + pool_size, w_in + pad_x);
+ int hend = std::min(hstart + pool_size, h_in + pad_y);
+ int pool = (hend - hstart) * (wend - wstart);
+ wstart = std::max(wstart, 0);
+ hstart = std::max(hstart, 0);
+ wend = std::min(wend, w_in);
+ hend = std::min(hend, h_in);
+
+ using namespace fixed_point_arithmetic;
+
+ const int fixed_point_position = in.fixed_point_position();
+ const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
+ fixed_point<T> avg_val(0, fixed_point_position, true);
+ for(int y = hstart; y < hend; ++y)
+ {
+ for(int x = wstart; x < wend; ++x)
+ {
+ const fixed_point<T> in_fp(in[r * h_in * w_in + y * w_in + x], fixed_point_position, true);
+ avg_val = add(avg_val, in_fp);
+ }
+ }
+ out[r * h_out * w_out + h * pooled_w + w] = mul(avg_val, invpool_fp).raw();
}
}
}
}
}
-// Pooling layer
+// ROI Pooling layer
template <typename T>
void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
{
diff --git a/tests/validation/TensorVisitors.h b/tests/validation/TensorVisitors.h
index 5ee7ae3a9f..193697acf0 100644
--- a/tests/validation/TensorVisitors.h
+++ b/tests/validation/TensorVisitors.h
@@ -27,6 +27,7 @@
#include "Tensor.h"
#include "TensorOperations.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/runtime/Lut.h"
#include "boost_wrapper.h"
@@ -258,8 +259,8 @@ private:
struct pooling_layer_visitor : public boost::static_visitor<>
{
public:
- explicit pooling_layer_visitor(const TensorVariant &in, PoolingLayerInfo pool_info, int fixed_point_position = 0)
- : _in(in), _pool_info(pool_info), _fixed_point_position(fixed_point_position)
+ explicit pooling_layer_visitor(const TensorVariant &in, PoolingLayerInfo pool_info)
+ : _in(in), _pool_info(pool_info)
{
}
@@ -267,13 +268,12 @@ public:
void operator()(Tensor<T> &out) const
{
const Tensor<T> &in = boost::get<Tensor<T>>(_in);
- tensor_operations::pooling_layer(in, out, _pool_info, _fixed_point_position);
+ tensor_operations::pooling_layer(in, out, _pool_info);
}
private:
const TensorVariant &_in;
PoolingLayerInfo _pool_info;
- int _fixed_point_position;
};
// ROI Pooling layer