author    Georgios Pinitas <georgios.pinitas@arm.com>    2017-08-24 19:02:44 +0100
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit    dc460f13ee65e27b2a428e44c2d80afb1f516a99 (patch)
tree      14eddbb68fb653f4b85e89ab54b070a4d99afcdd /tests/validation
parent    32982d8129f53b612021660d3007e80a52d18898 (diff)
download  ComputeLibrary-dc460f13ee65e27b2a428e44c2d80afb1f516a99.tar.gz
COMPMID-417: Port PoolingLayer to new validation.
Change-Id: I7f2f5f5f81ad9932661fc4c660bf90614288bc96
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/85270
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp    | 175
-rw-r--r--  tests/validation/Datasets.h             |   7
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp  | 209
-rw-r--r--  tests/validation/Reference.cpp          |  33
-rw-r--r--  tests/validation/Reference.h            |  11
-rw-r--r--  tests/validation/ReferenceCPP.cpp       |   8
-rw-r--r--  tests/validation/ReferenceCPP.h         |   7
-rw-r--r--  tests/validation/TensorOperations.h     | 223
-rw-r--r--  tests/validation/TensorVisitors.h       |  21
9 files changed, 0 insertions, 694 deletions
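All nine removals follow the same pattern: run the function under test and a CPU reference on identically filled tensors, then compare element-wise against a per-type tolerance (tolerance_f, tolerance_qs8, tolerance_qs16 below). The short self-contained sketch that follows illustrates such an element-wise absolute-tolerance check; validate_abs is a made-up helper name and this is only an illustration of the idea, not the library's validate() implementation.

    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Element-wise absolute-tolerance comparison, illustrating the kind of check
    // the removed tests delegate to validate() for (tolerances and data made up).
    bool validate_abs(const std::vector<float> &target, const std::vector<float> &reference, float tolerance)
    {
        assert(target.size() == reference.size());
        for(std::size_t i = 0; i < target.size(); ++i)
        {
            if(std::fabs(target[i] - reference[i]) > tolerance)
            {
                return false;
            }
        }
        return true;
    }

    int main()
    {
        const std::vector<float> target    = { 0.999999f, 2.000001f };
        const std::vector<float> reference = { 1.0f, 2.0f };
        return validate_abs(target, reference, 1e-05f) ? 0 : 1;
    }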
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
deleted file mode 100644
index 286b1d98df..0000000000
--- a/tests/validation/CL/PoolingLayer.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "CL/CLAccessor.h"
-#include "TypePrinter.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-#include "tests/dataset/PoolingLayerDataset.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include <random>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance_qs8 = 3; /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-const float tolerance_qs16 = 6; /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-const float tolerance_f = 1e-05; /**< Tolerance value for comparing reference's output against implementation's output for float input */
-
-/** Compute CL pooling layer function.
- *
- * @param[in] shape_in Shape of the input tensor.
- * @param[in] shape_out Shape of the output tensor.
- * @param[in] dt Data type of input and output tensors.
- * @param[in] pool_info Pooling Layer information.
- * @param[in] fixed_point_position The fixed point position.
- *
- * @return Computed output tensor.
- */
-CLTensor compute_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position = 0)
-{
- // Create tensors
- CLTensor src = create_tensor<CLTensor>(shape_in, dt, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(shape_out, dt, 1, fixed_point_position);
-
- // Create and configure function
- CLPoolingLayer pool;
- pool.configure(&src, &dst, pool_info);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- BOOST_TEST(!src.info()->is_resizable());
- BOOST_TEST(!dst.info()->is_resizable());
-
- // Fill tensors
- int min = 0;
- int max = 0;
- switch(dt)
- {
- case DataType::F32:
- min = -1;
- max = 1;
- break;
- case DataType::QS8:
- case DataType::QS16:
- min = -(1 << fixed_point_position);
- max = (1 << fixed_point_position);
- break;
- default:
- ARM_COMPUTE_ERROR("DataType not supported.");
- }
- std::uniform_real_distribution<> distribution(min, max);
- library->fill(CLAccessor(src), distribution, 0);
-
- // Compute function
- pool.run();
-
- return dst;
-}
-
-TensorShape get_output_shape(TensorShape in_shape, const PoolingLayerInfo &pool_info)
-{
- TensorShape out_shape(in_shape);
- const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
- in_shape.y(),
- pool_info.pool_size(),
- pool_info.pool_size(),
- pool_info.pad_stride_info());
- out_shape.set(0, scaled_dims.first);
- out_shape.set(1, scaled_dims.second);
- return out_shape;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(CL)
-BOOST_AUTO_TEST_SUITE(PoolingLayer)
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes() * PoolingTypes() * boost::unit_test::data::make({ 2, 3, 7 }) * boost::unit_test::data::make({ 1, 2 }) * boost::unit_test::data::make({ 0, 1 }),
- src_shape, dt, pool_type, pool_size, pool_stride, pool_pad)
-{
- PoolingLayerInfo pool_info(pool_type, pool_size, PadStrideInfo(pool_stride, pool_stride, pool_pad, pool_pad, DimensionRoundingType::CEIL));
- TensorShape dst_shape = get_output_shape(src_shape, pool_info);
-
- // Compute function
- CLTensor dst = compute_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_f);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-
-BOOST_AUTO_TEST_SUITE(QS8)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
- RandomPoolingLayerDataset() * boost::unit_test::data::xrange(1, 5),
- obj, fixed_point_position)
-{
- // Compute function
- CLTensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS8, obj.info, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS8, obj.info, fixed_point_position);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_qs8, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
- RandomPoolingLayerDataset() * boost::unit_test::data::xrange(1, 12),
- obj, fixed_point_position)
-{
- // Compute function
- CLTensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS16, obj.info, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS16, obj.info, fixed_point_position);
-
- // Validate output
- validate(CLAccessor(dst), ref_dst, tolerance_qs16, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
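The removed get_output_shape helper delegates to arm_compute::scaled_dimensions; the same arithmetic also appears explicitly in the deleted reference in TensorOperations.h further down (CEIL rounding plus a clamp when the last window would start entirely inside the padding). A standalone sketch of that arithmetic on example sizes follows; pooled_dim is a hypothetical helper name, not a call into arm_compute.

    #include <cmath>
    #include <cstdio>

    // Output size along one dimension with CEIL rounding, mirroring the formula
    // used by the removed reference implementation (illustrative sketch only).
    int pooled_dim(int in_dim, int pool_size, int stride, int pad)
    {
        int out = static_cast<int>(std::ceil(static_cast<float>(in_dim + 2 * pad - pool_size) / stride)) + 1;
        if((out - 1) * stride >= in_dim + pad)
        {
            --out; // last window would start entirely in the padding
        }
        return out;
    }

    int main()
    {
        // Example: 7-wide input, 3x3 pool, stride 2, pad 1 -> 4 output elements
        std::printf("%d\n", pooled_dim(7, 3, 2, 1));
        return 0;
    }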
diff --git a/tests/validation/Datasets.h b/tests/validation/Datasets.h
index 64918fc4f5..15e1b098e6 100644
--- a/tests/validation/Datasets.h
+++ b/tests/validation/Datasets.h
@@ -37,7 +37,6 @@
#include "dataset/MatrixPatternDataset.h"
#include "dataset/NonLinearFilterFunctionDataset.h"
#include "dataset/NormalizationTypeDataset.h"
-#include "dataset/PoolingLayerDataset.h"
#include "dataset/PoolingTypesDataset.h"
#include "dataset/RoundingPolicyDataset.h"
#include "dataset/ShapeDatasets.h"
@@ -177,12 +176,6 @@ struct is_dataset<arm_compute::test::NormalizationTypes> : boost::mpl::true_
/// Register the data set with Boost
template <>
-struct is_dataset<arm_compute::test::RandomPoolingLayerDataset> : boost::mpl::true_
-{
-};
-
-/// Register the data set with Boost
-template <>
struct is_dataset<arm_compute::test::RoundingPolicies> : boost::mpl::true_
{
};
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
deleted file mode 100644
index 8b4ff18f8c..0000000000
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "NEON/Accessor.h"
-#include "TypePrinter.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-#include "tests/dataset/PoolingLayerDataset.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include <random>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance_q = 0; /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-const float tolerance_f32 = 1e-05; /**< Tolerance value for comparing reference's output against implementation's output for float input */
-#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.001f; /**< Tolerance value for comparing reference's output against half precision floating point implementation's output */
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-
-/** Compute Neon pooling layer function.
- *
- * @param[in] shape_in Shape of the input tensor.
- * @param[in] shape_out Shape of the output tensor.
- * @param[in] dt Data type of input and output tensors.
- * @param[in] pool_info Pooling Layer information.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers.
- *
- * @return Computed output tensor.
- */
-Tensor compute_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position = 0)
-{
- // Create tensors
- Tensor src = create_tensor<Tensor>(shape_in, dt, 1, fixed_point_position);
- Tensor dst = create_tensor<Tensor>(shape_out, dt, 1, fixed_point_position);
-
- // Create and configure function
- NEPoolingLayer pool;
- pool.configure(&src, &dst, pool_info);
-
- // Allocate tensors
- src.allocator()->allocate();
- dst.allocator()->allocate();
-
- BOOST_TEST(!src.info()->is_resizable());
- BOOST_TEST(!dst.info()->is_resizable());
-
- // Fill tensors
- int min = 0;
- int max = 0;
- switch(dt)
- {
- case DataType::F32:
- case DataType::F16:
- min = -1;
- max = 1;
- break;
- case DataType::QS8:
- case DataType::QS16:
- min = -(1 << fixed_point_position);
- max = (1 << fixed_point_position);
- break;
- default:
- ARM_COMPUTE_ERROR("DataType not supported.");
- }
- std::uniform_real_distribution<> distribution(min, max);
- library->fill(Accessor(src), distribution, 0);
-
- // Compute function
- pool.run();
-
- return dst;
-}
-
-TensorShape get_output_shape(TensorShape in_shape, const PoolingLayerInfo &pool_info)
-{
- TensorShape out_shape(in_shape);
- const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
- in_shape.y(),
- pool_info.pool_size(),
- pool_info.pool_size(),
- pool_info.pad_stride_info());
- out_shape.set(0, scaled_dims.first);
- out_shape.set(1, scaled_dims.second);
- return out_shape;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(NEON)
-BOOST_AUTO_TEST_SUITE(PoolingLayer)
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
- RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::F32),
- obj, dt)
-{
- // Compute function
- Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_f32, 0);
-}
-
-BOOST_DATA_TEST_CASE(RunSmall7x7,
- SmallShapes() * CNNFloatDataTypes() * PoolingTypes() * boost::unit_test::data::make({ 2, 3, 7 }) * boost::unit_test::data::make({ 1, 2 }) * boost::unit_test::data::make({ 0, 1 }),
- src_shape, dt, pool_type, pool_size, pool_stride, pool_pad)
-{
- PoolingLayerInfo pool_info(pool_type, pool_size, PadStrideInfo(pool_stride, pool_stride, pool_pad, pool_pad, DimensionRoundingType::CEIL));
- TensorShape dst_shape = get_output_shape(src_shape, pool_info);
-
- // Compute function
- Tensor dst = compute_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_f32, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-#ifdef ARM_COMPUTE_ENABLE_FP16
-BOOST_AUTO_TEST_SUITE(Float16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
- RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::F16),
- obj, dt)
-{
- // Compute function
- Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_f16, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
- RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 5),
- obj, dt, fixed_point_position)
-{
- // Compute function
- Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_q, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
- RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 13),
- obj, dt, fixed_point_position)
-{
- // Compute function
- Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
- // Compute reference
- RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
- // Validate output
- validate(Accessor(dst), ref_dst, tolerance_q, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
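For the QS8/QS16 cases above, the fill bounds ±(1 << fixed_point_position) are the raw-integer form of ±1.0 in a fixed-point format with that many fractional bits. A minimal sketch of that conversion; qs_to_float is a hypothetical helper for illustration only.

    #include <cstdio>

    // A raw fixed-point value with F fractional bits represents raw / 2^F.
    float qs_to_float(int raw, int fixed_point_position)
    {
        return static_cast<float>(raw) / static_cast<float>(1 << fixed_point_position);
    }

    int main()
    {
        const int fpp = 5;                                      // e.g. QS8 with 5 fractional bits
        std::printf("%f\n", qs_to_float(1 << fpp, fpp));        // 1.0
        std::printf("%f\n", qs_to_float(-(1 << fpp), fpp));     // -1.0
        return 0;
    }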
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 1ea017e998..6da92116da 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -461,39 +461,6 @@ RawTensor Reference::compute_reference_batch_normalization_layer(const TensorSha
return ref_dst;
}
-RawTensor Reference::compute_reference_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position)
-{
- // Create reference
- RawTensor ref_src(shape_in, dt, 1, fixed_point_position);
- RawTensor ref_dst(shape_out, dt, 1, fixed_point_position);
-
- // Fill reference
- int min = 0;
- int max = 0;
- switch(dt)
- {
- case DataType::F32:
- case DataType::F16:
- min = -1;
- max = 1;
- break;
- case DataType::QS8:
- case DataType::QS16:
- min = -(1 << fixed_point_position);
- max = (1 << fixed_point_position);
- break;
- default:
- ARM_COMPUTE_ERROR("DataType not supported.");
- }
- std::uniform_real_distribution<> distribution(min, max);
- library->fill(ref_src, distribution, 0.0);
-
- // Compute reference
- ReferenceCPP::pooling_layer(ref_src, ref_dst, pool_info);
-
- return ref_dst;
-}
-
RawTensor Reference::compute_reference_roi_pooling_layer(const TensorShape &shape, DataType dt, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
{
TensorShape shape_dst;
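The removed reference fill mirrors the fill in the function-under-test helpers: both draw from the same distribution and pass seed 0 to library->fill, so target and reference operate on identical data. A minimal sketch of that reproducibility property, using std::mt19937 as a stand-in for the library's fill engine (an assumption for illustration):

    #include <cassert>
    #include <random>

    int main()
    {
        std::uniform_real_distribution<> dist_a(-1.0, 1.0);
        std::uniform_real_distribution<> dist_b(-1.0, 1.0);
        std::mt19937 gen_a(0); // same seed ...
        std::mt19937 gen_b(0);
        for(int i = 0; i < 16; ++i)
        {
            assert(dist_a(gen_a) == dist_b(gen_b)); // ... same sequence of values
        }
        return 0;
    }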
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index 288dc0e3f7..430c42321f 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -293,17 +293,6 @@ public:
* @return Computed raw tensor.
*/
static RawTensor compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position = 0);
- /** Compute reference pooling layer.
- *
- * @param[in] shape_in Shape of the input tensor.
- * @param[in] shape_out Shape of the output tensor.
- * @param[in] dt Data type of input and output tensors.
- * @param[in] pool_info Pooling Layer information.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers.
- *
- * @return Computed raw tensor.
- */
- static RawTensor compute_reference_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position = 0);
/** Compute reference roi pooling layer.
*
* @param[in] shape Shape of the input tensor.
diff --git a/tests/validation/ReferenceCPP.cpp b/tests/validation/ReferenceCPP.cpp
index 58b47f9d81..4c831ebe0a 100644
--- a/tests/validation/ReferenceCPP.cpp
+++ b/tests/validation/ReferenceCPP.cpp
@@ -281,14 +281,6 @@ void ReferenceCPP::batch_normalization_layer(const RawTensor &src, RawTensor &ds
boost::apply_visitor(tensor_visitors::batch_normalization_layer_visitor(s, m, v, b, g, epsilon, fixed_point_position), d);
}
-// Pooling Layer
-void ReferenceCPP::pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info)
-{
- const TensorVariant s = TensorFactory::get_tensor(src);
- TensorVariant d = TensorFactory::get_tensor(dst);
- boost::apply_visitor(tensor_visitors::pooling_layer_visitor(s, pool_info), d);
-}
-
// ROI Pooling Layer
void ReferenceCPP::roi_pooling_layer(const RawTensor &src, RawTensor &dst, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
{
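The removed ReferenceCPP::pooling_layer wraps the RawTensors into TensorVariants and lets boost::apply_visitor select the correctly typed tensor_operations::pooling_layer overload through the pooling_layer_visitor deleted further down. A compact sketch of that dispatch pattern, using std::variant and std::visit as stand-ins for the boost-based TensorVariant; Tensor, TensorVariant and pooling_visitor here are local illustrative types, not the library's.

    #include <cstdint>
    #include <cstdio>
    #include <variant>
    #include <vector>

    template <typename T>
    struct Tensor
    {
        std::vector<T> data;
    };

    using TensorVariant = std::variant<Tensor<float>, Tensor<std::int8_t>>;

    struct pooling_visitor
    {
        // The variant's held type picks the matching instantiation at runtime;
        // the real visitor would call the typed reference pooling here.
        template <typename T>
        void operator()(Tensor<T> &out) const
        {
            std::printf("dispatched on a tensor of %zu-byte elements\n", sizeof(T));
        }
    };

    int main()
    {
        TensorVariant v = Tensor<float>{ { 1.f, 2.f } };
        std::visit(pooling_visitor{}, v);
        return 0;
    }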
diff --git a/tests/validation/ReferenceCPP.h b/tests/validation/ReferenceCPP.h
index 29612d1e3b..96aade9705 100644
--- a/tests/validation/ReferenceCPP.h
+++ b/tests/validation/ReferenceCPP.h
@@ -259,13 +259,6 @@ public:
*/
static void batch_normalization_layer(const RawTensor &src, RawTensor &dst, const RawTensor &mean, const RawTensor &var, const RawTensor &beta, const RawTensor &gamma, float epsilon,
int fixed_point_position = 0);
- /** Pooling layer of @p src based on the information from @p pool_info.
- *
- * @param[in] src Input tensor.
- * @param[out] dst Result tensor.
- * @param[in] pool_info Pooling Layer information.
- */
- static void pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info);
/** ROI Pooling layer of @p src based on the information from @p pool_info and @p rois.
*
* @param[in] src Input tensor.
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index f5be139dcf..e68a344112 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -1071,229 +1071,6 @@ void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor
}
}
-// Pooling layer
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
-{
- const int pool_size = pool_info.pool_size();
- PoolingType type = pool_info.pool_type();
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- int pad_x = 0;
- int pad_y = 0;
- std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
- std::tie(pad_x, pad_y) = pool_info.pad_stride_info().pad();
-
- const int w_in = static_cast<int>(in.shape()[0]);
- const int h_in = static_cast<int>(in.shape()[1]);
-
- const int w_out = static_cast<int>(out.shape()[0]);
- const int h_out = static_cast<int>(out.shape()[1]);
-
- int upper_dims = in.shape().total_size() / (w_in * h_in);
-
- int pooled_w = 0;
- int pooled_h = 0;
- if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
- {
- pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
- pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
- }
- else
- {
- pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
- pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
- }
-
- if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
- {
- --pooled_w;
- }
- if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
- {
- --pooled_h;
- }
-
- if(type == PoolingType::MAX)
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < pooled_h; ++h)
- {
- for(int w = 0; w < pooled_w; ++w)
- {
- int wstart = w * pool_stride_x - pad_x;
- int hstart = h * pool_stride_y - pad_y;
- int wend = std::min(wstart + pool_size, w_in);
- int hend = std::min(hstart + pool_size, h_in);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
-
- T max_val = std::numeric_limits<T>::lowest();
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const T val = in[r * h_in * w_in + y * w_in + x];
- if(val > max_val)
- {
- max_val = val;
- }
- }
- }
-
- out[r * h_out * w_out + h * pooled_w + w] = max_val;
- }
- }
- }
- }
- else // Average pooling
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < pooled_h; ++h)
- {
- for(int w = 0; w < pooled_w; ++w)
- {
- T avg_val(0);
- int wstart = w * pool_stride_x - pad_x;
- int hstart = h * pool_stride_y - pad_y;
- int wend = std::min(wstart + pool_size, w_in + pad_x);
- int hend = std::min(hstart + pool_size, h_in + pad_y);
- int pool = (hend - hstart) * (wend - wstart);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
- wend = std::min(wend, w_in);
- hend = std::min(hend, h_in);
-
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- avg_val += in[r * h_in * w_in + y * w_in + x];
- }
- }
- out[r * h_out * w_out + h * pooled_w + w] = avg_val / pool;
- }
- }
- }
- }
-}
-
-// Pooling layer
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
-{
- const int pool_size = pool_info.pool_size();
- PoolingType type = pool_info.pool_type();
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- int pad_x = 0;
- int pad_y = 0;
- std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
- std::tie(pad_x, pad_y) = pool_info.pad_stride_info().pad();
-
- const int w_in = static_cast<int>(in.shape()[0]);
- const int h_in = static_cast<int>(in.shape()[1]);
-
- const int w_out = static_cast<int>(out.shape()[0]);
- const int h_out = static_cast<int>(out.shape()[1]);
-
- int upper_dims = in.shape().total_size() / (w_in * h_in);
-
- int pooled_w = 0;
- int pooled_h = 0;
- if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
- {
- pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
- pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
- }
- else
- {
- pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
- pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
- }
-
- if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
- {
- --pooled_w;
- }
- if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
- {
- --pooled_h;
- }
-
- if(type == PoolingType::MAX)
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < pooled_h; ++h)
- {
- for(int w = 0; w < pooled_w; ++w)
- {
- int wstart = w * pool_stride_x - pad_x;
- int hstart = h * pool_stride_y - pad_y;
- int wend = std::min(wstart + pool_size, w_in);
- int hend = std::min(hstart + pool_size, h_in);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
-
- T max_val = std::numeric_limits<T>::lowest();
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- T val = in[r * h_in * w_in + y * w_in + x];
- if(val > max_val)
- {
- max_val = val;
- }
- }
- }
-
- out[r * h_out * w_out + h * pooled_w + w] = max_val;
- }
- }
- }
- }
- else // Average pooling
- {
- for(int r = 0; r < upper_dims; ++r)
- {
- for(int h = 0; h < pooled_h; ++h)
- {
- for(int w = 0; w < pooled_w; ++w)
- {
- int wstart = w * pool_stride_x - pad_x;
- int hstart = h * pool_stride_y - pad_y;
- int wend = std::min(wstart + pool_size, w_in + pad_x);
- int hend = std::min(hstart + pool_size, h_in + pad_y);
- int pool = (hend - hstart) * (wend - wstart);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
- wend = std::min(wend, w_in);
- hend = std::min(hend, h_in);
-
- using namespace fixed_point_arithmetic;
-
- const int fixed_point_position = in.fixed_point_position();
- const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
- fixed_point<T> avg_val(0, fixed_point_position, true);
- for(int y = hstart; y < hend; ++y)
- {
- for(int x = wstart; x < wend; ++x)
- {
- const fixed_point<T> in_fp(in[r * h_in * w_in + y * w_in + x], fixed_point_position, true);
- avg_val = add(avg_val, in_fp);
- }
- }
- out[r * h_out * w_out + h * pooled_w + w] = mul(avg_val, invpool_fp).raw();
- }
- }
- }
- }
-}
-
// ROI Pooling layer
template <typename T>
void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
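The two deleted pooling_layer overloads above share the same loop structure; only the accumulation differs (plain floating-point arithmetic versus fixed_point<T> add/mul). The tiny self-contained example below runs the max-pooling case on a 4x4 input with a 2x2 window and stride 2 (no padding), which is enough to see what the nested loops produce; it is a sketch mirroring the removed reference, not the library code itself.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main()
    {
        const int w_in = 4, h_in = 4, pool = 2, stride = 2;
        const std::vector<float> in = { 1, 2, 3, 4,
                                        5, 6, 7, 8,
                                        9, 10, 11, 12,
                                        13, 14, 15, 16 };
        const int w_out = (w_in - pool) / stride + 1;
        const int h_out = (h_in - pool) / stride + 1;
        std::vector<float> out(static_cast<std::size_t>(w_out * h_out), 0.f);

        for(int h = 0; h < h_out; ++h)
        {
            for(int w = 0; w < w_out; ++w)
            {
                // Take the maximum over the pool x pool window starting at (w*stride, h*stride)
                float max_val = in[h * stride * w_in + w * stride];
                for(int y = h * stride; y < h * stride + pool; ++y)
                {
                    for(int x = w * stride; x < w * stride + pool; ++x)
                    {
                        max_val = std::max(max_val, in[y * w_in + x]);
                    }
                }
                out[h * w_out + w] = max_val; // expected output: 6 8 14 16
            }
        }
        for(float v : out)
        {
            std::printf("%g ", v);
        }
        std::printf("\n");
        return 0;
    }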
diff --git a/tests/validation/TensorVisitors.h b/tests/validation/TensorVisitors.h
index 732cd0e8f1..a15d2ad1dd 100644
--- a/tests/validation/TensorVisitors.h
+++ b/tests/validation/TensorVisitors.h
@@ -233,27 +233,6 @@ private:
int _fixed_point_position;
};
-// Pooling layer
-struct pooling_layer_visitor : public boost::static_visitor<>
-{
-public:
- explicit pooling_layer_visitor(const TensorVariant &in, PoolingLayerInfo pool_info)
- : _in(in), _pool_info(pool_info)
- {
- }
-
- template <typename T>
- void operator()(Tensor<T> &out) const
- {
- const Tensor<T> &in = boost::get<Tensor<T>>(_in);
- tensor_operations::pooling_layer(in, out, _pool_info);
- }
-
-private:
- const TensorVariant &_in;
- PoolingLayerInfo _pool_info;
-};
-
// ROI Pooling layer
struct roi_pooling_layer_visitor : public boost::static_visitor<>
{