author    morgolock <pablo.tello@arm.com>    2020-04-09 14:17:48 +0100
committer Pablo Marquez <pablo.tello@arm.com>    2020-06-15 14:04:49 +0000
commit    37722d9a81627520fa347eb65199dbfeb84b26bd (patch)
tree      3cb811c83e933337e685606625fcd44690b570d7 /tests
parent    4a61653202afb018f4f259d3c144a735d73f0a20 (diff)
COMPMID-2449: Implement NEUnPoolLayer
Change-Id: I5677c87bba97dd395a3e13dbce34a3dd2c437033
Signed-off-by: morgolock <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3289
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/NEON/MaxUnpoolingLayer.cpp           |  82
-rw-r--r--  tests/validation/fixtures/MaxUnpoolingLayerFixture.h  | 159
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h       |   1
-rw-r--r--  tests/validation/reference/MaxUnpoolingLayer.cpp      | 106
-rw-r--r--  tests/validation/reference/MaxUnpoolingLayer.h        |  46
-rw-r--r--  tests/validation/reference/PoolingLayer.cpp           |  78
6 files changed, 435 insertions(+), 37 deletions(-)
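
As a rough orientation for reviewers, the runtime calls exercised by the new fixture boil down to the sketch below. It only uses the configure/run signatures exercised in MaxUnpoolingLayerFixture.h; the tensor shapes and pooling parameters are illustrative, and the explicit tensor initialisation follows the usual NEON runtime pattern rather than anything introduced by this patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative 8x8 single-channel FP32 input; 2x2 max pooling, stride 2, no padding.
    Tensor src, pooled, indices, unpooled;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 1U), 1, DataType::F32));
    pooled.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U), 1, DataType::F32));
    indices.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U), 1, DataType::U32));
    unpooled.allocator()->init(TensorInfo(TensorShape(8U, 8U, 1U), 1, DataType::F32));

    const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(2, 2), DataLayout::NCHW,
                                     PadStrideInfo(2, 2, 0, 0), true /* exclude_padding */);

    // The pooling stage also records the position of each maximum in 'indices' ...
    NEPoolingLayer pool;
    pool.configure(&src, &pooled, pool_info, &indices);

    // ... which the new unpooling stage uses to scatter the pooled values back;
    // every output element that is not addressed by an index stays zero.
    NEMaxUnpoolingLayer unpool;
    unpool.configure(&pooled, &indices, &unpooled, pool_info);

    src.allocator()->allocate();
    pooled.allocator()->allocate();
    indices.allocator()->allocate();
    unpooled.allocator()->allocate();

    // Fill src here, then run both stages.
    pool.run();
    unpool.run();
    return 0;
}
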
diff --git a/tests/validation/NEON/MaxUnpoolingLayer.cpp b/tests/validation/NEON/MaxUnpoolingLayer.cpp
new file mode 100644
index 0000000000..949d569c89
--- /dev/null
+++ b/tests/validation/NEON/MaxUnpoolingLayer.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/MaxUnpoolingLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(PoolingLayer)
+
+template <typename T>
+using NEMaxUnpoolingLayerFixture = MaxUnpoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, NEMaxUnpoolingLayer, T>;
+
+const auto PoolingLayerIndicesDatasetFPSmall = combine(combine(framework::dataset::make("PoolType", { PoolingType::MAX }), framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(2, 2, 0, 0), PadStrideInfo(2, 1, 0, 0) }));
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(MaxUnpooling, NEMaxUnpoolingLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerIndicesDatasetFPSmall,
+ framework::dataset::make("DataType", DataType::F32))),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })
+
+ ))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(MaxUnpooling, NEMaxUnpoolingLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), combine(PoolingLayerIndicesDatasetFPSmall,
+ framework::dataset::make("DataType", DataType::F16))),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })
+
+ ))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // PoolingLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/MaxUnpoolingLayerFixture.h b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
new file mode 100644
index 0000000000..ee08f59e7e
--- /dev/null
+++ b/tests/validation/fixtures/MaxUnpoolingLayerFixture.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_MAX_UNPOOLING_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_MAX_UNPOOLING_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/MaxUnpoolingLayer.h"
+#include "tests/validation/reference/PoolingLayer.h"
+#include <random>
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename PoolingFunctionType, typename MaxUnpoolingFunctionType, typename T>
+class MaxUnpoolingLayerValidationGenericFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout)
+ {
+ std::mt19937 gen(library->seed());
+ std::uniform_int_distribution<> offset_dis(0, 20);
+ const float scale = data_type == DataType::QASYMM8_SIGNED ? 1.f / 127.f : 1.f / 255.f;
+ const int scale_in = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
+ const int scale_out = data_type == DataType::QASYMM8_SIGNED ? -offset_dis(gen) : offset_dis(gen);
+ const QuantizationInfo input_qinfo(scale, scale_in);
+ const QuantizationInfo output_qinfo(scale, scale_out);
+ _pool_info = pool_info;
+ _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo);
+ _reference = compute_reference(shape, pool_info, data_type, input_qinfo, output_qinfo);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ if(!is_data_type_quantized(tensor.data_type()))
+ {
+ std::uniform_real_distribution<> distribution(-1.f, 1.f);
+ library->fill(tensor, distribution, 0);
+ }
+ else // data type is quantized_asymmetric
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+ }
+
+ TensorType compute_target(TensorShape input_shape, PoolingLayerInfo pool_info,
+ DataType data_type, DataLayout data_layout,
+ QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
+ {
+ // Change shape in case of NHWC.
+ if(data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, input_qinfo, data_layout);
+ const TensorShape dst_shape = misc::shape_calculator::compute_pool_shape(*(src.info()), pool_info);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, output_qinfo, data_layout);
+ TensorType unpooled = create_tensor<TensorType>(input_shape, data_type, 1, output_qinfo, data_layout);
+ TensorType indices = create_tensor<TensorType>(dst_shape, DataType::U32, 1, output_qinfo, data_layout);
+
+ // Create and configure function
+ PoolingFunctionType pool_layer;
+ pool_layer.configure(&src, &dst, pool_info, &indices);
+
+ // Create and configure unpooling function
+ MaxUnpoolingFunctionType unpool_layer;
+ unpool_layer.configure(&dst, &indices, &unpooled, pool_info);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(indices.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+ indices.allocator()->allocate();
+ unpooled.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!indices.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!unpooled.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ pool_layer.run();
+ unpool_layer.run();
+ return unpooled;
+ }
+
+ SimpleTensor<T> compute_reference(TensorShape input_shape, PoolingLayerInfo info, DataType data_type,
+ QuantizationInfo input_qinfo, QuantizationInfo output_qinfo)
+ {
+ SimpleTensor<T> src(input_shape, data_type, 1, input_qinfo);
+ SimpleTensor<uint32_t> indices{};
+ // Fill reference
+ fill(src);
+ auto pooled_tensor = reference::pooling_layer<T>(src, info, output_qinfo, &indices);
+ return reference::max_unpooling_layer<T>(pooled_tensor, info, output_qinfo, indices, input_shape);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ PoolingLayerInfo _pool_info{};
+};
+
+template <typename TensorType, typename AccessorType, typename F1, typename F2, typename T>
+class MaxUnpoolingLayerValidationFixture : public MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, DataType data_type, DataLayout data_layout)
+ {
+ MaxUnpoolingLayerValidationGenericFixture<TensorType, AccessorType, F1, F2, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, true),
+ data_type, data_layout);
+ }
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_MAX_UNPOOLING_LAYER_FIXTURE */
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index eb40cea0c2..b9b5b3857b 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -86,7 +86,6 @@ protected:
{
permute(shape, PermutationVector(2U, 0U, 1U));
}
-
// Create tensors
TensorType src = create_tensor<TensorType>(shape, data_type, 1, input_qinfo, data_layout);
const TensorShape dst_shape = misc::shape_calculator::compute_pool_shape(*(src.info()), info);
diff --git a/tests/validation/reference/MaxUnpoolingLayer.cpp b/tests/validation/reference/MaxUnpoolingLayer.cpp
new file mode 100644
index 0000000000..d74a930856
--- /dev/null
+++ b/tests/validation/reference/MaxUnpoolingLayer.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "MaxUnpoolingLayer.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename T>
+SimpleTensor<T> max_unpooling_layer_internal(const SimpleTensor<T> &src, const PoolingLayerInfo &info,
+ const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> &indices,
+ TensorShape output_shape, DataLayout data_layout)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_UNUSED(output_qinfo);
+ ARM_COMPUTE_UNUSED(data_layout);
+ // Create reference
+ SimpleTensor<T> dst{ output_shape, src.data_type(), 1 };
+ ARM_COMPUTE_ERROR_ON(indices.shape().total_size() == 0);
+ std::fill_n(dst.data(), dst.num_elements(), 0);
+ const auto w_indices = static_cast<int>(indices.shape()[0]);
+ const auto h_indices = static_cast<int>(indices.shape()[1]);
+ const auto z_indices = static_cast<int>(indices.shape()[2]);
+ const auto b_indices = static_cast<int>(indices.shape()[3]);
+ const auto w_dst = static_cast<int>(dst.shape()[0]);
+ const auto h_dst = static_cast<int>(dst.shape()[1]);
+ const auto z_dst = static_cast<int>(dst.shape()[2]);
+ for(int b = 0; b < b_indices; ++b)
+ {
+ for(int r = 0; r < z_indices; ++r)
+ {
+ for(int h = 0; h < h_indices; ++h)
+ {
+ for(int w = 0; w < w_indices; ++w)
+ {
+ const uint32_t index_into_dst = indices[b * z_indices * h_indices * w_indices + r * h_indices * w_indices + h * w_indices + w];
+ const auto input_val = src[b * z_indices * h_indices * w_indices + r * h_indices * w_indices + h * w_indices + w];
+ auto *ptr = &dst[b * z_dst * h_dst * w_dst];
+ ptr[index_into_dst] = input_val;
+ }
+ }
+ }
+ }
+ return dst;
+}
+
+template <>
+SimpleTensor<uint8_t> max_unpooling_layer<uint8_t>(
+ const SimpleTensor<uint8_t> &src, const PoolingLayerInfo &info,
+ const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> &indices,
+ TensorShape output_shape, DataLayout data_layout)
+
+{
+ SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_tmp = max_unpooling_layer_internal<float>(src_tmp, info, output_qinfo, indices, output_shape, data_layout);
+ SimpleTensor<uint8_t> dst = convert_to_asymmetric<uint8_t>(dst_tmp, output_qinfo);
+ return dst;
+}
+
+template <typename T>
+SimpleTensor<T> max_unpooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info,
+ const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> &indices,
+ TensorShape output_shape, DataLayout data_layout)
+{
+ return max_unpooling_layer_internal<T>(src, info, output_qinfo, indices, output_shape, data_layout);
+}
+
+template SimpleTensor<float> max_unpooling_layer(const SimpleTensor<float> &src, const PoolingLayerInfo &info,
+ const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> &indices,
+ TensorShape output_shape, DataLayout data_layout);
+
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
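
For review purposes, the scatter performed by max_unpooling_layer_internal above can be summarised in the standalone sketch below. The function name, the per-batch flattening and the buffer sizes are illustrative, not part of the patch: each pooled maximum is written back to the flat offset within its batch that the pooling reference recorded, and every output element that is not hit stays zero.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the reference scatter: for each pooled element,
// 'indices' holds a flat offset into the corresponding batch slice of the output.
std::vector<float> max_unpool_scatter(const std::vector<float>    &pooled,
                                      const std::vector<uint32_t> &indices,
                                      std::size_t pooled_per_batch,   // c*h*w of the pooled tensor
                                      std::size_t unpooled_per_batch, // c*h*w of the unpooled tensor
                                      std::size_t batches)
{
    std::vector<float> unpooled(unpooled_per_batch * batches, 0.0f); // untouched elements stay zero
    for(std::size_t b = 0; b < batches; ++b)
    {
        for(std::size_t i = 0; i < pooled_per_batch; ++i)
        {
            // Scatter each pooled maximum back to the position it was taken from.
            unpooled[b * unpooled_per_batch + indices[b * pooled_per_batch + i]] =
                pooled[b * pooled_per_batch + i];
        }
    }
    return unpooled;
}
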
diff --git a/tests/validation/reference/MaxUnpoolingLayer.h b/tests/validation/reference/MaxUnpoolingLayer.h
new file mode 100644
index 0000000000..b594265099
--- /dev/null
+++ b/tests/validation/reference/MaxUnpoolingLayer.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_MAXUNPOOLING_LAYER_H
+#define ARM_COMPUTE_TEST_MAXUNPOOLING_LAYER_H
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> max_unpooling_layer(const SimpleTensor<T> &src, const PoolingLayerInfo &info, const QuantizationInfo &output_qinfo, SimpleTensor<uint32_t> &indices,
+ TensorShape output_shape, DataLayout data_layout = DataLayout::NCHW);
+
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_MAXUNPOOLING_LAYER_H */
diff --git a/tests/validation/reference/PoolingLayer.cpp b/tests/validation/reference/PoolingLayer.cpp
index 778e28d7c1..c110a67842 100644
--- a/tests/validation/reference/PoolingLayer.cpp
+++ b/tests/validation/reference/PoolingLayer.cpp
@@ -43,9 +43,10 @@ SimpleTensor<T> pooling_layer_internal(const SimpleTensor<T> &src, const Pooling
ARM_COMPUTE_ERROR_ON(info.is_global_pooling && (src.shape().x() != src.shape().y()));
// Create reference
SimpleTensor<T> dst{ compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info), src.data_type(), 1 };
+ auto pooled_shape = compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info);
if(indices)
{
- *indices = SimpleTensor<uint32_t> { compute_pool_shape(TensorInfo(src.shape(), 1, src.data_type()), info), DataType::U32, 1 };
+ *indices = SimpleTensor<uint32_t> { pooled_shape, DataType::U32, 1 };
}
const int pool_size_x = info.is_global_pooling ? src.shape().x() : info.pool_size.width;
const int pool_size_y = info.is_global_pooling ? src.shape().y() : info.pool_size.height;
@@ -58,56 +59,62 @@ SimpleTensor<T> pooling_layer_internal(const SimpleTensor<T> &src, const Pooling
int pad_bottom = info.pad_stride_info.pad_bottom();
bool exclude_padding = info.exclude_padding;
- const auto w_src = static_cast<int>(src.shape()[0]);
- const auto h_src = static_cast<int>(src.shape()[1]);
- const int upper_dims = src.shape().total_size() / (w_src * h_src);
+ const auto w_src = static_cast<int>(src.shape()[0]);
+ const auto h_src = static_cast<int>(src.shape()[1]);
+ const auto z_src = static_cast<int>(src.shape()[2]);
+ const auto b_src = static_cast<int>(src.shape()[3]);
+
+ const int upper_dims = src.shape().total_size() / (w_src * h_src);
+
+ const auto w_dst = static_cast<int>(dst.shape()[0]);
+ const auto h_dst = static_cast<int>(dst.shape()[1]);
+ const auto z_dst = static_cast<int>(dst.shape()[2]);
- const auto w_dst = static_cast<int>(dst.shape()[0]);
- const auto h_dst = static_cast<int>(dst.shape()[1]);
TensorShape shape_nhwc(src.shape());
permute(shape_nhwc, PermutationVector(2U, 0U, 1U));
-
if(type == PoolingType::MAX)
{
- for(int r = 0; r < upper_dims; ++r)
+ for(int b = 0; b < b_src; ++b)
{
- for(int h = 0; h < h_dst; ++h)
+ for(int r = 0; r < z_src; ++r)
{
- for(int w = 0; w < w_dst; ++w)
+ for(int h = 0; h < h_dst; ++h)
{
- int wstart = w * pool_stride_x - pad_left;
- int hstart = h * pool_stride_y - pad_top;
- int wend = std::min(wstart + pool_size_x, w_src);
- int hend = std::min(hstart + pool_size_y, h_src);
- wstart = std::max(wstart, 0);
- hstart = std::max(hstart, 0);
-
- auto max_val = std::numeric_limits<ACC_T>::lowest();
- int max_index{ 0 };
- for(int y = hstart; y < hend; ++y)
+ for(int w = 0; w < w_dst; ++w)
{
- for(int x = wstart; x < wend; ++x)
+ int wstart = w * pool_stride_x - pad_left;
+ int hstart = h * pool_stride_y - pad_top;
+ int wend = std::min(wstart + pool_size_x, w_src);
+ int hend = std::min(hstart + pool_size_y, h_src);
+ wstart = std::max(wstart, 0);
+ hstart = std::max(hstart, 0);
+ auto max_val = std::numeric_limits<ACC_T>::lowest();
+ int max_index{ 0 };
+ for(int y = hstart; y < hend; ++y)
{
- const auto val = static_cast<ACC_T>(src[r * h_src * w_src + y * w_src + x]);
- if(val > max_val)
+ for(int x = wstart; x < wend; ++x)
{
- max_val = val;
- if(data_layout == DataLayout::NCHW)
+ const auto val = static_cast<ACC_T>(src[b * z_src * h_src * w_src + r * h_src * w_src + y * w_src + x]);
+ if(val > max_val)
{
- max_index = coord2index(src.shape(), Coordinates(x, y, r));
- }
- else
- {
- max_index = coord2index(shape_nhwc, Coordinates(r, x, y));
+ max_val = val;
+ if(data_layout == DataLayout::NCHW)
+ {
+ max_index = coord2index(src.shape(), Coordinates(x, y, r, 0));
+ }
+ else
+ {
+ max_index = coord2index(shape_nhwc, Coordinates(r, x, y, 0));
+ }
}
}
}
- }
- dst[r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(max_val);
- if(indices)
- {
- (*indices)[r * h_dst * w_dst + h * w_dst + w] = max_index;
+ dst[b * z_dst * h_dst * w_dst + r * h_dst * w_dst + h * w_dst + w] = static_cast<T>(max_val);
+ if(indices)
+ {
+ (*indices)[b * z_dst * h_dst * w_dst + r * h_dst * w_dst + h * w_dst + w] = max_index;
+ }
}
}
}
@@ -164,7 +171,6 @@ SimpleTensor<T> pooling_layer_internal(const SimpleTensor<T> &src, const Pooling
}
}
}
-
return dst;
}