From a09de0c8b2ed0f1481502d3b023375609362d9e3 Mon Sep 17 00:00:00 2001
From: Moritz Pflanzer
Date: Fri, 1 Sep 2017 20:41:12 +0100
Subject: COMPMID-415: Rename and move tests

The boost validation is now "standalone" in validation_old and builds as
arm_compute_validation_old. The new validation builds now as
arm_compute_validation.

Change-Id: Ib93ba848a25680ac60afb92b461d574a0757150d
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86187
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 tests/validation/CPP/PoolingLayer.cpp | 243 ++++++++++++++++++++++++++++++++++
 1 file changed, 243 insertions(+)
 create mode 100644 tests/validation/CPP/PoolingLayer.cpp

diff --git a/tests/validation/CPP/PoolingLayer.cpp b/tests/validation/CPP/PoolingLayer.cpp
new file mode 100644
index 0000000000..c4425ca9a1
--- /dev/null
+++ b/tests/validation/CPP/PoolingLayer.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "PoolingLayer.h"
+
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+namespace
+{
+TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
+{
+    TensorShape dst_shape = shape;
+    const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(shape.x(),
+                                                                                              shape.y(),
+                                                                                              info.pool_size(),
+                                                                                              info.pool_size(),
+                                                                                              info.pad_stride_info());
+    dst_shape.set(0, scaled_dims.first);
+    dst_shape.set(1, scaled_dims.second);
+
+    return dst_shape;
+}
+} // namespace
+
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
+{
+    const int pool_size = info.pool_size();
+    PoolingType type = info.pool_type();
+    int pool_stride_x = info.pad_stride_info().stride().first;
+    int pool_stride_y = info.pad_stride_info().stride().second;
+    int pad_x = info.pad_stride_info().pad().first;
+    int pad_y = info.pad_stride_info().pad().second;
+
+    const auto w_src = static_cast<int>(src.shape()[0]);
+    const auto h_src = static_cast<int>(src.shape()[1]);
+    const int upper_dims = src.shape().total_size() / (w_src * h_src);
+
+    // Create reference
+    SimpleTensor<T> dst{ calculate_output_shape(src.shape(), info), src.data_type(), 1, src.fixed_point_position() };
+
+    const auto w_dst = static_cast<int>(dst.shape()[0]);
+    const auto h_dst = static_cast<int>(dst.shape()[1]);
+
+    if(type == PoolingType::MAX)
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend = std::min(wstart + pool_size, w_src);
+                    int hend = std::min(hstart + pool_size, h_src);
+                    wstart = std::max(wstart, 0);
+                    hstart = std::max(hstart, 0);
+
+                    T max_val = std::numeric_limits<T>::lowest();
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            const T val = src[r * h_src * w_src + y * w_src + x];
+                            if(val > max_val)
+                            {
+                                max_val = val;
+                            }
+                        }
+                    }
+
+                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
+                }
+            }
+        }
+    }
+    else // Average pooling
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    T avg_val(0);
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend = std::min(wstart + pool_size, w_src + pad_x);
+                    int hend = std::min(hstart + pool_size, h_src + pad_y);
+                    int pool = (hend - hstart) * (wend - wstart);
+                    wstart = std::max(wstart, 0);
+                    hstart = std::max(hstart, 0);
+                    wend = std::min(wend, w_src);
+                    hend = std::min(hend, h_src);
+
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            avg_val += src[r * h_src * w_src + y * w_src + x];
+                        }
+                    }
+                    dst[r * h_dst * w_dst + h * w_dst + w] = avg_val / pool;
+                }
+            }
+        }
+    }
+
+    return dst;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
+{
+    const int pool_size = info.pool_size();
+    PoolingType type = info.pool_type();
+    int pool_stride_x = info.pad_stride_info().stride().first;
+    int pool_stride_y = info.pad_stride_info().stride().second;
+    int pad_x = info.pad_stride_info().pad().first;
+    int pad_y = info.pad_stride_info().pad().second;
+
+    const auto w_src = static_cast<int>(src.shape()[0]);
+    const auto h_src = static_cast<int>(src.shape()[1]);
+    const int upper_dims = src.shape().total_size() / (w_src * h_src);
+
+    // Create reference
+    SimpleTensor<T> dst{ calculate_output_shape(src.shape(), info), src.data_type(), 1, src.fixed_point_position() };
+
+    const auto w_dst = static_cast<int>(dst.shape()[0]);
+    const auto h_dst = static_cast<int>(dst.shape()[1]);
+
+    if(type == PoolingType::MAX)
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend = std::min(wstart + pool_size, w_src);
+                    int hend = std::min(hstart + pool_size, h_src);
+                    wstart = std::max(wstart, 0);
+                    hstart = std::max(hstart, 0);
+
+                    T max_val = std::numeric_limits<T>::lowest();
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            const T val = src[r * h_src * w_src + y * w_src + x];
+                            if(val > max_val)
+                            {
+                                max_val = val;
+                            }
+                        }
+                    }
+
+                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
+                }
+            }
+        }
+    }
+    else // Average pooling
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend = std::min(wstart + pool_size, w_src + pad_x);
+                    int hend = std::min(hstart + pool_size, h_src + pad_y);
+                    int pool = (hend - hstart) * (wend - wstart);
+                    wstart = std::max(wstart, 0);
+                    hstart = std::max(hstart, 0);
+                    wend = std::min(wend, w_src);
+                    hend = std::min(hend, h_src);
+
+                    using namespace fixed_point_arithmetic;
+
+                    const int fixed_point_position = src.fixed_point_position();
+                    const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
+                    fixed_point<T> avg_val(0, fixed_point_position, true);
+
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
+                            avg_val = add(avg_val, in_fp);
+                        }
+                    }
+                    dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
+                }
+            }
+        }
+    }
+
+    return dst;
+}
+
+template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, PoolingLayerInfo info);
+template SimpleTensor<half_float::half> pooling_layer(const SimpleTensor<half_float::half> &src, PoolingLayerInfo info);
+template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, PoolingLayerInfo info);
+template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, PoolingLayerInfo info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
--
cgit v1.2.1
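Note on the reference average pooling in the patch above: the divisor pool is computed
from the window extent before it is clamped to the input, so padded positions count
towards the divisor while only valid input pixels are accumulated. The standalone sketch
below reproduces just that arithmetic on a plain buffer; it is not part of the patch and
does not use the library's SimpleTensor or PoolingLayerInfo types. The 4x4 input, 3x3
pool, stride 2, pad 1 configuration and the floor-based output-size formula are
illustrative assumptions (the library derives the output shape through
arm_compute::scaled_dimensions, which may round differently).

// Standalone illustration of the average-pooling window arithmetic used by the
// reference implementation above (assumed toy configuration, all-ones input).
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    const int w_src = 4, h_src = 4;
    const int pool_size = 3, stride = 2, pad = 1;
    // Floor-based output size, assumed here purely for illustration.
    const int w_dst = (w_src + 2 * pad - pool_size) / stride + 1; // 2
    const int h_dst = (h_src + 2 * pad - pool_size) / stride + 1; // 2

    std::vector<float> src(w_src * h_src, 1.f); // all-ones input, easy to check by hand
    std::vector<float> dst(w_dst * h_dst, 0.f);

    for(int h = 0; h < h_dst; ++h)
    {
        for(int w = 0; w < w_dst; ++w)
        {
            // Window bounds in input coordinates, before clamping.
            int wstart = w * stride - pad;
            int hstart = h * stride - pad;
            int wend = std::min(wstart + pool_size, w_src + pad);
            int hend = std::min(hstart + pool_size, h_src + pad);
            // Divisor covers the padded window (count-include-pad semantics).
            const int pool = (hend - hstart) * (wend - wstart);
            // Accumulation is clamped to valid input pixels only.
            wstart = std::max(wstart, 0);
            hstart = std::max(hstart, 0);
            wend = std::min(wend, w_src);
            hend = std::min(hend, h_src);

            float sum = 0.f;
            for(int y = hstart; y < hend; ++y)
            {
                for(int x = wstart; x < wend; ++x)
                {
                    sum += src[y * w_src + x];
                }
            }
            // e.g. the top-left window sums 4 valid ones but divides by the full 3x3 window -> 4/9.
            dst[h * w_dst + w] = sum / pool;
        }
    }

    std::printf("%.3f %.3f %.3f %.3f\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}

With the all-ones input this prints 0.444 0.667 0.667 1.000, which makes the
include-padding divisor visible in the corner and edge windows.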