From 9032ee32da54804806a3f26cbbf5a62b3c764f72 Mon Sep 17 00:00:00 2001 From: Manuel Bottini Date: Wed, 7 Aug 2019 17:04:11 +0100 Subject: MLCE-129: NEPad 30x slower than TensorFlow's implementation Change-Id: I44770e6a3134c70c4bd58f890d06cb43c9bd8bff Signed-off-by: Manuel Bottini Reviewed-on: https://review.mlplatform.org/c/1853 Reviewed-by: Giorgio Arena Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins --- arm_compute/core/NEON/NEKernels.h | 1 + arm_compute/core/NEON/kernels/NEPadLayerKernel.h | 113 ++++++++++ arm_compute/runtime/NEON/functions/NEPadLayer.h | 16 +- src/core/NEON/kernels/NEPadLayerKernel.cpp | 259 +++++++++++++++++++++++ src/runtime/NEON/functions/NEPadLayer.cpp | 50 +---- tests/benchmark/NEON/PadLayer.cpp | 87 ++++++++ tests/benchmark/fixtures/PadLayerFixture.h | 109 ++++++++++ tests/validation/fixtures/PadLayerFixture.h | 1 - 8 files changed, 583 insertions(+), 53 deletions(-) create mode 100644 arm_compute/core/NEON/kernels/NEPadLayerKernel.h create mode 100644 src/core/NEON/kernels/NEPadLayerKernel.cpp create mode 100644 tests/benchmark/NEON/PadLayer.cpp create mode 100644 tests/benchmark/fixtures/PadLayerFixture.h diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h index f86a1d0507..8d8f7439a5 100644 --- a/arm_compute/core/NEON/NEKernels.h +++ b/arm_compute/core/NEON/NEKernels.h @@ -114,6 +114,7 @@ #include "arm_compute/core/NEON/kernels/NENonLinearFilterKernel.h" #include "arm_compute/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h" #include "arm_compute/core/NEON/kernels/NENormalizationLayerKernel.h" +#include "arm_compute/core/NEON/kernels/NEPadLayerKernel.h" #include "arm_compute/core/NEON/kernels/NEPermuteKernel.h" #include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h" #include "arm_compute/core/NEON/kernels/NEPoolingLayerKernel.h" diff --git a/arm_compute/core/NEON/kernels/NEPadLayerKernel.h b/arm_compute/core/NEON/kernels/NEPadLayerKernel.h new file mode 100644 index 0000000000..0a57b97997 --- /dev/null +++ b/arm_compute/core/NEON/kernels/NEPadLayerKernel.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_NEPADLAYERKERNEL_H__ +#define __ARM_COMPUTE_NEPADLAYERKERNEL_H__ + +#include "arm_compute/core/NEON/INEKernel.h" + +namespace arm_compute +{ +class ITensor; + +/** NEON kernel to add padding to a tensor + * + * Add padding given padding information + */ +class NEPadLayerKernel : public INEKernel +{ +public: + const char *name() const override + { + return "NEPadLayerKernel"; + } + /** Default constructor */ + NEPadLayerKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEPadLayerKernel(const NEPadLayerKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEPadLayerKernel &operator=(const NEPadLayerKernel &) = delete; + /** Allow instances of this class to be moved */ + NEPadLayerKernel(NEPadLayerKernel &&) = default; + /** Allow instances of this class to be moved */ + NEPadLayerKernel &operator=(NEPadLayerKernel &&) = default; + /** Default destructor */ + ~NEPadLayerKernel() = default; + + /** Initialize the function + * + * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. + * @param[out] output Output tensor. Data type supported: same as @p input + * @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i] + * specifies the front and the end padding in the i-th dimension. + * @param[in] constant_value (Optional) Constant value to be used for the padding + * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT. + * Only CONSTANT padding mode is currently supported + */ + void configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value = PixelValue(), const PaddingMode mode = PaddingMode::CONSTANT); + /** Static function to check if given info will lead to a valid configuration of @ref NEPadLayer. + * + * @param[in] input Source tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32. + * @param[in] output Output tensor info. Data type supported: same as @p input + * @param[in] padding The padding for each spatial dimension of the input tensor. The pair padding[i] + * specifies the front and the end padding in the i-th dimension. + * @param[in] constant_value (Optional) Constant value to be used for the padding + * @param[in] mode (Optional) Controls whether the padding should be filled with @p constant_value using CONSTANT. + * Only CONSTANT padding mode is currently supported + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value = PixelValue(), const PaddingMode mode = PaddingMode::CONSTANT); + + // Inherited methods overridden: + void run(const Window &window, const ThreadInfo &info) override; + +private: + /** Template function to run the padding function with constant padding + * + * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). + */ + template + void run_pad_constant(const Window &window); + + /** Function to run the padding function with constant padding for 3D input and 1D, 2D, 3D padding + * + * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). 
+ */ + void run_pad_constant_uint8_3Dinput_3Dpad(const Window &window); + + /** Common signature for all the specialised permute functions + * + * @param[in] window Region on which to execute the kernel. + */ + using PadFunctionPtr = void (NEPadLayerKernel::*)(const Window &window); + + PadFunctionPtr _func; + const ITensor *_input; + ITensor *_output; + PaddingList _padding; + PixelValue _constant_value; + PaddingMode _mode; +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_NEPADLAYERKERNEL_H__ */ diff --git a/arm_compute/runtime/NEON/functions/NEPadLayer.h b/arm_compute/runtime/NEON/functions/NEPadLayer.h index 67f68b86d3..5ba951a94d 100644 --- a/arm_compute/runtime/NEON/functions/NEPadLayer.h +++ b/arm_compute/runtime/NEON/functions/NEPadLayer.h @@ -30,16 +30,21 @@ #include "arm_compute/runtime/SubTensor.h" #include "arm_compute/core/NEON/kernels/NECopyKernel.h" -#include "arm_compute/core/NEON/kernels/NEMemsetKernel.h" +#include "arm_compute/core/NEON/kernels/NEPadLayerKernel.h" #include "arm_compute/core/Types.h" #include "arm_compute/runtime/Tensor.h" namespace arm_compute { -/** Basic function to pad a tensor. This function calls the following NEON kernels: +/** Basic function to pad a tensor. This function calls the following NEON functions/kernels: + * + * - For padding mode = PaddingMode::CONSTANT: + * -# @ref NEPadLayerKernel + * - Otherwise: + * -# @ref NECopyKernel + * -# @ref NEStridedSlice + * -# @ref NEConcatenateLayer * - * -# @ref NEMemsetKernel - * -# @ref NECopyKernel */ class NEPadLayer : public IFunction { @@ -93,15 +98,14 @@ private: private: NECopyKernel _copy_kernel; + NEPadLayerKernel _pad_kernel; PaddingMode _mode; PaddingList _padding; - NEMemsetKernel _memset_kernel; uint32_t _num_dimensions; std::vector _slice_functions; std::vector _concat_functions; std::vector _slice_results; std::vector _concat_results; - SubTensor _output_subtensor; }; } // namespace arm_compute #endif /*__ARM_COMPUTE_NEPADLAYER_H__ */ diff --git a/src/core/NEON/kernels/NEPadLayerKernel.cpp b/src/core/NEON/kernels/NEPadLayerKernel.cpp new file mode 100644 index 0000000000..88a1c2ec83 --- /dev/null +++ b/src/core/NEON/kernels/NEPadLayerKernel.cpp @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/NEON/kernels/NEPadLayerKernel.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/wrapper/wrapper.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" + +namespace arm_compute +{ +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &paddings, const PaddingMode mode) +{ + ARM_COMPUTE_RETURN_ERROR_ON_MSG(mode != PaddingMode::CONSTANT, "Only constant padding mode is supported"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(paddings.size() > 4, "Padding list bigger than 4 dimensions"); + if(output->total_size() != 0) + { + const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input->tensor_shape(), paddings); + const TensorInfo expected_output_info = input->clone()->set_tensor_shape(expected_output_shape); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output_info); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + } + return Status{}; +} +} // namespace + +template +void NEPadLayerKernel::run_pad_constant(const Window &window) +{ + Window output_window{ window }; + output_window.set(Window::DimX, Window::Dimension(0, 1, 1)); + + const size_t element_size = _input->info()->element_size(); + Iterator output_it(_output, output_window); + execute_window_loop(output_window, [&](const Coordinates & id) + { + Coordinates idin{ id }; + for(size_t dim = _padding.size() - 1; dim > 0; --dim) + { + idin[dim] -= _padding[dim].first; + if(idin[dim] < 0 || static_cast(_input->info()->dimension(dim)) - 1 < idin[dim]) + { + std::fill_n(reinterpret_cast(output_it.ptr()), _output->info()->dimension(0), _constant_value.get()); + return; + } + } + T *input_it_ptr = reinterpret_cast(_input->ptr_to_element(idin)); + T *output_it_ptr = reinterpret_cast(output_it.ptr()); + std::fill_n(output_it_ptr, _padding[0].first, _constant_value.get()); + memcpy(output_it_ptr + _padding[0].first, input_it_ptr, _input->info()->dimension(0) * element_size); + std::fill_n(output_it_ptr + _padding[0].first + _input->info()->dimension(0), _padding[0].second, _constant_value.get()); + }, + output_it); +} + +void NEPadLayerKernel::run_pad_constant_uint8_3Dinput_3Dpad(const Window &window) +{ + ARM_COMPUTE_UNUSED(window); + + const size_t start_plane = window.z().start(); + const size_t end_plane = window.z().end(); + + const size_t start_plane_input = start_plane - (_padding.size() > 2 && start_plane >= _padding[2].first ? _padding[2].first : 0); + + const int output_plane_size = _output->info()->dimension(0) * _output->info()->dimension(1); + const int input_plane_size = (_input->info()->dimension(0) + _input->info()->padding().right + _input->info()->padding().left) * (_input->info()->dimension( + 1) + + _input->info()->padding().top + _input->info()->padding().bottom); + + const int pad_y_elems_top = (_padding.size() > 1 ? _padding[1].first : 0) * _output->info()->dimension(0); + const int pad_y_elems_bot = (_padding.size() > 1 ? 
_padding[1].second : 0) * _output->info()->dimension(0); + + const size_t jump_to_next_row_input = _input->info()->dimension(0) + _input->info()->padding().right + _input->info()->padding().left; + const size_t jump_to_next_row_output = _padding[0].first + _padding[0].second; + const size_t jump_to_next_plane_input = _input->info()->padding().empty() ? 0 : _input->info()->dimension(0) * (_input->info()->padding().right + _input->info()->padding().top); + + uint8_t *output_row_ptr = _output->buffer() + start_plane * output_plane_size; + const uint8_t *input_it_ptr = _input->buffer() + _input->info()->offset_first_element_in_bytes() + start_plane_input * input_plane_size; + const auto pad_value = _constant_value.get(); + + for(size_t z_i = start_plane; z_i < end_plane; ++z_i) + { + if(_padding.size() > 2 && z_i < _padding[2].first) + { + memset(output_row_ptr, pad_value, output_plane_size); + output_row_ptr += output_plane_size; + } + else if(_padding.size() > 2 && z_i > _input->info()->dimension(2) + _padding[2].first - 1) + { + memset(output_row_ptr, pad_value, output_plane_size); + output_row_ptr += output_plane_size; + } + else + { + memset(output_row_ptr, pad_value, pad_y_elems_top); + output_row_ptr += pad_y_elems_top; + size_t y_i = _input->info()->dimension(1); + // Basic loop unrolling + for(; y_i > 3; y_i -= 4) + { + memset(output_row_ptr, pad_value, _padding[0].first); + output_row_ptr += _padding[0].first; + + memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0)); + output_row_ptr += _input->info()->dimension(0); + input_it_ptr += jump_to_next_row_input; + + memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first); + output_row_ptr += jump_to_next_row_output; + + memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0)); + output_row_ptr += _input->info()->dimension(0); + input_it_ptr += jump_to_next_row_input; + + memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first); + output_row_ptr += jump_to_next_row_output; + + memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0)); + output_row_ptr += _input->info()->dimension(0); + input_it_ptr += jump_to_next_row_input; + + memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first); + output_row_ptr += jump_to_next_row_output; + + memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0)); + output_row_ptr += _input->info()->dimension(0); + input_it_ptr += jump_to_next_row_input; + + memset(output_row_ptr, pad_value, _padding[0].second); + output_row_ptr += _padding[0].second; + } + for(; y_i > 0; --y_i) + { + memset(output_row_ptr, pad_value, _padding[0].first); + output_row_ptr += _padding[0].first; + + memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0)); + output_row_ptr += _input->info()->dimension(0); + input_it_ptr += _input->info()->dimension(0); + + memset(output_row_ptr, pad_value, _padding[0].second); + output_row_ptr += _padding[0].second; + } + input_it_ptr += jump_to_next_plane_input; + memset(output_row_ptr, pad_value, pad_y_elems_bot); + output_row_ptr += pad_y_elems_bot; + } + } +} + +NEPadLayerKernel::NEPadLayerKernel() + : _func(), _input(nullptr), _output(nullptr), _padding(), _constant_value(), _mode() +{ +} + +void NEPadLayerKernel::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + // Auto-init + const TensorShape expected_output_shape = 
arm_compute::misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), padding); + const TensorInfo expected_output_info = input->info()->clone()->set_tensor_shape(expected_output_shape); + auto_init_if_empty(*output->info(), expected_output_info); + + // Perform validation step + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, mode)); + + _input = input; + _output = output; + _padding = padding; + _constant_value = constant_value; + _mode = mode; + + if(_mode == PaddingMode::CONSTANT) + { + switch(_input->info()->element_size()) + { + case 1: + if(_input->info()->num_dimensions() == 3 && padding.size() <= 3) + { + _func = &NEPadLayerKernel::run_pad_constant_uint8_3Dinput_3Dpad; + } + else + { + _func = &NEPadLayerKernel::run_pad_constant; + } + break; + case 2: + _func = &NEPadLayerKernel::run_pad_constant; + break; + case 4: + _func = &NEPadLayerKernel::run_pad_constant; + break; + default: + ARM_COMPUTE_ERROR("Element size not supported"); + break; + } + } + else + { + ARM_COMPUTE_ERROR("Padding mode not supported"); + } + + // Configure kernel window + Window win = calculate_max_window(*output->info(), Steps()); + + // The NEPad doesn't need padding so update_window_and_padding() can be skipped + Coordinates coord; + coord.set_num_dimensions(output->info()->num_dimensions()); + output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape())); + + ICPPKernel::configure(win); +} + +Status NEPadLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode) +{ + ARM_COMPUTE_UNUSED(constant_value); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding, mode)); + return Status{}; +} + +void NEPadLayerKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + if(_func != nullptr) + { + (this->*_func)(window); + } +} +} // namespace arm_compute diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp index c608edfdee..cf86240cab 100644 --- a/src/runtime/NEON/functions/NEPadLayer.cpp +++ b/src/runtime/NEON/functions/NEPadLayer.cpp @@ -34,33 +34,6 @@ namespace arm_compute { namespace { -TensorInfo get_expected_output_tensorinfo(const ITensorInfo &input, const PaddingList &paddings) -{ - const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input.tensor_shape(), paddings); - const TensorInfo expected_output_info = input.clone()->set_tensor_shape(expected_output_shape); - return expected_output_info; -} - -Status validate_arguments(const ITensorInfo &input, ITensorInfo &output, const PaddingList &paddings) -{ - const TensorInfo expected_output_info = get_expected_output_tensorinfo(input, paddings); - auto_init_if_empty(output, expected_output_info); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&output, &expected_output_info); - - return Status{}; -} - -Coordinates get_subtensor_coords(const PaddingList &paddings) -{ - Coordinates coords; - for(unsigned int i = 0; i < paddings.size(); ++i) - { - coords.set(i, paddings[i].first); - } - - return coords; -} - uint32_t last_padding_dimension(const PaddingList &padding) { int last_padding_dim = padding.size() - 1; @@ -76,23 +49,13 @@ uint32_t last_padding_dimension(const PaddingList &padding) } // namespace 
NEPadLayer::NEPadLayer() - : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results(), _output_subtensor() + : _copy_kernel(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results() { } void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value) { - // Auto-init - auto_init_if_empty(*output->info(), get_expected_output_tensorinfo(*input->info(), padding)); - - // Create SubTensor (Can use sub-tensor as the kernels to be executed do not require padding) - _output_subtensor = SubTensor(output, input->info()->tensor_shape(), get_subtensor_coords(padding), true); - - // Set the pages of the output to the specified value - _memset_kernel.configure(output, constant_value); - - // Copy the input to the output - _copy_kernel.configure(input, &_output_subtensor); + _pad_kernel.configure(input, output, padding, constant_value, PaddingMode::CONSTANT); } void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output) @@ -253,11 +216,7 @@ Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, { case PaddingMode::CONSTANT: { - auto output_clone = output->clone(); - SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding)); - ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info)); - break; + return NEPadLayerKernel::validate(input, output, padding, constant_value, mode); } case PaddingMode::REFLECT: case PaddingMode::SYMMETRIC: @@ -293,8 +252,7 @@ void NEPadLayer::run() { case PaddingMode::CONSTANT: { - NEScheduler::get().schedule(&_memset_kernel, Window::DimY); - NEScheduler::get().schedule(&_copy_kernel, Window::DimY); + NEScheduler::get().schedule(&_pad_kernel, Window::DimZ); break; } case PaddingMode::REFLECT: diff --git a/tests/benchmark/NEON/PadLayer.cpp b/tests/benchmark/NEON/PadLayer.cpp new file mode 100644 index 0000000000..c55c93b8a6 --- /dev/null +++ b/tests/benchmark/NEON/PadLayer.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/NEON/functions/NEPadLayer.h" +#include "arm_compute/runtime/Tensor.h" +#include "arm_compute/runtime/TensorAllocator.h" +#include "tests/NEON/Accessor.h" +#include "tests/benchmark/fixtures/PadLayerFixture.h" +#include "tests/datasets/ShapeDatasets.h" +#include "tests/datasets/SplitDataset.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" + +namespace arm_compute +{ +namespace test +{ +namespace benchmark +{ +namespace +{ +const auto Fssd_25_8bit_ShapesDataset = framework::dataset::make("TensorShape", +{ + TensorShape{ 320U, 320U, 3U }, + TensorShape{ 160U, 160U, 16U }, + TensorShape{ 80U, 80U, 32U }, + TensorShape{ 40U, 40U, 64U }, + TensorShape{ 20U, 20U, 128U }, + TensorShape{ 10U, 10U, 256U }, + TensorShape{ 10U, 10U, 64U }, + TensorShape{ 5U, 5U, 32U }, + TensorShape{ 3U, 3U, 32U }, + TensorShape{ 2U, 2U, 32U } +}); + +const auto PaddingSizesDataset = framework::dataset::make("PaddingSize", +{ + PaddingList{ { 1, 1 }, { 1, 1 } }, +}); +} // namespace + +TEST_SUITE(NEON) +TEST_SUITE(PadLayer) + +template +using NEPaddingFixture = PaddingFixture; + +REGISTER_FIXTURE_DATA_TEST_CASE(RunF32, NEPaddingFixture, framework::DatasetMode::ALL, + combine(combine(combine( + Fssd_25_8bit_ShapesDataset, + framework::dataset::make("DataType", { DataType::F32 })), + PaddingSizesDataset), + framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT }))); + +REGISTER_FIXTURE_DATA_TEST_CASE(RunQASYMM8, NEPaddingFixture, framework::DatasetMode::ALL, + combine(combine(combine( + Fssd_25_8bit_ShapesDataset, + framework::dataset::make("DataType", { DataType::QASYMM8 })), + PaddingSizesDataset), + framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT }))); + +TEST_SUITE_END() // PadLayer +TEST_SUITE_END() // NEON +} // namespace benchmark +} // namespace test +} // namespace arm_compute diff --git a/tests/benchmark/fixtures/PadLayerFixture.h b/tests/benchmark/fixtures/PadLayerFixture.h new file mode 100644 index 0000000000..2f482a0abf --- /dev/null +++ b/tests/benchmark/fixtures/PadLayerFixture.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_TEST_PADLAYERFIXTURE +#define ARM_COMPUTE_TEST_PADLAYERFIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "tests/Globals.h" +#include "tests/Utils.h" +#include "tests/framework/Fixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace benchmark +{ +/** Fixture that can be used for NEON and CL */ + +template +class PaddingFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape shape, DataType data_type, const PaddingList &paddings, const PaddingMode mode) + { + PaddingList clamped_padding = paddings; + if(mode != PaddingMode::CONSTANT) + { + // Clamp padding to prevent applying more than is possible. + for(uint32_t i = 0; i < paddings.size(); ++i) + { + if(mode == PaddingMode::REFLECT) + { + clamped_padding[i].first = std::min(static_cast(paddings[i].first), static_cast(shape[i] - 1)); + clamped_padding[i].second = std::min(static_cast(paddings[i].second), static_cast(shape[i] - 1)); + } + else + { + clamped_padding[i].first = std::min(static_cast(paddings[i].first), static_cast(shape[i])); + clamped_padding[i].second = std::min(static_cast(paddings[i].second), static_cast(shape[i])); + } + } + } + + const PixelValue const_value = PixelValue(static_cast(0)); + + TensorShape output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(shape, paddings); + + // Create tensors + src = create_tensor(shape, data_type); + dst = create_tensor(output_shape, data_type); + + // Create and configure function + pad_layer.configure(&src, &dst, paddings, const_value, mode); + + // Allocate tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + } + + void run() + { + pad_layer.run(); + } + + void sync() + { + sync_if_necessary(); + sync_tensor_if_necessary(dst); + } + + void teardown() + { + src.allocator()->free(); + dst.allocator()->free(); + } + +private: + TensorType src{}; + TensorType dst{}; + Function pad_layer{}; +}; +} // namespace benchmark +} // namespace test +} // namespace arm_compute +#endif /* ARM_COMPUTE_TEST_PADLAYERFIXTURE */ diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h index 3538cabfeb..58ca81f0df 100644 --- a/tests/validation/fixtures/PadLayerFixture.h +++ b/tests/validation/fixtures/PadLayerFixture.h @@ -109,7 +109,6 @@ protected: // Compute function padding.run(); - return dst; } -- cgit v1.2.1
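
Usage note: the benchmark fixture added by this patch already shows the call pattern for the new constant-padding path (create tensors, configure, allocate, run). Below is a minimal standalone sketch of the same flow, assuming the Arm Compute Library NEON runtime API of this release; the tensor shape, padding amounts and zero fill value are illustrative only, taken from the Fssd_25_8bit_ShapesDataset and PaddingSizesDataset used in the benchmark, not a prescribed configuration.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Source tensor: 320x320x3, F32 (first shape in the benchmark dataset).
    Tensor src{};
    Tensor dst{};
    src.allocator()->init(TensorInfo(TensorShape(320U, 320U, 3U), 1, DataType::F32));

    // Pad the two spatial dimensions by one element on each side.
    // PaddingMode::CONSTANT selects the new NEPadLayerKernel path added by this patch;
    // REFLECT/SYMMETRIC still go through the strided-slice/concatenate route.
    const PaddingList padding = { { 1, 1 }, { 1, 1 } };

    NEPadLayer pad{};
    pad.configure(&src, &dst, padding, PixelValue(), PaddingMode::CONSTANT);

    // dst is auto-initialised by configure() to the padded shape (322x322x3 here),
    // so it only needs to be allocated before running.
    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with data ...

    pad.run();
    return 0;
}

As in the benchmark fixture, configure() is called before allocation and run() dispatches the kernel through the scheduler (split over DimZ in the constant case), so the caller does not set up any execution window.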