author     Manuel Bottini <manuel.bottini@arm.com>  2019-08-07 17:04:11 +0100
committer  Manuel Bottini <manuel.bottini@arm.com>  2019-09-09 15:16:51 +0000
commit     9032ee32da54804806a3f26cbbf5a62b3c764f72 (patch)
tree       6264e3def00f2d044b7c28e5159fe8bedb50653d /src
parent     ffd31defdb84d4ca1e24e9248d628c0075767302 (diff)
download   ComputeLibrary-9032ee32da54804806a3f26cbbf5a62b3c764f72.tar.gz
MLCE-129: NEPad 30x slower than TensorFlow's implementation
Change-Id: I44770e6a3134c70c4bd58f890d06cb43c9bd8bff
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1853
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/kernels/NEPadLayerKernel.cpp  259
-rw-r--r--  src/runtime/NEON/functions/NEPadLayer.cpp    50
2 files changed, 263 insertions, 46 deletions
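
For context, the change routes constant-mode padding through the new NEPadLayerKernel instead of a memset followed by a sub-tensor copy. Below is a minimal usage sketch of the runtime function as it would be called after this change; the tensor shape, padding amounts, and allocation steps are illustrative assumptions and not part of this commit:

#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical 8x8 single-channel U8 input, padded by 1 on each side of X and Y.
    Tensor input, output;
    input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 1U), 1, DataType::U8));

    // One (before, after) pair per padded dimension.
    const PaddingList padding = { { 1, 1 }, { 1, 1 } };

    // Default-constructed PixelValue() pads with zero.
    NEPadLayer pad;
    pad.configure(&input, &output, padding, PixelValue(), PaddingMode::CONSTANT);

    input.allocator()->allocate();
    output.allocator()->allocate();

    pad.run(); // output is 10x10; its border holds the constant value
    return 0;
}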
diff --git a/src/core/NEON/kernels/NEPadLayerKernel.cpp b/src/core/NEON/kernels/NEPadLayerKernel.cpp
new file mode 100644
index 0000000000..88a1c2ec83
--- /dev/null
+++ b/src/core/NEON/kernels/NEPadLayerKernel.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEPadLayerKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &paddings, const PaddingMode mode)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(mode != PaddingMode::CONSTANT, "Only constant padding mode is supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(paddings.size() > 4, "Padding list bigger than 4 dimensions");
+ if(output->total_size() != 0)
+ {
+ const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input->tensor_shape(), paddings);
+ const TensorInfo expected_output_info = input->clone()->set_tensor_shape(expected_output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output_info);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+ return Status{};
+}
+} // namespace
+
+template <typename T>
+void NEPadLayerKernel::run_pad_constant(const Window &window)
+{
+ Window output_window{ window };
+ output_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ const size_t element_size = _input->info()->element_size();
+ Iterator output_it(_output, output_window);
+ execute_window_loop(output_window, [&](const Coordinates & id)
+ {
+ Coordinates idin{ id };
+ for(size_t dim = _padding.size() - 1; dim > 0; --dim)
+ {
+ idin[dim] -= _padding[dim].first;
+ if(idin[dim] < 0 || static_cast<int>(_input->info()->dimension(dim)) - 1 < idin[dim])
+ {
+ std::fill_n(reinterpret_cast<T *>(output_it.ptr()), _output->info()->dimension(0), _constant_value.get<T>());
+ return;
+ }
+ }
+ T *input_it_ptr = reinterpret_cast<T *>(_input->ptr_to_element(idin));
+ T *output_it_ptr = reinterpret_cast<T *>(output_it.ptr());
+ std::fill_n(output_it_ptr, _padding[0].first, _constant_value.get<T>());
+ memcpy(output_it_ptr + _padding[0].first, input_it_ptr, _input->info()->dimension(0) * element_size);
+ std::fill_n(output_it_ptr + _padding[0].first + _input->info()->dimension(0), _padding[0].second, _constant_value.get<T>());
+ },
+ output_it);
+}
+
+void NEPadLayerKernel::run_pad_constant_uint8_3Dinput_3Dpad(const Window &window)
+{
+ ARM_COMPUTE_UNUSED(window);
+
+ const size_t start_plane = window.z().start();
+ const size_t end_plane = window.z().end();
+
+ const size_t start_plane_input = start_plane - (_padding.size() > 2 && start_plane >= _padding[2].first ? _padding[2].first : 0);
+
+ const int output_plane_size = _output->info()->dimension(0) * _output->info()->dimension(1);
+ const int input_plane_size = (_input->info()->dimension(0) + _input->info()->padding().right + _input->info()->padding().left) * (_input->info()->dimension(1) + _input->info()->padding().top + _input->info()->padding().bottom);
+
+ const int pad_y_elems_top = (_padding.size() > 1 ? _padding[1].first : 0) * _output->info()->dimension(0);
+ const int pad_y_elems_bot = (_padding.size() > 1 ? _padding[1].second : 0) * _output->info()->dimension(0);
+
+ const size_t jump_to_next_row_input = _input->info()->dimension(0) + _input->info()->padding().right + _input->info()->padding().left;
+ const size_t jump_to_next_row_output = _padding[0].first + _padding[0].second;
+ const size_t jump_to_next_plane_input = _input->info()->padding().empty() ? 0 : _input->info()->dimension(0) * (_input->info()->padding().right + _input->info()->padding().top);
+
+ uint8_t *output_row_ptr = _output->buffer() + start_plane * output_plane_size;
+ const uint8_t *input_it_ptr = _input->buffer() + _input->info()->offset_first_element_in_bytes() + start_plane_input * input_plane_size;
+ const auto pad_value = _constant_value.get<uint8_t>();
+
+ for(size_t z_i = start_plane; z_i < end_plane; ++z_i)
+ {
+ if(_padding.size() > 2 && z_i < _padding[2].first)
+ {
+ memset(output_row_ptr, pad_value, output_plane_size);
+ output_row_ptr += output_plane_size;
+ }
+ else if(_padding.size() > 2 && z_i > _input->info()->dimension(2) + _padding[2].first - 1)
+ {
+ memset(output_row_ptr, pad_value, output_plane_size);
+ output_row_ptr += output_plane_size;
+ }
+ else
+ {
+ memset(output_row_ptr, pad_value, pad_y_elems_top);
+ output_row_ptr += pad_y_elems_top;
+ size_t y_i = _input->info()->dimension(1);
+ // Basic loop unrolling
+ for(; y_i > 3; y_i -= 4)
+ {
+ memset(output_row_ptr, pad_value, _padding[0].first);
+ output_row_ptr += _padding[0].first;
+
+ memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
+ output_row_ptr += _input->info()->dimension(0);
+ input_it_ptr += jump_to_next_row_input;
+
+ memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first);
+ output_row_ptr += jump_to_next_row_output;
+
+ memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
+ output_row_ptr += _input->info()->dimension(0);
+ input_it_ptr += jump_to_next_row_input;
+
+ memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first);
+ output_row_ptr += jump_to_next_row_output;
+
+ memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
+ output_row_ptr += _input->info()->dimension(0);
+ input_it_ptr += jump_to_next_row_input;
+
+ memset(output_row_ptr, pad_value, _padding[0].second + _padding[0].first);
+ output_row_ptr += jump_to_next_row_output;
+
+ memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
+ output_row_ptr += _input->info()->dimension(0);
+ input_it_ptr += jump_to_next_row_input;
+
+ memset(output_row_ptr, pad_value, _padding[0].second);
+ output_row_ptr += _padding[0].second;
+ }
+ for(; y_i > 0; --y_i)
+ {
+ memset(output_row_ptr, pad_value, _padding[0].first);
+ output_row_ptr += _padding[0].first;
+
+ memcpy(output_row_ptr, input_it_ptr, _input->info()->dimension(0));
+ output_row_ptr += _input->info()->dimension(0);
+ input_it_ptr += _input->info()->dimension(0);
+
+ memset(output_row_ptr, pad_value, _padding[0].second);
+ output_row_ptr += _padding[0].second;
+ }
+ input_it_ptr += jump_to_next_plane_input;
+ memset(output_row_ptr, pad_value, pad_y_elems_bot);
+ output_row_ptr += pad_y_elems_bot;
+ }
+ }
+}
+
+NEPadLayerKernel::NEPadLayerKernel()
+ : _func(), _input(nullptr), _output(nullptr), _padding(), _constant_value(), _mode()
+{
+}
+
+void NEPadLayerKernel::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ // Auto-init
+ const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), padding);
+ const TensorInfo expected_output_info = input->info()->clone()->set_tensor_shape(expected_output_shape);
+ auto_init_if_empty(*output->info(), expected_output_info);
+
+ // Perform validation step
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, mode));
+
+ _input = input;
+ _output = output;
+ _padding = padding;
+ _constant_value = constant_value;
+ _mode = mode;
+
+ if(_mode == PaddingMode::CONSTANT)
+ {
+ switch(_input->info()->element_size())
+ {
+ case 1:
+ if(_input->info()->num_dimensions() == 3 && padding.size() <= 3)
+ {
+ _func = &NEPadLayerKernel::run_pad_constant_uint8_3Dinput_3Dpad;
+ }
+ else
+ {
+ _func = &NEPadLayerKernel::run_pad_constant<uint8_t>;
+ }
+ break;
+ case 2:
+ _func = &NEPadLayerKernel::run_pad_constant<uint16_t>;
+ break;
+ case 4:
+ _func = &NEPadLayerKernel::run_pad_constant<uint32_t>;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Element size not supported");
+ break;
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Padding mode not supported");
+ }
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info(), Steps());
+
+ // The kernel does not require border padding, so update_window_and_padding() can be skipped
+ Coordinates coord;
+ coord.set_num_dimensions(output->info()->num_dimensions());
+ output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
+
+ ICPPKernel::configure(win);
+}
+
+Status NEPadLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
+{
+ ARM_COMPUTE_UNUSED(constant_value);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding, mode));
+ return Status{};
+}
+
+void NEPadLayerKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ if(_func != nullptr)
+ {
+ (this->*_func)(window);
+ }
+}
+} // namespace arm_compute
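
The specialized uint8 path above treats every output row as left pad, copied input row, right pad, fills fully padded rows and planes with memset, and unrolls the row loop by four. A standalone sketch of that row-wise strategy on a densely packed 2D uint8 buffer, with function and variable names chosen here purely for illustration:

#include <cstdint>
#include <cstring>

// Pad a densely packed width x height uint8 image with a constant value.
// dst must hold (width + left + right) * (height + top + bottom) bytes.
static void pad_constant_u8_2d(const uint8_t *src, uint8_t *dst,
                               size_t width, size_t height,
                               size_t left, size_t right, size_t top, size_t bottom,
                               uint8_t pad_value)
{
    const size_t out_width = width + left + right;

    // Fully padded rows at the top.
    std::memset(dst, pad_value, top * out_width);
    dst += top * out_width;

    // Each interior row: left pad, copied input row, right pad.
    for(size_t y = 0; y < height; ++y)
    {
        std::memset(dst, pad_value, left);
        std::memcpy(dst + left, src + y * width, width);
        std::memset(dst + left + width, pad_value, right);
        dst += out_width;
    }

    // Fully padded rows at the bottom.
    std::memset(dst, pad_value, bottom * out_width);
}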
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index c608edfdee..cf86240cab 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -34,33 +34,6 @@ namespace arm_compute
{
namespace
{
-TensorInfo get_expected_output_tensorinfo(const ITensorInfo &input, const PaddingList &paddings)
-{
- const TensorShape expected_output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(input.tensor_shape(), paddings);
- const TensorInfo expected_output_info = input.clone()->set_tensor_shape(expected_output_shape);
- return expected_output_info;
-}
-
-Status validate_arguments(const ITensorInfo &input, ITensorInfo &output, const PaddingList &paddings)
-{
- const TensorInfo expected_output_info = get_expected_output_tensorinfo(input, paddings);
- auto_init_if_empty(output, expected_output_info);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&output, &expected_output_info);
-
- return Status{};
-}
-
-Coordinates get_subtensor_coords(const PaddingList &paddings)
-{
- Coordinates coords;
- for(unsigned int i = 0; i < paddings.size(); ++i)
- {
- coords.set(i, paddings[i].first);
- }
-
- return coords;
-}
-
uint32_t last_padding_dimension(const PaddingList &padding)
{
int last_padding_dim = padding.size() - 1;
@@ -76,23 +49,13 @@ uint32_t last_padding_dimension(const PaddingList &padding)
} // namespace
NEPadLayer::NEPadLayer()
- : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results(), _output_subtensor()
+ : _copy_kernel(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}
void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
- // Auto-init
- auto_init_if_empty(*output->info(), get_expected_output_tensorinfo(*input->info(), padding));
-
- // Create SubTensor (Can use sub-tensor as the kernels to be executed do not require padding)
- _output_subtensor = SubTensor(output, input->info()->tensor_shape(), get_subtensor_coords(padding), true);
-
- // Set the pages of the output to the specified value
- _memset_kernel.configure(output, constant_value);
-
- // Copy the input to the output
- _copy_kernel.configure(input, &_output_subtensor);
+ _pad_kernel.configure(input, output, padding, constant_value, PaddingMode::CONSTANT);
}
void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
@@ -253,11 +216,7 @@ Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output,
{
case PaddingMode::CONSTANT:
{
- auto output_clone = output->clone();
- SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding));
- ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info));
- break;
+ return NEPadLayerKernel::validate(input, output, padding, constant_value, mode);
}
case PaddingMode::REFLECT:
case PaddingMode::SYMMETRIC:
@@ -293,8 +252,7 @@ void NEPadLayer::run()
{
case PaddingMode::CONSTANT:
{
- NEScheduler::get().schedule(&_memset_kernel, Window::DimY);
- NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+ NEScheduler::get().schedule(&_pad_kernel, Window::DimZ);
break;
}
case PaddingMode::REFLECT: