author      Usama Arif <usama.arif@arm.com>        2019-03-14 15:36:54 +0000
committer   Pablo Marquez <pablo.tello@arm.com>    2019-03-27 09:22:04 +0000
commit      8cf8c1123440c2002ee108d1949529bf21eac944 (patch)
tree        cc61d9ed5ee805c4356b8497b2e81f67b194b36a /src/runtime/NEON/functions/NEPadLayer.cpp
parent      adc2186c06ca27f368dfe6ceadce449551259efc (diff)
COMPMID-1944 Add support for "reflect" padding mode in NEPad
Change-Id: I56c42524497d37d44708648571fa211ac1afbd98
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/885
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Diffstat (limited to 'src/runtime/NEON/functions/NEPadLayer.cpp')
-rw-r--r--    src/runtime/NEON/functions/NEPadLayer.cpp    245
1 file changed, 231 insertions(+), 14 deletions(-)
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index f5c2718cec..62a7d4559b 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
@@ -61,18 +60,29 @@ Coordinates get_subtensor_coords(const PaddingList &paddings)
    return coords;
}
+
+uint32_t last_padding_dimension(const PaddingList &padding)
+{
+    int last_padding_dim = padding.size() - 1;
+    for(; last_padding_dim >= 0; --last_padding_dim)
+    {
+        if(padding[last_padding_dim].first > 0 || padding[last_padding_dim].second > 0)
+        {
+            break;
+        }
+    }
+    return static_cast<uint32_t>(last_padding_dim);
+}
} // namespace
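
As an illustration of the helper above (a sketch with hypothetical values, not code from the patch): last_padding_dimension() returns the index of the outermost dimension that actually receives padding, and the unsigned wrap-around when nothing is padded is what routes configure() to the plain-copy path:

    const PaddingList padding = { { 1, 0 }, { 0, 0 }, { 0, 2 } };
    uint32_t num_dims = last_padding_dimension(padding) + 1; // index 2 is padded -> 3

    const PaddingList no_padding = { { 0, 0 }, { 0, 0 } };
    // The loop runs past index 0 and returns static_cast<uint32_t>(-1);
    // adding 1 wraps around to 0, so configure() takes the copy-only branch.
    num_dims = last_padding_dimension(no_padding) + 1; // -> 0
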
NEPadLayer::NEPadLayer()
-    : _memset_kernel(), _copy_kernel(), _output_subtensor()
+    : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr),
+      _output_subtensor()
{
}
-void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, PixelValue constant_value)
+void NEPadLayer::configure_constant_mode(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value)
{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_THROW_ON_ERROR(NEPadLayer::validate(input->info(), output->info(), padding, constant_value));
-
    // Auto-init
    auto_init_if_empty(*output->info(), get_expected_output_tensorinfo(*input->info(), padding));
@@ -86,23 +96,230 @@ void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &p
    _copy_kernel.configure(input, &_output_subtensor);
}
-Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value)
+void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *output)
+{
+    // Reflecting can be performed by effectively unfolding the input as follows:
+    // For each dimension starting at DimX:
+    //      For before and after:
+    //          Use strided slice to extract and reverse the part of the
+    //          input / previously produced tensor required for the padding.
+    //      Concatenate the before and after padding with the input / previously
+    //      produced tensor along the current dimension.
+
+    // Two strided slice functions will be required for each dimension padded as well as a
+    // concatenate function and the tensors to hold the temporary results.
+    _slice_functions  = arm_compute::support::cpp14::make_unique<NEStridedSlice[]>(2 * _num_dimensions);
+    _slice_results    = arm_compute::support::cpp14::make_unique<Tensor[]>(2 * _num_dimensions);
+    _concat_functions = arm_compute::support::cpp14::make_unique<NEConcatenateLayer[]>(_num_dimensions);
+    _concat_results   = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_dimensions - 1);
+    Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+    ITensor *prev = input;
+    for(uint32_t i = 0; i < _num_dimensions; ++i)
+    {
+        // Values in strides from the previous dimensions need to be set to 1 to avoid reversing again.
+        if(i > 0)
+        {
+            strides.set(i - 1, 1);
+        }
+
+        if(_padding[i].first > 0 || _padding[i].second > 0)
+        {
+            // Set the starts, ends, and strides values for the current dimension.
+            // Due to the bit masks passed to strided slice, the values below the current dimension in
+            // starts and ends will be ignored so do not need to be modified.
+            if(_mode == PaddingMode::REFLECT)
+            {
+                starts_before.set(i, _padding[i].first);
+                ends_before.set(i, 0);
+                starts_after.set(i, input->info()->dimension(i) - 2);
+                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 2);
+                strides.set(i, -1);
+            }
+            else
+            {
+                starts_before.set(i, _padding[i].first - 1);
+                ends_before.set(i, -1);
+                starts_after.set(i, input->info()->dimension(i) - 1);
+                ends_after.set(i, input->info()->dimension(i) - _padding[i].second - 1);
+                strides.set(i, -1);
+            }
+
+            // Strided slice wraps negative indexes around to the end of the range,
+            // instead this should indicate use of the full range and so the bit mask will be modified.
+            const int32_t begin_mask_before = starts_before[i] < 0 ? ~0 : ~(1u << i);
+            const int32_t end_mask_before   = ends_before[i] < 0 ? ~0 : ~(1u << i);
+            const int32_t begin_mask_after  = starts_after[i] < 0 ? ~0 : ~(1u << i);
+            const int32_t end_mask_after    = ends_after[i] < 0 ? ~0 : ~(1u << i);
+
+            // Reflect the input values for the padding before and after the input.
+            std::vector<ITensor *> concat_vector;
+            if(_padding[i].first > 0)
+            {
+                if(i < prev->info()->num_dimensions())
+                {
+                    _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
+                    concat_vector.push_back(&_slice_results[2 * i]);
+                }
+                else
+                {
+                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
+                    concat_vector.push_back(prev);
+                }
+            }
+            concat_vector.push_back(prev);
+            if(_padding[i].second > 0)
+            {
+                if(i < prev->info()->num_dimensions())
+                {
+                    _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
+                    concat_vector.push_back(&_slice_results[2 * i + 1]);
+                }
+                else
+                {
+                    // Performing the slice is unnecessary if the result would simply be a copy of the tensor.
+                    concat_vector.push_back(prev);
+                }
+            }
+            // Concatenate the padding before and after with the input.
+            ITensor *out = (i == _num_dimensions - 1) ? output : &_concat_results[i];
+            _concat_functions[i].configure(concat_vector, out, get_index_data_layout_dimension(input->info()->data_layout(), i));
+            if(i != _num_dimensions - 1)
+            {
+                _concat_results[i].allocator()->allocate();
+            }
+            prev = out;
+        }
+        _slice_results[2 * i].allocator()->allocate();
+        _slice_results[2 * i + 1].allocator()->allocate();
+    }
+}
+
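
To make the starts/ends/strides arithmetic above concrete, here is a worked 1-D example (a sketch under assumed values, not code from the patch). With stride -1, the strided slice walks from start down to the exclusive end, producing the slice already reversed:

    // Hypothetical 1-D input [1, 2, 3, 4] (dim = 4), _padding[0] = {2, 2}:
    //
    // REFLECT (edge element not repeated):
    //   starts_before = 2, ends_before = 0        -> indices 2,1 -> values [3, 2]
    //   starts_after = dim - 2 = 2,
    //   ends_after = dim - 2 - 2 = 0               -> indices 2,1 -> values [3, 2]
    //   concatenated: [3, 2, 1, 2, 3, 4, 3, 2]
    //
    // SYMMETRIC (edge element repeated):
    //   starts_before = 1, ends_before = -1 (full range via the end bit mask)
    //                                              -> indices 1,0 -> values [2, 1]
    //   starts_after = dim - 1 = 3,
    //   ends_after = dim - 2 - 1 = 1               -> indices 3,2 -> values [4, 3]
    //   concatenated: [2, 1, 1, 2, 3, 4, 4, 3]
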
+void NEPadLayer::configure(ITensor *input, ITensor *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
+{
+    ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), padding, constant_value, mode));
+
+    _padding = padding;
+    _mode    = mode;
+
+    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), _padding);
+
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(padded_shape));
+
+    // Find the last dimension requiring padding so that it is known when to write to output and whether any padding is applied.
+    _num_dimensions = last_padding_dimension(padding) + 1;
+    if(_num_dimensions > 0)
+    {
+        switch(_mode)
+        {
+            case PaddingMode::CONSTANT:
+            {
+                configure_constant_mode(input, output, padding, constant_value);
+                break;
+            }
+            case PaddingMode::REFLECT:
+            case PaddingMode::SYMMETRIC:
+            {
+                configure_reflect_symmetric_mode(input, output);
+                break;
+            }
+            default:
+                ARM_COMPUTE_ERROR("Padding mode not supported.");
+        }
+    }
+    else
+    {
+        // Copy the input to the whole output if no padding is applied
+        _copy_kernel.configure(input, output);
+    }
+}
+
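
For context, a minimal usage sketch of the extended interface (the tensor names, shape, and data type here are assumptions for illustration, not taken from the patch):

    #include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Pad a 32x32 F32 tensor by one element on every side of DimX and DimY.
    // REFLECT ignores the constant value, so a default PixelValue suffices.
    Tensor src{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::F32));

    NEPadLayer pad{};
    pad.configure(&src, &dst, PaddingList{ { 1, 1 }, { 1, 1 } }, PixelValue(), PaddingMode::REFLECT);

    src.allocator()->allocate(); // dst was auto-initialized to 34x34 by configure()
    dst.allocator()->allocate();
    // ... fill src ...
    pad.run();
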
+Status NEPadLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, const PixelValue constant_value, const PaddingMode mode)
{
    ARM_COMPUTE_UNUSED(constant_value);
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    auto output_clone = output->clone();
+    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);
-    SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true);
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding));
-    ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info));
+    if(output->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), padded_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+    switch(mode)
+    {
+        case PaddingMode::CONSTANT:
+        {
+            auto output_clone = output->clone();
+            SubTensorInfo output_subtensor_info(output_clone.get(), input->tensor_shape(), get_subtensor_coords(padding), true);
+            ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input, *output_clone, padding));
+            ARM_COMPUTE_RETURN_ON_ERROR(NECopyKernel::validate(input, &output_subtensor_info));
+            break;
+        }
+        case PaddingMode::REFLECT:
+        case PaddingMode::SYMMETRIC:
+        {
+            for(uint32_t i = 0; i < padding.size(); ++i)
+            {
+                if(mode == PaddingMode::REFLECT)
+                {
+                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first >= input->dimension(i));
+                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second >= input->dimension(i));
+                }
+                else
+                {
+                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].first > input->dimension(i));
+                    ARM_COMPUTE_RETURN_ERROR_ON(padding[i].second > input->dimension(i));
+                }
+            }
+            break;
+        }
+        default:
+        {
+            ARM_COMPUTE_ERROR("Invalid mode");
+        }
+    }
    return Status{};
}
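
The bounds above encode the usual reflect/symmetric constraint: REFLECT can mirror at most dimension(i) - 1 elements, since the edge itself is not repeated, while SYMMETRIC can mirror up to dimension(i). A sketch of how this surfaces through validate(), reusing src from the sketch above and assuming dimension(0) == 32:

    // For an input with dimension(i) == 32:
    //   REFLECT:   padding up to {31, 31} is valid (edge not included in the mirror)
    //   SYMMETRIC: padding up to {32, 32} is valid (edge included in the mirror)
    TensorInfo padded_out{}; // uninitialized, so only the padding bounds are checked
    const Status s = NEPadLayer::validate(src.info(), &padded_out,
                                          PaddingList{ { 32, 0 } }, PixelValue(), PaddingMode::REFLECT);
    // Fails: REFLECT requires padding[i] < dimension(i), and 32 >= 32.
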
void NEPadLayer::run()
{
-    NEScheduler::get().schedule(&_memset_kernel, Window::DimY);
-    NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+    if(_num_dimensions > 0)
+    {
+        switch(_mode)
+        {
+            case PaddingMode::CONSTANT:
+            {
+                NEScheduler::get().schedule(&_memset_kernel, Window::DimY);
+                NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+                break;
+            }
+            case PaddingMode::REFLECT:
+            case PaddingMode::SYMMETRIC:
+            {
+                for(uint32_t i = 0; i < _num_dimensions; ++i)
+                {
+                    if(_padding[i].first > 0 || _padding[i].second > 0)
+                    {
+                        if(_padding[i].first > 0 && _slice_results[2 * i].info()->total_size() > 0)
+                        {
+                            _slice_functions[2 * i].run();
+                        }
+                        if(_padding[i].second > 0 && _slice_results[2 * i + 1].info()->total_size() > 0)
+                        {
+                            _slice_functions[2 * i + 1].run();
+                        }
+                        _concat_functions[i].run();
+                    }
+                }
+                break;
+            }
+            default:
+                ARM_COMPUTE_ERROR("Padding mode not supported.");
+        }
+    }
+    else
+    {
+        NEScheduler::get().schedule(&_copy_kernel, Window::DimY);
+    }
}
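
Tying run() back to the configure-time sketches above (a hypothetical 32x32 input padded {1, 1} on DimX and DimY), the per-dimension pipeline replays in the order it was configured:

    // i = 0 (DimX): _slice_functions[0].run();   // before-pad slice, 1x32
    //               _slice_functions[1].run();   // after-pad slice, 1x32
    //               _concat_functions[0].run();  // -> 34x32 intermediate
    // i = 1 (DimY): _slice_functions[2].run();   // before-pad slice, 34x1
    //               _slice_functions[3].run();   // after-pad slice, 34x1
    //               _concat_functions[1].run();  // -> 34x34 final output
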
} // namespace arm_compute