aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2020-07-13 21:21:33 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2020-07-14 14:28:46 +0000
commit4667dddc0ed403c636348294cd7f70261e5540cf (patch)
tree177b74f377dcbb32cf8a83d407c633df255665a0 /src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp
parent2232a201a9f72de483c12a7857c5f08b81cf7396 (diff)
downloadComputeLibrary-4667dddc0ed403c636348294cd7f70261e5540cf.tar.gz
COMPMID-3374: Remove memory state from NEConcatenateLayer kernels
* Allow the following kernels to accept backing memory at run-time:
  * NEBatchConcatenateLayerKernel
  * NEDepthConcatenateLayerKernel
  * NEHeightConcatenateLayerKernel
  * NEWidthConcatenateLayerKernel
* Allow the following functions to accept backing memory at run-time:
  * NEConcatenateLayer

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ib0b6714cff7f06a52dc74d294bc3e0d72a1c2419
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3569
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp')
-rw-r--r--src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp39
1 file changed, 21 insertions, 18 deletions
diff --git a/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp
index 0adf996cca..d4043e02b7 100644
--- a/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEHeightConcatenateLayerKernel.cpp
@@ -58,24 +58,23 @@ Status validate_arguments(const ITensorInfo *input, unsigned int height_offset,
} // namespace
NEHeightConcatenateLayerKernel::NEHeightConcatenateLayerKernel()
- : _input(nullptr), _output(nullptr), _height_offset(0)
+ : _height_offset(0)
{
}
-void NEHeightConcatenateLayerKernel::configure(const ITensor *input, unsigned int height_offset, ITensor *output)
+void NEHeightConcatenateLayerKernel::configure(const ITensorInfo *input, unsigned int height_offset, ITensorInfo *output)
{
+ ARM_COMPUTE_UNUSED(input);
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), height_offset, output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, height_offset, output));
- _input = input;
- _output = output;
_height_offset = height_offset;
// Configure kernel window
- Window win = calculate_max_window(*output->info(), Steps());
+ Window win = calculate_max_window(*output, Steps());
Coordinates coord;
- coord.set_num_dimensions(output->info()->num_dimensions());
- output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
+ coord.set_num_dimensions(output->num_dimensions());
+ output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
INEKernel::configure(win);
}
@@ -85,30 +84,34 @@ Status NEHeightConcatenateLayerKernel::validate(const ITensorInfo *input, unsign
return Status{};
}
-void NEHeightConcatenateLayerKernel::run(const Window &window, const ThreadInfo &info)
+void NEHeightConcatenateLayerKernel::run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs,
+ const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ const auto src = inputs.at(TensorType::ACL_SRC);
+ auto dst = outputs.at(TensorType::ACL_DST);
+
// Offset output pointer to the correct position
- uint8_t *output_ptr = _output->buffer() + _output->info()->offset_first_element_in_bytes() + _height_offset * _output->info()->strides_in_bytes()[Window::DimY];
+ uint8_t *output_ptr = dst->buffer() + dst->info()->offset_first_element_in_bytes() + _height_offset * dst->info()->strides_in_bytes()[Window::DimY];
const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end()) * static_cast<int>(_output->info()->element_size());
- const int window_step_x = 16;
+ const auto window_end_x = static_cast<int>(window.x().end()) * static_cast<int>(dst->info()->element_size());
+ const int window_step_x = 16;
Window win{ window };
win.set(Window::DimX, Window::Dimension(0, 1, 1));
- win.set(Window::DimY, Window::Dimension(0, _input->info()->tensor_shape().y(), 1));
+ win.set(Window::DimY, Window::Dimension(0, src->info()->tensor_shape().y(), 1));
// Create iterators
- Iterator input(_input, win);
- Iterator output(_output, win);
+ Iterator input(src, win);
+ Iterator output(dst, win);
- const DataType dt = _input->info()->data_type();
- const UniformQuantizationInfo &input_qinfo = _input->info()->quantization_info().uniform();
- const UniformQuantizationInfo &output_qinfo = _output->info()->quantization_info().uniform();
+ const DataType dt = src->info()->data_type();
+ const UniformQuantizationInfo &input_qinfo = src->info()->quantization_info().uniform();
+ const UniformQuantizationInfo &output_qinfo = dst->info()->quantization_info().uniform();
if(dt == DataType::QASYMM8 && input_qinfo != output_qinfo)
{
execute_window_loop(win, [&](const Coordinates &)