author	Michalis Spyrou <michalis.spyrou@arm.com>	2019-05-01 13:03:59 +0100
committer	Michalis Spyrou <michalis.spyrou@arm.com>	2019-05-07 15:35:57 +0000
commit	299fdd31bd8e1add3ac557a5e630de55b1b6659c (patch)
tree	a2ecde5f639d10789fed84ee9cc300abf4b6e03a /src/runtime/NEON/functions
parent	dc5d34319a673f6cbcd346a0c7046fb7fd0106ec (diff)
COMPMID-2177 Fix clang warnings
Change-Id: I4beacfd714ee3ed771fd174cce5d8009a2961380
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1065
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/NEON/functions')
-rw-r--r--	src/runtime/NEON/functions/NECropResize.cpp	39
-rw-r--r--	src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp	18
-rw-r--r--	src/runtime/NEON/functions/NEGaussianPyramid.cpp	52
-rw-r--r--	src/runtime/NEON/functions/NEHOGMultiDetection.cpp	50
-rw-r--r--	src/runtime/NEON/functions/NEOpticalFlow.cpp	47
5 files changed, 124 insertions, 82 deletions
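
Every file in this change applies the same refactoring: fixed-size heap arrays created with make_unique<T[]> become std::vector<std::unique_ptr<T>>, built up with reserve() plus emplace_back(). A minimal sketch of the before/after pattern, assuming an illustrative Kernel type (the library itself uses support::cpp14::make_unique because it targets C++11; std::make_unique below is the C++14 equivalent):

#include <memory>
#include <utility>
#include <vector>

// Illustrative stand-in for an arm_compute kernel type.
struct Kernel
{
    void configure(int i) { (void)i; /* per-kernel setup */ }
    void run() {}
};

int main()
{
    const int num_kernels = 4;

    // Before: a heap array; every element is default-constructed up front
    // and the element count must be tracked separately.
    auto kernels_before = std::make_unique<Kernel[]>(num_kernels);
    kernels_before[0].configure(0);

    // After: each kernel is created, configured, then moved into a vector,
    // so ownership and size travel together.
    std::vector<std::unique_ptr<Kernel>> kernels_after;
    kernels_after.reserve(num_kernels);
    for(int i = 0; i < num_kernels; ++i)
    {
        auto k = std::make_unique<Kernel>();
        k->configure(i);
        kernels_after.emplace_back(std::move(k));
    }

    for(auto &k : kernels_after)
    {
        k->run();
    }
    return 0;
}

With the vector form, element access goes through operator->() and scheduling calls take kernel.get(), which is the shape of most of the +/- pairs in the hunks below.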
diff --git a/src/runtime/NEON/functions/NECropResize.cpp b/src/runtime/NEON/functions/NECropResize.cpp
index 4360b50dfb..cc39d0284e 100644
--- a/src/runtime/NEON/functions/NECropResize.cpp
+++ b/src/runtime/NEON/functions/NECropResize.cpp
@@ -30,7 +30,7 @@
namespace arm_compute
{
NECropResize::NECropResize()
- : _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _crop(), _scale()
+ : _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _crop(), _scale(), _crop_results(), _scaled_results()
{
}
@@ -70,22 +70,31 @@ void NECropResize::configure(const ITensor *input, const ITensor *boxes, const I
// - A scale function is used to resize the cropped image to the size specified by crop_size.
// - A tensor is required to hold the final scaled image before it is copied into the 4D output
// that will hold all final cropped and scaled 3D images.
- _crop = arm_compute::support::cpp14::make_unique<NECropKernel[]>(_num_boxes);
- _crop_results = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_boxes);
- _scale = arm_compute::support::cpp14::make_unique<NEScale[]>(_num_boxes);
- _scaled_results = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_boxes);
+ _crop.reserve(_num_boxes);
+ _crop_results.reserve(_num_boxes);
+ _scaled_results.reserve(_num_boxes);
+ _scale.reserve(_num_boxes);
for(unsigned int i = 0; i < _num_boxes; ++i)
{
+ auto crop_tensor = support::cpp14::make_unique<Tensor>();
TensorInfo crop_result_info(1, DataType::F32);
crop_result_info.set_data_layout(DataLayout::NHWC);
- _crop_results[i].allocator()->init(crop_result_info);
+ crop_tensor->allocator()->init(crop_result_info);
+ auto scale_tensor = support::cpp14::make_unique<Tensor>();
TensorInfo scaled_result_info(out_shape, 1, DataType::F32);
scaled_result_info.set_data_layout(DataLayout::NHWC);
- _scaled_results[i].allocator()->init(scaled_result_info);
+ scale_tensor->allocator()->init(scaled_result_info);
- _crop[i].configure(input, boxes, box_ind, &_crop_results[i], i, _extrapolation_value);
+ auto crop_kernel = support::cpp14::make_unique<NECropKernel>();
+ auto scale_kernel = support::cpp14::make_unique<NEScale>();
+ crop_kernel->configure(input, boxes, box_ind, crop_tensor.get(), i, _extrapolation_value);
+
+ _crop.emplace_back(std::move(crop_kernel));
+ _scaled_results.emplace_back(std::move(scale_tensor));
+ _crop_results.emplace_back(std::move(crop_tensor));
+ _scale.emplace_back(std::move(scale_kernel));
}
}
@@ -97,17 +106,17 @@ void NECropResize::run()
{
// Size of the crop box in _boxes and thus the shape of _crop_results[i]
// may not be known until run-time and so the kernels cannot be configured until then.
- _crop[i].configure_output_shape();
- _crop_results[i].allocator()->allocate();
- NEScheduler::get().schedule(&_crop[i], Window::DimZ);
+ _crop[i]->configure_output_shape();
+ _crop_results[i]->allocator()->allocate();
+ NEScheduler::get().schedule(_crop[i].get(), Window::DimZ);
// Scale the cropped image.
- _scale[i].configure(&_crop_results[i], &_scaled_results[i], _method, BorderMode::CONSTANT, PixelValue(_extrapolation_value), SamplingPolicy::TOP_LEFT, false);
- _scaled_results[i].allocator()->allocate();
- _scale[i].run();
+ _scale[i]->configure(_crop_results[i].get(), _scaled_results[i].get(), _method, BorderMode::CONSTANT, PixelValue(_extrapolation_value), SamplingPolicy::TOP_LEFT, false);
+ _scaled_results[i]->allocator()->allocate();
+ _scale[i]->run();
// Copy scaled image into output.
- std::copy_n(_scaled_results[i].buffer(), _scaled_results[i].info()->total_size(), _output->ptr_to_element(Coordinates(0, 0, 0, i)));
+ std::copy_n(_scaled_results[i]->buffer(), _scaled_results[i]->info()->total_size(), _output->ptr_to_element(Coordinates(0, 0, 0, i)));
}
}
} // namespace arm_compute
\ No newline at end of file
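
The run() hunk keeps the original reasoning: crop box sizes are data-dependent, so each kernel's output shape is only fixed at run time and configuration finishes inside run() rather than configure(). A minimal sketch of that configure-late pattern, with CropKernel as an illustrative stand-in (configure_output_shape mirrors the NECropKernel call in the hunk above):

#include <cstddef>
#include <memory>
#include <vector>

// Stand-in for a kernel whose output shape depends on run-time inputs.
struct CropKernel
{
    void configure_output_shape() { /* derive the shape from the box just read */ }
    void run() {}
};

struct CropFunction
{
    std::vector<std::unique_ptr<CropKernel>> crop;

    void run()
    {
        for(std::size_t i = 0; i < crop.size(); ++i)
        {
            // The shape is only known now, so finish configuring, then run.
            crop[i]->configure_output_shape();
            crop[i]->run();
        }
    }
};

int main()
{
    CropFunction f;
    f.crop.emplace_back(std::make_unique<CropKernel>());
    f.run();
    return 0;
}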
diff --git a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
index b814bffa96..8f070a2d7d 100644
--- a/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthConcatenateLayer.cpp
@@ -45,9 +45,7 @@ NEDepthConcatenateLayer::NEDepthConcatenateLayer() // NOLINT
void NEDepthConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output) // NOLINT
{
- _num_inputs = inputs_vector.size();
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<NEDepthConcatenateLayerKernel[]>(_num_inputs);
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<NEFillBorderKernel[]>(_num_inputs);
+ _num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info;
for(unsigned int i = 0; i < _num_inputs; i++)
@@ -61,10 +59,16 @@ void NEDepthConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vec
ARM_COMPUTE_ERROR_THROW_ON(NEDepthConcatenateLayer::validate(inputs_vector_info, output->info()));
unsigned int depth_offset = 0;
+ _concat_kernels_vector.reserve(_num_inputs);
+ _border_handlers_vector.reserve(_num_inputs);
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- _concat_kernels_vector[i].configure(inputs_vector.at(i), depth_offset, output);
- _border_handlers_vector[i].configure(inputs_vector.at(i), _concat_kernels_vector[i].border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
+ auto concat_kernel = support::cpp14::make_unique<NEDepthConcatenateLayerKernel>();
+ auto border_kernel = support::cpp14::make_unique<NEFillBorderKernel>();
+ concat_kernel->configure(inputs_vector.at(i), depth_offset, output);
+ border_kernel->configure(inputs_vector.at(i), concat_kernel->border_size(), BorderMode::CONSTANT, PixelValue(static_cast<float>(0.f)));
+ _border_handlers_vector.emplace_back(std::move(border_kernel));
+ _concat_kernels_vector.emplace_back(std::move(concat_kernel));
depth_offset += inputs_vector.at(i)->info()->dimension(2);
}
@@ -98,7 +102,7 @@ void NEDepthConcatenateLayer::run()
{
for(unsigned i = 0; i < _num_inputs; ++i)
{
- NEScheduler::get().schedule(&_border_handlers_vector[i], Window::DimX);
- NEScheduler::get().schedule(&_concat_kernels_vector[i], Window::DimX);
+ NEScheduler::get().schedule(_border_handlers_vector[i].get(), Window::DimX);
+ NEScheduler::get().schedule(_concat_kernels_vector[i].get(), Window::DimX);
}
}
diff --git a/src/runtime/NEON/functions/NEGaussianPyramid.cpp b/src/runtime/NEON/functions/NEGaussianPyramid.cpp
index 8a85bba68b..0dbcb1234f 100644
--- a/src/runtime/NEON/functions/NEGaussianPyramid.cpp
+++ b/src/runtime/NEON/functions/NEGaussianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,11 +74,6 @@ void NEGaussianPyramidHalf::configure(const ITensor *input, IPyramid *pyramid, B
if(num_levels > 1)
{
- _horizontal_border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel[]>(num_levels - 1);
- _vertical_border_handler = arm_compute::support::cpp14::make_unique<NEFillBorderKernel[]>(num_levels - 1);
- _horizontal_reduction = arm_compute::support::cpp14::make_unique<NEGaussianPyramidHorKernel[]>(num_levels - 1);
- _vertical_reduction = arm_compute::support::cpp14::make_unique<NEGaussianPyramidVertKernel[]>(num_levels - 1);
-
// Apply half scale to the X dimension of the tensor shape
TensorShape tensor_shape = pyramid->info()->tensor_shape();
tensor_shape.set(0, (pyramid->info()->width() + 1) * SCALE_PYRAMID_HALF);
@@ -86,19 +81,33 @@ void NEGaussianPyramidHalf::configure(const ITensor *input, IPyramid *pyramid, B
PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_HALF, tensor_shape, Format::S16);
_tmp.init(pyramid_info);
+ _horizontal_reduction.reserve(num_levels);
+ _vertical_reduction.reserve(num_levels);
+ _horizontal_border_handler.reserve(num_levels);
+ _vertical_border_handler.reserve(num_levels);
+
for(unsigned int i = 0; i < num_levels - 1; ++i)
{
/* Configure horizontal kernel */
- _horizontal_reduction[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
+ auto horizontal_kernel = support::cpp14::make_unique<NEGaussianPyramidHorKernel>();
+ horizontal_kernel->configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
/* Configure vertical kernel */
- _vertical_reduction[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
+ auto vertical_kernel = support::cpp14::make_unique<NEGaussianPyramidVertKernel>();
+ vertical_kernel->configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
/* Configure border */
- _horizontal_border_handler[i].configure(_pyramid->get_pyramid_level(i), _horizontal_reduction[i].border_size(), border_mode, PixelValue(constant_border_value));
+ auto horizontal_border_kernel = support::cpp14::make_unique<NEFillBorderKernel>();
+ horizontal_border_kernel->configure(_pyramid->get_pyramid_level(i), horizontal_kernel->border_size(), border_mode, PixelValue(constant_border_value));
/* Configure border */
- _vertical_border_handler[i].configure(_tmp.get_pyramid_level(i), _vertical_reduction[i].border_size(), border_mode, PixelValue(pixel_value_u16));
+ auto vertical_border_kernel = support::cpp14::make_unique<NEFillBorderKernel>();
+ vertical_border_kernel->configure(_tmp.get_pyramid_level(i), vertical_kernel->border_size(), border_mode, PixelValue(pixel_value_u16));
+
+ _vertical_border_handler.emplace_back(std::move(vertical_border_kernel));
+ _horizontal_border_handler.emplace_back(std::move(horizontal_border_kernel));
+ _vertical_reduction.emplace_back(std::move(vertical_kernel));
+ _horizontal_reduction.emplace_back(std::move(horizontal_kernel));
}
_tmp.allocate();
@@ -117,10 +126,10 @@ void NEGaussianPyramidHalf::run()
for(unsigned int i = 0; i < num_levels - 1; ++i)
{
- NEScheduler::get().schedule(_horizontal_border_handler.get() + i, Window::DimZ);
- NEScheduler::get().schedule(_horizontal_reduction.get() + i, Window::DimY);
- NEScheduler::get().schedule(_vertical_border_handler.get() + i, Window::DimZ);
- NEScheduler::get().schedule(_vertical_reduction.get() + i, Window::DimY);
+ NEScheduler::get().schedule(_horizontal_border_handler[i].get(), Window::DimZ);
+ NEScheduler::get().schedule(_horizontal_reduction[i].get(), Window::DimY);
+ NEScheduler::get().schedule(_vertical_border_handler[i].get(), Window::DimZ);
+ NEScheduler::get().schedule(_vertical_reduction[i].get(), Window::DimY);
}
}
@@ -147,19 +156,20 @@ void NEGaussianPyramidOrb::configure(const ITensor *input, IPyramid *pyramid, Bo
if(num_levels > 1)
{
- _gaus5x5 = arm_compute::support::cpp14::make_unique<NEGaussian5x5[]>(num_levels - 1);
- _scale_nearest = arm_compute::support::cpp14::make_unique<NEScale[]>(num_levels - 1);
-
PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
_tmp.init(pyramid_info);
for(unsigned int i = 0; i < num_levels - 1; ++i)
{
/* Configure gaussian 5x5 */
- _gaus5x5[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode, constant_border_value);
+ auto gaus5x5_kernel = support::cpp14::make_unique<NEGaussian5x5>();
+ gaus5x5_kernel->configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode, constant_border_value);
+ _gaus5x5.emplace_back(std::move(gaus5x5_kernel));
/* Configure scale */
- _scale_nearest[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED);
+ auto scale_kernel = support::cpp14::make_unique<NEScale>();
+ scale_kernel->configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED);
+ _scale_nearest.emplace_back(std::move(scale_kernel));
}
_tmp.allocate();
@@ -178,7 +188,7 @@ void NEGaussianPyramidOrb::run()
for(unsigned int i = 0; i < num_levels - 1; ++i)
{
- _gaus5x5[i].run();
- _scale_nearest[i].run();
+ _gaus5x5[i].get()->run();
+ _scale_nearest[i].get()->run();
}
}
diff --git a/src/runtime/NEON/functions/NEHOGMultiDetection.cpp b/src/runtime/NEON/functions/NEHOGMultiDetection.cpp
index 6a6d04573c..26abc9d297 100644
--- a/src/runtime/NEON/functions/NEHOGMultiDetection.cpp
+++ b/src/runtime/NEON/functions/NEHOGMultiDetection.cpp
@@ -126,12 +126,12 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
_num_block_norm_kernel = input_block_norm.size(); // Number of NEHOGBlockNormalizationKernel kernels to compute
_num_hog_detect_kernel = input_hog_detect.size(); // Number of NEHOGDetector functions to compute
- _orient_bin_kernel = arm_compute::support::cpp14::make_unique<NEHOGOrientationBinningKernel[]>(_num_orient_bin_kernel);
- _block_norm_kernel = arm_compute::support::cpp14::make_unique<NEHOGBlockNormalizationKernel[]>(_num_block_norm_kernel);
- _hog_detect_kernel = arm_compute::support::cpp14::make_unique<NEHOGDetector[]>(_num_hog_detect_kernel);
+ _orient_bin_kernel.reserve(_num_orient_bin_kernel);
+ _block_norm_kernel.reserve(_num_block_norm_kernel);
+ _hog_detect_kernel.reserve(_num_hog_detect_kernel);
+ _hog_space.reserve(_num_orient_bin_kernel);
+ _hog_norm_space.reserve(_num_block_norm_kernel);
_non_maxima_kernel = arm_compute::support::cpp14::make_unique<CPPDetectionWindowNonMaximaSuppressionKernel>();
- _hog_space = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_orient_bin_kernel);
- _hog_norm_space = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_block_norm_kernel);
// Allocate tensors for magnitude and phase
TensorInfo info_mag(shape_img, Format::S16);
@@ -167,13 +167,17 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
// Allocate HOG space
TensorInfo info_space(shape_hog_space, num_bins, DataType::F32);
- _hog_space[i].allocator()->init(info_space);
+ auto hog_space_tensor = support::cpp14::make_unique<Tensor>();
+ hog_space_tensor->allocator()->init(info_space);
// Manage intermediate buffers
- _memory_group.manage(_hog_space.get() + i);
+ _memory_group.manage(hog_space_tensor.get());
// Initialise orientation binning kernel
- _orient_bin_kernel[i].configure(&_mag, &_phase, _hog_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ auto orient_bin_kernel = support::cpp14::make_unique<NEHOGOrientationBinningKernel>();
+ orient_bin_kernel->configure(&_mag, &_phase, hog_space_tensor.get(), multi_hog->model(idx_multi_hog)->info());
+ _orient_bin_kernel.emplace_back(std::move(orient_bin_kernel));
+ _hog_space.emplace_back(std::move(hog_space_tensor));
}
// Allocate intermediate tensors
@@ -188,19 +192,23 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
// Allocate normalized HOG space
TensorInfo tensor_info(*(multi_hog->model(idx_multi_hog)->info()), width, height);
- _hog_norm_space[i].allocator()->init(tensor_info);
+ auto hog_norm_space_tensor = support::cpp14::make_unique<Tensor>();
+ hog_norm_space_tensor->allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_hog_norm_space.get() + i);
+ _memory_group.manage(hog_norm_space_tensor.get());
// Initialize block normalization kernel
- _block_norm_kernel[i].configure(_hog_space.get() + idx_orient_bin, _hog_norm_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ auto block_norm_kernel = support::cpp14::make_unique<NEHOGBlockNormalizationKernel>();
+ block_norm_kernel->configure(_hog_space[idx_orient_bin].get(), hog_norm_space_tensor.get(), multi_hog->model(idx_multi_hog)->info());
+ _block_norm_kernel.emplace_back(std::move(block_norm_kernel));
+ _hog_norm_space.emplace_back(std::move(hog_norm_space_tensor));
}
// Allocate intermediate tensors
for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
{
- _hog_space[i].allocator()->allocate();
+ _hog_space[i].get()->allocator()->allocate();
}
// Configure HOG detector kernel
@@ -208,7 +216,9 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
{
const size_t idx_block_norm = input_hog_detect[i];
- _hog_detect_kernel[i].configure(_hog_norm_space.get() + idx_block_norm, multi_hog->model(i), detection_windows, detection_window_strides->at(i), threshold, i);
+ auto hog_detect_kernel = support::cpp14::make_unique<NEHOGDetector>();
+ hog_detect_kernel->configure(_hog_norm_space[idx_block_norm].get(), multi_hog->model(i), detection_windows, detection_window_strides->at(i), threshold, i);
+ _hog_detect_kernel.emplace_back(std::move(hog_detect_kernel));
}
// Configure non maxima suppression kernel
@@ -217,7 +227,7 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
// Allocate intermediate tensors
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
{
- _hog_norm_space[i].allocator()->allocate();
+ _hog_norm_space[i]->allocator()->allocate();
}
}
@@ -234,21 +244,21 @@ void NEHOGMultiDetection::run()
_gradient_kernel.run();
// Run orientation binning kernel
- for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
+ for(auto &kernel : _orient_bin_kernel)
{
- NEScheduler::get().schedule(_orient_bin_kernel.get() + i, Window::DimY);
+ NEScheduler::get().schedule(kernel.get(), Window::DimY);
}
// Run block normalization kernel
- for(size_t i = 0; i < _num_block_norm_kernel; ++i)
+ for(auto &kernel : _block_norm_kernel)
{
- NEScheduler::get().schedule(_block_norm_kernel.get() + i, Window::DimY);
+ NEScheduler::get().schedule(kernel.get(), Window::DimY);
}
// Run HOG detector kernel
- for(size_t i = 0; i < _num_hog_detect_kernel; ++i)
+ for(auto &kernel : _hog_detect_kernel)
{
- _hog_detect_kernel[i].run();
+ kernel->run();
}
// Run non-maxima suppression kernel if enabled
diff --git a/src/runtime/NEON/functions/NEOpticalFlow.cpp b/src/runtime/NEON/functions/NEOpticalFlow.cpp
index db77629ef2..0df01c677d 100644
--- a/src/runtime/NEON/functions/NEOpticalFlow.cpp
+++ b/src/runtime/NEON/functions/NEOpticalFlow.cpp
@@ -74,10 +74,10 @@ void NEOpticalFlow::configure(const Pyramid *old_pyramid, const Pyramid *new_pyr
const float pyr_scale = old_pyramid->info()->scale();
- _func_scharr = arm_compute::support::cpp14::make_unique<NEScharr3x3[]>(_num_levels);
- _kernel_tracker = arm_compute::support::cpp14::make_unique<NELKTrackerKernel[]>(_num_levels);
- _scharr_gx = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_levels);
- _scharr_gy = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_levels);
+ _func_scharr.reserve(_num_levels);
+ _kernel_tracker.reserve(_num_levels);
+ _scharr_gx.reserve(_num_levels);
+ _scharr_gy.reserve(_num_levels);
_old_points_internal = LKInternalKeypointArray(old_points->num_values());
_new_points_internal = LKInternalKeypointArray(old_points->num_values());
@@ -95,25 +95,34 @@ void NEOpticalFlow::configure(const Pyramid *old_pyramid, const Pyramid *new_pyr
TensorInfo tensor_info(TensorShape(width_ith, height_ith), Format::S16);
- _scharr_gx[i].allocator()->init(tensor_info);
- _scharr_gy[i].allocator()->init(tensor_info);
+ auto scharr_gx = support::cpp14::make_unique<Tensor>();
+ auto scharr_gy = support::cpp14::make_unique<Tensor>();
+ scharr_gx->allocator()->init(tensor_info);
+ scharr_gy->allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_scharr_gx.get() + i);
- _memory_group.manage(_scharr_gy.get() + i);
+ _memory_group.manage(scharr_gx.get());
+ _memory_group.manage(scharr_gy.get());
// Init Scharr kernel
- _func_scharr[i].configure(old_ith_input, _scharr_gx.get() + i, _scharr_gy.get() + i, border_mode, constant_border_value);
+ auto func_scharr = support::cpp14::make_unique<NEScharr3x3>();
+ func_scharr->configure(old_ith_input, scharr_gx.get(), scharr_gy.get(), border_mode, constant_border_value);
// Init Lucas-Kanade kernel
- _kernel_tracker[i].configure(old_ith_input, new_ith_input, _scharr_gx.get() + i, _scharr_gy.get() + i,
- old_points, new_points_estimates, new_points,
- &_old_points_internal, &_new_points_internal,
- termination, use_initial_estimate, epsilon, num_iterations, window_dimension,
- i, _num_levels, pyr_scale);
-
- _scharr_gx[i].allocator()->allocate();
- _scharr_gy[i].allocator()->allocate();
+ auto kernel_tracker = support::cpp14::make_unique<NELKTrackerKernel>();
+ kernel_tracker->configure(old_ith_input, new_ith_input, scharr_gx.get(), scharr_gy.get(),
+ old_points, new_points_estimates, new_points,
+ &_old_points_internal, &_new_points_internal,
+ termination, use_initial_estimate, epsilon, num_iterations, window_dimension,
+ i, _num_levels, pyr_scale);
+
+ scharr_gx->allocator()->allocate();
+ scharr_gy->allocator()->allocate();
+
+ _func_scharr.emplace_back(std::move(func_scharr));
+ _kernel_tracker.emplace_back(std::move(kernel_tracker));
+ _scharr_gx.emplace_back(std::move(scharr_gx));
+ _scharr_gy.emplace_back(std::move(scharr_gy));
}
}
@@ -126,9 +135,9 @@ void NEOpticalFlow::run()
for(unsigned int level = _num_levels; level > 0; --level)
{
// Run Scharr kernel
- _func_scharr[level - 1].run();
+ _func_scharr[level - 1].get()->run();
// Run Lucas-Kanade kernel
- NEScheduler::get().schedule(_kernel_tracker.get() + level - 1, Window::DimX);
+ NEScheduler::get().schedule(_kernel_tracker[level - 1].get(), Window::DimX);
}
}
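
Both NEHOGMultiDetection and NEOpticalFlow register their intermediate tensors with the memory group via the raw pointer before the owning unique_ptr is moved into its vector. That ordering is safe because moving a std::unique_ptr transfers ownership without relocating the object it points to. A self-contained sketch, with Tensor and MemoryGroup as simplified stand-ins for the library types:

#include <cassert>
#include <memory>
#include <utility>
#include <vector>

// Simplified stand-ins for arm_compute's Tensor and MemoryGroup.
struct Tensor
{
};

struct MemoryGroup
{
    void manage(Tensor *t) { tracked.push_back(t); }
    std::vector<Tensor *> tracked;
};

int main()
{
    MemoryGroup group;
    std::vector<std::unique_ptr<Tensor>> tensors;

    auto t = std::make_unique<Tensor>();
    group.manage(t.get());              // register the raw pointer first
    tensors.emplace_back(std::move(t)); // the move does not relocate the Tensor

    // The pointer held by the memory group still refers to the same object.
    assert(group.tracked[0] == tensors.back().get());
    return 0;
}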