From 725b173d620726015cfebfd28356c1c1fa6e80b9 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Mon, 20 May 2019 19:40:47 +0100
Subject: COMPMID-2214: Remove std::vector<std::unique_ptr<T>> where possible.

Change-Id: I6569aa64a4976966445ed7646129c36fe4654cd9
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/1190
Comments-Addressed: Arm Jenkins
Reviewed-by: Michalis Spyrou
Tested-by: Arm Jenkins
---
 .../runtime/NEON/functions/NEGaussianPyramid.h     | 14 ++---
 .../runtime/NEON/functions/NEHOGMultiDetection.h   | 33 ++++++-----
 arm_compute/runtime/NEON/functions/NEOpticalFlow.h | 24 ++++----
 src/runtime/NEON/functions/NEGaussianPyramid.cpp   | 64 +++++++++++-----------
 src/runtime/NEON/functions/NEHOGMultiDetection.cpp | 56 +++++++++----------
 src/runtime/NEON/functions/NEOpticalFlow.cpp       | 52 ++++++++----------
 6 files changed, 117 insertions(+), 126 deletions(-)

diff --git a/arm_compute/runtime/NEON/functions/NEGaussianPyramid.h b/arm_compute/runtime/NEON/functions/NEGaussianPyramid.h
index 47fcd5e60f..f675d7299d 100644
--- a/arm_compute/runtime/NEON/functions/NEGaussianPyramid.h
+++ b/arm_compute/runtime/NEON/functions/NEGaussianPyramid.h
@@ -91,10 +91,10 @@ public:
     void run() override;
 
 private:
-    std::vector<std::unique_ptr<NEFillBorderKernel>>          _horizontal_border_handler;
-    std::vector<std::unique_ptr<NEFillBorderKernel>>          _vertical_border_handler;
-    std::vector<std::unique_ptr<NEGaussianPyramidHorKernel>>  _horizontal_reduction;
-    std::vector<std::unique_ptr<NEGaussianPyramidVertKernel>> _vertical_reduction;
+    std::vector<NEFillBorderKernel>          _horizontal_border_handler;
+    std::vector<NEFillBorderKernel>          _vertical_border_handler;
+    std::vector<NEGaussianPyramidHorKernel>  _horizontal_reduction;
+    std::vector<NEGaussianPyramidVertKernel> _vertical_reduction;
 };
 
 /** Basic function to execute gaussian pyramid with ORB scale factor. This function calls the following NEON kernels and functions:
@@ -115,8 +115,8 @@ public:
     void run() override;
 
 private:
-    std::vector<std::unique_ptr<NEGaussian5x5>> _gaus5x5;
-    std::vector<std::unique_ptr<NEScale>>       _scale_nearest;
+    std::vector<NEGaussian5x5> _gaus5x5;
+    std::vector<NEScale>       _scale_nearest;
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEGAUSSIANPYRAMID_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEHOGMultiDetection.h b/arm_compute/runtime/NEON/functions/NEHOGMultiDetection.h
index e21f4639c1..a1907fc762 100644
--- a/arm_compute/runtime/NEON/functions/NEHOGMultiDetection.h
+++ b/arm_compute/runtime/NEON/functions/NEHOGMultiDetection.h
@@ -89,22 +89,21 @@ public:
     void run() override;
 
 private:
-    MemoryGroup                                                    _memory_group;
-    NEHOGGradient                                                  _gradient_kernel;
-    std::vector<std::unique_ptr<NEHOGOrientationBinningKernel>>    _orient_bin_kernel;
-    std::vector<std::unique_ptr<NEHOGBlockNormalizationKernel>>    _block_norm_kernel;
-    std::vector<std::unique_ptr<NEHOGDetector>>                    _hog_detect_kernel;
-    std::unique_ptr<CPPDetectionWindowNonMaximaSuppressionKernel>  _non_maxima_kernel;
-    std::vector<std::unique_ptr<Tensor>>                           _hog_space;
-    std::vector<std::unique_ptr<Tensor>>                           _hog_norm_space;
-    IDetectionWindowArray                                         *_detection_windows;
-    Tensor                                                         _mag;
-    Tensor                                                         _phase;
-    bool                                                           _non_maxima_suppression;
-    size_t                                                         _num_orient_bin_kernel;
-    size_t                                                         _num_block_norm_kernel;
-    size_t                                                         _num_hog_detect_kernel;
+    MemoryGroup                                  _memory_group;
+    NEHOGGradient                                _gradient_kernel;
+    std::vector<NEHOGOrientationBinningKernel>   _orient_bin_kernel;
+    std::vector<NEHOGBlockNormalizationKernel>   _block_norm_kernel;
+    std::vector<NEHOGDetector>                   _hog_detect_kernel;
+    CPPDetectionWindowNonMaximaSuppressionKernel _non_maxima_kernel;
+    std::vector<Tensor>                          _hog_space;
+    std::vector<Tensor>                          _hog_norm_space;
+    IDetectionWindowArray                       *_detection_windows;
+    Tensor                                       _mag;
+    Tensor                                       _phase;
+    bool                                         _non_maxima_suppression;
+    size_t                                       _num_orient_bin_kernel;
+    size_t                                       _num_block_norm_kernel;
+    size_t                                       _num_hog_detect_kernel;
 };
-}
-
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_NEHOGMULTIDETECTION_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEOpticalFlow.h b/arm_compute/runtime/NEON/functions/NEOpticalFlow.h
index 320247d260..7480b085ae 100644
--- a/arm_compute/runtime/NEON/functions/NEOpticalFlow.h
+++ b/arm_compute/runtime/NEON/functions/NEOpticalFlow.h
@@ -86,17 +86,17 @@ public:
     void run() override;
 
 private:
-    MemoryGroup                                      _memory_group;
-    std::vector<std::unique_ptr<NEScharr3x3>>        _func_scharr;
-    std::vector<std::unique_ptr<NELKTrackerKernel>>  _kernel_tracker;
-    std::vector<std::unique_ptr<Tensor>>             _scharr_gx;
-    std::vector<std::unique_ptr<Tensor>>             _scharr_gy;
-    IKeyPointArray                                  *_new_points;
-    const IKeyPointArray                            *_new_points_estimates;
-    const IKeyPointArray                            *_old_points;
-    LKInternalKeypointArray                          _new_points_internal;
-    LKInternalKeypointArray                          _old_points_internal;
-    unsigned int                                     _num_levels;
+    MemoryGroup                    _memory_group;
+    std::vector<NEScharr3x3>       _func_scharr;
+    std::vector<NELKTrackerKernel> _kernel_tracker;
+    std::vector<Tensor>            _scharr_gx;
+    std::vector<Tensor>            _scharr_gy;
+    IKeyPointArray                *_new_points;
+    const IKeyPointArray          *_new_points_estimates;
+    const IKeyPointArray          *_old_points;
+    LKInternalKeypointArray        _new_points_internal;
+    LKInternalKeypointArray        _old_points_internal;
+    unsigned int                   _num_levels;
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEOPTICALFLOW_H__ */
diff --git a/src/runtime/NEON/functions/NEGaussianPyramid.cpp b/src/runtime/NEON/functions/NEGaussianPyramid.cpp
index 0dbcb1234f..fbf8812cdd 100644
--- a/src/runtime/NEON/functions/NEGaussianPyramid.cpp
+++ b/src/runtime/NEON/functions/NEGaussianPyramid.cpp
@@ -68,6 +68,7 @@ void NEGaussianPyramidHalf::configure(const ITensor *input, IPyramid *pyramid, B
 
     /* Get number of pyramid levels */
     const size_t num_levels = pyramid->info()->num_levels();
+    const size_t num_stages = num_levels - 1;
 
     _input   = input;
     _pyramid = pyramid;
@@ -81,33 +82,29 @@ void NEGaussianPyramidHalf::configure(const ITensor *input, IPyramid *pyramid, B
         PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_HALF, tensor_shape, Format::S16);
         _tmp.init(pyramid_info);
 
-        _horizontal_reduction.reserve(num_levels);
-        _vertical_reduction.reserve(num_levels);
-        _horizontal_border_handler.reserve(num_levels);
-        _vertical_border_handler.reserve(num_levels);
+        _horizontal_reduction.clear();
+        _vertical_reduction.clear();
+        _horizontal_border_handler.clear();
+        _vertical_border_handler.clear();
 
-        for(unsigned int i = 0; i < num_levels - 1; ++i)
+        _horizontal_reduction.resize(num_stages);
+        _vertical_reduction.resize(num_stages);
+        _horizontal_border_handler.resize(num_stages);
+        _vertical_border_handler.resize(num_stages);
+
+        for(size_t i = 0; i < num_stages; ++i)
         {
             /* Configure horizontal kernel */
-            auto horizontal_kernel = support::cpp14::make_unique<NEGaussianPyramidHorKernel>();
-            horizontal_kernel->configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
+            _horizontal_reduction[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
 
             /* Configure vertical kernel */
-            auto vertical_kernel = support::cpp14::make_unique<NEGaussianPyramidVertKernel>();
-            vertical_kernel->configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
+            _vertical_reduction[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
 
             /* Configure border */
-            auto horizontal_border_kernel = support::cpp14::make_unique<NEFillBorderKernel>();
-            horizontal_border_kernel->configure(_pyramid->get_pyramid_level(i), horizontal_kernel->border_size(), border_mode, PixelValue(constant_border_value));
+            _horizontal_border_handler[i].configure(_pyramid->get_pyramid_level(i), _horizontal_reduction[i].border_size(), border_mode, PixelValue(constant_border_value));
 
             /* Configure border */
-            auto vertical_border_kernel = support::cpp14::make_unique<NEFillBorderKernel>();
-            vertical_border_kernel->configure(_tmp.get_pyramid_level(i), vertical_kernel->border_size(), border_mode, PixelValue(pixel_value_u16));
-
-            _vertical_border_handler.emplace_back(std::move(vertical_border_kernel));
-            _horizontal_border_handler.emplace_back(std::move(horizontal_border_kernel));
-            _vertical_reduction.emplace_back(std::move(vertical_kernel));
-            _horizontal_reduction.emplace_back(std::move(horizontal_kernel));
+            _vertical_border_handler[i].configure(_tmp.get_pyramid_level(i), _vertical_reduction[i].border_size(), border_mode, PixelValue(pixel_value_u16));
         }
 
         _tmp.allocate();
@@ -119,17 +116,17 @@ void NEGaussianPyramidHalf::run()
     ARM_COMPUTE_ERROR_ON_MSG(_pyramid == nullptr, "Unconfigured function");
 
     /* Get number of pyramid levels */
-    const size_t num_levels = _pyramid->info()->num_levels();
+    const unsigned int num_levels = _pyramid->info()->num_levels();
 
     /* The first level of the pyramid has the input image */
    _pyramid->get_pyramid_level(0)->copy_from(*_input);
 
     for(unsigned int i = 0; i < num_levels - 1; ++i)
     {
-        NEScheduler::get().schedule(_horizontal_border_handler[i].get(), Window::DimZ);
-        NEScheduler::get().schedule(_horizontal_reduction[i].get(), Window::DimY);
-        NEScheduler::get().schedule(_vertical_border_handler[i].get(), Window::DimZ);
-        NEScheduler::get().schedule(_vertical_reduction[i].get(), Window::DimY);
+        NEScheduler::get().schedule(&_horizontal_border_handler[i], Window::DimZ);
+        NEScheduler::get().schedule(&_horizontal_reduction[i], Window::DimY);
+        NEScheduler::get().schedule(&_vertical_border_handler[i], Window::DimZ);
+        NEScheduler::get().schedule(&_vertical_reduction[i], Window::DimY);
     }
 }
 
@@ -150,26 +147,29 @@ void NEGaussianPyramidOrb::configure(const ITensor *input, IPyramid *pyramid, Bo
 
     /* Get number of pyramid levels */
     const size_t num_levels = pyramid->info()->num_levels();
+    const size_t num_stages = num_levels - 1;
 
     _input   = input;
     _pyramid = pyramid;
 
+    _gaus5x5.clear();
+    _scale_nearest.clear();
+
+    _gaus5x5.resize(num_stages);
+    _scale_nearest.resize(num_stages);
+
     if(num_levels > 1)
     {
         PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
         _tmp.init(pyramid_info);
 
-        for(unsigned int i = 0; i < num_levels - 1; ++i)
+        for(size_t i = 0; i < num_levels - 1; ++i)
         {
             /* Configure gaussian 5x5 */
-            auto gaus5x5_kernel = support::cpp14::make_unique<NEGaussian5x5>();
-            gaus5x5_kernel->configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode, constant_border_value);
-            _gaus5x5.emplace_back(std::move(gaus5x5_kernel));
+            _gaus5x5[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode, constant_border_value);
 
             /* Configure scale */
-            auto scale_kernel = support::cpp14::make_unique<NEScale>();
-            scale_kernel->configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED);
-            _scale_nearest.emplace_back(std::move(scale_kernel));
+            _scale_nearest[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::UNDEFINED);
         }
 
         _tmp.allocate();
@@ -188,7 +188,7 @@ void NEGaussianPyramidOrb::run()
 
     for(unsigned int i = 0; i < num_levels - 1; ++i)
     {
-        _gaus5x5[i].get()->run();
-        _scale_nearest[i].get()->run();
+        _gaus5x5[i].run();
+        _scale_nearest[i].run();
     }
 }
diff --git a/src/runtime/NEON/functions/NEHOGMultiDetection.cpp b/src/runtime/NEON/functions/NEHOGMultiDetection.cpp
index 26abc9d297..4e615808fb 100644
--- a/src/runtime/NEON/functions/NEHOGMultiDetection.cpp
+++ b/src/runtime/NEON/functions/NEHOGMultiDetection.cpp
@@ -126,12 +126,18 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
     _num_block_norm_kernel = input_block_norm.size(); // Number of NEHOGBlockNormalizationKernel kernels to compute
     _num_hog_detect_kernel = input_hog_detect.size(); // Number of NEHOGDetector functions to compute
 
-    _orient_bin_kernel.reserve(_num_orient_bin_kernel);
-    _block_norm_kernel.reserve(_num_block_norm_kernel);
-    _hog_detect_kernel.reserve(_num_hog_detect_kernel);
-    _hog_space.reserve(_num_orient_bin_kernel);
-    _hog_norm_space.reserve(_num_block_norm_kernel);
-    _non_maxima_kernel = arm_compute::support::cpp14::make_unique<CPPDetectionWindowNonMaximaSuppressionKernel>();
+    _orient_bin_kernel.clear();
+    _block_norm_kernel.clear();
+    _hog_detect_kernel.clear();
+    _hog_space.clear();
+    _hog_norm_space.clear();
+
+    _orient_bin_kernel.resize(_num_orient_bin_kernel);
+    _block_norm_kernel.resize(_num_block_norm_kernel);
+    _hog_detect_kernel.resize(_num_hog_detect_kernel);
+    _hog_space.resize(_num_orient_bin_kernel);
+    _hog_norm_space.resize(_num_block_norm_kernel);
+    _non_maxima_kernel = CPPDetectionWindowNonMaximaSuppressionKernel();
 
     // Allocate tensors for magnitude and phase
     TensorInfo info_mag(shape_img, Format::S16);
@@ -167,17 +173,13 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
 
         // Allocate HOG space
         TensorInfo info_space(shape_hog_space, num_bins, DataType::F32);
-        auto hog_space_tensor = support::cpp14::make_unique<Tensor>();
-        hog_space_tensor->allocator()->init(info_space);
+        _hog_space[i].allocator()->init(info_space);
 
         // Manage intermediate buffers
-        _memory_group.manage(hog_space_tensor.get());
+        _memory_group.manage(&_hog_space[i]);
 
         // Initialise orientation binning kernel
-        auto orient_bin_kernel = support::cpp14::make_unique<NEHOGOrientationBinningKernel>();
-        orient_bin_kernel->configure(&_mag, &_phase, hog_space_tensor.get(), multi_hog->model(idx_multi_hog)->info());
-        _orient_bin_kernel.emplace_back(std::move(orient_bin_kernel));
-        _hog_space.emplace_back(std::move(hog_space_tensor));
+        _orient_bin_kernel[i].configure(&_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
     }
 
     // Allocate intermediate tensors
@@ -192,23 +194,19 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
 
         // Allocate normalized HOG space
         TensorInfo tensor_info(*(multi_hog->model(idx_multi_hog)->info()), width, height);
-        auto hog_norm_space_tensor = support::cpp14::make_unique<Tensor>();
-        hog_norm_space_tensor->allocator()->init(tensor_info);
+        _hog_norm_space[i].allocator()->init(tensor_info);
 
         // Manage intermediate buffers
-        _memory_group.manage(hog_norm_space_tensor.get());
+        _memory_group.manage(&_hog_norm_space[i]);
 
         // Initialize block normalization kernel
-        auto block_norm_kernel = support::cpp14::make_unique<NEHOGBlockNormalizationKernel>();
-        block_norm_kernel->configure(_hog_space[idx_orient_bin].get(), hog_norm_space_tensor.get(), multi_hog->model(idx_multi_hog)->info());
-        _block_norm_kernel.emplace_back(std::move(block_norm_kernel));
-        _hog_norm_space.emplace_back(std::move(hog_norm_space_tensor));
+        _block_norm_kernel[i].configure(&_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
     }
 
     // Allocate intermediate tensors
     for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
     {
-        _hog_space[i].get()->allocator()->allocate();
+        _hog_space[i].allocator()->allocate();
     }
 
     // Configure HOG detector kernel
@@ -216,18 +214,16 @@ void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog,
     {
         const size_t idx_block_norm = input_hog_detect[i];
 
-        auto hog_detect_kernel = support::cpp14::make_unique<NEHOGDetector>();
-        hog_detect_kernel->configure(_hog_norm_space[idx_block_norm].get(), multi_hog->model(i), detection_windows, detection_window_strides->at(i), threshold, i);
-        _hog_detect_kernel.emplace_back(std::move(hog_detect_kernel));
+        _hog_detect_kernel[i].configure(&_hog_norm_space[idx_block_norm], multi_hog->model(i), detection_windows, detection_window_strides->at(i), threshold, i);
     }
 
     // Configure non maxima suppression kernel
-    _non_maxima_kernel->configure(_detection_windows, min_distance);
+    _non_maxima_kernel.configure(_detection_windows, min_distance);
 
     // Allocate intermediate tensors
     for(size_t i = 0; i < _num_block_norm_kernel; ++i)
     {
-        _hog_norm_space[i]->allocator()->allocate();
+        _hog_norm_space[i].allocator()->allocate();
     }
 }
 
@@ -246,24 +242,24 @@ void NEHOGMultiDetection::run()
     // Run orientation binning kernel
     for(auto &kernel : _orient_bin_kernel)
     {
-        NEScheduler::get().schedule(kernel.get(), Window::DimY);
+        NEScheduler::get().schedule(&kernel, Window::DimY);
     }
 
     // Run block normalization kernel
     for(auto &kernel : _block_norm_kernel)
     {
-        NEScheduler::get().schedule(kernel.get(), Window::DimY);
+        NEScheduler::get().schedule(&kernel, Window::DimY);
     }
 
     // Run HOG detector kernel
     for(auto &kernel : _hog_detect_kernel)
     {
-        kernel->run();
+        kernel.run();
     }
 
     // Run non-maxima suppression kernel if enabled
     if(_non_maxima_suppression)
     {
-        NEScheduler::get().schedule(_non_maxima_kernel.get(), Window::DimY);
+        NEScheduler::get().schedule(&_non_maxima_kernel, Window::DimY);
     }
 }
diff --git a/src/runtime/NEON/functions/NEOpticalFlow.cpp b/src/runtime/NEON/functions/NEOpticalFlow.cpp
index 0df01c677d..27dc3e058f 100644
--- a/src/runtime/NEON/functions/NEOpticalFlow.cpp
+++ b/src/runtime/NEON/functions/NEOpticalFlow.cpp
@@ -74,10 +74,15 @@ void NEOpticalFlow::configure(const Pyramid *old_pyramid, const Pyramid *new_pyr
 
     const float pyr_scale = old_pyramid->info()->scale();
 
-    _func_scharr.reserve(_num_levels);
-    _kernel_tracker.reserve(_num_levels);
-    _scharr_gx.reserve(_num_levels);
-    _scharr_gy.reserve(_num_levels);
+    _func_scharr.clear();
+    _kernel_tracker.clear();
+    _scharr_gx.clear();
+    _scharr_gy.clear();
+
+    _func_scharr.resize(_num_levels);
+    _kernel_tracker.resize(_num_levels);
+    _scharr_gx.resize(_num_levels);
+    _scharr_gy.resize(_num_levels);
 
     _old_points_internal = LKInternalKeypointArray(old_points->num_values());
     _new_points_internal = LKInternalKeypointArray(old_points->num_values());
@@ -95,34 +100,25 @@ void NEOpticalFlow::configure(const Pyramid *old_pyramid, const Pyramid *new_pyr
 
         TensorInfo tensor_info(TensorShape(width_ith, height_ith), Format::S16);
 
-        auto scharr_gx = support::cpp14::make_unique<Tensor>();
-        auto scharr_gy = support::cpp14::make_unique<Tensor>();
-        scharr_gx->allocator()->init(tensor_info);
-        scharr_gy->allocator()->init(tensor_info);
+        _scharr_gx[i].allocator()->init(tensor_info);
+        _scharr_gy[i].allocator()->init(tensor_info);
 
         // Manage intermediate buffers
-        _memory_group.manage(scharr_gx.get());
-        _memory_group.manage(scharr_gy.get());
+        _memory_group.manage(&_scharr_gx[i]);
+        _memory_group.manage(&_scharr_gy[i]);
 
         // Init Scharr kernel
-        auto func_scharr = support::cpp14::make_unique<NEScharr3x3>();
-        func_scharr->configure(old_ith_input, scharr_gx.get(), scharr_gy.get(), border_mode, constant_border_value);
+        _func_scharr[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
 
         // Init Lucas-Kanade kernel
-        auto kernel_tracker = support::cpp14::make_unique<NELKTrackerKernel>();
-        kernel_tracker->configure(old_ith_input, new_ith_input, scharr_gx.get(), scharr_gy.get(),
-                                  old_points, new_points_estimates, new_points,
-                                  &_old_points_internal, &_new_points_internal,
-                                  termination, use_initial_estimate, epsilon, num_iterations, window_dimension,
-                                  i, _num_levels, pyr_scale);
-
-        scharr_gx->allocator()->allocate();
-        scharr_gy->allocator()->allocate();
-
-        _func_scharr.emplace_back(std::move(func_scharr));
-        _kernel_tracker.emplace_back(std::move(kernel_tracker));
-        _scharr_gx.emplace_back(std::move(scharr_gx));
-        _scharr_gy.emplace_back(std::move(scharr_gy));
+        _kernel_tracker[i].configure(old_ith_input, new_ith_input, &_scharr_gx[i], &_scharr_gy[i],
+                                     old_points, new_points_estimates, new_points,
+                                     &_old_points_internal, &_new_points_internal,
+                                     termination, use_initial_estimate, epsilon, num_iterations, window_dimension,
+                                     i, _num_levels, pyr_scale);
+
+        _scharr_gx[i].allocator()->allocate();
+        _scharr_gy[i].allocator()->allocate();
     }
 }
 
@@ -135,9 +131,9 @@ void NEOpticalFlow::run()
     for(unsigned int level = _num_levels; level > 0; --level)
     {
         // Run Scharr kernel
-        _func_scharr[level - 1].get()->run();
+        _func_scharr[level - 1].run();
 
         // Run Lucas-Kanade kernel
-        NEScheduler::get().schedule(_kernel_tracker[level - 1].get(), Window::DimX);
+        NEScheduler::get().schedule(&_kernel_tracker[level - 1], Window::DimX);
     }
 }
-- 
cgit v1.2.1