From 9d0b5f82c2734444145718f12788f2dde436ef45 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 1 May 2019 13:03:59 +0100
Subject: COMPMID-2177 Fix clang warnings

Change-Id: I78039db8c58d7b14a042c41e54c25fb9cb509bf7
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/1092
Reviewed-by: VidhyaSudhan Loganathan
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 src/runtime/NEON/functions/NEConcatenateLayer.cpp |  1 +
 .../NEON/functions/NEGEMMAssemblyDispatch.cpp     |  2 +-
 src/runtime/NEON/functions/NEHarrisCorners.cpp    |  6 +++---
 src/runtime/NEON/functions/NEHistogram.cpp        |  8 ++++----
 src/runtime/NEON/functions/NELaplacianPyramid.cpp |  6 +++---
 .../NEON/functions/NELaplacianReconstruct.cpp     |  8 ++++----
 src/runtime/NEON/functions/NEPadLayer.cpp         | 22 +++++++++++++---------
 src/runtime/NEON/functions/NEReduceMean.cpp       | 18 +++++++++---------
 src/runtime/NEON/functions/NESplit.cpp            |  4 ++--
 src/runtime/NEON/functions/NEStackLayer.cpp       |  4 ++--
 src/runtime/NEON/functions/NEUnstack.cpp          |  2 +-
 .../NEON/functions/NEWidthConcatenateLayer.cpp    |  4 ++--
 12 files changed, 45 insertions(+), 40 deletions(-)

(limited to 'src/runtime/NEON')

diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index b8cfa2b8f2..71af560fb0 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -51,6 +51,7 @@ void NEConcatenateLayer::configure(const std::vector &inputs_vector,
     _num_inputs = inputs_vector.size();
 
     std::vector inputs_vector_info;
+    inputs_vector_info.reserve(_num_inputs);
     for(unsigned int i = 0; i < _num_inputs; ++i)
     {
         ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index fe1f2da457..55e067f52d 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -35,7 +35,7 @@ namespace arm_compute
 {
 namespace
 {
-std::unique_ptr create_function_all_types(arm_gemm::KernelDescription gemm_kernel_info,
+std::unique_ptr create_function_all_types(const arm_gemm::KernelDescription &gemm_kernel_info,
                                           const ITensor *a, const ITensor *b, ITensor *d,
                                           float alpha, float beta, bool pretranspose_hint,
                                           std::shared_ptr memory_manager)
diff --git a/src/runtime/NEON/functions/NEHarrisCorners.cpp b/src/runtime/NEON/functions/NEHarrisCorners.cpp
index 15cecc25a0..3eadbee45d 100644
--- a/src/runtime/NEON/functions/NEHarrisCorners.cpp
+++ b/src/runtime/NEON/functions/NEHarrisCorners.cpp
@@ -90,7 +90,7 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
     _score.allocator()->init(tensor_info_score);
     _nonmax.allocator()->init(tensor_info_score);
 
-    _corners_list = arm_compute::support::cpp14::make_unique(shape.x() * shape.y());
+    _corners_list.resize(shape.x() * shape.y());
 
     // Set/init Sobel kernel accordingly with gradient_size
     switch(gradient_size)
@@ -171,13 +171,13 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
     _score.allocator()->allocate();
 
     // Init corner candidates kernel
-    _candidates.configure(&_nonmax, _corners_list.get(), &_num_corner_candidates);
+    _candidates.configure(&_nonmax, _corners_list.data(), &_num_corner_candidates);
 
     // Allocate once all the configure methods have been called
     _nonmax.allocator()->allocate();
 
     // Init euclidean distance
-    _sort_euclidean.configure(_corners_list.get(), corners, &_num_corner_candidates, min_dist);
+    _sort_euclidean.configure(_corners_list.data(), corners, &_num_corner_candidates, min_dist);
 }
 
 void NEHarrisCorners::run()
diff --git a/src/runtime/NEON/functions/NEHistogram.cpp b/src/runtime/NEON/functions/NEHistogram.cpp
index f333ecb1f8..d56bd7cb16 100644
--- a/src/runtime/NEON/functions/NEHistogram.cpp
+++ b/src/runtime/NEON/functions/NEHistogram.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -34,7 +34,7 @@
 using namespace arm_compute;
 
 NEHistogram::NEHistogram()
-    : _histogram_kernel(), _local_hist(), _window_lut(arm_compute::support::cpp14::make_unique(window_lut_default_size)), _local_hist_size(0)
+    : _histogram_kernel(), _local_hist(), _window_lut(window_lut_default_size), _local_hist_size(0)
 {
 }
 
@@ -45,10 +45,10 @@ void NEHistogram::configure(const IImage *input, IDistribution1D *output)
 
     // Allocate space for threads local histograms
     _local_hist_size = output->num_bins() * NEScheduler::get().num_threads();
-    _local_hist = arm_compute::support::cpp14::make_unique(_local_hist_size);
+    _local_hist.resize(_local_hist_size);
 
     // Configure kernel
-    _histogram_kernel.configure(input, output, _local_hist.get(), _window_lut.get());
+    _histogram_kernel.configure(input, output, _local_hist.data(), _window_lut.data());
 }
 
 void NEHistogram::run()
diff --git a/src/runtime/NEON/functions/NELaplacianPyramid.cpp b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
index 0e149d4176..5174a1357e 100644
--- a/src/runtime/NEON/functions/NELaplacianPyramid.cpp
+++ b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -92,8 +92,8 @@ void NELaplacianPyramid::configure(const ITensor *input, IPyramid *pyramid, ITen
     // Create Gaussian Pyramid function
     _gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
 
-    _convf = arm_compute::support::cpp14::make_unique(_num_levels);
-    _subf = arm_compute::support::cpp14::make_unique(_num_levels);
+    _convf.resize(_num_levels);
+    _subf.resize(_num_levels);
 
     for(unsigned int i = 0; i < _num_levels; ++i)
     {
diff --git a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
index 9ad9689b13..b2d889b07f 100644
--- a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
+++ b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -64,8 +64,8 @@ void NELaplacianReconstruct::configure(const IPyramid *pyramid, ITensor *input,
     _tmp_pyr.init(pyramid_info);
 
     // Allocate add and scale functions. Level 0 does not need to be scaled.
-    _addf = arm_compute::support::cpp14::make_unique(num_levels);
-    _scalef = arm_compute::support::cpp14::make_unique(num_levels - 1);
+    _addf.resize(num_levels);
+    _scalef.resize(num_levels - 1);
 
     const size_t last_level = num_levels - 1;
 
@@ -86,7 +86,7 @@ void NELaplacianReconstruct::configure(const IPyramid *pyramid, ITensor *input,
 
 void NELaplacianReconstruct::run()
 {
-    ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function");
+    ARM_COMPUTE_ERROR_ON_MSG(_addf.empty(), "Unconfigured function");
 
     const size_t last_level = _tmp_pyr.info()->num_levels() - 1;
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 6af2ee8868..c608edfdee 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -76,8 +76,7 @@ uint32_t last_padding_dimension(const PaddingList &padding)
 } // namespace
 
 NEPadLayer::NEPadLayer()
-    : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr),
-      _output_subtensor()
+    : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results(), _output_subtensor()
 {
 }
 
@@ -108,11 +107,16 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
 
     // Two strided slice functions will be required for each dimension padded as well as a
     // concatenate function and the tensors to hold the temporary results.
-    _slice_functions = arm_compute::support::cpp14::make_unique(2 * _num_dimensions);
-    _slice_results = arm_compute::support::cpp14::make_unique(2 * _num_dimensions);
-    _concat_functions = arm_compute::support::cpp14::make_unique(_num_dimensions);
-    _concat_results = arm_compute::support::cpp14::make_unique(_num_dimensions - 1);
-    Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+    _slice_functions.resize(2 * _num_dimensions);
+    _slice_results.resize(2 * _num_dimensions);
+    _concat_functions.resize(_num_dimensions);
+    _concat_results.resize(_num_dimensions - 1);
+
+    Coordinates starts_before{};
+    Coordinates ends_before{};
+    Coordinates starts_after{};
+    Coordinates ends_after{};
+    Coordinates strides{};
     ITensor *prev = input;
     for(uint32_t i = 0; i < _num_dimensions; ++i)
     {
@@ -158,7 +162,7 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
             if(i < prev->info()->num_dimensions())
             {
                 _slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
-                concat_vector.push_back(&_slice_results[2 * i]);
+                concat_vector.emplace_back(&_slice_results[2 * i]);
             }
             else
             {
@@ -172,7 +176,7 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
             if(i < prev->info()->num_dimensions())
             {
                 _slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
-                concat_vector.push_back(&_slice_results[2 * i + 1]);
+                concat_vector.emplace_back(&_slice_results[2 * i + 1]);
             }
             else
             {
diff --git a/src/runtime/NEON/functions/NEReduceMean.cpp b/src/runtime/NEON/functions/NEReduceMean.cpp
index 98d3ab943d..38adaa2a92 100644
--- a/src/runtime/NEON/functions/NEReduceMean.cpp
+++ b/src/runtime/NEON/functions/NEReduceMean.cpp
@@ -78,10 +78,10 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input);
 
-    _reduction_ops = reduction_axis.num_dimensions();
-    _reduction_kernels = arm_compute::support::cpp14::make_unique(_reduction_ops);
-    _reduced_outs = arm_compute::support::cpp14::make_unique(_reduction_ops - (keep_dims ? 1 : 0));
-    _keep_dims = keep_dims;
+    _reduction_ops = reduction_axis.num_dimensions();
+    _reduction_kernels.resize(_reduction_ops);
+    _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
+    _keep_dims = keep_dims;
 
     Coordinates axis_local = reduction_axis;
     const int input_dims = input->info()->num_dimensions();
@@ -96,9 +96,9 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
     // Perform reduction for every axis
     for(unsigned int i = 0; i < _reduction_ops; ++i)
     {
-        TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+        TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
         out_shape.set(axis_local[i], 1);
-        auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+        auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);
 
         if(i == _reduction_ops - 1 && keep_dims)
         {
@@ -107,8 +107,8 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
         else
         {
             _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
-            _memory_group.manage(_reduced_outs.get() + i);
-            _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], ReductionOperation::MEAN_SUM);
+            _memory_group.manage(&_reduced_outs[i]);
+            _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
         }
     }
 
@@ -131,7 +131,7 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
             out_shape.remove_dimension(axis_local[i] - i);
         }
         auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
-        _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+        _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
     }
 }
diff --git a/src/runtime/NEON/functions/NESplit.cpp b/src/runtime/NEON/functions/NESplit.cpp
index e947657934..0373ab6f88 100644
--- a/src/runtime/NEON/functions/NESplit.cpp
+++ b/src/runtime/NEON/functions/NESplit.cpp
@@ -42,8 +42,8 @@ NESplit::NESplit()
 void NESplit::configure(const ITensor *input, const std::vector &outputs, unsigned int axis)
 {
     // Create Slice functions
-    _num_outputs = outputs.size();
-    _slice_functions = arm_compute::support::cpp14::make_unique(_num_outputs);
+    _num_outputs = outputs.size();
+    _slice_functions.resize(_num_outputs);
 
     // Get output shape
     const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs);
diff --git a/src/runtime/NEON/functions/NEStackLayer.cpp b/src/runtime/NEON/functions/NEStackLayer.cpp
index 2f49c225a4..32350b052c 100644
--- a/src/runtime/NEON/functions/NEStackLayer.cpp
+++ b/src/runtime/NEON/functions/NEStackLayer.cpp
@@ -43,8 +43,8 @@ NEStackLayer::NEStackLayer() // NOLINT
 
 void NEStackLayer::configure(const std::vector &input, int axis, ITensor *output)
 {
-    _num_inputs = input.size();
-    _stack_kernels = arm_compute::support::cpp14::make_unique(_num_inputs);
+    _num_inputs = input.size();
+    _stack_kernels.resize(_num_inputs);
 
     // Wrap around negative values
     const unsigned int axis_u = wrap_around(axis, static_cast(input[0]->info()->num_dimensions() + 1));
diff --git a/src/runtime/NEON/functions/NEUnstack.cpp b/src/runtime/NEON/functions/NEUnstack.cpp
index 7532020973..21f35f8312 100644
--- a/src/runtime/NEON/functions/NEUnstack.cpp
+++ b/src/runtime/NEON/functions/NEUnstack.cpp
@@ -74,7 +74,7 @@ void NEUnstack::configure(const ITensor *input, const std::vector &ou
     // Wrap around negative values
     const unsigned int axis_u = wrap_axis(axis, input->info());
     _num_slices = std::min(outputs_vector_info.size(), input->info()->dimension(axis_u));
-    _strided_slice_vector = arm_compute::support::cpp14::make_unique(_num_slices);
+    _strided_slice_vector.resize(_num_slices);
 
     Coordinates slice_start;
     int32_t slice_end_mask;
diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
index 9fce13cbd7..25b5216305 100644
--- a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
@@ -79,7 +79,7 @@ inline void NEWidthConcatenateLayer::configure_internal(std::vector
-    _concat_kernels_vector = arm_compute::support::cpp14::make_unique(_num_inputs);
+    _concat_kernels_vector.resize(_num_inputs);
     for(unsigned int i = 0; i < _num_inputs; ++i)
     {
@@ -112,6 +112,6 @@ void NEWidthConcatenateLayer::run()
 {
     for(unsigned i = 0; i < _num_inputs; ++i)
     {
-        NEScheduler::get().schedule(_concat_kernels_vector.get() + i, Window::DimY);
+        NEScheduler::get().schedule(&_concat_kernels_vector[i], Window::DimY);
     }
 }
-- 
cgit v1.2.1
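
Illustrative sketch (not part of the patch above): every hunk applies the same refactoring. Members that owned dynamically sized arrays through arm_compute::support::cpp14::make_unique (a unique_ptr to a raw array, addressed with .get() + i and checked against nullptr) now hold a std::vector, addressed with operator[], data() and empty(). The standalone before/after below mirrors that shape only; the Kernel, FunctionBefore and FunctionAfter names are hypothetical stand-ins rather than arm_compute classes, and tying the change to specific clang diagnostics is an assumption, since the commit message only says "Fix clang warnings".

#include <memory>
#include <vector>

// Hypothetical stand-in for an arm_compute kernel type; not a real library class.
struct Kernel
{
    void configure(int id) { _id = id; }
    int  _id{ 0 };
};

// Before: the function owns a dynamically sized array through unique_ptr<T[]>.
// Element access needs pointer arithmetic (_kernels.get() + i) and the
// "configured yet?" check compares the owning pointer against nullptr.
struct FunctionBefore
{
    void configure(unsigned int n)
    {
        _num     = n;
        _kernels = std::make_unique<Kernel[]>(n);
        for(unsigned int i = 0; i < n; ++i)
        {
            (_kernels.get() + i)->configure(static_cast<int>(i));
        }
    }
    bool is_configured() const { return _kernels != nullptr; }

    unsigned int              _num{ 0 };
    std::unique_ptr<Kernel[]> _kernels{ nullptr };
};

// After: the same ownership expressed with std::vector, as in this patch.
// resize() replaces make_unique, &v[i] / v.data() replace .get() + i,
// and v.empty() replaces the nullptr check.
struct FunctionAfter
{
    void configure(unsigned int n)
    {
        _num = n;
        _kernels.resize(n);
        for(unsigned int i = 0; i < n; ++i)
        {
            _kernels[i].configure(static_cast<int>(i));
        }
    }
    bool is_configured() const { return !_kernels.empty(); }

    unsigned int        _num{ 0 };
    std::vector<Kernel> _kernels{};
};

int main()
{
    FunctionBefore before;
    FunctionAfter  after;
    before.configure(4);
    after.configure(4);
    return (before.is_configured() && after.is_configured()) ? 0 : 1;
}

The vector form keeps the single heap allocation but gives the container a meaningful empty state, which is what lets NELaplacianReconstruct::run() switch its guard from "_addf == nullptr" to "_addf.empty()" in the hunk above.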