path: root/src/runtime
author     Michalis Spyrou <michalis.spyrou@arm.com>  2019-04-26 14:54:54 +0100
committer  Michalis Spyrou <michalis.spyrou@arm.com>  2019-05-01 10:06:58 +0000
commit     a4f378dcd39addd4a63db1c0848f2c120804f4eb (patch)
tree       6fa8a0071bef32d2bdef0e5469678a7cfecea348 /src/runtime
parent     8ec0bb6d9027bb7505d6fa0eada42a52c6e1073b (diff)
download   ComputeLibrary-a4f378dcd39addd4a63db1c0848f2c120804f4eb.tar.gz
COMPMID-1995: Fix clang-tidy warnings
- Remove VirtualCall checks
- Fix some unused-variable errors
- Use std::array instead of C-style arrays
- Various fixes

Change-Id: Ife6170b7102de42b8f04e298dcf8476bf90779f0
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1049
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
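Editor's note: as a minimal sketch of two of the patterns this message refers to, the snippet below shows a value-initialized std::array replacing a C-style array and a multi-variable declaration split into single declarations. The clang-tidy check names in the comments are assumptions on my part; the commit does not name them.

    #include <array>
    #include <cstdint>

    void sketch()
    {
        // Presumably modernize-avoid-c-arrays: a value-initialized std::array
        // replaces "int32_t rows_out_of_bounds[2];", whose elements would
        // otherwise be left indeterminate.
        std::array<int32_t, 2> rows_out_of_bounds{ 0 };

        // Presumably readability-isolate-declaration: one declaration per
        // statement instead of "int output_multiplier, output_shift;".
        int output_multiplier = 0;
        int output_shift      = 0;

        (void)rows_out_of_bounds;
        (void)output_multiplier;
        (void)output_shift;
    }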
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/functions/CLCropResize.cpp                    | 50
-rw-r--r--  src/runtime/CL/functions/CLFullyConnectedLayer.cpp           |  3
-rw-r--r--  src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp    |  3
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp   |  6
-rw-r--r--  src/runtime/NEON/functions/NEFullyConnectedLayer.cpp         |  3
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp        | 11
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp  |  3
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp    |  8
8 files changed, 52 insertions(+), 35 deletions(-)
diff --git a/src/runtime/CL/functions/CLCropResize.cpp b/src/runtime/CL/functions/CLCropResize.cpp
index 2cacef1bb1..b22809eb09 100644
--- a/src/runtime/CL/functions/CLCropResize.cpp
+++ b/src/runtime/CL/functions/CLCropResize.cpp
@@ -57,9 +57,9 @@ inline void run_crop(const ICLTensor *input, ICLTensor *output, uint32_t batch_i
bool is_width_flipped = end[0] < start[0];
bool is_height_flipped = end[1] < start[1];
/** The number of rows out of bounds at the start and end of output. */
- int32_t rows_out_of_bounds[2];
+ std::array<int32_t, 2> rows_out_of_bounds{ 0 };
/** The number of columns out of bounds at the start and end of output. */
- int32_t cols_out_of_bounds[2];
+ std::array<int32_t, 2> cols_out_of_bounds{ 0 };
if(is_height_flipped)
{
rows_out_of_bounds[0] = start[1] >= static_cast<int32_t>(input->info()->dimension(2)) ? std::min(start[1] - input->info()->dimension(2) + 1, output->info()->dimension(2)) : 0;
@@ -164,7 +164,7 @@ inline void run_crop(const ICLTensor *input, ICLTensor *output, uint32_t batch_i
} // namespace
CLCropResize::CLCropResize()
- : _input(nullptr), _boxes(nullptr), _box_ind(nullptr), _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _scale(), _copy()
+ : _input(nullptr), _boxes(nullptr), _box_ind(nullptr), _output(nullptr), _num_boxes(0), _method(), _extrapolation_value(0), _scale(), _copy(), _crop_results(), _scaled_results()
{
}
@@ -210,20 +210,19 @@ void CLCropResize::configure(const ICLTensor *input, ICLTensor *boxes, ICLTensor
// - A scale function is used to resize the cropped image to the size specified by crop_size.
// - A tensor is required to hold the final scaled image before it is copied into the 4D output
// that will hold all final cropped and scaled 3D images using CLCopyKernel.
- _crop_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_boxes);
- _scale = arm_compute::support::cpp14::make_unique<CLScale[]>(_num_boxes);
- _scaled_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_boxes);
- _copy = arm_compute::support::cpp14::make_unique<CLCopyKernel[]>(_num_boxes);
-
for(unsigned int i = 0; i < _num_boxes; ++i)
{
+ auto crop_tensor = support::cpp14::make_unique<CLTensor>();
TensorInfo crop_result_info(1, DataType::F32);
crop_result_info.set_data_layout(DataLayout::NHWC);
- _crop_results[i].allocator()->init(crop_result_info);
+ crop_tensor->allocator()->init(crop_result_info);
+ _crop_results.emplace_back(std::move(crop_tensor));
+ auto scale_tensor = support::cpp14::make_unique<CLTensor>();
TensorInfo scaled_result_info(out_shape, 1, DataType::F32);
scaled_result_info.set_data_layout(DataLayout::NHWC);
- _scaled_results[i].allocator()->init(scaled_result_info);
+ scale_tensor->allocator()->init(scaled_result_info);
+ _scaled_results.emplace_back(std::move(scale_tensor));
}
}
@@ -240,32 +239,37 @@ void CLCropResize::run()
// Size of the crop box in _boxes and thus the shape of _crop_results[i]
// may not be known until run-time and so the kernels cannot be configured until then.
uint32_t batch_index;
- Coordinates start, end;
- configure_crop(_input, _boxes, _box_ind, &_crop_results[i], i, start, end, batch_index);
- _scale[i].configure(&_crop_results[i], &_scaled_results[i], _method, BorderMode::CONSTANT, PixelValue(_extrapolation_value), SamplingPolicy::TOP_LEFT);
+ Coordinates start{};
+ Coordinates end{};
+ configure_crop(_input, _boxes, _box_ind, _crop_results[i].get(), i, start, end, batch_index);
+
+ auto scale_kernel = support::cpp14::make_unique<CLScale>();
+ scale_kernel->configure(_crop_results[i].get(), _scaled_results[i].get(), _method, BorderMode::CONSTANT, PixelValue(_extrapolation_value), SamplingPolicy::TOP_LEFT);
+ _scale.emplace_back(std::move(scale_kernel));
Window win = calculate_max_window(*_output->info());
win.set(3, Window::Dimension(i, i + 1, 1));
- _copy[i].configure(&_scaled_results[i], _output, PaddingList(), &win);
- _crop_results[i].allocator()->allocate();
- _scaled_results[i].allocator()->allocate();
+ auto copy_kernel = support::cpp14::make_unique<CLCopyKernel>();
+ copy_kernel->configure(_scaled_results[i].get(), _output, PaddingList(), &win);
+ _copy.emplace_back(std::move(copy_kernel));
+
+ _crop_results[i]->allocator()->allocate();
+ _scaled_results[i]->allocator()->allocate();
- run_crop(_input, &_crop_results[i], batch_index, start, end, _extrapolation_value);
+ run_crop(_input, _crop_results[i].get(), batch_index, start, end, _extrapolation_value);
}
_boxes->unmap(CLScheduler::get().queue());
_box_ind->unmap(CLScheduler::get().queue());
CLScheduler::get().sync();
- for(unsigned int i = 0; i < _num_boxes; ++i)
+ for(auto &kernel : _scale)
{
- // Scale the cropped image
- _scale[i].run();
+ kernel->run();
}
CLScheduler::get().sync();
- for(unsigned int i = 0; i < _num_boxes; ++i)
+ for(auto &kernel : _copy)
{
- // Copy scaled image into output.
- CLScheduler::get().enqueue(_copy[i]);
+ CLScheduler::get().enqueue(*kernel, true);
}
CLScheduler::get().sync();
}
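Editor's note: the CLCropResize change above moves from fixed make_unique<T[]> arrays to containers of per-box objects built on demand and iterated with range-based loops. Below is a minimal sketch of that ownership pattern only; Resource and Worker are hypothetical stand-ins for CLTensor and CLScale, not Compute Library types.

    #include <memory>
    #include <vector>

    struct Resource { void init() {} };
    struct Worker   { void configure(Resource *) {} void run() {} };

    class Pipeline
    {
    public:
        void configure(unsigned int num_boxes)
        {
            for(unsigned int i = 0; i < num_boxes; ++i)
            {
                // Build each element individually and hand ownership to the
                // vector instead of allocating a raw T[] block up front.
                auto res = std::make_unique<Resource>();
                res->init();
                _resources.emplace_back(std::move(res));

                auto work = std::make_unique<Worker>();
                work->configure(_resources[i].get());
                _workers.emplace_back(std::move(work));
            }
        }

        void run()
        {
            // Range-based loop replaces the index-based loop over _num_boxes.
            for(auto &w : _workers)
            {
                w->run();
            }
        }

    private:
        std::vector<std::unique_ptr<Resource>> _resources{};
        std::vector<std::unique_ptr<Worker>>   _workers{};
    };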
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index add5e16498..7b9229c4ae 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -231,7 +231,8 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
if(_is_quantized)
{
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
_gemmlowp_output.allocator()->allocate();
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index cd537087d3..049db1d461 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -249,7 +249,8 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
}
- TensorInfo info_vector_sum_col, info_vector_sum_row;
+ TensorInfo info_vector_sum_col{};
+ TensorInfo info_vector_sum_row{};
// Validate matrix B reduction kernel only if _a_offset is not equal to 0
if(a_offset != 0)
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 5133756993..3bb69b1ffc 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -112,7 +112,8 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor
const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, output_quant_info.offset);
_accumulator.allocator()->allocate();
@@ -461,7 +462,8 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh
const QuantizationInfo output_quant_info = output->info()->quantization_info();
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_output_stage_kernel.configure(&_output_reshaped, biases, output_to_use, output_multiplier, output_shift, output_quant_info.offset);
_output_reshaped.allocator()->allocate();
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 273da278e8..e1a17db6d4 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -228,7 +228,8 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
if(_is_quantized)
{
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
_gemmlowp_output.allocator()->allocate();
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index beac4ffe5f..a2c4e8a8b1 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -118,7 +118,8 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quantization_info : output->info()->quantization_info();
float multiplier = input_quantization_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
// Merge activation with output stage
@@ -184,7 +185,8 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quantization_info : output->quantization_info();
float multiplier = input_quantization_info.scale * weights->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
// Merge activation with output stage
@@ -412,7 +414,10 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const unsigned int kernel_width = weights->dimension(idx_width);
const unsigned int kernel_height = weights->dimension(idx_height);
- TensorInfo im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info;
+ TensorInfo im2col_reshaped_info{};
+ TensorInfo info_gemm{};
+ TensorInfo tmp_info{};
+ TensorInfo weights_reshaped_info{};
const ITensorInfo *gemm_input_to_use = input;
const ITensorInfo *gemm_output_to_use = output;
const ITensorInfo *weights_to_use = weights;
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 5ce7bc5f2c..54f49a6707 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -287,7 +287,8 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
}
}
- TensorInfo info_vector_sum_col, info_vector_sum_row;
+ TensorInfo info_vector_sum_col{};
+ TensorInfo info_vector_sum_row{};
// Validate matrix B reduction kernel only if _a_offset is not equal to 0
if(a_offset != 0)
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index 2332450c04..1513786ae5 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -162,7 +162,7 @@ inline Tensor4DShape internal_get_input_shape(const arm_compute::ITensor *input)
const int in_channels = input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL));
const int in_batches = input->info()->dimension(3);
- return Tensor4DShape({ in_batches, in_height, in_width, in_channels });
+ return Tensor4DShape{ in_batches, in_height, in_width, in_channels };
}
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
@@ -234,7 +234,7 @@ bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_siz
} //namespace
-NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
: _memory_group(memory_manager), _gemm_function(memory_manager), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _activationlayer_function(),
_permute_input(), _permute_weights(), _permute_output(), _input_transformed(), _output_transformed(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(),
_weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false)
@@ -428,7 +428,9 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
d_strides.set(2, 0);
d_strides.set(3, data_type_size * output_matrix_stride);
- TensorInfo a_info, b_info, d_info;
+ TensorInfo a_info{};
+ TensorInfo b_info{};
+ TensorInfo d_info{};
a_info.init(a_shape, 1, data_type, a_strides, 0, input_storage_size);
b_info.init(b_shape, 1, data_type, b_strides, 0, kernel_storage_size);
d_info.init(d_shape, 1, data_type, d_strides, 0, output_storage_size);
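Editor's note: the NEWinogradConvolutionLayer constructor now takes the memory manager by const reference rather than by value. A small sketch of why that form is commonly preferred follows; the Function class is a hypothetical stand-in, and the motivating check (likely something like performance-unnecessary-value-param) is my assumption, not named in the commit.

    #include <memory>

    struct IMemoryManager { };

    class Function
    {
    public:
        // Taking the shared_ptr by const reference avoids an extra atomic
        // reference-count increment/decrement at the call site; copying it
        // into the member below still gives the object shared ownership.
        explicit Function(const std::shared_ptr<IMemoryManager> &memory_manager)
            : _memory_manager(memory_manager)
        {
        }

    private:
        std::shared_ptr<IMemoryManager> _memory_manager;
    };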