From a4f378dcd39addd4a63db1c0848f2c120804f4eb Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Fri, 26 Apr 2019 14:54:54 +0100 Subject: COMPMID-1995: Fix clang-tidy warnings - Remove VirtualCall checks - Fix some unused variables errors - Use std::array instead of C style arrays - Various fixes Change-Id: Ife6170b7102de42b8f04e298dcf8476bf90779f0 Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/1049 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio --- src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp | 6 ++++-- src/runtime/NEON/functions/NEFullyConnectedLayer.cpp | 3 ++- src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp | 11 ++++++++--- src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 3 ++- src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp | 8 +++++--- 5 files changed, 21 insertions(+), 10 deletions(-) (limited to 'src/runtime/NEON') diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp index 5133756993..3bb69b1ffc 100644 --- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp @@ -112,7 +112,8 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info(); float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale; - int output_multiplier, output_shift; + int output_multiplier; + int output_shift; quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? 
output : &_permuted_output, output_multiplier, output_shift, output_quant_info.offset); _accumulator.allocator()->allocate(); @@ -461,7 +462,8 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh const QuantizationInfo output_quant_info = output->info()->quantization_info(); float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale; - int output_multiplier, output_shift; + int output_multiplier; + int output_shift; quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); _output_stage_kernel.configure(&_output_reshaped, biases, output_to_use, output_multiplier, output_shift, output_quant_info.offset); _output_reshaped.allocator()->allocate(); diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp index 273da278e8..e1a17db6d4 100644 --- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp +++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp @@ -228,7 +228,8 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh if(_is_quantized) { float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale; - int output_multiplier, output_shift; + int output_multiplier; + int output_shift; quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset); _gemmlowp_output.allocator()->allocate(); diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp index beac4ffe5f..a2c4e8a8b1 100644 --- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp +++ 
b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp @@ -118,7 +118,8 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quantization_info : output->info()->quantization_info(); float multiplier = input_quantization_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale; - int output_multiplier, output_shift; + int output_multiplier; + int output_shift; quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); // Merge activation with output stage @@ -184,7 +185,8 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quantization_info : output->quantization_info(); float multiplier = input_quantization_info.scale * weights->quantization_info().scale / output_quant_info.scale; - int output_multiplier, output_shift; + int output_multiplier; + int output_shift; quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); // Merge activation with output stage @@ -412,7 +414,10 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI const unsigned int kernel_width = weights->dimension(idx_width); const unsigned int kernel_height = weights->dimension(idx_height); - TensorInfo im2col_reshaped_info, info_gemm, tmp_info, weights_reshaped_info; + TensorInfo im2col_reshaped_info{}; + TensorInfo info_gemm{}; + TensorInfo tmp_info{}; + TensorInfo weights_reshaped_info{}; const ITensorInfo *gemm_input_to_use = input; const ITensorInfo *gemm_output_to_use = output; const ITensorInfo *weights_to_use = weights; diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp index 5ce7bc5f2c..54f49a6707 100644 --- 
a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp +++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp @@ -287,7 +287,8 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso } } - TensorInfo info_vector_sum_col, info_vector_sum_row; + TensorInfo info_vector_sum_col{}; + TensorInfo info_vector_sum_row{}; // Validate matrix B reduction kernel only if _a_offset is not equal to 0 if(a_offset != 0) diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp index 2332450c04..1513786ae5 100644 --- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp @@ -162,7 +162,7 @@ inline Tensor4DShape internal_get_input_shape(const arm_compute::ITensor *input) const int in_channels = input->info()->dimension(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)); const int in_batches = input->info()->dimension(3); - return Tensor4DShape({ in_batches, in_height, in_width, in_channels }); + return Tensor4DShape{ in_batches, in_height, in_width, in_channels }; } Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info) @@ -234,7 +234,7 @@ bool check_support_fast_math(const Size2D &output_tile, const Size2D &kernel_siz } //namespace -NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(std::shared_ptr memory_manager) +NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(const std::shared_ptr &memory_manager) : _memory_group(memory_manager), _gemm_function(memory_manager), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _activationlayer_function(), _permute_input(), _permute_weights(), _permute_output(), _input_transformed(), _output_transformed(), _input_workspace(), _output_workspace(), _kernel_storage(), 
_input_nhwc(), _output_nhwc(), _weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false) @@ -428,7 +428,9 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * d_strides.set(2, 0); d_strides.set(3, data_type_size * output_matrix_stride); - TensorInfo a_info, b_info, d_info; + TensorInfo a_info{}; + TensorInfo b_info{}; + TensorInfo d_info{}; a_info.init(a_shape, 1, data_type, a_strides, 0, input_storage_size); b_info.init(b_shape, 1, data_type, b_strides, 0, kernel_storage_size); d_info.init(d_shape, 1, data_type, d_strides, 0, output_storage_size); -- cgit v1.2.1