From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration file (not part
of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under
tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 .../FusedConvolutionBatchNormalizationFunction.h | 31 +++++++++++++---------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
index 19c627d479..27e21cbc7e 100644
--- a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
@@ -70,15 +70,19 @@ public:
      * @param[in] fused_act Activation layer information in case of a fused activation.
      *
      */
-    void configure(TensorType       *input,
-                   TensorType       *weights,
-                   TensorType       *bias,
-                   TensorType       *output,
-                   const TensorType *mean,
-                   const TensorType *var,
-                   const TensorType *beta,
-                   const TensorType *gamma,
-                   float epsilon, const PadStrideInfo &conv_info, unsigned int num_groups, bool fast_math, ActivationLayerInfo const &fused_act)
+    void configure(TensorType                *input,
+                   TensorType                *weights,
+                   TensorType                *bias,
+                   TensorType                *output,
+                   const TensorType          *mean,
+                   const TensorType          *var,
+                   const TensorType          *beta,
+                   const TensorType          *gamma,
+                   float                      epsilon,
+                   const PadStrideInfo       &conv_info,
+                   unsigned int               num_groups,
+                   bool                       fast_math,
+                   ActivationLayerInfo const &fused_act)
     {
         // We don't run any validate, as we assume that the layers have been already validated
         const bool has_bias = (bias != nullptr);
@@ -86,7 +90,7 @@ public:
 
         // We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
         // as batch normalization might end up with a bias != 0
-        if(has_bias)
+        if (has_bias)
         {
             _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon);
             bias_to_use = bias;
@@ -97,9 +101,10 @@ public:
             bias_to_use = &_fused_bias;
         }
 
-        _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
+        _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act,
+                              fast_math, num_groups);
 
-        if(!has_bias)
+        if (!has_bias)
         {
             _fused_bias.allocator()->allocate();
         }
@@ -114,7 +119,7 @@ public:
 
     void prepare()
     {
-        if(!_is_prepared)
+        if (!_is_prepared)
         {
             _fused_batch_norm_layer.run();
             _is_prepared = true;
-- 
cgit v1.2.1
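
Note on reproducing the formatting pass: the revised .clang-format configuration
file is not part of this delivery, so the sketch below only illustrates how such
a pass could be driven with clang-format 14.0.6 on PATH. The script, its name,
its batching, and its suffix list are assumptions made for illustration, not the
tooling that was actually used; it walks the tree, skips the exclusion list from
the commit message (plus .cl kernels and non-C/C++ files), and runs
clang-format -i on what remains.

#!/usr/bin/env python3
"""Hypothetical driver for a repository-wide clang-format pass (illustrative only)."""
import pathlib
import subprocess

# Directories excluded from the pass, as listed in the commit message.
EXCLUDED_DIRS = (
    "compute_kernel_writer/validation",
    "tests",
    "include",
    "src/core/NEON/kernels/convolution",
    "src/core/NEON/kernels/arm_gemm",
    "src/core/NEON/kernels/arm_conv",
    "data",
)

# Only strictly C/C++ sources; .cl kernels and build files (Android.bp, SConscript) are skipped.
C_CPP_SUFFIXES = {".h", ".hpp", ".c", ".cc", ".cpp"}


def formattable(rel: pathlib.PurePath) -> bool:
    """Return True if the repository-relative path should be clang-formatted."""
    posix = rel.as_posix()
    if any(posix == d or posix.startswith(d + "/") for d in EXCLUDED_DIRS):
        return False
    return rel.suffix in C_CPP_SUFFIXES


def main(repo_root: str = ".") -> None:
    root = pathlib.Path(repo_root).resolve()
    files = sorted(p for p in root.rglob("*")
                   if p.is_file() and formattable(p.relative_to(root)))
    # clang-format -i rewrites files in place using the repository's .clang-format;
    # batch the invocations to keep command lines reasonably short.
    for i in range(0, len(files), 100):
        subprocess.run(["clang-format", "-i", *map(str, files[i:i + 100])], check=True)


if __name__ == "__main__":
    main()

Batching the in-place invocations keeps each command line short while still
letting clang-format pick up the nearest .clang-format file for every source.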
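
Note on the code touched by the last three hunks: the comment that batch
normalization "might end up with a bias != 0" follows from the usual folding of
a batch-normalization layer into the preceding convolution. A sketch of the
standard identity (the actual computation is delegated to the fused
batch-normalization layer configured above, so read this as the textbook
folding rather than a statement of its exact implementation):

    W' = \gamma \, W / \sqrt{\sigma^2 + \epsilon}
    b' = \gamma \, (b - \mu) / \sqrt{\sigma^2 + \epsilon} + \beta

where \mu and \sigma^2 are the running statistics passed as mean and var, and
\gamma, \beta, \epsilon correspond to gamma, beta, epsilon in configure(). Even
when the convolution has no bias of its own (b = 0), the folded bias
b' = \beta - \gamma \mu / \sqrt{\sigma^2 + \epsilon} is generally non-zero.
That is why configure() creates and allocates the internal _fused_bias tensor
when has_bias is false, and why prepare() runs _fused_batch_norm_layer exactly
once before the first execution to fill in the folded parameters.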