From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration
file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of the .cl files and the
files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 ...epthwiseConvolutionBatchNormalizationFunction.h | 36 +++++++++++++---------
 1 file changed, 21 insertions(+), 15 deletions(-)

(limited to 'arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h')

diff --git a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
index 4f8a8da1fb..07a2cdd8b8 100644
--- a/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h
@@ -67,15 +67,18 @@ public:
      * @param[in] fused_act Activation layer information in case of a fused activation.
      *
      */
-    void configure(TensorType       *input,
-                   TensorType       *weights,
-                   TensorType       *bias,
-                   TensorType       *output,
-                   const TensorType *mean,
-                   const TensorType *var,
-                   const TensorType *beta,
-                   const TensorType *gamma,
-                   float epsilon, const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo const &fused_act)
+    void configure(TensorType                *input,
+                   TensorType                *weights,
+                   TensorType                *bias,
+                   TensorType                *output,
+                   const TensorType          *mean,
+                   const TensorType          *var,
+                   const TensorType          *beta,
+                   const TensorType          *gamma,
+                   float                      epsilon,
+                   const PadStrideInfo       &conv_info,
+                   unsigned int               depth_multiplier,
+                   ActivationLayerInfo const &fused_act)
     {
         // We don't run any validate, as we assume that the layers have been already validated
         const bool has_bias = (bias != nullptr);
@@ -83,20 +86,23 @@ public:
 
         // We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
         // as batch normalization might end up with a bias != 0
-        if(has_bias)
+        if (has_bias)
         {
-            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon,
+                                              FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
             bias_to_use = bias;
         }
         else
         {
-            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon, FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
+            _fused_batch_norm_layer.configure(weights, mean, var, nullptr, &_fused_bias, nullptr, beta, gamma, epsilon,
+                                              FuseBatchNormalizationType::DEPTHWISECONVOLUTION);
             bias_to_use = &_fused_bias;
         }
 
-        _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier, fused_act.enabled() ? fused_act : ActivationLayerInfo());
+        _depth_conv_layer.configure(input, weights, bias_to_use, output, conv_info, depth_multiplier,
+                                    fused_act.enabled() ? fused_act : ActivationLayerInfo());
 
-        if(!has_bias)
+        if (!has_bias)
         {
             _fused_bias.allocator()->allocate();
         }
@@ -111,7 +117,7 @@ public:
 
     void prepare()
     {
-        if(!_is_prepared)
+        if (!_is_prepared)
         {
             _fused_batch_norm_layer.run();
             _is_prepared = true;
--
cgit v1.2.1
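
Note: the revised clang-format configuration file is explicitly not part of
this delivery, so its exact contents are unknown. Purely as a hedged sketch,
the options below (all real clang-format 14 option names, with values guessed
from the formatting visible in this diff: one parameter per line with aligned
declarations, "if (" spacing, wrapping near column 120) would approximate the
result; the actual upstream values are assumptions.

    # Hypothetical .clang-format sketch (clang-format 14).
    # NOT the configuration used for this commit; values inferred from the diff.
    Language:                     Cpp
    BasedOnStyle:                 LLVM
    IndentWidth:                  4                  # 4-space indentation, as in the hunks
    ColumnLimit:                  120                # configure(...) calls wrap near column 120
    BreakBeforeBraces:            Allman             # braces on their own line, as in the if/else blocks
    SpaceBeforeParens:            ControlStatements  # "if (has_bias)" rather than "if(has_bias)"
    PointerAlignment:             Right              # "TensorType *input" style
    AlignConsecutiveDeclarations: true               # aligns parameter names in the new signature
    BinPackParameters:            false              # one parameter per line in declarations
    AlignAfterOpenBracket:        Align              # continuations align with the open parenthesis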
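
Note on the code being reformatted (its behaviour is unchanged by this
commit): the comment in configure() points out that folding batch
normalization into the convolution generally yields a non-zero bias, which is
why the no-bias path must allocate _fused_bias. A minimal standalone sketch of
the per-channel fold, using hypothetical names and simplified to one weight
per channel (this is not ACL's actual FuseBatchNormalization implementation):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Fold y = gamma * (conv(x) + b - mean) / sqrt(var + eps) + beta into the
    // convolution itself: scale the weights and rewrite the bias per channel.
    void fold_batch_norm(std::vector<float>       &weights, // one weight per channel, for brevity
                         std::vector<float>       &bias,    // in/out: folded in place
                         const std::vector<float> &mean,
                         const std::vector<float> &var,
                         const std::vector<float> &beta,
                         const std::vector<float> &gamma,
                         float                     epsilon)
    {
        for (std::size_t c = 0; c < bias.size(); ++c)
        {
            const float scale = gamma[c] / std::sqrt(var[c] + epsilon);
            weights[c] *= scale;
            bias[c] = (bias[c] - mean[c]) * scale + beta[c];
        }
    }

With bias[c] == 0 on entry, the folded bias reduces to
beta[c] - mean[c] * gamma[c] / sqrt(var[c] + epsilon), which is non-zero in
general, hence the allocation of _fused_bias in the else branch above.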