author    | Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-27 17:46:17 +0100
committer | felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-28 12:08:05 +0000
commit    | afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      | 03bc7d5a762099989b16a656fa8d397b490ed70e /arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
parent    | bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  | ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration
file (not part of this delivery); clang-format version 14.0.6 is used.
A hypothetical sketch of such a configuration is given after the
sign-off trailers below.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, SConscript, ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow-up for formatting of the .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
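
The revised .clang-format file itself is not part of this delivery, so its exact
contents are unknown. As a purely hypothetical illustration, the sketch below uses
real clang-format 14 option names with values inferred from the changes visible in
the diff that follows; none of the values are taken from the actual configuration file:

    # Hypothetical .clang-format sketch -- values inferred from the diff, NOT the real file
    Language:                     Cpp
    ColumnLimit:                  120               # the wrapped _conv_layer.configure() call suggests a 120-column limit
    SpaceBeforeParens:            ControlStatements # produces "if (has_bias)" rather than "if(has_bias)"
    PointerAlignment:             Right             # keeps "*" and "&" attached to the parameter name
    AlignConsecutiveDeclarations: Consecutive       # column-aligns parameter names in the new signature
    BinPackParameters:            false             # one parameter per line once the signature exceeds the limit
    BinPackArguments:             true              # call arguments stay packed and wrap at the limit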
Diffstat (limited to 'arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h')
-rw-r--r-- | arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h | 31
1 file changed, 18 insertions(+), 13 deletions(-)
diff --git a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
index 19c627d479..27e21cbc7e 100644
--- a/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
+++ b/arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h
@@ -70,15 +70,19 @@ public:
      * @param[in] fused_act Activation layer information in case of a fused activation.
      *
      */
-    void configure(TensorType *input,
-                   TensorType *weights,
-                   TensorType *bias,
-                   TensorType *output,
-                   const TensorType *mean,
-                   const TensorType *var,
-                   const TensorType *beta,
-                   const TensorType *gamma,
-                   float epsilon, const PadStrideInfo &conv_info, unsigned int num_groups, bool fast_math, ActivationLayerInfo const &fused_act)
+    void configure(TensorType                *input,
+                   TensorType                *weights,
+                   TensorType                *bias,
+                   TensorType                *output,
+                   const TensorType          *mean,
+                   const TensorType          *var,
+                   const TensorType          *beta,
+                   const TensorType          *gamma,
+                   float                      epsilon,
+                   const PadStrideInfo       &conv_info,
+                   unsigned int               num_groups,
+                   bool                       fast_math,
+                   ActivationLayerInfo const &fused_act)
     {
         // We don't run any validate, as we assume that the layers have been already validated
         const bool has_bias = (bias != nullptr);
@@ -86,7 +90,7 @@ public:
 
         // We check if the layer has a bias. If yes, use it in-place. If not, we need to create one
         // as batch normalization might end up with a bias != 0
-        if(has_bias)
+        if (has_bias)
         {
             _fused_batch_norm_layer.configure(weights, mean, var, nullptr, nullptr, bias, beta, gamma, epsilon);
             bias_to_use = bias;
@@ -97,9 +101,10 @@ public:
             bias_to_use = &_fused_bias;
         }
 
-        _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
+        _conv_layer.configure(input, weights, bias_to_use, output, conv_info, WeightsInfo(), Size2D(1U, 1U), fused_act,
+                              fast_math, num_groups);
 
-        if(!has_bias)
+        if (!has_bias)
         {
             _fused_bias.allocator()->allocate();
         }
@@ -114,7 +119,7 @@ public:
     void prepare()
     {
-        if(!_is_prepared)
+        if (!_is_prepared)
         {
             _fused_batch_norm_layer.run();
             _is_prepared = true;
         }
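
As background for the bias handling in this diff (this is the standard
batch-normalization folding identity, not something introduced by the commit):
fusing a batch normalization layer with mean \mu, variance \sigma^2, scale \gamma,
offset \beta and numerical constant \epsilon into a preceding convolution rescales
the weights and shifts the bias:

    w' = \frac{\gamma}{\sqrt{\sigma^{2} + \epsilon}} \, w,
    \qquad
    b' = \frac{\gamma \, (b - \mu)}{\sqrt{\sigma^{2} + \epsilon}} + \beta

Even for a convolution without a bias (b = 0), the fused bias
b' = \beta - \gamma\mu / \sqrt{\sigma^{2} + \epsilon} is generally non-zero, which
is why configure() allocates _fused_bias whenever has_bias is false, exactly as the
code comment in the hunk above notes.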