diff options
author | Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-27 17:46:17 +0100 |
---|---|---|
committer | felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-28 12:08:05 +0000 |
commit | afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch) | |
tree | 03bc7d5a762099989b16a656fa8d397b490ed70e /arm_compute/function_info/FullyConnectedLayerInfo.h | |
parent | bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff) | |
download | ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz |
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration
file (not part of this delivery). Version 14.0.6 is used.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow up for formatting of .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'arm_compute/function_info/FullyConnectedLayerInfo.h')
-rw-r--r-- | arm_compute/function_info/FullyConnectedLayerInfo.h | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/arm_compute/function_info/FullyConnectedLayerInfo.h b/arm_compute/function_info/FullyConnectedLayerInfo.h index 5f5578eadd..e65daeb2d4 100644 --- a/arm_compute/function_info/FullyConnectedLayerInfo.h +++ b/arm_compute/function_info/FullyConnectedLayerInfo.h @@ -35,13 +35,13 @@ struct FullyConnectedLayerInfo /* Fused-activation parameters */ ActivationLayerInfo activation_info{}; /**< Fused activation to apply after the matrix multiplication. */ /* Information about weights */ - DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */ - bool transpose_weights{ true }; /**< Transpose weights if true. */ - bool are_weights_reshaped{ false }; /**< @deprecated Reshape the weights tensor if false. */ - bool retain_internal_weights{ false }; /**< Retain internal reshaped weights. */ - bool enable_fast_math{ false }; /**< Enable fast math computation. */ + DataLayout weights_trained_layout{DataLayout::NCHW}; /**< Layout that the weights have been trained with. */ + bool transpose_weights{true}; /**< Transpose weights if true. */ + bool are_weights_reshaped{false}; /**< @deprecated Reshape the weights tensor if false. */ + bool retain_internal_weights{false}; /**< Retain internal reshaped weights. */ + bool enable_fast_math{false}; /**< Enable fast math computation. */ /* Other parameters */ - bool fp_mixed_precision{ false }; /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */ + bool fp_mixed_precision{false}; /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */ /** Sets the weights trained data layout * |