aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/function_info
diff options
context:
space:
mode:
authorFelix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>2023-09-27 17:46:17 +0100
committerfelixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>2023-09-28 12:08:05 +0000
commitafd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree03bc7d5a762099989b16a656fa8d397b490ed70e /arm_compute/function_info
parentbdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
downloadComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang format configuration file (not part of this delivery). Version 14.0.6 is used. Exclusion List: - files with .cl extension - files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...) And the following directories - compute_kernel_writer/validation/ - tests/ - include/ - src/core/NEON/kernels/convolution/ - src/core/NEON/kernels/arm_gemm/ - src/core/NEON/kernels/arm_conv/ - data/ There will be a follow up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/. Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391 Benchmark: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'arm_compute/function_info')
-rw-r--r--arm_compute/function_info/ActivationLayerInfo.h29
-rw-r--r--arm_compute/function_info/ConvolutionInfo.h14
-rw-r--r--arm_compute/function_info/FullyConnectedLayerInfo.h12
-rw-r--r--arm_compute/function_info/GEMMInfo.h44
-rw-r--r--arm_compute/function_info/MatMulInfo.h4
5 files changed, 61 insertions, 42 deletions
diff --git a/arm_compute/function_info/ActivationLayerInfo.h b/arm_compute/function_info/ActivationLayerInfo.h
index 84e962cb3a..195b67cf99 100644
--- a/arm_compute/function_info/ActivationLayerInfo.h
+++ b/arm_compute/function_info/ActivationLayerInfo.h
@@ -39,17 +39,17 @@ enum class ActivationFunction
RELU, /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
BOUNDED_RELU, /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
- LEAKY_RELU, /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} \alpha x & \quad \text{if } x \text{ < 0}\\ x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
- SOFT_RELU, /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
- ELU, /**< Exponential Linear Unit ( \f$ f(x) = \begin{cases} \alpha (exp(x) - 1) & \quad \text{if } x \text{ < 0}\\ x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
- ABS, /**< Absolute ( \f$ f(x)= |x| \f$ ) */
- SQUARE, /**< Square ( \f$ f(x)= x^2 \f$ )*/
- SQRT, /**< Square root ( \f$ f(x) = \sqrt{x} \f$ )*/
- LINEAR, /**< Linear ( \f$ f(x)= ax + b \f$ ) */
- IDENTITY, /**< Identity ( \f$ f(x)= x \f$ ) */
- HARD_SWISH, /**< Hard-swish ( \f$ f(x) = (x \text{ReLU6}(x+3))/6 = x \min(\max(0,x+3),6)/6 \f$ ) */
- SWISH, /**< Swish ( \f$ f(x) = \frac{x}{1 + e^{-ax}} = x \text{logistic}(ax) \f$ ) */
- GELU /**< GELU ( \f$ f(x) = x * 1/2 * 1 + erf(x / \sqrt{2}) \f$ ) */
+ LEAKY_RELU, /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} \alpha x & \quad \text{if } x \text{ < 0}\\ x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
+ SOFT_RELU, /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
+ ELU, /**< Exponential Linear Unit ( \f$ f(x) = \begin{cases} \alpha (exp(x) - 1) & \quad \text{if } x \text{ < 0}\\ x & \quad \text{if } x \geq \text{ 0 } \end{cases} \f$ ) */
+ ABS, /**< Absolute ( \f$ f(x)= |x| \f$ ) */
+ SQUARE, /**< Square ( \f$ f(x)= x^2 \f$ )*/
+ SQRT, /**< Square root ( \f$ f(x) = \sqrt{x} \f$ )*/
+ LINEAR, /**< Linear ( \f$ f(x)= ax + b \f$ ) */
+ IDENTITY, /**< Identity ( \f$ f(x)= x \f$ ) */
+ HARD_SWISH, /**< Hard-swish ( \f$ f(x) = (x \text{ReLU6}(x+3))/6 = x \min(\max(0,x+3),6)/6 \f$ ) */
+ SWISH, /**< Swish ( \f$ f(x) = \frac{x}{1 + e^{-ax}} = x \text{logistic}(ax) \f$ ) */
+ GELU /**< GELU ( \f$ f(x) = x * 1/2 * 1 + erf(x / \sqrt{2}) \f$ ) */
};
/** Activation Layer Information class */
class ActivationLayerInfo
@@ -68,8 +68,7 @@ public:
* (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
* @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
*/
- ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
- : _act(f), _a(a), _b(b), _enabled(true)
+ ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f) : _act(f), _a(a), _b(b), _enabled(true)
{
}
/** Get the type of activation function */
@@ -104,10 +103,10 @@ public:
}
#endif // __aarch64__
private:
- ActivationFunction _act = { ActivationLayerInfo::ActivationFunction::IDENTITY };
+ ActivationFunction _act = {ActivationLayerInfo::ActivationFunction::IDENTITY};
float _a = {};
float _b = {};
- bool _enabled = { false };
+ bool _enabled = {false};
#ifdef __aarch64__
LookupTable256 _lut = {};
diff --git a/arm_compute/function_info/ConvolutionInfo.h b/arm_compute/function_info/ConvolutionInfo.h
index c27dc523c8..4830cae137 100644
--- a/arm_compute/function_info/ConvolutionInfo.h
+++ b/arm_compute/function_info/ConvolutionInfo.h
@@ -33,14 +33,18 @@ namespace arm_compute
struct ConvolutionInfo
{
ConvolutionInfo() = default;
- ConvolutionInfo(const PadStrideInfo &pad_stride_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation)
+ ConvolutionInfo(const PadStrideInfo &pad_stride_info,
+ unsigned int depth_multiplier,
+ const ActivationLayerInfo &act_info,
+ const Size2D &dilation)
: pad_stride_info(pad_stride_info), depth_multiplier(depth_multiplier), act_info(act_info), dilation(dilation)
{
}
- PadStrideInfo pad_stride_info{}; /**< Convolution info (Pads, strides,...) */
- unsigned int depth_multiplier{ 1 }; /**< Multiplier to apply to input's depth to retrieve the output depth. Defaults to 1 */
- ActivationLayerInfo act_info{}; /**< Fused activation to apply after convolution. */
- Size2D dilation{ Size2D(1, 1) }; /**< Dilation, in elements, across x and y. Defaults to (1, 1). */
+ PadStrideInfo pad_stride_info{}; /**< Convolution info (Pads, strides,...) */
+ unsigned int depth_multiplier{
+ 1}; /**< Multiplier to apply to input's depth to retrieve the output depth. Defaults to 1 */
+ ActivationLayerInfo act_info{}; /**< Fused activation to apply after convolution. */
+ Size2D dilation{Size2D(1, 1)}; /**< Dilation, in elements, across x and y. Defaults to (1, 1). */
};
} // namespace arm_compute
#endif /* ACL_ARM_COMPUTE_FUNCTION_INFO_CONVOLUTIONINFO */
diff --git a/arm_compute/function_info/FullyConnectedLayerInfo.h b/arm_compute/function_info/FullyConnectedLayerInfo.h
index 5f5578eadd..e65daeb2d4 100644
--- a/arm_compute/function_info/FullyConnectedLayerInfo.h
+++ b/arm_compute/function_info/FullyConnectedLayerInfo.h
@@ -35,13 +35,13 @@ struct FullyConnectedLayerInfo
/* Fused-activation parameters */
ActivationLayerInfo activation_info{}; /**< Fused activation to apply after the matrix multiplication. */
/* Information about weights */
- DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
- bool transpose_weights{ true }; /**< Transpose weights if true. */
- bool are_weights_reshaped{ false }; /**< @deprecated Reshape the weights tensor if false. */
- bool retain_internal_weights{ false }; /**< Retain internal reshaped weights. */
- bool enable_fast_math{ false }; /**< Enable fast math computation. */
+ DataLayout weights_trained_layout{DataLayout::NCHW}; /**< Layout that the weights have been trained with. */
+ bool transpose_weights{true}; /**< Transpose weights if true. */
+ bool are_weights_reshaped{false}; /**< @deprecated Reshape the weights tensor if false. */
+ bool retain_internal_weights{false}; /**< Retain internal reshaped weights. */
+ bool enable_fast_math{false}; /**< Enable fast math computation. */
/* Other parameters */
- bool fp_mixed_precision{ false }; /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
+ bool fp_mixed_precision{false}; /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
/** Sets the weights trained data layout
*
diff --git a/arm_compute/function_info/GEMMInfo.h b/arm_compute/function_info/GEMMInfo.h
index 29a57a00c2..c24762c0aa 100644
--- a/arm_compute/function_info/GEMMInfo.h
+++ b/arm_compute/function_info/GEMMInfo.h
@@ -26,6 +26,7 @@
#include "arm_compute/core/CoreTypes.h"
#include "arm_compute/function_info/ActivationLayerInfo.h"
+
#include <vector>
namespace arm_compute
@@ -43,17 +44,22 @@ enum class GEMMLowpOutputStageType
/** GEMMLowp output stage info */
struct GEMMLowpOutputStageInfo
{
- GEMMLowpOutputStageType type{ GEMMLowpOutputStageType::NONE }; /**< GEMMLowp output stage type */
- int32_t gemmlowp_offset{ 0 }; /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
- int32_t gemmlowp_multiplier{ 0 }; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
- int32_t gemmlowp_shift{ 0 }; /**< GEMMLowp output stage shift used for quantizing to uint8 */
- int32_t gemmlowp_min_bound{ std::numeric_limits<int32_t>::lowest() }; /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
- int32_t gemmlowp_max_bound{ std::numeric_limits<int32_t>::max() }; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
- std::vector<int32_t> gemmlowp_multipliers{}; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
- std::vector<int32_t> gemmlowp_shifts{}; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
- float gemmlowp_real_multiplier{ 0 }; /**< GEMMLowp output stage real multiplier used for quantizing to QASYMM8 */
- bool is_quantized_per_channel{ false }; /**< GEMMLowp quantized per-channel flag */
- DataType output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
+ GEMMLowpOutputStageType type{GEMMLowpOutputStageType::NONE}; /**< GEMMLowp output stage type */
+ int32_t gemmlowp_offset{0}; /**< GEMMLowp output stage offset used for quantizing to QASYMM8 */
+ int32_t gemmlowp_multiplier{0}; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
+ int32_t gemmlowp_shift{0}; /**< GEMMLowp output stage shift used for quantizing to uint8 */
+ int32_t gemmlowp_min_bound{
+ std::numeric_limits<int32_t>::
+ lowest()}; /**< GEMMLowp min value used to saturate down the output result before converting back to QASYMM8 */
+ int32_t gemmlowp_max_bound{
+ std::numeric_limits<int32_t>::
+ max()}; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
+ std::vector<int32_t> gemmlowp_multipliers{}; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
+ std::vector<int32_t> gemmlowp_shifts{}; /**< GEMMLowp output stage multiplier used for quantizing to QASYMM8 */
+ float gemmlowp_real_multiplier{0}; /**< GEMMLowp output stage real multiplier used for quantizing to QASYMM8 */
+ bool is_quantized_per_channel{false}; /**< GEMMLowp quantized per-channel flag */
+ DataType output_data_type{
+ DataType::UNKNOWN}; /**< Output tensor data type to use if the output is not initialized */
};
/** GEMM information class. This class stores the necessary information to compute GEMM functions
*
@@ -100,9 +106,19 @@ public:
* @param[in]  fixed_format                 (Optional) Specify the selection of fixed format kernels for variable weights support in GEMM. These kernels expect the weights tensor to be in a memory format that is fixed by the kernel itself. For more information, see arm_compute::WeightFormat.
* @param[in] weight_format (Optional) arm_gemm:WeightFormat enumeration requested by the user. Default is arm_compute::WeightFormat::UNSPECIFIED.
*/
- GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
- GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool fast_math = false, bool broadcast_bias = false,
- const ActivationLayerInfo &activation_info = ActivationLayerInfo(), bool fixed_format = false, arm_compute::WeightFormat weight_format = arm_compute::WeightFormat::UNSPECIFIED) noexcept
+ GEMMInfo(bool is_a_reshaped,
+ bool is_b_reshaped,
+ bool reshape_b_only_on_first_run,
+ int depth_output_gemm3d = 0,
+ bool reinterpret_input_as_3d = false,
+ bool retain_internal_weights = false,
+ GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(),
+ bool fp_mixed_precision = false,
+ bool fast_math = false,
+ bool broadcast_bias = false,
+ const ActivationLayerInfo &activation_info = ActivationLayerInfo(),
+ bool fixed_format = false,
+ arm_compute::WeightFormat weight_format = arm_compute::WeightFormat::UNSPECIFIED) noexcept
: _is_a_reshaped(is_a_reshaped),
_is_b_reshaped(is_b_reshaped),
_reshape_b_only_on_first_run(reshape_b_only_on_first_run),
diff --git a/arm_compute/function_info/MatMulInfo.h b/arm_compute/function_info/MatMulInfo.h
index cd9ef1f4d9..fc73efb44a 100644
--- a/arm_compute/function_info/MatMulInfo.h
+++ b/arm_compute/function_info/MatMulInfo.h
@@ -55,8 +55,8 @@ public:
}
private:
- bool _adj_lhs{ false };
- bool _adj_rhs{ false };
+ bool _adj_lhs{false};
+ bool _adj_rhs{false};
};
} // namespace arm_compute
#endif /* ACL_ARM_COMPUTE_FUNCTION_INFO_MATMULINFO */