diff options
author | ramelg01 <ramy.elgammal@arm.com> | 2021-09-17 17:36:57 +0100 |
---|---|---|
committer | ramy.elgammal <ramy.elgammal@arm.com> | 2021-09-22 09:44:25 +0000 |
commit | cbbb03813b79b7f0274b18436a78a79ff31e469e (patch) | |
tree | a80e529e419ddff1ef2f5fa72dfa37d29bf53977 /arm_compute | |
parent | 3ae3d88c1a305ef4fc0beed8fda3cfc39ddb2ae8 (diff) | |
download | ComputeLibrary-cbbb03813b79b7f0274b18436a78a79ff31e469e.tar.gz |
Provide logging for configure functions in all NEON functions
Partially Resolves: COMPMID-4718
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: I655268c57fa126d9c99981c49d345a3aac75646e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6286
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/core/Types.h | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h index 0dd1afc240..0acbb3f59e 100644 --- a/arm_compute/core/Types.h +++ b/arm_compute/core/Types.h @@ -1964,7 +1964,7 @@ public: _fast_math(false), _fp_mixed_precision(false), _broadcast_bias(false), - _pretranpose_B(true), + _pretranspose_B(true), _activation_info(), _constant_weights(true) { @@ -1999,7 +1999,7 @@ public: _fast_math(fast_math), _fp_mixed_precision(fp_mixed_precision), _broadcast_bias(broadcast_bias), - _pretranpose_B(reshape_b_only_on_first_run), + _pretranspose_B(reshape_b_only_on_first_run), _activation_info(activation_info), _constant_weights(constant_weights) { @@ -2098,17 +2098,17 @@ public: * * @return True if b should be pre-transposed else false. */ - bool pretranpose_B() const + bool pretranspose_B() const { - return _pretranpose_B; + return _pretranspose_B; }; /** Set pre-transpose b flag * * @param[in] flag Flag to set */ - void set_pretranpose_B(bool flag) + void set_pretranspose_B(bool flag) { - _pretranpose_B = flag; + _pretranspose_B = flag; } /** Activation layer to apply after the matrix multiplication * @@ -2146,7 +2146,7 @@ private: bool _fast_math; bool _fp_mixed_precision; bool _broadcast_bias; - bool _pretranpose_B; + bool _pretranspose_B; ActivationLayerInfo _activation_info; bool _constant_weights; }; |