author     Giorgio Arena <giorgio.arena@arm.com>  2021-09-24 14:04:27 +0100
committer  Giorgio Arena <giorgio.arena@arm.com>  2021-09-29 10:31:08 +0000
commit     63e0beb9fb9646407d123e830165546e9129e95d (patch)
tree       9bfe80e8d853327a82f9f622d89c3b43df0400f4 /arm_compute/core/Types.h
parent     b1ba1e33f2b03b211f561123559c24517c0e5865 (diff)
Add support for non-constant weights and biases in CpuFullyConnected
Change the approach for specifying that the weights and biases tensors are
non-constant: make it a member of TensorInfo rather than an option passed to
the functions.

Resolves: COMPMID-4222, COMPMID-4811

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: I9b0081ccbcf8271ce029ba6755563d64c59e1d32
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6313
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
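In practice, the change moves the "these values may change between runs" signal
from the function-level info structs onto the tensor metadata itself. The
following is a minimal, hypothetical sketch of a caller opting into
non-constant weights and biases for a fully connected layer; it assumes
TensorInfo::set_are_values_constant() is the setter added on the TensorInfo
side of this patch (not visible in the Types.h hunks below), and all shapes
are illustrative.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src, weights, biases, dst;

        // Illustrative shapes: 128 inputs, 32 outputs, FP32.
        src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));

        // New approach: constancy is a property of the tensor metadata, not a
        // flag threaded through FullyConnectedLayerInfo or GEMMInfo.
        TensorInfo weights_info(TensorShape(128U, 32U), 1, DataType::F32);
        weights_info.set_are_values_constant(false); // weights may vary between runs
        weights.allocator()->init(weights_info);

        TensorInfo biases_info(TensorShape(32U), 1, DataType::F32);
        biases_info.set_are_values_constant(false); // biases may vary between runs
        biases.allocator()->init(biases_info);

        NEFullyConnectedLayer fc;
        fc.configure(&src, &weights, &biases, &dst, FullyConnectedLayerInfo());

        src.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        dst.allocator()->allocate();

        // After filling the tensors, weights/biases can be rewritten and run()
        // called again without reconfiguring the function.
        fc.run();
        return 0;
    }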
Diffstat (limited to 'arm_compute/core/Types.h')
-rw-r--r--  arm_compute/core/Types.h | 19 +++----------------
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 0acbb3f59e..31199e138b 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1557,7 +1557,6 @@ struct FullyConnectedLayerInfo
     bool transpose_weights{ true };        /**< Transpose weights if true. */
     bool are_weights_reshaped{ false };    /**< Reshape the weights tensor if false. */
     bool retain_internal_weights{ false }; /**< Retain internal reshaped weights. */
-    bool constant_weights{ true };         /**< If false, weights can vary between runs. */
 
     /* Other parameters */
     bool fp_mixed_precision{ false };      /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
@@ -1965,8 +1964,7 @@ public:
           _fp_mixed_precision(false),
           _broadcast_bias(false),
           _pretranspose_B(true),
-          _activation_info(),
-          _constant_weights(true)
+          _activation_info()
     {
     }
     /** Constructor
@@ -1984,11 +1982,10 @@ public:
      * @param[in] fast_math        (Optional) Use a data type of shorter width to improve performance
      * @param[in] broadcast_bias   (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
      * @param[in] activation_info  (Optional) Activation to apply after the matrix multiplication
-     * @param[in] constant_weights (Optional) Weights have constant values throughout multiple executions
      */
     GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
              GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool fast_math = false, bool broadcast_bias = false,
-             const ActivationLayerInfo &activation_info = ActivationLayerInfo(), bool constant_weights = true) noexcept
+             const ActivationLayerInfo &activation_info = ActivationLayerInfo()) noexcept
         : _is_a_reshaped(is_a_reshaped),
           _is_b_reshaped(is_b_reshaped),
           _reshape_b_only_on_first_run(reshape_b_only_on_first_run),
@@ -2000,8 +1997,7 @@ public:
           _fp_mixed_precision(fp_mixed_precision),
           _broadcast_bias(broadcast_bias),
           _pretranspose_B(reshape_b_only_on_first_run),
-          _activation_info(activation_info),
-          _constant_weights(constant_weights)
+          _activation_info(activation_info)
     {
     }
     /** Flag which specifies if the matrix A has been reshaped
@@ -2126,14 +2122,6 @@ public:
     {
         _activation_info = activation_info;
     }
-    /** Flag which specifies if the values of the weights tensor are constant throughout multiple executions or not
-     *
-     * @return True if the weights tensor is constant
-     */
-    bool constant_weights() const
-    {
-        return _constant_weights;
-    };
 
 private:
     bool _is_a_reshaped;
@@ -2148,7 +2136,6 @@ private:
     bool _broadcast_bias;
     bool _pretranspose_B;
     ActivationLayerInfo _activation_info;
-    bool _constant_weights;
 };
 
 /** Winograd information */
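For GEMM-level callers, removing the trailing constant_weights parameter means
the same intent now has to be expressed on matrix B's TensorInfo. A short,
hypothetical before/after sketch, under the same assumption that
set_are_values_constant() is the new TensorInfo setter:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    int main()
    {
        // Before this patch, a caller passed the flag as the last GEMMInfo argument:
        //   GEMMInfo(false, false, true, 0, false, false, GEMMLowpOutputStageInfo(),
        //            false, false, false, ActivationLayerInfo(),
        //            /* constant_weights = */ false);

        // After this patch, GEMMInfo no longer carries the flag...
        GEMMInfo info(false, false, true); // is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run

        // ...and mutability is declared on the tensor metadata instead.
        TensorInfo b_info(TensorShape(32U, 128U), 1, DataType::F32);
        b_info.set_are_values_constant(false); // matrix B may be updated between runs

        return 0;
    }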