path: root/arm_compute/core/Types.h
Diffstat (limited to 'arm_compute/core/Types.h')
-rw-r--r--  arm_compute/core/Types.h  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index ec9c419dbc..2dc9a77c39 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1954,7 +1954,8 @@ public:
_fp_mixed_precision(false),
_broadcast_bias(false),
_pretranpose_B(true),
- _activation_info()
+ _activation_info(),
+ _constant_weights(true)
{
}
/** Constructor
@@ -1971,10 +1972,11 @@ public:
* @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
* @param[in] broadcast_bias (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
* @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
+ * @param[in] constant_weights (Optional) Weights have constant values throughout multiple executions
*/
GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool broadcast_bias = false,
- const ActivationLayerInfo &activation_info = ActivationLayerInfo()) noexcept
+ const ActivationLayerInfo &activation_info = ActivationLayerInfo(), bool constant_weights = true) noexcept
: _is_a_reshaped(is_a_reshaped),
_is_b_reshaped(is_b_reshaped),
_reshape_b_only_on_first_run(reshape_b_only_on_first_run),
@@ -1985,7 +1987,8 @@ public:
_fp_mixed_precision(fp_mixed_precision),
_broadcast_bias(broadcast_bias),
_pretranpose_B(reshape_b_only_on_first_run),
- _activation_info(activation_info)
+ _activation_info(activation_info),
+ _constant_weights(constant_weights)
{
}
/** Flag which specifies if the matrix A has been reshaped
@@ -2102,6 +2105,14 @@ public:
{
_activation_info = activation_info;
}
+ /** Flag which specifies if the values of the weights tensor are constant throughout multiple executions or not
+ *
+ * @return True if the weights tensor is constant
+ */
+ bool constant_weights() const
+ {
+ return _constant_weights;
+ };
private:
bool _is_a_reshaped;
@@ -2115,6 +2126,7 @@ private:
bool _broadcast_bias;
bool _pretranpose_B;
ActivationLayerInfo _activation_info;
+ bool _constant_weights;
};
/** Winograd information */
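
For context, the new constant_weights flag is the last constructor argument and defaults to true, so existing callers are unaffected. Below is a minimal usage sketch; it is not part of this patch, it only exercises the constructor and getter shown in the diff above, and it assumes the header is included via its path arm_compute/core/Types.h:

    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // Describe a GEMM whose weights (matrix B) may change between executions,
    // so a backend should not assume it can pre-process them only once.
    GEMMInfo gemm_info(false /* is_a_reshaped */,
                       false /* is_b_reshaped */,
                       false /* reshape_b_only_on_first_run */,
                       0     /* depth_output_gemm3d */,
                       false /* reinterpret_input_as_3d */,
                       false /* retain_internal_weights */,
                       GEMMLowpOutputStageInfo(),
                       false /* fp_mixed_precision */,
                       false /* broadcast_bias */,
                       ActivationLayerInfo(),
                       false /* constant_weights */);

    // Code that consumes GEMMInfo can query the flag before deciding whether
    // cached weight transformations remain valid across runs.
    if(!gemm_info.constant_weights())
    {
        // e.g. redo any weight reshaping on every execution (illustrative only)
    }
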