author    Giorgio Arena <giorgio.arena@arm.com>    2021-05-17 13:03:50 +0100
committer Giorgio Arena <giorgio.arena@arm.com>    2021-05-20 15:19:39 +0000
commit    4403ed3ed09491686a0b182fa498344b005ca812 (patch)
tree      5a231a71d70a7b3ae2412729d8f6a170b54510f7 /arm_compute/core/Types.h
parent    ea8d266515812c4dec936b2153ffd5335873e583 (diff)
download  ComputeLibrary-4403ed3ed09491686a0b182fa498344b005ca812.tar.gz
Add support for dynamic weights in CL FullyConnected layer
Make GEMM use its native version if the weights are dynamic. This
ensures no reshape gets performed on the weights tensor.

Enable the dynamic weights tests for the OpenCL backend.

Resolves COMPMID-4223.

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: Iccc4806701772cede23e24df09c786914d00034c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5652
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
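As a usage illustration (not part of this patch), here is a minimal sketch of how a caller could mark the weights as dynamic through the new constructor parameter. It relies only on the GEMMInfo constructor shown in the diff below; the surrounding setup (tensors, the FullyConnected/GEMM function configuration) is omitted.

    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // Weights change between executions, so pass constant_weights = false;
    // GEMM can then select its native path and skip reshaping the weights tensor.
    const GEMMInfo gemm_info(false,                     // is_a_reshaped
                             false,                     // is_b_reshaped
                             false,                     // reshape_b_only_on_first_run
                             0,                         // depth_output_gemm3d
                             false,                     // reinterpret_input_as_3d
                             false,                     // retain_internal_weights
                             GEMMLowpOutputStageInfo(), // gemmlowp_output_stage
                             false,                     // fp_mixed_precision
                             false,                     // broadcast_bias
                             ActivationLayerInfo(),     // activation_info
                             false);                    // constant_weights (dynamic)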
Diffstat (limited to 'arm_compute/core/Types.h')
-rw-r--r--  arm_compute/core/Types.h  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index ec9c419dbc..2dc9a77c39 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1954,7 +1954,8 @@ public:
_fp_mixed_precision(false),
_broadcast_bias(false),
_pretranpose_B(true),
- _activation_info()
+ _activation_info(),
+ _constant_weights(true)
{
}
/** Constructor
@@ -1971,10 +1972,11 @@ public:
* @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
* @param[in] broadcast_bias (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
* @param[in] activation_info (Optional) Activation to apply after the matrix multiplication
+ * @param[in] constant_weights (Optional) Weights have constant values throughout multiple executions
*/
GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool broadcast_bias = false,
- const ActivationLayerInfo &activation_info = ActivationLayerInfo()) noexcept
+ const ActivationLayerInfo &activation_info = ActivationLayerInfo(), bool constant_weights = true) noexcept
: _is_a_reshaped(is_a_reshaped),
_is_b_reshaped(is_b_reshaped),
_reshape_b_only_on_first_run(reshape_b_only_on_first_run),
@@ -1985,7 +1987,8 @@ public:
_fp_mixed_precision(fp_mixed_precision),
_broadcast_bias(broadcast_bias),
_pretranpose_B(reshape_b_only_on_first_run),
- _activation_info(activation_info)
+ _activation_info(activation_info),
+ _constant_weights(constant_weights)
{
}
/** Flag which specifies if the matrix A has been reshaped
@@ -2102,6 +2105,14 @@ public:
{
_activation_info = activation_info;
}
+ /** Flag which specifies if the values of the weights tensor are constant throughout multiple executions or not
+ *
+ * @return True if the weights tensor is constant
+ */
+ bool constant_weights() const
+ {
+ return _constant_weights;
+ };
private:
bool _is_a_reshaped;
@@ -2115,6 +2126,7 @@ private:
bool _broadcast_bias;
bool _pretranpose_B;
ActivationLayerInfo _activation_info;
+ bool _constant_weights;
};
/** Winograd information */
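To show what the new getter enables, here is a hypothetical sketch of the kind of dispatch a backend could build on it. GemmPath and select_gemm_path are illustrative names invented for this example, not ComputeLibrary API.

    #include "arm_compute/core/Types.h"

    // Illustrative only: dynamic (non-constant) weights would invalidate any
    // weight reshape cached on the first run, so a backend falls back to a
    // native kernel in that case.
    enum class GemmPath
    {
        Native,      // no persistent weight reshape; safe for dynamic weights
        ReshapedRhs  // reshapes B once and reuses it; needs constant weights
    };

    GemmPath select_gemm_path(const arm_compute::GEMMInfo &info)
    {
        return info.constant_weights() ? GemmPath::ReshapedRhs : GemmPath::Native;
    }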