From 4403ed3ed09491686a0b182fa498344b005ca812 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Mon, 17 May 2021 13:03:50 +0100
Subject: Add support for dynamic weights in CL FullyConnected layer

Make GEMM use its native version if weights are dynamic. This ensures
no reshape gets performed on the weights tensor

Enable dynamic weights tests for the OpenCL backend

Resolve COMPMID-4223

Signed-off-by: Giorgio Arena
Change-Id: Iccc4806701772cede23e24df09c786914d00034c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5652
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
---
 arm_compute/core/Types.h | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

(limited to 'arm_compute/core/Types.h')

diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index ec9c419dbc..2dc9a77c39 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1954,7 +1954,8 @@ public:
           _fp_mixed_precision(false),
           _broadcast_bias(false),
           _pretranpose_B(true),
-          _activation_info()
+          _activation_info(),
+          _constant_weights(true)
     {
     }
     /** Constructor
@@ -1971,10 +1972,11 @@ public:
      * @param[in] fp_mixed_precision    (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy.
      * @param[in] broadcast_bias        (Optional) Broadcast the shape of the bias tensor from a vector to a matrix.
      * @param[in] activation_info       (Optional) Activation to apply after the matrix multiplication
+     * @param[in] constant_weights      (Optional) Weights have constant values throughout multiple executions
      */
     GEMMInfo(bool is_a_reshaped, bool is_b_reshaped, bool reshape_b_only_on_first_run, int depth_output_gemm3d = 0, bool reinterpret_input_as_3d = false, bool retain_internal_weights = false,
              GEMMLowpOutputStageInfo gemmlowp_output_stage = GEMMLowpOutputStageInfo(), bool fp_mixed_precision = false, bool broadcast_bias = false,
-             const ActivationLayerInfo &activation_info = ActivationLayerInfo()) noexcept
+             const ActivationLayerInfo &activation_info = ActivationLayerInfo(), bool constant_weights = true) noexcept
         : _is_a_reshaped(is_a_reshaped),
           _is_b_reshaped(is_b_reshaped),
           _reshape_b_only_on_first_run(reshape_b_only_on_first_run),
@@ -1985,7 +1987,8 @@ public:
           _fp_mixed_precision(fp_mixed_precision),
           _broadcast_bias(broadcast_bias),
           _pretranpose_B(reshape_b_only_on_first_run),
-          _activation_info(activation_info)
+          _activation_info(activation_info),
+          _constant_weights(constant_weights)
     {
     }
     /** Flag which specifies if the matrix A has been reshaped
@@ -2102,6 +2105,14 @@ public:
     {
         _activation_info = activation_info;
     }
+    /** Flag which specifies if the values of the weights tensor are constant throughout multiple executions or not
+     *
+     * @return True if the weights tensor is constant
+     */
+    bool constant_weights() const
+    {
+        return _constant_weights;
+    };
 
 private:
     bool _is_a_reshaped;
@@ -2115,6 +2126,7 @@ private:
     bool _broadcast_bias;
     bool _pretranpose_B;
     ActivationLayerInfo _activation_info;
+    bool _constant_weights;
 };
 
 /** Winograd information */
--
cgit v1.2.1
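
For context, a minimal sketch of how a caller could drive the new flag. It relies
only on the GEMMInfo constructor signature and the constant_weights() getter shown
in the diff above; the main() wrapper and variable names are illustrative, and real
code would pass the GEMMInfo on when configuring the GEMM/FullyConnected function.

    #include "arm_compute/core/Types.h"

    int main()
    {
        using namespace arm_compute;

        // Default-constructed GEMMInfo keeps the previous behaviour: the
        // weights are treated as constant, so the backend may reshape them
        // once and reuse the result across executions.
        GEMMInfo info_static{};
        bool is_const = info_static.constant_weights(); // true

        // Passing the new trailing parameter as false marks the weights as
        // dynamic; per the commit message, GEMM then uses its native version
        // so no reshape is performed on the weights tensor.
        GEMMInfo info_dynamic(false,                     // is_a_reshaped
                              false,                     // is_b_reshaped
                              false,                     // reshape_b_only_on_first_run
                              0,                         // depth_output_gemm3d
                              false,                     // reinterpret_input_as_3d
                              false,                     // retain_internal_weights
                              GEMMLowpOutputStageInfo(), // gemmlowp_output_stage
                              false,                     // fp_mixed_precision
                              false,                     // broadcast_bias
                              ActivationLayerInfo(),     // activation_info
                              false);                    // constant_weights
        bool is_dynamic = !info_dynamic.constant_weights(); // true

        return (is_const && is_dynamic) ? 0 : 1;
    }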