From c3f459dc7904a20bb56f89a32f541fe20b25d857 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Wed, 5 May 2021 15:42:20 +0100
Subject: Add constant_weights boolean to FullyConnectedLayerInfo

This is needed to enable support for non-constant weights in Fully
Connected Layer, useful in NLP use cases where the weights of Fully
Connected layers come from other nodes in the graph and therefore
change between runs.

Resolves: COMPMID-4220

Change-Id: I0c2fe97eeb7554efac7ea9a6d6b7243332615052
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5579
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 arm_compute/core/Types.h | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index b5fd21d29d..9e054f26dd 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1540,12 +1540,16 @@ private:
 /** Fully connected layer info */
 struct FullyConnectedLayerInfo
 {
-    DataLayout          weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
-    bool                transpose_weights{ true };                  /**< Transpose weights if true. */
-    bool                are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
-    bool                retain_internal_weights{ false };           /**< Retain internal reshaped weights. */
-    bool                fp_mixed_precision{ false };                /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
-    ActivationLayerInfo activation_info{};                          /**< Fused activation to apply after the matrix multiplication. */
+    /* Fused-activation parameters */
+    ActivationLayerInfo activation_info{}; /**< Fused activation to apply after the matrix multiplication. */
+    /* Information about weights */
+    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
+    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
+    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
+    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */
+    bool       constant_weights{ true };                   /**< If false, weights can vary between runs. */
+    /* Other parameters */
+    bool       fp_mixed_precision{ false };                /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
 
     /** Sets the weights trained data layout
      *
--
cgit v1.2.1
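
For illustration only (not part of the patch): a minimal sketch of how a caller might use the new flag, assuming a run-time function such as NEFullyConnectedLayer that honors it. The tensor shapes and the surrounding setup are hypothetical; only fc_info.constant_weights comes from this change.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: 128 inputs, 64 outputs. The exact weights
    // layout expected by the function depends on transpose_weights.
    Tensor src{}, weights{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));

    // The new member: declare that the weights are NOT constant, so the
    // function must not assume they keep their first-run contents.
    FullyConnectedLayerInfo fc_info{};
    fc_info.constant_weights = false;

    NEFullyConnectedLayer fc{};
    fc.configure(&src, &weights, nullptr, &dst, fc_info);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src and weights (e.g. from upstream nodes in the graph) ...
    fc.run();

    // In the NLP use case from the commit message, an upstream node
    // rewrites `weights` here, and the next run() picks up the new values.
    fc.run();

    return 0;
}

With the default constant_weights = true, implementations remain free to reshape or transpose the weights once at configure time and reuse the cached copy on every run; setting it to false is what makes per-run weight updates legal.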