aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichele Di Giorgio <michele.digiorgio@arm.com>2021-05-05 15:42:20 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2021-05-11 12:35:41 +0000
commitc3f459dc7904a20bb56f89a32f541fe20b25d857 (patch)
treebf4f4aaf3a52c6c9144e9092cdb6e48c41d86726
parent5a44b3bda0edd9ebdd1aa07f2668a7540788edb1 (diff)
downloadComputeLibrary-c3f459dc7904a20bb56f89a32f541fe20b25d857.tar.gz
Add constant_weights boolean to FullyConnectedLayerInfo
This is needed to enable support for non-constant weights in the Fully Connected Layer, useful in NLP use cases where Fully Connected layers' weights come from other nodes in the graph and therefore change between runs. Resolves: COMPMID-4220 Change-Id: I0c2fe97eeb7554efac7ea9a6d6b7243332615052 Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5579 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--arm_compute/core/Types.h16
1 file changed, 10 insertions, 6 deletions
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index b5fd21d29d..9e054f26dd 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1540,12 +1540,16 @@ private:
/** Fully connected layer info */
struct FullyConnectedLayerInfo
{
- DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
- bool transpose_weights{ true }; /**< Transpose weights if true. */
- bool are_weights_reshaped{ false }; /**< Reshape the weights tensor if false. */
- bool retain_internal_weights{ false }; /**< Retain internal reshaped weights. */
- bool fp_mixed_precision{ false }; /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
- ActivationLayerInfo activation_info{}; /**< Fused activation to apply after the matrix multiplication. */
+ /* Fused-activation parameters */
+ ActivationLayerInfo activation_info{}; /**< Fused activation to apply after the matrix multiplication. */
+ /* Information about weights */
+ DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
+ bool transpose_weights{ true }; /**< Transpose weights if true. */
+ bool are_weights_reshaped{ false }; /**< Reshape the weights tensor if false. */
+ bool retain_internal_weights{ false }; /**< Retain internal reshaped weights. */
+ bool constant_weights{ true }; /**< If false, weights can vary between runs. */
+ /* Other parameters */
+ bool fp_mixed_precision{ false }; /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
/** Sets the weights trained data layout
*