diff options
-rw-r--r-- | arm_compute/core/Types.h | 16 |
1 file changed, 10 insertions, 6 deletions
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index b5fd21d29d..9e054f26dd 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1540,12 +1540,16 @@ private:
 /** Fully connected layer info */
 struct FullyConnectedLayerInfo
 {
-    DataLayout          weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
-    bool                transpose_weights{ true };                  /**< Transpose weights if true. */
-    bool                are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
-    bool                retain_internal_weights{ false };           /**< Retain internal reshaped weights. */
-    bool                fp_mixed_precision{ false };                /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
-    ActivationLayerInfo activation_info{};                          /**< Fused activation to apply after the matrix multiplication. */
+    /* Fused-activation parameters */
+    ActivationLayerInfo activation_info{};                 /**< Fused activation to apply after the matrix multiplication. */
+    /* Information about weights */
+    DataLayout weights_trained_layout{ DataLayout::NCHW }; /**< Layout that the weights have been trained with. */
+    bool       transpose_weights{ true };                  /**< Transpose weights if true. */
+    bool       are_weights_reshaped{ false };              /**< Reshape the weights tensor if false. */
+    bool       retain_internal_weights{ false };           /**< Retain internal reshaped weights. */
+    bool       constant_weights{ true };                   /**< If false, weights can vary between runs. */
+    /* Other parameters */
+    bool fp_mixed_precision{ false };                      /**< Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy. */
 
     /** Sets the weights trained data layout
      *