From b62280aca3148dd6762e57e5af3da0cb0a9e2db5 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Thu, 31 May 2018 17:31:05 +0100
Subject: COMPMID-1244: Allow retaining weights in CLGEMMConvolutionLayer and
 CLFullyConnectedLayer

Change-Id: I1c3b2197906cd4b905309bbd5f2012bbae6a7dba
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133730
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 .../runtime/CL/functions/CLFullyConnectedLayer.h  | 34 +++++++++++++---------
 .../runtime/CL/functions/CLGEMMConvolutionLayer.h |  1 +
 2 files changed, 21 insertions(+), 14 deletions(-)

(limited to 'arm_compute/runtime')

diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 7fb5af9229..127d8acf10 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -86,26 +86,32 @@ public:
     CLFullyConnectedLayer &operator=(CLFullyConnectedLayer &&) = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input                Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
-     * @param[in]  weights              Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
-     * @param[in]  biases               Bias tensor. It can be nullptr. Data type supported:Same as @p input.
-     * @param[out] output               Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  transpose_weights    (Optional) Transpose weights if true. Defaults to true.
-     * @param[in]  are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+     * @param[in]  input                   Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in]  weights                 Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+     * @param[in]  biases                  Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+     * @param[out] output                  Destination tensor. Data type supported: Same as @p input.
+     * @param[in]  transpose_weights       (Optional) Transpose weights if true. Defaults to true.
+     * @param[in]  are_weights_reshaped    (Optional) Reshape the weights tensor if false. Defaults to false.
+     * @param[in]  retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
+     *                                     Used for reconfiguration purposes.
      */
-    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false,
+                   bool retain_internal_weights = false);
     /** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
      *
-     * @param[in] input                Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
-     * @param[in] weights              Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
-     * @param[in] biases               Bias tensor. It can be nullptr. Data type supported:Same as @p input.
-     * @param[in] output               Destination tensor. Data type supported: Same as @p input.
-     * @param[in] transpose_weights    (Optional) Transpose weights if true. Defaults to true.
-     * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+     * @param[in] input                   Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in] weights                 Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+     * @param[in] biases                  Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+     * @param[in] output                  Destination tensor. Data type supported: Same as @p input.
+     * @param[in] transpose_weights       (Optional) Transpose weights if true. Defaults to true.
+     * @param[in] are_weights_reshaped    (Optional) Reshape the weights tensor if false. Defaults to false.
+     * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
+     *                                    Used for reconfiguration purposes.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false,
+                           bool retain_internal_weights = false);
 
     //Inherited methods override
     void run() override;
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 3dde52989b..aaa432616d 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -195,6 +195,7 @@ private:
     bool             _is_quantized;
     bool             _is_activationlayer_enabled;
     bool             _is_prepared;
+    bool             _retain_internal_weights;
 };
 }
 #endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */
--
cgit v1.2.1
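
For reference, a minimal usage sketch of the configure() signature added by this patch. The tensors, their setup, and the reconfigure_example() wrapper are hypothetical and not part of the change; only the argument order and the retain_internal_weights flag come from the diff above.

    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

    using namespace arm_compute;

    // Hypothetical helper: configure the layer once, then reconfigure it while
    // keeping the internally reshaped weights (the use case this patch enables).
    void reconfigure_example(CLTensor &input, CLTensor &weights, CLTensor &biases, CLTensor &output)
    {
        CLFullyConnectedLayer fc;

        // First configuration: weights are transposed/reshaped internally as before.
        fc.configure(&input, &weights, &biases, &output);

        // Later reconfiguration: pass retain_internal_weights = true so the
        // internally reshaped weights are kept rather than recomputed.
        fc.configure(&input, &weights, &biases, &output,
                     true  /* transpose_weights */,
                     false /* are_weights_reshaped */,
                     true  /* retain_internal_weights */);
    }

The matching implementation changes live in the .cpp files, which are not shown here since the diff is limited to the headers under arm_compute/runtime.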