author     Michele Di Giorgio <michele.digiorgio@arm.com>    2018-05-31 17:31:05 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>         2018-11-02 16:52:54 +0000
commit     b62280aca3148dd6762e57e5af3da0cb0a9e2db5 (patch)
tree       aa10c3750dcb8b13151d40529facf92667c336c9 /arm_compute
parent     da2491fb6d3cefb69846f220356fff282486495c (diff)
download   ComputeLibrary-b62280aca3148dd6762e57e5af3da0cb0a9e2db5.tar.gz
COMPMID-1244: Allow retaining weights in CLGEMMConvolutionLayer and CLFullyConnectedLayer
Change-Id: I1c3b2197906cd4b905309bbd5f2012bbae6a7dba
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/133730
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/Types.h                                     20
-rw-r--r--  arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h    34
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h    1
3 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 46e6dba1a0..639170f0fd 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -946,18 +946,19 @@ class WeightsInfo
public:
/** Default constructor */
WeightsInfo()
- : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0)
+ : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
{
}
/** Constructor
*
- * @param[in] are_reshaped True if the weights have been reshaped
- * @param[in] kernel_width Kernel width.
- * @param[in] kernel_height Kernel height.
- * @param[in] num_kernels Number of convolution kernels.
+ * @param[in] are_reshaped True if the weights have been reshaped
+ * @param[in] kernel_width Kernel width.
+ * @param[in] kernel_height Kernel height.
+ * @param[in] num_kernels Number of convolution kernels.
+ * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
*/
- WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels)
- : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels)
+ WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
+ : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
{
}
/** Flag which specifies if the weights tensor has been reshaped.
@@ -984,12 +985,17 @@ public:
{
return std::make_pair(_kernel_width, _kernel_height);
}
+ bool retain_internal_weights() const
+ {
+ return _retain_internal_weights;
+ }
private:
const bool _are_reshaped;
const unsigned int _kernel_width;
const unsigned int _kernel_height;
const unsigned int _num_kernels;
+ const bool _retain_internal_weights;
};
/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
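
The Types.h hunk above adds a fifth, optional constructor argument and a retain_internal_weights() accessor to WeightsInfo. A minimal sketch of constructing and querying the new field; the kernel size (3x3) and kernel count (64) are illustrative values, not taken from the patch:

#include "arm_compute/core/Types.h"

using namespace arm_compute;

int main()
{
    // Weights not yet reshaped, 3x3 kernels, 64 of them; ask the consuming
    // function to keep its internally reshaped copy for later reconfiguration.
    WeightsInfo w_info(false, 3U, 3U, 64U, true /* retain_internal_weights */);

    // Accessor added by this patch.
    return w_info.retain_internal_weights() ? 0 : 1;
}
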
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 7fb5af9229..127d8acf10 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -86,26 +86,32 @@ public:
CLFullyConnectedLayer &operator=(CLFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
+ * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
+ * Used for reconfiguration purposes.
*/
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false,
+ bool retain_internal_weights = false);
/** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
*
- * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[in] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[in] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
+ * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
+ * Used for reconfiguration purposes.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false,
+ bool retain_internal_weights = false);
//Inherited methods override
void run() override;
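
The new configure()/validate() parameter targets reconfiguration: a function that has already reshaped its weights can be configured again without redoing that work. Below is a minimal, hypothetical sketch of that flow, assuming an OpenCL runtime is available; the square 128x128 weight shape and the configure-run-reconfigure pattern are illustrative only, not taken from the library's tests.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative shapes: 128 inputs, 128 outputs (square weight matrix).
    CLTensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(128U, 128U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));

    CLFullyConnectedLayer fc;
    // First configuration: weights are reshaped internally as usual.
    fc.configure(&src, &weights, &bias, &dst);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();
    fc.run();

    // Reconfigure, asking the function to keep its internally reshaped weights
    // (transpose_weights = true, are_weights_reshaped = false, retain_internal_weights = true).
    fc.configure(&src, &weights, &bias, &dst, true, false, true);
    fc.run();

    CLScheduler::get().sync();
    return 0;
}
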
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 3dde52989b..aaa432616d 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -195,6 +195,7 @@ private:
bool _is_quantized;
bool _is_activationlayer_enabled;
bool _is_prepared;
+ bool _retain_internal_weights;
};
}
#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */
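
For CLGEMMConvolutionLayer the flag is carried by the WeightsInfo argument rather than by a dedicated parameter, which is why only a _retain_internal_weights member appears in this header. The sketch below is hypothetical: it assumes a configure(input, weights, biases, output, conv_info, weights_info) overload for this release, and that the function reads WeightsInfo::retain_internal_weights(); shapes, strides and the exact parameter list are assumptions, not taken from the patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative shapes (width x height x channels): 32x32 input with 16
    // channels, 32 kernels of size 3x3, unit stride, no padding.
    CLTensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(30U, 30U, 32U), 1, DataType::F32));

    // WeightsInfo carries the new flag; passing it here is how this sketch
    // assumes the convolution function is told to retain its reshaped weights.
    const WeightsInfo w_info(false, 3U, 3U, 32U, true /* retain_internal_weights */);

    CLGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0), w_info);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();
    conv.run();

    CLScheduler::get().sync();
    return 0;
}
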