Diffstat (limited to 'arm_compute')
 arm_compute/core/Types.h                                   | 20
 arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h   | 34
 arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h  |  1
 3 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 46e6dba1a0..639170f0fd 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -946,18 +946,19 @@ class WeightsInfo
public:
/** Default constructor */
WeightsInfo()
- : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0)
+ : _are_reshaped(false), _kernel_width(0), _kernel_height(0), _num_kernels(0), _retain_internal_weights(false)
{
}
/** Constructor
*
- * @param[in] are_reshaped True if the weights have been reshaped
- * @param[in] kernel_width Kernel width.
- * @param[in] kernel_height Kernel height.
- * @param[in] num_kernels Number of convolution kernels.
+ * @param[in] are_reshaped True if the weights have been reshaped
+ * @param[in] kernel_width Kernel width.
+ * @param[in] kernel_height Kernel height.
+ * @param[in] num_kernels Number of convolution kernels.
+ * @param[in] retain_internal_weights (Optional) True if internal reshaped weights must be retained. Used for reconfiguration purposes. Default is false.
*/
- WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels)
- : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels)
+ WeightsInfo(bool are_reshaped, unsigned int kernel_width, unsigned int kernel_height, unsigned int num_kernels, bool retain_internal_weights = false)
+ : _are_reshaped(are_reshaped), _kernel_width(kernel_width), _kernel_height(kernel_height), _num_kernels(num_kernels), _retain_internal_weights(retain_internal_weights)
{
}
/** Flag which specifies if the weights tensor has been reshaped.
@@ -984,12 +985,17 @@ public:
{
return std::make_pair(_kernel_width, _kernel_height);
}
+ bool retain_internal_weights() const
+ {
+ return _retain_internal_weights;
+ }
private:
const bool _are_reshaped;
const unsigned int _kernel_width;
const unsigned int _kernel_height;
const unsigned int _num_kernels;
+ const bool _retain_internal_weights;
};
/** GEMM reshape information class. This class stores the necessary information about matrix A and matrix B reshape.
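A minimal usage sketch of the extended WeightsInfo constructor follows; the kernel dimensions, kernel count and variable names are purely illustrative and not part of this change:

    #include "arm_compute/core/Types.h"

    // Hypothetical 3x3 kernels, 32 output kernels, weights not yet reshaped.
    // The trailing flag asks the function to keep its internally reshaped
    // weights across reconfigurations.
    arm_compute::WeightsInfo weights_info(false /* are_reshaped */, 3U, 3U, 32U,
                                          true /* retain_internal_weights */);
    bool retained = weights_info.retain_internal_weights(); // -> true

Callers that omit the last argument keep the previous behaviour, since it defaults to false.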
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 7fb5af9229..127d8acf10 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -86,26 +86,32 @@ public:
CLFullyConnectedLayer &operator=(CLFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[out] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
+ * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
+ * Used for reconfiguration purposes.
*/
- void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights = true, bool are_weights_reshaped = false,
+ bool retain_internal_weights = false);
/** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
*
- * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
- * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
- * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
- * @param[in] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
- * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
+ * @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
+ * @param[in] output Destination tensor. Data type supported: Same as @p input.
+ * @param[in] transpose_weights (Optional) Transpose weights if true. Defaults to true.
+ * @param[in] are_weights_reshaped (Optional) Reshape the weights tensor if false. Defaults to false.
+ * @param[in] retain_internal_weights (Optional) Retain internal reshaped weights. Defaults to false.
+ * Used for reconfiguration purposes.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, bool transpose_weights = true, bool are_weights_reshaped = false,
+ bool retain_internal_weights = false);
//Inherited methods override
void run() override;
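A hedged sketch of passing the new trailing parameter when reconfiguring a fully connected layer whose reshaped weights should be kept; tensor allocation, CL runtime initialisation and the tensor names themselves are placeholders omitted here:

    #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

    arm_compute::CLFullyConnectedLayer fc;
    // input, weights, biases and output are ICLTensor objects set up elsewhere.
    fc.configure(&input, &weights, &biases, &output,
                 true  /* transpose_weights */,
                 false /* are_weights_reshaped */,
                 true  /* retain_internal_weights */);
    fc.run();

Existing callers are unaffected: the extra argument defaults to false in both configure() and validate().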
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 3dde52989b..aaa432616d 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -195,6 +195,7 @@ private:
bool _is_quantized;
bool _is_activationlayer_enabled;
bool _is_prepared;
+ bool _retain_internal_weights;
};
}
#endif /* __ARM_COMPUTE_CLGEMMCONVOLUTIONLAYER_H__ */
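CLGEMMConvolutionLayer only gains a private member here; the flag presumably reaches the function through the WeightsInfo object handed to configure(). As an assumption-laden sketch (the configure() overload taking a WeightsInfo argument, and the padding/stride values, are illustrative and not shown in this change):

    #include "arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h"

    arm_compute::CLGEMMConvolutionLayer conv;
    arm_compute::PadStrideInfo conv_info(1, 1, 1, 1); // stride 1, pad 1 (illustrative)
    arm_compute::WeightsInfo   w_info(false, 3U, 3U, 32U,
                                      true /* retain_internal_weights */);
    // input, weights, biases and output are ICLTensor objects set up elsewhere.
    conv.configure(&input, &weights, &biases, &output, conv_info, w_info);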