author     Giorgio Arena <giorgio.arena@arm.com>    2018-08-07 11:53:30 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:54:54 +0000
commit     c6aa49b6709edada24b1ab3bc1308e0974f9e057 (patch)
tree       0509684f9f402a33d0494c1fa1f34c18f956fcf1 /arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
parent     be4100605230868b5cc50cabee395613dbfb62cd (diff)
COMPMID-1344 Add grouping support to CLWeightsReshapeKernel
Change-Id: Idde333308db71087ec234b3fd1eb4e36a44db46c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/143049
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h')
-rw-r--r--  arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h  32
1 file changed, 18 insertions, 14 deletions
diff --git a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
index 664fc3c216..6c93c23cec 100644
--- a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
+++ b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
@@ -68,26 +68,30 @@ public:
~CLWeightsReshapeKernel() = default;
/** Set the input and output of the kernel.
*
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QASYMM8/F16/F32
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[out] output The output tensor. Should be a 2D Tensor. Data types supported: Same as @p input
+ * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
+ * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QASYMM8/F16/F32
+ * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
+ * dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
+ * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
+ * @param[out] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
+ * Data types supported: Same as @p input
+ * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
*/
- void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output);
+ void configure(const ICLTensor *input, const ICLTensor *biases, ICLTensor *output, const unsigned int num_groups = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLWeightsReshapeKernel
*
- * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QASYMM8/F16/F32
- * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
- * dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
- * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
- * @param[in] output The output tensor. Should be a 2D Tensor. Data types supported: Same as @p input
+ * @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
+ * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QASYMM8/F16/F32
+ * @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
+ * dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
+ * @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
+ * @param[in] output The output tensor. Should be a 2D Tensor if there are no groups and the weights are not shared; a 3D Tensor otherwise.
+ * Data types supported: Same as @p input
+ * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *biases, const ITensorInfo *output, const unsigned int num_groups = 1);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
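
To illustrate the new overloads, a minimal usage sketch in C++ follows. Only the configure()/validate() signatures come from the header diffed above; the tensor shapes, the function name, and the use of the ShapeCalculator helper (and its num_groups argument) in this version of the library are illustrative assumptions, not part of the change.

#include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

using namespace arm_compute;

Status validate_grouped_weights_reshape()
{
    const unsigned int num_groups = 2; // hypothetical group count

    // Shared weights for a grouped convolution: [kernel_x, kernel_y, IFM / num_groups, OFM]
    // (example shape, chosen so OFM is divisible by num_groups)
    const TensorInfo weights(TensorShape(3U, 3U, 8U, 32U), 1, DataType::F32);

    // Expected reshaped-weights shape (3D when num_groups > 1), assuming the
    // ShapeCalculator helper already accepts a num_groups argument in this version
    const TensorShape reshaped_shape =
        misc::shape_calculator::compute_weights_reshaped_shape(weights, false /* has_bias */, num_groups);
    const TensorInfo output(reshaped_shape, 1, DataType::F32);

    // No biases appended (nullptr), in line with the quantized-asymmetric restriction noted above
    return CLWeightsReshapeKernel::validate(&weights, nullptr, &output, num_groups);
}

The same arguments would be passed to configure() on ICLTensor objects once the static check succeeds; with the default num_groups = 1 the behaviour documented for the previous two-dimensional output is preserved.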