diff options
author | Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com> | 2023-06-27 14:25:58 +0100 |
---|---|---|
committer | Mohmun02 <MohammedSuhail.Munshi@arm.com> | 2023-07-11 08:53:19 +0000 |
commit | 8e2dedea8550b1c18c3bbeead8c972f661dcfac8 (patch) | |
tree | 61cd0326b9690e343d62a5c72d935fcd68017eb9 /src/gpu/cl/kernels/ClMatMulNativeKernel.h | |
parent | 5ff480265a110ea1f2ce24491e082f52348b0f92 (diff) | |
download | ComputeLibrary-8e2dedea8550b1c18c3bbeead8c972f661dcfac8.tar.gz |
Add Bias to MatMul Kernels and add support for use in Fully Connected Layer
Resolves: [COMPMID-6316]
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: I08e6bac9e6b46b76978da0dc6a48ccfe3dde5086
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9833
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/gpu/cl/kernels/ClMatMulNativeKernel.h')
-rw-r--r-- | src/gpu/cl/kernels/ClMatMulNativeKernel.h | 12 |
1 file changed, 7 insertions, 5 deletions
diff --git a/src/gpu/cl/kernels/ClMatMulNativeKernel.h b/src/gpu/cl/kernels/ClMatMulNativeKernel.h index 02d8ac3067..fe2b787c12 100644 --- a/src/gpu/cl/kernels/ClMatMulNativeKernel.h +++ b/src/gpu/cl/kernels/ClMatMulNativeKernel.h @@ -43,15 +43,16 @@ public: /** Initialise the kernel's input and output. * * @param[in] compile_context The compile context to be used. - * @param[in] lhs Input tensor for the LHS matrix. Data type supported: F32/F16. + * @param[in] lhs Input tensor info for the LHS matrix. Data type supported: F32/F16. * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. - * @param[in] rhs Input tensor for the RHS matrix. Data type supported: same as @p lhs. + * @param[in] rhs Input tensor info for the RHS matrix. Data type supported: same as @p lhs. * Dimensions above 2 are collapsed onto dimension 2 and represent the batch. + * @param[in] bias Bias tensor info for bias matrix. Can be nullptr. Data type supported: same as @p lhs. * @param[out] dst Output tensor info. Data type supported: same as @p lhs * @param[in] matmul_kernel_info Attributes for Batch MatMul Kernel - * @param[in] act_info Specifies activation function to use after Matrix multiplication. Default is Identity function. + * @param[in] act_info (Optional) Specifies activation function to use after Matrix multiplication. Default is Identity function. 
*/ - void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + void configure(const ClCompileContext &compile_context, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *bias, ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration * @@ -59,7 +60,8 @@ public: * * @return a status */ - static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *bias, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; |