From 9d0c4deb760efc2ca07e5e0b8218995201ad8a1f Mon Sep 17 00:00:00 2001
From: Gunes Bayir
Date: Thu, 13 Apr 2023 18:22:58 +0100
Subject: Add quantized CL MatMul kernels for Lhs NT/T, Rhs NT

Implement OpenCL kernels for batched Matrix Multiplication for the
quantized data types QASYMM8 and QASYMM8_SIGNED.

Quantized MatMul is supported with the following MatMul attributes:
* adj_x = false, adj_y = false
* adj_x = true, adj_y = false

We consider native format kernels only. In other words, no reshaping
of the operand matrices is done.

Resolves: COMPMID-5921, COMPMID-5922

Change-Id: I99e0f68054a2bd635c60ec2641acc2e7ff398473
Signed-off-by: Omar Al Khatib
Signed-off-by: Gunes Bayir
Signed-off-by: Jakub Sujak
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9435
Reviewed-by: SiCong Li
Reviewed-by: Viet-Hoa Do
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 Android.bp | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'Android.bp')

diff --git a/Android.bp b/Android.bp
index 4bd307447b..32651b539c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -51,6 +51,7 @@ opencl_srcs = [
     "src/core/CL/cl_kernels/common/instance_normalization.cl",
     "src/core/CL/cl_kernels/common/l2_normalize.cl",
     "src/core/CL/cl_kernels/common/mat_mul.cl",
+    "src/core/CL/cl_kernels/common/mat_mul_quantized.cl",
     "src/core/CL/cl_kernels/common/mean_stddev_normalization.cl",
     "src/core/CL/cl_kernels/common/memset.cl",
     "src/core/CL/cl_kernels/common/minmax_layer.cl",
@@ -695,6 +696,7 @@ cc_library_static {
     "src/gpu/cl/kernels/ClIm2ColKernel.cpp",
     "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.cpp",
     "src/gpu/cl/kernels/ClIndirectConv2dKernel.cpp",
+    "src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp",
    "src/gpu/cl/kernels/ClMatMulNativeKernel.cpp",
     "src/gpu/cl/kernels/ClMulKernel.cpp",
     "src/gpu/cl/kernels/ClPermuteKernel.cpp",
--
cgit v1.2.1
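
Note: the sketch below is not part of the patch or of the Compute Library API; it is a minimal host-side C++ reference, under assumed per-tensor quantization parameters, of what a QASYMM8 native MatMul computes for the supported attribute combinations (adj_x = false/true, adj_y = false). The function and struct names are illustrative only.

```cpp
// Reference sketch (illustrative, not the OpenCL kernel): dequantize, accumulate,
// requantize for a single QASYMM8 matrix multiplication with optional LHS transpose.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

struct QuantInfo { float scale; int32_t zero_point; };  // assumed per-tensor quantization

// lhs is M x K (or K x M when adj_x is true), rhs is K x N (adj_y = false), dst is M x N;
// all buffers are row-major QASYMM8 (uint8_t).
void matmul_qasymm8_reference(const std::vector<uint8_t> &lhs, QuantInfo lhs_q,
                              const std::vector<uint8_t> &rhs, QuantInfo rhs_q,
                              std::vector<uint8_t> &dst, QuantInfo dst_q,
                              int M, int N, int K, bool adj_x)
{
    for (int m = 0; m < M; ++m)
    {
        for (int n = 0; n < N; ++n)
        {
            float acc = 0.f;
            for (int k = 0; k < K; ++k)
            {
                // adj_x selects the transposed LHS layout; RHS is always non-transposed here.
                const uint8_t a = adj_x ? lhs[k * M + m] : lhs[m * K + k];
                const uint8_t b = rhs[k * N + n];
                acc += (a - lhs_q.zero_point) * lhs_q.scale *
                       (b - rhs_q.zero_point) * rhs_q.scale;
            }
            // Requantize the accumulator into the output quantization space and clamp.
            const int32_t q = static_cast<int32_t>(std::lround(acc / dst_q.scale)) + dst_q.zero_point;
            dst[m * N + n] = static_cast<uint8_t>(std::clamp(q, 0, 255));
        }
    }
}
```

For QASYMM8_SIGNED the same computation applies with int8_t storage and a [-128, 127] clamp; the batched case simply repeats this per 2D slice, since the native kernels perform no reshaping of the operands.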