author    Michael Tyler <michael.tyler@arm.com>    2024-06-04 15:47:37 +0100
committer Michael Tyler <michael.tyler@arm.com>    2024-06-25 09:10:13 +0000
commit    fc94f4d23abd4bc427b701f54ad85282e9ec7872 (patch)
tree      5e2980599256e2b2f4374e5beb61596fc95c9d5a /src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
parent    c2237ec4094c7824f8f7e61bc89504d01c5b59ff (diff)
Update CPU kernels and add mixed sign GEMM support
- Add support for mixed sign quantized convolution.
- Add support for mixed sign dequantized GEMM.
- Add SME FP16 GEMV kernel.
- Change SME vector length function to use RDSVL instead of a static
  variable (a sketch follows this list).
- Add GEMM dilation support internally (not exposed yet).
- Remove unused "get_default_activation_values" functions.
- Add SVE fixed format interleaved BF16 DOT kernel.
- Updates and optimizations to assembly kernels.

Resolves: COMPMID-6926

Change-Id: I227f502502611d4cc4111c89e30c53ce94079544
Signed-off-by: Michael Tyler <michael.tyler@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11570
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
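One of the items above replaces a cached static variable with a direct
RDSVL read. As a rough illustration of the idea (a minimal sketch, not
ComputeLibrary's actual implementation; the function name is invented, and
an SME-capable CPU and assembler are assumed):

    #include <cstdint>

    // Query the streaming vector length on every call via RDSVL instead
    // of caching it in a static variable. RDSVL Xd, #imm returns
    // imm * SVL in bytes; requires the SME architecture extension.
    static inline uint64_t streaming_vector_length_bytes()
    {
        uint64_t svl;
        __asm__ volatile(
            ".arch_extension sme\n"
            "rdsvl %0, #1\n"
            : "=r"(svl));
        return svl;
    }

Reading the register directly avoids relying on a value cached at
static-initialization time.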
Diffstat (limited to 'src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp  168
1 file changed, 84 insertions, 84 deletions
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index f7aa889b56..0641563b63 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,21 +45,21 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"lsr x9, %x[n_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v8.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v7.4s }, [x21]\n"
"ld1r { v6.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v5.16b }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
"mov x11, #0x0\n"
+ "ld1r { v1.4s }, [x20]\n"
"cbz x9, 6f\n"
"1:" // Channel loop
"movi v23.4s, #0x0\n"
@@ -68,75 +68,75 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldr q23, [%x[bias], x20]\n"
"2:" // Channel loop: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
- "mov x25, %x[inptrs]\n"
- "ldp x21, x20, [x25], #0x10\n"
- "subs x24, %x[n_points], #0x1\n"
- "ldr s14, [x21, x11]\n"
- "ldr s15, [x20, x11]\n"
+ "mov x23, %x[inptrs]\n"
+ "subs x22, %x[n_points], #0x1\n"
"mov v24.16b, v23.16b\n"
"mov v25.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s16, [x21, x11]\n"
"mov v26.16b, v23.16b\n"
"mov v27.16b, v23.16b\n"
- "ldr s17, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"mov v28.16b, v23.16b\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov v29.16b, v23.16b\n"
- "ldr s18, [x21, x11]\n"
- "ldr s19, [x20, x11]\n"
"mov v30.16b, v23.16b\n"
"mov v31.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s20, [x21, x11]\n"
"usubl v0.8h, v0.8b, v5.8b\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"usubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v17.8h, v17.8b, v6.8b\n"
"usubl v18.8h, v18.8b, v6.8b\n"
"usubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"usubl v20.8h, v20.8b, v6.8b\n"
"usubl v21.8h, v21.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"usubl v22.8h, v22.8b, v6.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x23, x22, [x25], #0x10\n"
- "ldp x21, x20, [x25], #0x10\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x23, x11]\n"
- "ldr s15, [x22, x11]\n"
+ "subs x22, x22, #0x1\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x21, x11]\n"
- "ldr s17, [x20, x11]\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s18, [x21, x11]\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"smlal v31.4s, v22.4h, v0.4h\n"
- "subs x24, x24, #0x1\n"
"ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x21, x11]\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
"usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"usubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "usubl v0.8h, v0.8b, v5.8b\n"
"usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
"usubl v17.8h, v17.8b, v6.8b\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v18.8h, v18.8b, v6.8b\n"
"usubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"usubl v20.8h, v20.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"usubl v21.8h, v21.8b, v6.8b\n"
"usubl v22.8h, v22.8b, v6.8b\n"
"bgt 3b\n"
@@ -162,27 +162,27 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"sshl v27.4s, v27.4s, v3.4s\n"
"sshl v28.4s, v28.4s, v3.4s\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
"sshl v29.4s, v29.4s, v3.4s\n"
"sshl v30.4s, v30.4s, v3.4s\n"
"sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
+ "and v16.16b, v25.16b, v1.16b\n"
"sqrdmulh v26.4s, v26.4s, v2.4s\n"
"sqrdmulh v27.4s, v27.4s, v2.4s\n"
"sqrdmulh v28.4s, v28.4s, v2.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
@@ -254,17 +254,17 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
+ "str s23, [x28, x11]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x11]\n"
+ "str s25, [x26, x11]\n"
+ "str s26, [x25, x11]\n"
"str s27, [x24, x11]\n"
"str s28, [x23, x11]\n"
"str s29, [x22, x11]\n"
@@ -290,24 +290,24 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"9:" // Oddments: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
"mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
"mov v24.16b, v23.16b\n"
- "ldp x27, x26, [x10], #0x10\n"
- "ldp x25, x24, [x10], #0x10\n"
"mov v25.16b, v23.16b\n"
"mov v26.16b, v23.16b\n"
- "ldp x23, x22, [x10], #0x10\n"
- "ldr x21, [x10], #0x8\n"
"mov v27.16b, v23.16b\n"
"mov v28.16b, v23.16b\n"
"mov v29.16b, v23.16b\n"
+ "ldp x9, x28, [x10], #0x10\n"
"mov v30.16b, v23.16b\n"
- "add x9, x9, x11\n"
- "add x28, x28, x11\n"
"mov v31.16b, v23.16b\n"
"usubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x27, x26, [x10], #0x10\n"
+ "add x9, x9, x11\n"
+ "add x28, x28, x11\n"
+ "ldp x25, x24, [x10], #0x10\n"
"add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "ldr x21, [x10], #0x8\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
@@ -358,27 +358,27 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ble 15f\n"
"12:" // Oddments: Planar loop
"ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x25, x24, [x10], #0x10\n"
- "ldp x23, x22, [x10], #0x10\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x21, [x10], #0x8\n"
- "add x9, x9, x11\n"
+ "ldp x27, x26, [x10], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
+ "add x9, x9, x11\n"
"add x28, x28, x11\n"
- "add x27, x27, x11\n"
"smlal v31.4s, v22.4h, v0.4h\n"
"ldr s0, [%x[params]], #0x4\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "usubl v0.8h, v0.8b, v5.8b\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
@@ -465,36 +465,36 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v27.4s, v27.4s, v3.4s\n"
+ "sshl v28.4s, v28.4s, v3.4s\n"
"ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
"add x28, x28, x11\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
"add x27, x27, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v29.4s, v29.4s, v3.4s\n"
"add x26, x26, x11\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"add x25, x25, x11\n"
+ "sshl v30.4s, v30.4s, v3.4s\n"
+ "sshl v31.4s, v31.4s, v3.4s\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
"add x23, x23, x11\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
"add x21, x21, x11\n"
+ "and v16.16b, v25.16b, v1.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v2.4s\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v2.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"