aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2021-07-16 16:16:43 +0100
committerGeorgios Pinitas <georgios.pinitas@arm.com>2021-07-22 02:25:50 +0000
commit4ee8b1599dbaf7634d25607fa5ac96ba3dc6b0f2 (patch)
tree2f8362d33cdad4212f4b96995681c68184c759e1 /src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
parent59fd7a722e5bc7e85309d6200bc37a772721a719 (diff)
downloadComputeLibrary-4ee8b1599dbaf7634d25607fa5ac96ba3dc6b0f2.tar.gz
Update GEMM assembly kernels
- Introduce Fp32 kernels with internal calculations in Bfloat16 when fast_mode is enabled
- Improve kernel selection heuristics

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I68a9e7e862b6fd2721b46e0d7cc791091c4ab279
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5965
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp | 147
1 file changed, 42 insertions(+), 105 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
index 25d65826b9..3baf7b9715 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
@@ -161,13 +161,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x26, #0x4\n"
+ "add x25, x25, #0x10\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #4, MUL VL]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla z11.s, z7.s, z0.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.s, z6.s, z0.s[1]\n"
@@ -202,7 +201,6 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -241,9 +239,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z10.s, z6.s, z0.s[3]\n"
"fmla z11.s, z7.s, z0.s[3]\n"
"11:" // Height 1: Multiply loop: multiply skip
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x27, x27, #0x1\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
"cmp x27, x19\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
@@ -347,16 +344,14 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "cmp x26, #0x4\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #4, MUL VL]\n"
@@ -407,9 +402,7 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
- "add x24, x24, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
@@ -464,10 +457,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z11.s, z7.s, z0.s[3]\n"
"fmla z15.s, z7.s, z1.s[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x27, x27, #0x1\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
"cmp x27, x19\n"
"bne 19b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -601,21 +592,18 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z13.s, z7.s, z1.s[0]\n"
"add x23, x23, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "cmp x26, #0x4\n"
- "fmla z13.s, z7.s, z1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla z17.s, z7.s, z2.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
@@ -680,12 +668,9 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
- "add x23, x23, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
@@ -755,11 +740,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z15.s, z7.s, z1.s[3]\n"
"fmla z19.s, z7.s, z2.s[3]\n"
"37:" // Height 3: Multiply loop: multiply skip
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x27, x27, #0x1\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
"cmp x27, x19\n"
"bne 32b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -922,26 +904,22 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x22, x22, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "cmp x26, #0x4\n"
+ "add x22, x22, #0x10\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
@@ -1020,19 +998,15 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
- "fmla z16.s, z6.s, z2.s[0]\n"
- "add x22, x22, #0x10\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -1113,12 +1087,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z19.s, z7.s, z2.s[3]\n"
"fmla z23.s, z7.s, z3.s[3]\n"
"50:" // Height 4: Multiply loop: multiply skip
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x27, x27, #0x1\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
"cmp x27, x19\n"
"bne 45b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -1310,32 +1280,27 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"ld1rqw { z4.s }, p0/Z, [x21]\n"
- "add x22, x22, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x22, x22, #0x10\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
"add x21, x21, #0x10\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "cmp x26, #0x4\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z22.s, z6.s, z3.s[0]\n"
@@ -1427,22 +1392,17 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
- "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z4.s }, p0/Z, [x21]\n"
- "add x22, x22, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
- "add x21, x21, #0x10\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1538,13 +1498,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z23.s, z7.s, z3.s[3]\n"
"fmla z27.s, z7.s, z4.s[3]\n"
"63:" // Height 5: Multiply loop: multiply skip
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x27, x27, #0x1\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
"cmp x27, x19\n"
"bne 58b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -1768,37 +1723,31 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
+ "cmp x26, #0x4\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla z16.s, z6.s, z2.s[0]\n"
"ld1rqw { z4.s }, p0/Z, [x21]\n"
- "add x22, x22, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"ld1rqw { z5.s }, p0/Z, [x20]\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "add x21, x21, #0x10\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
"add x20, x20, #0x10\n"
"fmla z24.s, z6.s, z4.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "cmp x26, #0x4\n"
"fmla z28.s, z6.s, z5.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"fmla z29.s, z7.s, z5.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.s, z6.s, z0.s[0]\n"
- "prfm pldl1keep, [x20, #0x80]\n"
"fmla z14.s, z6.s, z1.s[0]\n"
"fmla z18.s, z6.s, z2.s[0]\n"
"fmla z22.s, z6.s, z3.s[0]\n"
@@ -1904,25 +1853,19 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1rqw { z0.s }, p0/Z, [x25]\n"
"fmla z8.s, z6.s, z0.s[0]\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z7.s, z0.s[0]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x24, x24, #0x10\n"
- "fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
- "fmla z16.s, z6.s, z2.s[0]\n"
+ "fmla z12.s, z6.s, z1.s[0]\n"
"ld1rqw { z4.s }, p0/Z, [x21]\n"
- "add x22, x22, #0x10\n"
"fmla z13.s, z7.s, z1.s[0]\n"
"ld1rqw { z5.s }, p0/Z, [x20]\n"
- "add x21, x21, #0x10\n"
+ "fmla z16.s, z6.s, z2.s[0]\n"
"fmla z20.s, z6.s, z3.s[0]\n"
- "add x20, x20, #0x10\n"
- "fmla z17.s, z7.s, z2.s[0]\n"
"fmla z24.s, z6.s, z4.s[0]\n"
"fmla z28.s, z6.s, z5.s[0]\n"
"ld1w { z6.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z17.s, z7.s, z2.s[0]\n"
"fmla z21.s, z7.s, z3.s[0]\n"
"fmla z25.s, z7.s, z4.s[0]\n"
"fmla z29.s, z7.s, z5.s[0]\n"
@@ -2033,14 +1976,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z27.s, z7.s, z4.s[3]\n"
"fmla z31.s, z7.s, z5.s[3]\n"
"76:" // Height 6: Multiply loop: multiply skip
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x27, x27, #0x1\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "prfm pldl1keep, [x20, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x27, x27, #0x1\n"
"cmp x27, x19\n"
"bne 71b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"