about summary refs log tree commit diff
path: root/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp | 146
1 file changed, 58 insertions, 88 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
index 8a7465ba6b..3031f5abf5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -158,7 +158,6 @@ void sve_hybrid_s8qa_dot_4x4VL (
"tbnz %x[flags], #31, 8f\n"
"sdot z11.s, z0.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
"sub x24, x24, #0x10\n"
"cmp x24, #0x10\n"
"bgt 7b\n"
@@ -170,7 +169,6 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z5.b, z0.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"addvl x28, x28, #4\n"
@@ -212,9 +210,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"tbnz %x[flags], #31, 11f\n"
"sdot z11.s, z0.b, z15.b\n"
"11:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
- "add x25, x25, #0x1\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
"cmp x25, x19\n"
"bne 4b\n"
"tbnz %x[flags], #31, 12f\n"
@@ -251,16 +248,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
"tbz %x[flags], #5, 13f\n"
"and z4.d, z16.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
"and z7.d, z19.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"sqadd z16.s, z16.s, z4.s\n"
- "asr z7.s, z7.s, #0x1f\n"
"sqadd z17.s, z17.s, z5.s\n"
"sqadd z18.s, z18.s, z6.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"13:" // Height 1: no shift correction
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
@@ -396,9 +393,7 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z11.s, z0.b, z15.b\n"
"sdot z12.s, z1.b, z15.b\n"
"22:" // Height 2: Multiply loop: unique 3: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
"sub x24, x24, #0x10\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"cmp x24, #0x10\n"
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
@@ -409,12 +404,10 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
"ld1rqb { z1.b }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z5.b, z0.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "add x22, x22, #0x10\n"
- "sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
"addvl x28, x28, #4\n"
"sdot z21.s, z5.b, z1.b[0]\n"
"sdot z18.s, z6.b, z0.b[0]\n"
@@ -470,10 +463,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z11.s, z0.b, z15.b\n"
"sdot z12.s, z1.b, z15.b\n"
"25:" // Height 2: Multiply loop: unique 4: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
- "add x25, x25, #0x1\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
"cmp x25, x19\n"
"bne 18b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -527,27 +518,27 @@ void sve_hybrid_s8qa_dot_4x4VL (
".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
"tbz %x[flags], #5, 27f\n"
"and z4.d, z16.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z4.s\n"
+ "sqadd z17.s, z17.s, z5.s\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"and z9.d, z21.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
- "and z10.d, z22.d, z0.d\n"
"asr z8.s, z8.s, #0x1f\n"
- "and z4.d, z23.d, z0.d\n"
"asr z9.s, z9.s, #0x1f\n"
- "sqadd z17.s, z17.s, z5.s\n"
- "asr z10.s, z10.s, #0x1f\n"
- "sqadd z18.s, z18.s, z6.s\n"
- "asr z4.s, z4.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
"27:" // Height 2: no shift correction
@@ -731,11 +722,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z12.s, z1.b, z15.b\n"
"sdot z13.s, z2.b, z15.b\n"
"36:" // Height 3: Multiply loop: unique 5: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
"sub x24, x24, #0x10\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"cmp x24, #0x10\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
"ld1b { z4.b }, p2/Z, [x28]\n"
@@ -745,16 +733,13 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
"ld1rqb { z1.b }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z5.b, z0.b[0]\n"
"ld1rqb { z2.b }, p0/Z, [x21]\n"
- "add x22, x22, #0x10\n"
- "sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
- "sdot z24.s, z4.b, z2.b[0]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"addvl x28, x28, #4\n"
+ "sdot z24.s, z4.b, z2.b[0]\n"
"sdot z21.s, z5.b, z1.b[0]\n"
"sdot z25.s, z5.b, z2.b[0]\n"
"sdot z18.s, z6.b, z0.b[0]\n"
@@ -825,11 +810,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z12.s, z1.b, z15.b\n"
"sdot z13.s, z2.b, z15.b\n"
"39:" // Height 3: Multiply loop: unique 6: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
- "add x25, x25, #0x1\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
"cmp x25, x19\n"
"bne 32b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -899,39 +881,39 @@ void sve_hybrid_s8qa_dot_4x4VL (
".inst 0x04a4777b // sqrdmulh z27.s, z27.s, z4.s\n"
"tbz %x[flags], #5, 41f\n"
"and z4.d, z16.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z4.s\n"
+ "sqadd z17.s, z17.s, z5.s\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"and z9.d, z21.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
- "and z10.d, z22.d, z0.d\n"
"asr z8.s, z8.s, #0x1f\n"
- "and z4.d, z23.d, z0.d\n"
"asr z9.s, z9.s, #0x1f\n"
- "sqadd z17.s, z17.s, z5.s\n"
- "asr z10.s, z10.s, #0x1f\n"
- "sqadd z18.s, z18.s, z6.s\n"
- "asr z4.s, z4.s, #0x1f\n"
- "and z5.d, z24.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "and z5.d, z24.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
- "and z6.d, z25.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"sqadd z24.s, z24.s, z5.s\n"
+ "and z6.d, z25.d, z0.d\n"
"and z7.d, z26.d, z0.d\n"
- "asr z7.s, z7.s, #0x1f\n"
"and z8.d, z27.d, z0.d\n"
- "sqadd z25.s, z25.s, z6.s\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
+ "sqadd z25.s, z25.s, z6.s\n"
"sqadd z26.s, z26.s, z7.s\n"
"sqadd z27.s, z27.s, z8.s\n"
"41:" // Height 3: no shift correction
@@ -1165,12 +1147,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z13.s, z2.b, z15.b\n"
"sdot z14.s, z3.b, z15.b\n"
"50:" // Height 4: Multiply loop: unique 7: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
"sub x24, x24, #0x10\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"cmp x24, #0x10\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "prfm pldl1keep, [x20, #0x80]\n"
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
"ld1b { z4.b }, p2/Z, [x28]\n"
@@ -1180,19 +1158,15 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1rqb { z0.b }, p0/Z, [x23]\n"
"sdot z16.s, z4.b, z0.b[0]\n"
"ld1rqb { z1.b }, p0/Z, [x22]\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z5.b, z0.b[0]\n"
"ld1rqb { z2.b }, p0/Z, [x21]\n"
- "add x22, x22, #0x10\n"
- "sdot z20.s, z4.b, z1.b[0]\n"
"ld1rqb { z3.b }, p0/Z, [x20]\n"
- "add x21, x21, #0x10\n"
- "sdot z24.s, z4.b, z2.b[0]\n"
+ "sdot z20.s, z4.b, z1.b[0]\n"
"ld1b { z6.b }, p2/Z, [x28, #2, MUL VL]\n"
- "add x20, x20, #0x10\n"
"sdot z21.s, z5.b, z1.b[0]\n"
"ld1b { z7.b }, p2/Z, [x28, #3, MUL VL]\n"
"addvl x28, x28, #4\n"
+ "sdot z24.s, z4.b, z2.b[0]\n"
"sdot z28.s, z4.b, z3.b[0]\n"
"sdot z25.s, z5.b, z2.b[0]\n"
"sdot z29.s, z5.b, z3.b[0]\n"
@@ -1279,12 +1253,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sdot z13.s, z2.b, z15.b\n"
"sdot z14.s, z3.b, z15.b\n"
"53:" // Height 4: Multiply loop: unique 8: skip row sum
- "prfm pldl1keep, [x23, #0x80]\n"
- "add x25, x25, #0x1\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "prfm pldl1keep, [x20, #0x80]\n"
"ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x25, x25, #0x1\n"
"cmp x25, x19\n"
"bne 46b\n"
"ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -1370,52 +1340,52 @@ void sve_hybrid_s8qa_dot_4x4VL (
".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
"tbz %x[flags], #5, 55f\n"
"and z4.d, z16.d, z0.d\n"
- "asr z4.s, z4.s, #0x1f\n"
"and z5.d, z17.d, z0.d\n"
"and z6.d, z18.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z4.s\n"
+ "sqadd z17.s, z17.s, z5.s\n"
+ "sqadd z18.s, z18.s, z6.s\n"
"and z7.d, z19.d, z0.d\n"
"and z8.d, z20.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"and z9.d, z21.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
- "sqadd z16.s, z16.s, z4.s\n"
- "and z10.d, z22.d, z0.d\n"
"asr z8.s, z8.s, #0x1f\n"
- "and z4.d, z23.d, z0.d\n"
"asr z9.s, z9.s, #0x1f\n"
- "sqadd z17.s, z17.s, z5.s\n"
- "asr z10.s, z10.s, #0x1f\n"
- "sqadd z18.s, z18.s, z6.s\n"
- "asr z4.s, z4.s, #0x1f\n"
- "and z5.d, z24.d, z0.d\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z19.s, z19.s, z7.s\n"
"sqadd z20.s, z20.s, z8.s\n"
"sqadd z21.s, z21.s, z9.s\n"
+ "and z10.d, z22.d, z0.d\n"
+ "and z4.d, z23.d, z0.d\n"
+ "and z5.d, z24.d, z0.d\n"
+ "asr z10.s, z10.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z22.s, z22.s, z10.s\n"
"sqadd z23.s, z23.s, z4.s\n"
- "and z6.d, z25.d, z0.d\n"
- "asr z6.s, z6.s, #0x1f\n"
"sqadd z24.s, z24.s, z5.s\n"
+ "and z6.d, z25.d, z0.d\n"
"and z7.d, z26.d, z0.d\n"
- "asr z7.s, z7.s, #0x1f\n"
"and z8.d, z27.d, z0.d\n"
- "and z9.d, z28.d, z0.d\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z7.s, z7.s, #0x1f\n"
"asr z8.s, z8.s, #0x1f\n"
"sqadd z25.s, z25.s, z6.s\n"
+ "sqadd z26.s, z26.s, z7.s\n"
+ "sqadd z27.s, z27.s, z8.s\n"
+ "and z9.d, z28.d, z0.d\n"
"and z10.d, z29.d, z0.d\n"
- "asr z9.s, z9.s, #0x1f\n"
"and z4.d, z30.d, z0.d\n"
+ "asr z9.s, z9.s, #0x1f\n"
"asr z10.s, z10.s, #0x1f\n"
- "sqadd z26.s, z26.s, z7.s\n"
- "and z5.d, z31.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
- "sqadd z27.s, z27.s, z8.s\n"
- "asr z5.s, z5.s, #0x1f\n"
"sqadd z28.s, z28.s, z9.s\n"
"sqadd z29.s, z29.s, z10.s\n"
"sqadd z30.s, z30.s, z4.s\n"
+ "and z5.d, z31.d, z0.d\n"
+ "asr z5.s, z5.s, #0x1f\n"
"sqadd z31.s, z31.s, z5.s\n"
"55:" // Height 4: no shift correction
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
@@ -1529,4 +1499,4 @@ void sve_hybrid_s8qa_dot_4x4VL (
}
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // __ARM_FEATURE_SVE