From be13cead34e566bdd561ad3ffc3f645b460e482e Mon Sep 17 00:00:00 2001
From: Michael Tyler
Date: Tue, 17 Jan 2023 11:04:14 +0000
Subject: Revert "Update CPU kernels to remove x19"

This reverts commit 3c59f01c209d2732a15d97d65565ead964787a8b.

Resolves: COMPMID-5817
Change-Id: Ie2443a21854a95db1e3d0cafa2121c0187a5e237
Signed-off-by: Michael Tyler
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8974
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gian Marco Iodice
Benchmark: Arm Jenkins
---
 .../sve_interleaved_fp32_mla_8x3VL/a64fx.cpp   | 70 ++++++++++----------
 .../sve_interleaved_fp32_mla_8x3VL/generic.cpp | 78 +++++++++++-----------
 2 files changed, 74 insertions(+), 74 deletions(-)

(limited to 'src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL')

diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
index 3141a258a8..6defe0e223 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -10,16 +10,16 @@
  * sell copies of the Software, and to permit persons to whom the Software is
  * furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
  */
 #ifdef ARM_COMPUTE_ENABLE_SVE
@@ -32,34 +32,34 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
     float *Cpanel, int ablocks, int bblocks, int K) {

     struct KernelArgs {
+        size_t bblocks = {};
         size_t K = {};
         const float *Bpanel = {};
-        size_t bblocks = {};
     } ka;

+    ka.bblocks = bblocks;
     ka.K = (K/1) - 1;
     ka.Bpanel = Bpanel;
-    ka.bblocks = bblocks;

     __asm__ __volatile__(
       "ptrue p0.b\n"
       "1:"  // Height loop
-      "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
-      "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+      "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
       "mov x21, %x[Apanel]\n"
+      "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
       "2:"  // Width loop
-      "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+      "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
       "mov %x[Apanel], x21\n"
-      "cmp x20, #0x2\n"
+      "cmp x19, #0x2\n"
       "mov z8.b, #0x0\n"
       "mov z9.b, #0x0\n"
-      "ld1w { z0.s }, p0/Z, [x22]\n"
+      "ld1w { z0.s }, p0/Z, [x20]\n"
       "mov z10.b, #0x0\n"
       "mov z11.b, #0x0\n"
-      "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
+      "ld1w { z1.s }, p0/Z, [x20, #1, MUL VL]\n"
       "mov z12.b, #0x0\n"
       "mov z13.b, #0x0\n"
-      "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
+      "ld1w { z2.s }, p0/Z, [x20, #2, MUL VL]\n"
       "mov z14.b, #0x0\n"
       "mov z15.b, #0x0\n"
       "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
@@ -86,7 +86,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "3:"  // main loop head
       "fmla z8.s, p0/M, z0.s, z3.s\n"
       "fmla z9.s, p0/M, z1.s, z3.s\n"
-      "sub x20, x20, #0x2\n"
+      "sub x19, x19, #0x2\n"
       "fmla z10.s, p0/M, z2.s, z3.s\n"
       "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
       "fmla z11.s, p0/M, z0.s, z4.s\n"
@@ -95,7 +95,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
       "fmla z14.s, p0/M, z0.s, z5.s\n"
       "fmla z15.s, p0/M, z1.s, z5.s\n"
-      "cmp x20, #0x2\n"
+      "cmp x19, #0x2\n"
       "fmla z16.s, p0/M, z2.s, z5.s\n"
       "ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
       "fmla z17.s, p0/M, z0.s, z6.s\n"
@@ -115,11 +115,11 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "fmla z28.s, p0/M, z2.s, z5.s\n"
       "ld1rw { z5.s }, p0/Z, [%x[Apanel], #40]\n"
       "fmla z29.s, p0/M, z0.s, z6.s\n"
-      "ld1w { z0.s }, p0/Z, [x22, #3, MUL VL]\n"
+      "ld1w { z0.s }, p0/Z, [x20, #3, MUL VL]\n"
       "fmla z30.s, p0/M, z1.s, z6.s\n"
       "fmla z31.s, p0/M, z2.s, z6.s\n"
-      "ld1w { z1.s }, p0/Z, [x22, #4, MUL VL]\n"
-      "ld1w { z2.s }, p0/Z, [x22, #5, MUL VL]\n"
+      "ld1w { z1.s }, p0/Z, [x20, #4, MUL VL]\n"
+      "ld1w { z2.s }, p0/Z, [x20, #5, MUL VL]\n"
       "fmla z8.s, p0/M, z0.s, z3.s\n"
       "ld1rw { z6.s }, p0/Z, [%x[Apanel], #44]\n"
       "fmla z9.s, p0/M, z1.s, z3.s\n"
@@ -131,7 +131,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
       "fmla z14.s, p0/M, z0.s, z5.s\n"
       "fmla z15.s, p0/M, z1.s, z5.s\n"
-      "addvl x22, x22, #6\n"
+      "addvl x20, x20, #6\n"
       "fmla z16.s, p0/M, z2.s, z5.s\n"
       "ld1rw { z5.s }, p0/Z, [%x[Apanel], #56]\n"
       "fmla z17.s, p0/M, z0.s, z6.s\n"
@@ -151,18 +151,18 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "fmla z27.s, p0/M, z1.s, z5.s\n"
       "fmla z28.s, p0/M, z2.s, z5.s\n"
       "fmla z29.s, p0/M, z0.s, z6.s\n"
-      "ld1w { z0.s }, p0/Z, [x22]\n"
+      "ld1w { z0.s }, p0/Z, [x20]\n"
       "fmla z30.s, p0/M, z1.s, z6.s\n"
       "fmla z31.s, p0/M, z2.s, z6.s\n"
-      "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
-      "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
+      "ld1w { z1.s }, p0/Z, [x20, #1, MUL VL]\n"
+      "ld1w { z2.s }, p0/Z, [x20, #2, MUL VL]\n"
       "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
       "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
       "bge 3b\n"
       "4:"  // main loop skip
       "fmla z8.s, p0/M, z0.s, z3.s\n"
       "fmla z9.s, p0/M, z1.s, z3.s\n"
-      "addvl x22, x22, #3\n"
+      "addvl x20, x20, #3\n"
       "fmla z10.s, p0/M, z2.s, z3.s\n"
       "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
       "fmla z11.s, p0/M, z0.s, z4.s\n"
@@ -190,10 +190,10 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "fmla z29.s, p0/M, z0.s, z6.s\n"
       "fmla z30.s, p0/M, z1.s, z6.s\n"
       "fmla z31.s, p0/M, z2.s, z6.s\n"
-      "cbz x20, 5f\n"
-      "ld1w { z0.s }, p0/Z, [x22]\n"
-      "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
-      "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
+      "cbz x19, 5f\n"
+      "ld1w { z0.s }, p0/Z, [x20]\n"
+      "ld1w { z1.s }, p0/Z, [x20, #1, MUL VL]\n"
+      "ld1w { z2.s }, p0/Z, [x20, #2, MUL VL]\n"
       "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
       "fmla z8.s, p0/M, z0.s, z3.s\n"
       "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
@@ -202,24 +202,24 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
       "fmla z10.s, p0/M, z2.s, z3.s\n"
       "fmla z11.s, p0/M, z0.s, z4.s\n"
+      "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
       "fmla z12.s, p0/M, z1.s, z4.s\n"
       "fmla z13.s, p0/M, z2.s, z4.s\n"
-      "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+      "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
       "fmla z14.s, p0/M, z0.s, z5.s\n"
       "fmla z15.s, p0/M, z1.s, z5.s\n"
-      "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
       "fmla z16.s, p0/M, z2.s, z5.s\n"
       "fmla z17.s, p0/M, z0.s, z6.s\n"
       "ld1rw { z5.s }, p0/Z, [%x[Apanel], #24]\n"
       "fmla z18.s, p0/M, z1.s, z6.s\n"
       "fmla z19.s, p0/M, z2.s, z6.s\n"
       "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+      "addvl x20, x20, #3\n"
       "fmla z20.s, p0/M, z0.s, z3.s\n"
       "fmla z21.s, p0/M, z1.s, z3.s\n"
-      "addvl x22, x22, #3\n"
+      "add %x[Apanel], %x[Apanel], #0x20\n"
       "fmla z22.s, p0/M, z2.s, z3.s\n"
       "fmla z23.s, p0/M, z0.s, z4.s\n"
-      "add %x[Apanel], %x[Apanel], #0x20\n"
       "fmla z24.s, p0/M, z1.s, z4.s\n"
       "fmla z25.s, p0/M, z2.s, z4.s\n"
       "fmla z26.s, p0/M, z0.s, z5.s\n"
@@ -230,7 +230,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "fmla z31.s, p0/M, z2.s, z6.s\n"
       "5:"  // multiply loop done
       "st1w { z8.s }, p0, [%x[Cpanel]]\n"
-      "subs x23, x23, #0x1\n"
+      "subs x22, x22, #0x1\n"
       "st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
       "st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
       "st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -261,7 +261,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
       "bne 1b\n"
       : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
       : [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
-      : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+      : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
     );
 }
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
index 9d1c0c3728..e02db6ec48 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -10,16 +10,16 @@
  * sell copies of the Software, and to permit persons to whom the Software is
  * furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
  */
 #ifdef ARM_COMPUTE_ENABLE_SVE
@@ -32,44 +32,42 @@ void sve_interleaved_fp32_mla_8x3VL(
     float *Cpanel, int ablocks, int bblocks, int K) {

     struct KernelArgs {
+        size_t bblocks = {};
         size_t K = {};
         const float *Bpanel = {};
-        size_t bblocks = {};
     } ka;

+    ka.bblocks = bblocks;
     ka.K = (K/1) - 1;
     ka.Bpanel = Bpanel;
-    ka.bblocks = bblocks;

     __asm__ __volatile__(
       "ptrue p0.b\n"
       "1:"  // Height loop
-      "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
-      "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+      "ldr x22, [%x[args_ptr], %[offsetof_bblocks]]\n"
       "mov x21, %x[Apanel]\n"
+      "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
       "2:"  // Width loop
-      "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
-      "mov %x[Apanel], x21\n"
-      "cmp x20, #0x2\n"
       "mov z8.b, #0x0\n"
       "mov z9.b, #0x0\n"
-      "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
+      "ldr x19, [%x[args_ptr], %[offsetof_K]]\n"
       "mov z10.b, #0x0\n"
       "mov z11.b, #0x0\n"
-      "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
+      "ld1w { z4.s }, p0/Z, [x20]\n"
       "mov z12.b, #0x0\n"
      "mov z13.b, #0x0\n"
-      "ld1w { z4.s }, p0/Z, [x22]\n"
+      "mov %x[Apanel], x21\n"
       "mov z14.b, #0x0\n"
       "mov z15.b, #0x0\n"
-      "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
+      "cmp x19, #0x2\n"
       "mov z16.b, #0x0\n"
       "mov z17.b, #0x0\n"
-      "ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
       "mov z18.b, #0x0\n"
       "mov z19.b, #0x0\n"
+      "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
       "mov z20.b, #0x0\n"
       "mov z21.b, #0x0\n"
+      "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
       "mov z22.b, #0x0\n"
       "mov z23.b, #0x0\n"
       "mov z24.b, #0x0\n"
@@ -84,29 +82,31 @@ void sve_interleaved_fp32_mla_8x3VL(
       "3:"  // main loop head
       "fmla z8.s, z4.s, z0.s[0]\n"
       "fmla z11.s, z4.s, z0.s[1]\n"
-      "ld1rqw { z2.s }, p0/Z, [%x[Apanel], #32]\n"
+      "ld1w { z5.s }, p0/Z, [x20, #1, MUL VL]\n"
       "fmla z14.s, z4.s, z0.s[2]\n"
       "fmla z17.s, z4.s, z0.s[3]\n"
-      "ld1rqw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
+      "ld1w { z6.s }, p0/Z, [x20, #2, MUL VL]\n"
       "fmla z20.s, z4.s, z1.s[0]\n"
       "fmla z23.s, z4.s, z1.s[1]\n"
-      "sub x20, x20, #0x2\n"
+      "ld1rqw { z2.s }, p0/Z, [%x[Apanel], #32]\n"
       "fmla z26.s, z4.s, z1.s[2]\n"
       "fmla z29.s, z4.s, z1.s[3]\n"
-      "ld1w { z4.s }, p0/Z, [x22, #3, MUL VL]\n"
+      "ld1rqw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
       "fmla z9.s, z5.s, z0.s[0]\n"
       "fmla z12.s, z5.s, z0.s[1]\n"
-      "cmp x20, #0x2\n"
+      "ld1w { z4.s }, p0/Z, [x20, #3, MUL VL]\n"
       "fmla z15.s, z5.s, z0.s[2]\n"
       "fmla z18.s, z5.s, z0.s[3]\n"
-      "add %x[Apanel], %x[Apanel], #0x40\n"
+      "sub x19, x19, #0x2\n"
       "fmla z21.s, z5.s, z1.s[0]\n"
       "fmla z24.s, z5.s, z1.s[1]\n"
+      "cmp x19, #0x2\n"
       "fmla z27.s, z5.s, z1.s[2]\n"
       "fmla z30.s, z5.s, z1.s[3]\n"
-      "ld1w { z5.s }, p0/Z, [x22, #4, MUL VL]\n"
+      "ld1w { z5.s }, p0/Z, [x20, #4, MUL VL]\n"
       "fmla z10.s, z6.s, z0.s[0]\n"
       "fmla z13.s, z6.s, z0.s[1]\n"
+      "add %x[Apanel], %x[Apanel], #0x40\n"
       "fmla z16.s, z6.s, z0.s[2]\n"
       "fmla z19.s, z6.s, z0.s[3]\n"
       "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
@@ -114,27 +114,26 @@ void sve_interleaved_fp32_mla_8x3VL(
       "fmla z25.s, z6.s, z1.s[1]\n"
       "fmla z28.s, z6.s, z1.s[2]\n"
       "fmla z31.s, z6.s, z1.s[3]\n"
-      "ld1w { z6.s }, p0/Z, [x22, #5, MUL VL]\n"
-      "addvl x22, x22, #6\n"
+      "ld1w { z6.s }, p0/Z, [x20, #5, MUL VL]\n"
       "fmla z8.s, z4.s, z2.s[0]\n"
       "fmla z11.s, z4.s, z2.s[1]\n"
       "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
       "fmla z14.s, z4.s, z2.s[2]\n"
       "fmla z17.s, z4.s, z2.s[3]\n"
+      "addvl x20, x20, #6\n"
       "fmla z20.s, z4.s, z3.s[0]\n"
       "fmla z23.s, z4.s, z3.s[1]\n"
       "fmla z26.s, z4.s, z3.s[2]\n"
       "fmla z29.s, z4.s, z3.s[3]\n"
-      "ld1w { z4.s }, p0/Z, [x22]\n"
       "fmla z9.s, z5.s, z2.s[0]\n"
       "fmla z12.s, z5.s, z2.s[1]\n"
+      "ld1w { z4.s }, p0/Z, [x20]\n"
       "fmla z15.s, z5.s, z2.s[2]\n"
       "fmla z18.s, z5.s, z2.s[3]\n"
       "fmla z21.s, z5.s, z3.s[0]\n"
       "fmla z24.s, z5.s, z3.s[1]\n"
       "fmla z27.s, z5.s, z3.s[2]\n"
       "fmla z30.s, z5.s, z3.s[3]\n"
-      "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
       "fmla z10.s, z6.s, z2.s[0]\n"
       "fmla z13.s, z6.s, z2.s[1]\n"
       "fmla z16.s, z6.s, z2.s[2]\n"
@@ -143,19 +142,20 @@ void sve_interleaved_fp32_mla_8x3VL(
       "fmla z25.s, z6.s, z3.s[1]\n"
       "fmla z28.s, z6.s, z3.s[2]\n"
       "fmla z31.s, z6.s, z3.s[3]\n"
-      "ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
       "bge 3b\n"
       "4:"  // main loop skip
       "fmla z8.s, z4.s, z0.s[0]\n"
       "fmla z11.s, z4.s, z0.s[1]\n"
-      "add %x[Apanel], %x[Apanel], #0x20\n"
+      "ld1w { z5.s }, p0/Z, [x20, #1, MUL VL]\n"
       "fmla z14.s, z4.s, z0.s[2]\n"
       "fmla z17.s, z4.s, z0.s[3]\n"
-      "addvl x22, x22, #3\n"
+      "ld1w { z6.s }, p0/Z, [x20, #2, MUL VL]\n"
       "fmla z20.s, z4.s, z1.s[0]\n"
       "fmla z23.s, z4.s, z1.s[1]\n"
+      "add %x[Apanel], %x[Apanel], #0x20\n"
       "fmla z26.s, z4.s, z1.s[2]\n"
       "fmla z29.s, z4.s, z1.s[3]\n"
+      "addvl x20, x20, #3\n"
       "fmla z9.s, z5.s, z0.s[0]\n"
       "fmla z12.s, z5.s, z0.s[1]\n"
       "fmla z15.s, z5.s, z0.s[2]\n"
@@ -172,19 +172,19 @@ void sve_interleaved_fp32_mla_8x3VL(
       "fmla z25.s, z6.s, z1.s[1]\n"
       "fmla z28.s, z6.s, z1.s[2]\n"
       "fmla z31.s, z6.s, z1.s[3]\n"
-      "cbz x20, 5f\n"
+      "cbz x19, 5f\n"
       "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
       "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
       "add %x[Apanel], %x[Apanel], #0x20\n"
-      "ld1w { z7.s }, p0/Z, [x22]\n"
-      "ld1w { z4.s }, p0/Z, [x22, #1, MUL VL]\n"
+      "ld1w { z7.s }, p0/Z, [x20]\n"
+      "ld1w { z4.s }, p0/Z, [x20, #1, MUL VL]\n"
+      "ld1w { z5.s }, p0/Z, [x20, #2, MUL VL]\n"
+      "addvl x20, x20, #3\n"
       "fmla z8.s, z7.s, z0.s[0]\n"
-      "ld1w { z5.s }, p0/Z, [x22, #2, MUL VL]\n"
       "fmla z11.s, z7.s, z0.s[1]\n"
       "fmla z14.s, z7.s, z0.s[2]\n"
       "fmla z17.s, z7.s, z0.s[3]\n"
       "fmla z20.s, z7.s, z1.s[0]\n"
-      "addvl x22, x22, #3\n"
       "fmla z23.s, z7.s, z1.s[1]\n"
       "fmla z26.s, z7.s, z1.s[2]\n"
       "fmla z29.s, z7.s, z1.s[3]\n"
@@ -206,7 +206,7 @@ void sve_interleaved_fp32_mla_8x3VL(
       "fmla z31.s, z5.s, z1.s[3]\n"
       "5:"  // multiply loop done
       "st1w { z8.s }, p0, [%x[Cpanel]]\n"
-      "subs x23, x23, #0x1\n"
+      "subs x22, x22, #0x1\n"
       "st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
       "st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
       "st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
@@ -237,7 +237,7 @@ void sve_interleaved_fp32_mla_8x3VL(
       "bne 1b\n"
       : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
       : [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
-      : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+      : "cc", "memory", "p0", "x19", "x20", "x21", "x22", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
     );
 }
--
cgit v1.2.1
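
Note on the argument-passing pattern visible in both hunks above: the kernels pack K, Bpanel and bblocks into a small KernelArgs struct and hand the inline assembly a single pointer plus offsetof() immediates, so the asm can ldr each field itself instead of consuming extra register operands. The snippet below is a minimal, stand-alone sketch of that same struct-plus-offsetof technique; it is not part of the patch or of Compute Library. The function name add_inplace, the struct fields and the toy scalar loop are hypothetical, and it assumes an AArch64 target built with GCC/Clang extended inline asm.

// Sketch only (hypothetical example, not from the patch): pass several kernel
// arguments to inline asm through one struct pointer plus offsetof() immediates.
#include <cstddef>
#include <cstdio>

static void add_inplace(float *dst, const float *src, int n)
{
    if (n <= 0) return;  // the asm loop below assumes at least one element

    // Bundle the extra arguments the same way the kernels bundle K/Bpanel/bblocks.
    struct KernelArgs {
        size_t n = {};
        const float *src = {};
    } ka;
    ka.n   = static_cast<size_t>(n);
    ka.src = src;

    __asm__ __volatile__(
        "ldr x20, [%x[args_ptr], %[offsetof_n]]\n"    // x20 = ka.n (loop counter)
        "ldr x21, [%x[args_ptr], %[offsetof_src]]\n"  // x21 = ka.src
        "1:\n"
        "ldr s0, [x21], #4\n"                         // load src[i], post-increment
        "ldr s1, [%x[dst]]\n"                         // load dst[i]
        "fadd s1, s1, s0\n"                           // dst[i] += src[i]
        "str s1, [%x[dst]], #4\n"                     // store and advance dst
        "subs x20, x20, #1\n"
        "bne 1b\n"
        : [dst] "+&r" (dst)
        : [args_ptr] "r" (&ka),
          [offsetof_n] "I" (offsetof(KernelArgs, n)),
          [offsetof_src] "I" (offsetof(KernelArgs, src))
        : "cc", "memory", "x20", "x21", "v0", "v1"
    );
}

int main()
{
    float       a[4] = {1.f, 2.f, 3.f, 4.f};
    const float b[4] = {10.f, 20.f, 30.f, 40.f};
    add_inplace(a, b, 4);
    for (float v : a) std::printf("%g\n", v);  // prints 11 22 33 44
    return 0;
}

The design mirrors the kernels' constraint lists: the struct pointer is a plain "r" input, each field offset is an "I" immediate, and everything the asm writes (scratch registers, condition flags, memory behind the pointers) is declared in the clobber list. The revert above only renames which scratch registers are used (reintroducing x19) without changing this calling pattern.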