| author    | Anthony Barbier <anthony.barbier@arm.com> | 2018-07-03 16:22:02 +0100 |
| committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:54:10 +0000 |
| commit    | 5f707736413aeac77818c42838296966f8dc6761 (patch) | |
| tree      | b829ed3243ea5f3085f288836132416c78bc2e72 /src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6 | |
| parent    | 7485d5a62685cb745ab50e970adb722cb71557ac (diff) | |
| download  | ComputeLibrary-5f707736413aeac77818c42838296966f8dc6761.tar.gz | |
COMPMID-1369: Revert accidental formatting of RSH's repo
Pulled latest fixes from David's repo:
commit f43ebe932c84083332b0b1a0348241b69dda63a7
Author: David Mansell <David.Mansell@arm.com>
Date: Tue Jul 3 18:09:01 2018 +0100
Whitespace tidying, fixed comment in gemv_batched imported from ACL.
Change-Id: Ie37a623f44e90d88072236cb853ac55ac82d5f51
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138530
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: David Mansell <david.mansell@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6')

  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp
  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp
  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp

3 files changed, 904 insertions, 867 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp index 428498f79e..e3844d8825 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp @@ -37,360 +37,370 @@ // Note that the intent of this is that either ablocks or bblocks will be 1 // - this construction allows the output loop to proceed in either order. -namespace arm_gemm -{ -void a32_sgemm_8x6_a53(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) -{ +namespace arm_gemm { + +void a32_sgemm_8x6_a53(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) { const float *a_ptr = Apanel; - float *c_ptr = Cpanel; + float *c_ptr = Cpanel; - for(int yb = 0; yb < ablocks; yb++) - { + for (int yb=0; yb<ablocks; yb++) { const float *a_ptr0 = a_ptr; - const float *b_ptr = Bpanel; + const float *b_ptr = Bpanel; - for(int xb = 0; xb < bblocks; xb++) - { - a_ptr = a_ptr0; + for (int xb=0; xb<bblocks; xb++) { + a_ptr = a_ptr0; int tails = (K & 3); - if(tails == 0) - { + if (tails == 0) { tails = 4; } - int k = ((K + 3) / 4) - 1; - - __asm __volatile( - "vmov.i32 q4, #0\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]\n" - "vmov.i32 q5, #0\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]\n" - "vmov.i32 q6, #0\n" - "ldr r0, [%[a_ptr], #0x10]\n" - "vmov.i32 q7, #0\n" - "ldr r1, [%[a_ptr], #0x14]\n" - "vmov.i32 q8, #0\n" ASM_PREFETCH("[%[a_ptr], #0x40]") "vmov.i32 q9, #0\n" ASM_PREFETCH("[%[b_ptr], #0x40]") "vmov.i32 q10, #0\n" ASM_PREFETCH("[%[a_ptr], #0x80]") "vmov.i32 q11, #0\n" + int k = ((K+3)/4) - 1; + + __asm __volatile ( + "vmov.i32 q4, #0\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]\n" + "vmov.i32 q5, #0\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]\n" + "vmov.i32 q6, #0\n" + "ldr r0, [%[a_ptr], #0x10]\n" + "vmov.i32 q7, #0\n" + "ldr r1, [%[a_ptr], #0x14]\n" + "vmov.i32 q8, #0\n" + ASM_PREFETCH("[%[a_ptr], #0x40]") + "vmov.i32 q9, #0\n" + ASM_PREFETCH("[%[b_ptr], #0x40]") + "vmov.i32 q10, #0\n" + ASM_PREFETCH("[%[a_ptr], #0x80]") + "vmov.i32 q11, #0\n" ASM_PREFETCH("[%[b_ptr], #0x80]") - "vmov.i32 q12, #0\n" - "vmov.i32 q13, #0\n" ASM_PREFETCH("[%[a_ptr], #0xC0]") "vmov.i32 q14, #0\n" ASM_PREFETCH("[%[b_ptr], #0XC0]") - "vmov.i32 q15, #0\n" - "cmp %[k], #0\n" - "beq 6f\n" + "vmov.i32 q12, #0\n" + "vmov.i32 q13, #0\n" + ASM_PREFETCH("[%[a_ptr], #0xC0]") + "vmov.i32 q14, #0\n" + ASM_PREFETCH("[%[b_ptr], #0XC0]") + "vmov.i32 q15, #0\n" + "cmp %[k], #0\n" + "beq 6f\n" "1:\n" // Unroll 0 - "vldr d6, [%[b_ptr], #0x10]\n" - "vmov d2, r0, r1\n" - "vmla.f32 q4, q2, d0[0]\n" - "ldr r0, [%[b_ptr], #0x18]\n" - "vmla.f32 q5, q2, d0[1]\n" - "ldr r1, [%[b_ptr], #0x1C]\n" - "vmla.f32 q6, q2, d1[0]\n" - - "vldr d3, [%[a_ptr], #0x18]\n" - "vmov d7, r0, r1\n" - "vmla.f32 q7, q2, d1[1]\n" ASM_PREFETCH("[%[a_ptr], #0x100]") - "vmla.f32 q8, q2, d2[0]\n" - "vmla.f32 q9, q2, d2[1]\n" - - "vldr d4, [%[b_ptr], #0x20]\n" - "vmla.f32 q10, q3, d0[0]\n" - "ldr r0, [%[b_ptr], #0x28]\n" - "vmla.f32 q11, q3, d0[1]\n" - "ldr r1, [%[b_ptr], #0x2C]\n" - "vmla.f32 q12, q3, d1[0]\n" - - "vldr d0, [%[a_ptr], #0x20]\n" - "vmov d5, r0, r1\n" - "vmla.f32 q13, q3, d1[1]\n" - "ldr r0, [%[a_ptr], #0x28]\n" - "vmla.f32 q14, q3, d2[0]\n" - "ldr r1, [%[a_ptr], #0x2C]\n" - "vmla.f32 q15, q3, d2[1]\n" + "vldr d6, [%[b_ptr], #0x10]\n" + "vmov d2, r0, r1\n" + "vmla.f32 q4, q2, d0[0]\n" + "ldr r0, [%[b_ptr], #0x18]\n" + "vmla.f32 q5, q2, d0[1]\n" + "ldr r1, [%[b_ptr], #0x1C]\n" + "vmla.f32 
q6, q2, d1[0]\n" + + "vldr d3, [%[a_ptr], #0x18]\n" + "vmov d7, r0, r1\n" + "vmla.f32 q7, q2, d1[1]\n" + ASM_PREFETCH("[%[a_ptr], #0x100]") + "vmla.f32 q8, q2, d2[0]\n" + "vmla.f32 q9, q2, d2[1]\n" + + "vldr d4, [%[b_ptr], #0x20]\n" + "vmla.f32 q10, q3, d0[0]\n" + "ldr r0, [%[b_ptr], #0x28]\n" + "vmla.f32 q11, q3, d0[1]\n" + "ldr r1, [%[b_ptr], #0x2C]\n" + "vmla.f32 q12, q3, d1[0]\n" + + "vldr d0, [%[a_ptr], #0x20]\n" + "vmov d5, r0, r1\n" + "vmla.f32 q13, q3, d1[1]\n" + "ldr r0, [%[a_ptr], #0x28]\n" + "vmla.f32 q14, q3, d2[0]\n" + "ldr r1, [%[a_ptr], #0x2C]\n" + "vmla.f32 q15, q3, d2[1]\n" // Unroll 1 - "vldr d6, [%[b_ptr], #0x30]\n" - "vmov d1, r0, r1\n" - "vmla.f32 q4, q2, d3[0]\n" - "ldr r0, [%[b_ptr], #0x38]\n" - "vmla.f32 q5, q2, d3[1]\n" - "ldr r1, [%[b_ptr], #0x3C]\n" - "vmla.f32 q6, q2, d0[0]\n" - - "vldr d2, [%[a_ptr], #0x30]\n" - "vmov d7, r0, r1\n" - "vmla.f32 q7, q2, d0[1]\n" ASM_PREFETCH("[%[b_ptr], #0x100]") - "vmla.f32 q8, q2, d1[0]\n" - "vmla.f32 q9, q2, d1[1]\n" - - "vldr d4, [%[b_ptr], #0x40]\n" - "vmla.f32 q10, q3, d3[0]\n" - "ldr r0, [%[b_ptr], #0x48]\n" - "vmla.f32 q11, q3, d3[1]\n" - "ldr r1, [%[b_ptr], #0x4C]\n" - "vmla.f32 q12, q3, d0[0]\n" - - "vldr d3, [%[a_ptr], #0x38]\n" - "vmov d5, r0, r1\n" - "vmla.f32 q13, q3, d0[1]\n" - "ldr r0, [%[a_ptr], #0x40]\n" - "vmla.f32 q14, q3, d1[0]\n" - "ldr r1, [%[a_ptr], #0x44]\n" - "vmla.f32 q15, q3, d1[1]\n" + "vldr d6, [%[b_ptr], #0x30]\n" + "vmov d1, r0, r1\n" + "vmla.f32 q4, q2, d3[0]\n" + "ldr r0, [%[b_ptr], #0x38]\n" + "vmla.f32 q5, q2, d3[1]\n" + "ldr r1, [%[b_ptr], #0x3C]\n" + "vmla.f32 q6, q2, d0[0]\n" + + "vldr d2, [%[a_ptr], #0x30]\n" + "vmov d7, r0, r1\n" + "vmla.f32 q7, q2, d0[1]\n" + ASM_PREFETCH("[%[b_ptr], #0x100]") + "vmla.f32 q8, q2, d1[0]\n" + "vmla.f32 q9, q2, d1[1]\n" + + "vldr d4, [%[b_ptr], #0x40]\n" + "vmla.f32 q10, q3, d3[0]\n" + "ldr r0, [%[b_ptr], #0x48]\n" + "vmla.f32 q11, q3, d3[1]\n" + "ldr r1, [%[b_ptr], #0x4C]\n" + "vmla.f32 q12, q3, d0[0]\n" + + "vldr d3, [%[a_ptr], #0x38]\n" + "vmov d5, r0, r1\n" + "vmla.f32 q13, q3, d0[1]\n" + "ldr r0, [%[a_ptr], #0x40]\n" + "vmla.f32 q14, q3, d1[0]\n" + "ldr r1, [%[a_ptr], #0x44]\n" + "vmla.f32 q15, q3, d1[1]\n" // Unroll 2 - "vldr d6, [%[b_ptr], #0x50]\n" - "vmov d0, r0, r1\n" - "vmla.f32 q4, q2, d2[0]\n" - "ldr r0, [%[b_ptr], #0x58]\n" - "vmla.f32 q5, q2, d2[1]\n" - "ldr r1, [%[b_ptr], #0x5C]\n" - "vmla.f32 q6, q2, d3[0]\n" - - "vldr d1, [%[a_ptr], #0x48]\n" - "vmov d7, r0, r1\n" - "vmla.f32 q7, q2, d3[1]\n" ASM_PREFETCH("[%[a_ptr], #0x140]") - "vmla.f32 q8, q2, d0[0]\n" - "vmla.f32 q9, q2, d0[1]\n" - - "vldr d4, [%[b_ptr], #0x60]\n" - "vmla.f32 q10, q3, d2[0]\n" - "ldr r0, [%[b_ptr], #0x68]\n" - "vmla.f32 q11, q3, d2[1]\n" - "ldr r1, [%[b_ptr], #0x6C]\n" - "vmla.f32 q12, q3, d3[0]\n" - - "vldr d2, [%[a_ptr], #0x50]\n" - "vmov d5, r0, r1\n" - "vmla.f32 q13, q3, d3[1]\n" - "ldr r0, [%[a_ptr], #0x58]\n" - "vmla.f32 q14, q3, d0[0]\n" - "ldr r1, [%[a_ptr], #0x5C]\n" - "vmla.f32 q15, q3, d0[1]\n" - "add %[a_ptr], %[a_ptr], #0x60\n" + "vldr d6, [%[b_ptr], #0x50]\n" + "vmov d0, r0, r1\n" + "vmla.f32 q4, q2, d2[0]\n" + "ldr r0, [%[b_ptr], #0x58]\n" + "vmla.f32 q5, q2, d2[1]\n" + "ldr r1, [%[b_ptr], #0x5C]\n" + "vmla.f32 q6, q2, d3[0]\n" + + "vldr d1, [%[a_ptr], #0x48]\n" + "vmov d7, r0, r1\n" + "vmla.f32 q7, q2, d3[1]\n" + ASM_PREFETCH("[%[a_ptr], #0x140]") + "vmla.f32 q8, q2, d0[0]\n" + "vmla.f32 q9, q2, d0[1]\n" + + "vldr d4, [%[b_ptr], #0x60]\n" + "vmla.f32 q10, q3, d2[0]\n" + "ldr r0, [%[b_ptr], #0x68]\n" + "vmla.f32 q11, q3, d2[1]\n" + "ldr r1, [%[b_ptr], 
#0x6C]\n" + "vmla.f32 q12, q3, d3[0]\n" + + "vldr d2, [%[a_ptr], #0x50]\n" + "vmov d5, r0, r1\n" + "vmla.f32 q13, q3, d3[1]\n" + "ldr r0, [%[a_ptr], #0x58]\n" + "vmla.f32 q14, q3, d0[0]\n" + "ldr r1, [%[a_ptr], #0x5C]\n" + "vmla.f32 q15, q3, d0[1]\n" + "add %[a_ptr], %[a_ptr], #0x60\n" // Unroll 3 - "vldr d6, [%[b_ptr], #0x70]\n" - "vmov d3, r0, r1\n" - "vmla.f32 q4, q2, d1[0]\n" - "ldr r0, [%[b_ptr], #0x78]\n" - "vmla.f32 q5, q2, d1[1]\n" - "ldr r1, [%[b_ptr], #0x7C]\n" - "vmla.f32 q6, q2, d2[0]\n" - "add %[b_ptr], %[b_ptr], #0x80\n" - - "vldr d0, [%[a_ptr], #0x00]\n" - "vmov d7, r0, r1\n" - "vmla.f32 q7, q2, d2[1]\n" ASM_PREFETCH("[%[b_ptr], #0xC0]") - "vmla.f32 q8, q2, d3[0]\n" - "vmla.f32 q9, q2, d3[1]\n" - - "vldr d4, [%[b_ptr], #0x00]\n" - "vmla.f32 q10, q3, d1[0]\n" - "ldr r0, [%[b_ptr], #0x08]\n" - "vmla.f32 q11, q3, d1[1]\n" - "ldr r1, [%[b_ptr], #0x0C]\n" - "vmla.f32 q12, q3, d2[0]\n" - "subs %[k], %[k], #1\n" - - "vldr d1, [%[a_ptr], #0x08]\n" - "vmov d5, r0, r1\n" - "vmla.f32 q13, q3, d2[1]\n" - "ldr r0, [%[a_ptr], #0x10]\n" - "vmla.f32 q14, q3, d3[0]\n" - "ldr r1, [%[a_ptr], #0x14]\n" - "vmla.f32 q15, q3, d3[1]\n" - "bne 1b\n" + "vldr d6, [%[b_ptr], #0x70]\n" + "vmov d3, r0, r1\n" + "vmla.f32 q4, q2, d1[0]\n" + "ldr r0, [%[b_ptr], #0x78]\n" + "vmla.f32 q5, q2, d1[1]\n" + "ldr r1, [%[b_ptr], #0x7C]\n" + "vmla.f32 q6, q2, d2[0]\n" + "add %[b_ptr], %[b_ptr], #0x80\n" + + "vldr d0, [%[a_ptr], #0x00]\n" + "vmov d7, r0, r1\n" + "vmla.f32 q7, q2, d2[1]\n" + ASM_PREFETCH("[%[b_ptr], #0xC0]") + "vmla.f32 q8, q2, d3[0]\n" + "vmla.f32 q9, q2, d3[1]\n" + + "vldr d4, [%[b_ptr], #0x00]\n" + "vmla.f32 q10, q3, d1[0]\n" + "ldr r0, [%[b_ptr], #0x08]\n" + "vmla.f32 q11, q3, d1[1]\n" + "ldr r1, [%[b_ptr], #0x0C]\n" + "vmla.f32 q12, q3, d2[0]\n" + "subs %[k], %[k], #1\n" + + "vldr d1, [%[a_ptr], #0x08]\n" + "vmov d5, r0, r1\n" + "vmla.f32 q13, q3, d2[1]\n" + "ldr r0, [%[a_ptr], #0x10]\n" + "vmla.f32 q14, q3, d3[0]\n" + "ldr r1, [%[a_ptr], #0x14]\n" + "vmla.f32 q15, q3, d3[1]\n" + "bne 1b\n" // "Tails" shows how many multiply blocks are needed at the // end, must be 1-4 inclusive. Bail out to alternative tail // immediately if it's 1. "6:\n" - "subs %[tails], %[tails], #1\n" - "beq 3f\n" + "subs %[tails], %[tails], #1\n" + "beq 3f\n" // Detached final iteration - for now adapt the generic // tails rather than reimplementing for A53. 
// Unroll 0 - "vmov d2, r0, r1\n" - "add %[a_ptr], %[a_ptr], #0x18\n" - "vmla.f32 q4, q2, d0[0]\n" - "vld1.32 {d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q5, q2, d0[1]\n" - "add %[b_ptr], %[b_ptr], #0x10\n" - "vmla.f32 q6, q2, d1[0]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "vmla.f32 q7, q2, d1[1]\n" - "vmla.f32 q8, q2, d2[0]\n" - "subs %[tails], %[tails], #1\n" - "vmla.f32 q9, q2, d2[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d0[0]\n" - "vmla.f32 q11, q3, d0[1]\n" - "vmla.f32 q12, q3, d1[0]\n" - "vmla.f32 q13, q3, d1[1]\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmla.f32 q14, q3, d2[0]\n" - "vmla.f32 q15, q3, d2[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "beq 4f\n" + "vmov d2, r0, r1\n" + "add %[a_ptr], %[a_ptr], #0x18\n" + "vmla.f32 q4, q2, d0[0]\n" + "vld1.32 {d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q5, q2, d0[1]\n" + "add %[b_ptr], %[b_ptr], #0x10\n" + "vmla.f32 q6, q2, d1[0]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q7, q2, d1[1]\n" + "vmla.f32 q8, q2, d2[0]\n" + "subs %[tails], %[tails], #1\n" + "vmla.f32 q9, q2, d2[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d0[0]\n" + "vmla.f32 q11, q3, d0[1]\n" + "vmla.f32 q12, q3, d1[0]\n" + "vmla.f32 q13, q3, d1[1]\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q14, q3, d2[0]\n" + "vmla.f32 q15, q3, d2[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "beq 4f\n" // Unroll 1 - "vmla.f32 q4, q2, d3[0]\n" - "vmla.f32 q5, q2, d3[1]\n" - "subs %[tails], %[tails], #1\n" - "vmla.f32 q6, q2, d0[0]\n" - "vmla.f32 q7, q2, d0[1]\n" - "vmla.f32 q8, q2, d1[0]\n" - "vmla.f32 q9, q2, d1[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d3[0]\n" - "vmla.f32 q11, q3, d3[1]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q12, q3, d0[0]\n" - "vmla.f32 q13, q3, d0[1]\n" - "vmla.f32 q14, q3, d1[0]\n" - "vmla.f32 q15, q3, d1[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "beq 5f\n" + "vmla.f32 q4, q2, d3[0]\n" + "vmla.f32 q5, q2, d3[1]\n" + "subs %[tails], %[tails], #1\n" + "vmla.f32 q6, q2, d0[0]\n" + "vmla.f32 q7, q2, d0[1]\n" + "vmla.f32 q8, q2, d1[0]\n" + "vmla.f32 q9, q2, d1[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d3[0]\n" + "vmla.f32 q11, q3, d3[1]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q12, q3, d0[0]\n" + "vmla.f32 q13, q3, d0[1]\n" + "vmla.f32 q14, q3, d1[0]\n" + "vmla.f32 q15, q3, d1[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "beq 5f\n" // Unroll 2 - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmla.f32 q4, q2, d2[0]\n" - "vmla.f32 q5, q2, d2[1]\n" - "vmla.f32 q6, q2, d3[0]\n" - "vmla.f32 q7, q2, d3[1]\n" - "vmla.f32 q8, q2, d0[0]\n" - "vmla.f32 q9, q2, d0[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d2[0]\n" - "vmla.f32 q11, q3, d2[1]\n" - "vmla.f32 q12, q3, d3[0]\n" - "vmla.f32 q13, q3, d3[1]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q14, q3, d0[0]\n" - "vmla.f32 q15, q3, d0[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q4, q2, d2[0]\n" + "vmla.f32 q5, q2, d2[1]\n" + "vmla.f32 q6, q2, d3[0]\n" + "vmla.f32 q7, q2, d3[1]\n" + "vmla.f32 q8, q2, d0[0]\n" + "vmla.f32 q9, q2, d0[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d2[0]\n" + "vmla.f32 q11, q3, d2[1]\n" + "vmla.f32 q12, q3, d3[0]\n" + "vmla.f32 q13, q3, d3[1]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q14, q3, d0[0]\n" + "vmla.f32 q15, q3, d0[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" // Unroll 3 - "vmla.f32 q4, q2, d1[0]\n" - "vmla.f32 q10, q3, 
d1[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q5, q2, d1[1]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d1[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q6, q2, d2[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d2[0]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d2[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d2[1]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d3[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d3[0]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d3[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d3[1]\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "b 2f\n" + "vmla.f32 q4, q2, d1[0]\n" + "vmla.f32 q10, q3, d1[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q5, q2, d1[1]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d1[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q6, q2, d2[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d2[0]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d2[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d2[1]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d3[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d3[0]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d3[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d3[1]\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "b 2f\n" // tails==1 final tail "3:\n" - "vmov d2, r0, r1\n" - "add %[b_ptr], %[b_ptr], #0x10\n" - "vmla.f32 q4, q2, d0[0]\n" - "add %[a_ptr], %[a_ptr], #0x18\n" - "vmla.f32 q5, q2, d0[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "vmla.f32 q6, q2, d1[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q10, q3, d0[0]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d0[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d1[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d1[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d1[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d2[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d2[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d2[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d2[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "b 2f\n" + "vmov d2, r0, r1\n" + "add %[b_ptr], %[b_ptr], #0x10\n" + "vmla.f32 q4, q2, d0[0]\n" + "add %[a_ptr], %[a_ptr], #0x18\n" + "vmla.f32 q5, q2, d0[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q6, q2, d1[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q10, q3, d0[0]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d0[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d1[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d1[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d1[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d2[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d2[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d2[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d2[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "b 2f\n" // 
tails==2 final tail "4:\n" - "vmla.f32 q4, q2, d3[0]\n" - "vmla.f32 q10, q3, d3[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q5, q2, d3[1]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d3[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q6, q2, d0[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d0[0]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d0[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d0[1]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d1[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d1[0]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d1[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d1[1]\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "b 2f\n" + "vmla.f32 q4, q2, d3[0]\n" + "vmla.f32 q10, q3, d3[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q5, q2, d3[1]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d3[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q6, q2, d0[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d0[0]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d0[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d0[1]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d1[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d1[0]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d1[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d1[1]\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "b 2f\n" // tails==3 final tail "5:\n" - "vmla.f32 q4, q2, d2[0]\n" - "vld1.32 {d0}, [%[a_ptr] :64]!\n" - "vmla.f32 q5, q2, d2[1]\n" - "vmla.f32 q6, q2, d3[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q10, q3, d2[0]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d2[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d3[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d3[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d3[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d0[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d0[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d0[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d0[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "vmla.f32 q4, q2, d2[0]\n" + "vld1.32 {d0}, [%[a_ptr] :64]!\n" + "vmla.f32 q5, q2, d2[1]\n" + "vmla.f32 q6, q2, d3[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q10, q3, d2[0]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d2[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d3[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d3[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d3[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d0[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d0[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d0[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d0[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" "2:\n" - "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n" - : [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr), [c_ptr] "+r"(c_ptr), [k] "+r"(k), [tails] 
"+r"(tails) - : - : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "r0", "r1"); + "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n" + : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k), [tails] "+r" (tails) + : + : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "r0", "r1" + ); } } } diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp index 4cfb72a455..c5976cfe8d 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp @@ -37,358 +37,375 @@ // Note that the intent of this is that either ablocks or bblocks will be 1 // - this construction allows the output loop to proceed in either order. -namespace arm_gemm -{ -void a32_sgemm_8x6_a55r1(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) -{ +namespace arm_gemm { + +void a32_sgemm_8x6_a55r1(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) { const float *a_ptr = Apanel; - float *c_ptr = Cpanel; + float *c_ptr = Cpanel; /* Work out starting values for "k" and "tails" in the inner loop. */ int tails_initial = (K & 3); - if(tails_initial == 0) - { + if (tails_initial == 0) { tails_initial = 4; } - int k_initial = ((K + 3) / 4) - 1; + int k_initial = ((K+3)/4) - 1; - for(int yb = 0; yb < ablocks; yb++) - { + for (int yb=0; yb<ablocks; yb++) { const float *a_ptr0 = a_ptr; - const float *b_ptr = Bpanel; + const float *b_ptr = Bpanel; - for(int xb = 0; xb < bblocks; xb++) - { + for (int xb=0; xb<bblocks; xb++) { int tails = tails_initial; - int k = k_initial; + int k = k_initial; a_ptr = a_ptr0; - __asm __volatile( - "vldr d0, [%[a_ptr]]\n" - "vmov.i32 q4, #0\n" - "vldr d1, [%[a_ptr], #0x08]\n" - "vmov.i32 q5, #0\n" - "vldr d4, [%[b_ptr]]\n" - "vmov.i32 q6, #0\n" - "vldr d5, [%[b_ptr], #0x08]\n" - "vmov.i32 q7, #0\n" - "vldr d2, [%[a_ptr], #0x10]\n" - "vmov.i32 q8, #0\n" ASM_PREFETCH("[%[b_ptr], #0x40]") "vmov.i32 q9, #0\n" ASM_PREFETCH("[%[a_ptr], #0x40]") "vmov.i32 q10, #0\n" ASM_PREFETCH("[%[b_ptr], #0x80]") "vmov.i32 q11, #0\n" - ASM_PREFETCH("[%[a_ptr], #0x80]") "vmov.i32 q12, #0\n" ASM_PREFETCH("[%[b_ptr], #0XC0]") "vmov.i32 q13, #0\n" ASM_PREFETCH("[%[a_ptr], #0xC0]") "vmov.i32 q14, #0\n" - ASM_PREFETCH("[%[b_ptr], #0x100]") "vmov.i32 q15, #0\n" ASM_PREFETCH("[%[a_ptr], #0x100]") "cmp %[k], #0\n" ASM_PREFETCH("[%[b_ptr], #0x140]") "beq 6f\n" + __asm __volatile ( + "vldr d0, [%[a_ptr]]\n" + "vmov.i32 q4, #0\n" + "vldr d1, [%[a_ptr], #0x08]\n" + "vmov.i32 q5, #0\n" + "vldr d4, [%[b_ptr]]\n" + "vmov.i32 q6, #0\n" + "vldr d5, [%[b_ptr], #0x08]\n" + "vmov.i32 q7, #0\n" + "vldr d2, [%[a_ptr], #0x10]\n" + "vmov.i32 q8, #0\n" + ASM_PREFETCH("[%[b_ptr], #0x40]") + "vmov.i32 q9, #0\n" + ASM_PREFETCH("[%[a_ptr], #0x40]") + "vmov.i32 q10, #0\n" + ASM_PREFETCH("[%[b_ptr], #0x80]") + "vmov.i32 q11, #0\n" + ASM_PREFETCH("[%[a_ptr], #0x80]") + "vmov.i32 q12, #0\n" + ASM_PREFETCH("[%[b_ptr], #0XC0]") + "vmov.i32 q13, #0\n" + ASM_PREFETCH("[%[a_ptr], #0xC0]") + "vmov.i32 q14, #0\n" + ASM_PREFETCH("[%[b_ptr], #0x100]") + "vmov.i32 q15, #0\n" + ASM_PREFETCH("[%[a_ptr], #0x100]") + "cmp %[k], #0\n" + ASM_PREFETCH("[%[b_ptr], #0x140]") + "beq 6f\n" ASM_PREFETCH("[%[b_ptr], #0x180]") "1:\n" // Unroll 0 - "vmla.f32 q4, q2, d0[0]\n" - "vldr d6, [%[b_ptr], #0x10]\n" - "vmla.f32 q5, q2, 
d0[1]\n" - "vldr d7, [%[b_ptr], #0x18]\n" - "vmla.f32 q6, q2, d1[0]\n" - "vldr d3, [%[a_ptr], #0x18]\n" - "vmla.f32 q7, q2, d1[1]\n" ASM_PREFETCH("[%[a_ptr], #0x140]") - "vmla.f32 q8, q2, d2[0]\n" - "subs %[k], %[k], #1\n" - "vmla.f32 q9, q2, d2[1]\n" - "vldr d4, [%[b_ptr], #0x20]\n" - "vmla.f32 q10, q3, d0[0]\n" - "vldr d5, [%[b_ptr], #0x28]\n" - "vmla.f32 q11, q3, d0[1]\n" - "vldr d0, [%[a_ptr], #0x20]\n" - "vmla.f32 q12, q3, d1[0]\n" - - "vmla.f32 q13, q3, d1[1]\n" - "vldr d1, [%[a_ptr], #0x28]\n" - "vmla.f32 q14, q3, d2[0]\n" - - "vmla.f32 q15, q3, d2[1]\n" - "vldr d6, [%[b_ptr], #0x30]\n" + "vmla.f32 q4, q2, d0[0]\n" + "vldr d6, [%[b_ptr], #0x10]\n" + "vmla.f32 q5, q2, d0[1]\n" + "vldr d7, [%[b_ptr], #0x18]\n" + "vmla.f32 q6, q2, d1[0]\n" + "vldr d3, [%[a_ptr], #0x18]\n" + "vmla.f32 q7, q2, d1[1]\n" + ASM_PREFETCH("[%[a_ptr], #0x140]") + "vmla.f32 q8, q2, d2[0]\n" + "subs %[k], %[k], #1\n" + "vmla.f32 q9, q2, d2[1]\n" + "vldr d4, [%[b_ptr], #0x20]\n" + "vmla.f32 q10, q3, d0[0]\n" + "vldr d5, [%[b_ptr], #0x28]\n" + "vmla.f32 q11, q3, d0[1]\n" + "vldr d0, [%[a_ptr], #0x20]\n" + "vmla.f32 q12, q3, d1[0]\n" + + "vmla.f32 q13, q3, d1[1]\n" + "vldr d1, [%[a_ptr], #0x28]\n" + "vmla.f32 q14, q3, d2[0]\n" + + "vmla.f32 q15, q3, d2[1]\n" + "vldr d6, [%[b_ptr], #0x30]\n" // Unroll 1 - "vmla.f32 q4, q2, d3[0]\n" - "vldr d7, [%[b_ptr], #0x38]\n" - "vmla.f32 q5, q2, d3[1]\n" - "vldr d2, [%[a_ptr], #0x30]\n" - "vmla.f32 q6, q2, d0[0]\n" - - "vmla.f32 q7, q2, d0[1]\n" ASM_PREFETCH("[%[b_ptr], #0x1C0]") - "vmla.f32 q8, q2, d1[0]\n" - - "vmla.f32 q9, q2, d1[1]\n" - "vldr d4, [%[b_ptr], #0x40]\n" - "vmla.f32 q10, q3, d3[0]\n" - "vldr d5, [%[b_ptr], #0x48]\n" - "vmla.f32 q11, q3, d3[1]\n" - "vldr d3, [%[a_ptr], #0x38]\n" - "vmla.f32 q12, q3, d0[0]\n" - - "vmla.f32 q13, q3, d0[1]\n" - "vldr d0, [%[a_ptr], #0x40]\n" - "vmla.f32 q14, q3, d1[0]\n" - - "vmla.f32 q15, q3, d1[1]\n" - "vldr d6, [%[b_ptr], #0x50]\n" + "vmla.f32 q4, q2, d3[0]\n" + "vldr d7, [%[b_ptr], #0x38]\n" + "vmla.f32 q5, q2, d3[1]\n" + "vldr d2, [%[a_ptr], #0x30]\n" + "vmla.f32 q6, q2, d0[0]\n" + + "vmla.f32 q7, q2, d0[1]\n" + ASM_PREFETCH("[%[b_ptr], #0x1C0]") + "vmla.f32 q8, q2, d1[0]\n" + + "vmla.f32 q9, q2, d1[1]\n" + "vldr d4, [%[b_ptr], #0x40]\n" + "vmla.f32 q10, q3, d3[0]\n" + "vldr d5, [%[b_ptr], #0x48]\n" + "vmla.f32 q11, q3, d3[1]\n" + "vldr d3, [%[a_ptr], #0x38]\n" + "vmla.f32 q12, q3, d0[0]\n" + + "vmla.f32 q13, q3, d0[1]\n" + "vldr d0, [%[a_ptr], #0x40]\n" + "vmla.f32 q14, q3, d1[0]\n" + + "vmla.f32 q15, q3, d1[1]\n" + "vldr d6, [%[b_ptr], #0x50]\n" // Unroll 2 - "vmla.f32 q4, q2, d2[0]\n" - "vldr d7, [%[b_ptr], #0x58]\n" - "vmla.f32 q5, q2, d2[1]\n" - "vldr d1, [%[a_ptr], #0x48]\n" - "vmla.f32 q6, q2, d3[0]\n" - - "vmla.f32 q7, q2, d3[1]\n" ASM_PREFETCH("[%[a_ptr], #0x180]") - "vmla.f32 q8, q2, d0[0]\n" - - "vmla.f32 q9, q2, d0[1]\n" - "vldr d4, [%[b_ptr], #0x60]\n" - "vmla.f32 q10, q3, d2[0]\n" - "vldr d5, [%[b_ptr], #0x68]\n" - "vmla.f32 q11, q3, d2[1]\n" - "vldr d2, [%[a_ptr], #0x50]\n" - "vmla.f32 q12, q3, d3[0]\n" - - "vmla.f32 q13, q3, d3[1]\n" - "vldr d3, [%[a_ptr], #0x58]\n" - "vmla.f32 q14, q3, d0[0]\n" - "add %[a_ptr], %[a_ptr], #0x60\n" - "vmla.f32 q15, q3, d0[1]\n" - "vldr d6, [%[b_ptr], #0x70]\n" + "vmla.f32 q4, q2, d2[0]\n" + "vldr d7, [%[b_ptr], #0x58]\n" + "vmla.f32 q5, q2, d2[1]\n" + "vldr d1, [%[a_ptr], #0x48]\n" + "vmla.f32 q6, q2, d3[0]\n" + + "vmla.f32 q7, q2, d3[1]\n" + ASM_PREFETCH("[%[a_ptr], #0x180]") + "vmla.f32 q8, q2, d0[0]\n" + + "vmla.f32 q9, q2, d0[1]\n" + "vldr d4, [%[b_ptr], #0x60]\n" + 
"vmla.f32 q10, q3, d2[0]\n" + "vldr d5, [%[b_ptr], #0x68]\n" + "vmla.f32 q11, q3, d2[1]\n" + "vldr d2, [%[a_ptr], #0x50]\n" + "vmla.f32 q12, q3, d3[0]\n" + + "vmla.f32 q13, q3, d3[1]\n" + "vldr d3, [%[a_ptr], #0x58]\n" + "vmla.f32 q14, q3, d0[0]\n" + "add %[a_ptr], %[a_ptr], #0x60\n" + "vmla.f32 q15, q3, d0[1]\n" + "vldr d6, [%[b_ptr], #0x70]\n" // Unroll 3 - "vmla.f32 q4, q2, d1[0]\n" - "vldr d7, [%[b_ptr], #0x78]\n" - "vmla.f32 q5, q2, d1[1]\n" - "add %[b_ptr], %[b_ptr], #0x80\n" - "vmla.f32 q6, q2, d2[0]\n" - "vldr d0, [%[a_ptr], #0x00]\n" - "vmla.f32 q7, q2, d2[1]\n" ASM_PREFETCH("[%[b_ptr], #0x180]") - "vmla.f32 q8, q2, d3[0]\n" - - "vmla.f32 q9, q2, d3[1]\n" - "vldr d4, [%[b_ptr], #0x00]\n" - "vmla.f32 q10, q3, d1[0]\n" - "vldr d5, [%[b_ptr], #0x08]\n" - "vmla.f32 q11, q3, d1[1]\n" - "vldr d1, [%[a_ptr], #0x08]\n" - "vmla.f32 q12, q3, d2[0]\n" - - "vmla.f32 q13, q3, d2[1]\n" - "vldr d2, [%[a_ptr], #0x10]\n" - "vmla.f32 q14, q3, d3[0]\n" - - "vmla.f32 q15, q3, d3[1]\n" - "bne 1b\n" + "vmla.f32 q4, q2, d1[0]\n" + "vldr d7, [%[b_ptr], #0x78]\n" + "vmla.f32 q5, q2, d1[1]\n" + "add %[b_ptr], %[b_ptr], #0x80\n" + "vmla.f32 q6, q2, d2[0]\n" + "vldr d0, [%[a_ptr], #0x00]\n" + "vmla.f32 q7, q2, d2[1]\n" + ASM_PREFETCH("[%[b_ptr], #0x180]") + "vmla.f32 q8, q2, d3[0]\n" + + "vmla.f32 q9, q2, d3[1]\n" + "vldr d4, [%[b_ptr], #0x00]\n" + "vmla.f32 q10, q3, d1[0]\n" + "vldr d5, [%[b_ptr], #0x08]\n" + "vmla.f32 q11, q3, d1[1]\n" + "vldr d1, [%[a_ptr], #0x08]\n" + "vmla.f32 q12, q3, d2[0]\n" + + "vmla.f32 q13, q3, d2[1]\n" + "vldr d2, [%[a_ptr], #0x10]\n" + "vmla.f32 q14, q3, d3[0]\n" + + "vmla.f32 q15, q3, d3[1]\n" + "bne 1b\n" // "Tails" shows how many multiply blocks are needed at the // end, must be 1-4 inclusive. Bail out to alternative tail // immediately if it's 1. 
"6:\n" - "subs %[tails], %[tails], #1\n" - "beq 3f\n" + "subs %[tails], %[tails], #1\n" + "beq 3f\n" // Detached final iteration // Unroll 0 - "vmla.f32 q4, q2, d0[0]\n" - "vldr d6, [%[b_ptr], #0x10]\n" - "vmla.f32 q5, q2, d0[1]\n" - "vldr d7, [%[b_ptr], #0x18]\n" - "vmla.f32 q6, q2, d1[0]\n" - "vldr d3, [%[a_ptr], #0x18]\n" - "vmla.f32 q7, q2, d1[1]\n" - "subs %[tails], %[tails], #1\n" - "vmla.f32 q8, q2, d2[0]\n" - "vmla.f32 q9, q2, d2[1]\n" - "vldr d4, [%[b_ptr], #0x20]\n" - - "vmla.f32 q10, q3, d0[0]\n" - "vldr d5, [%[b_ptr], #0x28]\n" - "vmla.f32 q11, q3, d0[1]\n" - "vldr d0, [%[a_ptr], #0x20]\n" - "vmla.f32 q12, q3, d1[0]\n" - "add %[b_ptr], %[b_ptr], #0x30\n" - "vmla.f32 q13, q3, d1[1]\n" - "vldr d1, [%[a_ptr], #0x28]\n" - "vmla.f32 q14, q3, d2[0]\n" - "vmla.f32 q15, q3, d2[1]\n" - "beq 4f\n" + "vmla.f32 q4, q2, d0[0]\n" + "vldr d6, [%[b_ptr], #0x10]\n" + "vmla.f32 q5, q2, d0[1]\n" + "vldr d7, [%[b_ptr], #0x18]\n" + "vmla.f32 q6, q2, d1[0]\n" + "vldr d3, [%[a_ptr], #0x18]\n" + "vmla.f32 q7, q2, d1[1]\n" + "subs %[tails], %[tails], #1\n" + "vmla.f32 q8, q2, d2[0]\n" + "vmla.f32 q9, q2, d2[1]\n" + "vldr d4, [%[b_ptr], #0x20]\n" + + "vmla.f32 q10, q3, d0[0]\n" + "vldr d5, [%[b_ptr], #0x28]\n" + "vmla.f32 q11, q3, d0[1]\n" + "vldr d0, [%[a_ptr], #0x20]\n" + "vmla.f32 q12, q3, d1[0]\n" + "add %[b_ptr], %[b_ptr], #0x30\n" + "vmla.f32 q13, q3, d1[1]\n" + "vldr d1, [%[a_ptr], #0x28]\n" + "vmla.f32 q14, q3, d2[0]\n" + "vmla.f32 q15, q3, d2[1]\n" + "beq 4f\n" // Unroll 1 - "vmla.f32 q4, q2, d3[0]\n" - "vldr d6, [%[b_ptr], #0x30]\n" - "vmla.f32 q5, q2, d3[1]\n" - "vldr d7, [%[b_ptr], #0x38]\n" - "vmla.f32 q6, q2, d0[0]\n" - "vldr d2, [%[a_ptr], #0x30]\n" - "vmla.f32 q7, q2, d0[1]\n" - "subs %[tails], %[tails], #1\n" - "vmla.f32 q8, q2, d1[0]\n" - - "vmla.f32 q9, q2, d1[1]\n" - - "vmla.f32 q10, q3, d3[0]\n" - "vldr d4, [%[b_ptr], #0x40]\n" - "vmla.f32 q11, q3, d3[1]\n" - "vldr d5, [%[b_ptr], #0x48]\n" - "vmla.f32 q12, q3, d0[0]\n" - "vldr d3, [%[a_ptr], #0x38]\n" - "vmla.f32 q13, q3, d0[1]\n" - "vldr d0, [%[a_ptr], #0x40]\n" - "vmla.f32 q14, q3, d1[0]\n" - "vmla.f32 q15, q3, d1[1]\n" - "beq 5f\n" + "vmla.f32 q4, q2, d3[0]\n" + "vldr d6, [%[b_ptr], #0x30]\n" + "vmla.f32 q5, q2, d3[1]\n" + "vldr d7, [%[b_ptr], #0x38]\n" + "vmla.f32 q6, q2, d0[0]\n" + "vldr d2, [%[a_ptr], #0x30]\n" + "vmla.f32 q7, q2, d0[1]\n" + "subs %[tails], %[tails], #1\n" + "vmla.f32 q8, q2, d1[0]\n" + + "vmla.f32 q9, q2, d1[1]\n" + + "vmla.f32 q10, q3, d3[0]\n" + "vldr d4, [%[b_ptr], #0x40]\n" + "vmla.f32 q11, q3, d3[1]\n" + "vldr d5, [%[b_ptr], #0x48]\n" + "vmla.f32 q12, q3, d0[0]\n" + "vldr d3, [%[a_ptr], #0x38]\n" + "vmla.f32 q13, q3, d0[1]\n" + "vldr d0, [%[a_ptr], #0x40]\n" + "vmla.f32 q14, q3, d1[0]\n" + "vmla.f32 q15, q3, d1[1]\n" + "beq 5f\n" // Unroll 2 - "vmla.f32 q4, q2, d2[0]\n" - "vldr d6, [%[b_ptr], #0x50]\n" - "vmla.f32 q5, q2, d2[1]\n" - "vldr d7, [%[b_ptr], #0x58]\n" - "vmla.f32 q6, q2, d3[0]\n" - "vldr d1, [%[a_ptr], #0x48]\n" - "vmla.f32 q7, q2, d3[1]\n" - "vmla.f32 q8, q2, d0[0]\n" - "vmla.f32 q9, q2, d0[1]\n" - - "vmla.f32 q10, q3, d2[0]\n" - "vldr d4, [%[b_ptr], #0x60]\n" - "vmla.f32 q11, q3, d2[1]\n" - "vldr d5, [%[b_ptr], #0x68]\n" - "vmla.f32 q12, q3, d3[0]\n" - "vldr d2, [%[a_ptr], #0x50]\n" - "vmla.f32 q13, q3, d3[1]\n" - "vldr d3, [%[a_ptr], #0x58]\n" - "vmla.f32 q14, q3, d0[0]\n" - "vmla.f32 q15, q3, d0[1]\n" + "vmla.f32 q4, q2, d2[0]\n" + "vldr d6, [%[b_ptr], #0x50]\n" + "vmla.f32 q5, q2, d2[1]\n" + "vldr d7, [%[b_ptr], #0x58]\n" + "vmla.f32 q6, q2, d3[0]\n" + "vldr d1, [%[a_ptr], #0x48]\n" + 
"vmla.f32 q7, q2, d3[1]\n" + "vmla.f32 q8, q2, d0[0]\n" + "vmla.f32 q9, q2, d0[1]\n" + + "vmla.f32 q10, q3, d2[0]\n" + "vldr d4, [%[b_ptr], #0x60]\n" + "vmla.f32 q11, q3, d2[1]\n" + "vldr d5, [%[b_ptr], #0x68]\n" + "vmla.f32 q12, q3, d3[0]\n" + "vldr d2, [%[a_ptr], #0x50]\n" + "vmla.f32 q13, q3, d3[1]\n" + "vldr d3, [%[a_ptr], #0x58]\n" + "vmla.f32 q14, q3, d0[0]\n" + "vmla.f32 q15, q3, d0[1]\n" // Unroll 3 - "vmla.f32 q4, q2, d1[0]\n" - "vldr d6, [%[b_ptr], #0x70]\n" - "vmla.f32 q5, q2, d1[1]\n" - "vldr d7, [%[b_ptr], #0x78]\n" - "vmla.f32 q10, q3, d1[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d1[1]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q6, q2, d2[0]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d2[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d2[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d2[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d3[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d3[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d3[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d3[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "add %[a_ptr], %[a_ptr], #0x60\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "add %[b_ptr], %[b_ptr], #0x80\n" - "b 2f\n" + "vmla.f32 q4, q2, d1[0]\n" + "vldr d6, [%[b_ptr], #0x70]\n" + "vmla.f32 q5, q2, d1[1]\n" + "vldr d7, [%[b_ptr], #0x78]\n" + "vmla.f32 q10, q3, d1[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d1[1]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q6, q2, d2[0]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d2[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d2[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d2[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d3[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d3[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d3[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d3[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "add %[a_ptr], %[a_ptr], #0x60\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "add %[b_ptr], %[b_ptr], #0x80\n" + "b 2f\n" // tails==1 final tail "3:\n" - "vmla.f32 q4, q2, d0[0]\n" - "vldr d6, [%[b_ptr], #0x10]\n" - "vmla.f32 q5, q2, d0[1]\n" - "vldr d7, [%[b_ptr], #0x18]\n" - "vmla.f32 q6, q2, d1[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q10, q3, d0[0]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d0[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d1[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d1[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d1[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d2[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d2[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d2[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d2[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "add %[a_ptr], %[a_ptr], #0x18\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "add %[b_ptr], %[b_ptr], #0x20\n" - "b 2f\n" + "vmla.f32 q4, q2, d0[0]\n" + "vldr d6, [%[b_ptr], #0x10]\n" + "vmla.f32 q5, q2, d0[1]\n" + "vldr d7, [%[b_ptr], #0x18]\n" + "vmla.f32 q6, q2, d1[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q10, q3, d0[0]\n" + 
"vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d0[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d1[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d1[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d1[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d2[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d2[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d2[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d2[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "add %[a_ptr], %[a_ptr], #0x18\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "add %[b_ptr], %[b_ptr], #0x20\n" + "b 2f\n" // tails==2 final tail "4:\n" - "vmla.f32 q4, q2, d3[0]\n" - "vldr d6, [%[b_ptr], #0x30]\n" - "vmla.f32 q5, q2, d3[1]\n" - "vldr d7, [%[b_ptr], #0x38]\n" - "vmla.f32 q10, q3, d3[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d3[1]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q6, q2, d0[0]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d0[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d0[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d0[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d1[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d1[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d1[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d1[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "add %[b_ptr], %[b_ptr], #0x40\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "add %[a_ptr], %[a_ptr], #0x30\n" - "b 2f\n" + "vmla.f32 q4, q2, d3[0]\n" + "vldr d6, [%[b_ptr], #0x30]\n" + "vmla.f32 q5, q2, d3[1]\n" + "vldr d7, [%[b_ptr], #0x38]\n" + "vmla.f32 q10, q3, d3[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d3[1]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q6, q2, d0[0]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d0[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d0[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d0[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d1[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d1[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d1[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d1[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "add %[b_ptr], %[b_ptr], #0x40\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "add %[a_ptr], %[a_ptr], #0x30\n" + "b 2f\n" // tails==3 final tail "5:\n" - "vmla.f32 q4, q2, d2[0]\n" - "vldr d6, [%[b_ptr], #0x50]\n" - "vmla.f32 q5, q2, d2[1]\n" - "vldr d7, [%[b_ptr], #0x58]\n" - "vmla.f32 q6, q2, d3[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q10, q3, d2[0]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d2[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d3[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d3[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d3[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d0[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d0[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d0[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d0[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] 
:128]!\n" - "add %[a_ptr], %[a_ptr], #0x48\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "add %[b_ptr], %[b_ptr], #0x60\n" + "vmla.f32 q4, q2, d2[0]\n" + "vldr d6, [%[b_ptr], #0x50]\n" + "vmla.f32 q5, q2, d2[1]\n" + "vldr d7, [%[b_ptr], #0x58]\n" + "vmla.f32 q6, q2, d3[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q10, q3, d2[0]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d2[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d3[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d3[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d3[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d0[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d0[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d0[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d0[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "add %[a_ptr], %[a_ptr], #0x48\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "add %[b_ptr], %[b_ptr], #0x60\n" "2:\n" - "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n" - : [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr), [c_ptr] "+r"(c_ptr), [k] "+r"(k), [tails] "+r"(tails) - : - : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "r0", "r1"); + "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n" + : [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k), [tails] "+r" (tails) + : + : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "r0", "r1" + ); } } } diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp index d7d0484610..7b36e8e4ef 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp @@ -37,120 +37,129 @@ // Note that the intent of this is that either ablocks or bblocks will be 1 // - this construction allows the output loop to proceed in either order. 
-namespace arm_gemm -{ -void a32_sgemm_8x6(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) -{ +namespace arm_gemm { + +void a32_sgemm_8x6(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) { const float *a_ptr = Apanel; - float *c_ptr = Cpanel; + float *c_ptr = Cpanel; - for(int yb = 0; yb < ablocks; yb++) - { + for (int yb=0; yb<ablocks; yb++) { const float *a_ptr0 = a_ptr; - const float *b_ptr = Bpanel; + const float *b_ptr = Bpanel; - for(int xb = 0; xb < bblocks; xb++) - { - a_ptr = a_ptr0; + for (int xb=0; xb<bblocks; xb++) { + a_ptr = a_ptr0; int tails = (K & 3); - if(tails == 0) - { + if (tails == 0) { tails = 4; } - int k = ((K + 3) / 4) - 1; - - __asm __volatile( - "vmov.i32 q4, #0\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmov.i32 q5, #0\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - "vmov.i32 q6, #0\n" ASM_PREFETCH("[%[a_ptr], #48]") "vmov.i32 q7, #0\n" ASM_PREFETCH("[%[b_ptr], #48]") "vmov.i32 q8, #0\n" ASM_PREFETCH("[%[a_ptr], #112]") "vmov.i32 q9, #0\n" + int k = ((K+3)/4) - 1; + + __asm __volatile ( + "vmov.i32 q4, #0\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmov.i32 q5, #0\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + "vmov.i32 q6, #0\n" + ASM_PREFETCH("[%[a_ptr], #48]") + "vmov.i32 q7, #0\n" + ASM_PREFETCH("[%[b_ptr], #48]") + "vmov.i32 q8, #0\n" + ASM_PREFETCH("[%[a_ptr], #112]") + "vmov.i32 q9, #0\n" ASM_PREFETCH("[%[b_ptr], #112]") - "vmov.i32 q10, #0\n" - "vmov.i32 q11, #0\n" - "vmov.i32 q12, #0\n" - "vmov.i32 q13, #0\n" ASM_PREFETCH("[%[a_ptr], #176]") "vmov.i32 q14, #0\n" ASM_PREFETCH("[%[b_ptr], #176]") - "vmov.i32 q15, #0\n" + "vmov.i32 q10, #0\n" + "vmov.i32 q11, #0\n" + "vmov.i32 q12, #0\n" + "vmov.i32 q13, #0\n" + ASM_PREFETCH("[%[a_ptr], #176]") + "vmov.i32 q14, #0\n" + ASM_PREFETCH("[%[b_ptr], #176]") + "vmov.i32 q15, #0\n" - "cmp %[k], #0\n" - "beq 6f\n" + "cmp %[k], #0\n" + "beq 6f\n" "1:\n" // Unroll 0 - "vmla.f32 q4, q2, d0[0]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q5, q2, d0[1]\n" - "vmla.f32 q6, q2, d1[0]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "vmla.f32 q7, q2, d1[1]\n" - "vmla.f32 q8, q2, d2[0]\n" - "vmla.f32 q9, q2, d2[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d0[0]\n" - "vmla.f32 q11, q3, d0[1]\n" - "vmla.f32 q12, q3, d1[0]\n" - "vmla.f32 q13, q3, d1[1]\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmla.f32 q14, q3, d2[0]\n" - "vmla.f32 q15, q3, d2[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q4, q2, d0[0]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q5, q2, d0[1]\n" + "vmla.f32 q6, q2, d1[0]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q7, q2, d1[1]\n" + "vmla.f32 q8, q2, d2[0]\n" + "vmla.f32 q9, q2, d2[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d0[0]\n" + "vmla.f32 q11, q3, d0[1]\n" + "vmla.f32 q12, q3, d1[0]\n" + "vmla.f32 q13, q3, d1[1]\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q14, q3, d2[0]\n" + "vmla.f32 q15, q3, d2[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" // Unroll 1 - "vmla.f32 q4, q2, d3[0]\n" - "subs %[k], %[k], #1\n" - "vmla.f32 q5, q2, d3[1]\n" ASM_PREFETCH("[%[a_ptr], #208]") - "vmla.f32 q6, q2, d0[0]\n" - "vmla.f32 q7, q2, d0[1]\n" ASM_PREFETCH("[%[b_ptr], #192]") - "vmla.f32 q8, q2, d1[0]\n" - "vmla.f32 q9, q2, d1[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d3[0]\n" - "vmla.f32 q11, q3, d3[1]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q12, q3, d0[0]\n" - "vmla.f32 q13, q3, d0[1]\n" - "vmla.f32 
q14, q3, d1[0]\n" - "vmla.f32 q15, q3, d1[1]\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q4, q2, d3[0]\n" + "subs %[k], %[k], #1\n" + "vmla.f32 q5, q2, d3[1]\n" + ASM_PREFETCH("[%[a_ptr], #208]") + "vmla.f32 q6, q2, d0[0]\n" + "vmla.f32 q7, q2, d0[1]\n" + ASM_PREFETCH("[%[b_ptr], #192]") + "vmla.f32 q8, q2, d1[0]\n" + "vmla.f32 q9, q2, d1[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d3[0]\n" + "vmla.f32 q11, q3, d3[1]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q12, q3, d0[0]\n" + "vmla.f32 q13, q3, d0[1]\n" + "vmla.f32 q14, q3, d1[0]\n" + "vmla.f32 q15, q3, d1[1]\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" // Unroll 2 - "vmla.f32 q4, q2, d2[0]\n" - "vmla.f32 q5, q2, d2[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "vmla.f32 q6, q2, d3[0]\n" - "vmla.f32 q7, q2, d3[1]\n" ASM_PREFETCH("[%[a_ptr], #240]") - "vmla.f32 q8, q2, d0[0]\n" - "vmla.f32 q9, q2, d0[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d2[0]\n" - "vmla.f32 q11, q3, d2[1]\n" ASM_PREFETCH("[%[b_ptr], #208]") - "vmla.f32 q12, q3, d3[0]\n" - "vmla.f32 q13, q3, d3[1]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q14, q3, d0[0]\n" - "vmla.f32 q15, q3, d0[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q4, q2, d2[0]\n" + "vmla.f32 q5, q2, d2[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q6, q2, d3[0]\n" + "vmla.f32 q7, q2, d3[1]\n" + ASM_PREFETCH("[%[a_ptr], #240]") + "vmla.f32 q8, q2, d0[0]\n" + "vmla.f32 q9, q2, d0[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d2[0]\n" + "vmla.f32 q11, q3, d2[1]\n" + ASM_PREFETCH("[%[b_ptr], #208]") + "vmla.f32 q12, q3, d3[0]\n" + "vmla.f32 q13, q3, d3[1]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q14, q3, d0[0]\n" + "vmla.f32 q15, q3, d0[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" // Unroll 3 - "vmla.f32 q4, q2, d1[0]\n" - "vmla.f32 q5, q2, d1[1]\n" - "vmla.f32 q6, q2, d2[0]\n" - "vmla.f32 q7, q2, d2[1]\n" - "vmla.f32 q8, q2, d3[0]\n" - "vmla.f32 q9, q2, d3[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d1[0]\n" - "vmla.f32 q11, q3, d1[1]\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmla.f32 q12, q3, d2[0]\n" - "vmla.f32 q13, q3, d2[1]\n" - "vmla.f32 q14, q3, d3[0]\n" - "vmla.f32 q15, q3, d3[1]\n" - "bne 1b\n" + "vmla.f32 q4, q2, d1[0]\n" + "vmla.f32 q5, q2, d1[1]\n" + "vmla.f32 q6, q2, d2[0]\n" + "vmla.f32 q7, q2, d2[1]\n" + "vmla.f32 q8, q2, d3[0]\n" + "vmla.f32 q9, q2, d3[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d1[0]\n" + "vmla.f32 q11, q3, d1[1]\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q12, q3, d2[0]\n" + "vmla.f32 q13, q3, d2[1]\n" + "vmla.f32 q14, q3, d3[0]\n" + "vmla.f32 q15, q3, d3[1]\n" + "bne 1b\n" // Branch here if we never execute main loop. "6:\n" @@ -158,185 +167,186 @@ void a32_sgemm_8x6(const float *Apanel, const float *Bpanel, float *Cpanel, int // "Tails" shows how many multiply blocks are needed at the // end, must be 1-4 inclusive. Bail out to alternative tail // immediately if it's 1. 
- "subs %[tails], %[tails], #1\n" - "beq 3f\n" + "subs %[tails], %[tails], #1\n" + "beq 3f\n" // Detached final iteration // Unroll 0 - "vmla.f32 q4, q2, d0[0]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q5, q2, d0[1]\n" - "vmla.f32 q6, q2, d1[0]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "vmla.f32 q7, q2, d1[1]\n" - "vmla.f32 q8, q2, d2[0]\n" - "subs %[tails], %[tails], #1\n" - "vmla.f32 q9, q2, d2[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d0[0]\n" - "vmla.f32 q11, q3, d0[1]\n" - "vmla.f32 q12, q3, d1[0]\n" - "vmla.f32 q13, q3, d1[1]\n" - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmla.f32 q14, q3, d2[0]\n" - "vmla.f32 q15, q3, d2[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "beq 4f\n" + "vmla.f32 q4, q2, d0[0]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q5, q2, d0[1]\n" + "vmla.f32 q6, q2, d1[0]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q7, q2, d1[1]\n" + "vmla.f32 q8, q2, d2[0]\n" + "subs %[tails], %[tails], #1\n" + "vmla.f32 q9, q2, d2[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d0[0]\n" + "vmla.f32 q11, q3, d0[1]\n" + "vmla.f32 q12, q3, d1[0]\n" + "vmla.f32 q13, q3, d1[1]\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q14, q3, d2[0]\n" + "vmla.f32 q15, q3, d2[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "beq 4f\n" // Unroll 1 - "vmla.f32 q4, q2, d3[0]\n" - "vmla.f32 q5, q2, d3[1]\n" - "subs %[tails], %[tails], #1\n" - "vmla.f32 q6, q2, d0[0]\n" - "vmla.f32 q7, q2, d0[1]\n" - "vmla.f32 q8, q2, d1[0]\n" - "vmla.f32 q9, q2, d1[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d3[0]\n" - "vmla.f32 q11, q3, d3[1]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q12, q3, d0[0]\n" - "vmla.f32 q13, q3, d0[1]\n" - "vmla.f32 q14, q3, d1[0]\n" - "vmla.f32 q15, q3, d1[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "beq 5f\n" + "vmla.f32 q4, q2, d3[0]\n" + "vmla.f32 q5, q2, d3[1]\n" + "subs %[tails], %[tails], #1\n" + "vmla.f32 q6, q2, d0[0]\n" + "vmla.f32 q7, q2, d0[1]\n" + "vmla.f32 q8, q2, d1[0]\n" + "vmla.f32 q9, q2, d1[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d3[0]\n" + "vmla.f32 q11, q3, d3[1]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q12, q3, d0[0]\n" + "vmla.f32 q13, q3, d0[1]\n" + "vmla.f32 q14, q3, d1[0]\n" + "vmla.f32 q15, q3, d1[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "beq 5f\n" // Unroll 2 - "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" - "vmla.f32 q4, q2, d2[0]\n" - "vmla.f32 q5, q2, d2[1]\n" - "vmla.f32 q6, q2, d3[0]\n" - "vmla.f32 q7, q2, d3[1]\n" - "vmla.f32 q8, q2, d0[0]\n" - "vmla.f32 q9, q2, d0[1]\n" - "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" - - "vmla.f32 q10, q3, d2[0]\n" - "vmla.f32 q11, q3, d2[1]\n" - "vmla.f32 q12, q3, d3[0]\n" - "vmla.f32 q13, q3, d3[1]\n" - "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" - "vmla.f32 q14, q3, d0[0]\n" - "vmla.f32 q15, q3, d0[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n" + "vmla.f32 q4, q2, d2[0]\n" + "vmla.f32 q5, q2, d2[1]\n" + "vmla.f32 q6, q2, d3[0]\n" + "vmla.f32 q7, q2, d3[1]\n" + "vmla.f32 q8, q2, d0[0]\n" + "vmla.f32 q9, q2, d0[1]\n" + "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n" + + "vmla.f32 q10, q3, d2[0]\n" + "vmla.f32 q11, q3, d2[1]\n" + "vmla.f32 q12, q3, d3[0]\n" + "vmla.f32 q13, q3, d3[1]\n" + "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n" + "vmla.f32 q14, q3, d0[0]\n" + "vmla.f32 q15, q3, d0[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" // Unroll 3 - "vmla.f32 q4, q2, d1[0]\n" - "vmla.f32 q10, q3, d1[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 
q5, q2, d1[1]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d1[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q6, q2, d2[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d2[0]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d2[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d2[1]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d3[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d3[0]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d3[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d3[1]\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "b 2f\n" + "vmla.f32 q4, q2, d1[0]\n" + "vmla.f32 q10, q3, d1[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q5, q2, d1[1]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d1[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q6, q2, d2[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d2[0]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d2[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d2[1]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d3[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d3[0]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d3[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d3[1]\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "b 2f\n" // tails==1 final tail "3:\n" - "vmla.f32 q4, q2, d0[0]\n" - "vld1.32 {d2}, [%[a_ptr] :64]!\n" - "vmla.f32 q5, q2, d0[1]\n" - "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" - "vmla.f32 q6, q2, d1[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q10, q3, d0[0]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d0[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d1[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d1[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d1[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d2[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d2[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d2[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d2[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "b 2f\n" + "vmla.f32 q4, q2, d0[0]\n" + "vld1.32 {d2}, [%[a_ptr] :64]!\n" + "vmla.f32 q5, q2, d0[1]\n" + "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n" + "vmla.f32 q6, q2, d1[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q10, q3, d0[0]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d0[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d1[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d1[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d1[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d2[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d2[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d2[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d2[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "b 2f\n" // tails==2 final tail "4:\n" - "vmla.f32 q4, q2, d3[0]\n" - "vmla.f32 q10, q3, d3[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q5, q2, d3[1]\n" - "vst1.32 {d20-d21}, 
[%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d3[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q6, q2, d0[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d0[0]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d0[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d0[1]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d1[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d1[0]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d1[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d1[1]\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" - "b 2f\n" + "vmla.f32 q4, q2, d3[0]\n" + "vmla.f32 q10, q3, d3[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q5, q2, d3[1]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d3[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q6, q2, d0[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d0[0]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d0[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d0[1]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d1[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d1[0]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d1[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d1[1]\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "b 2f\n" // tails==3 final tail "5:\n" - "vmla.f32 q4, q2, d2[0]\n" - "vld1.32 {d0}, [%[a_ptr] :64]!\n" - "vmla.f32 q5, q2, d2[1]\n" - "vmla.f32 q6, q2, d3[0]\n" - "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" - "vmla.f32 q10, q3, d2[0]\n" - "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" - "vmla.f32 q11, q3, d2[1]\n" - "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" - "vmla.f32 q12, q3, d3[0]\n" - "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" - "vmla.f32 q7, q2, d3[1]\n" - "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" - "vmla.f32 q13, q3, d3[1]\n" - "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" - "vmla.f32 q8, q2, d0[0]\n" - "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" - "vmla.f32 q14, q3, d0[0]\n" - "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" - "vmla.f32 q9, q2, d0[1]\n" - "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" - "vmla.f32 q15, q3, d0[1]\n" - "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" - "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" + "vmla.f32 q4, q2, d2[0]\n" + "vld1.32 {d0}, [%[a_ptr] :64]!\n" + "vmla.f32 q5, q2, d2[1]\n" + "vmla.f32 q6, q2, d3[0]\n" + "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n" + "vmla.f32 q10, q3, d2[0]\n" + "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n" + "vmla.f32 q11, q3, d2[1]\n" + "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n" + "vmla.f32 q12, q3, d3[0]\n" + "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n" + "vmla.f32 q7, q2, d3[1]\n" + "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n" + "vmla.f32 q13, q3, d3[1]\n" + "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n" + "vmla.f32 q8, q2, d0[0]\n" + "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n" + "vmla.f32 q14, q3, d0[0]\n" + "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n" + "vmla.f32 q9, q2, d0[1]\n" + "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n" + "vmla.f32 q15, q3, d0[1]\n" + "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n" + "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n" "2:\n" - "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n" - : [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr), [c_ptr] "+r"(c_ptr), [k] "+r"(k), [tails] "+r"(tails) - : - : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "cc"); + "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n" + : [a_ptr] "+r" 
(a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k), [tails] "+r" (tails) + : + : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "cc" + ); } } } |
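
All three kernels in this diff share the same trip-count setup around the inline assembly: the unrolled main loop consumes K accumulation steps four at a time (the four "Unroll" blocks), and a detached final iteration handles the last one to four blocks. A minimal standalone sketch of that decomposition follows; it is illustrative only and not part of the patch, with the panel pointers and assembly omitted and the helper name `loop_counts` invented for the example.

```cpp
#include <cstdio>

// Mirrors the "tails"/"k" setup that appears in a53.cpp, a55r1.cpp and
// generic.cpp above: k counts full 4-block main-loop iterations, tails
// (1..4) counts the blocks handled by the detached final iteration.
static void loop_counts(int K, int &k, int &tails) {
    tails = K & 3;            // K modulo 4
    if (tails == 0) {
        tails = 4;            // a multiple of 4 still runs the 4-block tail
    }
    k = ((K + 3) / 4) - 1;    // main-loop iterations before the detached tail
}

int main() {
    // For every K >= 1 the identity 4*k + tails == K holds, so each of the
    // K accumulation steps is executed exactly once: either in the main
    // loop or in the detached tail (labels 3:/4:/5: in the assembly).
    for (int K : {1, 4, 5, 8, 11}) {
        int k, tails;
        loop_counts(K, k, tails);
        std::printf("K=%2d -> k=%d, tails=%d (4*k+tails=%d)\n",
                    K, k, tails, 4 * k + tails);
    }
    return 0;
}
```

Splitting off the final iteration this way lets the tail code skip the look-ahead loads that the steady-state loop performs, which is why the kernels branch to label 6 when k reaches zero rather than folding the remainder into the main loop.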