| author | Pablo Tello <pablo.tello@arm.com> | 2017-11-22 13:37:19 +0000 |
|---|---|---|
| committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:32 +0000 |
| commit | cc441cb2321adefb0fb244b592d32af43766695d (patch) | |
| tree | 263ac11ba2234101f664a56028ee7cf23b7ed6c8 /arm_compute/core/NEON/kernels | |
| parent | ee8be2d09573d4ec9960d8f2b947ef38716f0c62 (diff) | |
| download | ComputeLibrary-cc441cb2321adefb0fb244b592d32af43766695d.tar.gz | |
COMPMID-696: Integrated assembly SGEMM for Arm Cortex-A55 and Arm Cortex-A55r1.
Change-Id: I9c8fa13d53c7310cacf3446faa42026fdf3396fa
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110209
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels')
3 files changed, 737 insertions, 0 deletions
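
For orientation, here is a minimal scalar sketch of the computation the new 12x8 micro-kernels appear to perform, assuming the packed-panel layout implied by the assembly below: each K step consumes 8 floats from the A panel (one per output row) and 12 floats from the B panel (one per output column), and each 8x12 output block is written out row-major, matching the q8/q16/q24/q9/... store order at the common tail. This sketch and the name `sgemm_12x8_reference` are illustrations added for this write-up, not part of the patch.

```cpp
// Illustrative scalar reference (not part of the patch), assuming the
// panel layout implied by the assembly: 8 A values and 12 B values per
// K step, 8x12 output block stored row-major per (yb, xb) block.
inline void sgemm_12x8_reference(const float *Apanel, const float *Bpanel, float *Cpanel,
                                 int ablocks, int bblocks, int K) {
    const float *a_ptr = Apanel;
    float *c_ptr = Cpanel;

    for (int yb = 0; yb < ablocks; yb++) {
        const float *a_ptr0 = a_ptr;          // A block is reused for every B block
        const float *b_ptr = Bpanel;          // B panel is walked once per A block

        for (int xb = 0; xb < bblocks; xb++) {
            a_ptr = a_ptr0;
            float acc[8][12] = {};             // 24 q-registers' worth of accumulators

            for (int k = 0; k < K; k++) {
                for (int r = 0; r < 8; r++) {
                    for (int c = 0; c < 12; c++) {
                        acc[r][c] += a_ptr[r] * b_ptr[c];
                    }
                }
                a_ptr += 8;    // next K step of the A panel
                b_ptr += 12;   // next K step of the B panel
            }

            // Store the 8x12 block row-major (q8, q16, q24 hold row 0, etc.).
            for (int r = 0; r < 8; r++) {
                for (int c = 0; c < 12; c++) {
                    *c_ptr++ = acc[r][c];
                }
            }
        }
    }
}
```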
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8.hpp
index e229e215ef..603ad8dc0a 100644
--- a/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8.hpp
@@ -28,6 +28,9 @@
 // Actual kernel implementations
 #include "a64_sgemm_12x8/generic.hpp"
 #include "a64_sgemm_12x8/a53.hpp"
+#include "a64_sgemm_12x8/a55.hpp"
+#include "a64_sgemm_12x8/a55r1.hpp"
+
 // 12x8 SGEMM "strategy" class.
 //
@@ -66,6 +69,12 @@ public:
         if (ci->CPU == CPUTarget::A53) {
             kernel = a64_sgemm_asimd_12x8_a53;
         }
+        else if (ci->CPU == CPUTarget::A55) {
+            kernel = a64_sgemm_asimd_12x8_a55;
+        }
+        else if (ci->CPU == CPUTarget::A55_DOT) {
+            kernel = a64_sgemm_asimd_12x8_a55r1;
+        }
     }
 };
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8/a55.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8/a55.hpp
new file mode 100644
index 0000000000..85d8a502f8
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8/a55.hpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+inline void a64_sgemm_asimd_12x8_a55(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+    const float *a_ptr = Apanel;
+    float *c_ptr = Cpanel;
+
+    for (int yb=0; yb<ablocks; yb++) {
+        const float *a_ptr0 = a_ptr;
+        const float *b_ptr = Bpanel;
+
+        for (int xb=0; xb<bblocks; xb++) {
+            a_ptr = a_ptr0;
+            // Fix up for odd lengths - set a flag if K is odd, but make
+            // sure we round up the iteration count.
+            int oddk = (K & 1);
+            int k = ((K+1)/2) - 1;
+
+            register float32x4_t a0  asm("v0");
+            register float32x4_t a1  asm("v1");
+            register float32x4_t b0  asm("v2");
+            register float32x4_t b1  asm("v3");
+            register float32x4_t b2  asm("v4");
+            register float32x4_t a0a asm("v5");
+            register float32x4_t a1a asm("v6");
+
+            __asm __volatile (
+                // Initialize result registers, load initial operands, prime prefetches.
+ "movi v8.4s, #0x0\n" + "ldr %q[a0], [%[a_ptr]]\n" + "movi v9.4s, #0x0\n" + "ldr %q[b0], [%[b_ptr]]\n" + "movi v10.4s, #0x0\n" + "ldr %q[a1], [%[a_ptr], #16]\n" + "movi v11.4s, #0x0\n" + "ldr %q[b1], [%[b_ptr], #16]\n" + "movi v12.4s, #0x0\n" + ASM_PREFETCH("[%[b_ptr], #64]") + "movi v13.4s, #0x0\n" + ASM_PREFETCH("[%[a_ptr], #64]") + "movi v14.4s, #0x0\n" + ASM_PREFETCH("[%[b_ptr], #128]") + "movi v15.4s, #0x0\n" + ASM_PREFETCH("[%[a_ptr], #128]") + "movi v16.4s, #0x0\n" + ASM_PREFETCH("[%[b_ptr], #192]") + "movi v17.4s, #0x0\n" + ASM_PREFETCH("[%[b_ptr], #256]") + "movi v18.4s, #0x0\n" + ASM_PREFETCH("[%[a_ptr], #192]") + "movi v19.4s, #0x0\n" + ASM_PREFETCH("[%[b_ptr], #320]") + "movi v20.4s, #0x0\n" + ASM_PREFETCH("[%[a_ptr], #256]") + "movi v21.4s, #0x0\n" + ASM_PREFETCH("[%[b_ptr], #384]") + "movi v22.4s, #0x0\n" + "movi v23.4s, #0x0\n" + "movi v24.4s, #0x0\n" + "movi v25.4s, #0x0\n" + "movi v26.4s, #0x0\n" + "movi v27.4s, #0x0\n" + "movi v28.4s, #0x0\n" + "movi v29.4s, #0x0\n" + "movi v30.4s, #0x0\n" + "movi v31.4s, #0x0\n" + + // Skip loop if we are doing zero iterations of it. + "cbz %w[k], 4f\n" + + "1:\n" + // Unroll 0 + "ldr %d[b2], [%[b_ptr], #32]\n" + "fmla v8.4s , %[b0].4s, %[a0].s[0]\n" + + "fmla v9.4s , %[b0].4s, %[a0].s[1]\n" + "ldr x20, [%[b_ptr], #40]\n" + "fmla v10.4s, %[b0].4s, %[a0].s[2]\n" + "fmla v11.4s, %[b0].4s, %[a0].s[3]\n" + "subs %w[k], %w[k], #1\n" + + + "ldr %d[a0a], [%[a_ptr], #32]\n" + "ins %[b2].d[1], x20\n" + + "fmla v12.4s, %[b0].4s, %[a1].s[0]\n" + "fmla v13.4s, %[b0].4s, %[a1].s[1]\n" + "ldr x20, [%[a_ptr], #40]\n" + "fmla v14.4s, %[b0].4s, %[a1].s[2]\n" + "fmla v15.4s, %[b0].4s, %[a1].s[3]\n" + + "ldr %d[a1a], [%[a_ptr], #48]\n" + "ins %[a0a].d[1], x20\n" + + "fmla v16.4s, %[b1].4s, %[a0].s[0]\n" + "fmla v17.4s, %[b1].4s, %[a0].s[1]\n" + "ldr x20, [%[a_ptr], #56]\n" + "fmla v18.4s, %[b1].4s, %[a0].s[2]\n" + "fmla v19.4s, %[b1].4s, %[a0].s[3]\n" + + "ldr %d[b0], [%[b_ptr], #48]\n" + "ins %[a1a].d[1], x20\n" + ASM_PREFETCH("[%[a_ptr], #320]") + "fmla v20.4s, %[b1].4s, %[a1].s[0]\n" + "fmla v21.4s, %[b1].4s, %[a1].s[1]\n" + "ldr x20, [%[b_ptr], #56]\n" + "fmla v22.4s, %[b1].4s, %[a1].s[2]\n" + "fmla v23.4s, %[b1].4s, %[a1].s[3]\n" + + "ldr %d[b1], [%[b_ptr], #64]\n" + "ins %[b0].d[1], x20\n" + + "fmla v24.4s, %[b2].4s, %[a0].s[0]\n" + "fmla v25.4s, %[b2].4s, %[a0].s[1]\n" + "ldr x20, [%[b_ptr], #72]\n" + "fmla v26.4s, %[b2].4s, %[a0].s[2]\n" + "fmla v27.4s, %[b2].4s, %[a0].s[3]\n" + ASM_PREFETCH("[%[b_ptr], #448]") + + + "fmla v28.4s, %[b2].4s, %[a1].s[0]\n" + "fmla v29.4s, %[b2].4s, %[a1].s[1]\n" + ASM_PREFETCH("[%[b_ptr], #512]") + "fmla v30.4s, %[b2].4s, %[a1].s[2]\n" + "fmla v31.4s, %[b2].4s, %[a1].s[3]\n" + + // Unroll 1 + "ldr %d[b2], [%[b_ptr], #80]\n" + "ins %[b1].d[1], x20\n" + + "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n" + "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n" + "ldr x20, [%[b_ptr], #88]\n" + "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n" + "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n" + + "ldr %d[a0], [%[a_ptr], #64]\n" + "ins %[b2].d[1], x20\n" + + "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n" + "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n" + "ldr x20, [%[a_ptr], #72]\n" + "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n" + "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n" + + "ldr %d[a1], [%[a_ptr], #80]\n" + "ins %[a0].d[1], x20\n" + + "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n" + "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n" + "ldr x20, [%[a_ptr], #88]\n" + "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n" + "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n" + + + "ldr %d[b0], [%[b_ptr], #96]\n" + "ins %[a1].d[1], x20\n" + 
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n" + "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n" + "ldr x20, [%[b_ptr], #104]\n" + "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n" + "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n" + + "ldr %d[b1], [%[b_ptr], #112]\n" + "ins %[b0].d[1], x20\n" + + "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n" + "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n" + "ldr x20, [%[b_ptr], #120]\n" + "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n" + "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n" + "add %[a_ptr], %[a_ptr], #64\n" + + "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n" + "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n" + "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n" + "add %[b_ptr], %[b_ptr], #96\n" + "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n" + + + "ldr %d[b2], [%[b_ptr], #32]\n" + "ins %[b1].d[1], x20\n" + + + "bne 1b\n" + + // Branch here if K=1 or 2. Do the right thing for odd/even at the end. + "4:\n" + "cbnz %w[oddk], 2f\n" + "fmla v8.4s , %[b0].4s, %[a0].s[0]\n" + + // Detached final iteration. (even K) + "ldr x20, [%[b_ptr], #40]\n" + "fmla v9.4s , %[b0].4s, %[a0].s[1]\n" + "subs %w[k], %w[k], #1\n" + "fmla v10.4s, %[b0].4s, %[a0].s[2]\n" + "fmla v11.4s, %[b0].4s, %[a0].s[3]\n" + + "ldr %d[a0a], [%[a_ptr], #32]\n" + "ins %[b2].d[1], x20\n" + + "fmla v12.4s, %[b0].4s, %[a1].s[0]\n" + "fmla v13.4s, %[b0].4s, %[a1].s[1]\n" + "ldr x20, [%[a_ptr], #40]\n" + "fmla v14.4s, %[b0].4s, %[a1].s[2]\n" + "fmla v15.4s, %[b0].4s, %[a1].s[3]\n" + + "ldr %d[a1a], [%[a_ptr], #48]\n" + "ins %[a0a].d[1], x20\n" + + "fmla v16.4s, %[b1].4s, %[a0].s[0]\n" + "fmla v17.4s, %[b1].4s, %[a0].s[1]\n" + "ldr x20, [%[a_ptr], #56]\n" + "fmla v18.4s, %[b1].4s, %[a0].s[2]\n" + "fmla v19.4s, %[b1].4s, %[a0].s[3]\n" + + "ldr %d[b0], [%[b_ptr], #48]\n" + "ins %[a1a].d[1], x20\n" + + "fmla v20.4s, %[b1].4s, %[a1].s[0]\n" + "fmla v21.4s, %[b1].4s, %[a1].s[1]\n" + "ldr x20, [%[b_ptr], #56]\n" + "fmla v22.4s, %[b1].4s, %[a1].s[2]\n" + "fmla v23.4s, %[b1].4s, %[a1].s[3]\n" + + "fmla v24.4s, %[b2].4s, %[a0].s[0]\n" + "fmla v25.4s, %[b2].4s, %[a0].s[1]\n" + "fmla v26.4s, %[b2].4s, %[a0].s[2]\n" + "fmla v27.4s, %[b2].4s, %[a0].s[3]\n" + + "ldr %d[b1], [%[b_ptr], #64]\n" + "ins %[b0].d[1], x20\n" + + "fmla v28.4s, %[b2].4s, %[a1].s[0]\n" + "fmla v29.4s, %[b2].4s, %[a1].s[1]\n" + "ldr x20, [%[b_ptr], #72]\n" + "fmla v30.4s, %[b2].4s, %[a1].s[2]\n" + "fmla v31.4s, %[b2].4s, %[a1].s[3]\n" + + "ldr %d[b2], [%[b_ptr], #80]\n" + "ins %[b1].d[1], x20\n" + + "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n" + "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n" + "ldr x20, [%[b_ptr], #88]\n" + "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n" + + "ins %[b2].d[1], x20\n" + "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n" + "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n" + "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n" + "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n" + "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n" + "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n" + "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n" + "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n" + "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n" + "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n" + "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n" + "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n" + "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n" + "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n" + "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n" + "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n" + "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n" + "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n" + "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n" + "add %[a_ptr], %[a_ptr], #64\n" + "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n" + "add %[b_ptr], %[b_ptr], #96\n" + "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n" + "b 3f\n" + + // Detached final iteration. 
+                "2:\n"
+
+                "ldr %d[b2], [%[b_ptr], #32]\n"
+                "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+
+                "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+                "ldr x20, [%[b_ptr], #40]\n"
+                "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+                "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+                "ins %[b2].d[1], x20\n"
+                "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+                "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+                "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+                "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+                "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+                "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+                "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+                "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+                "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+                "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+                "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+                "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+                "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+                "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+                "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+                "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+                "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+                "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+                "add %[a_ptr], %[a_ptr], #32\n"
+                "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+                "add %[b_ptr], %[b_ptr], #48\n"
+                "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+
+                // Common tail
+                "3:\n"
+                "str q8, [%[c_ptr]]\n"
+                "str q16, [%[c_ptr], #16]\n"
+                "str q24, [%[c_ptr], #32]\n"
+                "str q9, [%[c_ptr], #48]\n"
+                "str q17, [%[c_ptr], #64]\n"
+                "str q25, [%[c_ptr], #80]\n"
+                "str q10, [%[c_ptr], #96]\n"
+                "str q18, [%[c_ptr], #112]\n"
+                "str q26, [%[c_ptr], #128]\n"
+                "str q11, [%[c_ptr], #144]\n"
+                "str q19, [%[c_ptr], #160]\n"
+                "str q27, [%[c_ptr], #176]\n"
+                "str q12, [%[c_ptr], #192]\n"
+                "str q20, [%[c_ptr], #208]\n"
+                "str q28, [%[c_ptr], #224]\n"
+                "str q13, [%[c_ptr], #240]\n"
+                "str q21, [%[c_ptr], #256]\n"
+                "str q29, [%[c_ptr], #272]\n"
+                "str q14, [%[c_ptr], #288]\n"
+                "str q22, [%[c_ptr], #304]\n"
+                "str q30, [%[c_ptr], #320]\n"
+                "str q15, [%[c_ptr], #336]\n"
+                "str q23, [%[c_ptr], #352]\n"
+                "str q31, [%[c_ptr], #368]\n"
+                "add %[c_ptr], %[c_ptr], #384\n"
+                :
+                  [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+                  [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+                  [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+                : [oddk] "r" (oddk)
+                : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
+                  "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc"
+            );
+        }
+    }
+}
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8/a55r1.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8/a55r1.hpp
new file mode 100644
index 0000000000..295308053f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_sgemm_12x8/a55r1.hpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+inline void a64_sgemm_asimd_12x8_a55r1(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+    const float *a_ptr = Apanel;
+    float *c_ptr = Cpanel;
+    for (int yb=0; yb<ablocks; yb++) {
+        const float *a_ptr0 = a_ptr;
+        const float *b_ptr = Bpanel;
+        for (int xb=0; xb<bblocks; xb++) {
+            a_ptr = a_ptr0;
+            // Fix up for odd lengths - set a flag if K is odd, but make
+            // sure we round up the iteration count.
+            int oddk = (K & 1);
+            int k = ((K+1)/2) - 1;
+
+            register float32x4_t a0  asm("v0");
+            register float32x4_t a1  asm("v1");
+            register float32x4_t b0  asm("v2");
+            register float32x4_t b1  asm("v3");
+            register float32x4_t b2  asm("v4");
+            register float32x4_t a0a asm("v5");
+            register float32x4_t a1a asm("v6");
+
+            __asm __volatile (
+                // Initialize result registers, load initial operands, prime prefetches.
+                "ldp %q[a0], %q[a1], [%[a_ptr]]\n"
+                ASM_PREFETCH("[%[a_ptr], #64]")
+
+                ASM_PREFETCH("[%[a_ptr], #128]")
+                ASM_PREFETCH("[%[a_ptr], #192]")
+                "ldp %q[b0], %q[b1], [%[b_ptr]]\n"
+                ASM_PREFETCH("[%[b_ptr], #64]")
+
+                ASM_PREFETCH("[%[b_ptr], #128]")
+                ASM_PREFETCH("[%[b_ptr], #192]")
+                ASM_PREFETCH("[%[b_ptr], #256]")
+
+                ASM_PREFETCH("[%[a_ptr], #256]")
+                ASM_PREFETCH("[%[a_ptr], #320]")
+                ASM_PREFETCH("[%[a_ptr], #384]")
+
+                ASM_PREFETCH("[%[b_ptr], #320]")
+                ASM_PREFETCH("[%[b_ptr], #384]")
+                ASM_PREFETCH("[%[b_ptr], #448]")
+                ASM_PREFETCH("[%[b_ptr], #512]")
+
+                "movi v8.4s, #0x0\n"
+                "movi v9.4s, #0x0\n"
+                "movi v10.4s, #0x0\n"
+                "movi v11.4s, #0x0\n"
+                "movi v12.4s, #0x0\n"
+                "movi v13.4s, #0x0\n"
+                "movi v14.4s, #0x0\n"
+                "movi v15.4s, #0x0\n"
+                "movi v16.4s, #0x0\n"
+                "movi v17.4s, #0x0\n"
+
+                "movi v18.4s, #0x0\n"
+                "movi v19.4s, #0x0\n"
+                "movi v20.4s, #0x0\n"
+                "movi v21.4s, #0x0\n"
+                "movi v22.4s, #0x0\n"
+                "movi v23.4s, #0x0\n"
+                "movi v24.4s, #0x0\n"
+                "movi v25.4s, #0x0\n"
+                "movi v26.4s, #0x0\n"
+                "movi v27.4s, #0x0\n"
+                "movi v28.4s, #0x0\n"
+                "movi v29.4s, #0x0\n"
+                "movi v30.4s, #0x0\n"
+                "movi v31.4s, #0x0\n"
+
+                // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n" + + "1:\n" + // Unroll 0 + "fmla v8.4s , %[b0].4s, %[a0].s[0]\n" + "ldr %d[b2], [%[b_ptr], #32]\n" + + "fmla v9.4s , %[b0].4s, %[a0].s[1]\n" + "ldr x20, [%[b_ptr], #40]\n" + "fmla v10.4s, %[b0].4s, %[a0].s[2]\n" + "subs %w[k], %w[k], #1\n" + "fmla v11.4s, %[b0].4s, %[a0].s[3]\n" + "ldr %d[a0a], [%[a_ptr], #32]\n" + + "fmla v12.4s, %[b0].4s, %[a1].s[0]\n" + "ins %[b2].d[1], x20\n" + "fmla v13.4s, %[b0].4s, %[a1].s[1]\n" + "ldr x20, [%[a_ptr], #40]\n" + "fmla v14.4s, %[b0].4s, %[a1].s[2]\n" + "fmla v15.4s, %[b0].4s, %[a1].s[3]\n" + "ldr %d[a1a], [%[a_ptr], #48]\n" + + "fmla v16.4s, %[b1].4s, %[a0].s[0]\n" + "ins %[a0a].d[1], x20\n" + "fmla v17.4s, %[b1].4s, %[a0].s[1]\n" + "ldr x20, [%[a_ptr], #56]\n" + "fmla v18.4s, %[b1].4s, %[a0].s[2]\n" + "fmla v19.4s, %[b1].4s, %[a0].s[3]\n" + "ldr %d[b0], [%[b_ptr], #48]\n" + + "fmla v20.4s, %[b1].4s, %[a1].s[0]\n" + "ins %[a1a].d[1], x20\n" + "fmla v21.4s, %[b1].4s, %[a1].s[1]\n" + "ldr x20, [%[b_ptr], #56]\n" + "fmla v22.4s, %[b1].4s, %[a1].s[2]\n" + "fmla v23.4s, %[b1].4s, %[a1].s[3]\n" + "ldr %d[b1], [%[b_ptr], #64]\n" + + "fmla v24.4s, %[b2].4s, %[a0].s[0]\n" + "ins %[b0].d[1], x20\n" + "fmla v25.4s, %[b2].4s, %[a0].s[1]\n" + "ldr x20, [%[b_ptr], #72]\n" + "fmla v26.4s, %[b2].4s, %[a0].s[2]\n" + "fmla v27.4s, %[b2].4s, %[a0].s[3]\n" + ASM_PREFETCH("[%[a_ptr], #448]") + + "fmla v28.4s, %[b2].4s, %[a1].s[0]\n" + "fmla v29.4s, %[b2].4s, %[a1].s[1]\n" + ASM_PREFETCH("[%[b_ptr], #576]") + "fmla v30.4s, %[b2].4s, %[a1].s[2]\n" + "fmla v31.4s, %[b2].4s, %[a1].s[3]\n" + + // Unroll 1 + "ldr %d[b2], [%[b_ptr], #80]\n" + + "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n" + "ins %[b1].d[1], x20\n" + "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n" + "ldr x20, [%[b_ptr], #88]\n" + "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n" + "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n" + "ldr %d[a0], [%[a_ptr], #64]\n" + + "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n" + "ins %[b2].d[1], x20\n" + "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n" + "ldr x20, [%[a_ptr], #72]\n" + "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n" + "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n" + "ldr %d[a1], [%[a_ptr], #80]\n" + + "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n" + "ins %[a0].d[1], x20\n" + "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n" + "ldr x20, [%[a_ptr], #88]\n" + "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n" + "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n" + "ldr %d[b0], [%[b_ptr], #96]\n" + + "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n" + "ins %[a1].d[1], x20\n" + "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n" + "ldr x20, [%[b_ptr], #104]\n" + "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n" + "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n" + "ldr %d[b1], [%[b_ptr], #112]\n" + + "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n" + "ins %[b0].d[1], x20\n" + "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n" + "ldr x20, [%[b_ptr], #120]\n" + "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n" + + "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n" + "add %[a_ptr], %[a_ptr], #64\n" + + "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n" + ASM_PREFETCH("[%[b_ptr], #640]") + "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n" + "add %[b_ptr], %[b_ptr], #96\n" + "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n" + "ins %[b1].d[1], x20\n" + "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n" + "ldr %d[b2], [%[b_ptr], #32]\n" + + + "bne 1b\n" + + // Branch here if K=1 or 2. Do the right thing for odd/even at the end. + "4:\n" + "cbnz %w[oddk], 2f\n" + "fmla v8.4s , %[b0].4s, %[a0].s[0]\n" + + // Detached final iteration. 
+                "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+                "ldr x20, [%[b_ptr], #40]\n"
+                "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+                "subs %w[k], %w[k], #1\n"
+                "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+                "ldr %d[a0a], [%[a_ptr], #32]\n"
+
+                "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+                "ins %[b2].d[1], x20\n"
+                "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+                "ldr x20, [%[a_ptr], #40]\n"
+                "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+
+                "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+                "ldr %d[a1a], [%[a_ptr], #48]\n"
+
+                "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+                "ins %[a0a].d[1], x20\n"
+                "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+                "ldr x20, [%[a_ptr], #56]\n"
+                "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+
+                "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+                "ldr %d[b0], [%[b_ptr], #48]\n"
+
+                "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+                "ins %[a1a].d[1], x20\n"
+                "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+                "ldr x20, [%[b_ptr], #56]\n"
+                "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+                "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+
+                "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+                "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+                "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+                "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+                "ldr %d[b1], [%[b_ptr], #64]\n"
+
+                "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+                "ins %[b0].d[1], x20\n"
+                "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+                "ldr x20, [%[b_ptr], #72]\n"
+                "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+
+                "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+                "ldr %d[b2], [%[b_ptr], #80]\n"
+
+                "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+                "ins %[b1].d[1], x20\n"
+                "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+                "ldr x20, [%[b_ptr], #88]\n"
+                "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+                "ins %[b2].d[1], x20\n"
+
+                "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+                "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+                "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+                "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+                "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+                "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+                "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+                "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+                "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+                "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+                "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+                "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+                "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+                "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+                "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+                "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+                "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+                "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+                "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+                "add %[a_ptr], %[a_ptr], #64\n"
+                "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+                "add %[b_ptr], %[b_ptr], #96\n"
+                "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+                "b 3f\n"
+
+                // Detached final iteration. (odd K)
+                "2:\n"
+
+                "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+                "ldr %d[b2], [%[b_ptr], #32]\n"
+                "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+                "ldr x20, [%[b_ptr], #40]\n"
+                "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+                "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+                "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+                "ins %[b2].d[1], x20\n"
+                "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+                "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+                "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+                "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+                "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+                "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+                "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+                "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+                "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+                "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+                "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+                "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+                "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+                "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+                "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+                "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+                "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+                "add %[a_ptr], %[a_ptr], #32\n"
+                "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+                "add %[b_ptr], %[b_ptr], #48\n"
+                "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+
+                // Common tail
+                "3:\n"
+                "str q8, [%[c_ptr]]\n"
+                "str q16, [%[c_ptr], #16]\n"
+                "str q24, [%[c_ptr], #32]\n"
+                "str q9, [%[c_ptr], #48]\n"
+                "str q17, [%[c_ptr], #64]\n"
+                "str q25, [%[c_ptr], #80]\n"
+                "str q10, [%[c_ptr], #96]\n"
+                "str q18, [%[c_ptr], #112]\n"
+                "str q26, [%[c_ptr], #128]\n"
+                "str q11, [%[c_ptr], #144]\n"
+                "str q19, [%[c_ptr], #160]\n"
+                "str q27, [%[c_ptr], #176]\n"
+                "str q12, [%[c_ptr], #192]\n"
+                "str q20, [%[c_ptr], #208]\n"
+                "str q28, [%[c_ptr], #224]\n"
+                "str q13, [%[c_ptr], #240]\n"
+                "str q21, [%[c_ptr], #256]\n"
+                "str q29, [%[c_ptr], #272]\n"
+                "str q14, [%[c_ptr], #288]\n"
+                "str q22, [%[c_ptr], #304]\n"
+                "str q30, [%[c_ptr], #320]\n"
+                "str q15, [%[c_ptr], #336]\n"
+                "str q23, [%[c_ptr], #352]\n"
+                "str q31, [%[c_ptr], #368]\n"
+                "add %[c_ptr], %[c_ptr], #384\n"
+                :
+                  [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+                  [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+                  [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+                : [oddk] "r" (oddk)
+                : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
+                  "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc"
+            );
+        }
+    }
+}
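
One detail worth calling out is the odd-K fixup noted in both new kernels ("set a flag if K is odd, but make sure we round up the iteration count"). The sketch below is a small self-contained check of that bookkeeping, under the assumption (visible in the assembly above) that the main loop at label "1:" retires two K steps per iteration and the detached tails retire one step (odd K, label "2:") or two (even K, after "4:") before the common store-out at "3:". It is an illustration added for this write-up, not code from the patch.

```cpp
#include <cassert>

int main() {
    for (int K = 1; K <= 8; K++) {
        int oddk = (K & 1);            // 1 -> odd-K tail, 0 -> even-K tail
        int k    = ((K + 1) / 2) - 1;  // main-loop iterations (rounded up, minus the detached tail)
        // Two K steps per loop iteration plus one or two in the tail covers K exactly.
        assert(2 * k + (oddk ? 1 : 2) == K);
    }
    return 0;
}
```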