From 5aa1a0b7ca5eed010e4b297a95b1c4851f741328 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Thu, 2 Jul 2020 20:02:20 +0100 Subject: COMPID-3324: Clean GEMM kernels Signed-off-by: Georgios Pinitas Change-Id: I170de1671e061a78740caee31fb4a1b8642c1369 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3505 Tested-by: Arm Jenkins Comments-Addressed: Arm Jenkins Reviewed-by: Michele Di Giorgio --- .../transforms/a32_interleave_6way_32bit.hpp | 3 +- .../transforms/a64_block16_interleave4_8bit.hpp | 12 +- .../transforms/a64_interleave_8way_16bit.hpp | 3 +- .../transforms/a64_interleave_8way_32bit.hpp | 3 +- .../transforms/a64_interleave_8way_block4_8bit.hpp | 5 +- .../a64_interleave_8way_half_to_float.hpp | 3 +- ...64_transpose_interleave_12way_half_to_float.hpp | 2 +- .../a64_transpose_interleave_24way_16bit.hpp | 2 +- src/core/NEON/kernels/arm_gemm/transforms/list.hpp | 3 +- .../transforms/sve_interleave_8way_32bit.hpp | 2 +- .../sve_interleave_8way_block2_32bit.hpp | 552 ++++++++++----------- .../transforms/sve_interleave_8way_block4_8bit.hpp | 2 +- 12 files changed, 275 insertions(+), 317 deletions(-) (limited to 'src/core/NEON/kernels/arm_gemm/transforms') diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a32_interleave_6way_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a32_interleave_6way_32bit.hpp index 543664bb0e..5e5f65183c 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a32_interleave_6way_32bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a32_interleave_6way_32bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2018 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -59,7 +59,6 @@ inline void TransformImpl<6, 1, false, 4, 4, false>::Transform(T *out, const T * /* 'first' forces this to always run at least once, needed if the total size is <=7. */ if ((y + 5) >= ymax) { switch ((y + 5) - ymax) { - /* Everything falls through in here */ case 4: inptr1 = zerobuff; // fall through diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_block16_interleave4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_block16_interleave4_8bit.hpp index 6b742c8776..9b6f4de543 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_block16_interleave4_8bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_block16_interleave4_8bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -38,8 +38,8 @@ void TransformImpl<4, 16, false, 1, 1, false>::Transform(T *out, const T *in, in uint8_t zerobuff[16] = { 0 }; - for (uint64_t y = y0 ; y < static_cast(ymax) ; y+=4) { - const uint8_t *inptr0 = inptr + y * ldin + k0; + for (int y=y0; y(y) * ldin + k0; const uint8_t *inptr1 = inptr0 + ldin; const uint8_t *inptr2 = inptr1 + ldin; const uint8_t *inptr3 = inptr2 + ldin; @@ -52,9 +52,8 @@ void TransformImpl<4, 16, false, 1, 1, false>::Transform(T *out, const T *in, in int x=(kmax-k0); for (;x>15;x-=16) { /* Cope with ragged cases by copying from a buffer of zeroes instead */ - if ((y + 3) >= static_cast(ymax)) { + if ((y + 3) >= ymax) { switch ((y + 3) - ymax) { - /* Everything falls through in here */ case 2: inptr1 = zerobuff; // fall through @@ -90,9 +89,8 @@ void TransformImpl<4, 16, false, 1, 1, false>::Transform(T *out, const T *in, in if (x>0) { /* Need to duplicate this here, in case we didn't run the main loop. 
*/ - if ((y + 3) >= static_cast(ymax)) { + if ((y + 3) >= ymax) { switch ((y + 3) - ymax) { - /* Everything falls through in here */ case 2: inptr1 = zerobuff; // fall through diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_16bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_16bit.hpp index 80dd6c5e25..3d912c4675 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_16bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_16bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -63,7 +63,6 @@ void TransformImpl<8, 1, false, 2, 2, false>::Transform(T *out, const T *in, int /* 'first' forces this to always run at least once, needed if the total size is <=7. */ if ((y + 7) >= ymax) { switch ((y + 7) - ymax) { - /* Everything falls through in here */ case 6: inptr1 = zerobuff; // fall through diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_32bit.hpp index 9dfc1346e6..701d688af2 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_32bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_32bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -63,7 +63,6 @@ inline void TransformImpl<8, 1, false, 4, 4, false>::Transform(T *out, const T * /* 'first' forces this to always run at least once, needed if the total size is <=7. */ if ((y + 7) >= ymax) { switch ((y + 7) - ymax) { - /* Everything falls through in here */ case 6: inptr1 = zerobuff; // fall through diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp index 2bc7801b15..2546cc571a 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 Arm Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -60,7 +60,7 @@ inline void TransformImpl<8, 4, false, 1, 1, false>::Transform(T *out, const T * } }; - uint8_t zerobuff[64]; // 32 for asm loop plus up to 31 for overflow loop + uint8_t zerobuff[64] = { 0 }; // 32 for asm loop plus up to 31 for overflow loop for (int y=y0; y::Transform(T *out, const T * /* 'first' forces this to always run at least once, needed if the total size is <=32. */ if ((y + 7) >= ymax) { switch ((y + 7) - ymax) { - /* Everything falls through in here */ case 6: inptr1 = zerobuff; // fall through diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp index bde3274926..a342d6c3d1 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -63,7 +63,6 @@ inline void TransformImpl<8, 1, false, 4, 2, false>::Transform(float *out, const /* 'first' forces this to always run at least once, needed if the total size is <=7. 
*/ if ((y + 7) >= ymax) { switch ((y + 7) - ymax) { - /* Everything falls through in here */ case 6: inptr1 = zerobuff; // fall through diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12way_half_to_float.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12way_half_to_float.hpp index 8992c1010d..d7de9ff934 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12way_half_to_float.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12way_half_to_float.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24way_16bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24way_16bit.hpp index 6d627334cd..a137f9360a 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24way_16bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24way_16bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 Arm Limited. * * SPDX-License-Identifier: MIT * diff --git a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp index be66cd42ff..2c698b2576 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2020 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "a64_transpose_interleave_8way_32bit.hpp" #include "sve_interleave_8way_32bit.hpp" #include "sve_interleave_8way_block2_16bit.hpp" +#include "sve_interleave_8way_block2_32bit.hpp" #include "sve_interleave_8way_block4_16bit.hpp" #include "sve_interleave_8way_block4_8bit.hpp" #include "sve_interleave_8way_block8_8bit.hpp" diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp index 881dc7bb72..348d78e3f5 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_32bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Arm Limited. + * Copyright (c) 2019 Arm Limited. * * SPDX-License-Identifier: MIT * diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp index 4cc4311cee..f21933b8de 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp +++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block2_32bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Arm Limited. + * Copyright (c) 2019 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,12 +36,12 @@ inline void TransformImpl<8, 2, false, 4, 4, false>::Transform(T *out, const T * { const int height = ymax-y; const long inwidth = (kmax - k0); - const long outwidth = (inwidth * 8 + 1) / 2; + const long outwidth = ((inwidth + 1) / 2) * 16; long inpos = 0; long outpos = 0; uint32_t *outptr = master_outptr; - master_outptr += (outwidth * 2); + master_outptr += outwidth; const uint32_t *inptr0 = inptr + y * ldin + k0; const uint32_t *inptr1 = inptr0 + ldin; @@ -60,571 +60,535 @@ inline void TransformImpl<8, 2, false, 4, 4, false>::Transform(T *out, const T * "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z4.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" - "zip1 z8.d, z0.d, z4.d\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" "incw %[inpos], all, mul #1\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "zip1 z8.d, z0.d, z4.d\n" "zip2 z9.d, z0.d, z4.d\n" - "addvl %[inptr0], %[inptr0], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip1 z0.d, z8.d, z4.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip2 z1.d, z8.d, z4.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z4.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z4.d\n" - "incd %[outpos], all, mul #1\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "incw %[outpos], all, mul #1\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" "zip1 z10.d, z1.d, z4.d\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z4.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip1 z12.d, z2.d, z4.d\n" - "incd %[outpos], all, mul #1\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip2 z13.d, z2.d, z4.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip1 z14.d, z3.d, z4.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" "zip2 z15.d, z3.d, z4.d\n" - "incd %[outpos], all, mul #1\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", 
"p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + case 2: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z4.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" + "mov z14.s, #0\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "incw %[inpos], all, mul #1\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip1 z8.d, z0.d, z4.d\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" + "incw %[outpos], all, mul #1\n" "zip2 z9.d, z0.d, z4.d\n" - "incw %[inpos], all, mul #1\n" "zip1 z10.d, z1.d, z4.d\n" - "addvl %[inptr0], %[inptr0], #1\n" "zip2 z11.d, z1.d, z4.d\n" - "addvl %[inptr1], %[inptr1], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip1 z0.d, z8.d, z4.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip2 z1.d, z8.d, z4.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z4.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z4.d\n" - "incd %[outpos], all, mul #1\n" - "mov z14.s, #0\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip1 z4.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z5.d, z10.d, z14.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" "zip1 z6.d, z11.d, z14.d\n" - "incd %[outpos], all, mul #1\n" "zip2 z7.d, z11.d, z14.d\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "incw %[outpos], all, mul #1\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" + "incw %[outpos], all, mul #1\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", 
"z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + case 3: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z4.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" + "mov z14.s, #0\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n" + "incw %[inpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "ld1w z2.s, p0/z, [%[inptr2]]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z4.d\n" - "incw %[inpos], all, mul #1\n" "zip2 z11.d, z1.d, z4.d\n" - "addvl %[inptr0], %[inptr0], #1\n" "zip1 z12.d, z2.d, z4.d\n" - "addvl %[inptr1], %[inptr1], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z4.d\n" - "addvl %[inptr2], %[inptr2], #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z0.d, z8.d, z12.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" "zip2 z1.d, z8.d, z12.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z13.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z13.d\n" - "incd %[outpos], all, mul #1\n" - "mov z14.s, #0\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip1 z4.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" "zip2 z5.d, z10.d, z14.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" "zip1 z6.d, z11.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip2 z7.d, z11.d, z14.d\n" + "incw %[outpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", 
"z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + case 4: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z4.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n" + "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n" + "incw %[inpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "ld1w z2.s, p0/z, [%[inptr2]]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z4.d\n" - "ld1w z3.s, p0/z, [%[inptr3]]\n" "zip2 z11.d, z1.d, z4.d\n" - "incw %[inpos], all, mul #1\n" "zip1 z12.d, z2.d, z4.d\n" - "addvl %[inptr0], %[inptr0], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z4.d\n" - "addvl %[inptr1], %[inptr1], #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z14.d, z3.d, z4.d\n" - "addvl %[inptr2], %[inptr2], #1\n" "zip2 z15.d, z3.d, z4.d\n" - "addvl %[inptr3], %[inptr3], #1\n" "zip1 z0.d, z8.d, z12.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip2 z1.d, z8.d, z12.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z13.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z13.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z4.d, z10.d, z14.d\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip2 z5.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z6.d, z11.d, z15.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" "zip2 z7.d, z11.d, z15.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] 
"+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + case 5: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z5.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n" + "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n" + "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n" "incw %[inpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "ld1w z2.s, p0/z, [%[inptr2]]\n" - "zip2 z11.d, z1.d, z5.d\n" - "ld1w z3.s, p0/z, [%[inptr3]]\n" - "zip1 z12.d, z2.d, z5.d\n" - "ld1w z4.s, p0/z, [%[inptr4]]\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip1 z8.d, z0.d, z4.d\n" - "addvl %[inptr0], %[inptr0], #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z9.d, z0.d, z4.d\n" - "addvl %[inptr1], %[inptr1], #1\n" + "zip2 z11.d, z1.d, z5.d\n" + "zip1 z12.d, z2.d, z5.d\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z5.d\n" - "addvl %[inptr2], %[inptr2], #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z14.d, z3.d, z5.d\n" - "addvl %[inptr3], %[inptr3], #1\n" "zip2 z15.d, z3.d, z5.d\n" - "addvl %[inptr4], %[inptr4], #1\n" "zip1 z0.d, z8.d, z12.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip2 z1.d, z8.d, z12.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z13.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z13.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z4.d, z10.d, z14.d\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip2 z5.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z6.d, z11.d, z15.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" "zip2 z7.d, z11.d, z15.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "st1w z10.s, p2, [%[outptr], #2, 
MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + case 6: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z6.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n" + "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n" + "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n" + "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n" "incw %[inpos], all, mul #1\n" - "ld1w z2.s, p0/z, [%[inptr2]]\n" - "addvl %[inptr0], %[inptr0], #1\n" "zip1 z12.d, z2.d, z6.d\n" - "ld1w z3.s, p0/z, [%[inptr3]]\n" - "zip2 z13.d, z2.d, z6.d\n" - "ld1w z4.s, p0/z, [%[inptr4]]\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip1 z8.d, z0.d, z4.d\n" - "ld1w z5.s, p0/z, [%[inptr5]]\n" + "incw %[outpos], all, mul #1\n" "zip2 z9.d, z0.d, z4.d\n" - "addvl %[inptr1], %[inptr1], #1\n" "zip1 z10.d, z1.d, z5.d\n" - "addvl %[inptr2], %[inptr2], #1\n" "zip2 z11.d, z1.d, z5.d\n" - "addvl %[inptr3], %[inptr3], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" + "zip2 z13.d, z2.d, z6.d\n" + "incw %[outpos], all, mul #1\n" "zip1 z14.d, z3.d, z6.d\n" - "addvl %[inptr4], %[inptr4], #1\n" "zip2 z15.d, z3.d, z6.d\n" - "addvl %[inptr5], %[inptr5], #1\n" "zip1 z0.d, z8.d, z12.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip2 z1.d, z8.d, z12.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z13.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z13.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z4.d, z10.d, z14.d\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip2 z5.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z6.d, z11.d, z15.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" "zip2 z7.d, z11.d, z15.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" 
+ "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + case 7: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" "mov z7.s, #0\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n" + "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n" + "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n" + "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n" + "ld1w z6.s, p0/z, [%[inptr6], %[inpos], LSL #2]\n" "incw %[inpos], all, mul #1\n" - "ld1w z2.s, p0/z, [%[inptr2]]\n" - "addvl %[inptr0], %[inptr0], #1\n" - "ld1w z3.s, p0/z, [%[inptr3]]\n" - "addvl %[inptr1], %[inptr1], #1\n" - "zip1 z14.d, z3.d, z7.d\n" - "ld1w z4.s, p0/z, [%[inptr4]]\n" "zip1 z8.d, z0.d, z4.d\n" - "ld1w z5.s, p0/z, [%[inptr5]]\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "ld1w z6.s, p0/z, [%[inptr6]]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "addvl %[inptr2], %[inptr2], #1\n" "zip2 z11.d, z1.d, z5.d\n" - "addvl %[inptr3], %[inptr3], #1\n" "zip1 z12.d, z2.d, z6.d\n" - "addvl %[inptr4], %[inptr4], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "addvl %[inptr5], %[inptr5], #1\n" + "incw %[outpos], all, mul #1\n" + "zip1 z14.d, z3.d, z7.d\n" "zip2 z15.d, z3.d, z7.d\n" - "addvl %[inptr6], %[inptr6], #1\n" "zip1 z0.d, z8.d, z12.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip2 z1.d, z8.d, z12.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z13.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" "zip2 z3.d, z9.d, z13.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z4.d, z10.d, z14.d\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip2 z5.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], 
all, mul #1\n" "zip1 z6.d, z11.d, z15.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" "zip2 z7.d, z11.d, z15.d\n" - "incd %[outpos], all, mul #1\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - + default: case 8: __asm __volatile( "1:\n" "whilelt p0.s, %[inpos], %[inwidth]\n" "b.none 2f\n" - "ld1w z0.s, p0/z, [%[inptr0]]\n" + "ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]\n" + "ld1w z1.s, p0/z, [%[inptr1], %[inpos], LSL #2]\n" + "ld1w z2.s, p0/z, [%[inptr2], %[inpos], LSL #2]\n" + "ld1w z3.s, p0/z, [%[inptr3], %[inpos], LSL #2]\n" + "ld1w z4.s, p0/z, [%[inptr4], %[inpos], LSL #2]\n" + "ld1w z5.s, p0/z, [%[inptr5], %[inpos], LSL #2]\n" + "ld1w z6.s, p0/z, [%[inptr6], %[inpos], LSL #2]\n" + "ld1w z7.s, p0/z, [%[inptr7], %[inpos], LSL #2]\n" "incw %[inpos], all, mul #1\n" - "ld1w z1.s, p0/z, [%[inptr1]]\n" - "addvl %[inptr0], %[inptr0], #1\n" - "ld1w z2.s, p0/z, [%[inptr2]]\n" - "addvl %[inptr1], %[inptr1], #1\n" - "ld1w z3.s, p0/z, [%[inptr3]]\n" - "addvl %[inptr2], %[inptr2], #1\n" - "ld1w z4.s, p0/z, [%[inptr4]]\n" - "addvl %[inptr3], %[inptr3], #1\n" "zip1 z8.d, z0.d, z4.d\n" - "ld1w z5.s, p0/z, [%[inptr5]]\n" + "whilelt p0.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "ld1w z6.s, p0/z, [%[inptr6]]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "ld1w z7.s, p0/z, [%[inptr7]]\n" "zip2 z11.d, z1.d, z5.d\n" - "addvl %[inptr4], %[inptr4], #1\n" 
"zip1 z12.d, z2.d, z6.d\n" - "addvl %[inptr5], %[inptr5], #1\n" + "whilelt p1.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "addvl %[inptr6], %[inptr6], #1\n" + "incw %[outpos], all, mul #1\n" "zip1 z14.d, z3.d, z7.d\n" - "addvl %[inptr7], %[inptr7], #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" "zip1 z0.d, z8.d, z12.d\n" - "incd %[outpos], all, mul #1\n" + "whilelt p2.s, %[outpos], %[outwidth]\n" "zip2 z1.d, z8.d, z12.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip1 z2.d, z9.d, z13.d\n" - "incd %[outpos], all, mul #1\n" "zip2 z3.d, z9.d, z13.d\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" "zip1 z4.d, z10.d, z14.d\n" - "incd %[outpos], all, mul #1\n" + "whilelt p3.s, %[outpos], %[outwidth]\n" "zip2 z5.d, z10.d, z14.d\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" "zip1 z6.d, z11.d, z15.d\n" - "incd %[outpos], all, mul #1\n" "zip2 z7.d, z11.d, z15.d\n" "zip1 z8.d, z0.d, z4.d\n" - "st1d z8.d, p0, [%[outptr]]\n" + "whilelt p4.s, %[outpos], %[outwidth]\n" "zip2 z9.d, z0.d, z4.d\n" - "st1d z9.d, p1, [%[outptr], #1, MUL VL]\n" + "incw %[outpos], all, mul #1\n" "zip1 z10.d, z1.d, z5.d\n" - "st1d z10.d, p2, [%[outptr], #2, MUL VL]\n" + "st1w z8.s, p0, [%[outptr]]\n" "zip2 z11.d, z1.d, z5.d\n" - "st1d z11.d, p3, [%[outptr], #3, MUL VL]\n" "zip1 z12.d, z2.d, z6.d\n" - "whilelt p0.d, %[outpos], %[outwidth]\n" + "whilelt p5.s, %[outpos], %[outwidth]\n" "zip2 z13.d, z2.d, z6.d\n" - "st1d z12.d, p0, [%[outptr], #4, MUL VL]\n" + "st1w z9.s, p1, [%[outptr], #1, MUL VL]\n" "zip1 z14.d, z3.d, z7.d\n" - "incd %[outpos], all, mul #1\n" + "incw %[outpos], all, mul #1\n" "zip2 z15.d, z3.d, z7.d\n" - "whilelt p1.d, %[outpos], %[outwidth]\n" - "st1d z13.d, p1, [%[outptr], #5, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p2.d, %[outpos], %[outwidth]\n" - "st1d z14.d, p2, [%[outptr], #6, MUL VL]\n" - "incd %[outpos], all, mul #1\n" - "whilelt p3.d, %[outpos], %[outwidth]\n" - "st1d z15.d, p3, [%[outptr], #7, MUL VL]\n" - "incd %[outpos], all, mul #1\n" + "st1w z10.s, p2, [%[outptr], #2, MUL VL]\n" + "whilelt p6.s, %[outpos], %[outwidth]\n" + "st1w z11.s, p3, [%[outptr], #3, MUL VL]\n" + "incw %[outpos], all, mul #1\n" + "st1w z12.s, p4, [%[outptr], #4, MUL VL]\n" + "whilelt p7.s, %[outpos], %[outwidth]\n" + "incw %[outpos], all, mul #1\n" + "st1w z13.s, p5, [%[outptr], #5, MUL VL]\n" + "st1w z14.s, p6, [%[outptr], #6, MUL VL]\n" + "st1w z15.s, p7, [%[outptr], #7, MUL VL]\n" "addvl %[outptr], %[outptr], #8\n" "b 1b\n" "2:\n" : [inpos] "+r" (inpos), [outpos] "+r" (outpos), [outptr] "+r" (outptr), [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7) : [outwidth] "r" (outwidth), [inwidth] "r" (inwidth) - : "p0", "p1", "p2", "p3", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" + : "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "cc", "memory" ); break; - - + + } } } diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp index a96a43cbeb..ed0d58aa91 100644 --- a/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp +++ 
b/src/core/NEON/kernels/arm_gemm/transforms/sve_interleave_8way_block4_8bit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 - 2019 Arm Limited. + * Copyright (c) 2018-2020 Arm Limited. * * SPDX-License-Identifier: MIT * -- cgit v1.2.1
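
Note on the sve_interleave_8way_block2_32bit.hpp rewrite above: the mechanical change is the same in every height case. The old kernels loaded through each input pointer and then bumped every base by a vector length per iteration, recycling four double-width predicates (p0-p3, each recomputed twice per loop) for the st1d stores. The new kernels keep the base pointers fixed, index every load off the single shared counter inpos, and give each of the eight st1w stores its own word-width predicate (p0-p7, computed once each), which is why the clobber lists grow by p4-p7. The load pattern, taken directly from the diff:

    // before: unscaled load, then bump each base pointer by one vector length
    ld1w z0.s, p0/z, [%[inptr0]]
    addvl %[inptr0], %[inptr0], #1

    // after: scaled index from a fixed base; one shared counter serves all rows
    ld1w z0.s, p0/z, [%[inptr0], %[inpos], LSL #2]
    incw %[inpos], all, mul #1

The output bookkeeping moves from 64-bit to 32-bit units to match: outwidth is now counted in words (((inwidth + 1) / 2) * 16 per 8-row stripe) instead of doublewords, outpos advances with incw instead of incd, and master_outptr advances by outwidth directly instead of outwidth * 2.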
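
For reference, the transform these kernels implement can be stated compactly in scalar form. The sketch below is illustrative only — it is not part of the patch, and the function name is made up — but it mirrors the Transform signature and what the zip sequences produce: each pair of input columns becomes one 8-row-by-2-word block in the output, with rows past ymax and a trailing odd column zero-padded. It also shows why ((inwidth + 1) / 2) * 16 is the natural output count: a stripe of eight rows emits exactly ceil(inwidth / 2) blocks of 16 words, including a fully padded block when inwidth is odd.

    // Scalar reference (hypothetical, for illustration only) of the 8-way,
    // block-2, 32-bit interleave performed by the SVE kernels above.
    #include <cstdint>

    void interleave_8way_block2_32bit_ref(uint32_t *out, const uint32_t *in,
                                          int ldin, int y0, int ymax,
                                          int k0, int kmax)
    {
        const long inwidth  = kmax - k0;
        const long outwidth = ((inwidth + 1) / 2) * 16; // words per 8-row stripe

        for (int y = y0; y < ymax; y += 8) {          // one stripe of 8 rows
            uint32_t *outptr = out;
            for (long k = 0; k < inwidth; k += 2) {   // one 8x2 block per column pair
                for (int row = 0; row < 8; row++) {
                    for (long kk = k; kk < k + 2; kk++) {
                        // Zero-pad short stripes and an odd trailing column,
                        // as the predicated loads and zips with zero do in asm.
                        const bool valid = (y + row) < ymax && kk < inwidth;
                        *outptr++ = valid ? in[(long)(y + row) * ldin + k0 + kk]
                                          : 0;
                    }
                }
            }
            out += outwidth;  // matches master_outptr += outwidth in the kernel
        }
    }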