Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/indirect-interleaves')
25 files changed, 6620 insertions, 0 deletions
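For orientation before the per-file diffs: each interleave_block specialisation added below repacks up to HEIGHT input rows (the first template argument), BLOCK elements at a time (the second), into one contiguous panel at out_ptr, starting at column row_offset of every row. Rows beyond the supplied height are aliased to row 0, so the panel always has full height and the caller simply ignores that padding. The "summing" specialisations additionally accumulate one 32-bit sum per row of everything written (the SADALP/UADALP chains in the assembly) and store those sums after the panel, reloading the sums produced by the previous call whenever the 'first' flag is clear, so sums accumulate across successive strips of the same rows; these feed the quantized-GEMM row-sum correction. A minimal scalar model of the non-summing layout, using the hypothetical name reference_interleave (not an arm_gemm symbol) and zero-padding of a ragged final block as a simplification of what the assembly actually does, is:

#include <cstddef>
#include <cstdint>

// Scalar sketch of the panel layout produced by the interleave kernels below.
// "reference_interleave" is a hypothetical helper, not an arm_gemm symbol, and
// zero-padding the ragged tail is a simplification of the hand-written code.
template <unsigned HEIGHT, unsigned BLOCK, typename Tin, typename Tout>
void reference_interleave(Tout *&outptr, const Tin *const *in, size_t width,
                          size_t height, size_t row_offset) {
    for (size_t x0 = 0; x0 < width; x0 += BLOCK) {
        for (unsigned r = 0; r < HEIGHT; r++) {
            // Missing rows alias row 0, as the kernels do with their pointer
            // setup; that data is junk the caller masks out later.
            const Tin *row = in[r < height ? r : 0] + row_offset;
            for (unsigned b = 0; b < BLOCK; b++) {
                size_t x = x0 + b;
                *outptr++ = (x < width) ? static_cast<Tout>(row[x]) : Tout(0);
            }
        }
    }
}

Under that model, a64_interleave8_block1_fp32_fp32 corresponds to reference_interleave with HEIGHT = 8, BLOCK = 1 over float, and the block-16 int8/uint8 kernels to HEIGHT = 4, BLOCK = 16; the assembly reaches the same layout with LD1/ZIP sequences, prefetching, and partial loads for the ragged tail instead of the scalar loops above.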
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp new file mode 100644 index 0000000000..807511f0d2 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2017-2018 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once + +#ifdef __arm__ + +#include <arm_neon.h> + +#include "../asmlib.hpp" + +template<> +void interleave_block<6, 1, VLType::None, false>( + float * &outptr, const float * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + const float *inptr0 = in[0] + row_offset; + const float *inptr1 = in[1] + row_offset; + const float *inptr2 = in[2] + row_offset; + const float *inptr3 = in[3] + row_offset; + const float *inptr4 = in[4] + row_offset; + const float *inptr5 = in[5] + row_offset; + + // Cope with ragged cases by aliasing the first row (which is always valid). + // The nonsense output produced will be suppressed later anyway. + switch (height) { + case 1: + inptr1 = inptr0; + // fall through + case 2: + inptr2 = inptr0; + // fall through + case 3: + inptr3 = inptr0; + // fall through + case 4: + inptr4 = inptr0; + // fall through + case 5: + inptr5 = inptr0; + // fall through + default: + case 6: + break; + } + + //prefetch_2x(inptr0); + //prefetch_2x(inptr1); + //prefetch_2x(inptr2); + //prefetch_2x(inptr3); + //prefetch_2x(inptr4); + //prefetch_2x(inptr5); + + for (;width>7;width-=8) { + __asm __volatile ( + // Load up 8 elements (2 vectors) from each of 8 sources. 
+ "VLD1.32 {d0-d3}, [%[inptr0]]!\n" // q0=A0A1A2A3 + "VLD1.32 {d4-d7}, [%[inptr1]]!\n" // q2=B0B1B2B3 + "VLD1.32 {d8-d11}, [%[inptr2]]!\n" // q4=C0C1C2C3 + "VZIP.32 q0, q4\n" // q0=A0C0A1C1, q4 = A2C2A3C3 + "VLD1.32 {d12-d15}, [%[inptr3]]!\n" // q6=D0D1D2D3 + "VZIP.32 q2, q6\n" // q2=B0D0B1D1, q6 = B2D2B3D3 + "VLD1.32 {d16-d19}, [%[inptr4]]!\n" + "VLD1.32 {d20-d23}, [%[inptr5]]!\n" + "VZIP.32 q8, q10\n" // q8=E0F0E1F1, q10 = E2F2E3F3 + ASM_PREFETCH("[%[inptr0], #128]") + "VZIP.32 q0, q2\n" // q0 = A0B0C0D0, q2 = A1B1C1D1 + + // Store first elements + "VST1.32 {d0-d1}, [%[outptr]]!\n" + "VST1.32 {d16}, [%[outptr]]!\n" + + "VZIP.32 q4, q6\n" // q4 = A2B2C2D2, q6 = A3B3C3D3 + + // Store second elements + "VST1.32 {d4-d5}, [%[outptr]]!\n" + "VZIP.32 q1, q5\n" + ASM_PREFETCH("[%[inptr1], #128]") + "VST1.32 {d17}, [%[outptr]]!\n" + "VZIP.32 q3, q7\n" + + // Store third elements + "VZIP.32 q9, q11\n" + "VST1.32 {d8-d9}, [%[outptr]]!\n" + "VZIP.32 q1, q3\n" + ASM_PREFETCH("[%[inptr2], #128]") + "VST1.32 {d20}, [%[outptr]]!\n" + + // Store fourth elements + "VZIP.32 q5, q7\n" + "VST1.32 {d12-d13}, [%[outptr]]!\n" + ASM_PREFETCH("[%[inptr3], #128]") + "VST1.32 {d21}, [%[outptr]]!\n" + + // Fifth + "VST1.32 {d2-d3}, [%[outptr]]!\n" + ASM_PREFETCH("[%[inptr4], #128]") + "VST1.32 {d18}, [%[outptr]]!\n" + + // Sixth + "VST1.32 {d6-d7}, [%[outptr]]!\n" + ASM_PREFETCH("[%[inptr5], #128]") + "VST1.32 {d19}, [%[outptr]]!\n" + + // Seventh + "VST1.32 {d10-d11}, [%[outptr]]!\n" + "VST1.32 {d22}, [%[outptr]]!\n" + + // Eighth + "VST1.32 {d14-d15}, [%[outptr]]!\n" + "VST1.32 {d23}, [%[outptr]]!\n" + + : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3), + [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [outptr] "+r" (outptr) + : + : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "memory" + ); + } + + for (;width>0;width--) { + *outptr++ = *inptr0++; + *outptr++ = *inptr1++; + *outptr++ = *inptr2++; + *outptr++ = *inptr3++; + *outptr++ = *inptr4++; + *outptr++ = *inptr5++; + } +} + +#endif // __arm__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp new file mode 100644 index 0000000000..8054c2b96b --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<4, 16, VLType::None, false>( + int8_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x22, [%x[in], #0x0]\n" + "cmp %x[height], #0x4\n" + "ldr x21, [%x[in], #0x8]\n" + "add x22, x22, %x[row_offset]\n" + "ldr x20, [%x[in], #0x10]\n" + "ldr x19, [%x[in], #0x18]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "add x19, x19, %x[row_offset]\n" + "beq 1f\n" + "mov x19, x22\n" + "cmp %x[height], #0x2\n" + "csel x21, x21, x22, GE\n" + "csel x20, x20, x22, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x22, #0x0]\n" + "cmp %x[width], #0x10\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x19, #0x0]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "prfm pldl1keep, [x19, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q19, [x22], #0x10\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q18, [x21], #0x10\n" + "ldr q17, [x20], #0x10\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x19], #0x10\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q19, [%x[out_ptr], #0x0]\n" + "str q18, [%x[out_ptr], #0x10]\n" + "prfm pldl1keep, [x19, #0x70]\n" + "str q17, [%x[out_ptr], #0x20]\n" + "str q16, [%x[out_ptr], #0x30]\n" + "subs %x[width], %x[width], #0x10\n" + "cmp %x[width], #0x10\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 12f\n" + "tbz %x[width], #3, 7f\n" + "ldr d19, [x22], #0x8\n" + "ldr d18, [x21], #0x8\n" + "ldr d17, [x20], #0x8\n" + "ldr d16, [x19], #0x8\n" + "tbz %x[width], #2, 5f\n" + "ld1 { v19.s }[2], [x22], #0x4\n" + "ld1 { v18.s }[2], [x21], #0x4\n" + "ld1 { v17.s }[2], [x20], #0x4\n" + "ld1 { v16.s }[2], [x19], #0x4\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v19.h }[6], [x22], #0x2\n" + "ld1 { v18.h }[6], [x21], #0x2\n" + "ld1 { v17.h }[6], [x20], #0x2\n" + "ld1 { v16.h }[6], [x19], #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[14], [x22]\n" + "ld1 { v18.b }[14], [x21]\n" + "ld1 { v17.b }[14], [x20]\n" + "ld1 { v16.b }[14], [x19]\n" + "b 11f\n" + "4:" // odd_loads_1_12 + "tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[12], [x22]\n" + "ld1 { v18.b }[12], [x21]\n" + "ld1 { v17.b }[12], [x20]\n" + "ld1 { v16.b }[12], [x19]\n" + "b 11f\n" + "5:" // odd_loads_2_8 + "tbz %x[width], #1, 6f\n" + "ld1 { v19.h }[4], [x22], #0x2\n" + "ld1 { v18.h }[4], [x21], #0x2\n" + "ld1 { v17.h }[4], [x20], #0x2\n" + "ld1 { v16.h }[4], [x19], #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[10], [x22]\n" + "ld1 { v18.b }[10], [x21]\n" + "ld1 { v17.b }[10], [x20]\n" + "ld1 { v16.b }[10], [x19]\n" + "b 11f\n" + "6:" // odd_loads_1_8 + "tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[8], [x22]\n" + "ld1 { v18.b }[8], [x21]\n" + "ld1 { v17.b }[8], [x20]\n" + "ld1 { v16.b }[8], [x19]\n" + "b 11f\n" + "7:" // odd_loads_4_0 + "tbz %x[width], #2, 9f\n" + "ldr s19, [x22], #0x4\n" + "ldr s18, [x21], #0x4\n" + "ldr s17, [x20], #0x4\n" + "ldr s16, [x19], #0x4\n" + "tbz %x[width], #1, 8f\n" + "ld1 { v19.h }[2], [x22], #0x2\n" + "ld1 { v18.h }[2], [x21], #0x2\n" + "ld1 { v17.h }[2], [x20], #0x2\n" + "ld1 { v16.h }[2], [x19], #0x2\n" + 
"tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[6], [x22]\n" + "ld1 { v18.b }[6], [x21]\n" + "ld1 { v17.b }[6], [x20]\n" + "ld1 { v16.b }[6], [x19]\n" + "b 11f\n" + "8:" // odd_loads_1_4 + "tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[4], [x22]\n" + "ld1 { v18.b }[4], [x21]\n" + "ld1 { v17.b }[4], [x20]\n" + "ld1 { v16.b }[4], [x19]\n" + "b 11f\n" + "9:" // odd_loads_2_0 + "tbz %x[width], #1, 10f\n" + "ldr h19, [x22], #0x2\n" + "ldr h18, [x21], #0x2\n" + "ldr h17, [x20], #0x2\n" + "ldr h16, [x19], #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v19.b }[2], [x22]\n" + "ld1 { v18.b }[2], [x21]\n" + "ld1 { v17.b }[2], [x20]\n" + "ld1 { v16.b }[2], [x19]\n" + "b 11f\n" + "10:" // odd_loads_1_0 + "ldr b19, [x22, #0x0]\n" + "ldr b18, [x21, #0x0]\n" + "ldr b17, [x20, #0x0]\n" + "ldr b16, [x19, #0x0]\n" + "11:" // Odd load end + "str q19, [%x[out_ptr], #0x0]\n" + "str q18, [%x[out_ptr], #0x10]\n" + "str q17, [%x[out_ptr], #0x20]\n" + "str q16, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "12:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "x19", "x20", "x21", "x22" + ); +} + +template<> +void interleave_block<4, 16, VLType::None, false>( + uint8_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + int8_t * &out_cast = reinterpret_cast<int8_t * &>(out_ptr); + const int8_t * const * in_cast = reinterpret_cast<const int8_t * const *>(in); + + interleave_block<4, 16, VLType::None, false>(out_cast, in_cast, width, height, row_offset, false); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp new file mode 100644 index 0000000000..1650916f9f --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8_summing.hpp @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<4, 16, VLType::None, true>( + int8_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v28.8h, #0x0\n" + "ldr x23, [%x[in], #0x0]\n" + "mov x22, #0x0\n" + "movi v27.8h, #0x0\n" + "ldr x21, [%x[in], #0x8]\n" + "cmp %x[height], #0x4\n" + "movi v26.8h, #0x0\n" + "ldr x20, [%x[in], #0x10]\n" + "add x23, x23, %x[row_offset]\n" + "movi v25.8h, #0x0\n" + "ldr x19, [%x[in], #0x18]\n" + "movi v24.4s, #0x0\n" + "add x21, x21, %x[row_offset]\n" + "movi v23.4s, #0x0\n" + "add x20, x20, %x[row_offset]\n" + "movi v22.4s, #0x0\n" + "add x19, x19, %x[row_offset]\n" + "movi v21.4s, #0x0\n" + "beq 1f\n" + "mov x19, x23\n" + "cmp %x[height], #0x2\n" + "csel x21, x21, x23, GE\n" + "csel x20, x20, x23, GT\n" + "1:" // no_pointer_adj + "movi v20.4s, #0x0\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x19, #0x0]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "prfm pldl1keep, [x19, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x10\n" + "ld1 { v20.4s }, [%x[out_ptr]]\n" + "2:" // first_pass + "cmp %x[width], #0x10\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x22, #0x7e\n" + "ble 4f\n" + "sadalp v24.4s, v28.8h\n" + "movi v28.8h, #0x0\n" + "sadalp v23.4s, v27.8h\n" + "movi v27.8h, #0x0\n" + "sadalp v22.4s, v26.8h\n" + "movi v26.8h, #0x0\n" + "sadalp v21.4s, v25.8h\n" + "movi v25.8h, #0x0\n" + "mov x22, #0x0\n" + "4:" // no_accumulate_16 + "ldr q19, [x23], #0x10\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x21], #0x10\n" + "ldr q17, [x20], #0x10\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x19], #0x10\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q19, [%x[out_ptr], #0x0]\n" + "sadalp v28.8h, v19.16b\n" + "prfm pldl1keep, [x19, #0x70]\n" + "str q18, [%x[out_ptr], #0x10]\n" + "sadalp v27.8h, v18.16b\n" + "str q17, [%x[out_ptr], #0x20]\n" + "sadalp v26.8h, v17.16b\n" + "str q16, [%x[out_ptr], #0x30]\n" + "sadalp v25.8h, v16.16b\n" + "add x22, x22, #0x1\n" + "subs %x[width], %x[width], #0x10\n" + "cmp %x[width], #0x10\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 14f\n" + "tbz %x[width], #3, 9f\n" + "ldr d19, [x23], #0x8\n" + "ldr d18, [x21], #0x8\n" + "ldr d17, [x20], #0x8\n" + "ldr d16, [x19], #0x8\n" + "tbz %x[width], #2, 7f\n" + "ld1 { v19.s }[2], [x23], #0x4\n" + "ld1 { v18.s }[2], [x21], #0x4\n" + "ld1 { v17.s }[2], [x20], #0x4\n" + "ld1 { v16.s }[2], [x19], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v19.h }[6], [x23], #0x2\n" + "ld1 { v18.h }[6], [x21], #0x2\n" + "ld1 { v17.h }[6], [x20], #0x2\n" + "ld1 { v16.h }[6], [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[14], [x23]\n" + "ld1 { v18.b }[14], [x21]\n" + "ld1 { v17.b }[14], [x20]\n" + "ld1 { v16.b }[14], [x19]\n" + "b 13f\n" + "6:" // odd_loads_1_12 + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[12], [x23]\n" + "ld1 { v18.b }[12], [x21]\n" + "ld1 { v17.b }[12], [x20]\n" + "ld1 { v16.b }[12], [x19]\n" + "b 13f\n" + "7:" // odd_loads_2_8 + "tbz %x[width], #1, 8f\n" + "ld1 { v19.h }[4], [x23], #0x2\n" + "ld1 { v18.h }[4], [x21], #0x2\n" + "ld1 { v17.h }[4], [x20], #0x2\n" + "ld1 { v16.h }[4], [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[10], [x23]\n" + "ld1 { v18.b }[10], [x21]\n" + "ld1 { v17.b }[10], [x20]\n" + "ld1 { v16.b }[10], [x19]\n" + "b 13f\n" 
+ "8:" // odd_loads_1_8 + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[8], [x23]\n" + "ld1 { v18.b }[8], [x21]\n" + "ld1 { v17.b }[8], [x20]\n" + "ld1 { v16.b }[8], [x19]\n" + "b 13f\n" + "9:" // odd_loads_4_0 + "tbz %x[width], #2, 11f\n" + "ldr s19, [x23], #0x4\n" + "ldr s18, [x21], #0x4\n" + "ldr s17, [x20], #0x4\n" + "ldr s16, [x19], #0x4\n" + "tbz %x[width], #1, 10f\n" + "ld1 { v19.h }[2], [x23], #0x2\n" + "ld1 { v18.h }[2], [x21], #0x2\n" + "ld1 { v17.h }[2], [x20], #0x2\n" + "ld1 { v16.h }[2], [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[6], [x23]\n" + "ld1 { v18.b }[6], [x21]\n" + "ld1 { v17.b }[6], [x20]\n" + "ld1 { v16.b }[6], [x19]\n" + "b 13f\n" + "10:" // odd_loads_1_4 + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[4], [x23]\n" + "ld1 { v18.b }[4], [x21]\n" + "ld1 { v17.b }[4], [x20]\n" + "ld1 { v16.b }[4], [x19]\n" + "b 13f\n" + "11:" // odd_loads_2_0 + "tbz %x[width], #1, 12f\n" + "ldr h19, [x23], #0x2\n" + "ldr h18, [x21], #0x2\n" + "ldr h17, [x20], #0x2\n" + "ldr h16, [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[2], [x23]\n" + "ld1 { v18.b }[2], [x21]\n" + "ld1 { v17.b }[2], [x20]\n" + "ld1 { v16.b }[2], [x19]\n" + "b 13f\n" + "12:" // odd_loads_1_0 + "ldr b19, [x23, #0x0]\n" + "ldr b18, [x21, #0x0]\n" + "ldr b17, [x20, #0x0]\n" + "ldr b16, [x19, #0x0]\n" + "13:" // Odd load end + "str q19, [%x[out_ptr], #0x0]\n" + "sadalp v28.8h, v19.16b\n" + "str q18, [%x[out_ptr], #0x10]\n" + "sadalp v27.8h, v18.16b\n" + "str q17, [%x[out_ptr], #0x20]\n" + "sadalp v26.8h, v17.16b\n" + "str q16, [%x[out_ptr], #0x30]\n" + "sadalp v25.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "14:" // Odds skip + "sadalp v24.4s, v28.8h\n" + "sadalp v23.4s, v27.8h\n" + "addp v24.4s, v24.4s, v23.4s\n" + "sadalp v22.4s, v26.8h\n" + "sadalp v21.4s, v25.8h\n" + "addp v23.4s, v22.4s, v21.4s\n" + "addp v24.4s, v24.4s, v23.4s\n" + "add v24.4s, v24.4s, v20.4s\n" + "str q24, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp new file mode 100644 index 0000000000..af3efb25b2 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_u8_u8_summing.hpp @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<4, 16, VLType::None, true>( + uint8_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v28.8h, #0x0\n" + "ldr x23, [%x[in], #0x0]\n" + "mov x22, #0x0\n" + "movi v27.8h, #0x0\n" + "ldr x21, [%x[in], #0x8]\n" + "cmp %x[height], #0x4\n" + "movi v26.8h, #0x0\n" + "ldr x20, [%x[in], #0x10]\n" + "add x23, x23, %x[row_offset]\n" + "movi v25.8h, #0x0\n" + "ldr x19, [%x[in], #0x18]\n" + "movi v24.4s, #0x0\n" + "add x21, x21, %x[row_offset]\n" + "movi v23.4s, #0x0\n" + "add x20, x20, %x[row_offset]\n" + "movi v22.4s, #0x0\n" + "add x19, x19, %x[row_offset]\n" + "movi v21.4s, #0x0\n" + "beq 1f\n" + "mov x19, x23\n" + "cmp %x[height], #0x2\n" + "csel x21, x21, x23, GE\n" + "csel x20, x20, x23, GT\n" + "1:" // no_pointer_adj + "movi v20.4s, #0x0\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x19, #0x0]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "prfm pldl1keep, [x19, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x10\n" + "ld1 { v20.4s }, [%x[out_ptr]]\n" + "2:" // first_pass + "cmp %x[width], #0x10\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x22, #0x7e\n" + "ble 4f\n" + "uadalp v24.4s, v28.8h\n" + "movi v28.8h, #0x0\n" + "uadalp v23.4s, v27.8h\n" + "movi v27.8h, #0x0\n" + "uadalp v22.4s, v26.8h\n" + "movi v26.8h, #0x0\n" + "uadalp v21.4s, v25.8h\n" + "movi v25.8h, #0x0\n" + "mov x22, #0x0\n" + "4:" // no_accumulate_16 + "ldr q19, [x23], #0x10\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x21], #0x10\n" + "ldr q17, [x20], #0x10\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x19], #0x10\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q19, [%x[out_ptr], #0x0]\n" + "uadalp v28.8h, v19.16b\n" + "prfm pldl1keep, [x19, #0x70]\n" + "str q18, [%x[out_ptr], #0x10]\n" + "uadalp v27.8h, v18.16b\n" + "str q17, [%x[out_ptr], #0x20]\n" + "uadalp v26.8h, v17.16b\n" + "str q16, [%x[out_ptr], #0x30]\n" + "uadalp v25.8h, v16.16b\n" + "add x22, x22, #0x1\n" + "subs %x[width], %x[width], #0x10\n" + "cmp %x[width], #0x10\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 14f\n" + "tbz %x[width], #3, 9f\n" + "ldr d19, [x23], #0x8\n" + "ldr d18, [x21], #0x8\n" + "ldr d17, [x20], #0x8\n" + "ldr d16, [x19], #0x8\n" + "tbz %x[width], #2, 7f\n" + "ld1 { v19.s }[2], [x23], #0x4\n" + "ld1 { v18.s }[2], [x21], #0x4\n" + "ld1 { v17.s }[2], [x20], #0x4\n" + "ld1 { v16.s }[2], [x19], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v19.h }[6], [x23], #0x2\n" + "ld1 { v18.h }[6], [x21], #0x2\n" + "ld1 { v17.h }[6], [x20], #0x2\n" + "ld1 { v16.h }[6], [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[14], [x23]\n" + "ld1 { v18.b }[14], [x21]\n" + "ld1 { v17.b }[14], [x20]\n" + "ld1 { v16.b }[14], [x19]\n" + "b 13f\n" + "6:" // odd_loads_1_12 + "tbz %x[width], #0, 13f\n" + 
"ld1 { v19.b }[12], [x23]\n" + "ld1 { v18.b }[12], [x21]\n" + "ld1 { v17.b }[12], [x20]\n" + "ld1 { v16.b }[12], [x19]\n" + "b 13f\n" + "7:" // odd_loads_2_8 + "tbz %x[width], #1, 8f\n" + "ld1 { v19.h }[4], [x23], #0x2\n" + "ld1 { v18.h }[4], [x21], #0x2\n" + "ld1 { v17.h }[4], [x20], #0x2\n" + "ld1 { v16.h }[4], [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[10], [x23]\n" + "ld1 { v18.b }[10], [x21]\n" + "ld1 { v17.b }[10], [x20]\n" + "ld1 { v16.b }[10], [x19]\n" + "b 13f\n" + "8:" // odd_loads_1_8 + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[8], [x23]\n" + "ld1 { v18.b }[8], [x21]\n" + "ld1 { v17.b }[8], [x20]\n" + "ld1 { v16.b }[8], [x19]\n" + "b 13f\n" + "9:" // odd_loads_4_0 + "tbz %x[width], #2, 11f\n" + "ldr s19, [x23], #0x4\n" + "ldr s18, [x21], #0x4\n" + "ldr s17, [x20], #0x4\n" + "ldr s16, [x19], #0x4\n" + "tbz %x[width], #1, 10f\n" + "ld1 { v19.h }[2], [x23], #0x2\n" + "ld1 { v18.h }[2], [x21], #0x2\n" + "ld1 { v17.h }[2], [x20], #0x2\n" + "ld1 { v16.h }[2], [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[6], [x23]\n" + "ld1 { v18.b }[6], [x21]\n" + "ld1 { v17.b }[6], [x20]\n" + "ld1 { v16.b }[6], [x19]\n" + "b 13f\n" + "10:" // odd_loads_1_4 + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[4], [x23]\n" + "ld1 { v18.b }[4], [x21]\n" + "ld1 { v17.b }[4], [x20]\n" + "ld1 { v16.b }[4], [x19]\n" + "b 13f\n" + "11:" // odd_loads_2_0 + "tbz %x[width], #1, 12f\n" + "ldr h19, [x23], #0x2\n" + "ldr h18, [x21], #0x2\n" + "ldr h17, [x20], #0x2\n" + "ldr h16, [x19], #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v19.b }[2], [x23]\n" + "ld1 { v18.b }[2], [x21]\n" + "ld1 { v17.b }[2], [x20]\n" + "ld1 { v16.b }[2], [x19]\n" + "b 13f\n" + "12:" // odd_loads_1_0 + "ldr b19, [x23, #0x0]\n" + "ldr b18, [x21, #0x0]\n" + "ldr b17, [x20, #0x0]\n" + "ldr b16, [x19, #0x0]\n" + "13:" // Odd load end + "str q19, [%x[out_ptr], #0x0]\n" + "uadalp v28.8h, v19.16b\n" + "str q18, [%x[out_ptr], #0x10]\n" + "uadalp v27.8h, v18.16b\n" + "str q17, [%x[out_ptr], #0x20]\n" + "uadalp v26.8h, v17.16b\n" + "str q16, [%x[out_ptr], #0x30]\n" + "uadalp v25.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "14:" // Odds skip + "uadalp v24.4s, v28.8h\n" + "uadalp v23.4s, v27.8h\n" + "addp v24.4s, v24.4s, v23.4s\n" + "uadalp v22.4s, v26.8h\n" + "uadalp v21.4s, v25.8h\n" + "addp v23.4s, v22.4s, v21.4s\n" + "addp v24.4s, v24.4s, v23.4s\n" + "add v24.4s, v24.4s, v20.4s\n" + "str q24, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp new file mode 100644 index 0000000000..34d25f27b8 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_bf16_fp32.hpp @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + float * &out_ptr, const bfloat16 * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "movi v29.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x4\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr d28, [x27], #0x8\n" + "zip1 v28.8h, v29.8h, v28.8h\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr d27, [x26], #0x8\n" + "zip1 v27.8h, v29.8h, v27.8h\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr d26, [x25], #0x8\n" + "zip1 v26.8h, v29.8h, v26.8h\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr d25, [x24], #0x8\n" + "zip1 v20.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x24, #0x70]\n" + "zip1 v25.8h, v29.8h, v25.8h\n" + "ldr d24, [x23], #0x8\n" + "zip1 v19.4s, v27.4s, v25.4s\n" + "prfm pldl1keep, [x23, #0x70]\n" + "zip1 v24.8h, v29.8h, v24.8h\n" + "ldr d23, [x22], #0x8\n" + "zip1 v16.4s, v20.4s, v19.4s\n" + "prfm pldl1keep, 
[x22, #0x70]\n" + "zip1 v23.8h, v29.8h, v23.8h\n" + "ldr d22, [x21], #0x8\n" + "zip2 v19.4s, v20.4s, v19.4s\n" + "prfm pldl1keep, [x21, #0x70]\n" + "zip1 v22.8h, v29.8h, v22.8h\n" + "ldr d21, [x20], #0x8\n" + "zip1 v18.4s, v24.4s, v22.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v21.8h, v29.8h, v21.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v17.4s, v23.4s, v21.4s\n" + "subs %x[width], %x[width], #0x4\n" + "zip2 v20.4s, v28.4s, v26.4s\n" + "cmp %x[width], #0x4\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v16.4s, v18.4s, v17.4s\n" + "str q19, [%x[out_ptr], #0x20]\n" + "zip2 v19.4s, v27.4s, v25.4s\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip1 v16.4s, v20.4s, v19.4s\n" + "str q16, [%x[out_ptr], #0x40]\n" + "zip2 v18.4s, v24.4s, v22.4s\n" + "zip2 v17.4s, v23.4s, v21.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v16.4s, v20.4s, v19.4s\n" + "str q16, [%x[out_ptr], #0x60]\n" + "zip2 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 6f\n" + "tbz %x[width], #1, 4f\n" + "ldr s28, [x27], #0x4\n" + "ldr s27, [x26], #0x4\n" + "ldr s26, [x25], #0x4\n" + "ldr s25, [x24], #0x4\n" + "ldr s24, [x23], #0x4\n" + "ldr s23, [x22], #0x4\n" + "ldr s22, [x21], #0x4\n" + "ldr s21, [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 5f\n" + "ld1 { v28.h }[2], [x27]\n" + "ld1 { v27.h }[2], [x26]\n" + "ld1 { v26.h }[2], [x25]\n" + "ld1 { v25.h }[2], [x24]\n" + "ld1 { v24.h }[2], [x23]\n" + "ld1 { v23.h }[2], [x22]\n" + "ld1 { v22.h }[2], [x21]\n" + "ld1 { v21.h }[2], [x20]\n" + "mov x19, #0x3\n" + "b 5f\n" + "4:" // odd_loads_1_0 + "ldr h28, [x27, #0x0]\n" + "ldr h27, [x26, #0x0]\n" + "ldr h26, [x25, #0x0]\n" + "ldr h25, [x24, #0x0]\n" + "ldr h24, [x23, #0x0]\n" + "ldr h23, [x22, #0x0]\n" + "ldr h22, [x21, #0x0]\n" + "ldr h21, [x20, #0x0]\n" + "mov x19, #0x1\n" + "5:" // Odd load end + "zip1 v28.8h, v29.8h, v28.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v27.8h, v29.8h, v27.8h\n" + "zip1 v26.8h, v29.8h, v26.8h\n" + "zip1 v25.8h, v29.8h, v25.8h\n" + "zip1 v24.8h, v29.8h, v24.8h\n" + "zip1 v23.8h, v29.8h, v23.8h\n" + "zip1 v22.8h, v29.8h, v22.8h\n" + "zip1 v21.8h, v29.8h, v21.8h\n" + "zip1 v20.4s, v28.4s, v26.4s\n" + "zip1 v19.4s, v27.4s, v25.4s\n" + "zip1 v16.4s, v20.4s, v19.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v18.4s, v24.4s, v22.4s\n" + "zip1 v17.4s, v23.4s, v21.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 6f\n" + "zip2 v19.4s, v20.4s, v19.4s\n" + "zip2 v16.4s, v18.4s, v17.4s\n" + "str q19, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 6f\n" + "zip2 v20.4s, v28.4s, v26.4s\n" + "zip2 v19.4s, v27.4s, v25.4s\n" + "zip1 v16.4s, v20.4s, v19.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip2 v18.4s, v24.4s, v22.4s\n" + "zip2 v17.4s, v23.4s, v21.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "6:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git 
a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp new file mode 100644 index 0000000000..d547957129 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + __fp16 * &out_ptr, const __fp16 * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x8\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q30, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q29, [x26], #0x10\n" + "ldr q28, [x25], #0x10\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q27, [x24], #0x10\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q24, [x23], #0x10\n" + 
"zip1 v26.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q25, [x22], #0x10\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q23, [x21], #0x10\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q22, [x20], #0x10\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "prfm pldl1keep, [x21, #0x70]\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "cmp %x[width], #0x8\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip2 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x20]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x60]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 8f\n" + "tbz %x[width], #2, 5f\n" + "ldr d30, [x27], #0x8\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "ldr d27, [x24], #0x8\n" + "ldr d24, [x23], #0x8\n" + "ldr d25, [x22], #0x8\n" + "ldr d23, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v30.s }[2], [x27], #0x4\n" + "ld1 { v29.s }[2], [x26], #0x4\n" + "ld1 { v28.s }[2], [x25], #0x4\n" + "ld1 { v27.s }[2], [x24], #0x4\n" + "ld1 { v24.s }[2], [x23], #0x4\n" + "ld1 { v25.s }[2], [x22], #0x4\n" + "ld1 { v23.s }[2], [x21], #0x4\n" + "ld1 { v22.s }[2], [x20], #0x4\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.h }[6], [x27]\n" + "ld1 { v29.h }[6], [x26]\n" + "ld1 { v28.h }[6], [x25]\n" + "ld1 { v27.h }[6], [x24]\n" + "ld1 { v24.h }[6], [x23]\n" + "ld1 { v25.h }[6], [x22]\n" + "ld1 { v23.h }[6], [x21]\n" + "ld1 { v22.h }[6], [x20]\n" + "mov x19, #0x7\n" + "b 7f\n" + "4:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.h }[4], [x27]\n" + "ld1 { v29.h }[4], [x26]\n" + "ld1 { v28.h }[4], [x25]\n" + "ld1 { v27.h }[4], [x24]\n" + "ld1 { v24.h }[4], [x23]\n" + "ld1 { v25.h }[4], [x22]\n" + "ld1 { v23.h }[4], [x21]\n" + "ld1 { v22.h }[4], [x20]\n" + "mov x19, #0x5\n" + "b 7f\n" + "5:" // odd_loads_2_0 + "tbz %x[width], #1, 6f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s24, [x23], #0x4\n" + "ldr s25, [x22], #0x4\n" + "ldr s23, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.h }[2], [x27]\n" + "ld1 { v29.h }[2], [x26]\n" + "ld1 { v28.h }[2], [x25]\n" + "ld1 { v27.h }[2], [x24]\n" + "ld1 { v24.h }[2], [x23]\n" + "ld1 { v25.h }[2], [x22]\n" + "ld1 { v23.h }[2], [x21]\n" + "ld1 { v22.h }[2], [x20]\n" + "mov x19, #0x3\n" + "b 7f\n" + "6:" // odd_loads_1_0 + "ldr h30, [x27, #0x0]\n" + "ldr h29, [x26, #0x0]\n" + "ldr h28, [x25, #0x0]\n" + "ldr h27, [x24, #0x0]\n" + "ldr h24, [x23, #0x0]\n" + "ldr h25, [x22, #0x0]\n" + "ldr h23, [x21, #0x0]\n" + 
"ldr h22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "7:" // Odd load end + "zip1 v26.8h, v30.8h, v24.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v20.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "8:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp new file mode 100644 index 0000000000..b45e622a47 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + float * &out_ptr, const __fp16 * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x4\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr d29, [x27], #0x8\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr d28, [x26], #0x8\n" + "ldr d27, [x25], #0x8\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr d26, [x24], #0x8\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr d25, [x23], #0x8\n" + "ldr d24, [x22], #0x8\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr d23, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "prfm pldl1keep, [x23, #0x70]\n" + "prfm pldl1keep, [x22, #0x70]\n" + "fcvtl v29.4s, v29.4h\n" + "fcvtl v28.4s, v28.4h\n" + "prfm pldl1keep, [x21, #0x70]\n" + "fcvtl v27.4s, v27.4h\n" + "zip1 v20.4s, v29.4s, v27.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "fcvtl v26.4s, v26.4h\n" + "zip2 v18.4s, v29.4s, v27.4s\n" + "fcvtl v25.4s, v25.4h\n" + "fcvtl v24.4s, v24.4h\n" + "zip1 v19.4s, v28.4s, v26.4s\n" + "fcvtl v23.4s, v23.4h\n" + "zip2 v17.4s, v28.4s, v26.4s\n" + "fcvtl v22.4s, v22.4h\n" + "zip1 v16.4s, v20.4s, v19.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip2 v21.4s, v20.4s, v19.4s\n" + "subs %x[width], %x[width], #0x4\n" + "zip1 v20.4s, v18.4s, v17.4s\n" + "cmp %x[width], #0x4\n" + "zip2 v19.4s, v18.4s, v17.4s\n" + "zip1 v18.4s, v25.4s, v23.4s\n" + "zip1 v17.4s, v24.4s, v22.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v16.4s, v18.4s, v17.4s\n" + "str q21, [%x[out_ptr], #0x20]\n" + "zip2 v18.4s, v25.4s, v23.4s\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v17.4s, v24.4s, v22.4s\n" + "str q20, [%x[out_ptr], #0x40]\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v16.4s, v18.4s, v17.4s\n" + "str q19, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 6f\n" + "tbz %x[width], #1, 4f\n" + "ldr s29, [x27], #0x4\n" + "ldr s28, [x26], 
#0x4\n" + "ldr s27, [x25], #0x4\n" + "ldr s26, [x24], #0x4\n" + "ldr s25, [x23], #0x4\n" + "ldr s24, [x22], #0x4\n" + "ldr s23, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 5f\n" + "ld1 { v29.h }[2], [x27]\n" + "ld1 { v28.h }[2], [x26]\n" + "ld1 { v27.h }[2], [x25]\n" + "ld1 { v26.h }[2], [x24]\n" + "ld1 { v25.h }[2], [x23]\n" + "ld1 { v24.h }[2], [x22]\n" + "ld1 { v23.h }[2], [x21]\n" + "ld1 { v22.h }[2], [x20]\n" + "mov x19, #0x3\n" + "b 5f\n" + "4:" // odd_loads_1_0 + "ldr h29, [x27, #0x0]\n" + "ldr h28, [x26, #0x0]\n" + "ldr h27, [x25, #0x0]\n" + "ldr h26, [x24, #0x0]\n" + "ldr h25, [x23, #0x0]\n" + "ldr h24, [x22, #0x0]\n" + "ldr h23, [x21, #0x0]\n" + "ldr h22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "5:" // Odd load end + "fcvtl v29.4s, v29.4h\n" + "fcvtl v28.4s, v28.4h\n" + "fcvtl v27.4s, v27.4h\n" + "zip1 v20.4s, v29.4s, v27.4s\n" + "fcvtl v26.4s, v26.4h\n" + "fcvtl v25.4s, v25.4h\n" + "zip1 v19.4s, v28.4s, v26.4s\n" + "fcvtl v24.4s, v24.4h\n" + "fcvtl v23.4s, v23.4h\n" + "zip1 v16.4s, v20.4s, v19.4s\n" + "fcvtl v22.4s, v22.4h\n" + "zip1 v18.4s, v25.4s, v23.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "subs x19, x19, #0x1\n" + "zip1 v17.4s, v24.4s, v22.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 6f\n" + "zip2 v21.4s, v20.4s, v19.4s\n" + "zip2 v16.4s, v18.4s, v17.4s\n" + "str q21, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 6f\n" + "zip2 v18.4s, v29.4s, v27.4s\n" + "zip2 v17.4s, v28.4s, v26.4s\n" + "zip1 v20.4s, v18.4s, v17.4s\n" + "str q20, [%x[out_ptr], #0x0]\n" + "zip2 v18.4s, v25.4s, v23.4s\n" + "zip2 v17.4s, v24.4s, v22.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "6:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp new file mode 100644 index 0000000000..3f38859c1c --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + float * &out_ptr, const float * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #2\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #2\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #2\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #2\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #2\n" + "add x22, x22, %x[row_offset], LSL #2\n" + "add x21, x21, %x[row_offset], LSL #2\n" + "add x20, x20, %x[row_offset], LSL #2\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x4\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q28, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q27, [x26], #0x10\n" + "ldr q26, [x25], #0x10\n" + "zip1 v23.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q22, [x24], #0x10\n" + "zip2 v26.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q25, [x23], #0x10\n" + "zip1 v20.4s, v27.4s, v22.4s\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q24, [x22], #0x10\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v23.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x22, #0x70]\n" + "zip2 v22.4s, v27.4s, v22.4s\n" + "ldr q21, [x20], #0x10\n" + "zip1 v18.4s, v25.4s, v19.4s\n" + "prfm pldl1keep, [x21, #0x70]\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v20.4s, v26.4s, v22.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v16.4s, v24.4s, v21.4s\n" + "subs %x[width], %x[width], #0x4\n" + "zip1 v17.4s, v18.4s, v16.4s\n" + "cmp %x[width], #0x4\n" + "zip2 v16.4s, v18.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x10]\n" + "zip2 v19.4s, v25.4s, v19.4s\n" + "str q23, [%x[out_ptr], #0x20]\n" + "zip2 v18.4s, v24.4s, v21.4s\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip1 v16.4s, v19.4s, v18.4s\n" + "str q20, [%x[out_ptr], #0x40]\n" + "zip2 v17.4s, v26.4s, v22.4s\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v16.4s, v19.4s, v18.4s\n" + "str q17, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 6f\n" + "tbz 
%x[width], #1, 4f\n" + "ldr d28, [x27], #0x8\n" + "ldr d27, [x26], #0x8\n" + "ldr d26, [x25], #0x8\n" + "ldr d22, [x24], #0x8\n" + "ldr d25, [x23], #0x8\n" + "ldr d24, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d21, [x20], #0x8\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 5f\n" + "ld1 { v28.s }[2], [x27]\n" + "ld1 { v27.s }[2], [x26]\n" + "ld1 { v26.s }[2], [x25]\n" + "ld1 { v22.s }[2], [x24]\n" + "ld1 { v25.s }[2], [x23]\n" + "ld1 { v24.s }[2], [x22]\n" + "ld1 { v19.s }[2], [x21]\n" + "ld1 { v21.s }[2], [x20]\n" + "mov x19, #0x3\n" + "b 5f\n" + "4:" // odd_loads_1_0 + "ldr s28, [x27, #0x0]\n" + "ldr s27, [x26, #0x0]\n" + "ldr s26, [x25, #0x0]\n" + "ldr s22, [x24, #0x0]\n" + "ldr s25, [x23, #0x0]\n" + "ldr s24, [x22, #0x0]\n" + "ldr s19, [x21, #0x0]\n" + "ldr s21, [x20, #0x0]\n" + "mov x19, #0x1\n" + "5:" // Odd load end + "zip1 v23.4s, v28.4s, v26.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v27.4s, v22.4s\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v18.4s, v25.4s, v19.4s\n" + "zip1 v16.4s, v24.4s, v21.4s\n" + "zip1 v17.4s, v18.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 6f\n" + "zip2 v23.4s, v23.4s, v20.4s\n" + "zip2 v16.4s, v18.4s, v16.4s\n" + "str q23, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 6f\n" + "zip2 v26.4s, v28.4s, v26.4s\n" + "zip2 v22.4s, v27.4s, v22.4s\n" + "zip1 v20.4s, v26.4s, v22.4s\n" + "str q20, [%x[out_ptr], #0x0]\n" + "zip2 v19.4s, v25.4s, v19.4s\n" + "zip2 v18.4s, v24.4s, v21.4s\n" + "zip1 v16.4s, v19.4s, v18.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "6:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp new file mode 100644 index 0000000000..03f552a575 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + int16_t * &out_ptr, const int16_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x8\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q30, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q29, [x26], #0x10\n" + "ldr q28, [x25], #0x10\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q27, [x24], #0x10\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q24, [x23], #0x10\n" + "zip1 v26.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q25, [x22], #0x10\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q23, [x21], #0x10\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q22, [x20], #0x10\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "prfm pldl1keep, [x21, #0x70]\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "cmp %x[width], #0x8\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip2 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x20]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "zip2 
v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x60]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 8f\n" + "tbz %x[width], #2, 5f\n" + "ldr d30, [x27], #0x8\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "ldr d27, [x24], #0x8\n" + "ldr d24, [x23], #0x8\n" + "ldr d25, [x22], #0x8\n" + "ldr d23, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v30.s }[2], [x27], #0x4\n" + "ld1 { v29.s }[2], [x26], #0x4\n" + "ld1 { v28.s }[2], [x25], #0x4\n" + "ld1 { v27.s }[2], [x24], #0x4\n" + "ld1 { v24.s }[2], [x23], #0x4\n" + "ld1 { v25.s }[2], [x22], #0x4\n" + "ld1 { v23.s }[2], [x21], #0x4\n" + "ld1 { v22.s }[2], [x20], #0x4\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.h }[6], [x27]\n" + "ld1 { v29.h }[6], [x26]\n" + "ld1 { v28.h }[6], [x25]\n" + "ld1 { v27.h }[6], [x24]\n" + "ld1 { v24.h }[6], [x23]\n" + "ld1 { v25.h }[6], [x22]\n" + "ld1 { v23.h }[6], [x21]\n" + "ld1 { v22.h }[6], [x20]\n" + "mov x19, #0x7\n" + "b 7f\n" + "4:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.h }[4], [x27]\n" + "ld1 { v29.h }[4], [x26]\n" + "ld1 { v28.h }[4], [x25]\n" + "ld1 { v27.h }[4], [x24]\n" + "ld1 { v24.h }[4], [x23]\n" + "ld1 { v25.h }[4], [x22]\n" + "ld1 { v23.h }[4], [x21]\n" + "ld1 { v22.h }[4], [x20]\n" + "mov x19, #0x5\n" + "b 7f\n" + "5:" // odd_loads_2_0 + "tbz %x[width], #1, 6f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s24, [x23], #0x4\n" + "ldr s25, [x22], #0x4\n" + "ldr s23, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.h }[2], [x27]\n" + "ld1 { v29.h }[2], [x26]\n" + "ld1 { v28.h }[2], [x25]\n" + "ld1 { v27.h }[2], [x24]\n" + "ld1 { v24.h }[2], [x23]\n" + "ld1 { v25.h }[2], [x22]\n" + "ld1 { v23.h }[2], [x21]\n" + "ld1 { v22.h }[2], [x20]\n" + "mov x19, #0x3\n" + "b 7f\n" + "6:" // odd_loads_1_0 + "ldr h30, [x27, #0x0]\n" + "ldr h29, [x26, #0x0]\n" + "ldr h28, [x25, #0x0]\n" + "ldr h27, [x24, #0x0]\n" + "ldr h24, [x23, #0x0]\n" + "ldr h25, [x22, #0x0]\n" + "ldr h23, [x21, #0x0]\n" + "ldr h22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "7:" // Odd load end + "zip1 v26.8h, v30.8h, v24.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v20.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], 
%x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "8:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + +template<> +void interleave_block<8, 1, VLType::None, false>( + uint16_t * &out_ptr, const uint16_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + int16_t * &out_cast = reinterpret_cast<int16_t * &>(out_ptr); + const int16_t * const * in_cast = reinterpret_cast<const int16_t * const *>(in); + + interleave_block<8, 1, VLType::None, false>(out_cast, in_cast, width, height, row_offset, false); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp new file mode 100644 index 0000000000..35c7719de7 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16_summing.hpp @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
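+ *
+ * (Summing variant of the 8-row int16 interleave: alongside the interleaved
+ * panel, 16-bit running sums of each row are kept in v1 and periodically
+ * widened into the 32-bit accumulators v0/v31, presumably to limit overflow;
+ * the eight int32 row sums are stored after the panel and are reloaded when
+ * 'first' is false so that a panel split across calls keeps accumulating.)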
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, true>( + int16_t * &out_ptr, const int16_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v1.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v31.4s, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x24, [%x[in], #0x18]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v0.4s }, [%x[out_ptr]]\n" + "ldr q31, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x8\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0xe\n" + "ble 4f\n" + "saddw v0.4s, v0.4s, v1.4h\n" + "saddw2 v31.4s, v31.4s, v1.8h\n" + "mov x19, #0x0\n" + "movi v1.8h, #0x0\n" + "4:" // no_accumulate_16 + "ldr q30, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q29, [x26], #0x10\n" + "ldr q28, [x25], #0x10\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q27, [x24], #0x10\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q24, [x23], #0x10\n" + "zip1 v26.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q25, [x22], #0x10\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q23, [x21], #0x10\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q22, [x20], #0x10\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "prfm pldl1keep, [x21, #0x70]\n" + "add x19, x19, #0x1\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "cmp %x[width], #0x8\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v20.8h, v17.8h\n" + "str q17, [%x[out_ptr], #0x10]\n" + "zip2 v16.8h, v21.8h, v19.8h\n" + "add v1.8h, v1.8h, v17.8h\n" + "zip1 v17.8h, v18.8h, v16.8h\n" + "str q17, [%x[out_ptr], #0x20]\n" + "zip2 v16.8h, v18.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "add v1.8h, v1.8h, v17.8h\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "add v1.8h, 
v1.8h, v16.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x60]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "add v1.8h, v1.8h, v16.8h\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 10f\n" + "tbz %x[width], #2, 7f\n" + "ldr d30, [x27], #0x8\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "ldr d27, [x24], #0x8\n" + "ldr d24, [x23], #0x8\n" + "ldr d25, [x22], #0x8\n" + "ldr d23, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v30.s }[2], [x27], #0x4\n" + "ld1 { v29.s }[2], [x26], #0x4\n" + "ld1 { v28.s }[2], [x25], #0x4\n" + "ld1 { v27.s }[2], [x24], #0x4\n" + "ld1 { v24.s }[2], [x23], #0x4\n" + "ld1 { v25.s }[2], [x22], #0x4\n" + "ld1 { v23.s }[2], [x21], #0x4\n" + "ld1 { v22.s }[2], [x20], #0x4\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.h }[6], [x27]\n" + "ld1 { v29.h }[6], [x26]\n" + "ld1 { v28.h }[6], [x25]\n" + "ld1 { v27.h }[6], [x24]\n" + "ld1 { v24.h }[6], [x23]\n" + "ld1 { v25.h }[6], [x22]\n" + "ld1 { v23.h }[6], [x21]\n" + "ld1 { v22.h }[6], [x20]\n" + "mov x19, #0x7\n" + "b 9f\n" + "6:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.h }[4], [x27]\n" + "ld1 { v29.h }[4], [x26]\n" + "ld1 { v28.h }[4], [x25]\n" + "ld1 { v27.h }[4], [x24]\n" + "ld1 { v24.h }[4], [x23]\n" + "ld1 { v25.h }[4], [x22]\n" + "ld1 { v23.h }[4], [x21]\n" + "ld1 { v22.h }[4], [x20]\n" + "mov x19, #0x5\n" + "b 9f\n" + "7:" // odd_loads_2_0 + "tbz %x[width], #1, 8f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s24, [x23], #0x4\n" + "ldr s25, [x22], #0x4\n" + "ldr s23, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.h }[2], [x27]\n" + "ld1 { v29.h }[2], [x26]\n" + "ld1 { v28.h }[2], [x25]\n" + "ld1 { v27.h }[2], [x24]\n" + "ld1 { v24.h }[2], [x23]\n" + "ld1 { v25.h }[2], [x22]\n" + "ld1 { v23.h }[2], [x21]\n" + "ld1 { v22.h }[2], [x20]\n" + "mov x19, #0x3\n" + "b 9f\n" + "8:" // odd_loads_1_0 + "ldr h30, [x27, #0x0]\n" + "ldr h29, [x26, #0x0]\n" + "ldr h28, [x25, #0x0]\n" + "ldr h27, [x24, #0x0]\n" + "ldr h24, [x23, #0x0]\n" + "ldr h25, [x22, #0x0]\n" + "ldr h23, [x21, #0x0]\n" + "ldr h22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "9:" // Odd load end + "zip1 v26.8h, v30.8h, v24.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v17.8h, v20.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v17.8h\n" + "str q17, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip2 v16.8h, v21.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v17.8h, v18.8h, v16.8h\n" + "str q17, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v17.8h\n" + 
"add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v16.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "10:" // Odds skip + "saddw v0.4s, v0.4s, v1.4h\n" + "str q0, [%x[out_ptr], #0x0]\n" + "saddw2 v31.4s, v31.4s, v1.8h\n" + "str q31, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp new file mode 100644 index 0000000000..582836fe67 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + int16_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset]\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x8\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr d30, [x27], #0x8\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr d27, [x24], #0x8\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr d23, [x23], #0x8\n" + "ldr d21, [x22], #0x8\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr d26, [x21], #0x8\n" + "ldr d25, [x20], #0x8\n" + "prfm pldl1keep, [x23, #0x70]\n" + "prfm pldl1keep, [x22, #0x70]\n" + "sshll v30.8h, v30.8b, #0x0\n" + "sshll v29.8h, v29.8b, #0x0\n" + "prfm pldl1keep, [x21, #0x70]\n" + "sshll v28.8h, v28.8b, #0x0\n" + "prfm pldl1keep, [x20, #0x70]\n" + "sshll v27.8h, v27.8b, #0x0\n" + "sshll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "sshll v21.8h, v21.8b, #0x0\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "sshll v26.8h, v26.8b, #0x0\n" + "sshll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "subs %x[width], %x[width], #0x8\n" + "zip2 v21.8h, v29.8h, v21.8h\n" + "cmp %x[width], #0x8\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x20]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], 
#0x60]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 8f\n" + "tbz %x[width], #2, 5f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s23, [x23], #0x4\n" + "ldr s21, [x22], #0x4\n" + "ldr s26, [x21], #0x4\n" + "ldr s25, [x20], #0x4\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v29.h }[2], [x26], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x24], #0x2\n" + "ld1 { v23.h }[2], [x23], #0x2\n" + "ld1 { v21.h }[2], [x22], #0x2\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.b }[6], [x27]\n" + "ld1 { v29.b }[6], [x26]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x24]\n" + "ld1 { v23.b }[6], [x23]\n" + "ld1 { v21.b }[6], [x22]\n" + "ld1 { v26.b }[6], [x21]\n" + "ld1 { v25.b }[6], [x20]\n" + "mov x19, #0x7\n" + "b 7f\n" + "4:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.b }[4], [x27]\n" + "ld1 { v29.b }[4], [x26]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x24]\n" + "ld1 { v23.b }[4], [x23]\n" + "ld1 { v21.b }[4], [x22]\n" + "ld1 { v26.b }[4], [x21]\n" + "ld1 { v25.b }[4], [x20]\n" + "mov x19, #0x5\n" + "b 7f\n" + "5:" // odd_loads_2_0 + "tbz %x[width], #1, 6f\n" + "ldr h30, [x27], #0x2\n" + "ldr h29, [x26], #0x2\n" + "ldr h28, [x25], #0x2\n" + "ldr h27, [x24], #0x2\n" + "ldr h23, [x23], #0x2\n" + "ldr h21, [x22], #0x2\n" + "ldr h26, [x21], #0x2\n" + "ldr h25, [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.b }[2], [x27]\n" + "ld1 { v29.b }[2], [x26]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x24]\n" + "ld1 { v23.b }[2], [x23]\n" + "ld1 { v21.b }[2], [x22]\n" + "ld1 { v26.b }[2], [x21]\n" + "ld1 { v25.b }[2], [x20]\n" + "mov x19, #0x3\n" + "b 7f\n" + "6:" // odd_loads_1_0 + "ldr b30, [x27, #0x0]\n" + "ldr b29, [x26, #0x0]\n" + "ldr b28, [x25, #0x0]\n" + "ldr b27, [x24, #0x0]\n" + "ldr b23, [x23, #0x0]\n" + "ldr b21, [x22, #0x0]\n" + "ldr b26, [x21, #0x0]\n" + "ldr b25, [x20, #0x0]\n" + "mov x19, #0x1\n" + "7:" // Odd load end + "sshll v30.8h, v30.8b, #0x0\n" + "sshll v29.8h, v29.8b, #0x0\n" + "sshll v28.8h, v28.8b, #0x0\n" + "sshll v27.8h, v27.8b, #0x0\n" + "sshll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "sshll v21.8h, v21.8b, #0x0\n" + "sshll v26.8h, v26.8b, #0x0\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "sshll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 
v21.8h, v29.8h, v21.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "8:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp new file mode 100644 index 0000000000..35dc3dc0d4 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16_summing.hpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
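+ *
+ * (Combines the sign-extending int8 to int16 loads with the running row-sum
+ * accumulation of the other summing variants: sums build up in v1 and are
+ * widened into v0/v31 with SADDW/SADDW2 before being stored after the
+ * interleaved panel.)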
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, true>( + int16_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v1.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v31.4s, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset]\n" + "ldr x24, [%x[in], #0x18]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v0.4s }, [%x[out_ptr]]\n" + "ldr q31, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x8\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0xe\n" + "ble 4f\n" + "saddw v0.4s, v0.4s, v1.4h\n" + "saddw2 v31.4s, v31.4s, v1.8h\n" + "mov x19, #0x0\n" + "movi v1.8h, #0x0\n" + "4:" // no_accumulate_16 + "ldr d30, [x27], #0x8\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr d27, [x24], #0x8\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr d23, [x23], #0x8\n" + "ldr d21, [x22], #0x8\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr d26, [x21], #0x8\n" + "ldr d25, [x20], #0x8\n" + "prfm pldl1keep, [x23, #0x70]\n" + "prfm pldl1keep, [x22, #0x70]\n" + "sshll v30.8h, v30.8b, #0x0\n" + "sshll v29.8h, v29.8b, #0x0\n" + "prfm pldl1keep, [x21, #0x70]\n" + "sshll v28.8h, v28.8b, #0x0\n" + "prfm pldl1keep, [x20, #0x70]\n" + "sshll v27.8h, v27.8b, #0x0\n" + "sshll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "sshll v21.8h, v21.8b, #0x0\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "sshll v26.8h, v26.8b, #0x0\n" + "sshll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "add x19, x19, #0x1\n" + "zip2 v21.8h, v29.8h, v21.8h\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "cmp %x[width], #0x8\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" 
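+ // Each 16-byte store in this loop holds one sign-extended element from each
+ // of the eight source rows; v1 accumulates the per-row running sums.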
+ "str q16, [%x[out_ptr], #0x20]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x60]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "add v1.8h, v1.8h, v16.8h\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 10f\n" + "tbz %x[width], #2, 7f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s23, [x23], #0x4\n" + "ldr s21, [x22], #0x4\n" + "ldr s26, [x21], #0x4\n" + "ldr s25, [x20], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v29.h }[2], [x26], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x24], #0x2\n" + "ld1 { v23.h }[2], [x23], #0x2\n" + "ld1 { v21.h }[2], [x22], #0x2\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.b }[6], [x27]\n" + "ld1 { v29.b }[6], [x26]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x24]\n" + "ld1 { v23.b }[6], [x23]\n" + "ld1 { v21.b }[6], [x22]\n" + "ld1 { v26.b }[6], [x21]\n" + "ld1 { v25.b }[6], [x20]\n" + "mov x19, #0x7\n" + "b 9f\n" + "6:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.b }[4], [x27]\n" + "ld1 { v29.b }[4], [x26]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x24]\n" + "ld1 { v23.b }[4], [x23]\n" + "ld1 { v21.b }[4], [x22]\n" + "ld1 { v26.b }[4], [x21]\n" + "ld1 { v25.b }[4], [x20]\n" + "mov x19, #0x5\n" + "b 9f\n" + "7:" // odd_loads_2_0 + "tbz %x[width], #1, 8f\n" + "ldr h30, [x27], #0x2\n" + "ldr h29, [x26], #0x2\n" + "ldr h28, [x25], #0x2\n" + "ldr h27, [x24], #0x2\n" + "ldr h23, [x23], #0x2\n" + "ldr h21, [x22], #0x2\n" + "ldr h26, [x21], #0x2\n" + "ldr h25, [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.b }[2], [x27]\n" + "ld1 { v29.b }[2], [x26]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x24]\n" + "ld1 { v23.b }[2], [x23]\n" + "ld1 { v21.b }[2], [x22]\n" + "ld1 { v26.b }[2], [x21]\n" + "ld1 { v25.b }[2], [x20]\n" + "mov x19, #0x3\n" + "b 9f\n" + "8:" // odd_loads_1_0 + "ldr b30, [x27, #0x0]\n" + "ldr b29, [x26, #0x0]\n" + "ldr b28, [x25, #0x0]\n" + "ldr b27, [x24, #0x0]\n" + "ldr b23, [x23, #0x0]\n" + "ldr b21, [x22, #0x0]\n" + "ldr b26, [x21, #0x0]\n" + "ldr b25, [x20, #0x0]\n" + "mov x19, #0x1\n" + "9:" // Odd load end + "sshll v30.8h, v30.8b, #0x0\n" + "sshll v29.8h, v29.8b, #0x0\n" + "sshll v28.8h, v28.8b, #0x0\n" + "sshll v27.8h, v27.8b, #0x0\n" + "sshll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "sshll v21.8h, v21.8b, #0x0\n" + "sshll v26.8h, v26.8b, #0x0\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "sshll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, 
v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 v21.8h, v29.8h, v21.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "10:" // Odds skip + "saddw v0.4s, v0.4s, v1.4h\n" + "str q0, [%x[out_ptr], #0x0]\n" + "saddw2 v31.4s, v31.4s, v1.8h\n" + "str q31, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp new file mode 100644 index 0000000000..bfa8989a4d --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u16_u16_summing.hpp @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, true>( + uint16_t * &out_ptr, const uint16_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v1.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v31.4s, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x24, [%x[in], #0x18]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v0.4s }, [%x[out_ptr]]\n" + "ldr q31, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x8\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0xe\n" + "ble 4f\n" + "uaddw v0.4s, v0.4s, v1.4h\n" + "uaddw2 v31.4s, v31.4s, v1.8h\n" + "mov x19, #0x0\n" + "movi v1.8h, #0x0\n" + "4:" // no_accumulate_16 + "ldr q30, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q29, [x26], #0x10\n" + "ldr q28, [x25], #0x10\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q27, [x24], #0x10\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q24, [x23], #0x10\n" + "zip1 v26.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q25, [x22], #0x10\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q23, [x21], #0x10\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q22, [x20], #0x10\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "prfm pldl1keep, [x21, #0x70]\n" + "add x19, x19, #0x1\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "cmp %x[width], #0x8\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v20.8h, v17.8h\n" + "str q17, [%x[out_ptr], #0x10]\n" + "zip2 v16.8h, v21.8h, v19.8h\n" + "add 
v1.8h, v1.8h, v17.8h\n" + "zip1 v17.8h, v18.8h, v16.8h\n" + "str q17, [%x[out_ptr], #0x20]\n" + "zip2 v16.8h, v18.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "add v1.8h, v1.8h, v17.8h\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x60]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "add v1.8h, v1.8h, v16.8h\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 10f\n" + "tbz %x[width], #2, 7f\n" + "ldr d30, [x27], #0x8\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "ldr d27, [x24], #0x8\n" + "ldr d24, [x23], #0x8\n" + "ldr d25, [x22], #0x8\n" + "ldr d23, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v30.s }[2], [x27], #0x4\n" + "ld1 { v29.s }[2], [x26], #0x4\n" + "ld1 { v28.s }[2], [x25], #0x4\n" + "ld1 { v27.s }[2], [x24], #0x4\n" + "ld1 { v24.s }[2], [x23], #0x4\n" + "ld1 { v25.s }[2], [x22], #0x4\n" + "ld1 { v23.s }[2], [x21], #0x4\n" + "ld1 { v22.s }[2], [x20], #0x4\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.h }[6], [x27]\n" + "ld1 { v29.h }[6], [x26]\n" + "ld1 { v28.h }[6], [x25]\n" + "ld1 { v27.h }[6], [x24]\n" + "ld1 { v24.h }[6], [x23]\n" + "ld1 { v25.h }[6], [x22]\n" + "ld1 { v23.h }[6], [x21]\n" + "ld1 { v22.h }[6], [x20]\n" + "mov x19, #0x7\n" + "b 9f\n" + "6:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.h }[4], [x27]\n" + "ld1 { v29.h }[4], [x26]\n" + "ld1 { v28.h }[4], [x25]\n" + "ld1 { v27.h }[4], [x24]\n" + "ld1 { v24.h }[4], [x23]\n" + "ld1 { v25.h }[4], [x22]\n" + "ld1 { v23.h }[4], [x21]\n" + "ld1 { v22.h }[4], [x20]\n" + "mov x19, #0x5\n" + "b 9f\n" + "7:" // odd_loads_2_0 + "tbz %x[width], #1, 8f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s24, [x23], #0x4\n" + "ldr s25, [x22], #0x4\n" + "ldr s23, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.h }[2], [x27]\n" + "ld1 { v29.h }[2], [x26]\n" + "ld1 { v28.h }[2], [x25]\n" + "ld1 { v27.h }[2], [x24]\n" + "ld1 { v24.h }[2], [x23]\n" + "ld1 { v25.h }[2], [x22]\n" + "ld1 { v23.h }[2], [x21]\n" + "ld1 { v22.h }[2], [x20]\n" + "mov x19, #0x3\n" + "b 9f\n" + "8:" // odd_loads_1_0 + "ldr h30, [x27, #0x0]\n" + "ldr h29, [x26, #0x0]\n" + "ldr h28, [x25, #0x0]\n" + "ldr h27, [x24, #0x0]\n" + "ldr h24, [x23, #0x0]\n" + "ldr h25, [x22, #0x0]\n" + "ldr h23, [x21, #0x0]\n" + "ldr h22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "9:" // Odd load end + "zip1 v26.8h, v30.8h, v24.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v28.8h, v23.8h\n" + "zip1 v20.8h, v26.8h, v18.8h\n" + "zip1 v21.8h, v29.8h, v25.8h\n" + "zip1 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v20.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v17.8h, v20.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v17.8h\n" + "str q17, 
[%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v26.8h, v18.8h\n" + "zip2 v16.8h, v21.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v17.8h, v18.8h, v16.8h\n" + "str q17, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v17.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v16.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v24.8h, v30.8h, v24.8h\n" + "zip2 v21.8h, v28.8h, v23.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v21.8h\n" + "zip2 v20.8h, v29.8h, v25.8h\n" + "zip2 v19.8h, v27.8h, v22.8h\n" + "zip1 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v24.8h, v21.8h\n" + "zip2 v17.8h, v20.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "10:" // Odds skip + "uaddw v0.4s, v0.4s, v1.4h\n" + "str q0, [%x[out_ptr], #0x0]\n" + "uaddw2 v31.4s, v31.4s, v1.8h\n" + "str q31, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp new file mode 100644 index 0000000000..86b90f1898 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
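+ *
+ * (Unsigned counterpart of the int8 to int16 interleave: rows are widened
+ * with USHLL #0 and interleaved eight at a time with zip1/zip2.)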
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, false>( + uint16_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset]\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x8\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr d30, [x27], #0x8\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr d27, [x24], #0x8\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr d23, [x23], #0x8\n" + "ldr d21, [x22], #0x8\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr d26, [x21], #0x8\n" + "ldr d25, [x20], #0x8\n" + "prfm pldl1keep, [x23, #0x70]\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "prfm pldl1keep, [x20, #0x70]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "ushll v21.8h, v21.8b, #0x0\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "ushll v26.8h, v26.8b, #0x0\n" + "ushll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "subs %x[width], %x[width], #0x8\n" + "zip2 v21.8h, v29.8h, v21.8h\n" + "cmp %x[width], #0x8\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x20]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, 
[%x[out_ptr], #0x60]\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 8f\n" + "tbz %x[width], #2, 5f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s23, [x23], #0x4\n" + "ldr s21, [x22], #0x4\n" + "ldr s26, [x21], #0x4\n" + "ldr s25, [x20], #0x4\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v29.h }[2], [x26], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x24], #0x2\n" + "ld1 { v23.h }[2], [x23], #0x2\n" + "ld1 { v21.h }[2], [x22], #0x2\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.b }[6], [x27]\n" + "ld1 { v29.b }[6], [x26]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x24]\n" + "ld1 { v23.b }[6], [x23]\n" + "ld1 { v21.b }[6], [x22]\n" + "ld1 { v26.b }[6], [x21]\n" + "ld1 { v25.b }[6], [x20]\n" + "mov x19, #0x7\n" + "b 7f\n" + "4:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.b }[4], [x27]\n" + "ld1 { v29.b }[4], [x26]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x24]\n" + "ld1 { v23.b }[4], [x23]\n" + "ld1 { v21.b }[4], [x22]\n" + "ld1 { v26.b }[4], [x21]\n" + "ld1 { v25.b }[4], [x20]\n" + "mov x19, #0x5\n" + "b 7f\n" + "5:" // odd_loads_2_0 + "tbz %x[width], #1, 6f\n" + "ldr h30, [x27], #0x2\n" + "ldr h29, [x26], #0x2\n" + "ldr h28, [x25], #0x2\n" + "ldr h27, [x24], #0x2\n" + "ldr h23, [x23], #0x2\n" + "ldr h21, [x22], #0x2\n" + "ldr h26, [x21], #0x2\n" + "ldr h25, [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v30.b }[2], [x27]\n" + "ld1 { v29.b }[2], [x26]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x24]\n" + "ld1 { v23.b }[2], [x23]\n" + "ld1 { v21.b }[2], [x22]\n" + "ld1 { v26.b }[2], [x21]\n" + "ld1 { v25.b }[2], [x20]\n" + "mov x19, #0x3\n" + "b 7f\n" + "6:" // odd_loads_1_0 + "ldr b30, [x27, #0x0]\n" + "ldr b29, [x26, #0x0]\n" + "ldr b28, [x25, #0x0]\n" + "ldr b27, [x24, #0x0]\n" + "ldr b23, [x23, #0x0]\n" + "ldr b21, [x22, #0x0]\n" + "ldr b26, [x21, #0x0]\n" + "ldr b25, [x20, #0x0]\n" + "mov x19, #0x1\n" + "7:" // Odd load end + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "ushll v21.8h, v21.8b, #0x0\n" + "ushll v26.8h, v26.8b, #0x0\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v23.8h, v20.8h\n" 
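+ // Odd-width tail: x19 holds the number of leftover columns and one 16-byte
+ // block of output is written per remaining column.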
+ "zip2 v21.8h, v29.8h, v21.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 8f\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "8:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp new file mode 100644 index 0000000000..cefb70c57b --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16_summing.hpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 1, VLType::None, true>( + uint16_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v1.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v31.4s, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset]\n" + "ldr x24, [%x[in], #0x18]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v0.4s }, [%x[out_ptr]]\n" + "ldr q31, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x8\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0xe\n" + "ble 4f\n" + "uaddw v0.4s, v0.4s, v1.4h\n" + "uaddw2 v31.4s, v31.4s, v1.8h\n" + "mov x19, #0x0\n" + "movi v1.8h, #0x0\n" + "4:" // no_accumulate_16 + "ldr d30, [x27], #0x8\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr d29, [x26], #0x8\n" + "ldr d28, [x25], #0x8\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr d27, [x24], #0x8\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr d23, [x23], #0x8\n" + "ldr d21, [x22], #0x8\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr d26, [x21], #0x8\n" + "ldr d25, [x20], #0x8\n" + "prfm pldl1keep, [x23, #0x70]\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "prfm pldl1keep, [x20, #0x70]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "ushll v21.8h, v21.8b, #0x0\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "ushll v26.8h, v26.8b, #0x0\n" + "ushll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "add x19, x19, #0x1\n" + "zip2 v21.8h, v29.8h, v21.8h\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "cmp %x[width], #0x8\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, 
v17.8h\n" + "str q16, [%x[out_ptr], #0x20]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x40]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x60]\n" + "add v1.8h, v1.8h, v16.8h\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "add v1.8h, v1.8h, v16.8h\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 10f\n" + "tbz %x[width], #2, 7f\n" + "ldr s30, [x27], #0x4\n" + "ldr s29, [x26], #0x4\n" + "ldr s28, [x25], #0x4\n" + "ldr s27, [x24], #0x4\n" + "ldr s23, [x23], #0x4\n" + "ldr s21, [x22], #0x4\n" + "ldr s26, [x21], #0x4\n" + "ldr s25, [x20], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v29.h }[2], [x26], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x24], #0x2\n" + "ld1 { v23.h }[2], [x23], #0x2\n" + "ld1 { v21.h }[2], [x22], #0x2\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "mov x19, #0x6\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.b }[6], [x27]\n" + "ld1 { v29.b }[6], [x26]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x24]\n" + "ld1 { v23.b }[6], [x23]\n" + "ld1 { v21.b }[6], [x22]\n" + "ld1 { v26.b }[6], [x21]\n" + "ld1 { v25.b }[6], [x20]\n" + "mov x19, #0x7\n" + "b 9f\n" + "6:" // odd_loads_1_4 + "mov x19, #0x4\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.b }[4], [x27]\n" + "ld1 { v29.b }[4], [x26]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x24]\n" + "ld1 { v23.b }[4], [x23]\n" + "ld1 { v21.b }[4], [x22]\n" + "ld1 { v26.b }[4], [x21]\n" + "ld1 { v25.b }[4], [x20]\n" + "mov x19, #0x5\n" + "b 9f\n" + "7:" // odd_loads_2_0 + "tbz %x[width], #1, 8f\n" + "ldr h30, [x27], #0x2\n" + "ldr h29, [x26], #0x2\n" + "ldr h28, [x25], #0x2\n" + "ldr h27, [x24], #0x2\n" + "ldr h23, [x23], #0x2\n" + "ldr h21, [x22], #0x2\n" + "ldr h26, [x21], #0x2\n" + "ldr h25, [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 9f\n" + "ld1 { v30.b }[2], [x27]\n" + "ld1 { v29.b }[2], [x26]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x24]\n" + "ld1 { v23.b }[2], [x23]\n" + "ld1 { v21.b }[2], [x22]\n" + "ld1 { v26.b }[2], [x21]\n" + "ld1 { v25.b }[2], [x20]\n" + "mov x19, #0x3\n" + "b 9f\n" + "8:" // odd_loads_1_0 + "ldr b30, [x27, #0x0]\n" + "ldr b29, [x26, #0x0]\n" + "ldr b28, [x25, #0x0]\n" + "ldr b27, [x24, #0x0]\n" + "ldr b23, [x23, #0x0]\n" + "ldr b21, [x22, #0x0]\n" + "ldr b26, [x21, #0x0]\n" + "ldr b25, [x20, #0x0]\n" + "mov x19, #0x1\n" + "9:" // Odd load end + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v23.8h, v23.8b, #0x0\n" + "zip1 v24.8h, v30.8h, v23.8h\n" + "ushll v21.8h, v21.8b, #0x0\n" + "ushll v26.8h, v26.8b, #0x0\n" + "zip1 v20.8h, v28.8h, v26.8h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "zip1 v22.8h, v29.8h, v21.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v24.8h, v20.8h\n" + "zip1 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v22.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add 
v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v24.8h, v20.8h\n" + "zip2 v17.8h, v22.8h, v19.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v23.8h, v30.8h, v23.8h\n" + "zip2 v20.8h, v28.8h, v26.8h\n" + "subs x19, x19, #0x1\n" + "zip1 v18.8h, v23.8h, v20.8h\n" + "zip2 v21.8h, v29.8h, v21.8h\n" + "zip2 v19.8h, v27.8h, v25.8h\n" + "zip1 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v16.8h, v18.8h, v17.8h\n" + "subs x19, x19, #0x1\n" + "add v1.8h, v1.8h, v16.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "beq 10f\n" + "zip2 v18.8h, v23.8h, v20.8h\n" + "zip2 v17.8h, v21.8h, v19.8h\n" + "zip1 v16.8h, v18.8h, v17.8h\n" + "str q16, [%x[out_ptr], #0x0]\n" + "add v1.8h, v1.8h, v16.8h\n" + "add %x[out_ptr], %x[out_ptr], #0x10\n" + "10:" // Odds skip + "uaddw v0.4s, v0.4s, v1.4h\n" + "str q0, [%x[out_ptr], #0x0]\n" + "uaddw2 v31.4s, v31.4s, v1.8h\n" + "str q31, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp new file mode 100644 index 0000000000..5377edc1e1 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 2, VLType::None, false>( + bfloat16 * &out_ptr, const bfloat16 * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x8\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q28, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q27, [x26], #0x10\n" + "ldr q26, [x25], #0x10\n" + "zip1 v23.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q22, [x24], #0x10\n" + "zip2 v26.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q25, [x23], #0x10\n" + "zip1 v20.4s, v27.4s, v22.4s\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q24, [x22], #0x10\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v23.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x22, #0x70]\n" + "zip2 v22.4s, v27.4s, v22.4s\n" + "ldr q21, [x20], #0x10\n" + "zip1 v18.4s, v25.4s, v19.4s\n" + "prfm pldl1keep, [x21, #0x70]\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v20.4s, v26.4s, v22.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v16.4s, v24.4s, v21.4s\n" + "subs %x[width], %x[width], #0x8\n" + "zip1 v17.4s, v18.4s, v16.4s\n" + "cmp %x[width], #0x8\n" + "zip2 v16.4s, v18.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x10]\n" + "zip2 v19.4s, v25.4s, v19.4s\n" + "str q23, [%x[out_ptr], #0x20]\n" + "zip2 v18.4s, v24.4s, v21.4s\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip1 v16.4s, v19.4s, v18.4s\n" + "str q20, [%x[out_ptr], #0x40]\n" + "zip2 v17.4s, v26.4s, v22.4s\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v16.4s, v19.4s, v18.4s\n" + "str q17, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 8f\n" + 
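+ // Tail: 1-7 bf16 elements remain per row; test width bit 2, then bit 1, then bit 0 to load 4-, 2- and 1-element remainders before interleaving the leftover columns below.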
"tbz %x[width], #2, 5f\n" + "ldr d28, [x27], #0x8\n" + "ldr d27, [x26], #0x8\n" + "ldr d26, [x25], #0x8\n" + "ldr d22, [x24], #0x8\n" + "ldr d25, [x23], #0x8\n" + "ldr d24, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d21, [x20], #0x8\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v28.s }[2], [x27], #0x4\n" + "ld1 { v27.s }[2], [x26], #0x4\n" + "ld1 { v26.s }[2], [x25], #0x4\n" + "ld1 { v22.s }[2], [x24], #0x4\n" + "ld1 { v25.s }[2], [x23], #0x4\n" + "ld1 { v24.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v21.s }[2], [x20], #0x4\n" + "mov x19, #0x3\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v28.h }[6], [x27]\n" + "ld1 { v27.h }[6], [x26]\n" + "ld1 { v26.h }[6], [x25]\n" + "ld1 { v22.h }[6], [x24]\n" + "ld1 { v25.h }[6], [x23]\n" + "ld1 { v24.h }[6], [x22]\n" + "ld1 { v19.h }[6], [x21]\n" + "ld1 { v21.h }[6], [x20]\n" + "mov x19, #0x4\n" + "b 7f\n" + "4:" // odd_loads_1_4 + "mov x19, #0x2\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v28.h }[4], [x27]\n" + "ld1 { v27.h }[4], [x26]\n" + "ld1 { v26.h }[4], [x25]\n" + "ld1 { v22.h }[4], [x24]\n" + "ld1 { v25.h }[4], [x23]\n" + "ld1 { v24.h }[4], [x22]\n" + "ld1 { v19.h }[4], [x21]\n" + "ld1 { v21.h }[4], [x20]\n" + "mov x19, #0x3\n" + "b 7f\n" + "5:" // odd_loads_2_0 + "tbz %x[width], #1, 6f\n" + "ldr s28, [x27], #0x4\n" + "ldr s27, [x26], #0x4\n" + "ldr s26, [x25], #0x4\n" + "ldr s22, [x24], #0x4\n" + "ldr s25, [x23], #0x4\n" + "ldr s24, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s21, [x20], #0x4\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v28.h }[2], [x27]\n" + "ld1 { v27.h }[2], [x26]\n" + "ld1 { v26.h }[2], [x25]\n" + "ld1 { v22.h }[2], [x24]\n" + "ld1 { v25.h }[2], [x23]\n" + "ld1 { v24.h }[2], [x22]\n" + "ld1 { v19.h }[2], [x21]\n" + "ld1 { v21.h }[2], [x20]\n" + "mov x19, #0x2\n" + "b 7f\n" + "6:" // odd_loads_1_0 + "ldr h28, [x27, #0x0]\n" + "ldr h27, [x26, #0x0]\n" + "ldr h26, [x25, #0x0]\n" + "ldr h22, [x24, #0x0]\n" + "ldr h25, [x23, #0x0]\n" + "ldr h24, [x22, #0x0]\n" + "ldr h19, [x21, #0x0]\n" + "ldr h21, [x20, #0x0]\n" + "mov x19, #0x1\n" + "7:" // Odd load end + "zip1 v23.4s, v28.4s, v26.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v27.4s, v22.4s\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v18.4s, v25.4s, v19.4s\n" + "zip1 v16.4s, v24.4s, v21.4s\n" + "zip1 v17.4s, v18.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 8f\n" + "zip2 v23.4s, v23.4s, v20.4s\n" + "zip2 v16.4s, v18.4s, v16.4s\n" + "str q23, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 8f\n" + "zip2 v26.4s, v28.4s, v26.4s\n" + "zip2 v22.4s, v27.4s, v22.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v26.4s, v22.4s\n" + "str q20, [%x[out_ptr], #0x0]\n" + "zip2 v19.4s, v25.4s, v19.4s\n" + "zip2 v18.4s, v24.4s, v21.4s\n" + "zip1 v16.4s, v19.4s, v18.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 8f\n" + "zip2 v17.4s, v26.4s, v22.4s\n" + "zip2 v16.4s, v19.4s, v18.4s\n" + "str q17, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "8:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // 
__aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp new file mode 100644 index 0000000000..3aea6a8999 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 2, VLType::None, false>( + float * &out_ptr, const float * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #2\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #2\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #2\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #2\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #2\n" + "add x22, x22, %x[row_offset], LSL #2\n" + "add x21, x21, %x[row_offset], LSL #2\n" + "add x20, x20, %x[row_offset], LSL #2\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x4\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q27, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q24, [x26], #0x10\n" + "zip1 v26.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q25, [x25], #0x10\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "prfm 
pldl1keep, [x25, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q22, [x23], #0x10\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x22], #0x10\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x20], #0x10\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "str q20, [%x[out_ptr], #0x20]\n" + "str q17, [%x[out_ptr], #0x30]\n" + "str q24, [%x[out_ptr], #0x40]\n" + "str q21, [%x[out_ptr], #0x50]\n" + "str q18, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "subs %x[width], %x[width], #0x4\n" + "cmp %x[width], #0x4\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 6f\n" + "tbz %x[width], #1, 4f\n" + "ldr d27, [x27], #0x8\n" + "ldr d24, [x26], #0x8\n" + "ldr d25, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d22, [x23], #0x8\n" + "ldr d18, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d16, [x20], #0x8\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 5f\n" + "ld1 { v27.s }[2], [x27]\n" + "ld1 { v24.s }[2], [x26]\n" + "ld1 { v25.s }[2], [x25]\n" + "ld1 { v21.s }[2], [x24]\n" + "ld1 { v22.s }[2], [x23]\n" + "ld1 { v18.s }[2], [x22]\n" + "ld1 { v19.s }[2], [x21]\n" + "ld1 { v16.s }[2], [x20]\n" + "mov x19, #0x2\n" + "b 5f\n" + "4:" // odd_loads_1_0 + "ldr s27, [x27, #0x0]\n" + "ldr s24, [x26, #0x0]\n" + "ldr s25, [x25, #0x0]\n" + "ldr s21, [x24, #0x0]\n" + "ldr s22, [x23, #0x0]\n" + "ldr s18, [x22, #0x0]\n" + "ldr s19, [x21, #0x0]\n" + "ldr s16, [x20, #0x0]\n" + "mov x19, #0x1\n" + "5:" // Odd load end + "zip1 v26.2d, v27.2d, v24.2d\n" + "subs x19, x19, #0x1\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "str q20, [%x[out_ptr], #0x20]\n" + "str q17, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "beq 6f\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "str q24, [%x[out_ptr], #0x0]\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "str q21, [%x[out_ptr], #0x10]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q18, [%x[out_ptr], #0x20]\n" + "str q16, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "6:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp new file mode 100644 index 0000000000..4780b77a4a --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 4, VLType::None, false>( + bfloat16 * &out_ptr, const bfloat16 * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset], LSL #1\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset], LSL #1\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset], LSL #1\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset], LSL #1\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset], LSL #1\n" + "add x22, x22, %x[row_offset], LSL #1\n" + "add x21, x21, %x[row_offset], LSL #1\n" + "add x20, x20, %x[row_offset], LSL #1\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x8\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q27, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q24, [x26], #0x10\n" + "zip1 v26.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q25, [x25], #0x10\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q22, [x23], #0x10\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x22], #0x10\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x21, #0x70]\n" + 
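+ // Load the row-7 block below; zip1/zip2 on 64-bit lanes keep 4 consecutive bf16 per row together while pairing rows (0,1), (2,3), (4,5) and (6,7).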
"ldr q16, [x20], #0x10\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "str q20, [%x[out_ptr], #0x20]\n" + "str q17, [%x[out_ptr], #0x30]\n" + "str q24, [%x[out_ptr], #0x40]\n" + "str q21, [%x[out_ptr], #0x50]\n" + "str q18, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "subs %x[width], %x[width], #0x8\n" + "cmp %x[width], #0x8\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 8f\n" + "tbz %x[width], #2, 5f\n" + "ldr d27, [x27], #0x8\n" + "ldr d24, [x26], #0x8\n" + "ldr d25, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d22, [x23], #0x8\n" + "ldr d18, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d16, [x20], #0x8\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v27.s }[2], [x27], #0x4\n" + "ld1 { v24.s }[2], [x26], #0x4\n" + "ld1 { v25.s }[2], [x25], #0x4\n" + "ld1 { v21.s }[2], [x24], #0x4\n" + "ld1 { v22.s }[2], [x23], #0x4\n" + "ld1 { v18.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v16.s }[2], [x20], #0x4\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v27.h }[6], [x27]\n" + "ld1 { v24.h }[6], [x26]\n" + "ld1 { v25.h }[6], [x25]\n" + "ld1 { v21.h }[6], [x24]\n" + "ld1 { v22.h }[6], [x23]\n" + "ld1 { v18.h }[6], [x22]\n" + "ld1 { v19.h }[6], [x21]\n" + "ld1 { v16.h }[6], [x20]\n" + "b 7f\n" + "4:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v27.h }[4], [x27]\n" + "ld1 { v24.h }[4], [x26]\n" + "ld1 { v25.h }[4], [x25]\n" + "ld1 { v21.h }[4], [x24]\n" + "ld1 { v22.h }[4], [x23]\n" + "ld1 { v18.h }[4], [x22]\n" + "ld1 { v19.h }[4], [x21]\n" + "ld1 { v16.h }[4], [x20]\n" + "mov x19, #0x2\n" + "b 7f\n" + "5:" // odd_loads_2_0 + "tbz %x[width], #1, 6f\n" + "ldr s27, [x27], #0x4\n" + "ldr s24, [x26], #0x4\n" + "ldr s25, [x25], #0x4\n" + "ldr s21, [x24], #0x4\n" + "ldr s22, [x23], #0x4\n" + "ldr s18, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s16, [x20], #0x4\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 7f\n" + "ld1 { v27.h }[2], [x27]\n" + "ld1 { v24.h }[2], [x26]\n" + "ld1 { v25.h }[2], [x25]\n" + "ld1 { v21.h }[2], [x24]\n" + "ld1 { v22.h }[2], [x23]\n" + "ld1 { v18.h }[2], [x22]\n" + "ld1 { v19.h }[2], [x21]\n" + "ld1 { v16.h }[2], [x20]\n" + "b 7f\n" + "6:" // odd_loads_1_0 + "ldr h27, [x27, #0x0]\n" + "ldr h24, [x26, #0x0]\n" + "ldr h25, [x25, #0x0]\n" + "ldr h21, [x24, #0x0]\n" + "ldr h22, [x23, #0x0]\n" + "ldr h18, [x22, #0x0]\n" + "ldr h19, [x21, #0x0]\n" + "ldr h16, [x20, #0x0]\n" + "mov x19, #0x1\n" + "7:" // Odd load end + "zip1 v26.2d, v27.2d, v24.2d\n" + "subs x19, x19, #0x1\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "str q20, [%x[out_ptr], #0x20]\n" + "str q17, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "beq 8f\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "str q24, [%x[out_ptr], #0x0]\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "str q21, [%x[out_ptr], #0x10]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q18, [%x[out_ptr], #0x20]\n" + "str q16, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "8:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", 
"v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp new file mode 100644 index 0000000000..a9034f5742 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 4, VLType::None, false>( + int8_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset]\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x10\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q28, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q27, [x26], #0x10\n" + "ldr q26, [x25], #0x10\n" + "zip1 v23.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q22, 
[x24], #0x10\n" + "zip2 v26.4s, v28.4s, v26.4s\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q25, [x23], #0x10\n" + "zip1 v20.4s, v27.4s, v22.4s\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q24, [x22], #0x10\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v23.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x22, #0x70]\n" + "zip2 v22.4s, v27.4s, v22.4s\n" + "ldr q21, [x20], #0x10\n" + "zip1 v18.4s, v25.4s, v19.4s\n" + "prfm pldl1keep, [x21, #0x70]\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v20.4s, v26.4s, v22.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v16.4s, v24.4s, v21.4s\n" + "subs %x[width], %x[width], #0x10\n" + "zip1 v17.4s, v18.4s, v16.4s\n" + "cmp %x[width], #0x10\n" + "zip2 v16.4s, v18.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x10]\n" + "zip2 v19.4s, v25.4s, v19.4s\n" + "str q23, [%x[out_ptr], #0x20]\n" + "zip2 v18.4s, v24.4s, v21.4s\n" + "str q16, [%x[out_ptr], #0x30]\n" + "zip1 v16.4s, v19.4s, v18.4s\n" + "str q20, [%x[out_ptr], #0x40]\n" + "zip2 v17.4s, v26.4s, v22.4s\n" + "str q16, [%x[out_ptr], #0x50]\n" + "zip2 v16.4s, v19.4s, v18.4s\n" + "str q17, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 12f\n" + "tbz %x[width], #3, 7f\n" + "ldr d28, [x27], #0x8\n" + "ldr d27, [x26], #0x8\n" + "ldr d26, [x25], #0x8\n" + "ldr d22, [x24], #0x8\n" + "ldr d25, [x23], #0x8\n" + "ldr d24, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d21, [x20], #0x8\n" + "tbz %x[width], #2, 5f\n" + "ld1 { v28.s }[2], [x27], #0x4\n" + "ld1 { v27.s }[2], [x26], #0x4\n" + "ld1 { v26.s }[2], [x25], #0x4\n" + "ld1 { v22.s }[2], [x24], #0x4\n" + "ld1 { v25.s }[2], [x23], #0x4\n" + "ld1 { v24.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v21.s }[2], [x20], #0x4\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v28.h }[6], [x27], #0x2\n" + "ld1 { v27.h }[6], [x26], #0x2\n" + "ld1 { v26.h }[6], [x25], #0x2\n" + "ld1 { v22.h }[6], [x24], #0x2\n" + "ld1 { v25.h }[6], [x23], #0x2\n" + "ld1 { v24.h }[6], [x22], #0x2\n" + "ld1 { v19.h }[6], [x21], #0x2\n" + "ld1 { v21.h }[6], [x20], #0x2\n" + "mov x19, #0x4\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v28.b }[14], [x27]\n" + "ld1 { v27.b }[14], [x26]\n" + "ld1 { v26.b }[14], [x25]\n" + "ld1 { v22.b }[14], [x24]\n" + "ld1 { v25.b }[14], [x23]\n" + "ld1 { v24.b }[14], [x22]\n" + "ld1 { v19.b }[14], [x21]\n" + "ld1 { v21.b }[14], [x20]\n" + "b 11f\n" + "4:" // odd_loads_1_12 + "mov x19, #0x3\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v28.b }[12], [x27]\n" + "ld1 { v27.b }[12], [x26]\n" + "ld1 { v26.b }[12], [x25]\n" + "ld1 { v22.b }[12], [x24]\n" + "ld1 { v25.b }[12], [x23]\n" + "ld1 { v24.b }[12], [x22]\n" + "ld1 { v19.b }[12], [x21]\n" + "ld1 { v21.b }[12], [x20]\n" + "mov x19, #0x4\n" + "b 11f\n" + "5:" // odd_loads_2_8 + "tbz %x[width], #1, 6f\n" + "ld1 { v28.h }[4], [x27], #0x2\n" + "ld1 { v27.h }[4], [x26], #0x2\n" + "ld1 { v26.h }[4], [x25], #0x2\n" + "ld1 { v22.h }[4], [x24], #0x2\n" + "ld1 { v25.h }[4], [x23], #0x2\n" + "ld1 { v24.h }[4], [x22], #0x2\n" + "ld1 { v19.h }[4], [x21], #0x2\n" + "ld1 { v21.h }[4], [x20], #0x2\n" + "mov x19, #0x3\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v28.b }[10], [x27]\n" + "ld1 { v27.b }[10], [x26]\n" + "ld1 { v26.b }[10], [x25]\n" + "ld1 { v22.b }[10], [x24]\n" + "ld1 { v25.b }[10], [x23]\n" + "ld1 { v24.b }[10], [x22]\n" + "ld1 { v19.b }[10], [x21]\n" + "ld1 { v21.b }[10], [x20]\n" + "b 11f\n" + "6:" // odd_loads_1_8 + "mov x19, #0x2\n" + "tbz 
%x[width], #0, 11f\n" + "ld1 { v28.b }[8], [x27]\n" + "ld1 { v27.b }[8], [x26]\n" + "ld1 { v26.b }[8], [x25]\n" + "ld1 { v22.b }[8], [x24]\n" + "ld1 { v25.b }[8], [x23]\n" + "ld1 { v24.b }[8], [x22]\n" + "ld1 { v19.b }[8], [x21]\n" + "ld1 { v21.b }[8], [x20]\n" + "mov x19, #0x3\n" + "b 11f\n" + "7:" // odd_loads_4_0 + "tbz %x[width], #2, 9f\n" + "ldr s28, [x27], #0x4\n" + "ldr s27, [x26], #0x4\n" + "ldr s26, [x25], #0x4\n" + "ldr s22, [x24], #0x4\n" + "ldr s25, [x23], #0x4\n" + "ldr s24, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s21, [x20], #0x4\n" + "tbz %x[width], #1, 8f\n" + "ld1 { v28.h }[2], [x27], #0x2\n" + "ld1 { v27.h }[2], [x26], #0x2\n" + "ld1 { v26.h }[2], [x25], #0x2\n" + "ld1 { v22.h }[2], [x24], #0x2\n" + "ld1 { v25.h }[2], [x23], #0x2\n" + "ld1 { v24.h }[2], [x22], #0x2\n" + "ld1 { v19.h }[2], [x21], #0x2\n" + "ld1 { v21.h }[2], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v28.b }[6], [x27]\n" + "ld1 { v27.b }[6], [x26]\n" + "ld1 { v26.b }[6], [x25]\n" + "ld1 { v22.b }[6], [x24]\n" + "ld1 { v25.b }[6], [x23]\n" + "ld1 { v24.b }[6], [x22]\n" + "ld1 { v19.b }[6], [x21]\n" + "ld1 { v21.b }[6], [x20]\n" + "b 11f\n" + "8:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v28.b }[4], [x27]\n" + "ld1 { v27.b }[4], [x26]\n" + "ld1 { v26.b }[4], [x25]\n" + "ld1 { v22.b }[4], [x24]\n" + "ld1 { v25.b }[4], [x23]\n" + "ld1 { v24.b }[4], [x22]\n" + "ld1 { v19.b }[4], [x21]\n" + "ld1 { v21.b }[4], [x20]\n" + "mov x19, #0x2\n" + "b 11f\n" + "9:" // odd_loads_2_0 + "tbz %x[width], #1, 10f\n" + "ldr h28, [x27], #0x2\n" + "ldr h27, [x26], #0x2\n" + "ldr h26, [x25], #0x2\n" + "ldr h22, [x24], #0x2\n" + "ldr h25, [x23], #0x2\n" + "ldr h24, [x22], #0x2\n" + "ldr h19, [x21], #0x2\n" + "ldr h21, [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v28.b }[2], [x27]\n" + "ld1 { v27.b }[2], [x26]\n" + "ld1 { v26.b }[2], [x25]\n" + "ld1 { v22.b }[2], [x24]\n" + "ld1 { v25.b }[2], [x23]\n" + "ld1 { v24.b }[2], [x22]\n" + "ld1 { v19.b }[2], [x21]\n" + "ld1 { v21.b }[2], [x20]\n" + "b 11f\n" + "10:" // odd_loads_1_0 + "ldr b28, [x27, #0x0]\n" + "ldr b27, [x26, #0x0]\n" + "ldr b26, [x25, #0x0]\n" + "ldr b22, [x24, #0x0]\n" + "ldr b25, [x23, #0x0]\n" + "ldr b24, [x22, #0x0]\n" + "ldr b19, [x21, #0x0]\n" + "ldr b21, [x20, #0x0]\n" + "mov x19, #0x1\n" + "11:" // Odd load end + "zip1 v23.4s, v28.4s, v26.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v27.4s, v22.4s\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v18.4s, v25.4s, v19.4s\n" + "zip1 v16.4s, v24.4s, v21.4s\n" + "zip1 v17.4s, v18.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 12f\n" + "zip2 v23.4s, v23.4s, v20.4s\n" + "zip2 v16.4s, v18.4s, v16.4s\n" + "str q23, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 12f\n" + "zip2 v26.4s, v28.4s, v26.4s\n" + "zip2 v22.4s, v27.4s, v22.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v26.4s, v22.4s\n" + "str q20, [%x[out_ptr], #0x0]\n" + "zip2 v19.4s, v25.4s, v19.4s\n" + "zip2 v18.4s, v24.4s, v21.4s\n" + "zip1 v16.4s, v19.4s, v18.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 12f\n" + "zip2 v17.4s, v26.4s, v22.4s\n" + "zip2 v16.4s, v19.4s, v18.4s\n" + "str q17, [%x[out_ptr], #0x0]\n" + "str q16, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "12:" // Odds skip + + : [out_ptr] "+r" (out_ptr), 
[width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + +template<> +void interleave_block<8, 4, VLType::None, false>( + uint8_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + int8_t * &out_cast = reinterpret_cast<int8_t * &>(out_ptr); + const int8_t * const * in_cast = reinterpret_cast<const int8_t * const *>(in); + + interleave_block<8, 4, VLType::None, false>(out_cast, in_cast, width, height, row_offset, false); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp new file mode 100644 index 0000000000..2831cb79a6 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8_summing.hpp @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 4, VLType::None, true>( + int8_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v1.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v0.8h, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v31.4s, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset]\n" + "movi v30.4s, #0x0\n" + "ldr x24, [%x[in], #0x18]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v31.4s }, [%x[out_ptr]]\n" + "ldr q30, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x10\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0x1e\n" + "ble 4f\n" + "sadalp v31.4s, v1.8h\n" + "movi v1.8h, #0x0\n" + "sadalp v30.4s, v0.8h\n" + "movi v0.8h, #0x0\n" + "mov x19, #0x0\n" + "4:" // no_accumulate_16 + "ldr q29, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q28, [x26], #0x10\n" + "ldr q27, [x25], #0x10\n" + "zip1 v23.4s, v29.4s, v27.4s\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip2 v27.4s, v29.4s, v27.4s\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q26, [x23], #0x10\n" + "zip1 v20.4s, v28.4s, v21.4s\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q25, [x22], #0x10\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v24.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x22, #0x70]\n" + "zip2 v23.4s, v28.4s, v21.4s\n" + "ldr q22, [x20], #0x10\n" + "zip1 v18.4s, v26.4s, v19.4s\n" + "prfm pldl1keep, [x21, #0x70]\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v21.4s, v27.4s, v23.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v17.4s, v25.4s, v22.4s\n" + "sadalp v1.8h, v16.16b\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "add x19, x19, #0x1\n" + "zip2 v20.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v19.4s, v26.4s, v19.4s\n" + "sadalp v0.8h, v16.16b\n" + "zip2 v16.4s, v25.4s, v22.4s\n" + "str q24, [%x[out_ptr], #0x20]\n" + "zip1 v18.4s, v19.4s, v16.4s\n" + "sadalp v1.8h, v24.16b\n" + "zip2 v17.4s, v27.4s, v23.4s\n" + "str q20, [%x[out_ptr], #0x30]\n" + "zip2 v16.4s, v19.4s, v16.4s\n" + "str q21, [%x[out_ptr], #0x40]\n" + "str q18, [%x[out_ptr], #0x50]\n" + "sadalp 
v0.8h, v20.16b\n" + "str q17, [%x[out_ptr], #0x60]\n" + "sadalp v1.8h, v21.16b\n" + "str q16, [%x[out_ptr], #0x70]\n" + "subs %x[width], %x[width], #0x10\n" + "sadalp v0.8h, v18.16b\n" + "cmp %x[width], #0x10\n" + "sadalp v1.8h, v17.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "sadalp v0.8h, v16.16b\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 14f\n" + "tbz %x[width], #3, 9f\n" + "ldr d29, [x27], #0x8\n" + "ldr d28, [x26], #0x8\n" + "ldr d27, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d26, [x23], #0x8\n" + "ldr d25, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "tbz %x[width], #2, 7f\n" + "ld1 { v29.s }[2], [x27], #0x4\n" + "ld1 { v28.s }[2], [x26], #0x4\n" + "ld1 { v27.s }[2], [x25], #0x4\n" + "ld1 { v21.s }[2], [x24], #0x4\n" + "ld1 { v26.s }[2], [x23], #0x4\n" + "ld1 { v25.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v22.s }[2], [x20], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v29.h }[6], [x27], #0x2\n" + "ld1 { v28.h }[6], [x26], #0x2\n" + "ld1 { v27.h }[6], [x25], #0x2\n" + "ld1 { v21.h }[6], [x24], #0x2\n" + "ld1 { v26.h }[6], [x23], #0x2\n" + "ld1 { v25.h }[6], [x22], #0x2\n" + "ld1 { v19.h }[6], [x21], #0x2\n" + "ld1 { v22.h }[6], [x20], #0x2\n" + "mov x19, #0x4\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[14], [x27]\n" + "ld1 { v28.b }[14], [x26]\n" + "ld1 { v27.b }[14], [x25]\n" + "ld1 { v21.b }[14], [x24]\n" + "ld1 { v26.b }[14], [x23]\n" + "ld1 { v25.b }[14], [x22]\n" + "ld1 { v19.b }[14], [x21]\n" + "ld1 { v22.b }[14], [x20]\n" + "b 13f\n" + "6:" // odd_loads_1_12 + "mov x19, #0x3\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[12], [x27]\n" + "ld1 { v28.b }[12], [x26]\n" + "ld1 { v27.b }[12], [x25]\n" + "ld1 { v21.b }[12], [x24]\n" + "ld1 { v26.b }[12], [x23]\n" + "ld1 { v25.b }[12], [x22]\n" + "ld1 { v19.b }[12], [x21]\n" + "ld1 { v22.b }[12], [x20]\n" + "mov x19, #0x4\n" + "b 13f\n" + "7:" // odd_loads_2_8 + "tbz %x[width], #1, 8f\n" + "ld1 { v29.h }[4], [x27], #0x2\n" + "ld1 { v28.h }[4], [x26], #0x2\n" + "ld1 { v27.h }[4], [x25], #0x2\n" + "ld1 { v21.h }[4], [x24], #0x2\n" + "ld1 { v26.h }[4], [x23], #0x2\n" + "ld1 { v25.h }[4], [x22], #0x2\n" + "ld1 { v19.h }[4], [x21], #0x2\n" + "ld1 { v22.h }[4], [x20], #0x2\n" + "mov x19, #0x3\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[10], [x27]\n" + "ld1 { v28.b }[10], [x26]\n" + "ld1 { v27.b }[10], [x25]\n" + "ld1 { v21.b }[10], [x24]\n" + "ld1 { v26.b }[10], [x23]\n" + "ld1 { v25.b }[10], [x22]\n" + "ld1 { v19.b }[10], [x21]\n" + "ld1 { v22.b }[10], [x20]\n" + "b 13f\n" + "8:" // odd_loads_1_8 + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[8], [x27]\n" + "ld1 { v28.b }[8], [x26]\n" + "ld1 { v27.b }[8], [x25]\n" + "ld1 { v21.b }[8], [x24]\n" + "ld1 { v26.b }[8], [x23]\n" + "ld1 { v25.b }[8], [x22]\n" + "ld1 { v19.b }[8], [x21]\n" + "ld1 { v22.b }[8], [x20]\n" + "mov x19, #0x3\n" + "b 13f\n" + "9:" // odd_loads_4_0 + "tbz %x[width], #2, 11f\n" + "ldr s29, [x27], #0x4\n" + "ldr s28, [x26], #0x4\n" + "ldr s27, [x25], #0x4\n" + "ldr s21, [x24], #0x4\n" + "ldr s26, [x23], #0x4\n" + "ldr s25, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "tbz %x[width], #1, 10f\n" + "ld1 { v29.h }[2], [x27], #0x2\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "ld1 { v27.h }[2], [x25], #0x2\n" + "ld1 { v21.h }[2], [x24], #0x2\n" + "ld1 { v26.h }[2], [x23], #0x2\n" + "ld1 { v25.h }[2], [x22], #0x2\n" + "ld1 { v19.h }[2], [x21], #0x2\n" + "ld1 { v22.h }[2], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { 
v29.b }[6], [x27]\n" + "ld1 { v28.b }[6], [x26]\n" + "ld1 { v27.b }[6], [x25]\n" + "ld1 { v21.b }[6], [x24]\n" + "ld1 { v26.b }[6], [x23]\n" + "ld1 { v25.b }[6], [x22]\n" + "ld1 { v19.b }[6], [x21]\n" + "ld1 { v22.b }[6], [x20]\n" + "b 13f\n" + "10:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[4], [x27]\n" + "ld1 { v28.b }[4], [x26]\n" + "ld1 { v27.b }[4], [x25]\n" + "ld1 { v21.b }[4], [x24]\n" + "ld1 { v26.b }[4], [x23]\n" + "ld1 { v25.b }[4], [x22]\n" + "ld1 { v19.b }[4], [x21]\n" + "ld1 { v22.b }[4], [x20]\n" + "mov x19, #0x2\n" + "b 13f\n" + "11:" // odd_loads_2_0 + "tbz %x[width], #1, 12f\n" + "ldr h29, [x27], #0x2\n" + "ldr h28, [x26], #0x2\n" + "ldr h27, [x25], #0x2\n" + "ldr h21, [x24], #0x2\n" + "ldr h26, [x23], #0x2\n" + "ldr h25, [x22], #0x2\n" + "ldr h19, [x21], #0x2\n" + "ldr h22, [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[2], [x27]\n" + "ld1 { v28.b }[2], [x26]\n" + "ld1 { v27.b }[2], [x25]\n" + "ld1 { v21.b }[2], [x24]\n" + "ld1 { v26.b }[2], [x23]\n" + "ld1 { v25.b }[2], [x22]\n" + "ld1 { v19.b }[2], [x21]\n" + "ld1 { v22.b }[2], [x20]\n" + "b 13f\n" + "12:" // odd_loads_1_0 + "ldr b29, [x27, #0x0]\n" + "ldr b28, [x26, #0x0]\n" + "ldr b27, [x25, #0x0]\n" + "ldr b21, [x24, #0x0]\n" + "ldr b26, [x23, #0x0]\n" + "ldr b25, [x22, #0x0]\n" + "ldr b19, [x21, #0x0]\n" + "ldr b22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "13:" // Odd load end + "zip1 v23.4s, v29.4s, v27.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v28.4s, v21.4s\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v18.4s, v26.4s, v19.4s\n" + "sadalp v1.8h, v16.16b\n" + "zip1 v17.4s, v25.4s, v22.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "sadalp v0.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 14f\n" + "zip2 v24.4s, v23.4s, v20.4s\n" + "zip2 v20.4s, v18.4s, v17.4s\n" + "str q24, [%x[out_ptr], #0x0]\n" + "sadalp v1.8h, v24.16b\n" + "str q20, [%x[out_ptr], #0x10]\n" + "sadalp v0.8h, v20.16b\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 14f\n" + "zip2 v27.4s, v29.4s, v27.4s\n" + "zip2 v23.4s, v28.4s, v21.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v21.4s, v27.4s, v23.4s\n" + "str q21, [%x[out_ptr], #0x0]\n" + "zip2 v19.4s, v26.4s, v19.4s\n" + "sadalp v1.8h, v21.16b\n" + "zip2 v16.4s, v25.4s, v22.4s\n" + "zip1 v18.4s, v19.4s, v16.4s\n" + "str q18, [%x[out_ptr], #0x10]\n" + "sadalp v0.8h, v18.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 14f\n" + "zip2 v17.4s, v27.4s, v23.4s\n" + "zip2 v16.4s, v19.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x0]\n" + "sadalp v1.8h, v17.16b\n" + "str q16, [%x[out_ptr], #0x10]\n" + "sadalp v0.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "14:" // Odds skip + "sadalp v31.4s, v1.8h\n" + "sadalp v30.4s, v0.8h\n" + "str q31, [%x[out_ptr], #0x0]\n" + "str q30, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp 
b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp new file mode 100644 index 0000000000..7c7857bcd0 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_u8_u8_summing.hpp @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 4, VLType::None, true>( + uint8_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v1.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v0.8h, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v31.4s, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset]\n" + "movi v30.4s, #0x0\n" + "ldr x24, [%x[in], #0x18]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v31.4s }, [%x[out_ptr]]\n" + "ldr q30, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x10\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0x1e\n" + "ble 4f\n" + "uadalp v31.4s, v1.8h\n" + "movi v1.8h, #0x0\n" + "uadalp v30.4s, v0.8h\n" + 
"movi v0.8h, #0x0\n" + "mov x19, #0x0\n" + "4:" // no_accumulate_16 + "ldr q29, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q28, [x26], #0x10\n" + "ldr q27, [x25], #0x10\n" + "zip1 v23.4s, v29.4s, v27.4s\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip2 v27.4s, v29.4s, v27.4s\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q26, [x23], #0x10\n" + "zip1 v20.4s, v28.4s, v21.4s\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q25, [x22], #0x10\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v24.4s, v23.4s, v20.4s\n" + "prfm pldl1keep, [x22, #0x70]\n" + "zip2 v23.4s, v28.4s, v21.4s\n" + "ldr q22, [x20], #0x10\n" + "zip1 v18.4s, v26.4s, v19.4s\n" + "prfm pldl1keep, [x21, #0x70]\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v21.4s, v27.4s, v23.4s\n" + "prfm pldl1keep, [x20, #0x70]\n" + "zip1 v17.4s, v25.4s, v22.4s\n" + "uadalp v1.8h, v16.16b\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "add x19, x19, #0x1\n" + "zip2 v20.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "zip2 v19.4s, v26.4s, v19.4s\n" + "uadalp v0.8h, v16.16b\n" + "zip2 v16.4s, v25.4s, v22.4s\n" + "str q24, [%x[out_ptr], #0x20]\n" + "zip1 v18.4s, v19.4s, v16.4s\n" + "uadalp v1.8h, v24.16b\n" + "zip2 v17.4s, v27.4s, v23.4s\n" + "str q20, [%x[out_ptr], #0x30]\n" + "zip2 v16.4s, v19.4s, v16.4s\n" + "str q21, [%x[out_ptr], #0x40]\n" + "str q18, [%x[out_ptr], #0x50]\n" + "uadalp v0.8h, v20.16b\n" + "str q17, [%x[out_ptr], #0x60]\n" + "uadalp v1.8h, v21.16b\n" + "str q16, [%x[out_ptr], #0x70]\n" + "subs %x[width], %x[width], #0x10\n" + "uadalp v0.8h, v18.16b\n" + "cmp %x[width], #0x10\n" + "uadalp v1.8h, v17.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "uadalp v0.8h, v16.16b\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 14f\n" + "tbz %x[width], #3, 9f\n" + "ldr d29, [x27], #0x8\n" + "ldr d28, [x26], #0x8\n" + "ldr d27, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d26, [x23], #0x8\n" + "ldr d25, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d22, [x20], #0x8\n" + "tbz %x[width], #2, 7f\n" + "ld1 { v29.s }[2], [x27], #0x4\n" + "ld1 { v28.s }[2], [x26], #0x4\n" + "ld1 { v27.s }[2], [x25], #0x4\n" + "ld1 { v21.s }[2], [x24], #0x4\n" + "ld1 { v26.s }[2], [x23], #0x4\n" + "ld1 { v25.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v22.s }[2], [x20], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v29.h }[6], [x27], #0x2\n" + "ld1 { v28.h }[6], [x26], #0x2\n" + "ld1 { v27.h }[6], [x25], #0x2\n" + "ld1 { v21.h }[6], [x24], #0x2\n" + "ld1 { v26.h }[6], [x23], #0x2\n" + "ld1 { v25.h }[6], [x22], #0x2\n" + "ld1 { v19.h }[6], [x21], #0x2\n" + "ld1 { v22.h }[6], [x20], #0x2\n" + "mov x19, #0x4\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[14], [x27]\n" + "ld1 { v28.b }[14], [x26]\n" + "ld1 { v27.b }[14], [x25]\n" + "ld1 { v21.b }[14], [x24]\n" + "ld1 { v26.b }[14], [x23]\n" + "ld1 { v25.b }[14], [x22]\n" + "ld1 { v19.b }[14], [x21]\n" + "ld1 { v22.b }[14], [x20]\n" + "b 13f\n" + "6:" // odd_loads_1_12 + "mov x19, #0x3\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[12], [x27]\n" + "ld1 { v28.b }[12], [x26]\n" + "ld1 { v27.b }[12], [x25]\n" + "ld1 { v21.b }[12], [x24]\n" + "ld1 { v26.b }[12], [x23]\n" + "ld1 { v25.b }[12], [x22]\n" + "ld1 { v19.b }[12], [x21]\n" + "ld1 { v22.b }[12], [x20]\n" + "mov x19, #0x4\n" + "b 13f\n" + "7:" // odd_loads_2_8 + "tbz %x[width], #1, 8f\n" + "ld1 { v29.h }[4], [x27], #0x2\n" + "ld1 { v28.h }[4], [x26], #0x2\n" + "ld1 { v27.h }[4], [x25], #0x2\n" + "ld1 { v21.h }[4], [x24], 
#0x2\n" + "ld1 { v26.h }[4], [x23], #0x2\n" + "ld1 { v25.h }[4], [x22], #0x2\n" + "ld1 { v19.h }[4], [x21], #0x2\n" + "ld1 { v22.h }[4], [x20], #0x2\n" + "mov x19, #0x3\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[10], [x27]\n" + "ld1 { v28.b }[10], [x26]\n" + "ld1 { v27.b }[10], [x25]\n" + "ld1 { v21.b }[10], [x24]\n" + "ld1 { v26.b }[10], [x23]\n" + "ld1 { v25.b }[10], [x22]\n" + "ld1 { v19.b }[10], [x21]\n" + "ld1 { v22.b }[10], [x20]\n" + "b 13f\n" + "8:" // odd_loads_1_8 + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[8], [x27]\n" + "ld1 { v28.b }[8], [x26]\n" + "ld1 { v27.b }[8], [x25]\n" + "ld1 { v21.b }[8], [x24]\n" + "ld1 { v26.b }[8], [x23]\n" + "ld1 { v25.b }[8], [x22]\n" + "ld1 { v19.b }[8], [x21]\n" + "ld1 { v22.b }[8], [x20]\n" + "mov x19, #0x3\n" + "b 13f\n" + "9:" // odd_loads_4_0 + "tbz %x[width], #2, 11f\n" + "ldr s29, [x27], #0x4\n" + "ldr s28, [x26], #0x4\n" + "ldr s27, [x25], #0x4\n" + "ldr s21, [x24], #0x4\n" + "ldr s26, [x23], #0x4\n" + "ldr s25, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s22, [x20], #0x4\n" + "tbz %x[width], #1, 10f\n" + "ld1 { v29.h }[2], [x27], #0x2\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "ld1 { v27.h }[2], [x25], #0x2\n" + "ld1 { v21.h }[2], [x24], #0x2\n" + "ld1 { v26.h }[2], [x23], #0x2\n" + "ld1 { v25.h }[2], [x22], #0x2\n" + "ld1 { v19.h }[2], [x21], #0x2\n" + "ld1 { v22.h }[2], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[6], [x27]\n" + "ld1 { v28.b }[6], [x26]\n" + "ld1 { v27.b }[6], [x25]\n" + "ld1 { v21.b }[6], [x24]\n" + "ld1 { v26.b }[6], [x23]\n" + "ld1 { v25.b }[6], [x22]\n" + "ld1 { v19.b }[6], [x21]\n" + "ld1 { v22.b }[6], [x20]\n" + "b 13f\n" + "10:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[4], [x27]\n" + "ld1 { v28.b }[4], [x26]\n" + "ld1 { v27.b }[4], [x25]\n" + "ld1 { v21.b }[4], [x24]\n" + "ld1 { v26.b }[4], [x23]\n" + "ld1 { v25.b }[4], [x22]\n" + "ld1 { v19.b }[4], [x21]\n" + "ld1 { v22.b }[4], [x20]\n" + "mov x19, #0x2\n" + "b 13f\n" + "11:" // odd_loads_2_0 + "tbz %x[width], #1, 12f\n" + "ldr h29, [x27], #0x2\n" + "ldr h28, [x26], #0x2\n" + "ldr h27, [x25], #0x2\n" + "ldr h21, [x24], #0x2\n" + "ldr h26, [x23], #0x2\n" + "ldr h25, [x22], #0x2\n" + "ldr h19, [x21], #0x2\n" + "ldr h22, [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v29.b }[2], [x27]\n" + "ld1 { v28.b }[2], [x26]\n" + "ld1 { v27.b }[2], [x25]\n" + "ld1 { v21.b }[2], [x24]\n" + "ld1 { v26.b }[2], [x23]\n" + "ld1 { v25.b }[2], [x22]\n" + "ld1 { v19.b }[2], [x21]\n" + "ld1 { v22.b }[2], [x20]\n" + "b 13f\n" + "12:" // odd_loads_1_0 + "ldr b29, [x27, #0x0]\n" + "ldr b28, [x26, #0x0]\n" + "ldr b27, [x25, #0x0]\n" + "ldr b21, [x24, #0x0]\n" + "ldr b26, [x23, #0x0]\n" + "ldr b25, [x22, #0x0]\n" + "ldr b19, [x21, #0x0]\n" + "ldr b22, [x20, #0x0]\n" + "mov x19, #0x1\n" + "13:" // Odd load end + "zip1 v23.4s, v29.4s, v27.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v20.4s, v28.4s, v21.4s\n" + "zip1 v16.4s, v23.4s, v20.4s\n" + "str q16, [%x[out_ptr], #0x0]\n" + "zip1 v18.4s, v26.4s, v19.4s\n" + "uadalp v1.8h, v16.16b\n" + "zip1 v17.4s, v25.4s, v22.4s\n" + "zip1 v16.4s, v18.4s, v17.4s\n" + "str q16, [%x[out_ptr], #0x10]\n" + "uadalp v0.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 14f\n" + "zip2 v24.4s, v23.4s, v20.4s\n" + "zip2 v20.4s, v18.4s, v17.4s\n" + "str q24, [%x[out_ptr], #0x0]\n" + "uadalp v1.8h, v24.16b\n" + "str q20, [%x[out_ptr], #0x10]\n" + "uadalp v0.8h, v20.16b\n" + "subs x19, x19, #0x1\n" + "add %x[out_ptr], 
%x[out_ptr], #0x20\n" + "beq 14f\n" + "zip2 v27.4s, v29.4s, v27.4s\n" + "zip2 v23.4s, v28.4s, v21.4s\n" + "subs x19, x19, #0x1\n" + "zip1 v21.4s, v27.4s, v23.4s\n" + "str q21, [%x[out_ptr], #0x0]\n" + "zip2 v19.4s, v26.4s, v19.4s\n" + "uadalp v1.8h, v21.16b\n" + "zip2 v16.4s, v25.4s, v22.4s\n" + "zip1 v18.4s, v19.4s, v16.4s\n" + "str q18, [%x[out_ptr], #0x10]\n" + "uadalp v0.8h, v18.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "beq 14f\n" + "zip2 v17.4s, v27.4s, v23.4s\n" + "zip2 v16.4s, v19.4s, v16.4s\n" + "str q17, [%x[out_ptr], #0x0]\n" + "uadalp v1.8h, v17.16b\n" + "str q16, [%x[out_ptr], #0x10]\n" + "uadalp v0.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + "14:" // Odds skip + "uadalp v31.4s, v1.8h\n" + "uadalp v30.4s, v0.8h\n" + "str q31, [%x[out_ptr], #0x0]\n" + "str q30, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp new file mode 100644 index 0000000000..704a4c9210 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 8, VLType::None, false>( + int8_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + __asm__ __volatile__( + "ldr x27, [%x[in], #0x0]\n" + "cmp %x[height], #0x8\n" + "ldr x26, [%x[in], #0x8]\n" + "add x27, x27, %x[row_offset]\n" + "ldr x25, [%x[in], #0x10]\n" + "ldr x24, [%x[in], #0x18]\n" + "add x26, x26, %x[row_offset]\n" + "ldr x23, [%x[in], #0x20]\n" + "add x25, x25, %x[row_offset]\n" + "ldr x22, [%x[in], #0x28]\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "prfm pldl1keep, [x27, #0x0]\n" + "cmp %x[width], #0x10\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "blt 3f\n" + "2:" // Main loop head + "ldr q27, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q24, [x26], #0x10\n" + "zip1 v26.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q25, [x25], #0x10\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q22, [x23], #0x10\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x22], #0x10\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x20], #0x10\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "str q20, [%x[out_ptr], #0x20]\n" + "str q17, [%x[out_ptr], #0x30]\n" + "str q24, [%x[out_ptr], #0x40]\n" + "str q21, [%x[out_ptr], #0x50]\n" + "str q18, [%x[out_ptr], #0x60]\n" + "str q16, [%x[out_ptr], #0x70]\n" + "subs %x[width], %x[width], #0x10\n" + "cmp %x[width], #0x10\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 2b\n" + "3:" // Main loop skip + "cbz %x[width], 12f\n" + "tbz %x[width], #3, 7f\n" + "ldr d27, [x27], #0x8\n" + "ldr d24, [x26], #0x8\n" + "ldr d25, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d22, [x23], #0x8\n" + "ldr d18, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d16, [x20], #0x8\n" + "tbz %x[width], #2, 5f\n" + "ld1 { v27.s }[2], [x27], #0x4\n" + "ld1 { v24.s }[2], [x26], #0x4\n" + "ld1 { v25.s }[2], [x25], #0x4\n" + "ld1 { v21.s }[2], [x24], #0x4\n" + "ld1 { v22.s }[2], [x23], #0x4\n" + "ld1 { v18.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v16.s }[2], [x20], #0x4\n" + "tbz %x[width], #1, 4f\n" + "ld1 { v27.h }[6], 
[x27], #0x2\n" + "ld1 { v24.h }[6], [x26], #0x2\n" + "ld1 { v25.h }[6], [x25], #0x2\n" + "ld1 { v21.h }[6], [x24], #0x2\n" + "ld1 { v22.h }[6], [x23], #0x2\n" + "ld1 { v18.h }[6], [x22], #0x2\n" + "ld1 { v19.h }[6], [x21], #0x2\n" + "ld1 { v16.h }[6], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[14], [x27]\n" + "ld1 { v24.b }[14], [x26]\n" + "ld1 { v25.b }[14], [x25]\n" + "ld1 { v21.b }[14], [x24]\n" + "ld1 { v22.b }[14], [x23]\n" + "ld1 { v18.b }[14], [x22]\n" + "ld1 { v19.b }[14], [x21]\n" + "ld1 { v16.b }[14], [x20]\n" + "b 11f\n" + "4:" // odd_loads_1_12 + "mov x19, #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[12], [x27]\n" + "ld1 { v24.b }[12], [x26]\n" + "ld1 { v25.b }[12], [x25]\n" + "ld1 { v21.b }[12], [x24]\n" + "ld1 { v22.b }[12], [x23]\n" + "ld1 { v18.b }[12], [x22]\n" + "ld1 { v19.b }[12], [x21]\n" + "ld1 { v16.b }[12], [x20]\n" + "b 11f\n" + "5:" // odd_loads_2_8 + "tbz %x[width], #1, 6f\n" + "ld1 { v27.h }[4], [x27], #0x2\n" + "ld1 { v24.h }[4], [x26], #0x2\n" + "ld1 { v25.h }[4], [x25], #0x2\n" + "ld1 { v21.h }[4], [x24], #0x2\n" + "ld1 { v22.h }[4], [x23], #0x2\n" + "ld1 { v18.h }[4], [x22], #0x2\n" + "ld1 { v19.h }[4], [x21], #0x2\n" + "ld1 { v16.h }[4], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[10], [x27]\n" + "ld1 { v24.b }[10], [x26]\n" + "ld1 { v25.b }[10], [x25]\n" + "ld1 { v21.b }[10], [x24]\n" + "ld1 { v22.b }[10], [x23]\n" + "ld1 { v18.b }[10], [x22]\n" + "ld1 { v19.b }[10], [x21]\n" + "ld1 { v16.b }[10], [x20]\n" + "b 11f\n" + "6:" // odd_loads_1_8 + "mov x19, #0x1\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[8], [x27]\n" + "ld1 { v24.b }[8], [x26]\n" + "ld1 { v25.b }[8], [x25]\n" + "ld1 { v21.b }[8], [x24]\n" + "ld1 { v22.b }[8], [x23]\n" + "ld1 { v18.b }[8], [x22]\n" + "ld1 { v19.b }[8], [x21]\n" + "ld1 { v16.b }[8], [x20]\n" + "mov x19, #0x2\n" + "b 11f\n" + "7:" // odd_loads_4_0 + "tbz %x[width], #2, 9f\n" + "ldr s27, [x27], #0x4\n" + "ldr s24, [x26], #0x4\n" + "ldr s25, [x25], #0x4\n" + "ldr s21, [x24], #0x4\n" + "ldr s22, [x23], #0x4\n" + "ldr s18, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s16, [x20], #0x4\n" + "tbz %x[width], #1, 8f\n" + "ld1 { v27.h }[2], [x27], #0x2\n" + "ld1 { v24.h }[2], [x26], #0x2\n" + "ld1 { v25.h }[2], [x25], #0x2\n" + "ld1 { v21.h }[2], [x24], #0x2\n" + "ld1 { v22.h }[2], [x23], #0x2\n" + "ld1 { v18.h }[2], [x22], #0x2\n" + "ld1 { v19.h }[2], [x21], #0x2\n" + "ld1 { v16.h }[2], [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[6], [x27]\n" + "ld1 { v24.b }[6], [x26]\n" + "ld1 { v25.b }[6], [x25]\n" + "ld1 { v21.b }[6], [x24]\n" + "ld1 { v22.b }[6], [x23]\n" + "ld1 { v18.b }[6], [x22]\n" + "ld1 { v19.b }[6], [x21]\n" + "ld1 { v16.b }[6], [x20]\n" + "b 11f\n" + "8:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[4], [x27]\n" + "ld1 { v24.b }[4], [x26]\n" + "ld1 { v25.b }[4], [x25]\n" + "ld1 { v21.b }[4], [x24]\n" + "ld1 { v22.b }[4], [x23]\n" + "ld1 { v18.b }[4], [x22]\n" + "ld1 { v19.b }[4], [x21]\n" + "ld1 { v16.b }[4], [x20]\n" + "b 11f\n" + "9:" // odd_loads_2_0 + "tbz %x[width], #1, 10f\n" + "ldr h27, [x27], #0x2\n" + "ldr h24, [x26], #0x2\n" + "ldr h25, [x25], #0x2\n" + "ldr h21, [x24], #0x2\n" + "ldr h22, [x23], #0x2\n" + "ldr h18, [x22], #0x2\n" + "ldr h19, [x21], #0x2\n" + "ldr h16, [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 11f\n" + "ld1 { v27.b }[2], [x27]\n" + "ld1 { v24.b }[2], [x26]\n" + "ld1 { v25.b }[2], [x25]\n" + "ld1 { v21.b }[2], [x24]\n" + 
"ld1 { v22.b }[2], [x23]\n" + "ld1 { v18.b }[2], [x22]\n" + "ld1 { v19.b }[2], [x21]\n" + "ld1 { v16.b }[2], [x20]\n" + "b 11f\n" + "10:" // odd_loads_1_0 + "ldr b27, [x27, #0x0]\n" + "ldr b24, [x26, #0x0]\n" + "ldr b25, [x25, #0x0]\n" + "ldr b21, [x24, #0x0]\n" + "ldr b22, [x23, #0x0]\n" + "ldr b18, [x22, #0x0]\n" + "ldr b19, [x21, #0x0]\n" + "ldr b16, [x20, #0x0]\n" + "mov x19, #0x1\n" + "11:" // Odd load end + "zip1 v26.2d, v27.2d, v24.2d\n" + "subs x19, x19, #0x1\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "str q20, [%x[out_ptr], #0x20]\n" + "str q17, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "beq 12f\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "str q24, [%x[out_ptr], #0x0]\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "str q21, [%x[out_ptr], #0x10]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q18, [%x[out_ptr], #0x20]\n" + "str q16, [%x[out_ptr], #0x30]\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "12:" // Odds skip + + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + +template<> +void interleave_block<8, 8, VLType::None, false>( + uint8_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool +) +{ + int8_t * &out_cast = reinterpret_cast<int8_t * &>(out_ptr); + const int8_t * const * in_cast = reinterpret_cast<const int8_t * const *>(in); + + interleave_block<8, 8, VLType::None, false>(out_cast, in_cast, width, height, row_offset, false); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp new file mode 100644 index 0000000000..2317ece790 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8_summing.hpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 8, VLType::None, true>( + int8_t * &out_ptr, const int8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v5.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v4.8h, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v3.8h, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset]\n" + "movi v2.8h, #0x0\n" + "ldr x24, [%x[in], #0x18]\n" + "movi v1.4s, #0x0\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset]\n" + "movi v0.4s, #0x0\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset]\n" + "movi v31.4s, #0x0\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "movi v30.4s, #0x0\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "movi v29.4s, #0x0\n" + "prfm pldl1keep, [x27, #0x0]\n" + "movi v28.4s, #0x0\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v29.4s }, [%x[out_ptr]]\n" + "ldr q28, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x10\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0x3e\n" + "ble 4f\n" + "sadalp v1.4s, v5.8h\n" + "movi v5.8h, #0x0\n" + "sadalp v0.4s, v4.8h\n" + "movi v4.8h, #0x0\n" + "sadalp v31.4s, v3.8h\n" + "movi v3.8h, #0x0\n" + "sadalp v30.4s, v2.8h\n" + "movi v2.8h, #0x0\n" + "mov x19, #0x0\n" + "4:" // no_accumulate_16 + "ldr q27, [x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q24, [x26], #0x10\n" + "zip1 v26.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q25, [x25], #0x10\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q22, [x23], #0x10\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x22], #0x10\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x20], #0x10\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "sadalp v5.8h, v26.16b\n" + "str q23, [%x[out_ptr], #0x10]\n" + "sadalp v4.8h, v23.16b\n" + "str q20, [%x[out_ptr], #0x20]\n" + "sadalp v3.8h, v20.16b\n" + "str q17, [%x[out_ptr], #0x30]\n" + "sadalp v2.8h, v17.16b\n" + "str q24, [%x[out_ptr], #0x40]\n" + "sadalp v5.8h, v24.16b\n" + "str q21, [%x[out_ptr], #0x50]\n" + "sadalp v4.8h, 
v21.16b\n" + "str q18, [%x[out_ptr], #0x60]\n" + "sadalp v3.8h, v18.16b\n" + "str q16, [%x[out_ptr], #0x70]\n" + "sadalp v2.8h, v16.16b\n" + "add x19, x19, #0x1\n" + "subs %x[width], %x[width], #0x10\n" + "cmp %x[width], #0x10\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 14f\n" + "tbz %x[width], #3, 9f\n" + "ldr d27, [x27], #0x8\n" + "ldr d24, [x26], #0x8\n" + "ldr d25, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d22, [x23], #0x8\n" + "ldr d18, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d16, [x20], #0x8\n" + "tbz %x[width], #2, 7f\n" + "ld1 { v27.s }[2], [x27], #0x4\n" + "ld1 { v24.s }[2], [x26], #0x4\n" + "ld1 { v25.s }[2], [x25], #0x4\n" + "ld1 { v21.s }[2], [x24], #0x4\n" + "ld1 { v22.s }[2], [x23], #0x4\n" + "ld1 { v18.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v16.s }[2], [x20], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v27.h }[6], [x27], #0x2\n" + "ld1 { v24.h }[6], [x26], #0x2\n" + "ld1 { v25.h }[6], [x25], #0x2\n" + "ld1 { v21.h }[6], [x24], #0x2\n" + "ld1 { v22.h }[6], [x23], #0x2\n" + "ld1 { v18.h }[6], [x22], #0x2\n" + "ld1 { v19.h }[6], [x21], #0x2\n" + "ld1 { v16.h }[6], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[14], [x27]\n" + "ld1 { v24.b }[14], [x26]\n" + "ld1 { v25.b }[14], [x25]\n" + "ld1 { v21.b }[14], [x24]\n" + "ld1 { v22.b }[14], [x23]\n" + "ld1 { v18.b }[14], [x22]\n" + "ld1 { v19.b }[14], [x21]\n" + "ld1 { v16.b }[14], [x20]\n" + "b 13f\n" + "6:" // odd_loads_1_12 + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[12], [x27]\n" + "ld1 { v24.b }[12], [x26]\n" + "ld1 { v25.b }[12], [x25]\n" + "ld1 { v21.b }[12], [x24]\n" + "ld1 { v22.b }[12], [x23]\n" + "ld1 { v18.b }[12], [x22]\n" + "ld1 { v19.b }[12], [x21]\n" + "ld1 { v16.b }[12], [x20]\n" + "b 13f\n" + "7:" // odd_loads_2_8 + "tbz %x[width], #1, 8f\n" + "ld1 { v27.h }[4], [x27], #0x2\n" + "ld1 { v24.h }[4], [x26], #0x2\n" + "ld1 { v25.h }[4], [x25], #0x2\n" + "ld1 { v21.h }[4], [x24], #0x2\n" + "ld1 { v22.h }[4], [x23], #0x2\n" + "ld1 { v18.h }[4], [x22], #0x2\n" + "ld1 { v19.h }[4], [x21], #0x2\n" + "ld1 { v16.h }[4], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[10], [x27]\n" + "ld1 { v24.b }[10], [x26]\n" + "ld1 { v25.b }[10], [x25]\n" + "ld1 { v21.b }[10], [x24]\n" + "ld1 { v22.b }[10], [x23]\n" + "ld1 { v18.b }[10], [x22]\n" + "ld1 { v19.b }[10], [x21]\n" + "ld1 { v16.b }[10], [x20]\n" + "b 13f\n" + "8:" // odd_loads_1_8 + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[8], [x27]\n" + "ld1 { v24.b }[8], [x26]\n" + "ld1 { v25.b }[8], [x25]\n" + "ld1 { v21.b }[8], [x24]\n" + "ld1 { v22.b }[8], [x23]\n" + "ld1 { v18.b }[8], [x22]\n" + "ld1 { v19.b }[8], [x21]\n" + "ld1 { v16.b }[8], [x20]\n" + "mov x19, #0x2\n" + "b 13f\n" + "9:" // odd_loads_4_0 + "tbz %x[width], #2, 11f\n" + "ldr s27, [x27], #0x4\n" + "ldr s24, [x26], #0x4\n" + "ldr s25, [x25], #0x4\n" + "ldr s21, [x24], #0x4\n" + "ldr s22, [x23], #0x4\n" + "ldr s18, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s16, [x20], #0x4\n" + "tbz %x[width], #1, 10f\n" + "ld1 { v27.h }[2], [x27], #0x2\n" + "ld1 { v24.h }[2], [x26], #0x2\n" + "ld1 { v25.h }[2], [x25], #0x2\n" + "ld1 { v21.h }[2], [x24], #0x2\n" + "ld1 { v22.h }[2], [x23], #0x2\n" + "ld1 { v18.h }[2], [x22], #0x2\n" + "ld1 { v19.h }[2], [x21], #0x2\n" + "ld1 { v16.h }[2], [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[6], [x27]\n" + "ld1 { v24.b }[6], [x26]\n" + "ld1 { 
v25.b }[6], [x25]\n" + "ld1 { v21.b }[6], [x24]\n" + "ld1 { v22.b }[6], [x23]\n" + "ld1 { v18.b }[6], [x22]\n" + "ld1 { v19.b }[6], [x21]\n" + "ld1 { v16.b }[6], [x20]\n" + "b 13f\n" + "10:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[4], [x27]\n" + "ld1 { v24.b }[4], [x26]\n" + "ld1 { v25.b }[4], [x25]\n" + "ld1 { v21.b }[4], [x24]\n" + "ld1 { v22.b }[4], [x23]\n" + "ld1 { v18.b }[4], [x22]\n" + "ld1 { v19.b }[4], [x21]\n" + "ld1 { v16.b }[4], [x20]\n" + "b 13f\n" + "11:" // odd_loads_2_0 + "tbz %x[width], #1, 12f\n" + "ldr h27, [x27], #0x2\n" + "ldr h24, [x26], #0x2\n" + "ldr h25, [x25], #0x2\n" + "ldr h21, [x24], #0x2\n" + "ldr h22, [x23], #0x2\n" + "ldr h18, [x22], #0x2\n" + "ldr h19, [x21], #0x2\n" + "ldr h16, [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[2], [x27]\n" + "ld1 { v24.b }[2], [x26]\n" + "ld1 { v25.b }[2], [x25]\n" + "ld1 { v21.b }[2], [x24]\n" + "ld1 { v22.b }[2], [x23]\n" + "ld1 { v18.b }[2], [x22]\n" + "ld1 { v19.b }[2], [x21]\n" + "ld1 { v16.b }[2], [x20]\n" + "b 13f\n" + "12:" // odd_loads_1_0 + "ldr b27, [x27, #0x0]\n" + "ldr b24, [x26, #0x0]\n" + "ldr b25, [x25, #0x0]\n" + "ldr b21, [x24, #0x0]\n" + "ldr b22, [x23, #0x0]\n" + "ldr b18, [x22, #0x0]\n" + "ldr b19, [x21, #0x0]\n" + "ldr b16, [x20, #0x0]\n" + "mov x19, #0x1\n" + "13:" // Odd load end + "zip1 v26.2d, v27.2d, v24.2d\n" + "subs x19, x19, #0x1\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "sadalp v5.8h, v26.16b\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "sadalp v4.8h, v23.16b\n" + "str q20, [%x[out_ptr], #0x20]\n" + "sadalp v3.8h, v20.16b\n" + "str q17, [%x[out_ptr], #0x30]\n" + "sadalp v2.8h, v17.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "beq 14f\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "str q24, [%x[out_ptr], #0x0]\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "sadalp v5.8h, v24.16b\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q21, [%x[out_ptr], #0x10]\n" + "sadalp v4.8h, v21.16b\n" + "str q18, [%x[out_ptr], #0x20]\n" + "sadalp v3.8h, v18.16b\n" + "str q16, [%x[out_ptr], #0x30]\n" + "sadalp v2.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "14:" // Odds skip + "sadalp v1.4s, v5.8h\n" + "sadalp v0.4s, v4.8h\n" + "addp v1.4s, v1.4s, v0.4s\n" + "sadalp v31.4s, v3.8h\n" + "sadalp v30.4s, v2.8h\n" + "add v1.4s, v1.4s, v29.4s\n" + "str q1, [%x[out_ptr], #0x0]\n" + "addp v0.4s, v31.4s, v30.4s\n" + "add v0.4s, v0.4s, v28.4s\n" + "str q0, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp new file mode 100644 index 0000000000..07164d6b24 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_u8_u8_summing.hpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2019-2020 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifdef __aarch64__ + +template<> +void interleave_block<8, 8, VLType::None, true>( + uint8_t * &out_ptr, const uint8_t * const * in, size_t width, size_t height, + size_t row_offset, bool first +) +{ + __asm__ __volatile__( + "movi v5.8h, #0x0\n" + "ldr x27, [%x[in], #0x0]\n" + "mov x19, #0x0\n" + "movi v4.8h, #0x0\n" + "ldr x26, [%x[in], #0x8]\n" + "cmp %x[height], #0x8\n" + "movi v3.8h, #0x0\n" + "ldr x25, [%x[in], #0x10]\n" + "add x27, x27, %x[row_offset]\n" + "movi v2.8h, #0x0\n" + "ldr x24, [%x[in], #0x18]\n" + "movi v1.4s, #0x0\n" + "ldr x23, [%x[in], #0x20]\n" + "add x26, x26, %x[row_offset]\n" + "movi v0.4s, #0x0\n" + "ldr x22, [%x[in], #0x28]\n" + "add x25, x25, %x[row_offset]\n" + "movi v31.4s, #0x0\n" + "ldr x21, [%x[in], #0x30]\n" + "add x24, x24, %x[row_offset]\n" + "movi v30.4s, #0x0\n" + "ldr x20, [%x[in], #0x38]\n" + "add x23, x23, %x[row_offset]\n" + "add x22, x22, %x[row_offset]\n" + "add x21, x21, %x[row_offset]\n" + "add x20, x20, %x[row_offset]\n" + "beq 1f\n" + "mov x20, x27\n" + "cmp %x[height], #0x2\n" + "csel x26, x26, x27, GE\n" + "csel x25, x25, x27, GT\n" + "cmp %x[height], #0x4\n" + "csel x24, x24, x27, GE\n" + "csel x23, x23, x27, GT\n" + "cmp %x[height], #0x6\n" + "csel x22, x22, x27, GE\n" + "csel x21, x21, x27, GT\n" + "1:" // no_pointer_adj + "movi v29.4s, #0x0\n" + "prfm pldl1keep, [x27, #0x0]\n" + "movi v28.4s, #0x0\n" + "prfm pldl1keep, [x26, #0x0]\n" + "prfm pldl1keep, [x25, #0x0]\n" + "prfm pldl1keep, [x24, #0x0]\n" + "prfm pldl1keep, [x23, #0x0]\n" + "prfm pldl1keep, [x22, #0x0]\n" + "prfm pldl1keep, [x21, #0x0]\n" + "prfm pldl1keep, [x20, #0x0]\n" + "prfm pldl1keep, [x27, #0x40]\n" + "prfm pldl1keep, [x26, #0x40]\n" + "prfm pldl1keep, [x25, #0x40]\n" + "prfm pldl1keep, [x24, #0x40]\n" + "prfm pldl1keep, [x23, #0x40]\n" + "prfm pldl1keep, [x22, #0x40]\n" + "prfm pldl1keep, [x21, #0x40]\n" + "prfm pldl1keep, [x20, #0x40]\n" + "cbnz %w[first], 2f\n" + "sub %x[out_ptr], %x[out_ptr], #0x20\n" + "ld1 { v29.4s }, [%x[out_ptr]]\n" + "ldr q28, [%x[out_ptr], #0x10]\n" + "2:" // first_pass + "cmp %x[width], #0x10\n" + "blt 5f\n" + "3:" // Main loop head + "cmp x19, #0x3e\n" + "ble 4f\n" + "uadalp v1.4s, v5.8h\n" + "movi v5.8h, #0x0\n" + "uadalp v0.4s, v4.8h\n" + "movi v4.8h, #0x0\n" + "uadalp v31.4s, v3.8h\n" + "movi v3.8h, #0x0\n" + "uadalp v30.4s, v2.8h\n" + "movi v2.8h, #0x0\n" + "mov x19, #0x0\n" + "4:" // no_accumulate_16 + "ldr q27, 
[x27], #0x10\n" + "prfm pldl1keep, [x27, #0x70]\n" + "ldr q24, [x26], #0x10\n" + "zip1 v26.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x26, #0x70]\n" + "ldr q25, [x25], #0x10\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "prfm pldl1keep, [x25, #0x70]\n" + "ldr q21, [x24], #0x10\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x24, #0x70]\n" + "ldr q22, [x23], #0x10\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "prfm pldl1keep, [x23, #0x70]\n" + "ldr q18, [x22], #0x10\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x22, #0x70]\n" + "ldr q19, [x21], #0x10\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "prfm pldl1keep, [x21, #0x70]\n" + "ldr q16, [x20], #0x10\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "prfm pldl1keep, [x20, #0x70]\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "uadalp v5.8h, v26.16b\n" + "str q23, [%x[out_ptr], #0x10]\n" + "uadalp v4.8h, v23.16b\n" + "str q20, [%x[out_ptr], #0x20]\n" + "uadalp v3.8h, v20.16b\n" + "str q17, [%x[out_ptr], #0x30]\n" + "uadalp v2.8h, v17.16b\n" + "str q24, [%x[out_ptr], #0x40]\n" + "uadalp v5.8h, v24.16b\n" + "str q21, [%x[out_ptr], #0x50]\n" + "uadalp v4.8h, v21.16b\n" + "str q18, [%x[out_ptr], #0x60]\n" + "uadalp v3.8h, v18.16b\n" + "str q16, [%x[out_ptr], #0x70]\n" + "uadalp v2.8h, v16.16b\n" + "add x19, x19, #0x1\n" + "subs %x[width], %x[width], #0x10\n" + "cmp %x[width], #0x10\n" + "add %x[out_ptr], %x[out_ptr], #0x80\n" + "bge 3b\n" + "5:" // Main loop skip + "cbz %x[width], 14f\n" + "tbz %x[width], #3, 9f\n" + "ldr d27, [x27], #0x8\n" + "ldr d24, [x26], #0x8\n" + "ldr d25, [x25], #0x8\n" + "ldr d21, [x24], #0x8\n" + "ldr d22, [x23], #0x8\n" + "ldr d18, [x22], #0x8\n" + "ldr d19, [x21], #0x8\n" + "ldr d16, [x20], #0x8\n" + "tbz %x[width], #2, 7f\n" + "ld1 { v27.s }[2], [x27], #0x4\n" + "ld1 { v24.s }[2], [x26], #0x4\n" + "ld1 { v25.s }[2], [x25], #0x4\n" + "ld1 { v21.s }[2], [x24], #0x4\n" + "ld1 { v22.s }[2], [x23], #0x4\n" + "ld1 { v18.s }[2], [x22], #0x4\n" + "ld1 { v19.s }[2], [x21], #0x4\n" + "ld1 { v16.s }[2], [x20], #0x4\n" + "tbz %x[width], #1, 6f\n" + "ld1 { v27.h }[6], [x27], #0x2\n" + "ld1 { v24.h }[6], [x26], #0x2\n" + "ld1 { v25.h }[6], [x25], #0x2\n" + "ld1 { v21.h }[6], [x24], #0x2\n" + "ld1 { v22.h }[6], [x23], #0x2\n" + "ld1 { v18.h }[6], [x22], #0x2\n" + "ld1 { v19.h }[6], [x21], #0x2\n" + "ld1 { v16.h }[6], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[14], [x27]\n" + "ld1 { v24.b }[14], [x26]\n" + "ld1 { v25.b }[14], [x25]\n" + "ld1 { v21.b }[14], [x24]\n" + "ld1 { v22.b }[14], [x23]\n" + "ld1 { v18.b }[14], [x22]\n" + "ld1 { v19.b }[14], [x21]\n" + "ld1 { v16.b }[14], [x20]\n" + "b 13f\n" + "6:" // odd_loads_1_12 + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[12], [x27]\n" + "ld1 { v24.b }[12], [x26]\n" + "ld1 { v25.b }[12], [x25]\n" + "ld1 { v21.b }[12], [x24]\n" + "ld1 { v22.b }[12], [x23]\n" + "ld1 { v18.b }[12], [x22]\n" + "ld1 { v19.b }[12], [x21]\n" + "ld1 { v16.b }[12], [x20]\n" + "b 13f\n" + "7:" // odd_loads_2_8 + "tbz %x[width], #1, 8f\n" + "ld1 { v27.h }[4], [x27], #0x2\n" + "ld1 { v24.h }[4], [x26], #0x2\n" + "ld1 { v25.h }[4], [x25], #0x2\n" + "ld1 { v21.h }[4], [x24], #0x2\n" + "ld1 { v22.h }[4], [x23], #0x2\n" + "ld1 { v18.h }[4], [x22], #0x2\n" + "ld1 { v19.h }[4], [x21], #0x2\n" + "ld1 { v16.h }[4], [x20], #0x2\n" + "mov x19, #0x2\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[10], [x27]\n" + "ld1 { v24.b }[10], [x26]\n" + "ld1 { v25.b }[10], [x25]\n" + "ld1 { v21.b }[10], [x24]\n" + "ld1 { v22.b }[10], [x23]\n" + "ld1 { v18.b 
}[10], [x22]\n" + "ld1 { v19.b }[10], [x21]\n" + "ld1 { v16.b }[10], [x20]\n" + "b 13f\n" + "8:" // odd_loads_1_8 + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[8], [x27]\n" + "ld1 { v24.b }[8], [x26]\n" + "ld1 { v25.b }[8], [x25]\n" + "ld1 { v21.b }[8], [x24]\n" + "ld1 { v22.b }[8], [x23]\n" + "ld1 { v18.b }[8], [x22]\n" + "ld1 { v19.b }[8], [x21]\n" + "ld1 { v16.b }[8], [x20]\n" + "mov x19, #0x2\n" + "b 13f\n" + "9:" // odd_loads_4_0 + "tbz %x[width], #2, 11f\n" + "ldr s27, [x27], #0x4\n" + "ldr s24, [x26], #0x4\n" + "ldr s25, [x25], #0x4\n" + "ldr s21, [x24], #0x4\n" + "ldr s22, [x23], #0x4\n" + "ldr s18, [x22], #0x4\n" + "ldr s19, [x21], #0x4\n" + "ldr s16, [x20], #0x4\n" + "tbz %x[width], #1, 10f\n" + "ld1 { v27.h }[2], [x27], #0x2\n" + "ld1 { v24.h }[2], [x26], #0x2\n" + "ld1 { v25.h }[2], [x25], #0x2\n" + "ld1 { v21.h }[2], [x24], #0x2\n" + "ld1 { v22.h }[2], [x23], #0x2\n" + "ld1 { v18.h }[2], [x22], #0x2\n" + "ld1 { v19.h }[2], [x21], #0x2\n" + "ld1 { v16.h }[2], [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[6], [x27]\n" + "ld1 { v24.b }[6], [x26]\n" + "ld1 { v25.b }[6], [x25]\n" + "ld1 { v21.b }[6], [x24]\n" + "ld1 { v22.b }[6], [x23]\n" + "ld1 { v18.b }[6], [x22]\n" + "ld1 { v19.b }[6], [x21]\n" + "ld1 { v16.b }[6], [x20]\n" + "b 13f\n" + "10:" // odd_loads_1_4 + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[4], [x27]\n" + "ld1 { v24.b }[4], [x26]\n" + "ld1 { v25.b }[4], [x25]\n" + "ld1 { v21.b }[4], [x24]\n" + "ld1 { v22.b }[4], [x23]\n" + "ld1 { v18.b }[4], [x22]\n" + "ld1 { v19.b }[4], [x21]\n" + "ld1 { v16.b }[4], [x20]\n" + "b 13f\n" + "11:" // odd_loads_2_0 + "tbz %x[width], #1, 12f\n" + "ldr h27, [x27], #0x2\n" + "ldr h24, [x26], #0x2\n" + "ldr h25, [x25], #0x2\n" + "ldr h21, [x24], #0x2\n" + "ldr h22, [x23], #0x2\n" + "ldr h18, [x22], #0x2\n" + "ldr h19, [x21], #0x2\n" + "ldr h16, [x20], #0x2\n" + "mov x19, #0x1\n" + "tbz %x[width], #0, 13f\n" + "ld1 { v27.b }[2], [x27]\n" + "ld1 { v24.b }[2], [x26]\n" + "ld1 { v25.b }[2], [x25]\n" + "ld1 { v21.b }[2], [x24]\n" + "ld1 { v22.b }[2], [x23]\n" + "ld1 { v18.b }[2], [x22]\n" + "ld1 { v19.b }[2], [x21]\n" + "ld1 { v16.b }[2], [x20]\n" + "b 13f\n" + "12:" // odd_loads_1_0 + "ldr b27, [x27, #0x0]\n" + "ldr b24, [x26, #0x0]\n" + "ldr b25, [x25, #0x0]\n" + "ldr b21, [x24, #0x0]\n" + "ldr b22, [x23, #0x0]\n" + "ldr b18, [x22, #0x0]\n" + "ldr b19, [x21, #0x0]\n" + "ldr b16, [x20, #0x0]\n" + "mov x19, #0x1\n" + "13:" // Odd load end + "zip1 v26.2d, v27.2d, v24.2d\n" + "subs x19, x19, #0x1\n" + "zip1 v23.2d, v25.2d, v21.2d\n" + "str q26, [%x[out_ptr], #0x0]\n" + "zip1 v20.2d, v22.2d, v18.2d\n" + "uadalp v5.8h, v26.16b\n" + "zip1 v17.2d, v19.2d, v16.2d\n" + "str q23, [%x[out_ptr], #0x10]\n" + "uadalp v4.8h, v23.16b\n" + "str q20, [%x[out_ptr], #0x20]\n" + "uadalp v3.8h, v20.16b\n" + "str q17, [%x[out_ptr], #0x30]\n" + "uadalp v2.8h, v17.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "beq 14f\n" + "zip2 v24.2d, v27.2d, v24.2d\n" + "zip2 v21.2d, v25.2d, v21.2d\n" + "str q24, [%x[out_ptr], #0x0]\n" + "zip2 v18.2d, v22.2d, v18.2d\n" + "uadalp v5.8h, v24.16b\n" + "zip2 v16.2d, v19.2d, v16.2d\n" + "str q21, [%x[out_ptr], #0x10]\n" + "uadalp v4.8h, v21.16b\n" + "str q18, [%x[out_ptr], #0x20]\n" + "uadalp v3.8h, v18.16b\n" + "str q16, [%x[out_ptr], #0x30]\n" + "uadalp v2.8h, v16.16b\n" + "add %x[out_ptr], %x[out_ptr], #0x40\n" + "14:" // Odds skip + "uadalp v1.4s, v5.8h\n" + "uadalp v0.4s, v4.8h\n" + "addp v1.4s, v1.4s, v0.4s\n" + "uadalp v31.4s, v3.8h\n" + "uadalp 
v30.4s, v2.8h\n" + "add v1.4s, v1.4s, v29.4s\n" + "str q1, [%x[out_ptr], #0x0]\n" + "addp v0.4s, v31.4s, v30.4s\n" + "add v0.4s, v0.4s, v28.4s\n" + "str q0, [%x[out_ptr], #0x10]\n" + "add %x[out_ptr], %x[out_ptr], #0x20\n" + : [out_ptr] "+r" (out_ptr), [width] "+r" (width) + : [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list.hpp new file mode 100644 index 0000000000..52b49c0f0c --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/list.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2020 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "a32_interleave6_block1_fp32_fp32.hpp" +#include "a64_interleave4_block16_s8_s8.hpp" +#include "a64_interleave4_block16_s8_s8_summing.hpp" +#include "a64_interleave4_block16_u8_u8_summing.hpp" +#include "a64_interleave8_block1_bf16_fp32.hpp" +#include "a64_interleave8_block1_fp16_fp16.hpp" +#include "a64_interleave8_block1_fp16_fp32.hpp" +#include "a64_interleave8_block1_fp32_fp32.hpp" +#include "a64_interleave8_block1_s16_s16.hpp" +#include "a64_interleave8_block1_s16_s16_summing.hpp" +#include "a64_interleave8_block1_s8_s16.hpp" +#include "a64_interleave8_block1_s8_s16_summing.hpp" +#include "a64_interleave8_block1_u16_u16_summing.hpp" +#include "a64_interleave8_block1_u8_u16.hpp" +#include "a64_interleave8_block1_u8_u16_summing.hpp" +#include "a64_interleave8_block2_bf16_bf16.hpp" +#include "a64_interleave8_block2_fp32_fp32.hpp" +#include "a64_interleave8_block4_bf16_bf16.hpp" +#include "a64_interleave8_block4_s8_s8.hpp" +#include "a64_interleave8_block4_s8_s8_summing.hpp" +#include "a64_interleave8_block4_u8_u8_summing.hpp" +#include "a64_interleave8_block8_s8_s8.hpp" +#include "a64_interleave8_block8_s8_s8_summing.hpp" +#include "a64_interleave8_block8_u8_u8_summing.hpp" |
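
For readers tracing the generated assembly above, all of the "summing" interleave variants follow the same shape: 8 row pointers are set up (rows beyond the requested height are aliased to row 0 via the csel sequence), data is packed into fixed-size blocks per row, partial byte sums are accumulated into 16-bit lanes with sadalp/uadalp and flushed periodically into 32-bit lanes (the cmp x19 / ble guard at the top of the main loop keeps the 16-bit partials from overflowing), and one int32 sum per interleaved row is stored straight after the packed data, re-loaded and continued when the "first" flag is clear. What follows is a minimal scalar sketch of the <8, 8, VLType::None, true> int8 path, written only to make that layout explicit; it is not part of the patch, the helper name is hypothetical, and the zero-padding / row-0-aliasing behaviour is read off the ldr d/s/h/b tail loads and csel sequences above.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical helper, not part of arm_gemm: a scalar model of the layout
// produced by the <8, 8, VLType::None, true> summing kernels above.
static void scalar_interleave8_block8_sum(int8_t *&out_ptr,
                                          const int8_t *const *in,
                                          size_t width, size_t height,
                                          size_t row_offset, bool first)
{
    int32_t sums[8] = {0};

    if (!first) {
        // Continuation pass: the previous call left its 8 running sums at the
        // end of the buffer; step back over them and keep accumulating
        // (mirrors the "sub out_ptr, #0x20" / reload of q29, q28 above).
        out_ptr -= sizeof(sums);
        std::memcpy(sums, out_ptr, sizeof(sums));
    }

    const size_t blocks = (width + 7) / 8;  // 8-byte blocks, zero padded at the tail

    for (size_t b = 0; b < blocks; b++) {
        for (size_t r = 0; r < 8; r++) {
            // Rows beyond 'height' alias row 0 (the csel sequence); the
            // duplicated data and its sum are ignored by the consumer.
            const int8_t *row = in[r < height ? r : 0] + row_offset;
            for (size_t j = 0; j < 8; j++) {
                const size_t col = b * 8 + j;
                const int8_t v = (col < width) ? row[col] : 0;
                *out_ptr++ = v;
                sums[r] += v;
            }
        }
    }

    // One int32 per-row sum, stored straight after the interleaved data.
    std::memcpy(out_ptr, sums, sizeof(sums));
    out_ptr += sizeof(sums);
}

The scalar version sums directly in int32 and so needs no periodic flush; the assembly keeps the inner-loop accumulators at 16 bits purely so that sadalp/uadalp can fold the freshly stored vectors cheaply, which is why the overflow guard exists there at all.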