Diffstat (limited to 'src/cpu/kernels/lut/generic')
-rw-r--r--  src/cpu/kernels/lut/generic/neon/u8.cpp   408
-rw-r--r--  src/cpu/kernels/lut/generic/sve/u16.cpp   103
-rw-r--r--  src/cpu/kernels/lut/generic/sve2/u8.cpp   644
3 files changed, 1155 insertions, 0 deletions
diff --git a/src/cpu/kernels/lut/generic/neon/u8.cpp b/src/cpu/kernels/lut/generic/neon/u8.cpp
new file mode 100644
index 0000000000..5516f5b33d
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/neon/u8.cpp
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+
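+// Computes a 256-entry byte lookup per string: output[s][i] = table[input[s][i]].
+// The whole table lives in q16-q31. Each 48-byte chunk is translated with one
+// 4-register TBL per 64-entry table quadrant (indices biased by 0x40/0x80/0xc0),
+// and the partial results are OR-combined, since out-of-range TBL lanes are zero.
+//
+// A scalar sketch of the same semantics (for reference, not the optimized path):
+//
+//   for (size_t s = 0; s < num_strings; ++s)
+//       for (size_t i = 0; i < string_length; ++i)
+//           output[s][i] = table[input[s][i]];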
+void lut_u8_neon(
+ const uint8_t *table, size_t num_strings, size_t string_length, const uint8_t *const *input, uint8_t *const *output)
+{
+ __asm__ __volatile__("ldr q16, [%x[table], #0x0]\n"
+ "ldr q17, [%x[table], #0x10]\n"
+ "mov x23, #0x0\n"
+ "ldr q18, [%x[table], #0x20]\n"
+ "ldr q19, [%x[table], #0x30]\n"
+ "ldr q20, [%x[table], #0x40]\n"
+ "ldr q21, [%x[table], #0x50]\n"
+ "ldr q22, [%x[table], #0x60]\n"
+ "ldr q23, [%x[table], #0x70]\n"
+ "ldr q24, [%x[table], #0x80]\n"
+ "ldr q25, [%x[table], #0x90]\n"
+ "ldr q26, [%x[table], #0xa0]\n"
+ "ldr q27, [%x[table], #0xb0]\n"
+ "ldr q28, [%x[table], #0xc0]\n"
+ "ldr q29, [%x[table], #0xd0]\n"
+ "ldr q30, [%x[table], #0xe0]\n"
+ "ldr q31, [%x[table], #0xf0]\n"
+ "1:" // string loop
+ "ldr x22, [%x[input], x23, LSL #0x3]\n"
+ "ldr x21, [%x[output], x23, LSL #0x3]\n"
+ "movi v11.16b, #0x40\n"
+ "movi v10.16b, #0x80\n"
+ "movi v9.16b, #0xc0\n"
+ "mov x20, %x[string_length]\n"
+ "2:" // 4 rounds: width loop
+ "cmp x20, #0x30\n"
+ "bge 27f\n"
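+                         // Fewer than 48 bytes remain: tbz on each bit of x20
+                         // assembles an exact-length partial load into v8, v13 and v12.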
+ "tbz x20, #5, 10f\n"
+ "ld1 { v8.16b }, [x22], #0x10\n"
+ "ld1 { v13.16b }, [x22], #0x10\n"
+ "tbz x20, #3, 6f\n"
+ "ldr d12, [x22], #0x8\n"
+ "tbz x20, #2, 4f\n"
+ "ld1 { v12.s }[2], [x22], #0x4\n"
+ "tbz x20, #1, 3f\n"
+ "ld1 { v12.h }[6], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[14], [x22]\n"
+ "b 26f\n"
+ "3:" // 4 rounds: Partial load: partial_1_44
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[12], [x22]\n"
+ "b 26f\n"
+ "4:" // 4 rounds: Partial load: partial_2_40
+ "tbz x20, #1, 5f\n"
+ "ld1 { v12.h }[4], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[10], [x22]\n"
+ "b 26f\n"
+ "5:" // 4 rounds: Partial load: partial_1_40
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[8], [x22]\n"
+ "b 26f\n"
+ "6:" // 4 rounds: Partial load: partial_4_32
+ "tbz x20, #2, 8f\n"
+ "ldr s12, [x22], #0x4\n"
+ "tbz x20, #1, 7f\n"
+ "ld1 { v12.h }[2], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[6], [x22]\n"
+ "b 26f\n"
+ "7:" // 4 rounds: Partial load: partial_1_36
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[4], [x22]\n"
+ "b 26f\n"
+ "8:" // 4 rounds: Partial load: partial_2_32
+ "tbz x20, #1, 9f\n"
+ "ldr h12, [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[2], [x22]\n"
+ "b 26f\n"
+ "9:" // 4 rounds: Partial load: partial_1_32
+ "tbz x20, #0, 26f\n"
+ "ldr b12, [x22, #0x0]\n"
+ "b 26f\n"
+ "10:" // 4 rounds: Partial load: partial_16_0
+ "tbz x20, #4, 18f\n"
+ "ld1 { v8.16b }, [x22], #0x10\n"
+ "tbz x20, #3, 14f\n"
+ "ldr d13, [x22], #0x8\n"
+ "tbz x20, #2, 12f\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
+ "tbz x20, #1, 11f\n"
+ "ld1 { v13.h }[6], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[14], [x22]\n"
+ "b 26f\n"
+ "11:" // 4 rounds: Partial load: partial_1_28
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[12], [x22]\n"
+ "b 26f\n"
+ "12:" // 4 rounds: Partial load: partial_2_24
+ "tbz x20, #1, 13f\n"
+ "ld1 { v13.h }[4], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[10], [x22]\n"
+ "b 26f\n"
+ "13:" // 4 rounds: Partial load: partial_1_24
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[8], [x22]\n"
+ "b 26f\n"
+ "14:" // 4 rounds: Partial load: partial_4_16
+ "tbz x20, #2, 16f\n"
+ "ldr s13, [x22], #0x4\n"
+ "tbz x20, #1, 15f\n"
+ "ld1 { v13.h }[2], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[6], [x22]\n"
+ "b 26f\n"
+ "15:" // 4 rounds: Partial load: partial_1_20
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[4], [x22]\n"
+ "b 26f\n"
+ "16:" // 4 rounds: Partial load: partial_2_16
+ "tbz x20, #1, 17f\n"
+ "ldr h13, [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[2], [x22]\n"
+ "b 26f\n"
+ "17:" // 4 rounds: Partial load: partial_1_16
+ "tbz x20, #0, 26f\n"
+ "ldr b13, [x22, #0x0]\n"
+ "b 26f\n"
+ "18:" // 4 rounds: Partial load: partial_8_0
+ "tbz x20, #3, 22f\n"
+ "ldr d8, [x22], #0x8\n"
+ "tbz x20, #2, 20f\n"
+ "ld1 { v8.s }[2], [x22], #0x4\n"
+ "tbz x20, #1, 19f\n"
+ "ld1 { v8.h }[6], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[14], [x22]\n"
+ "b 26f\n"
+ "19:" // 4 rounds: Partial load: partial_1_12
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[12], [x22]\n"
+ "b 26f\n"
+ "20:" // 4 rounds: Partial load: partial_2_8
+ "tbz x20, #1, 21f\n"
+ "ld1 { v8.h }[4], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[10], [x22]\n"
+ "b 26f\n"
+ "21:" // 4 rounds: Partial load: partial_1_8
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[8], [x22]\n"
+ "b 26f\n"
+ "22:" // 4 rounds: Partial load: partial_4_0
+ "tbz x20, #2, 24f\n"
+ "ldr s8, [x22], #0x4\n"
+ "tbz x20, #1, 23f\n"
+ "ld1 { v8.h }[2], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[6], [x22]\n"
+ "b 26f\n"
+ "23:" // 4 rounds: Partial load: partial_1_4
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[4], [x22]\n"
+ "b 26f\n"
+ "24:" // 4 rounds: Partial load: partial_2_0
+ "tbz x20, #1, 25f\n"
+ "ldr h8, [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[2], [x22]\n"
+ "b 26f\n"
+ "25:" // 4 rounds: Partial load: partial_1_0
+ "ldr b8, [x22, #0x0]\n"
+ "26:" // 4 rounds: Partial load: Done
+ "b 28f\n"
+ "27:" // 4 rounds: Full load
+ "ldr q8, [x22, #0x0]\n"
+ "ldr q13, [x22, #0x10]\n"
+ "ldr q12, [x22, #0x20]\n"
+ "add x22, x22, #0x30\n"
+ "28:" // 4 rounds: Load done
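+                         // Look up each 64-entry table quadrant with a 4-register TBL
+                         // (indices biased by 0x40/0x80/0xc0 via v11/v10/v9); lanes with
+                         // out-of-range indices read as zero, so the results OR together.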
+ "sub v0.16b, v8.16b, v11.16b\n"
+ "sub v7.16b, v8.16b, v10.16b\n"
+ "tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b\n"
+ "sub v6.16b, v8.16b, v9.16b\n"
+ "sub v5.16b, v13.16b, v11.16b\n"
+ "tbl v8.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v8.16b\n"
+ "sub v4.16b, v13.16b, v10.16b\n"
+ "sub v3.16b, v13.16b, v9.16b\n"
+ "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
+ "sub v2.16b, v12.16b, v11.16b\n"
+ "sub v1.16b, v12.16b, v10.16b\n"
+ "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
+ "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
+ "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
+ "orr v8.16b, v8.16b, v0.16b\n"
+ "sub v0.16b, v12.16b, v9.16b\n"
+ "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
+ "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
+ "tbl v12.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v12.16b\n"
+ "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
+ "orr v7.16b, v7.16b, v6.16b\n"
+ "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
+ "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
+ "orr v13.16b, v13.16b, v5.16b\n"
+ "orr v4.16b, v4.16b, v3.16b\n"
+ "orr v12.16b, v12.16b, v2.16b\n"
+ "cmp x20, #0x30\n"
+ "orr v1.16b, v1.16b, v0.16b\n"
+ "orr v8.16b, v8.16b, v7.16b\n"
+ "orr v13.16b, v13.16b, v4.16b\n"
+ "orr v12.16b, v12.16b, v1.16b\n"
+ "bge 53f\n"
+ "tbz x20, #5, 36f\n"
+ "st1 { v8.16b }, [x21], #0x10\n"
+ "st1 { v13.16b }, [x21], #0x10\n"
+ "tbz x20, #3, 32f\n"
+ "str d12, [x21], #0x8\n"
+ "tbz x20, #2, 30f\n"
+ "st1 { v12.s }[2], [x21], #0x4\n"
+ "tbz x20, #1, 29f\n"
+ "st1 { v12.h }[6], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[14], [x21]\n"
+ "b 52f\n"
+ "29:" // 4 rounds: Partial writeback: partial_1_44
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[12], [x21]\n"
+ "b 52f\n"
+ "30:" // 4 rounds: Partial writeback: partial_2_40
+ "tbz x20, #1, 31f\n"
+ "st1 { v12.h }[4], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[10], [x21]\n"
+ "b 52f\n"
+ "31:" // 4 rounds: Partial writeback: partial_1_40
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[8], [x21]\n"
+ "b 52f\n"
+ "32:" // 4 rounds: Partial writeback: partial_4_32
+ "tbz x20, #2, 34f\n"
+ "str s12, [x21], #0x4\n"
+ "tbz x20, #1, 33f\n"
+ "st1 { v12.h }[2], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[6], [x21]\n"
+ "b 52f\n"
+ "33:" // 4 rounds: Partial writeback: partial_1_36
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[4], [x21]\n"
+ "b 52f\n"
+ "34:" // 4 rounds: Partial writeback: partial_2_32
+ "tbz x20, #1, 35f\n"
+ "str h12, [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[2], [x21]\n"
+ "b 52f\n"
+ "35:" // 4 rounds: Partial writeback: partial_1_32
+ "tbz x20, #0, 52f\n"
+ "str b12, [x21, #0x0]\n"
+ "b 52f\n"
+ "36:" // 4 rounds: Partial writeback: partial_16_0
+ "tbz x20, #4, 44f\n"
+ "st1 { v8.16b }, [x21], #0x10\n"
+ "tbz x20, #3, 40f\n"
+ "str d13, [x21], #0x8\n"
+ "tbz x20, #2, 38f\n"
+ "st1 { v13.s }[2], [x21], #0x4\n"
+ "tbz x20, #1, 37f\n"
+ "st1 { v13.h }[6], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[14], [x21]\n"
+ "b 52f\n"
+ "37:" // 4 rounds: Partial writeback: partial_1_28
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[12], [x21]\n"
+ "b 52f\n"
+ "38:" // 4 rounds: Partial writeback: partial_2_24
+ "tbz x20, #1, 39f\n"
+ "st1 { v13.h }[4], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[10], [x21]\n"
+ "b 52f\n"
+ "39:" // 4 rounds: Partial writeback: partial_1_24
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[8], [x21]\n"
+ "b 52f\n"
+ "40:" // 4 rounds: Partial writeback: partial_4_16
+ "tbz x20, #2, 42f\n"
+ "str s13, [x21], #0x4\n"
+ "tbz x20, #1, 41f\n"
+ "st1 { v13.h }[2], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[6], [x21]\n"
+ "b 52f\n"
+ "41:" // 4 rounds: Partial writeback: partial_1_20
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[4], [x21]\n"
+ "b 52f\n"
+ "42:" // 4 rounds: Partial writeback: partial_2_16
+ "tbz x20, #1, 43f\n"
+ "str h13, [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[2], [x21]\n"
+ "b 52f\n"
+ "43:" // 4 rounds: Partial writeback: partial_1_16
+ "tbz x20, #0, 52f\n"
+ "str b13, [x21, #0x0]\n"
+ "b 52f\n"
+ "44:" // 4 rounds: Partial writeback: partial_8_0
+ "tbz x20, #3, 48f\n"
+ "str d8, [x21], #0x8\n"
+ "tbz x20, #2, 46f\n"
+ "st1 { v8.s }[2], [x21], #0x4\n"
+ "tbz x20, #1, 45f\n"
+ "st1 { v8.h }[6], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[14], [x21]\n"
+ "b 52f\n"
+ "45:" // 4 rounds: Partial writeback: partial_1_12
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[12], [x21]\n"
+ "b 52f\n"
+ "46:" // 4 rounds: Partial writeback: partial_2_8
+ "tbz x20, #1, 47f\n"
+ "st1 { v8.h }[4], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[10], [x21]\n"
+ "b 52f\n"
+ "47:" // 4 rounds: Partial writeback: partial_1_8
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[8], [x21]\n"
+ "b 52f\n"
+ "48:" // 4 rounds: Partial writeback: partial_4_0
+ "tbz x20, #2, 50f\n"
+ "str s8, [x21], #0x4\n"
+ "tbz x20, #1, 49f\n"
+ "st1 { v8.h }[2], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[6], [x21]\n"
+ "b 52f\n"
+ "49:" // 4 rounds: Partial writeback: partial_1_4
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[4], [x21]\n"
+ "b 52f\n"
+ "50:" // 4 rounds: Partial writeback: partial_2_0
+ "tbz x20, #1, 51f\n"
+ "str h8, [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[2], [x21]\n"
+ "b 52f\n"
+ "51:" // 4 rounds: Partial writeback: partial_1_0
+ "str b8, [x21, #0x0]\n"
+ "52:" // 4 rounds: Partial writeback: Done
+ "b 54f\n"
+ "53:" // 4 rounds: Full writeback
+ "str q8, [x21, #0x0]\n"
+ "str q13, [x21, #0x10]\n"
+ "str q12, [x21, #0x20]\n"
+ "add x21, x21, #0x30\n"
+ "54:" // 4 rounds: Writeback done
+ "subs x20, x20, #0x30\n"
+ "bgt 2b\n"
+ "add x23, x23, #0x1\n"
+ "cmp x23, %x[num_strings]\n"
+ "bne 1b\n"
+ :
+ : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output),
+ [string_length] "r"(string_length), [table] "r"(table)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
+ "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
+ "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23");
+}
+
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/lut/generic/sve/u16.cpp b/src/cpu/kernels/lut/generic/sve/u16.cpp
new file mode 100644
index 0000000000..75b8dcaae2
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/sve/u16.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Error.h"
+
+#include "src/cpu/kernels/lut/list.h"
+
+#ifdef __aarch64__
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <arm_sve.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
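+// Computes a u16 -> u16 table lookup over a flat buffer: output[i] = table[input[i]].
+// num_strings is unused here; the vector body below handles 4 * svcnth() elements
+// per iteration and any remainder falls through to the scalar tail loop.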
+void lut_u16_sve(const uint16_t *table, size_t num_strings, size_t size, const uint16_t *input, uint16_t *output)
+{
+ int64_t cnth = svcnth();
+ int64_t tail = size & (4 * cnth - 1);
+ int64_t count = size - tail;
+ int64_t pos = 0;
+ ARM_COMPUTE_UNUSED(num_strings);
+ __asm __volatile("cbz %[count], 2f\n"
+ "mov z31.s, #0\n"
+ "cnth x7, ALL, MUL #4\n"
+ "cntb x8, ALL, MUL #4\n"
+ "ptrue p0.b\n"
+ "1:"
+ "ld1h z0.h, p0/z, [%[input]]\n"
+ "ld1h z1.h, p0/z, [%[input], #1, MUL VL]\n"
+ "ld1h z2.h, p0/z, [%[input], #2, MUL VL]\n"
+ "ld1h z3.h, p0/z, [%[input], #3, MUL VL]\n"
+ "add %[input], %[input], x8\n"
+
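+                     // Widen each u16 index to a u32 lane (zip with z31 == 0), gather
+                     // halfwords from table + index * 2 (UXTW #1 scales by 2 bytes),
+                     // then repack the two gathered halves with uzp1.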
+ "zip1 z8.h, z0.h, z31.h\n"
+ "ld1h z8.s, p0/z, [%[table], z8.s, UXTW #1]\n"
+ "zip2 z0.h, z0.h, z31.h\n"
+ "ld1h z0.s, p0/z, [%[table], z0.s, UXTW #1]\n"
+ "uzp1 z0.h, z8.h, z0.h\n"
+ "st1h z0.h, p0, [%[output]]\n"
+
+ "zip1 z10.h, z1.h, z31.h\n"
+ "ld1h z10.s, p0/z, [%[table], z10.s, UXTW #1]\n"
+ "zip2 z1.h, z1.h, z31.h\n"
+ "ld1h z1.s, p0/z, [%[table], z1.s, UXTW #1]\n"
+ "uzp1 z1.h, z10.h, z1.h\n"
+ "st1h z1.h, p0, [%[output], #1, MUL VL]\n"
+
+ "zip1 z12.h, z2.h, z31.h\n"
+ "ld1h z12.s, p0/z, [%[table], z12.s, UXTW #1]\n"
+ "zip2 z2.h, z2.h, z31.h\n"
+ "ld1h z2.s, p0/z, [%[table], z2.s, UXTW #1]\n"
+ "uzp1 z2.h, z12.h, z2.h\n"
+ "st1h z2.h, p0, [%[output], #2, MUL VL]\n"
+
+ "zip1 z14.h, z3.h, z31.h\n"
+ "ld1h z14.s, p0/z, [%[table], z14.s, UXTW #1]\n"
+ "zip2 z3.h, z3.h, z31.h\n"
+ "ld1h z3.s, p0/z, [%[table], z3.s, UXTW #1]\n"
+ "uzp1 z3.h, z14.h, z3.h\n"
+ "st1h z3.h, p0, [%[output], #3, MUL VL]\n"
+
+ "add %[pos], %[pos], x7\n"
+ "add %[output], %[output], x8\n"
+ "cmp %[pos], %[count]\n"
+ "blt 1b\n"
+ "2:\n"
+ : [count] "+r"(count), [input] "+r"(input), [output] "+r"(output), [pos] "+r"(pos)
+ : [table] "r"(table)
+ : "memory", "cc", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12",
+                       "z14", "z31", "p0", "p1", "x7", "x8");
+ for (int i = 0; i < tail; i++)
+ {
+ output[i] = table[input[i]];
+ }
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // __aarch64__
diff --git a/src/cpu/kernels/lut/generic/sve2/u8.cpp b/src/cpu/kernels/lut/generic/sve2/u8.cpp
new file mode 100644
index 0000000000..ee8572703e
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/sve2/u8.cpp
@@ -0,0 +1,644 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/lut/list.h"
+
+#ifdef __aarch64__
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+namespace arm_compute
+{
+namespace cpu
+{
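+// Computes a 256-entry byte lookup per string: output[s][i] = table[input[s][i]].
+// The table is loaded into as few of z16-z31 as the vector length requires. Each
+// chunk is translated with a TBL on the first table register followed by chained
+// TBX rounds on the rest, decrementing the indices by the vector length in bytes
+// each round; TBX leaves already-matched lanes untouched.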
+void lut_u8_sve2(
+ const uint8_t *table, size_t num_strings, size_t string_length, const uint8_t *const *input, uint8_t *const *output)
+{
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "cntd x25\n"
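+        // x25 = cntd = 64-bit doublewords per vector; the bits tested below pick
+        // how many of z16-z31 (1, 2, 4, 8 or all 16) must hold the 256-byte table.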
+ "addvl %x[table], %x[table], #8\n"
+ "ld1b { z16.b }, p0/Z, [%x[table], #-8, MUL VL]\n"
+ "tbnz x25, #5, 1f\n"
+ "ld1b { z17.b }, p0/Z, [%x[table], #-7, MUL VL]\n"
+ "tbnz x25, #4, 1f\n"
+ "ld1b { z18.b }, p0/Z, [%x[table], #-6, MUL VL]\n"
+ "ld1b { z19.b }, p0/Z, [%x[table], #-5, MUL VL]\n"
+ "tbnz x25, #3, 1f\n"
+ "ld1b { z20.b }, p0/Z, [%x[table], #-4, MUL VL]\n"
+ "ld1b { z21.b }, p0/Z, [%x[table], #-3, MUL VL]\n"
+ "ld1b { z22.b }, p0/Z, [%x[table], #-2, MUL VL]\n"
+ "ld1b { z23.b }, p0/Z, [%x[table], #-1, MUL VL]\n"
+ "tbnz x25, #2, 1f\n"
+ "ld1b { z24.b }, p0/Z, [%x[table]]\n"
+ "ld1b { z25.b }, p0/Z, [%x[table], #1, MUL VL]\n"
+ "ld1b { z26.b }, p0/Z, [%x[table], #2, MUL VL]\n"
+ "ld1b { z27.b }, p0/Z, [%x[table], #3, MUL VL]\n"
+ "ld1b { z28.b }, p0/Z, [%x[table], #4, MUL VL]\n"
+ "ld1b { z29.b }, p0/Z, [%x[table], #5, MUL VL]\n"
+ "ld1b { z30.b }, p0/Z, [%x[table], #6, MUL VL]\n"
+ "ld1b { z31.b }, p0/Z, [%x[table], #7, MUL VL]\n"
+ "1:" // Table load done
+ "mov x24, #0x0\n"
+ "2:" // string loop
+ "ldr x23, [%x[input], x24, LSL #0x3]\n"
+ "ldr x22, [%x[output], x24, LSL #0x3]\n"
+ "tbnz x25, #5, 14f\n"
+ "tbnz x25, #4, 11f\n"
+ "tbnz x25, #3, 8f\n"
+ "tbnz x25, #2, 5f\n"
+ "mov z12.b, #0x10\n"
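+        // z12 = vector length in bytes (16 at 128-bit VL); it is subtracted from
+        // the indices between TBX rounds so each round addresses the next slice.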
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "3:" // 16 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 4f\n"
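+        // Tail: build one whilelt predicate per vector so the final partial
+        // chunk loads and stores exactly x21 bytes.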
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "4:" // 16 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f09 // tbx z9.b, z24.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f04 // tbx z4.b, z24.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f03 // tbx z3.b, z24.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f02 // tbx z2.b, z24.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f01 // tbx z1.b, z24.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f00 // tbx z0.b, z24.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f29 // tbx z9.b, z25.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f24 // tbx z4.b, z25.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f23 // tbx z3.b, z25.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f22 // tbx z2.b, z25.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f21 // tbx z1.b, z25.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f20 // tbx z0.b, z25.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f49 // tbx z9.b, z26.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f44 // tbx z4.b, z26.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f43 // tbx z3.b, z26.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f42 // tbx z2.b, z26.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f41 // tbx z1.b, z26.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f40 // tbx z0.b, z26.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f69 // tbx z9.b, z27.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f64 // tbx z4.b, z27.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f63 // tbx z3.b, z27.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f62 // tbx z2.b, z27.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f61 // tbx z1.b, z27.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f60 // tbx z0.b, z27.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f89 // tbx z9.b, z28.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f84 // tbx z4.b, z28.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f83 // tbx z3.b, z28.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f82 // tbx z2.b, z28.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f81 // tbx z1.b, z28.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f80 // tbx z0.b, z28.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2fa9 // tbx z9.b, z29.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2fa4 // tbx z4.b, z29.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282fa3 // tbx z3.b, z29.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272fa2 // tbx z2.b, z29.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262fa1 // tbx z1.b, z29.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252fa0 // tbx z0.b, z29.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "addvl x21, x21, #-6\n"
+ ".inst 0x052b2fc9 // tbx z9.b, z30.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2fc4 // tbx z4.b, z30.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282fc3 // tbx z3.b, z30.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272fc2 // tbx z2.b, z30.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262fc1 // tbx z1.b, z30.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252fc0 // tbx z0.b, z30.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2fe9 // tbx z9.b, z31.b, z11.b\n"
+ ".inst 0x052a2fe4 // tbx z4.b, z31.b, z10.b\n"
+ ".inst 0x05282fe3 // tbx z3.b, z31.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272fe2 // tbx z2.b, z31.b, z7.b\n"
+ ".inst 0x05262fe1 // tbx z1.b, z31.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252fe0 // tbx z0.b, z31.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 3b\n"
+ "b 17f\n"
+ "5:" // 256 bits
+ "mov z12.b, #0x20\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "6:" // 8 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 7f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "7:" // 8 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "addvl x21, x21, #-6\n"
+ ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
+ ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
+ ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
+ ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 6b\n"
+ "b 17f\n"
+ "8:" // 512 bits
+ "mov z12.b, #0x40\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "9:" // 4 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 10f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "10:" // 4 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "addvl x21, x21, #-6\n"
+ ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+ ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+ ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+ ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 9b\n"
+ "b 17f\n"
+ "11:" // 1024 bits
+ "mov z12.b, #0x80\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "12:" // 2 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 13f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "13:" // 2 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "addvl x21, x21, #-6\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 12b\n"
+ "b 17f\n"
+ "14:" // 2048 bits
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "15:" // 1 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 16f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "16:" // 1 rounds: predicate OK
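+        // At 2048-bit VL the whole 256-byte table fits in z16, so a single TBL
+        // per vector suffices and no TBX rounds are needed.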
+ "addvl x21, x21, #-6\n"
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "cmp x21, XZR\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 15b\n"
+ "17:" // SVE body done
+ "add x24, x24, #0x1\n"
+ "cmp x24, %x[num_strings]\n"
+ "bne 2b\n"
+ : [table] "+&r"(table)
+ : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1",
+ "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21",
+ "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31");
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // __aarch64__