Diffstat (limited to 'src/core/NEON/kernels/arm_conv/depthwise/interleaves')
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp               | 244
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp               | 244
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.cpp                       | 152
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.hpp                       |  82
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.cpp | 161
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.hpp |  53
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp                          |  59
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp               | 135
-rw-r--r-- src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp               | 135
9 files changed, 1265 insertions(+), 0 deletions(-)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
new file mode 100644
index 0000000000..3de4bdc1fb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__aarch64__)
+
+#include "arm_gemm.hpp"
+#include "utils.hpp"
+#include "depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_a64_s8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_a64_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors for every VL<int32_t> block of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels * args.channel_multiplier,
+ get_vector_length<int32_t>(arm_gemm::VLType::None)), 4lu
+ );
+ return n * 7 * get_vector_length<int8_t>(arm_gemm::VLType::None);
+}
+
+void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "cmp %x[ld_weight_col], XZR\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "movi v16.4s, #0x9\n"
+ "movi v31.16b, #0x0\n"
+ "mov x21, #0x3\n"
+ "mul x21, %x[ld_weight_col], x21\n"
+ "add x20, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v30.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v29.4s }, [x20]\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
+ "lsr x21, %x[n_channels], #0x2\n"
+ "movi v28.16b, #0x1\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x25, %x[weights], %x[ld_weight_row]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v26.4s }, [x20]\n"
+ "add x24, x25, %x[ld_weight_row]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "mov x22, #0x0\n"
+ "cbz x21, 4f\n"
+ "1:" // Loop
+ "movi v25.4s, #0x0\n"
+ "cbz %x[bias], 2f\n"
+ "ldr q25, [%x[bias], x22]\n"
+ "2:" // Loop: Skip bias load
+ "ldr s19, [%x[weights], #0x0]\n"
+ "ldr s16, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v17.16b, v16.16b, v31.16b\n"
+ "movi v21.4s, #0x0\n"
+ "ldr s16, [%x[weights], x23]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v20.16b, v16.16b, v17.16b\n"
+ "ldr s17, [x25, %x[ld_weight_col]]\n"
+ "ldr s16, [x25, x23]\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v17.16b, v31.16b\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s19, [x24, %x[ld_weight_col]]\n"
+ ".inst 0x4e949795 // sdot v21.4s, v28.16b, v20.16b\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x24, x23]\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v19.16b, v31.16b\n"
+ ".inst 0x4e929795 // sdot v21.4s, v28.16b, v18.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x4e909795 // sdot v21.4s, v28.16b, v16.16b\n"
+ "add %x[weights], %x[weights], #0x4\n"
+ "add x25, x25, #0x4\n"
+ "mls v25.4s, v21.4s, v30.4s\n"
+ "add x24, x24, #0x4\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "str q25, [%x[outptr], #0x0]\n"
+ "str q20, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ldr q27, [%x[rq_mul_perchannel], x22]\n"
+ "ldr q26, [%x[rq_shift_perchannel], x22]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "subs x21, x21, #0x1\n"
+ "str q27, [%x[outptr], #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "str q26, [%x[outptr], #0x10]\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "bgt 1b\n"
+ "tst %x[n_channels], #0x3\n"
+ "beq 13f\n"
+ "4:" // Oddments
+ "movi v25.4s, #0x0\n"
+ "cbz %x[bias], 7f\n"
+ "add %x[bias], %x[bias], x22\n"
+ "tbz %x[n_channels], #1, 5f\n"
+ "ld1 { v25.d }[0], [%x[bias]], #0x8\n"
+ "tbz %x[n_channels], #0, 6f\n"
+ "ld1 { v25.s }[2], [%x[bias]], #0x4\n"
+ "b 6f\n"
+ "5:" // Oddments: Load bias: Bit 1: Unset
+ "ld1 { v25.s }[0], [%x[bias]], #0x4\n"
+ "6:" // Oddments: Load bias: Bit 1: End
+ "7:" // Oddments: Skip bias load
+ "tbz %x[n_channels], #1, 8f\n"
+ "ld1 { v17.h }[0], [%x[weights]]\n"
+ "ld1 { v24.h }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v20.h }[0], [x21]\n"
+ "ld1 { v16.h }[0], [x20]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v19.h }[0], [x21]\n"
+ "ld1 { v18.h }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v23.h }[0], [x24]\n"
+ "ld1 { v22.h }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x2\n"
+ "add x25, x25, #0x2\n"
+ "ld1 { v21.h }[0], [x20]\n"
+ "add x24, x24, #0x2\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v17.b }[2], [%x[weights]]\n"
+ "ld1 { v24.b }[2], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v20.b }[2], [x21]\n"
+ "ld1 { v16.b }[2], [x20]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v23.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[2], [x20]\n"
+ "b 9f\n"
+ "8:" // Oddments: Load weights: Bit 1: Unset
+ "ld1 { v17.b }[0], [%x[weights]]\n"
+ "ld1 { v24.b }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v20.b }[0], [x21]\n"
+ "ld1 { v16.b }[0], [x20]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v19.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v23.b }[0], [x24]\n"
+ "ld1 { v22.b }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[0], [x20]\n"
+ "9:" // Oddments: Load weights: Bit 1: End
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v20.16b, v31.16b\n"
+ "zip1 v20.16b, v17.16b, v16.16b\n"
+ "zip1 v17.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v31.16b\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x4e949793 // sdot v19.4s, v28.16b, v20.16b\n"
+ "zip1 v18.16b, v17.16b, v16.16b\n"
+ "zip1 v17.16b, v23.16b, v21.16b\n"
+ ".inst 0x4e929793 // sdot v19.4s, v28.16b, v18.16b\n"
+ "zip1 v16.16b, v22.16b, v31.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x4e909793 // sdot v19.4s, v28.16b, v16.16b\n"
+ "mls v25.4s, v19.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "str q25, [%x[outptr], #0x0]\n"
+ "str q20, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 12f\n"
+ "add x21, %x[rq_mul_perchannel], x22\n"
+ "add x20, %x[rq_shift_perchannel], x22\n"
+ "tbz %x[n_channels], #1, 10f\n"
+ "ld1 { v27.d }[0], [x21], #0x8\n"
+ "ld1 { v26.d }[0], [x20], #0x8\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "ld1 { v26.s }[2], [x20], #0x4\n"
+ "b 11f\n"
+ "10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
+ "ld1 { v27.s }[0], [x21], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
+ "12:" // Oddments: Quantisation parameters: Store
+ "str q27, [%x[outptr], #0x0]\n"
+ "str q26, [%x[outptr], #0x10]\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "13:" // End
+ : [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)
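The zip1/sdot sequence above implements a byte interleave that is easier to see in scalar form: for each channel, the three kernel-column weights of one kernel row are gathered into a four-byte group padded with a zero, so each lane of the 4-way dot product consumes one whole kernel row. A minimal scalar sketch of that layout (not the kernel itself, and assuming dense channel-major weight rows):

#include <cstdint>

// Pack one kernel row: c0/c1/c2 point at the weights of the three kernel
// columns for n_channels consecutive channels.
static void interleave_row_model(const int8_t *c0, const int8_t *c1,
                                 const int8_t *c2, int8_t *out,
                                 unsigned int n_channels)
{
  for (unsigned int i = 0; i < n_channels; i++)
  {
    out[4 * i + 0] = c0[i];
    out[4 * i + 1] = c1[i];
    out[4 * i + 2] = c2[i];
    out[4 * i + 3] = 0;  // zero pad completes the four-element dot group
  }
}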
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
new file mode 100644
index 0000000000..19264c9fce
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__aarch64__)
+
+#include "arm_gemm.hpp"
+#include "utils.hpp"
+#include "depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_a64_u8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_a64_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors for every VL<int32_t> block of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels * args.channel_multiplier,
+ get_vector_length<int32_t>(arm_gemm::VLType::None)), 4lu
+ );
+ return n * 7 * get_vector_length<uint8_t>(arm_gemm::VLType::None);
+}
+
+void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "cmp %x[ld_weight_col], XZR\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "movi v16.4s, #0x9\n"
+ "movi v31.16b, #0x0\n"
+ "mov x21, #0x3\n"
+ "mul x21, %x[ld_weight_col], x21\n"
+ "add x20, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v30.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v29.4s }, [x20]\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
+ "lsr x21, %x[n_channels], #0x2\n"
+ "movi v28.16b, #0x1\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x25, %x[weights], %x[ld_weight_row]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v26.4s }, [x20]\n"
+ "add x24, x25, %x[ld_weight_row]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "mov x22, #0x0\n"
+ "cbz x21, 4f\n"
+ "1:" // Loop
+ "movi v25.4s, #0x0\n"
+ "cbz %x[bias], 2f\n"
+ "ldr q25, [%x[bias], x22]\n"
+ "2:" // Loop: Skip bias load
+ "ldr s19, [%x[weights], #0x0]\n"
+ "ldr s16, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v17.16b, v16.16b, v31.16b\n"
+ "movi v21.4s, #0x0\n"
+ "ldr s16, [%x[weights], x23]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v20.16b, v16.16b, v17.16b\n"
+ "ldr s17, [x25, %x[ld_weight_col]]\n"
+ "ldr s16, [x25, x23]\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v17.16b, v31.16b\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s19, [x24, %x[ld_weight_col]]\n"
+ ".inst 0x6e949795 // udot v21.4s, v28.16b, v20.16b\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "ldr s16, [x24, x23]\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v19.16b, v31.16b\n"
+ ".inst 0x6e929795 // udot v21.4s, v28.16b, v18.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x6e909795 // udot v21.4s, v28.16b, v16.16b\n"
+ "add %x[weights], %x[weights], #0x4\n"
+ "add x25, x25, #0x4\n"
+ "mls v25.4s, v21.4s, v30.4s\n"
+ "add x24, x24, #0x4\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "str q25, [%x[outptr], #0x0]\n"
+ "str q20, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ldr q27, [%x[rq_mul_perchannel], x22]\n"
+ "ldr q26, [%x[rq_shift_perchannel], x22]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "subs x21, x21, #0x1\n"
+ "str q27, [%x[outptr], #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "str q26, [%x[outptr], #0x10]\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "bgt 1b\n"
+ "tst %x[n_channels], #0x3\n"
+ "beq 13f\n"
+ "4:" // Oddments
+ "movi v25.4s, #0x0\n"
+ "cbz %x[bias], 7f\n"
+ "add %x[bias], %x[bias], x22\n"
+ "tbz %x[n_channels], #1, 5f\n"
+ "ld1 { v25.d }[0], [%x[bias]], #0x8\n"
+ "tbz %x[n_channels], #0, 6f\n"
+ "ld1 { v25.s }[2], [%x[bias]], #0x4\n"
+ "b 6f\n"
+ "5:" // Oddments: Load bias: Bit 1: Unset
+ "ld1 { v25.s }[0], [%x[bias]], #0x4\n"
+ "6:" // Oddments: Load bias: Bit 1: End
+ "7:" // Oddments: Skip bias load
+ "tbz %x[n_channels], #1, 8f\n"
+ "ld1 { v17.h }[0], [%x[weights]]\n"
+ "ld1 { v24.h }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v20.h }[0], [x21]\n"
+ "ld1 { v16.h }[0], [x20]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v19.h }[0], [x21]\n"
+ "ld1 { v18.h }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v23.h }[0], [x24]\n"
+ "ld1 { v22.h }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x2\n"
+ "add x25, x25, #0x2\n"
+ "ld1 { v21.h }[0], [x20]\n"
+ "add x24, x24, #0x2\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v17.b }[2], [%x[weights]]\n"
+ "ld1 { v24.b }[2], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v20.b }[2], [x21]\n"
+ "ld1 { v16.b }[2], [x20]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v23.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[2], [x20]\n"
+ "b 9f\n"
+ "8:" // Oddments: Load weights: Bit 1: Unset
+ "ld1 { v17.b }[0], [%x[weights]]\n"
+ "ld1 { v24.b }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v20.b }[0], [x21]\n"
+ "ld1 { v16.b }[0], [x20]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v19.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v23.b }[0], [x24]\n"
+ "ld1 { v22.b }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[0], [x20]\n"
+ "9:" // Oddments: Load weights: Bit 1: End
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v20.16b, v31.16b\n"
+ "zip1 v20.16b, v17.16b, v16.16b\n"
+ "zip1 v17.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v31.16b\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x6e949793 // udot v19.4s, v28.16b, v20.16b\n"
+ "zip1 v18.16b, v17.16b, v16.16b\n"
+ "zip1 v17.16b, v23.16b, v21.16b\n"
+ ".inst 0x6e929793 // udot v19.4s, v28.16b, v18.16b\n"
+ "zip1 v16.16b, v22.16b, v31.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x6e909793 // udot v19.4s, v28.16b, v16.16b\n"
+ "mls v25.4s, v19.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "str q25, [%x[outptr], #0x0]\n"
+ "str q20, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 12f\n"
+ "add x21, %x[rq_mul_perchannel], x22\n"
+ "add x20, %x[rq_shift_perchannel], x22\n"
+ "tbz %x[n_channels], #1, 10f\n"
+ "ld1 { v27.d }[0], [x21], #0x8\n"
+ "ld1 { v26.d }[0], [x20], #0x8\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v27.s }[2], [x21], #0x4\n"
+ "ld1 { v26.s }[2], [x20], #0x4\n"
+ "b 11f\n"
+ "10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
+ "ld1 { v27.s }[0], [x21], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
+ "12:" // Oddments: Quantisation parameters: Store
+ "str q27, [%x[outptr], #0x0]\n"
+ "str q26, [%x[outptr], #0x10]\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "13:" // End
+ : [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)
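The value returned by get_packed_size is easy to check by hand. A minimal standalone sketch for the NEON case (16-byte vectors, hence four int32 channels per vector; the channel count of 10 is just an example):

#include <cstdio>

int main()
{
  // Mirrors get_packed_size: iceildiv(10, 4) = 3 vectors of channels,
  // rounded up to a multiple of 4 gives n = 4, and each unit costs
  // 7 vectors of 16 bytes: 4 * 7 * 16 = 448 bytes.
  const unsigned int channels = 10;
  const unsigned int n = (((channels + 3) / 4) + 3) / 4 * 4;
  std::printf("%u bytes\n", n * 7 * 16);
  return 0;
}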
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.cpp
new file mode 100644
index 0000000000..dc505a013d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "generic.hpp"
+
+#include <algorithm>
+#include <cstring>
+#include <functional>
+
+namespace arm_conv {
+namespace depthwise {
+namespace interleaves {
+
+PackingArguments::PackingArguments(
+ unsigned int kernel_rows, unsigned int kernel_cols, size_t weight_element_size,
+ bool include_bias, size_t bias_element_size, bool premultiply,
+ arm_gemm::VLType vl_type, size_t accumulator_element_size, unsigned int accumulator_depth_vl,
+ std::function<bool(unsigned int, unsigned int &, unsigned int &)> get_weight_pos
+) : kernel_rows(kernel_rows), kernel_cols(kernel_cols), weight_element_size(weight_element_size),
+ include_bias(include_bias), bias_element_size(bias_element_size), premultiply(premultiply),
+ vl_type(vl_type), accumulator_element_size(accumulator_element_size), accumulator_depth_vl(accumulator_depth_vl),
+ get_weight_pos(get_weight_pos)
+{
+}
+
+size_t get_storage_size_generic(const PackingArguments &packing_args, const DepthwiseArgs &args)
+{
+ // If the channel multiplier is greater than one, then we treat this as a
+ // repeated packing of `channel_multiplier`-sized problems.
+ if (args.channel_multiplier > 1 && !packing_args.premultiply)
+ {
+ DepthwiseArgs args_per_input_channel(args);
+ args_per_input_channel.input_channels = args.channel_multiplier;
+ args_per_input_channel.channel_multiplier = 1;
+
+ return args.input_channels * get_storage_size_generic(packing_args, args_per_input_channel);
+ }
+
+ const unsigned int vl =
+ packing_args.accumulator_depth_vl *
+ arm_gemm::utils::get_vector_length<uint8_t>(packing_args.vl_type) / packing_args.accumulator_element_size;
+ const unsigned int n_packs = arm_gemm::iceildiv(args.input_channels * args.channel_multiplier, vl);
+ const auto pack_size = (packing_args.include_bias ? packing_args.bias_element_size : 0) +
+ packing_args.kernel_points() * packing_args.weight_element_size;
+ return n_packs * pack_size * vl;
+}
+
+void pack_parameters_generic(
+ const PackingArguments &packing_args,
+ const DepthwiseArgs &args,
+ void *buffer_raw,
+ const void *biases_raw,
+ const void *weights_raw,
+ size_t ld_weight_col,
+ size_t ld_weight_row
+)
+{
+ // Cast the pointers to byte pointers so that arithmetic is in bytes
+ auto *buffer = static_cast<uint8_t *>(buffer_raw);
+ auto *biases = static_cast<const uint8_t *>(biases_raw);
+ auto *weights = static_cast<const uint8_t *>(weights_raw);
+
+ // If the channel multiplier is greater than one, then we treat this as a
+ // repeated packing of `channel_multiplier`-sized problems.
+ if (args.channel_multiplier > 1 && !packing_args.premultiply)
+ {
+ // Get a modified copy of the depthwise arguments
+ DepthwiseArgs args_per_input_channel(args);
+ args_per_input_channel.input_channels = args.channel_multiplier;
+ args_per_input_channel.channel_multiplier = 1;
+
+ // Resolve the strides here
+ ld_weight_col = ld_weight_col ? ld_weight_col : args.input_channels * args.channel_multiplier;
+ ld_weight_row = ld_weight_row ? ld_weight_row : ld_weight_col * packing_args.kernel_cols;
+
+ auto per_input_channel_size = get_storage_size_generic(packing_args, args_per_input_channel);
+
+ for (unsigned int c = 0; c < args.input_channels; c++)
+ {
+ pack_parameters_generic(
+ packing_args, args_per_input_channel, buffer, biases, weights, ld_weight_col, ld_weight_row);
+
+ // Update the pointers
+ buffer += per_input_channel_size;
+ biases += (biases == nullptr) ? 0 : packing_args.bias_element_size * args.channel_multiplier;
+ weights += packing_args.weight_element_size * args.channel_multiplier;
+ }
+ return;
+ }
+
+ auto input_channels = args.input_channels * args.channel_multiplier;
+
+ // Finalise the weight strides
+ ld_weight_col = (ld_weight_col == 0) ? input_channels : ld_weight_col;
+ ld_weight_row = (ld_weight_row == 0) ? packing_args.kernel_cols * ld_weight_col : ld_weight_row;
+
+ const unsigned int vl =
+ packing_args.accumulator_depth_vl *
+ arm_gemm::utils::get_vector_length<uint8_t>(packing_args.vl_type) / packing_args.accumulator_element_size;
+
+ for (unsigned int n = 0; n < input_channels; n += vl)
+ {
+ const unsigned int todo = std::min(vl, input_channels - n);
+
+ if (packing_args.include_bias)
+ {
+ if (biases != nullptr)
+ {
+ memcpy(buffer, biases, todo * packing_args.bias_element_size);
+ biases += todo * packing_args.bias_element_size;
+ }
+ else
+ {
+ memset(buffer, 0, vl * packing_args.bias_element_size);
+ }
+
+ buffer += vl * packing_args.bias_element_size;
+ }
+
+ // Copy each of the weights in turn
+ unsigned int kx, ky;
+ for (unsigned int kindex = 0; packing_args.get_weight_pos(kindex, kx, ky); kindex++)
+ {
+ const auto src_ptr = weights + (kx*ld_weight_row + ky*ld_weight_col + n) * packing_args.weight_element_size;
+ memcpy(buffer, src_ptr, todo * packing_args.weight_element_size);
+ buffer += vl * packing_args.weight_element_size;
+ }
+ }
+}
+
+} // namespace interleaves
+} // namespace depthwise
+} // namespace arm_conv
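A worked example may help with the storage-size arithmetic above. Under assumed NEON parameters (16-byte vectors, 4-byte accumulators, accumulator_depth_vl of 1), a 3x3 fp32 kernel with bias over 10 channels packs as follows; this sketch mirrors the formula rather than calling the library:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
  const unsigned int vl = 1 * 16 / sizeof(int32_t);           // 4 channels per pack
  const unsigned int channels = 10;                           // input_channels * multiplier
  const unsigned int n_packs = (channels + vl - 1) / vl;      // iceildiv -> 3
  const size_t pack_size = sizeof(float) + 9 * sizeof(float); // bias + 3x3 weights
  std::printf("%zu bytes\n", n_packs * pack_size * vl);       // 480 bytes
  return 0;
}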
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.hpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.hpp
new file mode 100644
index 0000000000..1842f10150
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic.hpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "utils.hpp"
+#include "depthwise.hpp"
+
+#include <functional>
+
+namespace arm_conv {
+namespace depthwise {
+namespace interleaves {
+
+struct PackingArguments
+{
+ const unsigned int kernel_rows;
+ const unsigned int kernel_cols;
+ const size_t weight_element_size;
+ const bool include_bias;
+ const size_t bias_element_size;
+ const bool premultiply;
+ arm_gemm::VLType vl_type;
+ const size_t accumulator_element_size;
+ const unsigned int accumulator_depth_vl;
+ std::function<bool(unsigned int, unsigned int &, unsigned int &)> get_weight_pos;
+
+ unsigned int kernel_points(void) const { return kernel_cols * kernel_rows; }
+
+ PackingArguments(
+ unsigned int kernel_rows,
+ unsigned int kernel_cols,
+ size_t weight_element_size,
+ bool include_bias,
+ size_t bias_element_size,
+ bool premultiply,
+ arm_gemm::VLType vl_type,
+ size_t accumulator_element_size,
+ unsigned int accumulator_depth_vl,
+ std::function<bool(unsigned int, unsigned int &, unsigned int &)> get_weight_pos
+ );
+};
+
+size_t get_storage_size_generic(
+ const PackingArguments &packing_args,
+ const DepthwiseArgs &args
+);
+
+void pack_parameters_generic(
+ const PackingArguments &packing_args,
+ const DepthwiseArgs &args,
+ void *buffer_raw,
+ const void *biases_raw,
+ const void *weights_raw,
+ size_t ld_weight_col,
+ size_t ld_weight_row
+);
+
+} // namespace interleaves
+} // namespace depthwise
+} // namespace arm_conv
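Nothing in this header shows how get_weight_pos is expected to behave, so a hedged construction sketch may help: the callback maps a linear point index to a (row, column) pair and returns false once the kernel is exhausted; pack_parameters_generic calls it with indices 0, 1, 2, ... in turn. All concrete values below are illustrative assumptions, not taken from any kernel:

#include "generic.hpp"

arm_conv::depthwise::interleaves::PackingArguments make_example_args()
{
  return arm_conv::depthwise::interleaves::PackingArguments(
    3, 3,                    // kernel_rows, kernel_cols
    sizeof(float),           // weight_element_size
    true, sizeof(float),     // include_bias, bias_element_size
    false,                   // premultiply
    arm_gemm::VLType::None,  // plain NEON
    sizeof(float),           // accumulator_element_size
    1,                       // accumulator_depth_vl
    [](unsigned int kindex, unsigned int &kx, unsigned int &ky) -> bool
    {
      kx = kindex / 3;  // kernel row (multiplies ld_weight_row)
      ky = kindex % 3;  // kernel column (multiplies ld_weight_col)
      return kindex < 3 * 3;
    });
}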
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.cpp
new file mode 100644
index 0000000000..a6389054d1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "generic_quantized_dot_product.hpp"
+#include <algorithm>
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+namespace interleaves {
+namespace quantized {
+
+size_t get_storage_size(
+ const DepthwiseArgs &args,
+ const arm_gemm::VLType vl_type,
+ const unsigned int accumulator_depth_vl
+)
+{
+ // We produce VL<int32_t> channels at a time; for each of these blocks of
+ // channels we store a vector of biases, the interleaved weights and the
+ // requantisation parameters.
+ const unsigned int iter_length = accumulator_depth_vl * arm_gemm::utils::get_vector_length<int32_t>(vl_type);
+ const unsigned int n_iters = args.input_channels * arm_gemm::iceildiv(args.channel_multiplier, iter_length);
+
+ // Compute the cost of storing the weights
+ const unsigned int n_dots_per_kernel_row = arm_gemm::iceildiv(args.kernel_cols, 4u);
+
+ return n_iters * iter_length * (
+ sizeof(int32_t) + // Bias
+ 4 * n_dots_per_kernel_row * args.kernel_rows * sizeof(int8_t) + // Weights
+ 2 * sizeof(int32_t) // Requantisation parameters
+ );
+}
+
+template <typename T>
+void pack_parameters(
+ void *_buffer, const int32_t *biases,
+ const T *weights, size_t ld_weight_col, size_t ld_weight_row,
+ const DepthwiseArgs &args,
+ const arm_gemm::Requantize32 &qp,
+ const arm_gemm::VLType vl_type,
+ const unsigned int accumulator_depth_vl
+)
+{
+ auto buffer = static_cast<uint8_t *>(_buffer);
+ auto requant_muls = qp.per_channel_muls;
+ auto requant_shifts = qp.per_channel_right_shifts;
+
+ const unsigned int iter_length = accumulator_depth_vl * arm_gemm::utils::get_vector_length<int32_t>(vl_type);
+ const unsigned int n_iters_per_input_channel = arm_gemm::iceildiv(args.channel_multiplier, iter_length);
+ const unsigned int n_dots_per_kernel_row = arm_gemm::iceildiv(args.kernel_cols, 4u);
+
+ const size_t iter_stride = iter_length * (
+ sizeof(int32_t) + // Bias
+ 4 * n_dots_per_kernel_row * args.kernel_rows * sizeof(T) + // Weights
+ 2 * sizeof(int32_t) // Requantisation parameters
+ );
+
+ ld_weight_col = (ld_weight_col == 0) ? args.input_channels * args.channel_multiplier : ld_weight_col;
+ ld_weight_row = (ld_weight_row == 0) ? args.kernel_cols * ld_weight_col : ld_weight_row;
+
+ for (unsigned int input_channel = 0; input_channel < args.input_channels; input_channel++)
+ {
+ auto buffer_input_channel = buffer + input_channel * n_iters_per_input_channel * iter_stride;
+ auto weights_input_channel = weights + input_channel * args.channel_multiplier;
+
+ for (unsigned int iter = 0; iter < n_iters_per_input_channel; iter++)
+ {
+ // Get a pointer to the start of this portion of the buffer; from it,
+ // derive pointers to the bias, weight and requantisation portions of
+ // this frame.
+ auto buffer_base = buffer_input_channel + iter_stride * iter;
+ auto buffer_biases = reinterpret_cast<int32_t *>(buffer_base);
+ auto buffer_weights = buffer_base + sizeof(int32_t) * iter_length;
+ auto buffer_requant_mul = reinterpret_cast<int32_t *>(
+ buffer_weights + args.kernel_rows * n_dots_per_kernel_row * 4 * iter_length);
+ auto buffer_requant_shift = buffer_requant_mul + iter_length;
+ auto weights_base = weights_input_channel + iter * iter_length;
+
+ // Hence work through the data for this iteration, on a
+ // channel-by-channel basis.
+ const auto this_iter_length = std::min<unsigned int>(
+ iter_length, args.channel_multiplier - iter * iter_length
+ );
+ for (unsigned int i = 0; i < this_iter_length; i++)
+ {
+ auto weights_channel = weights_base + i;
+
+ // Read the bias value; we modify it as we read the weights.
+ auto bias_value = biases == nullptr ? 0 : *(biases++);
+ int32_t elements_sum = 0;
+
+ // Read through the kernel; for each row, marshal together as many dot
+ // product terms as are required.
+ for (unsigned int ki = 0; ki < args.kernel_rows; ki++)
+ {
+ auto buffer_row = buffer_weights + i*4 + ki * 4 * n_dots_per_kernel_row * iter_length;
+ auto weights_row = weights_channel + ki * ld_weight_row;
+
+ unsigned int kj = 0;
+ for (; kj < args.kernel_cols; kj++)
+ {
+ // Determine which element we're writing to
+ const auto dot = kj / 4;
+ const auto elem = kj % 4;
+
+ // Copy the value; include in the sum
+ const auto val = weights_row[kj * ld_weight_col];
+ buffer_row[dot * 4 * iter_length + elem] = val;
+ elements_sum += val;
+ }
+ for (; kj < 4 * n_dots_per_kernel_row; kj++)
+ {
+ const auto dot = kj / 4;
+ const auto elem = kj % 4;
+ buffer_row[dot * 4 * iter_length + elem] = 0;
+ }
+
+ buffer_row += 4 * n_dots_per_kernel_row * iter_length;
+ }
+
+ // Write back the bias and offset values
+ *(buffer_biases++) =
+ bias_value - qp.a_offset * elements_sum +
+ args.kernel_rows * args.kernel_cols * qp.a_offset * qp.b_offset;
+
+ // Write out the requantisation parameters
+ *(buffer_requant_mul++) = qp.per_channel_requant ? *(requant_muls++) : qp.per_layer_mul;
+ *(buffer_requant_shift++) = qp.per_channel_requant ? *(requant_shifts++) : qp.per_layer_right_shift;
+ }
+ }
+ }
+}
+
+template void pack_parameters(void *, const int32_t *, const int8_t *, size_t, size_t, const DepthwiseArgs &, const arm_gemm::Requantize32 &, arm_gemm::VLType, unsigned int);
+template void pack_parameters(void *, const int32_t *, const uint8_t *, size_t, size_t, const DepthwiseArgs &, const arm_gemm::Requantize32 &, arm_gemm::VLType, unsigned int);
+
+} // namespace quantized
+} // namespace interleaves
+} // namespace depthwise
+} // namespace arm_conv
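The bias write-back above folds every input-independent term of the quantised dot product into the stored bias. Expanding sum_k (w_k - w_off) * (x_k - x_off) gives sum_k w_k*x_k - x_off*sum_k w_k - w_off*sum_k x_k + K*x_off*w_off; the second and fourth terms depend only on the weights, so they are absorbed at packing time. A sketch of that fold (not the library API; a_offset plays the role of x_off, b_offset of w_off):

#include <cstdint>

static int32_t fold_bias(int32_t bias, int32_t sum_of_weights,
                         int32_t x_off, int32_t w_off, int32_t kernel_points)
{
  return bias - x_off * sum_of_weights + kernel_points * x_off * w_off;
}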
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.hpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.hpp
new file mode 100644
index 0000000000..779d67d3f4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/generic_quantized_dot_product.hpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "generic.hpp"
+
+namespace arm_conv {
+namespace depthwise {
+namespace interleaves {
+namespace quantized {
+
+size_t get_storage_size(
+ const DepthwiseArgs &args,
+ arm_gemm::VLType vl_type,
+ unsigned int accumulator_depth_vl=1
+);
+
+template <typename T>
+void pack_parameters(
+ void *buffer, const int32_t *biases,
+ const T *weights, size_t ld_weight_col, size_t ld_weight_row,
+ const DepthwiseArgs &args,
+ const arm_gemm::Requantize32 &qp,
+ arm_gemm::VLType vl_type,
+ unsigned int accumulator_depth_vl
+);
+
+} // namespace quantized
+} // namespace interleaves
+} // namespace depthwise
+} // namespace arm_conv
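A hedged usage sketch of this interface: size the buffer first, then pack. Here args and qp are assumed to be fully-populated DepthwiseArgs and Requantize32 instances, and zero strides request the dense row-major weight layout (resolved inside pack_parameters):

#include "generic_quantized_dot_product.hpp"
#include <cstdint>
#include <vector>

void pack_example(const arm_conv::depthwise::DepthwiseArgs &args,
                  const int32_t *biases, const int8_t *weights,
                  const arm_gemm::Requantize32 &qp)
{
  namespace q = arm_conv::depthwise::interleaves::quantized;
  std::vector<uint8_t> buffer(q::get_storage_size(args, arm_gemm::VLType::SVE));
  q::pack_parameters<int8_t>(buffer.data(), biases, weights,
                             0, 0,  // ld_weight_col, ld_weight_row: dense
                             args, qp, arm_gemm::VLType::SVE, 1);
}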
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp
new file mode 100644
index 0000000000..76f38eb335
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace depthwise {
+
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+
+struct interleave_sve_u8q_3x3_dot
+{
+ static void pack_parameters(unsigned int, void *, const int32_t *, const uint8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+struct interleave_sve_s8q_3x3_dot
+{
+ static void pack_parameters(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+#endif // defined(ARM_COMPUTE_ENABLE_SVE)
+
+struct interleave_a64_u8q_3x3_dot
+{
+ static void pack_parameters(unsigned int, void *, const int32_t *, const uint8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+struct interleave_a64_s8q_3x3_dot
+{
+ static void pack_parameters(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+} // namespace depthwise
+} // namespace arm_conv
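Because every struct in this list exposes the same pair of static members, callers can be written generically over them. A sketch (assuming the surrounding arm_conv::depthwise namespace and the DepthwiseArgs fields used by the definitions above):

template <typename Interleave, typename TWeight>
void pack_with(const DepthwiseArgs &args, void *outptr, const int32_t *bias,
               const TWeight *weights, const arm_gemm::Requantize32 &qp)
{
  // Zero strides let pack_parameters resolve the dense defaults itself.
  Interleave::pack_parameters(args.input_channels * args.channel_multiplier,
                              outptr, bias, weights, qp, 0, 0);
}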
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
new file mode 100644
index 0000000000..5d7b54f235
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+
+#include "arm_gemm.hpp"
+#include "utils.hpp"
+#include "depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_sve_s8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_sve_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors for every VL<int32_t> block of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels * args.channel_multiplier,
+ get_vector_length<int32_t>(arm_gemm::VLType::SVE)), 4lu
+ );
+ return n * 7 * get_vector_length<int8_t>(arm_gemm::VLType::SVE);
+}
+
+void interleave_sve_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "cmp %x[ld_weight_col], XZR\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mov z16.s, #0x9\n"
+ "mov z28.b, #0x0\n"
+ "mov x20, #0x3\n"
+ "ptrue p2.b\n"
+ "mul x20, %x[ld_weight_col], x20\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
+ "mov z25.b, #0x1\n"
+ "mul z26.s, p2/M, z26.s, z27.s\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "mov x21, #0x0\n"
+ "mul z26.s, p2/M, z26.s, z16.s\n"
+ "pfalse p8.b\n"
+ "cbz %x[bias], 1f\n"
+ "ptrue p8.s\n"
+ "1:" // No bias
+ "2:" // Loop
+ "cntp x20, p2, p1.s\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
+ "zip1 z20.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x24, x22]\n"
+ "zip1 z22.b, z20.b, z19.b\n"
+ "zip1 z21.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "mov z20.s, #0x0\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x22]\n"
+ "sdot z20.s, z25.b, z22.b\n"
+ "zip1 z19.b, z21.b, z19.b\n"
+ "sdot z20.s, z25.b, z19.b\n"
+ "zip1 z18.b, z18.b, z16.b\n"
+ "zip1 z16.b, z17.b, z28.b\n"
+ "and p0.b, p2/Z, p8.b, p1.b\n"
+ "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "sdot z20.s, z25.b, z16.b\n"
+ "mls z17.s, p2/M, z20.s, z27.s\n"
+ "add %x[weights], %x[weights], x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add z17.s, z17.s, z26.s\n"
+ "st1w { z17.s }, p2, [%x[outptr]]\n"
+ "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
+ "addvl %x[outptr], %x[outptr], #4\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "incw x21\n"
+ "whilelt p1.s, x21, %x[n_channels]\n"
+ "st1w { z24.s }, p2, [%x[outptr]]\n"
+ "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "addvl %x[outptr], %x[outptr], #2\n"
+ "b.any 2b\n"
+ : [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SVE)
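The two csel instructions at the top of this kernel (and of the a64 variants) implement the same stride defaulting as the generic packers; in C++ terms, roughly:

#include <cstddef>

// A sketch of the csel stride defaulting: a zero stride selects the dense
// layout, with rows defaulting to three columns for the 3x3 kernel.
static void resolve_strides(size_t &ld_weight_col, size_t &ld_weight_row,
                            unsigned int n_channels)
{
  ld_weight_col = ld_weight_col ? ld_weight_col : n_channels;
  ld_weight_row = ld_weight_row ? ld_weight_row : 3 * ld_weight_col;
}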
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
new file mode 100644
index 0000000000..c3da81448b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2021, 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+
+#include "arm_gemm.hpp"
+#include "utils.hpp"
+#include "depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_sve_u8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_sve_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors for every VL<int32_t> block of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels * args.channel_multiplier,
+ get_vector_length<int32_t>(arm_gemm::VLType::SVE)), 4lu
+ );
+ return n * 7 * get_vector_length<uint8_t>(arm_gemm::VLType::SVE);
+}
+
+void interleave_sve_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "cmp %x[ld_weight_col], XZR\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mov z16.s, #0x9\n"
+ "mov z28.b, #0x0\n"
+ "mov x20, #0x3\n"
+ "ptrue p2.b\n"
+ "mul x20, %x[ld_weight_col], x20\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
+ "mov z25.b, #0x1\n"
+ "mul z26.s, p2/M, z26.s, z27.s\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "mov x21, #0x0\n"
+ "mul z26.s, p2/M, z26.s, z16.s\n"
+ "pfalse p8.b\n"
+ "cbz %x[bias], 1f\n"
+ "ptrue p8.s\n"
+ "1:" // No bias
+ "2:" // Loop
+ "cntp x20, p2, p1.s\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
+ "zip1 z20.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x24, x22]\n"
+ "zip1 z22.b, z20.b, z19.b\n"
+ "zip1 z21.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "mov z20.s, #0x0\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x22]\n"
+ "udot z20.s, z25.b, z22.b\n"
+ "zip1 z19.b, z21.b, z19.b\n"
+ "udot z20.s, z25.b, z19.b\n"
+ "zip1 z18.b, z18.b, z16.b\n"
+ "zip1 z16.b, z17.b, z28.b\n"
+ "and p0.b, p2/Z, p8.b, p1.b\n"
+ "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "udot z20.s, z25.b, z16.b\n"
+ "mls z17.s, p2/M, z20.s, z27.s\n"
+ "add %x[weights], %x[weights], x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add z17.s, z17.s, z26.s\n"
+ "st1w { z17.s }, p2, [%x[outptr]]\n"
+ "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
+ "addvl %x[outptr], %x[outptr], #4\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "incw x21\n"
+ "whilelt p1.s, x21, %x[n_channels]\n"
+ "st1w { z24.s }, p2, [%x[outptr]]\n"
+ "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "addvl %x[outptr], %x[outptr], #2\n"
+ "b.any 2b\n"
+ : [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SVE)
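One last detail worth calling out: both SVE kernels preload the per-layer requantisation parameters (z24/z23) and simply overwrite them with predicated per-channel loads when rq_mul_perchannel is non-null, keeping the store path branch-free. The generic packer makes the same selection explicit; a one-line sketch of it:

#include <cstdint>

static int32_t select_requant_param(const int32_t *per_channel, unsigned int c,
                                    int32_t per_layer)
{
  return per_channel ? per_channel[c] : per_layer;
}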