path: root/src/core/NEON/kernels/arm_conv/depthwise/interleaves
author     Michele Di Giorgio <michele.digiorgio@arm.com>  2021-01-22 09:47:04 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>  2021-06-18 10:33:48 +0000
commit     d02d5edfa15ba6c04a9986a8a362a945cb38ac31 (patch)
tree       ced4f49691d6c7038e347a8709b315bff59c64cf /src/core/NEON/kernels/arm_conv/depthwise/interleaves
parent     b014c27ba6db9840e4a72519760d51a87a2af7e7 (diff)
download   ComputeLibrary-d02d5edfa15ba6c04a9986a8a362a945cb38ac31.tar.gz
Integrate improved CPU depthwise convolution kernels
* Replace assembly kernels for depthwise convolution with more optimized ones.
* Add int8 assembly kernels.
* Fix implicit padding on optimized kernels.

Resolves: COMPMID-3867, COMPMID-4361

Change-Id: I0b0867e05f61be4f368f62190d55e14d0ab3ebf2
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5622
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_conv/depthwise/interleaves')
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/8b_mla.cpp            128
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp   250
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp   250
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp              119
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp   136
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp   136
6 files changed, 1019 insertions, 0 deletions
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/8b_mla.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/8b_mla.cpp
new file mode 100644
index 0000000000..6c5ef23684
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/8b_mla.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_gemm.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "src/core/NEON/kernels/assembly/depthwise.hpp"
+#include <cstdint>
+#include <cstring>
+
+using namespace arm_gemm;
+
+size_t generic_get_packed_size(
+ const VLType vec_type,
+ const unsigned int acc_depth,
+ const unsigned int kernel_rows,
+ const unsigned int kernel_cols,
+ const unsigned int n_input_channels
+)
+{
+ const auto per_iter = acc_depth * arm_gemm::utils::get_vector_length<int32_t>(vec_type);
+ return arm_gemm::roundup((long unsigned int) n_input_channels, per_iter) * kernel_rows * kernel_cols * sizeof(int8_t);
+}
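+
+// Worked example (editorial comment, not part of the patch): on the 128-bit
+// NEON path, get_vector_length<int32_t>(VLType::None) == 4, so with
+// acc_depth == 2 each iteration covers per_iter == 8 channels. For a 3x3
+// kernel and 10 input channels the packed size is
+//   roundup(10, 8) * 3 * 3 * sizeof(int8_t) == 16 * 9 * 1 == 144 bytes.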
+
+void generic_pack(
+ const VLType vec_type,
+ const unsigned int acc_depth,
+ const unsigned int kernel_rows,
+ const unsigned int kernel_cols,
+ const unsigned int n_channels,
+ void *_outptr,
+ const void *_weights,
+ size_t ld_weight_col,
+ size_t ld_weight_row
+)
+{
+ int8_t *outptr = reinterpret_cast<int8_t *>(_outptr);
+ const int8_t *weights = reinterpret_cast<const int8_t *>(_weights);
+
+ // Get the strides
+ ld_weight_col = (ld_weight_col == 0) ? n_channels * sizeof(int8_t) : ld_weight_col;
+ ld_weight_row = (ld_weight_row == 0) ? kernel_cols * ld_weight_col : ld_weight_row;
+
+ // Pack into per-iter chunks.
+ const auto per_iter = acc_depth * arm_gemm::utils::get_vector_length<int32_t>(vec_type);
+ for (unsigned int c = 0; c < n_channels; c += per_iter)
+ {
+ auto weight_row = weights + c;
+ const auto to_copy = std::min<unsigned int>(per_iter, n_channels - c);
+
+ for (unsigned int i = 0; i < kernel_rows; i++)
+ {
+ auto weight_col = weight_row;
+
+ for (unsigned int j = 0; j < kernel_cols; j++)
+ {
+ memcpy(outptr, weight_col, to_copy);
+ outptr += per_iter;
+ weight_col += ld_weight_col;
+ }
+
+ weight_row += ld_weight_row;
+ }
+ }
+}
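+
+// Usage sketch (editorial comment with hypothetical values, not part of the
+// patch): pack a dense 3x3 kernel's worth of int8 weights for 10 channels.
+// Passing 0 for both strides makes the function derive the dense defaults
+// (ld_weight_col = n_channels, ld_weight_row = kernel_cols * ld_weight_col):
+//
+//   std::vector<int8_t> packed(generic_get_packed_size(VLType::None, 2, 3, 3, 10));
+//   generic_pack(VLType::None, 2, 3, 3, 10, packed.data(), weights, 0, 0);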
+
+
+#define ADD_IMPLEMENTATION(ARCH, TYPENAME, TYPE, VEC_TYPE, ACC_DEPTH, KERN_ROWS, KERN_COLS) \
+struct interleave_ ## ARCH ## _ ## TYPENAME ## _ ## KERN_ROWS ## x ## KERN_COLS ## _mla \
+{ \
+ static size_t get_packed_size(const DepthwiseArgs &args); \
+ static void pack_parameters( \
+ unsigned int n_channels, void *outptr, \
+ const TYPE *weights, size_t ld_weight_col, size_t ld_weight_row \
+ ); \
+}; \
+\
+size_t interleave_ ## ARCH ## _ ## TYPENAME ## _ ## KERN_ROWS ## x ## KERN_COLS ## _mla::get_packed_size(const DepthwiseArgs &args) \
+{ \
+ return generic_get_packed_size(VLType::VEC_TYPE, ACC_DEPTH, KERN_ROWS, KERN_COLS, args.input_channels); \
+} \
+\
+void interleave_ ## ARCH ## _ ## TYPENAME ## _ ## KERN_ROWS ## x ## KERN_COLS ## _mla::pack_parameters(unsigned int n_channels, void *outptr, \
+ const TYPE *weights, size_t ld_weight_col, size_t ld_weight_row) \
+{ \
+ generic_pack(VLType::VEC_TYPE, ACC_DEPTH, KERN_ROWS, KERN_COLS, n_channels, outptr, weights, ld_weight_col, ld_weight_row); \
+}
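+
+// Illustrative expansion (editorial comment): the line
+//   ADD_IMPLEMENTATION(a64, s8q, int8_t, None, 2, 3, 3)
+// below defines a struct named interleave_a64_s8q_3x3_mla whose
+// get_packed_size() and pack_parameters() forward to
+// generic_get_packed_size() and generic_pack() with VLType::None,
+// an accumulator depth of 2 and a 3x3 kernel.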
+
+
+namespace arm_conv {
+namespace depthwise {
+
+#if defined(__ARM_FEATURE_SVE)
+
+ADD_IMPLEMENTATION(sve, s8q, int8_t, SVE, 2, 3, 3)
+ADD_IMPLEMENTATION(sve, s8q, int8_t, SVE, 2, 5, 5)
+ADD_IMPLEMENTATION(sve, u8q, uint8_t, SVE, 2, 3, 3)
+ADD_IMPLEMENTATION(sve, u8q, uint8_t, SVE, 2, 5, 5)
+
+#endif // defined(__ARM_FEATURE_SVE)
+
+ADD_IMPLEMENTATION(a64, s8q, int8_t, None, 2, 3, 3)
+ADD_IMPLEMENTATION(a64, s8q, int8_t, None, 2, 5, 5)
+ADD_IMPLEMENTATION(a64, u8q, uint8_t, None, 2, 3, 3)
+ADD_IMPLEMENTATION(a64, u8q, uint8_t, None, 2, 5, 5)
+
+} // namespace depthwise
+} // namespace arm_conv
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
new file mode 100644
index 0000000000..3d3447bf3c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__aarch64__)
+
+#include "arm_gemm.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "src/core/NEON/kernels/assembly/depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_a64_s8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_a64_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors of packed data for every vector-of-int32s' worth of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels,
+ get_vector_length<int32_t>(arm_gemm::VLType::None)), 4lu
+ );
+ return n * 7 * get_vector_length<int8_t>(arm_gemm::VLType::None);
+}
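+
+// Worked example (editorial comment, not part of the patch): with 128-bit
+// vectors, get_vector_length<int32_t>(VLType::None) == 4 and
+// get_vector_length<int8_t>(VLType::None) == 16, so for 10 input channels:
+//   n = roundup(iceildiv(10, 4), 4) = roundup(3, 4) = 4
+//   packed size = 4 * 7 * 16 = 448 bytes.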
+
+void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "movi v0.16b, #0x0\n"
+ "cmp %x[ld_weight_col], XZR\n"
+ "movi v31.16b, #0x1\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "movi v16.4s, #0x9\n"
+ "mov x19, #0x3\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "mul x19, %x[ld_weight_col], x19\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "lsr x20, %x[n_channels], #0x2\n"
+ "mov x21, #0x0\n"
+ "add x19, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v30.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v29.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v28.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v27.4s }, [x19]\n"
+ "cbz x20, 4f\n"
+ "1:" // Loop
+ "movi v26.4s, #0x0\n"
+ "cbz %x[bias], 2f\n"
+ "ldr q26, [%x[bias], x21]\n"
+ "2:" // Loop: Skip bias load
+ "movi v25.4s, #0x0\n"
+ "ldr s24, [%x[weights], #0x0]\n"
+ "ldr s23, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "ldr s21, [%x[weights], x22]\n"
+ "add %x[weights], %x[weights], #0x4\n"
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s20, [x24, %x[ld_weight_col]]\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "ldr s18, [x24, x22]\n"
+ ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
+ "add x24, x24, #0x4\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s17, [x23, %x[ld_weight_col]]\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "ldr s16, [x23, x22]\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x4e9297f9 // sdot v25.4s, v31.16b, v18.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ ".inst 0x4e9097f9 // sdot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q21, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ldr q28, [%x[rq_mul_perchannel], x21]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x21]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "str q28, [%x[outptr], #0x0]\n"
+ "add x21, x21, #0x10\n"
+ "str q27, [%x[outptr], #0x10]\n"
+ "subs x20, x20, #0x1\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "bgt 1b\n"
+ "tst %x[n_channels], #0x3\n"
+ "beq 13f\n"
+ "4:" // Oddments
+ "movi v26.4s, #0x0\n"
+ "cbz %x[bias], 7f\n"
+ "add %x[bias], %x[bias], x21\n"
+ "tbz %x[n_channels], #1, 5f\n"
+ "ld1 { v26.d }[0], [%x[bias]], #0x8\n"
+ "tbz %x[n_channels], #0, 6f\n"
+ "ld1 { v26.s }[2], [%x[bias]], #0x4\n"
+ "b 6f\n"
+ "5:" // Oddments: Load bias: Bit 1: Unset
+ "tbz %x[n_channels], #0, 6f\n"
+ "ld1 { v26.s }[0], [%x[bias]], #0x4\n"
+ "6:" // Oddments: Load bias: Bit 1: End
+
+ "7:" // Oddments: Skip bias load
+ "tbz %x[n_channels], #1, 8f\n"
+ "ld1 { v24.h }[0], [%x[weights]]\n"
+ "ld1 { v22.h }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.h }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.h }[0], [x20]\n"
+ "add %x[weights], %x[weights], #0x2\n"
+ "ld1 { v21.h }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.h }[0], [x20]\n"
+ "ld1 { v18.h }[0], [x19]\n"
+ "add x24, x24, #0x2\n"
+ "add x19, x23, %x[ld_weight_col]\n"
+ "ld1 { v17.h }[0], [x19]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v16.h }[0], [x19]\n"
+ "add x23, x23, #0x2\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v24.b }[2], [%x[weights]]\n"
+ "ld1 { v22.b }[2], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[2], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[2], [x20]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[2], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
+ "b 9f\n"
+ "8:" // Oddments: Load weights: Bit 1: Unset
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v24.b }[0], [%x[weights]]\n"
+ "ld1 { v22.b }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[0], [x20]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x19]\n"
+ "9:" // Oddments: Load weights: Bit 1: End
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ "movi v25.4s, #0x0\n"
+ ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
+ ".inst 0x4e9297f9 // sdot v25.4s, v31.16b, v18.16b\n"
+ ".inst 0x4e9097f9 // sdot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q21, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 12f\n"
+ "add x20, %x[rq_mul_perchannel], x21\n"
+ "add x19, %x[rq_shift_perchannel], x21\n"
+ "tbz %x[n_channels], #1, 10f\n"
+ "ld1 { v28.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
+ "b 11f\n"
+ "10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
+
+ "12:" // Oddments: Quantisation parameters: Store
+ "str q28, [%x[outptr], #0x0]\n"
+ "str q27, [%x[outptr], #0x10]\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "13:" // End
+
+ : [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)
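Editorial note: the arithmetic the kernel above folds into the stored bias is the standard requantisation correction. A scalar sketch follows (illustration only, not part of the patch; weight_sum is the per-channel sum of the nine weights, which the sdot instructions accumulate against an all-ones vector):

    // What is stored in place of the raw bias for each channel, matching the
    // "mls v26, v25, v30" / "add v26, v26, v29" sequence above, where
    // input_offset = qp.a_offset and weight_offset = qp.b_offset.
    static inline int32_t corrected_bias(int32_t bias, int32_t weight_sum,
                                         int32_t input_offset, int32_t weight_offset)
    {
        return bias - input_offset * weight_sum + 9 * input_offset * weight_offset;
    }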
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
new file mode 100644
index 0000000000..a725dcab59
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__aarch64__)
+
+#include "arm_gemm.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "src/core/NEON/kernels/assembly/depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_a64_u8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_a64_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors of packed data for every vector-of-int32s' worth of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels,
+ get_vector_length<int32_t>(arm_gemm::VLType::None)), 4lu
+ );
+ return n * 7 * get_vector_length<uint8_t>(arm_gemm::VLType::None);
+}
+
+void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "movi v0.16b, #0x0\n"
+ "cmp %x[ld_weight_col], XZR\n"
+ "movi v31.16b, #0x1\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "movi v16.4s, #0x9\n"
+ "mov x19, #0x3\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "mul x19, %x[ld_weight_col], x19\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "lsr x20, %x[n_channels], #0x2\n"
+ "mov x21, #0x0\n"
+ "add x19, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v30.4s }, [x19]\n"
+ "add x19, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v29.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v28.4s }, [x19]\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x19, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v27.4s }, [x19]\n"
+ "cbz x20, 4f\n"
+ "1:" // Loop
+ "movi v26.4s, #0x0\n"
+ "cbz %x[bias], 2f\n"
+ "ldr q26, [%x[bias], x21]\n"
+ "2:" // Loop: Skip bias load
+ "movi v25.4s, #0x0\n"
+ "ldr s24, [%x[weights], #0x0]\n"
+ "ldr s23, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "ldr s21, [%x[weights], x22]\n"
+ "add %x[weights], %x[weights], #0x4\n"
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s20, [x24, %x[ld_weight_col]]\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "ldr s18, [x24, x22]\n"
+ ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
+ "add x24, x24, #0x4\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s17, [x23, %x[ld_weight_col]]\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "ldr s16, [x23, x22]\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "add x23, x23, #0x4\n"
+ ".inst 0x6e9297f9 // udot v25.4s, v31.16b, v18.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ ".inst 0x6e9097f9 // udot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q21, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ldr q28, [%x[rq_mul_perchannel], x21]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x21]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "str q28, [%x[outptr], #0x0]\n"
+ "add x21, x21, #0x10\n"
+ "str q27, [%x[outptr], #0x10]\n"
+ "subs x20, x20, #0x1\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "bgt 1b\n"
+ "tst %x[n_channels], #0x3\n"
+ "beq 13f\n"
+ "4:" // Oddments
+ "movi v26.4s, #0x0\n"
+ "cbz %x[bias], 7f\n"
+ "add %x[bias], %x[bias], x21\n"
+ "tbz %x[n_channels], #1, 5f\n"
+ "ld1 { v26.d }[0], [%x[bias]], #0x8\n"
+ "tbz %x[n_channels], #0, 6f\n"
+ "ld1 { v26.s }[2], [%x[bias]], #0x4\n"
+ "b 6f\n"
+ "5:" // Oddments: Load bias: Bit 1: Unset
+ "tbz %x[n_channels], #0, 6f\n"
+ "ld1 { v26.s }[0], [%x[bias]], #0x4\n"
+ "6:" // Oddments: Load bias: Bit 1: End
+
+ "7:" // Oddments: Skip bias load
+ "tbz %x[n_channels], #1, 8f\n"
+ "ld1 { v24.h }[0], [%x[weights]]\n"
+ "ld1 { v22.h }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.h }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.h }[0], [x20]\n"
+ "add %x[weights], %x[weights], #0x2\n"
+ "ld1 { v21.h }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.h }[0], [x20]\n"
+ "ld1 { v18.h }[0], [x19]\n"
+ "add x24, x24, #0x2\n"
+ "add x19, x23, %x[ld_weight_col]\n"
+ "ld1 { v17.h }[0], [x19]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v16.h }[0], [x19]\n"
+ "add x23, x23, #0x2\n"
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v24.b }[2], [%x[weights]]\n"
+ "ld1 { v22.b }[2], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[2], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[2], [x20]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[2], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x19]\n"
+ "b 9f\n"
+ "8:" // Oddments: Load weights: Bit 1: Unset
+ "tbz %x[n_channels], #0, 9f\n"
+ "ld1 { v24.b }[0], [%x[weights]]\n"
+ "ld1 { v22.b }[0], [x24]\n"
+ "add x20, %x[weights], %x[ld_weight_col]\n"
+ "ld1 { v19.b }[0], [x23]\n"
+ "add x19, %x[weights], x22\n"
+ "ld1 { v23.b }[0], [x20]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v21.b }[0], [x19]\n"
+ "add x20, x24, %x[ld_weight_col]\n"
+ "add x19, x24, x22\n"
+ "ld1 { v20.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x19]\n"
+ "add x20, x23, %x[ld_weight_col]\n"
+ "add x19, x23, x22\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x19]\n"
+ "9:" // Oddments: Load weights: Bit 1: End
+ "zip1 v21.16b, v24.16b, v21.16b\n"
+ "zip1 v23.16b, v23.16b, v0.16b\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
+ "zip1 v20.16b, v20.16b, v0.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "zip1 v17.16b, v17.16b, v0.16b\n"
+ "zip1 v21.16b, v21.16b, v23.16b\n"
+ "zip1 v18.16b, v18.16b, v20.16b\n"
+ "zip1 v16.16b, v16.16b, v17.16b\n"
+ "movi v25.4s, #0x0\n"
+ ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
+ ".inst 0x6e9297f9 // udot v25.4s, v31.16b, v18.16b\n"
+ ".inst 0x6e9097f9 // udot v25.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q21, [%x[outptr], #0x10]\n"
+ "str q18, [%x[outptr], #0x20]\n"
+ "str q16, [%x[outptr], #0x30]\n"
+ "add %x[outptr], %x[outptr], #0x40\n"
+ "cbz %x[rq_mul_perchannel], 12f\n"
+ "add x20, %x[rq_mul_perchannel], x21\n"
+ "add x19, %x[rq_shift_perchannel], x21\n"
+ "tbz %x[n_channels], #1, 10f\n"
+ "ld1 { v28.d }[0], [x20], #0x8\n"
+ "ld1 { v27.d }[0], [x19], #0x8\n"
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v28.s }[2], [x20], #0x4\n"
+ "ld1 { v27.s }[2], [x19], #0x4\n"
+ "b 11f\n"
+ "10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
+ "tbz %x[n_channels], #0, 11f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v27.s }[0], [x19], #0x4\n"
+ "11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
+
+ "12:" // Oddments: Quantisation parameters: Store
+ "str q28, [%x[outptr], #0x0]\n"
+ "str q27, [%x[outptr], #0x10]\n"
+ "add %x[outptr], %x[outptr], #0x20\n"
+ "13:" // End
+
+ : [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)
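Editorial note: this unsigned kernel is the same routine as the signed one above, with udot in place of sdot. For orientation, the 96-byte block both kernels emit per group of four channels is (inferred from the str/add sequence above):

    offset 0x00: 4 x int32  corrected biases
    offset 0x10: 16 x int8  row 0 weights, zipped as [w0, w1, w2, 0] per channel
    offset 0x20: 16 x int8  row 1 weights, likewise
    offset 0x30: 16 x int8  row 2 weights, likewise
    offset 0x40: 4 x int32  requantisation multipliers
    offset 0x50: 4 x int32  requantisation right-shifts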
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp
new file mode 100644
index 0000000000..41f0495acf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+namespace arm_conv {
+namespace depthwise {
+
+#if defined(__ARM_FEATURE_SVE)
+
+class interleave_sve_u8q_3x3_dot
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int32_t *, const uint8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_sve_s8q_3x3_dot
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_sve_u8q_3x3_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const uint8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_sve_s8q_3x3_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_sve_u8q_5x5_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const uint8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_sve_s8q_5x5_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+#endif // defined(__ARM_FEATURE_SVE)
+
+class interleave_a64_u8q_3x3_dot
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int32_t *, const uint8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_a64_s8q_3x3_dot
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_a64_u8q_3x3_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const uint8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_a64_s8q_3x3_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_a64_u8q_5x5_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const uint8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+class interleave_a64_s8q_5x5_mla
+{
+ public:
+ static void pack_parameters(unsigned int, void *, const int8_t *, size_t, size_t);
+ static size_t get_packed_size(const DepthwiseArgs &);
+};
+
+} // namespace depthwise
+} // namespace arm_conv
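Editorial note: all of these classes pair get_packed_size() with pack_parameters(), so callers follow one pattern. A hypothetical call sequence (args, bias_ptr, weights_ptr and qp are assumed to exist in the caller; zero strides select the dense defaults):

    std::vector<uint8_t> packed(
        arm_conv::depthwise::interleave_a64_s8q_3x3_dot::get_packed_size(args));
    arm_conv::depthwise::interleave_a64_s8q_3x3_dot::pack_parameters(
        args.input_channels, packed.data(), bias_ptr, weights_ptr, qp,
        /*ld_weight_col=*/0, /*ld_weight_row=*/0);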
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
new file mode 100644
index 0000000000..ea0c35b7ce
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+#include "arm_gemm.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "src/core/NEON/kernels/assembly/depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_sve_s8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_sve_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors of packed data for every vector-of-int32s' worth of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels,
+ get_vector_length<int32_t>(arm_gemm::VLType::SVE)), 4lu
+ );
+ return n * 7 * get_vector_length<int8_t>(arm_gemm::VLType::SVE);
+}
+
+void interleave_sve_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "mov z30.b, #0x0\n"
+ "ptrue p2.b\n"
+ "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "mov z28.b, #0x1\n"
+ "cmp %x[ld_weight_col], XZR\n"
+ "mov z16.s, #0x9\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mul z27.s, p2/M, z27.s, z29.s\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "mov x19, #0x3\n"
+ "mul z27.s, p2/M, z27.s, z16.s\n"
+ "ld1rw { z25.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "mul x19, %x[ld_weight_col], x19\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x22, %x[weights], %x[ld_weight_row]\n"
+ "add x21, x22, %x[ld_weight_row]\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "mov x20, #0x0\n"
+ "pfalse p8.b\n"
+ "cbz %x[bias], 1f\n"
+ "ptrue p8.s\n"
+ "1:" // No bias
+
+ "2:" // Loop
+ "mov z24.s, #0x0\n"
+ "cntp x19, p2, p1.s\n"
+ "and p0.b, p2/Z, p8.b, p1.b\n"
+ "ld1w { z23.s }, p0/Z, [%x[bias], x20, LSL #2]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 z18.b, z16.b, z30.b\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
+ "add %x[weights], %x[weights], x19\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "ld1b { z22.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
+ "zip1 z21.b, z16.b, z18.b\n"
+ "ld1b { z16.b }, p0/Z, [x22, x23]\n"
+ "sdot z24.s, z28.b, z21.b\n"
+ "add x22, x22, x19\n"
+ "zip1 z18.b, z17.b, z30.b\n"
+ "ld1b { z20.b }, p0/Z, [x21]\n"
+ "ld1b { z19.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x21, x23]\n"
+ "zip1 z18.b, z17.b, z18.b\n"
+ "add x21, x21, x19\n"
+ "zip1 z17.b, z19.b, z30.b\n"
+ "sdot z24.s, z28.b, z18.b\n"
+ "zip1 z16.b, z20.b, z16.b\n"
+ "zip1 z16.b, z16.b, z17.b\n"
+ "sdot z24.s, z28.b, z16.b\n"
+ "mls z23.s, p2/M, z24.s, z29.s\n"
+ "add z23.s, z23.s, z27.s\n"
+ "st1w { z23.s }, p2, [%x[outptr]]\n"
+ "st1b { z21.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z18.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
+ "addvl %x[outptr], %x[outptr], #4\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ld1w { z26.s }, p1/Z, [%x[rq_mul_perchannel], x20, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [%x[rq_shift_perchannel], x20, LSL #2]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "st1w { z26.s }, p2, [%x[outptr]]\n"
+ "incw x20\n"
+ "st1w { z25.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "whilelt p1.s, x20, %x[n_channels]\n"
+ "addvl %x[outptr], %x[outptr], #2\n"
+ "b.any 2b\n"
+ : [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x19", "x20", "x21", "x22", "x23", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__ARM_FEATURE_SVE)
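Editorial note: unlike the A64 kernel, this SVE version has no scalar "oddments" tail; the whilelt/incw/b.any sequence drives a single predicated loop. Schematic pseudocode (illustration only, with VL the number of 32-bit lanes per SVE vector):

    // for (unsigned int c = 0; c < n_channels; c += VL) {
    //     p1 = whilelt(c, n_channels);  // lanes for the channels still to do
    //     pack one vector's worth of bias/weights/requant data under p1;
    // }
    // A final partial vector is handled by the same code with a
    // partially-true predicate, so no separate tail path is needed.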
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
new file mode 100644
index 0000000000..edd32a43f5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(__ARM_FEATURE_SVE)
+
+#include "arm_gemm.hpp"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "src/core/NEON/kernels/assembly/depthwise.hpp"
+#include <cstdint>
+
+namespace arm_conv {
+namespace depthwise {
+
+struct interleave_sve_u8q_3x3_dot
+{
+ static size_t get_packed_size(const DepthwiseArgs &);
+ static void pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row);
+};
+
+size_t interleave_sve_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
+{
+ // We store 7 vectors of packed data for every vector-of-int32s' worth of channels.
+ const unsigned int n = arm_gemm::roundup(
+ arm_gemm::iceildiv((long unsigned int) args.input_channels,
+ get_vector_length<int32_t>(arm_gemm::VLType::SVE)), 4lu
+ );
+ return n * 7 * get_vector_length<uint8_t>(arm_gemm::VLType::SVE);
+}
+
+void interleave_sve_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
+{
+ __asm__ __volatile__(
+ "mov z30.b, #0x0\n"
+ "ptrue p2.b\n"
+ "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "mov z28.b, #0x1\n"
+ "cmp %x[ld_weight_col], XZR\n"
+ "mov z16.s, #0x9\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mul z27.s, p2/M, z27.s, z29.s\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "mov x19, #0x3\n"
+ "mul z27.s, p2/M, z27.s, z16.s\n"
+ "ld1rw { z25.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "mul x19, %x[ld_weight_col], x19\n"
+ "cmp %x[ld_weight_row], XZR\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
+ "add x22, %x[weights], %x[ld_weight_row]\n"
+ "add x21, x22, %x[ld_weight_row]\n"
+ "whilelt p1.s, XZR, %x[n_channels]\n"
+ "mov x20, #0x0\n"
+ "pfalse p8.b\n"
+ "cbz %x[bias], 1f\n"
+ "ptrue p8.s\n"
+ "1:" // No bias
+
+ "2:" // Loop
+ "mov z24.s, #0x0\n"
+ "cntp x19, p2, p1.s\n"
+ "and p0.b, p2/Z, p8.b, p1.b\n"
+ "ld1w { z23.s }, p0/Z, [%x[bias], x20, LSL #2]\n"
+ "whilelt p0.b, XZR, x19\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 z18.b, z16.b, z30.b\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
+ "add %x[weights], %x[weights], x19\n"
+ "zip1 z16.b, z17.b, z16.b\n"
+ "ld1b { z22.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
+ "zip1 z21.b, z16.b, z18.b\n"
+ "ld1b { z16.b }, p0/Z, [x22, x23]\n"
+ "udot z24.s, z28.b, z21.b\n"
+ "add x22, x22, x19\n"
+ "zip1 z18.b, z17.b, z30.b\n"
+ "ld1b { z20.b }, p0/Z, [x21]\n"
+ "ld1b { z19.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x21, x23]\n"
+ "zip1 z18.b, z17.b, z18.b\n"
+ "add x21, x21, x19\n"
+ "zip1 z17.b, z19.b, z30.b\n"
+ "udot z24.s, z28.b, z18.b\n"
+ "zip1 z16.b, z20.b, z16.b\n"
+ "zip1 z16.b, z16.b, z17.b\n"
+ "udot z24.s, z28.b, z16.b\n"
+ "mls z23.s, p2/M, z24.s, z29.s\n"
+ "add z23.s, z23.s, z27.s\n"
+ "st1w { z23.s }, p2, [%x[outptr]]\n"
+ "st1b { z21.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z18.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
+ "addvl %x[outptr], %x[outptr], #4\n"
+ "cbz %x[rq_mul_perchannel], 3f\n"
+ "ld1w { z26.s }, p1/Z, [%x[rq_mul_perchannel], x20, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [%x[rq_shift_perchannel], x20, LSL #2]\n"
+ "3:" // Loop: Quantisation parameters: Store
+ "st1w { z26.s }, p2, [%x[outptr]]\n"
+ "incw x20\n"
+ "st1w { z25.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "whilelt p1.s, x20, %x[n_channels]\n"
+ "addvl %x[outptr], %x[outptr], #2\n"
+ "b.any 2b\n"
+ : [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
+ : [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x19", "x20", "x21", "x22", "x23", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
+ );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__ARM_FEATURE_SVE)
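Editorial note: as with the A64 pair, this unsigned SVE kernel differs from the signed one only in using udot rather than sdot. Per loop iteration both emit six vector-sized blocks (inferred from the st1w/st1b stores and addvl increments above):

    MUL VL slot 0:    corrected int32 biases          (st1w z23)
    MUL VL slots 1-3: zipped weight rows 0, 1 and 2   (st1b z21, z18, z16)
    MUL VL slot 4:    requantisation multipliers      (st1w z26)
    MUL VL slot 5:    requantisation right-shifts     (st1w z25)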