author     Michael Tyler <michael.tyler@arm.com>    2022-12-15 12:39:29 +0000
committer  michael.tyler <michael.tyler@arm.com>    2023-01-16 09:31:00 +0000
commit     ba209750abc1ac7e42bab9fef5db284384d70fb3 (patch)
tree       1065f242db9a9a5e48bd4a9f2fd68aef1924827a /src/core/NEON/kernels/arm_conv/depthwise/interleaves
parent     8094f9dd5307c55f545b2cb41ec80a739a9b4d6f (diff)
download   ComputeLibrary-ba209750abc1ac7e42bab9fef5db284384d70fb3.tar.gz
Update CPU kernels to remove x19
Resolves: COMPMID-5805
Signed-off-by: Michael Tyler <michael.tyler@arm.com>
Change-Id: I250f64531e209625e4ff176dd5a552c1c34bc484
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8909
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_conv/depthwise/interleaves')
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp  256
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp  256
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp  116
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp  116
4 files changed, 366 insertions, 378 deletions
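
The change is mechanical: each kernel's inline assembly renumbers its scratch registers so the pool starts at x20 rather than x19 (the A64 variants now use x20 through x25, the SVE variants x20 through x24), and the clobber lists are updated to match. Vector register allocation and instruction scheduling shift as a side effect, but the packed output layout (bias, three interleaved weight vectors, then the quantisation parameters) is unchanged. As a rough illustration of the register renumbering, here is a minimal sketch in the same GCC inline-assembly style; the byte-copy routine below is hypothetical and not part of ComputeLibrary:

#include <cstddef>
#include <cstdint>

// Hypothetical example (not a ComputeLibrary kernel): an inline-asm byte copy
// whose scratch registers start at x20, so x19 never appears in the body or
// in the clobber list, mirroring the renumbering applied to the depthwise
// interleave kernels in this patch.
static void copy_bytes(uint8_t *dst, const uint8_t *src, size_t n)
{
    __asm__ __volatile__(
        "cbz %x[n], 2f\n"
        "mov x20, #0x0\n"               // x20: byte index (would have been x19 under the old scheme)
        "1:\n"
        "ldrb w21, [%x[src], x20]\n"    // w21: scratch byte
        "strb w21, [%x[dst], x20]\n"
        "add x20, x20, #0x1\n"
        "cmp x20, %x[n]\n"
        "b.ne 1b\n"
        "2:\n"
        :
        : [dst] "r" (dst), [src] "r" (src), [n] "r" (n)
        : "cc", "memory", "x20", "x21"  // clobbers name x20/x21 only; x19 is left untouched
    );
}

The diff below applies the same idea to the four interleave kernels, renaming every x19..x24 temporary to x20..x25 (or x20..x24 for SVE) and adjusting the clobber lists accordingly.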
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
index 3d3447bf3c..adda78f164 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,76 +51,76 @@ size_t interleave_a64_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
- "movi v0.16b, #0x0\n"
"cmp %x[ld_weight_col], XZR\n"
- "movi v31.16b, #0x1\n"
"csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"movi v16.4s, #0x9\n"
- "mov x19, #0x3\n"
+ "movi v0.16b, #0x0\n"
+ "mov x21, #0x3\n"
+ "mul x21, %x[ld_weight_col], x21\n"
+ "add x20, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v31.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v30.4s }, [x20]\n"
"cmp %x[ld_weight_row], XZR\n"
- "mul x19, %x[ld_weight_col], x19\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
- "add x24, %x[weights], %x[ld_weight_row]\n"
- "add x23, x24, %x[ld_weight_row]\n"
- "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
- "lsr x20, %x[n_channels], #0x2\n"
- "mov x21, #0x0\n"
- "add x19, %x[qp], %[offsetof_input_offset]\n"
- "ld1r { v30.4s }, [x19]\n"
- "add x19, %x[qp], %[offsetof_weights_offset]\n"
- "ld1r { v29.4s }, [x19]\n"
- "mul v29.4s, v29.4s, v30.4s\n"
- "add x19, %x[qp], %[offsetof_per_layer_mul]\n"
- "ld1r { v28.4s }, [x19]\n"
- "mul v29.4s, v29.4s, v16.4s\n"
- "add x19, %x[qp], %[offsetof_per_layer_right_shift]\n"
- "ld1r { v27.4s }, [x19]\n"
- "cbz x20, 4f\n"
+ "mul v30.4s, v30.4s, v31.4s\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
+ "lsr x21, %x[n_channels], #0x2\n"
+ "movi v29.16b, #0x1\n"
+ "mul v30.4s, v30.4s, v16.4s\n"
+ "add x25, %x[weights], %x[ld_weight_row]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "add x24, x25, %x[ld_weight_row]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "mov x22, #0x0\n"
+ "cbz x21, 4f\n"
"1:" // Loop
"movi v26.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q26, [%x[bias], x21]\n"
+ "ldr q26, [%x[bias], x22]\n"
"2:" // Loop: Skip bias load
- "movi v25.4s, #0x0\n"
- "ldr s24, [%x[weights], #0x0]\n"
- "ldr s23, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 v23.16b, v23.16b, v0.16b\n"
- "ldr s21, [%x[weights], x22]\n"
+ "ldr s25, [%x[weights], #0x0]\n"
+ "ldr s22, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v22.16b, v22.16b, v0.16b\n"
+ "movi v24.4s, #0x0\n"
+ "ldr s20, [%x[weights], x23]\n"
+ "ldr s23, [x25, #0x0]\n"
+ "zip1 v20.16b, v25.16b, v20.16b\n"
+ "zip1 v22.16b, v20.16b, v22.16b\n"
+ "ldr s21, [x25, %x[ld_weight_col]]\n"
+ "ldr s18, [x25, x23]\n"
+ "zip1 v20.16b, v23.16b, v18.16b\n"
+ "zip1 v18.16b, v21.16b, v0.16b\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s19, [x24, %x[ld_weight_col]]\n"
+ ".inst 0x4e9697b8 // sdot v24.4s, v29.16b, v22.16b\n"
+ "zip1 v18.16b, v20.16b, v18.16b\n"
+ "ldr s16, [x24, x23]\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v19.16b, v0.16b\n"
+ ".inst 0x4e9297b8 // sdot v24.4s, v29.16b, v18.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x4e9097b8 // sdot v24.4s, v29.16b, v16.16b\n"
"add %x[weights], %x[weights], #0x4\n"
- "zip1 v21.16b, v24.16b, v21.16b\n"
- "ldr s22, [x24, #0x0]\n"
- "ldr s20, [x24, %x[ld_weight_col]]\n"
- "zip1 v21.16b, v21.16b, v23.16b\n"
- "ldr s18, [x24, x22]\n"
- ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
+ "add x25, x25, #0x4\n"
+ "mls v26.4s, v24.4s, v31.4s\n"
"add x24, x24, #0x4\n"
- "zip1 v20.16b, v20.16b, v0.16b\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s17, [x23, %x[ld_weight_col]]\n"
- "zip1 v18.16b, v22.16b, v18.16b\n"
- "ldr s16, [x23, x22]\n"
- "zip1 v18.16b, v18.16b, v20.16b\n"
- "add x23, x23, #0x4\n"
- ".inst 0x4e9297f9 // sdot v25.4s, v31.16b, v18.16b\n"
- "zip1 v17.16b, v17.16b, v0.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v16.16b, v16.16b, v17.16b\n"
- ".inst 0x4e9097f9 // sdot v25.4s, v31.16b, v16.16b\n"
- "mls v26.4s, v25.4s, v30.4s\n"
- "add v26.4s, v26.4s, v29.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q21, [%x[outptr], #0x10]\n"
+ "str q22, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ldr q28, [%x[rq_mul_perchannel], x21]\n"
- "ldr q27, [%x[rq_shift_perchannel], x21]\n"
+ "ldr q28, [%x[rq_mul_perchannel], x22]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x22]\n"
"3:" // Loop: Quantisation parameters: Store
+ "subs x21, x21, #0x1\n"
"str q28, [%x[outptr], #0x0]\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
"str q27, [%x[outptr], #0x10]\n"
- "subs x20, x20, #0x1\n"
"add %x[outptr], %x[outptr], #0x20\n"
"bgt 1b\n"
"tst %x[n_channels], #0x3\n"
@@ -128,119 +128,113 @@ void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
"4:" // Oddments
"movi v26.4s, #0x0\n"
"cbz %x[bias], 7f\n"
- "add %x[bias], %x[bias], x21\n"
+ "add %x[bias], %x[bias], x22\n"
"tbz %x[n_channels], #1, 5f\n"
"ld1 { v26.d }[0], [%x[bias]], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[2], [%x[bias]], #0x4\n"
"b 6f\n"
"5:" // Oddments: Load bias: Bit 1: Unset
- "tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[0], [%x[bias]], #0x4\n"
"6:" // Oddments: Load bias: Bit 1: End
-
"7:" // Oddments: Skip bias load
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v24.h }[0], [%x[weights]]\n"
- "ld1 { v22.h }[0], [x24]\n"
- "add x20, %x[weights], %x[ld_weight_col]\n"
- "ld1 { v19.h }[0], [x23]\n"
- "add x19, %x[weights], x22\n"
- "ld1 { v23.h }[0], [x20]\n"
- "add %x[weights], %x[weights], #0x2\n"
- "ld1 { v21.h }[0], [x19]\n"
- "add x20, x24, %x[ld_weight_col]\n"
- "add x19, x24, x22\n"
+ "ld1 { v25.h }[0], [%x[weights]]\n"
+ "ld1 { v23.h }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v22.h }[0], [x21]\n"
"ld1 { v20.h }[0], [x20]\n"
- "ld1 { v18.h }[0], [x19]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v21.h }[0], [x21]\n"
+ "ld1 { v18.h }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v17.h }[0], [x24]\n"
+ "ld1 { v19.h }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x2\n"
+ "add x25, x25, #0x2\n"
+ "ld1 { v16.h }[0], [x20]\n"
"add x24, x24, #0x2\n"
- "add x19, x23, %x[ld_weight_col]\n"
- "ld1 { v17.h }[0], [x19]\n"
- "add x19, x23, x22\n"
- "ld1 { v16.h }[0], [x19]\n"
- "add x23, x23, #0x2\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v24.b }[2], [%x[weights]]\n"
- "ld1 { v22.b }[2], [x24]\n"
- "add x20, %x[weights], %x[ld_weight_col]\n"
- "ld1 { v19.b }[2], [x23]\n"
- "add x19, %x[weights], x22\n"
- "ld1 { v23.b }[2], [x20]\n"
- "add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[2], [x19]\n"
- "add x20, x24, %x[ld_weight_col]\n"
- "add x19, x24, x22\n"
+ "ld1 { v25.b }[2], [%x[weights]]\n"
+ "ld1 { v23.b }[2], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v22.b }[2], [x21]\n"
"ld1 { v20.b }[2], [x20]\n"
- "ld1 { v18.b }[2], [x19]\n"
- "add x20, x23, %x[ld_weight_col]\n"
- "add x19, x23, x22\n"
- "ld1 { v17.b }[2], [x20]\n"
- "ld1 { v16.b }[2], [x19]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v21.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v17.b }[2], [x24]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 9f\n"
"8:" // Oddments: Load weights: Bit 1: Unset
- "tbz %x[n_channels], #0, 9f\n"
- "ld1 { v24.b }[0], [%x[weights]]\n"
- "ld1 { v22.b }[0], [x24]\n"
- "add x20, %x[weights], %x[ld_weight_col]\n"
- "ld1 { v19.b }[0], [x23]\n"
- "add x19, %x[weights], x22\n"
- "ld1 { v23.b }[0], [x20]\n"
- "add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[0], [x19]\n"
- "add x20, x24, %x[ld_weight_col]\n"
- "add x19, x24, x22\n"
+ "ld1 { v25.b }[0], [%x[weights]]\n"
+ "ld1 { v23.b }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v22.b }[0], [x21]\n"
"ld1 { v20.b }[0], [x20]\n"
- "ld1 { v18.b }[0], [x19]\n"
- "add x20, x23, %x[ld_weight_col]\n"
- "add x19, x23, x22\n"
- "ld1 { v17.b }[0], [x20]\n"
- "ld1 { v16.b }[0], [x19]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v21.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v17.b }[0], [x24]\n"
+ "ld1 { v19.b }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v16.b }[0], [x20]\n"
"9:" // Oddments: Load weights: Bit 1: End
- "zip1 v21.16b, v24.16b, v21.16b\n"
- "zip1 v23.16b, v23.16b, v0.16b\n"
- "zip1 v18.16b, v22.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v0.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v17.16b, v17.16b, v0.16b\n"
- "zip1 v21.16b, v21.16b, v23.16b\n"
- "zip1 v18.16b, v18.16b, v20.16b\n"
- "zip1 v16.16b, v16.16b, v17.16b\n"
- "movi v25.4s, #0x0\n"
- ".inst 0x4e9597f9 // sdot v25.4s, v31.16b, v21.16b\n"
- ".inst 0x4e9297f9 // sdot v25.4s, v31.16b, v18.16b\n"
- ".inst 0x4e9097f9 // sdot v25.4s, v31.16b, v16.16b\n"
- "mls v26.4s, v25.4s, v30.4s\n"
- "add v26.4s, v26.4s, v29.4s\n"
+ "zip1 v20.16b, v25.16b, v20.16b\n"
+ "zip1 v22.16b, v22.16b, v0.16b\n"
+ "zip1 v22.16b, v20.16b, v22.16b\n"
+ "zip1 v20.16b, v23.16b, v18.16b\n"
+ "zip1 v18.16b, v21.16b, v0.16b\n"
+ "movi v24.4s, #0x0\n"
+ ".inst 0x4e9697b8 // sdot v24.4s, v29.16b, v22.16b\n"
+ "zip1 v18.16b, v20.16b, v18.16b\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ ".inst 0x4e9297b8 // sdot v24.4s, v29.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v0.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x4e9097b8 // sdot v24.4s, v29.16b, v16.16b\n"
+ "mls v26.4s, v24.4s, v31.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q21, [%x[outptr], #0x10]\n"
+ "str q22, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 12f\n"
- "add x20, %x[rq_mul_perchannel], x21\n"
- "add x19, %x[rq_shift_perchannel], x21\n"
+ "add x21, %x[rq_mul_perchannel], x22\n"
+ "add x20, %x[rq_shift_perchannel], x22\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v28.d }[0], [x20], #0x8\n"
- "ld1 { v27.d }[0], [x19], #0x8\n"
+ "ld1 { v28.d }[0], [x21], #0x8\n"
+ "ld1 { v27.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v28.s }[2], [x20], #0x4\n"
- "ld1 { v27.s }[2], [x19], #0x4\n"
+ "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x20], #0x4\n"
"b 11f\n"
"10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "ld1 { v27.s }[0], [x19], #0x4\n"
+ "ld1 { v28.s }[0], [x21], #0x4\n"
+ "ld1 { v27.s }[0], [x20], #0x4\n"
"11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
-
"12:" // Oddments: Quantisation parameters: Store
"str q28, [%x[outptr], #0x0]\n"
"str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"13:" // End
-
: [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
index a725dcab59..b89886ae0c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,76 +51,76 @@ size_t interleave_a64_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
- "movi v0.16b, #0x0\n"
"cmp %x[ld_weight_col], XZR\n"
- "movi v31.16b, #0x1\n"
"csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"movi v16.4s, #0x9\n"
- "mov x19, #0x3\n"
+ "movi v0.16b, #0x0\n"
+ "mov x21, #0x3\n"
+ "mul x21, %x[ld_weight_col], x21\n"
+ "add x20, %x[qp], %[offsetof_input_offset]\n"
+ "ld1r { v31.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_weights_offset]\n"
+ "ld1r { v30.4s }, [x20]\n"
"cmp %x[ld_weight_row], XZR\n"
- "mul x19, %x[ld_weight_col], x19\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
- "add x24, %x[weights], %x[ld_weight_row]\n"
- "add x23, x24, %x[ld_weight_row]\n"
- "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
- "lsr x20, %x[n_channels], #0x2\n"
- "mov x21, #0x0\n"
- "add x19, %x[qp], %[offsetof_input_offset]\n"
- "ld1r { v30.4s }, [x19]\n"
- "add x19, %x[qp], %[offsetof_weights_offset]\n"
- "ld1r { v29.4s }, [x19]\n"
- "mul v29.4s, v29.4s, v30.4s\n"
- "add x19, %x[qp], %[offsetof_per_layer_mul]\n"
- "ld1r { v28.4s }, [x19]\n"
- "mul v29.4s, v29.4s, v16.4s\n"
- "add x19, %x[qp], %[offsetof_per_layer_right_shift]\n"
- "ld1r { v27.4s }, [x19]\n"
- "cbz x20, 4f\n"
+ "mul v30.4s, v30.4s, v31.4s\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
+ "lsr x21, %x[n_channels], #0x2\n"
+ "movi v29.16b, #0x1\n"
+ "mul v30.4s, v30.4s, v16.4s\n"
+ "add x25, %x[weights], %x[ld_weight_row]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_mul]\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "add x24, x25, %x[ld_weight_row]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "mov x22, #0x0\n"
+ "cbz x21, 4f\n"
"1:" // Loop
"movi v26.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q26, [%x[bias], x21]\n"
+ "ldr q26, [%x[bias], x22]\n"
"2:" // Loop: Skip bias load
- "movi v25.4s, #0x0\n"
- "ldr s24, [%x[weights], #0x0]\n"
- "ldr s23, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 v23.16b, v23.16b, v0.16b\n"
- "ldr s21, [%x[weights], x22]\n"
+ "ldr s25, [%x[weights], #0x0]\n"
+ "ldr s22, [%x[weights], %x[ld_weight_col]]\n"
+ "zip1 v22.16b, v22.16b, v0.16b\n"
+ "movi v24.4s, #0x0\n"
+ "ldr s20, [%x[weights], x23]\n"
+ "ldr s23, [x25, #0x0]\n"
+ "zip1 v20.16b, v25.16b, v20.16b\n"
+ "zip1 v22.16b, v20.16b, v22.16b\n"
+ "ldr s21, [x25, %x[ld_weight_col]]\n"
+ "ldr s18, [x25, x23]\n"
+ "zip1 v20.16b, v23.16b, v18.16b\n"
+ "zip1 v18.16b, v21.16b, v0.16b\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s19, [x24, %x[ld_weight_col]]\n"
+ ".inst 0x6e9697b8 // udot v24.4s, v29.16b, v22.16b\n"
+ "zip1 v18.16b, v20.16b, v18.16b\n"
+ "ldr s16, [x24, x23]\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v19.16b, v0.16b\n"
+ ".inst 0x6e9297b8 // udot v24.4s, v29.16b, v18.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x6e9097b8 // udot v24.4s, v29.16b, v16.16b\n"
"add %x[weights], %x[weights], #0x4\n"
- "zip1 v21.16b, v24.16b, v21.16b\n"
- "ldr s22, [x24, #0x0]\n"
- "ldr s20, [x24, %x[ld_weight_col]]\n"
- "zip1 v21.16b, v21.16b, v23.16b\n"
- "ldr s18, [x24, x22]\n"
- ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
+ "add x25, x25, #0x4\n"
+ "mls v26.4s, v24.4s, v31.4s\n"
"add x24, x24, #0x4\n"
- "zip1 v20.16b, v20.16b, v0.16b\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s17, [x23, %x[ld_weight_col]]\n"
- "zip1 v18.16b, v22.16b, v18.16b\n"
- "ldr s16, [x23, x22]\n"
- "zip1 v18.16b, v18.16b, v20.16b\n"
- "add x23, x23, #0x4\n"
- ".inst 0x6e9297f9 // udot v25.4s, v31.16b, v18.16b\n"
- "zip1 v17.16b, v17.16b, v0.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v16.16b, v16.16b, v17.16b\n"
- ".inst 0x6e9097f9 // udot v25.4s, v31.16b, v16.16b\n"
- "mls v26.4s, v25.4s, v30.4s\n"
- "add v26.4s, v26.4s, v29.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q21, [%x[outptr], #0x10]\n"
+ "str q22, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ldr q28, [%x[rq_mul_perchannel], x21]\n"
- "ldr q27, [%x[rq_shift_perchannel], x21]\n"
+ "ldr q28, [%x[rq_mul_perchannel], x22]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x22]\n"
"3:" // Loop: Quantisation parameters: Store
+ "subs x21, x21, #0x1\n"
"str q28, [%x[outptr], #0x0]\n"
- "add x21, x21, #0x10\n"
+ "add x22, x22, #0x10\n"
"str q27, [%x[outptr], #0x10]\n"
- "subs x20, x20, #0x1\n"
"add %x[outptr], %x[outptr], #0x20\n"
"bgt 1b\n"
"tst %x[n_channels], #0x3\n"
@@ -128,119 +128,113 @@ void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
"4:" // Oddments
"movi v26.4s, #0x0\n"
"cbz %x[bias], 7f\n"
- "add %x[bias], %x[bias], x21\n"
+ "add %x[bias], %x[bias], x22\n"
"tbz %x[n_channels], #1, 5f\n"
"ld1 { v26.d }[0], [%x[bias]], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[2], [%x[bias]], #0x4\n"
"b 6f\n"
"5:" // Oddments: Load bias: Bit 1: Unset
- "tbz %x[n_channels], #0, 6f\n"
"ld1 { v26.s }[0], [%x[bias]], #0x4\n"
"6:" // Oddments: Load bias: Bit 1: End
-
"7:" // Oddments: Skip bias load
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v24.h }[0], [%x[weights]]\n"
- "ld1 { v22.h }[0], [x24]\n"
- "add x20, %x[weights], %x[ld_weight_col]\n"
- "ld1 { v19.h }[0], [x23]\n"
- "add x19, %x[weights], x22\n"
- "ld1 { v23.h }[0], [x20]\n"
- "add %x[weights], %x[weights], #0x2\n"
- "ld1 { v21.h }[0], [x19]\n"
- "add x20, x24, %x[ld_weight_col]\n"
- "add x19, x24, x22\n"
+ "ld1 { v25.h }[0], [%x[weights]]\n"
+ "ld1 { v23.h }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v22.h }[0], [x21]\n"
"ld1 { v20.h }[0], [x20]\n"
- "ld1 { v18.h }[0], [x19]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v21.h }[0], [x21]\n"
+ "ld1 { v18.h }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v17.h }[0], [x24]\n"
+ "ld1 { v19.h }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x2\n"
+ "add x25, x25, #0x2\n"
+ "ld1 { v16.h }[0], [x20]\n"
"add x24, x24, #0x2\n"
- "add x19, x23, %x[ld_weight_col]\n"
- "ld1 { v17.h }[0], [x19]\n"
- "add x19, x23, x22\n"
- "ld1 { v16.h }[0], [x19]\n"
- "add x23, x23, #0x2\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v24.b }[2], [%x[weights]]\n"
- "ld1 { v22.b }[2], [x24]\n"
- "add x20, %x[weights], %x[ld_weight_col]\n"
- "ld1 { v19.b }[2], [x23]\n"
- "add x19, %x[weights], x22\n"
- "ld1 { v23.b }[2], [x20]\n"
- "add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[2], [x19]\n"
- "add x20, x24, %x[ld_weight_col]\n"
- "add x19, x24, x22\n"
+ "ld1 { v25.b }[2], [%x[weights]]\n"
+ "ld1 { v23.b }[2], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v22.b }[2], [x21]\n"
"ld1 { v20.b }[2], [x20]\n"
- "ld1 { v18.b }[2], [x19]\n"
- "add x20, x23, %x[ld_weight_col]\n"
- "add x19, x23, x22\n"
- "ld1 { v17.b }[2], [x20]\n"
- "ld1 { v16.b }[2], [x19]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v21.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v17.b }[2], [x24]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 9f\n"
"8:" // Oddments: Load weights: Bit 1: Unset
- "tbz %x[n_channels], #0, 9f\n"
- "ld1 { v24.b }[0], [%x[weights]]\n"
- "ld1 { v22.b }[0], [x24]\n"
- "add x20, %x[weights], %x[ld_weight_col]\n"
- "ld1 { v19.b }[0], [x23]\n"
- "add x19, %x[weights], x22\n"
- "ld1 { v23.b }[0], [x20]\n"
- "add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[0], [x19]\n"
- "add x20, x24, %x[ld_weight_col]\n"
- "add x19, x24, x22\n"
+ "ld1 { v25.b }[0], [%x[weights]]\n"
+ "ld1 { v23.b }[0], [x25]\n"
+ "add x21, %x[weights], %x[ld_weight_col]\n"
+ "add x20, %x[weights], x23\n"
+ "ld1 { v22.b }[0], [x21]\n"
"ld1 { v20.b }[0], [x20]\n"
- "ld1 { v18.b }[0], [x19]\n"
- "add x20, x23, %x[ld_weight_col]\n"
- "add x19, x23, x22\n"
- "ld1 { v17.b }[0], [x20]\n"
- "ld1 { v16.b }[0], [x19]\n"
+ "add x21, x25, %x[ld_weight_col]\n"
+ "add x20, x25, x23\n"
+ "ld1 { v21.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
+ "add x21, x24, %x[ld_weight_col]\n"
+ "add x20, x24, x23\n"
+ "ld1 { v17.b }[0], [x24]\n"
+ "ld1 { v19.b }[0], [x21]\n"
+ "add %x[weights], %x[weights], #0x1\n"
+ "ld1 { v16.b }[0], [x20]\n"
"9:" // Oddments: Load weights: Bit 1: End
- "zip1 v21.16b, v24.16b, v21.16b\n"
- "zip1 v23.16b, v23.16b, v0.16b\n"
- "zip1 v18.16b, v22.16b, v18.16b\n"
- "zip1 v20.16b, v20.16b, v0.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v17.16b, v17.16b, v0.16b\n"
- "zip1 v21.16b, v21.16b, v23.16b\n"
- "zip1 v18.16b, v18.16b, v20.16b\n"
- "zip1 v16.16b, v16.16b, v17.16b\n"
- "movi v25.4s, #0x0\n"
- ".inst 0x6e9597f9 // udot v25.4s, v31.16b, v21.16b\n"
- ".inst 0x6e9297f9 // udot v25.4s, v31.16b, v18.16b\n"
- ".inst 0x6e9097f9 // udot v25.4s, v31.16b, v16.16b\n"
- "mls v26.4s, v25.4s, v30.4s\n"
- "add v26.4s, v26.4s, v29.4s\n"
+ "zip1 v20.16b, v25.16b, v20.16b\n"
+ "zip1 v22.16b, v22.16b, v0.16b\n"
+ "zip1 v22.16b, v20.16b, v22.16b\n"
+ "zip1 v20.16b, v23.16b, v18.16b\n"
+ "zip1 v18.16b, v21.16b, v0.16b\n"
+ "movi v24.4s, #0x0\n"
+ ".inst 0x6e9697b8 // udot v24.4s, v29.16b, v22.16b\n"
+ "zip1 v18.16b, v20.16b, v18.16b\n"
+ "zip1 v17.16b, v17.16b, v16.16b\n"
+ ".inst 0x6e9297b8 // udot v24.4s, v29.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v0.16b\n"
+ "zip1 v16.16b, v17.16b, v16.16b\n"
+ ".inst 0x6e9097b8 // udot v24.4s, v29.16b, v16.16b\n"
+ "mls v26.4s, v24.4s, v31.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
"str q26, [%x[outptr], #0x0]\n"
- "str q21, [%x[outptr], #0x10]\n"
+ "str q22, [%x[outptr], #0x10]\n"
"str q18, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 12f\n"
- "add x20, %x[rq_mul_perchannel], x21\n"
- "add x19, %x[rq_shift_perchannel], x21\n"
+ "add x21, %x[rq_mul_perchannel], x22\n"
+ "add x20, %x[rq_shift_perchannel], x22\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v28.d }[0], [x20], #0x8\n"
- "ld1 { v27.d }[0], [x19], #0x8\n"
+ "ld1 { v28.d }[0], [x21], #0x8\n"
+ "ld1 { v27.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v28.s }[2], [x20], #0x4\n"
- "ld1 { v27.s }[2], [x19], #0x4\n"
+ "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x20], #0x4\n"
"b 11f\n"
"10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
- "tbz %x[n_channels], #0, 11f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "ld1 { v27.s }[0], [x19], #0x4\n"
+ "ld1 { v28.s }[0], [x21], #0x4\n"
+ "ld1 { v27.s }[0], [x20], #0x4\n"
"11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
-
"12:" // Oddments: Quantisation parameters: Store
"str q28, [%x[outptr], #0x0]\n"
"str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"13:" // End
-
: [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
index dfb6457ed9..0cf8044733 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,82 +51,82 @@ size_t interleave_sve_s8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_sve_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const int8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
- "mov z30.b, #0x0\n"
- "ptrue p2.b\n"
- "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
- "mov z28.b, #0x1\n"
"cmp %x[ld_weight_col], XZR\n"
- "mov z16.s, #0x9\n"
- "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
"csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
- "mul z27.s, p2/M, z27.s, z29.s\n"
- "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
- "mov x19, #0x3\n"
- "mul z27.s, p2/M, z27.s, z16.s\n"
- "ld1rw { z25.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
- "mul x19, %x[ld_weight_col], x19\n"
+ "mov z16.s, #0x9\n"
+ "mov z28.b, #0x0\n"
+ "mov x20, #0x3\n"
+ "ptrue p2.b\n"
+ "mul x20, %x[ld_weight_col], x20\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
"cmp %x[ld_weight_row], XZR\n"
- "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
- "add x22, %x[weights], %x[ld_weight_row]\n"
- "add x21, x22, %x[ld_weight_row]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
+ "mov z25.b, #0x1\n"
+ "mul z26.s, p2/M, z26.s, z27.s\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
"whilelt p1.s, XZR, %x[n_channels]\n"
- "mov x20, #0x0\n"
+ "mov x21, #0x0\n"
+ "mul z26.s, p2/M, z26.s, z16.s\n"
"pfalse p8.b\n"
"cbz %x[bias], 1f\n"
"ptrue p8.s\n"
"1:" // No bias
"2:" // Loop
- "mov z24.s, #0x0\n"
- "cntp x19, p2, p1.s\n"
+ "cntp x20, p2, p1.s\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
+ "zip1 z20.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x24, x22]\n"
+ "zip1 z22.b, z20.b, z19.b\n"
+ "zip1 z21.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "mov z20.s, #0x0\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x22]\n"
+ "sdot z20.s, z25.b, z22.b\n"
+ "zip1 z19.b, z21.b, z19.b\n"
+ "sdot z20.s, z25.b, z19.b\n"
+ "zip1 z18.b, z18.b, z16.b\n"
+ "zip1 z16.b, z17.b, z28.b\n"
"and p0.b, p2/Z, p8.b, p1.b\n"
- "ld1w { z23.s }, p0/Z, [%x[bias], x20, LSL #2]\n"
- "whilelt p0.b, XZR, x19\n"
- "ld1b { z17.b }, p0/Z, [%x[weights]]\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 z18.b, z16.b, z30.b\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
- "add %x[weights], %x[weights], x19\n"
- "zip1 z16.b, z17.b, z16.b\n"
- "ld1b { z22.b }, p0/Z, [x22]\n"
- "ld1b { z17.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
- "zip1 z21.b, z16.b, z18.b\n"
- "ld1b { z16.b }, p0/Z, [x22, x23]\n"
- "sdot z24.s, z28.b, z21.b\n"
- "add x22, x22, x19\n"
- "zip1 z18.b, z17.b, z30.b\n"
- "ld1b { z20.b }, p0/Z, [x21]\n"
- "ld1b { z19.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
- "zip1 z17.b, z22.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x21, x23]\n"
- "zip1 z18.b, z17.b, z18.b\n"
- "add x21, x21, x19\n"
- "zip1 z17.b, z19.b, z30.b\n"
- "sdot z24.s, z28.b, z18.b\n"
- "zip1 z16.b, z20.b, z16.b\n"
- "zip1 z16.b, z16.b, z17.b\n"
- "sdot z24.s, z28.b, z16.b\n"
- "mls z23.s, p2/M, z24.s, z29.s\n"
- "add z23.s, z23.s, z27.s\n"
- "st1w { z23.s }, p2, [%x[outptr]]\n"
- "st1b { z21.b }, p2, [%x[outptr], #1, MUL VL]\n"
- "st1b { z18.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "sdot z20.s, z25.b, z16.b\n"
+ "mls z17.s, p2/M, z20.s, z27.s\n"
+ "add %x[weights], %x[weights], x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add z17.s, z17.s, z26.s\n"
+ "st1w { z17.s }, p2, [%x[outptr]]\n"
+ "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
"st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #4\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ld1w { z26.s }, p1/Z, [%x[rq_mul_perchannel], x20, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [%x[rq_shift_perchannel], x20, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
"3:" // Loop: Quantisation parameters: Store
- "st1w { z26.s }, p2, [%x[outptr]]\n"
- "incw x20\n"
- "st1w { z25.s }, p2, [%x[outptr], #1, MUL VL]\n"
- "whilelt p1.s, x20, %x[n_channels]\n"
+ "incw x21\n"
+ "whilelt p1.s, x21, %x[n_channels]\n"
+ "st1w { z24.s }, p2, [%x[outptr]]\n"
+ "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #2\n"
"b.any 2b\n"
: [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "p0", "p1", "p2", "p8", "x19", "x20", "x21", "x22", "x23", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
index 6c16bdc2fb..e5bc8198f8 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,82 +51,82 @@ size_t interleave_sve_u8q_3x3_dot::get_packed_size(const DepthwiseArgs &args)
void interleave_sve_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *outptr, const int32_t *bias, const uint8_t *weights, const arm_gemm::Requantize32 &qp, size_t ld_weight_col, size_t ld_weight_row)
{
__asm__ __volatile__(
- "mov z30.b, #0x0\n"
- "ptrue p2.b\n"
- "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
- "mov z28.b, #0x1\n"
"cmp %x[ld_weight_col], XZR\n"
- "mov z16.s, #0x9\n"
- "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
"csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
- "mul z27.s, p2/M, z27.s, z29.s\n"
- "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
- "mov x19, #0x3\n"
- "mul z27.s, p2/M, z27.s, z16.s\n"
- "ld1rw { z25.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
- "mul x19, %x[ld_weight_col], x19\n"
+ "mov z16.s, #0x9\n"
+ "mov z28.b, #0x0\n"
+ "mov x20, #0x3\n"
+ "ptrue p2.b\n"
+ "mul x20, %x[ld_weight_col], x20\n"
+ "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
"cmp %x[ld_weight_row], XZR\n"
- "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x19, NE\n"
- "add x22, %x[weights], %x[ld_weight_row]\n"
- "add x21, x22, %x[ld_weight_row]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
+ "mov z25.b, #0x1\n"
+ "mul z26.s, p2/M, z26.s, z27.s\n"
+ "add x24, %x[weights], %x[ld_weight_row]\n"
+ "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "add x23, x24, %x[ld_weight_row]\n"
+ "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
"whilelt p1.s, XZR, %x[n_channels]\n"
- "mov x20, #0x0\n"
+ "mov x21, #0x0\n"
+ "mul z26.s, p2/M, z26.s, z16.s\n"
"pfalse p8.b\n"
"cbz %x[bias], 1f\n"
"ptrue p8.s\n"
"1:" // No bias
"2:" // Loop
- "mov z24.s, #0x0\n"
- "cntp x19, p2, p1.s\n"
+ "cntp x20, p2, p1.s\n"
+ "whilelt p0.b, XZR, x20\n"
+ "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
+ "zip1 z20.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x24, x22]\n"
+ "zip1 z22.b, z20.b, z19.b\n"
+ "zip1 z21.b, z18.b, z16.b\n"
+ "zip1 z19.b, z17.b, z28.b\n"
+ "mov z20.s, #0x0\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x22]\n"
+ "udot z20.s, z25.b, z22.b\n"
+ "zip1 z19.b, z21.b, z19.b\n"
+ "udot z20.s, z25.b, z19.b\n"
+ "zip1 z18.b, z18.b, z16.b\n"
+ "zip1 z16.b, z17.b, z28.b\n"
"and p0.b, p2/Z, p8.b, p1.b\n"
- "ld1w { z23.s }, p0/Z, [%x[bias], x20, LSL #2]\n"
- "whilelt p0.b, XZR, x19\n"
- "ld1b { z17.b }, p0/Z, [%x[weights]]\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 z18.b, z16.b, z30.b\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
- "add %x[weights], %x[weights], x19\n"
- "zip1 z16.b, z17.b, z16.b\n"
- "ld1b { z22.b }, p0/Z, [x22]\n"
- "ld1b { z17.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
- "zip1 z21.b, z16.b, z18.b\n"
- "ld1b { z16.b }, p0/Z, [x22, x23]\n"
- "udot z24.s, z28.b, z21.b\n"
- "add x22, x22, x19\n"
- "zip1 z18.b, z17.b, z30.b\n"
- "ld1b { z20.b }, p0/Z, [x21]\n"
- "ld1b { z19.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
- "zip1 z17.b, z22.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x21, x23]\n"
- "zip1 z18.b, z17.b, z18.b\n"
- "add x21, x21, x19\n"
- "zip1 z17.b, z19.b, z30.b\n"
- "udot z24.s, z28.b, z18.b\n"
- "zip1 z16.b, z20.b, z16.b\n"
- "zip1 z16.b, z16.b, z17.b\n"
- "udot z24.s, z28.b, z16.b\n"
- "mls z23.s, p2/M, z24.s, z29.s\n"
- "add z23.s, z23.s, z27.s\n"
- "st1w { z23.s }, p2, [%x[outptr]]\n"
- "st1b { z21.b }, p2, [%x[outptr], #1, MUL VL]\n"
- "st1b { z18.b }, p2, [%x[outptr], #2, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "udot z20.s, z25.b, z16.b\n"
+ "mls z17.s, p2/M, z20.s, z27.s\n"
+ "add %x[weights], %x[weights], x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add z17.s, z17.s, z26.s\n"
+ "st1w { z17.s }, p2, [%x[outptr]]\n"
+ "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
"st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #4\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ld1w { z26.s }, p1/Z, [%x[rq_mul_perchannel], x20, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [%x[rq_shift_perchannel], x20, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
+ "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
"3:" // Loop: Quantisation parameters: Store
- "st1w { z26.s }, p2, [%x[outptr]]\n"
- "incw x20\n"
- "st1w { z25.s }, p2, [%x[outptr], #1, MUL VL]\n"
- "whilelt p1.s, x20, %x[n_channels]\n"
+ "incw x21\n"
+ "whilelt p1.s, x21, %x[n_channels]\n"
+ "st1w { z24.s }, p2, [%x[outptr]]\n"
+ "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #2\n"
"b.any 2b\n"
: [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "p0", "p1", "p2", "p8", "x19", "x20", "x21", "x22", "x23", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
+ : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
);
}