| author | ramelg01 <ramy.elgammal@arm.com> | 2022-04-07 02:42:52 +0100 |
|---|---|---|
| committer | Ramy Elgammal <ramy.elgammal@arm.com> | 2022-04-26 15:51:22 +0000 |
| commit | 8a164884dddf769643cf3b9f7f94e43cb4f3c20b (patch) | |
| tree | 35958dd48b6df1a851c880dad2b2ce285671b611 /src/core/NEON/kernels/arm_conv/depthwise/kernels | |
| parent | c827e99fc46521f43719b0c2d1b6f05d66abf68c (diff) | |
| download | ComputeLibrary-8a164884dddf769643cf3b9f7f94e43cb4f3c20b.tar.gz | |
Update Neon™ depthwise kernel
- Reduce duplication and simplify overall structure.
- Improve multi-threaded performance by sharing more data
in lower-level caches.
Partially Resolves: COMPMID-5054
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: Iac747f39b21c540122fa75218762631c4d787911
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7449
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Andrew Mundy
Reviewed-by: Sheri Zhang <sheri.zhang@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_conv/depthwise/kernels')
101 files changed, 15727 insertions, 12364 deletions
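The pattern repeated across the diff below is worth spelling out. Each strategy class previously carried its own kernel-pointer typedefs, a dozen virtual shape getters, and hand-written `indirect_kernel`/`direct_kernel` trampolines; the patch hoists all of that into templated parents such as `DepthwiseDepthfirstStrategy<TInput, TWeight, TOutput, TAccum>`, constructed with the output size, kernel size and stride, so each subclass only supplies its two kernel function pointers. Below is a minimal, hedged sketch of that idea, not the library's actual parent class (which lives elsewhere in the library and carries more state); member names beyond those visible in the hunks are hypothetical. Note that the derived input-size formula matches the per-class constants being deleted, e.g. a 3x3 kernel at stride 1 with a 2x2 output needs a 4x4 input:

```cpp
// Hypothetical sketch of the refactoring pattern in this patch; only the
// names visible in the diff (DepthwiseDepthfirstStrategy, IndirectKernelType,
// DirectKernelType, get_*_kernel) are taken from the source.
#include <cstdint>

template <typename TInput, typename TWeight, typename TOutput, typename TAccum>
class DepthwiseDepthfirstStrategy
{
  unsigned int m_output_size, m_kernel_size, m_stride;

  public:
  // Kernel signatures that every concrete strategy used to re-typedef.
  using IndirectKernelType = void (*)(const TInput *const *, TOutput *const *,
                                      const void *, unsigned int, TAccum, TAccum);
  using DirectKernelType = void (*)(unsigned int, unsigned int,
                                    const TInput *, int64_t, int64_t,
                                    TOutput *, int64_t, int64_t,
                                    const void *, unsigned int, TAccum, TAccum);

  DepthwiseDepthfirstStrategy(unsigned int output_size, unsigned int kernel_size, unsigned int stride)
  : m_output_size(output_size), m_kernel_size(kernel_size), m_stride(stride)
  {
  }
  virtual ~DepthwiseDepthfirstStrategy() = default;

  // Shape getters live here once; the input size is derived, which is why the
  // per-class input_rows/input_cols constants below could be removed.
  unsigned int get_output_rows() const { return m_output_size; }
  unsigned int get_output_cols() const { return m_output_size; }
  unsigned int get_input_rows() const { return m_kernel_size + (m_output_size - 1) * m_stride; }
  unsigned int get_input_cols() const { return m_kernel_size + (m_output_size - 1) * m_stride; }

  // Subclasses now only hand back their kernel function pointers.
  virtual IndirectKernelType get_indirect_kernel() const = 0;
  virtual DirectKernelType get_direct_kernel() const = 0;
};
```

Each concrete strategy, such as `a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst`, then shrinks to a constructor call like `Parent(2, 3, 1)` plus the two `get_*_kernel()` overrides, as the hunks below show.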
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index be50d1c535..d2db12535f 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,60 +56,13 @@ class a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(2, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } 
- - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp index 39fa7f62fd..75368dfcf9 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,60 +56,13 @@ class a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 3; constexpr static unsigned int output_cols = 3; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) {} + a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(3, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int 
n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp index e0abca91a2..faf6c91181 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -1283,7 +1283,8 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl( : : [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (¶ms_struct) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); } } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp index 1e0d922be4..4f0de6b61c 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,60 +56,13 @@ class a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 4; constexpr static unsigned int output_cols = 4; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) {} + a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(4, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int 
n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index d89ae0cd6d..d52f48064f 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,60 +56,13 @@ class a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + 
a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(2, 3, 2) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index 6b5f91fa64..81a608e349 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; @@ -59,60 +56,13 @@ class a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(2, 5, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int 
n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst.hpp index 3468b70f29..1ccd3408e2 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,32 +28,24 @@ #pragma once -#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) namespace arm_conv { namespace depthwise { void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl(const __fp16 *const *const, __fp16 *const *const, const void *, const void *, const unsigned int, const unsigned int, const __fp16, const __fp16); -struct a64_fp16_nhwc_generic_output9_mla_depthfirst +class a64_fp16_nhwc_generic_output9_mla_depthfirst : public GenericDepthfirstKernelStrategy<__fp16, __fp16, __fp16, __fp16> { - typedef __fp16 bias_type; - typedef __fp16 input_type; - typedef __fp16 weight_type; - typedef __fp16 return_type; + KernelType kernel = a64_fp16_nhwc_generic_output9_mla_depthfirst_impl; - typedef void (*kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, const void *, const unsigned int, const unsigned int, const __fp16, const __fp16); + public: + a64_fp16_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) : GenericDepthfirstKernelStrategy<__fp16, __fp16, __fp16, __fp16>(9, arm_gemm::VLType::None) {} - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int n_output_points = 9; - - kern_type kernel = a64_fp16_nhwc_generic_output9_mla_depthfirst_impl; - - a64_fp16_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) {} + virtual KernelType get_kernel() const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#endif // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp index 8ac79f82fa..423ee4190c 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp +++ 
b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,7 +25,7 @@ #include <cstddef> #include <cstdint> -#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) namespace arm_conv { namespace depthwise { @@ -524,4 +524,4 @@ void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl( } // namespace depthwise } // namespace arm_conv -#endif // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#endif // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp index a02a2b2984..8fcbce2cfe 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,35 +28,25 @@ #pragma once -#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) namespace arm_conv { namespace depthwise { void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl(const __fp16 *const *const, __fp16 *const *const, const __fp16 *, const __fp16 *, const unsigned int, const unsigned int, const __fp16, const __fp16); -struct a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst +struct a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst : GenericDepthfirstMultiplierKernelStrategy<__fp16, __fp16, __fp16, __fp16> { - typedef __fp16 bias_type; - typedef __fp16 input_type; - typedef __fp16 weight_type; - typedef __fp16 return_type; - - typedef void (*kern_type)(const __fp16 *const *const, __fp16 *const *const, const __fp16 *, const __fp16 *, const unsigned int, const unsigned int, const __fp16, const __fp16); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int output_rows(void) { return 2; }; - constexpr static unsigned int output_cols(void) { return 8; }; - - constexpr static unsigned int output_col_regs(void) { return 1; }; - - kern_type kernel = a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; - - a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) {} + using Parent = GenericDepthfirstMultiplierKernelStrategy<__fp16, __fp16, __fp16, __fp16>; + a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) + : Parent(2, 8, arm_gemm::VLType::None) + { + } + Parent::KernelType kernel = a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // defined(__ARM_FP16_ARGS) && 
defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#endif // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp index 7ed7c52db2..d9fc1403b2 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,7 +25,7 @@ #include <cstddef> #include <cstdint> -#if defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#if defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) namespace arm_conv { namespace depthwise { @@ -1046,4 +1046,4 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im } // namespace depthwise } // namespace arm_conv -#endif // defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) +#endif // defined(__aarch64__) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index a888eb5776..420e95384d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ +#if defined(__aarch64__) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(2, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - 
} - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp index 01bb06a561..0e9a3ba3fc 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ +#if defined(__aarch64__) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 3; 
constexpr static unsigned int output_cols = 3; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) {} + a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(3, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp index 17084b57d5..6c897d6eaa 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ +#if defined(__aarch64__) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 4; constexpr static unsigned int output_cols = 4; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) {} + a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(4, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - 
} - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index f23862b699..ff521fb2ca 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ +#if defined(__aarch64__) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; 
constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(2, 3, 2) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index e4bfbe6783..c88a7d57ce 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,19 +36,16 @@ namespace depthwise { void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::None; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; @@ -59,60 +56,13 @@ class a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(2, 5, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, 
int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst.hpp index 0f6cecdc56..6fa02b781e 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,28 +28,24 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_fp32_nhwc_generic_output9_mla_depthfirst_impl(const float *const *const, float *const *const, const void *, const void *, const unsigned int, const unsigned int, const float, const float); -struct a64_fp32_nhwc_generic_output9_mla_depthfirst +class a64_fp32_nhwc_generic_output9_mla_depthfirst : public GenericDepthfirstKernelStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const void *, const void *, const unsigned int, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; + KernelType kernel = a64_fp32_nhwc_generic_output9_mla_depthfirst_impl; - constexpr static unsigned int n_output_points = 9; + public: + a64_fp32_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) : GenericDepthfirstKernelStrategy<float, float, float, float>(9, arm_gemm::VLType::None) {} - kern_type kernel = a64_fp32_nhwc_generic_output9_mla_depthfirst_impl; - - a64_fp32_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) {} + KernelType get_kernel() const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp index 60f5ddd68f..2ec0525226 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,39 +28,34 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_impl(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); -struct a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst +struct a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst : DepthfirstMultiplierStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - + using Parent = DepthfirstMultiplierStrategy<float, float, float, float>; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 3; - constexpr static unsigned int output_cols = 3; - - constexpr static unsigned int input_rows = 7; - constexpr static unsigned int input_cols = 7; - constexpr static unsigned int input_col_quads = 2; + a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst(const CPUInfo *) + : Parent(3, 3, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::None; } - a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__)
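The pattern applied across these float headers is uniform: each kernel class stops restating its own typedefs, geometry constants and getter overrides, and instead derives from a templated strategy parent that receives the geometry (output size, kernel size, stride) in its constructor and exposes the kernel function through a virtual getter. This is also why the input_rows/input_cols constants can be deleted: for a depthfirst kernel the input tile is derivable as kernel + (output - 1) * stride, e.g. 3 + (2 - 1) * 2 = 5 for the 3x3 stride-2 case above. A minimal sketch of the shape, with simplified stand-in names (the real DepthwiseDepthfirstStrategy takes four type parameters and carries considerably more interface):

template <typename TInput, typename TOutput>
class StrategyBase
{
  unsigned int m_output_size, m_kernel_size, m_stride;

public:
  // Matches the shape of the indirect-kernel entry points declared above.
  using IndirectKernelType = void (*)(const TInput *const *, TOutput *const *,
                                      const void *, unsigned int, TInput, TInput);

  StrategyBase(unsigned int output_size, unsigned int kernel_size, unsigned int stride)
    : m_output_size(output_size), m_kernel_size(kernel_size), m_stride(stride)
  {
  }
  virtual ~StrategyBase() = default;

  // Geometry is computed once here rather than restated per kernel header.
  unsigned int get_output_size() const { return m_output_size; }
  unsigned int get_input_size() const
  {
    return m_kernel_size + (m_output_size - 1) * m_stride;
  }

  virtual IndirectKernelType get_indirect_kernel() const = 0;
};

// Each kernel header then reduces to a constructor call plus a getter.
inline void example_kernel_impl(const float *const *, float *const *,
                                const void *, unsigned int, float, float)
{
}

class ExampleStrategy : public StrategyBase<float, float>
{
  IndirectKernelType m_kernel = example_kernel_impl;

public:
  ExampleStrategy() : StrategyBase<float, float>(2, 3, 1) {}  // output 2, kernel 3, stride 1
  IndirectKernelType get_indirect_kernel() const override { return m_kernel; }
};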
\ No newline at end of file diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp index 92d6a757f2..5ae8dd3653 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,39 +28,34 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_impl(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); -struct a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst +struct a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst : DepthfirstMultiplierStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - + using Parent = DepthfirstMultiplierStrategy<float, float, float, float>; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 4; - - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 8; - constexpr static unsigned int input_col_quads = 2; + a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst(const CPUInfo *) + : Parent(2, 4, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::None; } - a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp index 2cc2f7c103..d60e15ec84 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,31 +28,25 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl(const float *const *const, float *const *const, const float *, const float *, const unsigned int, const unsigned int, const float, const float); -struct a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst +struct a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst : GenericDepthfirstMultiplierKernelStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const float *, const float *, const unsigned int, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int output_rows(void) { return 2; }; - constexpr static unsigned int output_cols(void) { return 8; }; - - constexpr static unsigned int output_col_regs(void) { return 2; }; - - kern_type kernel = a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; - - a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) {} + using Parent = GenericDepthfirstMultiplierKernelStrategy<float, float, float, float>; + a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) + : Parent(2, 8, arm_gemm::VLType::None) + { + } + Parent::KernelType kernel = a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp index c76cb9906f..62e4a82fbf 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,39 +34,40 @@ namespace arm_conv { namespace depthwise { -void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); +void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32&, const int32_t *, const int32_t *, int8_t *const *); -struct a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst +class a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_3x3_dot::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_3x3_dot::get_packed_size; - - kern_type kernel = a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; - - a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) {} + a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } + + Parent::KernelType kernel = a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + size_t get_storage_size(const DepthwiseArgs &args) const override + { + return interleave_a64_s8q_3x3_dot::get_packed_size(args); + } + + void pack_parameters( + const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, + const void *weights, size_t ld_weight_col, size_t ld_weight_row + ) const override + { + interleave_a64_s8q_3x3_dot::pack_parameters( + args.input_channels, buffer, reinterpret_cast<const int32_t *>(biases), + reinterpret_cast<const int8_t *>(weights), qp, ld_weight_col, ld_weight_row + ); + } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp index ed8cd4861e..f8245fc5d9 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
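The quantized kernels make one further change: parameter packing moves from constexpr function-pointer members (pack_parameters, get_packed_size) to virtual get_storage_size / pack_parameters methods, so driver code can size and fill the packed weight-and-bias buffer through the common strategy interface instead of naming the per-kernel interleave helper. A hedged usage sketch, with stand-in types (the real DepthwiseArgs and arm_gemm::Requantize32 carry more fields):

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in types so the sketch is self-contained.
struct DepthwiseArgs { unsigned int input_channels; };
struct Requantize32 { int32_t a_offset, b_offset, c_offset; };

struct IQuantizedStrategy
{
  virtual ~IQuantizedStrategy() = default;
  virtual size_t get_storage_size(const DepthwiseArgs &) const = 0;
  virtual void pack_parameters(const DepthwiseArgs &, void *, const void *,
                               const Requantize32 &, const void *,
                               size_t ld_weight_col, size_t ld_weight_row) const = 0;
};

// Driver-side use: size and fill the packed buffer through the interface,
// without naming interleave_a64_s8q_3x3_dot (or any other helper) directly.
std::vector<uint8_t> prepare_parameters(const IQuantizedStrategy &strategy,
                                        const DepthwiseArgs &args,
                                        const int32_t *bias, const int8_t *weights,
                                        const Requantize32 &qp,
                                        size_t ld_weight_col, size_t ld_weight_row)
{
  std::vector<uint8_t> buffer(strategy.get_storage_size(args));
  strategy.pack_parameters(args, buffer.data(), bias, qp, weights,
                           ld_weight_col, ld_weight_row);
  return buffer;  // handed to the kernel as its packed "params" argument
}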
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,15 @@ namespace arm_conv { namespace depthwise { -void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *const inptrs, int8_t *const *const outptrs, const void *params, const uint64_t n_channels, const arm_gemm::Requantize32& qp) +void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl( + const unsigned int n_channels, + const int8_t *const *const inptrs, + const int8_t *params, + const int32_t *, // Bias, should be wrapped into the parameters + const arm_gemm::Requantize32& qp, + const int32_t *, const int32_t *, // Requant parameters, also wrapped + int8_t *const *const outptrs +) { __asm__ __volatile__( "ldp x13, x12, [%x[inptrs], #0x0]\n" @@ -1307,7 +1315,7 @@ void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *cons "34:" // End "add SP, SP, #0x80\n" : [params] "+&r" (params) - : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) + : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index 76c927abcb..c1baab43e5 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,37 +36,24 @@ namespace depthwise { void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); -struct a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst +class a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_3x3_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_3x3_mla::get_packed_size; + a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} - kern_type kernel = a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } - a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp index 3001276fb5..0e8d16fc40 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const int8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const int8_t *const *inptrs_raw, - const int8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -91,513 +91,497 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x17, #0x0\n" - "ldr x16, [%x[params], %[offsetof_Params_weights]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v22.16b }, [x24]\n" + "ld1r { v12.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v14.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" "mov x15, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x14, %x[params], %[offsetof_Params_inptrs]\n" + "mov x14, #0x0\n" + "ld1r { v15.8h }, [x19]\n" "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x12, x8, #0x3\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v14.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v9.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v15.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v24.4s }, [x20]\n" - "ld1r { v12.4s }, [x19]\n" - "ldp x10, x9, [x21, #0x0]\n" - "ldp x28, x27, [x21, #0x10]\n" - "cbz x12, 3f\n" - "subs x12, x12, #0x1\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" + "cbz x16, 3f\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q13, [x19, #0x0]\n" - "mov v17.16b, v13.16b\n" - "ldr q19, [x19, #0x10]\n" + "subs x16, x16, #0x1\n" + "mov v19.16b, v13.16b\n" + "ldr q26, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v13.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v23.16b, v13.16b\n" - "ldr d0, [x16, #0x0]\n" - "ssubl v0.8h, v0.8b, v9.8b\n" - "mov v25.16b, v19.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v21.16b, v19.16b\n" - "ldr d2, [x16, #0x10]\n" - "ssubl v1.8h, v1.8b, v9.8b\n" - "mov v20.16b, v19.16b\n" - "ldr d3, [x16, #0x18]\n" - "ldr d4, [x16, #0x20]\n" - "ssubl v2.8h, v2.8b, v9.8b\n" - "ldr d5, [x16, #0x28]\n" - "ssubl v3.8h, v3.8b, v9.8b\n" - "ldr d6, [x16, #0x30]\n" - "ldr d7, [x16, #0x38]\n" - "ssubl v4.8h, v4.8b, v9.8b\n" - "ldr d8, [x16, #0x40]\n" - "ssubl v5.8h, v5.8b, v9.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "ssubl v6.8h, v6.8b, v9.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "ssubl v7.8h, v7.8b, v9.8b\n" - "ssubl v8.8h, v8.8b, v9.8b\n" - "ldr x19, [x14, #0x20]\n" - "ldr d31, [x23, x17]\n" - 
"ssubl v31.8h, v31.8b, v14.8b\n" - "ldr d30, [x22, x17]\n" - "ldr d29, [x21, x17]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" - "ldr d28, [x20, x17]\n" - "ldr d27, [x19, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "ssubl v27.8h, v27.8b, v14.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v11.16b, v26.16b\n" + "mov v18.16b, v13.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v24.16b, v26.16b\n" + "mov v9.16b, v13.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v26.16b\n" + "ssubl v0.8h, v0.8b, v12.8b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "ssubl v1.8h, v1.8b, v12.8b\n" + "ssubl v2.8h, v2.8b, v12.8b\n" + "ldp x23, x22, [x12, #0x0]\n" + "ldp x21, x20, [x12, #0x10]\n" + "ssubl v3.8h, v3.8b, v12.8b\n" + "ssubl v4.8h, v4.8b, v12.8b\n" + "ldr x19, [x12, #0x20]\n" + "ldr d31, [x23, x15]\n" + "ssubl v5.8h, v5.8b, v12.8b\n" + "ssubl v6.8h, v6.8b, v12.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "ssubl v7.8h, v7.8b, v12.8b\n" + "ssubl v8.8h, v8.8b, v12.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "ssubl v27.8h, v27.8b, v22.8b\n" "beq 2f\n" "1:" // Loop "smlal v13.4s, v31.4h, v4.4h\n" - "ldr x21, [x14, #0x28]\n" - "add x16, x16, #0x48\n" - "smlal2 v19.4s, v31.8h, v4.8h\n" - "ldr x20, [x14, #0x30]\n" - "subs x12, x12, #0x1\n" - "smlal v17.4s, v31.4h, v3.4h\n" - "ldr x26, [x14, #0x38]\n" - "smlal2 v25.4s, v31.8h, v3.8h\n" - "ldr x25, [x14, #0x40]\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "ldr x19, [x14, #0x48]\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "ldr x24, [x14, #0x50]\n" - "smlal v23.4s, v31.4h, v0.4h\n" - "ldr x23, [x14, #0x58]\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x21, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x21, [x12, #0x28]\n" + "ldr x26, [x12, #0x38]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ldr x20, [x12, #0x30]\n" + "ldr x25, [x12, #0x40]\n" "smlal v13.4s, v30.4h, v0.4h\n" - "ldr x22, [x14, #0x60]\n" - "smlal2 v19.4s, v30.8h, v0.8h\n" - "ldr d30, [x19, x17]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" - "smlal v17.4s, v29.4h, v2.4h\n" - "ldr x21, [x14, #0x68]\n" - "smlal2 v25.4s, v29.8h, v2.8h\n" - "ldr d29, [x20, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ldr x19, [x12, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "ldr x24, [x12, #0x50]\n" + "ldr x23, [x12, #0x58]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" "smlal v13.4s, v28.4h, v5.4h\n" - "ldr x20, [x14, #0x70]\n" - "smlal2 v19.4s, v28.8h, v5.8h\n" - "ldr x19, [x14, #0x78]\n" - "smlal v17.4s, v28.4h, v4.4h\n" - "ldr q26, [x13, #0x0]\n" - "smlal2 v25.4s, v28.8h, v4.8h\n" - "ldr q10, [x11, #0x0]\n" - "smlal v16.4s, v28.4h, v2.4h\n" - "ldr q11, [x13, #0x10]\n" - "add x13, x13, #0x20\n" - "smlal2 v21.4s, v28.8h, v2.8h\n" - "ldr q18, [x11, #0x10]\n" - "add x11, x11, #0x20\n" - "smlal v23.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x26, x17]\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal v16.4s, v31.4h, v6.4h\n" - "smlal2 
v21.4s, v31.8h, v6.8h\n" - "ldr d31, [x25, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "ldr x21, [x12, #0x68]\n" + "ldr x20, [x12, #0x70]\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "ldr x19, [x12, #0x78]\n" + "ldr q21, [x13, #0x0]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "ssubl v28.8h, v28.8b, v22.8b\n" "smlal v13.4s, v27.4h, v7.4h\n" - "smlal2 v19.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v27.4h, v6.4h\n" - "smlal2 v25.4s, v27.8h, v6.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "smlal v23.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "ldr q25, [x11, #0x0]\n" + "ldr q10, [x13, #0x10]\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "ldr q16, [x11, #0x10]\n" + "add x17, x17, #0x48\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" + "subs x16, x16, #0x1\n" + "add x13, x13, #0x20\n" "smlal v13.4s, v28.4h, v1.4h\n" - "smlal2 v19.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "ldr d29, [x24, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v0.4h\n" - "smlal2 v25.4s, v28.8h, v0.8h\n" - "ldr d28, [x23, x17]\n" - "ssubl v28.8h, v28.8b, v14.8b\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "add x11, x11, #0x20\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "ssubl v29.8h, v29.8b, v22.8b\n" "smlal v13.4s, v31.4h, v2.4h\n" - "smlal2 v19.4s, v31.8h, v2.8h\n" - "smlal v17.4s, v31.4h, v1.4h\n" - "smlal2 v25.4s, v31.8h, v1.8h\n" - "ldr d31, [x22, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" "smlal v13.4s, v30.4h, v8.4h\n" - "smlal2 v19.4s, v30.8h, v8.8h\n" - "smlal v17.4s, v30.4h, v7.4h\n" - "smlal2 v25.4s, v30.8h, v7.8h\n" - "smlal v16.4s, v30.4h, v5.4h\n" - "smlal2 v21.4s, v30.8h, v5.8h\n" - "smlal v23.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x17]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal v9.4s, v28.4h, v2.4h\n" "smlal v13.4s, v29.4h, v3.4h\n" - "smlal2 v19.4s, v29.8h, v3.8h\n" - "smlal v16.4s, v29.4h, v0.4h\n" - "smlal2 v21.4s, v29.8h, v0.8h\n" - "ldr d29, [x20, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v5.4h\n" - "smlal2 v25.4s, v28.8h, v5.8h\n" - "smlal v23.4s, v28.4h, v2.4h\n" - "smlal2 v20.4s, v28.8h, v2.8h\n" - "ldr d28, [x19, x17]\n" - "add x17, x17, #0x8\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, 
x15]\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x15, x15, #0x8\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" "smlal v13.4s, v31.4h, v6.4h\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal2 v19.4s, v31.8h, v6.8h\n" - "smlal v16.4s, v31.4h, v3.4h\n" - "smlal2 v21.4s, v31.8h, v3.8h\n" - "smlal v17.4s, v30.4h, v8.4h\n" - "smlal2 v25.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v5.4h\n" - "smlal2 v20.4s, v30.8h, v5.8h\n" - "smlal v16.4s, v29.4h, v7.4h\n" - "smlal2 v21.4s, v29.8h, v7.8h\n" - "smlal v23.4s, v29.4h, v6.4h\n" - "smlal2 v20.4s, v29.8h, v6.8h\n" - "smlal v16.4s, v28.4h, v8.4h\n" - "smlal2 v21.4s, v28.8h, v8.8h\n" - "smlal v23.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "sqrdmulh v13.4s, v13.4s, v26.4s\n" - "sqrdmulh v19.4s, v19.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v26.4s\n" - "sqrdmulh v25.4s, v25.4s, v11.4s\n" - "and v22.16b, v13.16b, v10.16b\n" - "sshr v22.4s, v22.4s, #0x1f\n" - "and v28.16b, v19.16b, v18.16b\n" - "and v3.16b, v17.16b, v10.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "and v6.16b, v25.16b, v18.16b\n" - "sqrdmulh v16.4s, v16.4s, v26.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v11.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v22.4s\n" - "sqrdmulh v23.4s, v23.4s, v26.4s\n" - "and v0.16b, v16.16b, v10.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v13.4s, v13.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v28.4s\n" - "sqadd v17.4s, v17.4s, v3.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "and v29.16b, v21.16b, v18.16b\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" - "add v13.4s, v13.4s, v15.4s\n" - "srshl v19.4s, v19.4s, v18.4s\n" - "srshl v17.4s, v17.4s, v10.4s\n" - "srshl v25.4s, v25.4s, v18.4s\n" - "smin v13.4s, v13.4s, v12.4s\n" - "add v19.4s, v19.4s, v15.4s\n" - "add v17.4s, v17.4s, v15.4s\n" - "smax v13.4s, v13.4s, v24.4s\n" - "smin v19.4s, v19.4s, v12.4s\n" - "smin v17.4s, v17.4s, v12.4s\n" - "add v25.4s, v25.4s, v15.4s\n" - "smax v19.4s, v19.4s, v24.4s\n" - "smax v17.4s, v17.4s, v24.4s\n" - "smin v25.4s, v25.4s, v12.4s\n" - "uzp1 v13.16b, v13.16b, v19.16b\n" - "sqadd v16.4s, v16.4s, v0.4s\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, 
v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" - "str d13, [x10, x15]\n" - "smax v25.4s, v25.4s, v24.4s\n" - "sqadd v21.4s, v21.4s, v29.4s\n" - "srshl v16.4s, v16.4s, v10.4s\n" - "and v3.16b, v23.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v25.16b\n" - "add v16.4s, v16.4s, v15.4s\n" - "srshl v21.4s, v21.4s, v18.4s\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x9, x15]\n" - "smin v16.4s, v16.4s, v12.4s\n" - "sqrdmulh v20.4s, v20.4s, v11.4s\n" - "add v21.4s, v21.4s, v15.4s\n" - "sqadd v23.4s, v23.4s, v3.4s\n" - "smax v16.4s, v16.4s, v24.4s\n" - "smin v21.4s, v21.4s, v12.4s\n" - "and v25.16b, v20.16b, v18.16b\n" - "sshr v25.4s, v25.4s, #0x1f\n" - "smax v21.4s, v21.4s, v24.4s\n" - "srshl v23.4s, v23.4s, v10.4s\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v23.4s, v23.4s, v15.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x28, x15]\n" - "smin v23.4s, v23.4s, v12.4s\n" - "sqadd v20.4s, v20.4s, v25.4s\n" - "smax v23.4s, v23.4s, v24.4s\n" - "srshl v20.4s, v20.4s, v18.4s\n" - "add v20.4s, v20.4s, v15.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v20.4s, v20.4s, v24.4s\n" - "uzp1 v23.16b, v23.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "str d23, [x27, x15]\n" - "add x15, x15, #0x8\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "str d13, [x10, x14]\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "str d19, [x9, x14]\n" + "str d18, [x28, x14]\n" + "str d9, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q13, [x19, #0x0]\n" - "mov v17.16b, v13.16b\n" - "ldr q19, [x19, #0x10]\n" + "add x14, x14, #0x8\n" + "ldr q26, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v13.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v23.16b, v13.16b\n" - "ldr d0, [x16, #0x0]\n" - "ssubl v0.8h, v0.8b, v9.8b\n" - "mov v25.16b, v19.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v21.16b, v19.16b\n" - "ldr d2, [x16, #0x10]\n" - "ssubl v1.8h, v1.8b, v9.8b\n" - "mov v20.16b, v19.16b\n" - "ldr d3, [x16, #0x18]\n" - "ldr d4, [x16, #0x20]\n" - "ssubl v2.8h, v2.8b, v9.8b\n" - "ldr d5, [x16, #0x28]\n" - "ssubl v3.8h, v3.8b, v9.8b\n" - "ldr d6, [x16, #0x30]\n" - "ldr d7, [x16, #0x38]\n" - "ssubl v4.8h, v4.8b, v9.8b\n" - "ldr d8, [x16, #0x40]\n" - "ssubl v5.8h, v5.8b, v9.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "ssubl v6.8h, v6.8b, v9.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "ssubl v7.8h, v7.8b, 
v9.8b\n" - "ssubl v8.8h, v8.8b, v9.8b\n" - "ldr x19, [x14, #0x20]\n" - "ldr d31, [x23, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" - "ldr d30, [x22, x17]\n" - "ldr d29, [x21, x17]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" - "ldr d28, [x20, x17]\n" - "ldr d27, [x19, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "ssubl v27.8h, v27.8b, v14.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v19.16b, v13.16b\n" + "mov v11.16b, v26.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v18.16b, v13.16b\n" + "mov v24.16b, v26.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v9.16b, v13.16b\n" + "mov v23.16b, v26.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "ssubl v0.8h, v0.8b, v12.8b\n" + "ssubl v1.8h, v1.8b, v12.8b\n" + "ldp x23, x22, [x12, #0x0]\n" + "ldp x21, x20, [x12, #0x10]\n" + "ssubl v2.8h, v2.8b, v12.8b\n" + "ssubl v3.8h, v3.8b, v12.8b\n" + "ldr x19, [x12, #0x20]\n" + "ldr d31, [x23, x15]\n" + "ssubl v4.8h, v4.8b, v12.8b\n" + "ssubl v5.8h, v5.8b, v12.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "ssubl v6.8h, v6.8b, v12.8b\n" + "ssubl v7.8h, v7.8b, v12.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "ssubl v8.8h, v8.8b, v12.8b\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "ssubl v27.8h, v27.8b, v22.8b\n" "bgt 1b\n" "2:" // Tail "smlal v13.4s, v31.4h, v4.4h\n" - "ldr x21, [x14, #0x28]\n" - "tst x8, #0x7\n" - "smlal2 v19.4s, v31.8h, v4.8h\n" - "ldr x20, [x14, #0x30]\n" - "smlal v17.4s, v31.4h, v3.4h\n" - "ldr x26, [x14, #0x38]\n" - "smlal2 v25.4s, v31.8h, v3.8h\n" - "ldr x25, [x14, #0x40]\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "ldr x19, [x14, #0x48]\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "ldr x24, [x14, #0x50]\n" - "smlal v23.4s, v31.4h, v0.4h\n" - "ldr x23, [x14, #0x58]\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x21, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x21, [x12, #0x28]\n" + "ldr x26, [x12, #0x38]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ldr x20, [x12, #0x30]\n" + "ldr x25, [x12, #0x40]\n" "smlal v13.4s, v30.4h, v0.4h\n" - "ldr x22, [x14, #0x60]\n" - "smlal2 v19.4s, v30.8h, v0.8h\n" - "ldr d30, [x19, x17]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" - "smlal v17.4s, v29.4h, v2.4h\n" - "ldr x21, [x14, #0x68]\n" - "smlal2 v25.4s, v29.8h, v2.8h\n" - "ldr d29, [x20, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ldr x19, [x12, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "ldr x24, [x12, #0x50]\n" + "ldr x23, [x12, #0x58]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" "smlal v13.4s, v28.4h, v5.4h\n" - "ldr x20, [x14, #0x70]\n" - "smlal2 v19.4s, v28.8h, v5.8h\n" - "ldr x19, [x14, #0x78]\n" - "smlal v17.4s, v28.4h, v4.4h\n" - "ldr q26, [x13, #0x0]\n" - "smlal2 v25.4s, v28.8h, v4.8h\n" - "ldr q10, [x11, #0x0]\n" - "smlal v16.4s, v28.4h, v2.4h\n" - "ldr q11, [x13, #0x10]\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "ldr x21, [x12, #0x68]\n" + 
"ldr x20, [x12, #0x70]\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "ldr x19, [x12, #0x78]\n" + "ldr q21, [x13, #0x0]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "smlal v13.4s, v27.4h, v7.4h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "ldr q25, [x11, #0x0]\n" + "ldr q10, [x13, #0x10]\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "ldr q16, [x11, #0x10]\n" + "tst x8, #0x7\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" "add x13, x13, #0x20\n" - "smlal2 v21.4s, v28.8h, v2.8h\n" - "ldr q18, [x11, #0x10]\n" "add x11, x11, #0x20\n" - "smlal v23.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x26, x17]\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal v16.4s, v31.4h, v6.4h\n" - "smlal2 v21.4s, v31.8h, v6.8h\n" - "ldr d31, [x25, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" - "smlal v13.4s, v27.4h, v7.4h\n" - "smlal2 v19.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v27.4h, v6.4h\n" - "smlal2 v25.4s, v27.8h, v6.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "smlal v23.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" "smlal v13.4s, v28.4h, v1.4h\n" - "smlal2 v19.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "ldr d29, [x24, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v0.4h\n" - "smlal2 v25.4s, v28.8h, v0.8h\n" - "ldr d28, [x23, x17]\n" - "ssubl v28.8h, v28.8b, v14.8b\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "ssubl v29.8h, v29.8b, v22.8b\n" "smlal v13.4s, v31.4h, v2.4h\n" - "smlal2 v19.4s, v31.8h, v2.8h\n" - "smlal v17.4s, v31.4h, v1.4h\n" - "smlal2 v25.4s, v31.8h, v1.8h\n" - "ldr d31, [x22, x17]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" "smlal v13.4s, v30.4h, v8.4h\n" - "smlal2 v19.4s, v30.8h, v8.8h\n" - "smlal v17.4s, v30.4h, v7.4h\n" - "smlal2 v25.4s, v30.8h, v7.8h\n" - "smlal v16.4s, v30.4h, v5.4h\n" - "smlal2 v21.4s, v30.8h, v5.8h\n" - "smlal v23.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x17]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal v9.4s, v28.4h, v2.4h\n" "smlal v13.4s, v29.4h, v3.4h\n" - "smlal2 v19.4s, v29.8h, v3.8h\n" - "smlal v16.4s, v29.4h, v0.4h\n" - "smlal2 v21.4s, v29.8h, v0.8h\n" - "ldr d29, [x20, x17]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v5.4h\n" - "smlal2 v25.4s, v28.8h, v5.8h\n" - "smlal v23.4s, v28.4h, v2.4h\n" - "smlal2 v20.4s, v28.8h, v2.8h\n" - "ldr d28, [x19, x17]\n" - "add x17, x17, #0x8\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal2 
v24.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x15]\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x15, x15, #0x8\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" "smlal v13.4s, v31.4h, v6.4h\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal2 v19.4s, v31.8h, v6.8h\n" - "smlal v16.4s, v31.4h, v3.4h\n" - "smlal2 v21.4s, v31.8h, v3.8h\n" - "smlal v17.4s, v30.4h, v8.4h\n" - "smlal2 v25.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v5.4h\n" - "smlal2 v20.4s, v30.8h, v5.8h\n" - "smlal v16.4s, v29.4h, v7.4h\n" - "smlal2 v21.4s, v29.8h, v7.8h\n" - "smlal v23.4s, v29.4h, v6.4h\n" - "smlal2 v20.4s, v29.8h, v6.8h\n" - "smlal v16.4s, v28.4h, v8.4h\n" - "smlal2 v21.4s, v28.8h, v8.8h\n" - "smlal v23.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "sqrdmulh v13.4s, v13.4s, v26.4s\n" - "sqrdmulh v19.4s, v19.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v26.4s\n" - "sqrdmulh v25.4s, v25.4s, v11.4s\n" - "and v22.16b, v13.16b, v10.16b\n" - "sshr v22.4s, v22.4s, #0x1f\n" - "and v28.16b, v19.16b, v18.16b\n" - "and v3.16b, v17.16b, v10.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "and v6.16b, v25.16b, v18.16b\n" - "sqrdmulh v16.4s, v16.4s, v26.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v11.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v22.4s\n" - "sqrdmulh v23.4s, v23.4s, v26.4s\n" - "and v0.16b, v16.16b, v10.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v13.4s, v13.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v28.4s\n" - "sqadd v17.4s, v17.4s, v3.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "and v29.16b, v21.16b, v18.16b\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" - "add v13.4s, v13.4s, v15.4s\n" - "srshl v19.4s, v19.4s, v18.4s\n" - "srshl v17.4s, v17.4s, v10.4s\n" - "srshl v25.4s, v25.4s, v18.4s\n" - "smin v13.4s, v13.4s, v12.4s\n" - "add v19.4s, v19.4s, v15.4s\n" - "add v17.4s, v17.4s, v15.4s\n" - "smax v13.4s, v13.4s, v24.4s\n" - "smin v19.4s, v19.4s, v12.4s\n" - "smin v17.4s, v17.4s, v12.4s\n" - "add v25.4s, v25.4s, v15.4s\n" - "smax v19.4s, v19.4s, v24.4s\n" - "smax v17.4s, v17.4s, v24.4s\n" - "smin v25.4s, v25.4s, v12.4s\n" - "uzp1 v13.16b, v13.16b, v19.16b\n" - "sqadd v16.4s, v16.4s, v0.4s\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, 
v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" - "str d13, [x10, x15]\n" - "smax v25.4s, v25.4s, v24.4s\n" - "sqadd v21.4s, v21.4s, v29.4s\n" - "srshl v16.4s, v16.4s, v10.4s\n" - "and v3.16b, v23.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v25.16b\n" - "add v16.4s, v16.4s, v15.4s\n" - "srshl v21.4s, v21.4s, v18.4s\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x9, x15]\n" - "smin v16.4s, v16.4s, v12.4s\n" - "sqrdmulh v20.4s, v20.4s, v11.4s\n" - "add v21.4s, v21.4s, v15.4s\n" - "sqadd v23.4s, v23.4s, v3.4s\n" - "smax v16.4s, v16.4s, v24.4s\n" - "smin v21.4s, v21.4s, v12.4s\n" - "and v25.16b, v20.16b, v18.16b\n" - "sshr v25.4s, v25.4s, #0x1f\n" - "smax v21.4s, v21.4s, v24.4s\n" - "srshl v23.4s, v23.4s, v10.4s\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v23.4s, v23.4s, v15.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x28, x15]\n" - "smin v23.4s, v23.4s, v12.4s\n" - "sqadd v20.4s, v20.4s, v25.4s\n" - "smax v23.4s, v23.4s, v24.4s\n" - "srshl v20.4s, v20.4s, v18.4s\n" - "add v20.4s, v20.4s, v15.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v20.4s, v20.4s, v24.4s\n" - "uzp1 v23.16b, v23.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "str d23, [x27, x15]\n" - "add x15, x15, #0x8\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "str d13, [x10, x14]\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "str d19, [x9, x14]\n" + "str d18, [x28, x14]\n" + "str d9, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 64f\n" - "add x16, x16, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "tbz x8, #2, 5f\n" "ld1 { v13.4s }, [x19], #0x10\n" "tbz x8, #1, 4f\n" - "ld1 { v19.d }[0], [x19], #0x8\n" + "ld1 { v26.d }[0], [x19], #0x8\n" "tbz x8, #0, 7f\n" - "ld1 { v19.s }[2], [x19]\n" + "ld1 { v26.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset "tbz x8, #0, 7f\n" - "ld1 { v19.s }[0], [x19]\n" + "ld1 { v26.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset "tbz x8, #1, 6f\n" @@ -609,38 +593,38 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 7f\n" "ld1 { v13.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v17.16b, v13.16b\n" - "ldr d0, [x16, #0x0]\n" - "mov v25.16b, v19.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v16.16b, v13.16b\n" - "ldr d2, [x16, #0x10]\n" - 
"mov v21.16b, v19.16b\n" - "ldr d3, [x16, #0x18]\n" - "mov v23.16b, v13.16b\n" - "ldr d4, [x16, #0x20]\n" - "ssubl v0.8h, v0.8b, v9.8b\n" - "mov v20.16b, v19.16b\n" - "ldr d5, [x16, #0x28]\n" - "ssubl v1.8h, v1.8b, v9.8b\n" - "ldr d6, [x16, #0x30]\n" - "ssubl v2.8h, v2.8b, v9.8b\n" - "ldr d7, [x16, #0x38]\n" - "ssubl v3.8h, v3.8b, v9.8b\n" - "ldr d8, [x16, #0x40]\n" - "ssubl v4.8h, v4.8b, v9.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "ssubl v5.8h, v5.8b, v9.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "ssubl v6.8h, v6.8b, v9.8b\n" - "ssubl v7.8h, v7.8b, v9.8b\n" - "ldr x19, [x14, #0x20]\n" - "ssubl v8.8h, v8.8b, v9.8b\n" - "add x23, x23, x17\n" - "add x22, x22, x17\n" - "add x21, x21, x17\n" - "add x20, x20, x17\n" - "add x19, x19, x17\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v19.16b, v13.16b\n" + "mov v11.16b, v26.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v18.16b, v13.16b\n" + "mov v24.16b, v26.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v9.16b, v13.16b\n" + "mov v23.16b, v26.16b\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "ssubl v0.8h, v0.8b, v12.8b\n" + "ssubl v1.8h, v1.8b, v12.8b\n" + "ldr d8, [x17, #0x40]\n" + "ldp x23, x22, [x12, #0x0]\n" + "ssubl v2.8h, v2.8b, v12.8b\n" + "ssubl v3.8h, v3.8b, v12.8b\n" + "ldp x21, x20, [x12, #0x10]\n" + "ldr x19, [x12, #0x20]\n" + "ssubl v4.8h, v4.8b, v12.8b\n" + "ssubl v5.8h, v5.8b, v12.8b\n" + "ssubl v6.8h, v6.8b, v12.8b\n" + "ssubl v7.8h, v7.8b, v12.8b\n" + "ssubl v8.8h, v8.8b, v12.8b\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x23], #0x4\n" "ld1 { v30.s }[0], [x22], #0x4\n" @@ -690,33 +674,33 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "ld1 { v28.b }[0], [x20]\n" "ld1 { v27.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x21, [x14, #0x28]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "ssubl v31.8h, v31.8b, v22.8b\n" "smlal v13.4s, v31.4h, v4.4h\n" - "ssubl v30.8h, v30.8b, v14.8b\n" - "smlal2 v19.4s, v31.8h, v4.8h\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v31.4h, v3.4h\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal2 v25.4s, v31.8h, v3.8h\n" - "ssubl v27.8h, v27.8b, v14.8b\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "add x21, x21, x17\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "smlal v23.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x21, [x12, #0x28]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "add x21, x21, x15\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ssubl v28.8h, v28.8b, v22.8b\n" "smlal v13.4s, v30.4h, v0.4h\n" - "smlal2 v19.4s, v30.8h, v0.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" - "smlal2 v25.4s, v29.8h, v2.8h\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ssubl v27.8h, v27.8b, v22.8b\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" "smlal v13.4s, v28.4h, v5.4h\n" - "smlal2 v19.4s, v28.8h, v5.8h\n" - "smlal v17.4s, v28.4h, v4.4h\n" - "smlal2 v25.4s, v28.8h, v4.8h\n" - "smlal v16.4s, v28.4h, v2.4h\n" - "smlal2 v21.4s, v28.8h, v2.8h\n" - "smlal v23.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 
v24.4s, v28.8h, v2.8h\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" "tbz x8, #2, 13f\n" "ld1 { v31.s }[0], [x21], #0x4\n" "tbz x8, #1, 12f\n" @@ -738,19 +722,19 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 15f\n" "ld1 { v31.b }[0], [x21]\n" "15:" // Oddments: Load (3, 0): Bit 2: End + "ssubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr x20, [x12, #0x30]\n" "smlal v13.4s, v27.4h, v7.4h\n" - "ldr x20, [x14, #0x30]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" - "smlal2 v19.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v27.4h, v6.4h\n" - "add x20, x20, x17\n" - "smlal2 v25.4s, v27.8h, v6.8h\n" - "smlal v23.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "smlal v16.4s, v31.4h, v6.4h\n" - "smlal2 v21.4s, v31.8h, v6.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "add x20, x20, x15\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" "tbz x8, #2, 17f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x8, #1, 16f\n" @@ -772,11 +756,11 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 19f\n" "ld1 { v29.b }[0], [x20]\n" "19:" // Oddments: Load (3, 3): Bit 2: End - "ldr x26, [x14, #0x38]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v23.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "add x26, x26, x17\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "ldr x26, [x12, #0x38]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "add x26, x26, x15\n" "tbz x8, #2, 21f\n" "ld1 { v28.s }[0], [x26], #0x4\n" "tbz x8, #1, 20f\n" @@ -798,13 +782,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 23f\n" "ld1 { v28.b }[0], [x26]\n" "23:" // Oddments: Load (0, 1): Bit 2: End - "ldr x25, [x14, #0x40]\n" - "ssubl v28.8h, v28.8b, v14.8b\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "ldr x25, [x12, #0x40]\n" "smlal v13.4s, v28.4h, v1.4h\n" - "smlal2 v19.4s, v28.8h, v1.8h\n" - "add x25, x25, x17\n" - "smlal v17.4s, v28.4h, v0.4h\n" - "smlal2 v25.4s, v28.8h, v0.8h\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "add x25, x25, x15\n" "tbz x8, #2, 25f\n" "ld1 { v31.s }[0], [x25], #0x4\n" "tbz x8, #1, 24f\n" @@ -826,13 +810,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 27f\n" "ld1 { v31.b }[0], [x25]\n" "27:" // Oddments: Load (0, 2): Bit 2: End - "ldr x19, [x14, #0x48]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "ldr x19, [x12, #0x48]\n" "smlal v13.4s, v31.4h, v2.4h\n" - "smlal2 v19.4s, v31.8h, v2.8h\n" - "add x19, x19, x17\n" - "smlal v17.4s, v31.4h, v1.4h\n" - "smlal2 v25.4s, v31.8h, v1.8h\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "add x19, x19, x15\n" "tbz x8, #2, 29f\n" "ld1 { v30.s }[0], [x19], #0x4\n" "tbz x8, #1, 28f\n" @@ -854,17 +838,17 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 31f\n" "ld1 { v30.b }[0], [x19]\n" "31:" // Oddments: Load (2, 2): Bit 2: End - "ldr x24, [x14, #0x50]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "ldr x24, [x12, #0x50]\n" "smlal v13.4s, v30.4h, v8.4h\n" - "smlal2 v19.4s, v30.8h, v8.8h\n" - "add x24, x24, x17\n" - "smlal v17.4s, v30.4h, v7.4h\n" - "smlal2 v25.4s, v30.8h, 
v7.8h\n" - "smlal v16.4s, v30.4h, v5.4h\n" - "smlal2 v21.4s, v30.8h, v5.8h\n" - "smlal v23.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "add x24, x24, x15\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" "tbz x8, #2, 33f\n" "ld1 { v29.s }[0], [x24], #0x4\n" "tbz x8, #1, 32f\n" @@ -886,13 +870,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 35f\n" "ld1 { v29.b }[0], [x24]\n" "35:" // Oddments: Load (1, 0): Bit 2: End - "ldr x23, [x14, #0x58]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "ldr x23, [x12, #0x58]\n" "smlal v13.4s, v29.4h, v3.4h\n" - "smlal2 v19.4s, v29.8h, v3.8h\n" - "add x23, x23, x17\n" - "smlal v16.4s, v29.4h, v0.4h\n" - "smlal2 v21.4s, v29.8h, v0.8h\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "add x23, x23, x15\n" "tbz x8, #2, 37f\n" "ld1 { v28.s }[0], [x23], #0x4\n" "tbz x8, #1, 36f\n" @@ -914,13 +898,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 39f\n" "ld1 { v28.b }[0], [x23]\n" "39:" // Oddments: Load (1, 3): Bit 2: End - "ldr x22, [x14, #0x60]\n" - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v5.4h\n" - "smlal2 v25.4s, v28.8h, v5.8h\n" - "add x22, x22, x17\n" - "smlal v23.4s, v28.4h, v2.4h\n" - "smlal2 v20.4s, v28.8h, v2.8h\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "smlal v9.4s, v28.4h, v2.4h\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "add x22, x22, x15\n" "tbz x8, #2, 41f\n" "ld1 { v31.s }[0], [x22], #0x4\n" "tbz x8, #1, 40f\n" @@ -942,13 +926,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 43f\n" "ld1 { v31.b }[0], [x22]\n" "43:" // Oddments: Load (2, 0): Bit 2: End - "ldr x21, [x14, #0x68]\n" - "ssubl v31.8h, v31.8b, v14.8b\n" + "ssubl v31.8h, v31.8b, v22.8b\n" + "ldr x21, [x12, #0x68]\n" "smlal v13.4s, v31.4h, v6.4h\n" - "smlal2 v19.4s, v31.8h, v6.8h\n" - "add x21, x21, x17\n" - "smlal v16.4s, v31.4h, v3.4h\n" - "smlal2 v21.4s, v31.8h, v3.8h\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "add x21, x21, x15\n" "tbz x8, #2, 45f\n" "ld1 { v30.s }[0], [x21], #0x4\n" "tbz x8, #1, 44f\n" @@ -970,13 +954,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 47f\n" "ld1 { v30.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "ldr x20, [x14, #0x70]\n" - "ssubl v30.8h, v30.8b, v14.8b\n" - "smlal v17.4s, v30.4h, v8.4h\n" - "smlal2 v25.4s, v30.8h, v8.8h\n" - "add x20, x20, x17\n" - "smlal v23.4s, v30.4h, v5.4h\n" - "smlal2 v20.4s, v30.8h, v5.8h\n" + "ssubl v30.8h, v30.8b, v22.8b\n" + "ldr x20, [x12, #0x70]\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x20, x20, x15\n" "tbz x8, #2, 49f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x8, #1, 48f\n" @@ -998,13 +982,13 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 51f\n" "ld1 { v29.b }[0], [x20]\n" "51:" // Oddments: Load (3, 1): Bit 2: End - "ldr x19, [x14, #0x78]\n" - "ssubl v29.8h, v29.8b, v14.8b\n" - "smlal v16.4s, v29.4h, v7.4h\n" - "smlal2 v21.4s, v29.8h, v7.8h\n" - "add x19, x19, x17\n" - "smlal v23.4s, v29.4h, v6.4h\n" - "smlal2 
v20.4s, v29.8h, v6.8h\n" + "ssubl v29.8h, v29.8b, v22.8b\n" + "ldr x19, [x12, #0x78]\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" + "add x19, x19, x15\n" "tbz x8, #2, 53f\n" "ld1 { v28.s }[0], [x19], #0x4\n" "tbz x8, #1, 52f\n" @@ -1026,160 +1010,150 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 55f\n" "ld1 { v28.b }[0], [x19]\n" "55:" // Oddments: Load (3, 2): Bit 2: End - "ssubl v28.8h, v28.8b, v14.8b\n" - "smlal v16.4s, v28.4h, v8.4h\n" - "smlal2 v21.4s, v28.8h, v8.8h\n" - "smlal v23.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" + "ssubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" "tbz x8, #2, 57f\n" - "ld1 { v26.4s }, [x13], #0x10\n" - "ld1 { v10.4s }, [x11], #0x10\n" + "ld1 { v21.4s }, [x13], #0x10\n" + "ld1 { v25.4s }, [x11], #0x10\n" "tbz x8, #1, 56f\n" - "ld1 { v11.d }[0], [x13], #0x8\n" - "ld1 { v18.d }[0], [x11], #0x8\n" + "ld1 { v10.d }[0], [x13], #0x8\n" + "ld1 { v16.d }[0], [x11], #0x8\n" "tbz x8, #0, 59f\n" - "ld1 { v11.s }[2], [x13]\n" - "ld1 { v18.s }[2], [x11]\n" + "ld1 { v10.s }[2], [x13]\n" + "ld1 { v16.s }[2], [x11]\n" "b 59f\n" "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset "tbz x8, #0, 59f\n" - "ld1 { v11.s }[0], [x13]\n" - "ld1 { v18.s }[0], [x11]\n" + "ld1 { v10.s }[0], [x13]\n" + "ld1 { v16.s }[0], [x11]\n" "b 59f\n" "57:" // Oddments: Load requant params: Bit 2: Unset "tbz x8, #1, 58f\n" - "ld1 { v26.d }[0], [x13], #0x8\n" - "ld1 { v10.d }[0], [x11], #0x8\n" + "ld1 { v21.d }[0], [x13], #0x8\n" + "ld1 { v25.d }[0], [x11], #0x8\n" "tbz x8, #0, 59f\n" - "ld1 { v26.s }[2], [x13]\n" - "ld1 { v10.s }[2], [x11]\n" + "ld1 { v21.s }[2], [x13]\n" + "ld1 { v25.s }[2], [x11]\n" "b 59f\n" "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 59f\n" - "ld1 { v26.s }[0], [x13]\n" - "ld1 { v10.s }[0], [x11]\n" + "ld1 { v21.s }[0], [x13]\n" + "ld1 { v25.s }[0], [x11]\n" "59:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v13.4s, v13.4s, v26.4s\n" - "add x10, x10, x15\n" - "sqrdmulh v19.4s, v19.4s, v11.4s\n" - "add x9, x9, x15\n" - "sqrdmulh v17.4s, v17.4s, v26.4s\n" - "add x28, x28, x15\n" - "sqrdmulh v25.4s, v25.4s, v11.4s\n" - "add x27, x27, x15\n" - "sqrdmulh v16.4s, v16.4s, v26.4s\n" - "and v22.16b, v13.16b, v10.16b\n" - "sshr v22.4s, v22.4s, #0x1f\n" - "and v28.16b, v19.16b, v18.16b\n" - "and v3.16b, v17.16b, v10.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "and v6.16b, v25.16b, v18.16b\n" - "and v0.16b, v16.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v11.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v22.4s\n" - "sqrdmulh v23.4s, v23.4s, v26.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "sqrdmulh v20.4s, v20.4s, v11.4s\n" - "sqadd v19.4s, v19.4s, v28.4s\n" - "sqadd v17.4s, v17.4s, v3.4s\n" - "srshl v13.4s, v13.4s, v10.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "srshl v19.4s, v19.4s, v18.4s\n" - "srshl v17.4s, v17.4s, v10.4s\n" - "add v13.4s, v13.4s, v15.4s\n" - "srshl v25.4s, v25.4s, v18.4s\n" - "add v19.4s, v19.4s, v15.4s\n" - "smin v13.4s, v13.4s, v12.4s\n" - "add v17.4s, v17.4s, v15.4s\n" - "smin v19.4s, v19.4s, v12.4s\n" - "smax v13.4s, v13.4s, v24.4s\n" - "smin v17.4s, v17.4s, v12.4s\n" - "smax v19.4s, v19.4s, v24.4s\n" - "add v25.4s, v25.4s, v15.4s\n" - "smax v17.4s, v17.4s, v24.4s\n" - "uzp1 v13.16b, v13.16b, v19.16b\n" - "smin v25.4s, 
v25.4s, v12.4s\n" - "uzp1 v13.16b, v13.16b, v13.16b\n" - "sqadd v16.4s, v16.4s, v0.4s\n" - "smax v25.4s, v25.4s, v24.4s\n" - "and v29.16b, v21.16b, v18.16b\n" + "sqrdmulh v13.4s, v13.4s, v21.4s\n" + "sqrdmulh v19.4s, v19.4s, v21.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqrdmulh v18.4s, v18.4s, v21.4s\n" + "sqrdmulh v9.4s, v9.4s, v21.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqrdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqrdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqrdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqrdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v25.16b\n" - "srshl v16.4s, v16.4s, v10.4s\n" - "and v3.16b, v23.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "add v16.4s, v16.4s, v15.4s\n" - "sqadd v21.4s, v21.4s, v29.4s\n" - "and v25.16b, v20.16b, v18.16b\n" - "sshr v25.4s, v25.4s, #0x1f\n" - "smin v16.4s, v16.4s, v12.4s\n" - "srshl v21.4s, v21.4s, v18.4s\n" - "sqadd v23.4s, v23.4s, v3.4s\n" - "smax v16.4s, v16.4s, v24.4s\n" - "add v21.4s, v21.4s, v15.4s\n" - "srshl v23.4s, v23.4s, v10.4s\n" - "sqadd v20.4s, v20.4s, v25.4s\n" - "smin v21.4s, v21.4s, v12.4s\n" - "add v23.4s, v23.4s, v15.4s\n" - "srshl v20.4s, v20.4s, v18.4s\n" - "smax v21.4s, v21.4s, v24.4s\n" - "smin v23.4s, v23.4s, v12.4s\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v20.4s, v20.4s, v15.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "smax v23.4s, v23.4s, v24.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v20.4s, v20.4s, v24.4s\n" - "uzp1 v23.16b, v23.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" "tbz x8, #2, 61f\n" "st1 { v13.s }[0], [x10], #0x4\n" - "st1 { v17.s }[0], [x9], #0x4\n" - "st1 { v16.s }[0], [x28], #0x4\n" - "st1 { 
v23.s }[0], [x27], #0x4\n" + "st1 { v19.s }[0], [x9], #0x4\n" + "st1 { v18.s }[0], [x28], #0x4\n" + "st1 { v9.s }[0], [x27], #0x4\n" "tbz x8, #1, 60f\n" "st1 { v13.h }[2], [x10], #0x2\n" - "st1 { v17.h }[2], [x9], #0x2\n" - "st1 { v16.h }[2], [x28], #0x2\n" - "st1 { v23.h }[2], [x27], #0x2\n" + "st1 { v19.h }[2], [x9], #0x2\n" + "st1 { v18.h }[2], [x28], #0x2\n" + "st1 { v9.h }[2], [x27], #0x2\n" "tbz x8, #0, 63f\n" "st1 { v13.b }[6], [x10], #0x1\n" - "st1 { v17.b }[6], [x9], #0x1\n" - "st1 { v16.b }[6], [x28], #0x1\n" - "st1 { v23.b }[6], [x27], #0x1\n" + "st1 { v19.b }[6], [x9], #0x1\n" + "st1 { v18.b }[6], [x28], #0x1\n" + "st1 { v9.b }[6], [x27], #0x1\n" "b 63f\n" "60:" // Oddments: Bit 2: Bit 1: Unset "tbz x8, #0, 63f\n" "st1 { v13.b }[4], [x10], #0x1\n" - "st1 { v17.b }[4], [x9], #0x1\n" - "st1 { v16.b }[4], [x28], #0x1\n" - "st1 { v23.b }[4], [x27], #0x1\n" + "st1 { v19.b }[4], [x9], #0x1\n" + "st1 { v18.b }[4], [x28], #0x1\n" + "st1 { v9.b }[4], [x27], #0x1\n" "b 63f\n" "61:" // Oddments: Bit 2: Unset "tbz x8, #1, 62f\n" "st1 { v13.h }[0], [x10], #0x2\n" - "st1 { v17.h }[0], [x9], #0x2\n" - "st1 { v16.h }[0], [x28], #0x2\n" - "st1 { v23.h }[0], [x27], #0x2\n" + "st1 { v19.h }[0], [x9], #0x2\n" + "st1 { v18.h }[0], [x28], #0x2\n" + "st1 { v9.h }[0], [x27], #0x2\n" "tbz x8, #0, 63f\n" "st1 { v13.b }[2], [x10], #0x1\n" - "st1 { v17.b }[2], [x9], #0x1\n" - "st1 { v16.b }[2], [x28], #0x1\n" - "st1 { v23.b }[2], [x27], #0x1\n" + "st1 { v19.b }[2], [x9], #0x1\n" + "st1 { v18.b }[2], [x28], #0x1\n" + "st1 { v9.b }[2], [x27], #0x1\n" "b 63f\n" "62:" // Oddments: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 63f\n" "st1 { v13.b }[0], [x10], #0x1\n" - "st1 { v17.b }[0], [x9], #0x1\n" - "st1 { v16.b }[0], [x28], #0x1\n" - "st1 { v23.b }[0], [x27], #0x1\n" + "st1 { v19.b }[0], [x9], #0x1\n" + "st1 { v18.b }[0], [x28], #0x1\n" + "st1 { v9.b }[0], [x27], #0x1\n" "63:" // Oddments: Bit 2: End - "64:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index b20759eec4..6032f8f2fb 100644 --- 
a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,37 +36,24 @@ namespace depthwise { void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); -struct a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst +class a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_3x3_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_3x3_mla::get_packed_size; + a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {} - kern_type kernel = a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } - a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp index 3b3d9c8946..5499392d6d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const int8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const int8_t *const *inptrs_raw, - const int8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -100,75 +100,75 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x5, #0x0\n" - "ldr x6, [%x[params], %[offsetof_Params_weights]]\n" - "mov x7, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x8, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x16, x4, #0x3\n" - "ldr x15, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v12.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v13.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v11.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v19.4s }, [x20]\n" - "ld1r { v14.4s }, [x19]\n" - "ldp x14, x13, [x21, #0x0]\n" - "ldp x12, x11, [x21, #0x10]\n" + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v12.16b }, [x24]\n" + "ld1r { v13.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v11.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" + "mov x15, #0x0\n" + "mov x14, #0x0\n" + "ld1r { v14.8h }, [x19]\n" + "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" "cbz x16, 3f\n" - "subs x16, x16, #0x1\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q15, [x19, #0x0]\n" - "mov v20.16b, v15.16b\n" + "subs x16, x16, #0x1\n" + "mov v9.16b, v15.16b\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v15.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v17.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v16.16b, v10.16b\n" + "mov v22.16b, v15.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v21.16b, v10.16b\n" + "mov v23.16b, v15.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v18.16b, v10.16b\n" "ssubl v0.8h, v0.8b, v13.8b\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v22.16b, v10.16b\n" - "ldr d2, [x6, #0x10]\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" "ssubl v1.8h, v1.8b, v13.8b\n" - "mov 
v18.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "ldr d4, [x6, #0x20]\n" "ssubl v2.8h, v2.8b, v13.8b\n" - "ldr d5, [x6, #0x28]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" "ssubl v3.8h, v3.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" - "ldr d7, [x6, #0x38]\n" "ssubl v4.8h, v4.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" "ssubl v5.8h, v5.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" "ssubl v6.8h, v6.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" "ssubl v7.8h, v7.8b, v13.8b\n" "ssubl v8.8h, v8.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" - "ldp x20, x19, [x8, #0x30]\n" - "ldr d31, [x26, x5]\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" "ssubl v31.8h, v31.8b, v12.8b\n" - "ldr d30, [x25, x5]\n" - "ldr d29, [x24, x5]\n" "ssubl v30.8h, v30.8b, v12.8b\n" - "ldr d28, [x23, x5]\n" - "ldr d27, [x22, x5]\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "ldr d26, [x21, x5]\n" "ssubl v28.8h, v28.8b, v12.8b\n" - "ldr d25, [x20, x5]\n" - "ldr d24, [x19, x5]\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" "ssubl v27.8h, v27.8b, v12.8b\n" "ssubl v26.8h, v26.8b, v12.8b\n" "ssubl v25.8h, v25.8b, v12.8b\n" @@ -176,259 +176,251 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "beq 2f\n" "1:" // Loop "smlal v15.4s, v31.4h, v8.4h\n" - "ldr x23, [x8, #0x40]\n" - "add x6, x6, #0x48\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x22, [x8, #0x48]\n" - "subs x16, x16, #0x1\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ldr x21, [x8, #0x50]\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ldr x20, [x8, #0x58]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x19, [x8, #0x60]\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ldr x10, [x8, #0x68]\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ldr x9, [x8, #0x70]\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x8, #0x78]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v30.4h, v0.4h\n" - "ldr x27, [x8, #0x80]\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x8, #0x88]\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "ldr x25, [x8, #0x90]\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x5]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" "ssubl v28.8h, v28.8b, v12.8b\n" "smlal v15.4s, v29.4h, v1.4h\n" - "ldr x24, [x8, #0x98]\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x23, x5]\n" + "ldr d29, [x24, x15]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "ldr x23, [x8, #0xa0]\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x5]\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" "ssubl v27.8h, v27.8b, v12.8b\n" "smlal v15.4s, v26.4h, v3.4h\n" - "ldr x22, [x8, #0xa8]\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x20, x5]\n" + "ldr d26, [x19, x15]\n" "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v4.4h\n" - "ldr x21, [x8, #0xb0]\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x19, x5]\n" + "ldr d25, [x20, x15]\n" "ssubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, 
x15]\n" "smlal v15.4s, v24.4h, v2.4h\n" - "ldr x20, [x8, #0xb8]\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr x19, [x8, #0xc0]\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "ldr q21, [x17, #0x0]\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" - "ldr d24, [x9, x5]\n" - "ssubl v24.8h, v24.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "ldr q30, [x15, #0x0]\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x5]\n" + "ldr x19, [x12, #0x70]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "ldr q31, [x17, #0x10]\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x5]\n" - "add x17, x17, #0x20\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "ssubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" "smlal v15.4s, v27.4h, v5.4h\n" - "ldr q9, [x15, #0x10]\n" - "add x15, x15, #0x20\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "ssubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x5]\n" + "ssubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x5]\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "ssubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x5]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x5]\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" "ssubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x5]\n" - "ssubl v24.8h, v24.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" - "smlal2 v18.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x5]\n" - "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "smlal v17.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x5]\n" - "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x5]\n" - "ssubl v26.8h, v26.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "ssubl v24.8h, 
v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x5]\n" - "add x5, x5, #0x8\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v24.4h, v3.4h\n" - "smlal v16.4s, v24.4h, v5.4h\n" + "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "add x17, x17, #0x48\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqrdmulh v10.4s, v10.4s, v4.4s\n" + "subs x16, x16, #0x1\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "ssubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x13, x13, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "ssubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "sqrdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "add x11, x11, #0x20\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "sqrdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqrdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "and v8.16b, v10.16b, v9.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "and v4.16b, v20.16b, v30.16b\n" + "sqrdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqrdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqrdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "add v15.4s, v15.4s, v11.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smax v20.4s, v20.4s, v19.4s\n" + "sqadd 
v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x14, x7]\n" - "smax v23.4s, v23.4s, v19.4s\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "str d20, [x13, x7]\n" - "smin v16.4s, v16.4s, v14.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "smax v16.4s, v16.4s, v19.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "add v22.4s, v22.4s, v11.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "uzp1 v16.16b, v16.16b, v22.16b\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x12, x7]\n" - "smin v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v11.4s\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x11, x7]\n" - "add x7, x7, #0x8\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q15, [x19, #0x0]\n" - "mov v20.16b, v15.16b\n" + "add x14, x14, #0x8\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v15.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v17.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v22.16b, v15.16b\n" + "mov v21.16b, v10.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v15.16b\n" + "mov v18.16b, v10.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" "ssubl v0.8h, v0.8b, v13.8b\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v22.16b, v10.16b\n" - "ldr d2, [x6, #0x10]\n" 
"ssubl v1.8h, v1.8b, v13.8b\n" - "mov v18.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "ldr d4, [x6, #0x20]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" "ssubl v2.8h, v2.8b, v13.8b\n" - "ldr d5, [x6, #0x28]\n" "ssubl v3.8h, v3.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" - "ldr d7, [x6, #0x38]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" "ssubl v4.8h, v4.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" "ssubl v5.8h, v5.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" "ssubl v6.8h, v6.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" "ssubl v7.8h, v7.8b, v13.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" "ssubl v8.8h, v8.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" - "ldp x20, x19, [x8, #0x30]\n" - "ldr d31, [x26, x5]\n" "ssubl v31.8h, v31.8b, v12.8b\n" - "ldr d30, [x25, x5]\n" - "ldr d29, [x24, x5]\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" "ssubl v30.8h, v30.8b, v12.8b\n" - "ldr d28, [x23, x5]\n" - "ldr d27, [x22, x5]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "ldr d26, [x21, x5]\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" "ssubl v28.8h, v28.8b, v12.8b\n" - "ldr d25, [x20, x5]\n" - "ldr d24, [x19, x5]\n" "ssubl v27.8h, v27.8b, v12.8b\n" "ssubl v26.8h, v26.8b, v12.8b\n" "ssubl v25.8h, v25.8b, v12.8b\n" @@ -436,275 +428,267 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "bgt 1b\n" "2:" // Tail "smlal v15.4s, v31.4h, v8.4h\n" - "ldr x23, [x8, #0x40]\n" - "tst x4, #0x7\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x22, [x8, #0x48]\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ldr x21, [x8, #0x50]\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ldr x20, [x8, #0x58]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x19, [x8, #0x60]\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ldr x10, [x8, #0x68]\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ldr x9, [x8, #0x70]\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x8, #0x78]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v30.4h, v0.4h\n" - "ldr x27, [x8, #0x80]\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x8, #0x88]\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "ldr x25, [x8, #0x90]\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x5]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" "ssubl v28.8h, v28.8b, v12.8b\n" "smlal v15.4s, v29.4h, v1.4h\n" - "ldr x24, [x8, #0x98]\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x23, x5]\n" + "ldr d29, [x24, x15]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "ldr x23, [x8, #0xa0]\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x5]\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" "ssubl v27.8h, v27.8b, v12.8b\n" "smlal v15.4s, v26.4h, v3.4h\n" - "ldr x22, [x8, #0xa8]\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x20, x5]\n" + "ldr d26, [x19, x15]\n" "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v4.4h\n" - "ldr x21, [x8, #0xb0]\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x19, x5]\n" + "ldr d25, [x20, x15]\n" "ssubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, 
x15]\n" "smlal v15.4s, v24.4h, v2.4h\n" - "ldr x20, [x8, #0xb8]\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr x19, [x8, #0xc0]\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "ldr q21, [x17, #0x0]\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" - "ldr d24, [x9, x5]\n" - "ssubl v24.8h, v24.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "ldr q30, [x15, #0x0]\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x5]\n" + "ldr x19, [x12, #0x70]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "ldr q31, [x17, #0x10]\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x5]\n" - "add x17, x17, #0x20\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "ssubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" "smlal v15.4s, v27.4h, v5.4h\n" - "ldr q9, [x15, #0x10]\n" - "add x15, x15, #0x20\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "ssubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x5]\n" + "ssubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x5]\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "ssubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x5]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x5]\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" "ssubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x5]\n" - "ssubl v24.8h, v24.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" - "smlal2 v18.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x5]\n" - "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "smlal v17.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x5]\n" - "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x5]\n" - "ssubl v26.8h, v26.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "ssubl v24.8h, 
v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x5]\n" - "add x5, x5, #0x8\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v24.4h, v3.4h\n" - "smlal v16.4s, v24.4h, v5.4h\n" + "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "tst x8, #0x7\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqrdmulh v10.4s, v10.4s, v4.4s\n" + "add x13, x13, #0x20\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "ssubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x11, x11, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "ssubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "sqrdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "sqrdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqrdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "and v8.16b, v10.16b, v9.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "and v4.16b, v20.16b, v30.16b\n" + "sqrdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqrdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqrdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "add v15.4s, v15.4s, v11.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smax v20.4s, v20.4s, v19.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr 
v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x14, x7]\n" - "smax v23.4s, v23.4s, v19.4s\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "str d20, [x13, x7]\n" - "smin v16.4s, v16.4s, v14.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "smax v16.4s, v16.4s, v19.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "add v22.4s, v22.4s, v11.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "uzp1 v16.16b, v16.16b, v22.16b\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x12, x7]\n" - "smin v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v11.4s\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x11, x7]\n" - "add x7, x7, #0x8\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 88f\n" - "add x6, x6, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x4, #2, 5f\n" + "tbz x8, #2, 5f\n" "ld1 { v15.4s }, [x19], #0x10\n" - "tbz x4, #1, 4f\n" + "tbz x8, #1, 4f\n" "ld1 { v10.d }[0], [x19], #0x8\n" - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x4, #1, 6f\n" + "tbz x8, #1, 6f\n" "ld1 { v15.d }[0], [x19], #0x8\n" - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - 
"mov v20.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v16.16b, v15.16b\n" - "ldr d2, [x6, #0x10]\n" - "mov v22.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "mov v17.16b, v15.16b\n" - "ldr d4, [x6, #0x20]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v22.16b, v15.16b\n" + "mov v21.16b, v10.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v23.16b, v15.16b\n" "mov v18.16b, v10.16b\n" - "ldr d5, [x6, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "ssubl v0.8h, v0.8b, v13.8b\n" "ssubl v1.8h, v1.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" + "ldr d8, [x17, #0x40]\n" + "ldp x26, x25, [x12, #0x0]\n" "ssubl v2.8h, v2.8b, v13.8b\n" - "ldr d7, [x6, #0x38]\n" "ssubl v3.8h, v3.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" + "ldp x24, x23, [x12, #0x10]\n" + "ldp x22, x21, [x12, #0x20]\n" "ssubl v4.8h, v4.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" "ssubl v5.8h, v5.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" + "ldp x20, x19, [x12, #0x30]\n" "ssubl v6.8h, v6.8b, v13.8b\n" "ssubl v7.8h, v7.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" "ssubl v8.8h, v8.8b, v13.8b\n" - "ldp x20, x19, [x8, #0x30]\n" - "add x26, x26, x5\n" - "add x25, x25, x5\n" - "add x24, x24, x5\n" - "add x23, x23, x5\n" - "add x22, x22, x5\n" - "add x21, x21, x5\n" - "add x20, x20, x5\n" - "add x19, x19, x5\n" - "tbz x4, #2, 9f\n" + "add x26, x26, x15\n" + "add x25, x25, x15\n" + "add x24, x24, x15\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" + "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x26], #0x4\n" "ld1 { v30.s }[0], [x25], #0x4\n" "ld1 { v29.s }[0], [x24], #0x4\n" @@ -713,7 +697,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.s }[0], [x21], #0x4\n" "ld1 { v25.s }[0], [x20], #0x4\n" "ld1 { v24.s }[0], [x19], #0x4\n" - "tbz x4, #1, 8f\n" + "tbz x8, #1, 8f\n" "ld1 { v31.h }[2], [x26], #0x2\n" "ld1 { v30.h }[2], [x25], #0x2\n" "ld1 { v29.h }[2], [x24], #0x2\n" @@ -722,7 +706,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[2], [x21], #0x2\n" "ld1 { v25.h }[2], [x20], #0x2\n" "ld1 { v24.h }[2], [x19], #0x2\n" - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[6], [x26]\n" "ld1 { v30.b }[6], [x25]\n" "ld1 { v29.b }[6], [x24]\n" @@ -733,7 +717,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[4], [x26]\n" "ld1 { v30.b }[4], [x25]\n" "ld1 { v29.b }[4], [x24]\n" @@ -744,7 +728,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x4, #1, 10f\n" + "tbz x8, #1, 10f\n" "ld1 { v31.h }[0], [x26], #0x2\n" "ld1 { v30.h }[0], [x25], #0x2\n" "ld1 { v29.h }[0], [x24], #0x2\n" @@ -753,7 +737,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[0], [x21], #0x2\n" "ld1 { v25.h }[0], [x20], #0x2\n" "ld1 { v24.h }[0], [x19], #0x2\n" - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[2], [x26]\n" "ld1 { v30.b }[2], [x25]\n" "ld1 { v29.b }[2], [x24]\n" @@ -764,7 +748,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: 
Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[0], [x26]\n" "ld1 { v30.b }[0], [x25]\n" "ld1 { v29.b }[0], [x24]\n" @@ -774,646 +758,636 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v25.b }[0], [x20]\n" "ld1 { v24.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x23, [x8, #0x40]\n" "ssubl v31.8h, v31.8b, v12.8b\n" "smlal v15.4s, v31.4h, v8.4h\n" - "ssubl v30.8h, v30.8b, v12.8b\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ssubl v28.8h, v28.8b, v12.8b\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ssubl v26.8h, v26.8b, v12.8b\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ssubl v24.8h, v24.8b, v12.8b\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "add x23, x23, x5\n" + "ldr x24, [x12, #0x40]\n" + "ssubl v30.8h, v30.8b, v12.8b\n" "smlal v15.4s, v30.4h, v0.4h\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" + "add x24, x24, x15\n" + "ssubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" "smlal v15.4s, v29.4h, v1.4h\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" + "ssubl v28.8h, v28.8b, v12.8b\n" + "ssubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" "smlal v15.4s, v26.4h, v3.4h\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" + "ssubl v27.8h, v27.8b, v12.8b\n" + "ssubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" "smlal v15.4s, v25.4h, v4.4h\n" "smlal2 v10.4s, v25.8h, v4.8h\n" + "ssubl v24.8h, v24.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "tbz x4, #2, 13f\n" - "ld1 { v29.s }[0], [x23], #0x4\n" - "tbz x4, #1, 12f\n" - "ld1 { v29.h }[2], [x23], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[6], [x23]\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[6], [x24]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[4], [x23]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[4], [x24]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - "tbz x4, #1, 14f\n" - "ld1 { v29.h }[0], [x23], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[2], [x23]\n" + "tbz x8, #1, 14f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[2], [x24]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[0], [x23]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[0], [x24]\n" "15:" // Oddments: Load (1, 3): Bit 2: End - "ldr x22, [x8, #0x48]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "add x22, x22, x5\n" - "tbz x4, #2, 17f\n" - "ld1 { v28.s }[0], [x22], #0x4\n" - "tbz x4, #1, 16f\n" - "ld1 { v28.h }[2], [x22], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[6], [x22]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, 
v29.8h, v4.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 17f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[6], [x23]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[4], [x22]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[4], [x23]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x4, #1, 18f\n" - "ld1 { v28.h }[0], [x22], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[2], [x22]\n" + "tbz x8, #1, 18f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[2], [x23]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[0], [x22]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[0], [x23]\n" "19:" // Oddments: Load (1, 4): Bit 2: End - "ldr x21, [x8, #0x50]\n" "ssubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "add x21, x21, x5\n" - "tbz x4, #2, 21f\n" + "ldr x21, [x12, #0x50]\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 21f\n" "ld1 { v27.s }[0], [x21], #0x4\n" - "tbz x4, #1, 20f\n" + "tbz x8, #1, 20f\n" "ld1 { v27.h }[2], [x21], #0x2\n" - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[6], [x21]\n" "b 23f\n" "20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[4], [x21]\n" "b 23f\n" "21:" // Oddments: Load (1, 2): Bit 2: Unset - "tbz x4, #1, 22f\n" + "tbz x8, #1, 22f\n" "ld1 { v27.h }[0], [x21], #0x2\n" - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[2], [x21]\n" "b 23f\n" "22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[0], [x21]\n" "23:" // Oddments: Load (1, 2): Bit 2: End - "ldr x20, [x8, #0x58]\n" "ssubl v27.8h, v27.8b, v12.8b\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v27.4h, v5.4h\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "add x20, x20, x5\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "tbz x4, #2, 25f\n" - "ld1 { v26.s }[0], [x20], #0x4\n" - "tbz x4, #1, 24f\n" - "ld1 { v26.h }[2], [x20], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[6], [x20]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 25f\n" + "ld1 { v26.s }[0], [x19], #0x4\n" + "tbz x8, #1, 24f\n" + "ld1 { v26.h }[2], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[6], [x19]\n" "b 27f\n" "24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[4], [x20]\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[4], [x19]\n" "b 27f\n" "25:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x4, #1, 26f\n" - "ld1 { v26.h }[0], [x20], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[2], [x20]\n" + "tbz x8, #1, 26f\n" + "ld1 { v26.h }[0], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[2], [x19]\n" "b 27f\n" "26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[0], [x20]\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[0], [x19]\n" "27:" // Oddments: Load (3, 0): Bit 2: End - "ldr x19, [x8, #0x60]\n" "ssubl v26.8h, v26.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "add x19, x19, x5\n" - "tbz x4, #2, 29f\n" - "ld1 { v25.s }[0], [x19], #0x4\n" - "tbz x4, #1, 28f\n" - "ld1 { v25.h }[2], [x19], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[6], [x19]\n" + "ldr x20, 
[x12, #0x60]\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 29f\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[6], [x20]\n" "b 31f\n" "28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[4], [x19]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[4], [x20]\n" "b 31f\n" "29:" // Oddments: Load (2, 0): Bit 2: Unset - "tbz x4, #1, 30f\n" - "ld1 { v25.h }[0], [x19], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[2], [x19]\n" + "tbz x8, #1, 30f\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[2], [x20]\n" "b 31f\n" "30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[0], [x19]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[0], [x20]\n" "31:" // Oddments: Load (2, 0): Bit 2: End - "ldr x10, [x8, #0x68]\n" "ssubl v25.8h, v25.8b, v12.8b\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v6.4h\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "add x10, x10, x5\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 33f\n" - "ld1 { v29.s }[0], [x10], #0x4\n" - "tbz x4, #1, 32f\n" - "ld1 { v29.h }[2], [x10], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[6], [x10]\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x19], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x19]\n" "b 35f\n" "32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[4], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x19]\n" "b 35f\n" "33:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x4, #1, 34f\n" - "ld1 { v29.h }[0], [x10], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[2], [x10]\n" + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[2], [x19]\n" "b 35f\n" "34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[0], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x19]\n" "35:" // Oddments: Load (3, 1): Bit 2: End - "ldr x9, [x8, #0x70]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "add x9, x9, x5\n" - "tbz x4, #2, 37f\n" - "ld1 { v24.s }[0], [x9], #0x4\n" - "tbz x4, #1, 36f\n" - "ld1 { v24.h }[2], [x9], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[6], [x9]\n" + "ldr x19, [x12, #0x70]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 37f\n" + "ld1 { v24.s }[0], [x19], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v24.h }[2], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[6], [x19]\n" "b 39f\n" "36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[4], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[4], [x19]\n" "b 39f\n" "37:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x4, #1, 38f\n" - "ld1 { v24.h }[0], [x9], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[2], [x9]\n" + "tbz x8, #1, 38f\n" + "ld1 { v24.h }[0], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[2], [x19]\n" "b 39f\n" "38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[0], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[0], [x19]\n" "39:" // Oddments: Load (2, 1): Bit 2: End - "ldr x28, [x8, #0x78]\n" "ssubl 
v24.8h, v24.8b, v12.8b\n" + "ldr x22, [x12, #0x78]\n" "smlal v15.4s, v24.4h, v7.4h\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "add x28, x28, x5\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "tbz x4, #2, 41f\n" - "ld1 { v27.s }[0], [x28], #0x4\n" - "tbz x4, #1, 40f\n" - "ld1 { v27.h }[2], [x28], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[6], [x28]\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 41f\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[6], [x22]\n" "b 43f\n" "40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[4], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[4], [x22]\n" "b 43f\n" "41:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x4, #1, 42f\n" - "ld1 { v27.h }[0], [x28], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[2], [x28]\n" + "tbz x8, #1, 42f\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[2], [x22]\n" "b 43f\n" "42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[0], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[0], [x22]\n" "43:" // Oddments: Load (3, 3): Bit 2: End - "ldr x27, [x8, #0x80]\n" "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" + "ldr x21, [x12, #0x80]\n" + "smlal v23.4s, v27.4h, v4.4h\n" "smlal2 v18.4s, v27.8h, v4.8h\n" - "add x27, x27, x5\n" - "tbz x4, #2, 45f\n" - "ld1 { v28.s }[0], [x27], #0x4\n" - "tbz x4, #1, 44f\n" - "ld1 { v28.h }[2], [x27], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[6], [x27]\n" + "add x21, x21, x15\n" + "tbz x8, #2, 45f\n" + "ld1 { v28.s }[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v28.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[6], [x21]\n" "b 47f\n" "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[4], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[4], [x21]\n" "b 47f\n" "45:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x4, #1, 46f\n" - "ld1 { v28.h }[0], [x27], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[2], [x27]\n" + "tbz x8, #1, 46f\n" + "ld1 { v28.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[2], [x21]\n" "b 47f\n" "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[0], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "ldr x26, [x8, #0x88]\n" "ssubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "add x26, x26, x5\n" - "smlal v17.4s, v28.4h, v1.4h\n" + "ldr x20, [x12, #0x88]\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal2 v18.4s, v28.8h, v1.8h\n" - "tbz x4, #2, 49f\n" - "ld1 { v26.s }[0], [x26], #0x4\n" - "tbz x4, #1, 48f\n" - "ld1 { v26.h }[2], [x26], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[6], [x26]\n" + "add x20, x20, x15\n" + "tbz x8, #2, 49f\n" + "ld1 { v26.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v26.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[6], [x20]\n" "b 51f\n" "48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[4], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[4], [x20]\n" "b 51f\n" "49:" // Oddments: Load (3, 4): Bit 2: Unset - "tbz x4, #1, 50f\n" - "ld1 { v26.h }[0], [x26], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v26.b 
}[2], [x26]\n" + "tbz x8, #1, 50f\n" + "ld1 { v26.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[2], [x20]\n" "b 51f\n" "50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[0], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[0], [x20]\n" "51:" // Oddments: Load (3, 4): Bit 2: End - "ldr x25, [x8, #0x90]\n" "ssubl v26.8h, v26.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "ldr x23, [x12, #0x90]\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "add x25, x25, x5\n" - "tbz x4, #2, 53f\n" - "ld1 { v25.s }[0], [x25], #0x4\n" - "tbz x4, #1, 52f\n" - "ld1 { v25.h }[2], [x25], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[6], [x25]\n" + "add x23, x23, x15\n" + "tbz x8, #2, 53f\n" + "ld1 { v25.s }[0], [x23], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v25.h }[2], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[6], [x23]\n" "b 55f\n" "52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[4], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[4], [x23]\n" "b 55f\n" "53:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x4, #1, 54f\n" - "ld1 { v25.h }[0], [x25], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[2], [x25]\n" + "tbz x8, #1, 54f\n" + "ld1 { v25.h }[0], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[2], [x23]\n" "b 55f\n" "54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[0], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[0], [x23]\n" "55:" // Oddments: Load (4, 0): Bit 2: End - "ldr x24, [x8, #0x98]\n" "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "add x24, x24, x5\n" - "tbz x4, #2, 57f\n" + "ldr x24, [x12, #0x98]\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "add x24, x24, x15\n" + "tbz x8, #2, 57f\n" "ld1 { v29.s }[0], [x24], #0x4\n" - "tbz x4, #1, 56f\n" + "tbz x8, #1, 56f\n" "ld1 { v29.h }[2], [x24], #0x2\n" - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[6], [x24]\n" "b 59f\n" "56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[4], [x24]\n" "b 59f\n" "57:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x4, #1, 58f\n" + "tbz x8, #1, 58f\n" "ld1 { v29.h }[0], [x24], #0x2\n" - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[2], [x24]\n" "b 59f\n" "58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[0], [x24]\n" "59:" // Oddments: Load (2, 4): Bit 2: End - "ldr x23, [x8, #0xa0]\n" "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "add x23, x23, x5\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "ldr x19, [x12, #0xa0]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "smlal v23.4s, v29.4h, v2.4h\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "tbz x4, #2, 61f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x4, #1, 60f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[6], [x23]\n" + "add x19, x19, x15\n" + "tbz x8, #2, 61f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 60f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[6], [x19]\n" "b 63f\n" "60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[4], [x19]\n" "b 63f\n" "61:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x4, #1, 62f\n" - "ld1 { 
v27.h }[0], [x23], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x8, #1, 62f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[2], [x19]\n" "b 63f\n" "62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[0], [x19]\n" "63:" // Oddments: Load (4, 1): Bit 2: End - "ldr x22, [x8, #0xa8]\n" "ssubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "add x22, x22, x5\n" - "tbz x4, #2, 65f\n" + "ldr x22, [x12, #0xa8]\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 65f\n" "ld1 { v24.s }[0], [x22], #0x4\n" - "tbz x4, #1, 64f\n" + "tbz x8, #1, 64f\n" "ld1 { v24.h }[2], [x22], #0x2\n" - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[6], [x22]\n" "b 67f\n" "64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[4], [x22]\n" "b 67f\n" "65:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x4, #1, 66f\n" + "tbz x8, #1, 66f\n" "ld1 { v24.h }[0], [x22], #0x2\n" - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[2], [x22]\n" "b 67f\n" "66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[0], [x22]\n" "67:" // Oddments: Load (3, 2): Bit 2: End - "ldr x21, [x8, #0xb0]\n" "ssubl v24.8h, v24.8b, v12.8b\n" - "smlal v16.4s, v24.4h, v5.4h\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "add x21, x21, x5\n" - "smlal v17.4s, v24.4h, v3.4h\n" + "ldr x21, [x12, #0xb0]\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" + "smlal v23.4s, v24.4h, v3.4h\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "tbz x4, #2, 69f\n" + "add x21, x21, x15\n" + "tbz x8, #2, 69f\n" "ld1 { v26.s }[0], [x21], #0x4\n" - "tbz x4, #1, 68f\n" + "tbz x8, #1, 68f\n" "ld1 { v26.h }[2], [x21], #0x2\n" - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[6], [x21]\n" "b 71f\n" "68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[4], [x21]\n" "b 71f\n" "69:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x4, #1, 70f\n" + "tbz x8, #1, 70f\n" "ld1 { v26.h }[0], [x21], #0x2\n" - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[2], [x21]\n" "b 71f\n" "70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[0], [x21]\n" "71:" // Oddments: Load (4, 3): Bit 2: End - "ldr x20, [x8, #0xb8]\n" "ssubl v26.8h, v26.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "ldr x20, [x12, #0xb8]\n" + "smlal v23.4s, v26.4h, v7.4h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "add x20, x20, x5\n" - "tbz x4, #2, 73f\n" + "add x20, x20, x15\n" + "tbz x8, #2, 73f\n" "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x4, #1, 72f\n" + "tbz x8, #1, 72f\n" "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[6], [x20]\n" "b 75f\n" "72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[4], [x20]\n" "b 75f\n" "73:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x4, #1, 74f\n" + "tbz x8, #1, 74f\n" "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[2], [x20]\n" "b 75f\n" "74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[0], [x20]\n" "75:" // Oddments: Load (4, 2): Bit 2: End - "ldr x19, 
[x8, #0xc0]\n" "ssubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "add x19, x19, x5\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" + "smlal v23.4s, v25.4h, v6.4h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "tbz x4, #2, 77f\n" + "add x19, x19, x15\n" + "tbz x8, #2, 77f\n" "ld1 { v29.s }[0], [x19], #0x4\n" - "tbz x4, #1, 76f\n" + "tbz x8, #1, 76f\n" "ld1 { v29.h }[2], [x19], #0x2\n" - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[6], [x19]\n" "b 79f\n" "76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[4], [x19]\n" "b 79f\n" "77:" // Oddments: Load (4, 4): Bit 2: Unset - "tbz x4, #1, 78f\n" + "tbz x8, #1, 78f\n" "ld1 { v29.h }[0], [x19], #0x2\n" - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[2], [x19]\n" "b 79f\n" "78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[0], [x19]\n" "79:" // Oddments: Load (4, 4): Bit 2: End "ssubl v29.8h, v29.8b, v12.8b\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "tbz x4, #2, 81f\n" - "ld1 { v21.4s }, [x17], #0x10\n" - "ld1 { v30.4s }, [x15], #0x10\n" - "tbz x4, #1, 80f\n" - "ld1 { v31.d }[0], [x17], #0x8\n" - "ld1 { v9.d }[0], [x15], #0x8\n" - "tbz x4, #0, 83f\n" - "ld1 { v31.s }[2], [x17]\n" - "ld1 { v9.s }[2], [x15]\n" + "tbz x8, #2, 81f\n" + "ld1 { v19.4s }, [x13], #0x10\n" + "ld1 { v0.4s }, [x11], #0x10\n" + "tbz x8, #1, 80f\n" + "ld1 { v4.d }[0], [x13], #0x8\n" + "ld1 { v31.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[2], [x13]\n" + "ld1 { v31.s }[2], [x11]\n" "b 83f\n" "80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v31.s }[0], [x17]\n" - "ld1 { v9.s }[0], [x15]\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[0], [x13]\n" + "ld1 { v31.s }[0], [x11]\n" "b 83f\n" "81:" // Oddments: Load requant params: Bit 2: Unset - "tbz x4, #1, 82f\n" - "ld1 { v21.d }[0], [x17], #0x8\n" - "ld1 { v30.d }[0], [x15], #0x8\n" - "tbz x4, #0, 83f\n" - "ld1 { v21.s }[2], [x17]\n" - "ld1 { v30.s }[2], [x15]\n" + "tbz x8, #1, 82f\n" + "ld1 { v19.d }[0], [x13], #0x8\n" + "ld1 { v0.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[2], [x13]\n" + "ld1 { v0.s }[2], [x11]\n" "b 83f\n" "82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v21.s }[0], [x17]\n" - "ld1 { v30.s }[0], [x15]\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[0], [x13]\n" + "ld1 { v0.s }[0], [x11]\n" "83:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "add x14, x14, x7\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "add x13, x13, x7\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "add x12, x12, x7\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "add x11, x11, x7\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "and v8.16b, v10.16b, v9.16b\n" - "and v4.16b, v20.16b, v30.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v30.16b, v15.16b, v0.16b\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "and 
v28.16b, v9.16b, v0.16b\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v15.4s, v15.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v20.4s, v20.4s, v19.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smin v23.4s, v23.4s, v14.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smax v23.4s, v23.4s, v19.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "smin v16.4s, v16.4s, v14.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smax v16.4s, v16.4s, v19.4s\n" - "add v22.4s, v22.4s, v11.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "smin v17.4s, v17.4s, v14.4s\n" - "uzp1 v16.16b, v16.16b, 
v22.16b\n" - "add v18.4s, v18.4s, v11.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "tbz x4, #2, 85f\n" - "st1 { v15.s }[0], [x14], #0x4\n" - "st1 { v20.s }[0], [x13], #0x4\n" - "st1 { v16.s }[0], [x12], #0x4\n" - "st1 { v17.s }[0], [x11], #0x4\n" - "tbz x4, #1, 84f\n" - "st1 { v15.h }[2], [x14], #0x2\n" - "st1 { v20.h }[2], [x13], #0x2\n" - "st1 { v16.h }[2], [x12], #0x2\n" - "st1 { v17.h }[2], [x11], #0x2\n" - "tbz x4, #0, 87f\n" - "st1 { v15.b }[6], [x14], #0x1\n" - "st1 { v20.b }[6], [x13], #0x1\n" - "st1 { v16.b }[6], [x12], #0x1\n" - "st1 { v17.b }[6], [x11], #0x1\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "tbz x8, #2, 85f\n" + "st1 { v15.s }[0], [x10], #0x4\n" + "st1 { v9.s }[0], [x9], #0x4\n" + "st1 { v22.s }[0], [x28], #0x4\n" + "st1 { v23.s }[0], [x27], #0x4\n" + "tbz x8, #1, 84f\n" + "st1 { v15.h }[2], [x10], #0x2\n" + "st1 { v9.h }[2], [x9], #0x2\n" + "st1 { v22.h }[2], [x28], #0x2\n" + "st1 { v23.h }[2], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[6], [x10], #0x1\n" + "st1 { v9.b }[6], [x9], #0x1\n" + "st1 { v22.b }[6], [x28], #0x1\n" + "st1 { v23.b }[6], [x27], #0x1\n" "b 87f\n" "84:" // Oddments: Bit 2: Bit 1: Unset - "tbz x4, #0, 87f\n" - "st1 { v15.b }[4], [x14], #0x1\n" - "st1 { v20.b }[4], [x13], #0x1\n" - "st1 { v16.b }[4], [x12], #0x1\n" - "st1 { v17.b }[4], [x11], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[4], [x10], #0x1\n" + "st1 { v9.b }[4], [x9], #0x1\n" + "st1 { v22.b }[4], [x28], #0x1\n" + "st1 { v23.b }[4], [x27], #0x1\n" "b 87f\n" "85:" // Oddments: Bit 2: Unset - "tbz x4, #1, 86f\n" - "st1 { v15.h }[0], [x14], #0x2\n" - "st1 { v20.h }[0], [x13], #0x2\n" - "st1 { v16.h }[0], [x12], #0x2\n" - "st1 { v17.h }[0], [x11], #0x2\n" - "tbz x4, #0, 87f\n" - "st1 { v15.b }[2], [x14], #0x1\n" - "st1 { v20.b }[2], [x13], #0x1\n" - "st1 { v16.b }[2], [x12], #0x1\n" - "st1 { v17.b }[2], [x11], #0x1\n" + "tbz x8, #1, 86f\n" + "st1 { v15.h }[0], [x10], #0x2\n" + "st1 { v9.h }[0], [x9], #0x2\n" + "st1 { v22.h }[0], [x28], #0x2\n" + "st1 { v23.h }[0], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[2], [x10], #0x1\n" + "st1 { v9.b }[2], [x9], #0x1\n" + "st1 { v22.b }[2], [x28], #0x1\n" + "st1 { v23.b }[2], [x27], #0x1\n" "b 87f\n" "86:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 87f\n" - "st1 { v15.b }[0], [x14], #0x1\n" - "st1 { v20.b }[0], [x13], #0x1\n" - "st1 { v16.b }[0], [x12], #0x1\n" - "st1 { v17.b }[0], [x11], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[0], [x10], #0x1\n" + "st1 { v9.b }[0], [x9], #0x1\n" + "st1 { v22.b }[0], [x28], #0x1\n" + "st1 { v23.b }[0], [x27], #0x1\n" "87:" // Oddments: Bit 2: End - "88:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), 
[offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (¶ms) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index a998fa16d6..52031e18da 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,37 +36,24 @@ namespace depthwise { void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); -struct a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst +class a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_5x5_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_5x5_mla::get_packed_size; + a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {} - kern_type kernel = a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } - 
a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp index ab64f53f66..bd71b65908 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const int8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const int8_t *const *inptrs_raw, - const int8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -111,2096 +111,2070 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x10, #0x0\n" - "ldr x3, [%x[params], %[offsetof_Params_weights]]\n" - "mov x1, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x25, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x2, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x19, x4, #0x3\n" - "ldr x5, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x13, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v7.16b }, [x13]\n" - "add x8, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v13.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v19.4s }, [x8]\n" - "add x8, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v16.4s }, [x20]\n" - "ld1r { v12.4s }, [x8]\n" - "ldp x17, x16, [x21, #0x0]\n" - "ldp x6, x8, [x21, #0x10]\n" - "cbz x19, 3f\n" - "subs x19, x19, #0x1\n" - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q15, [x12, #0x0]\n" - "mov v18.16b, v15.16b\n" - "ldr q20, [x12, #0x10]\n" - "add x12, x12, #0x20\n" - "mov v11.16b, v15.16b\n" - "str x12, [%x[params], %[offsetof_Params_bias]]\n" + "ldr x10, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x17, x10, %[offsetof_Requantize32_a_offset]\n" + "add x9, x10, %[offsetof_Requantize32_b_offset]\n" + "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x4, x10, %[offsetof_Requantize32_c_offset]\n" + "add x14, x10, %[offsetof_Requantize32_minval]\n" + "ldr x23, [%x[params], %[offsetof_Params_weights]]\n" + "add x5, x10, %[offsetof_Requantize32_maxval]\n" + "ld1r { v9.16b }, [x17]\n" + "ld1r { v14.16b }, [x9]\n" + "lsr x3, x0, #0x3\n" + "ld1r { v18.8h }, [x4]\n" + "ld1r { v11.8h }, [x14]\n" + "mov x24, #0x0\n" + "mov x22, #0x0\n" + "ld1r { v13.8h }, 
[x5]\n" + "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x20, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x1, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x16, x8, [x25, #0x0]\n" + "ldp x4, x7, [x25, #0x10]\n" + "cbz x3, 3f\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "subs x3, x3, #0x1\n" + "mov v17.16b, v15.16b\n" + "ldr q16, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "ldr d2, [x23, #0x10]\n" + "mov v8.16b, v16.16b\n" "mov v10.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "mov v5.16b, v20.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v8.16b, v20.16b\n" - "ldr d2, [x3, #0x10]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "mov v9.16b, v20.16b\n" - "ldr d3, [x3, #0x18]\n" - "ldr d4, [x3, #0x20]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "ldp x28, x27, [x25, #0x0]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "ldp x26, x13, [x25, #0x10]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "ldp x24, x23, [x25, #0x20]\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "ldr d31, [x28, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "ldr d30, [x27, x10]\n" - "ldr d29, [x26, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "ldr d28, [x13, x10]\n" - "ldr d27, [x24, x10]\n" - "ssubl v29.8h, v29.8b, v7.8b\n" - "ldr d23, [x23, x10]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "ldr d25, [x22, x10]\n" - "ldr d24, [x21, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "ldr d26, [x20, x10]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "ldr d22, [x0, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "ssubl v22.8h, v22.8b, v7.8b\n" + "ldr d3, [x23, #0x18]\n" + "ldr d4, [x23, #0x20]\n" + "mov v7.16b, v16.16b\n" + "mov v6.16b, v15.16b\n" + "ldp x28, x6, [x20, #0x0]\n" + "ldp x26, x25, [x20, #0x10]\n" + "mov v5.16b, v16.16b\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ldp x5, x2, [x20, #0x20]\n" + "ldp x27, x21, [x20, #0x30]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ldp x12, x19, [x20, #0x40]\n" + "ldr d31, [x28, x24]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr d30, [x6, x24]\n" + "ldr d29, [x26, x24]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "ldr d28, [x25, x24]\n" + "ldr d27, [x5, x24]\n" + "ssubl v29.8h, v29.8b, v9.8b\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "ldr d23, [x2, x24]\n" + "ldr d25, [x27, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "ldr d24, [x21, x24]\n" + "ldr d26, [x12, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "ldr d22, [x19, x24]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "ssubl v22.8h, v22.8b, v9.8b\n" "beq 2f\n" "1:" // Loop "smlal v15.4s, v31.4h, v0.4h\n" - "ldr x20, [x25, #0x50]\n" - "subs x19, x19, #0x1\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr x28, [x25, #0x58]\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "ldr x0, [x25, #0x60]\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ldr d31, [x20, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v11.4s, v29.4h, v0.4h\n" - "ldr x7, [x25, #0x68]\n" - "smlal2 v8.4s, v29.8h, v0.8h\n" - "ldr x26, [x25, #0x70]\n" - "smlal v10.4s, v28.4h, v0.4h\n" - "ldr x23, [x25, #0x78]\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ldr d0, [x3, #0x28]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr x19, [x20, #0x50]\n" + "ldr d31, [x19, x24]\n" + "smlal v17.4s, v30.4h, v0.4h\n" + 
"smlal v10.4s, v29.4h, v0.4h\n" + "ldr x15, [x20, #0x58]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "ldr x19, [x20, #0x60]\n" + "ldr x27, [x20, #0x68]\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "ldr x20, [x25, #0x80]\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v1.4h\n" - "ldr x22, [x25, #0x88]\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" - "ldr x13, [x25, #0x90]\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "ldr x21, [x25, #0x98]\n" - "smlal2 v8.4s, v28.8h, v1.8h\n" - "ldr x14, [x25, #0xa0]\n" - "smlal v10.4s, v23.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "smlal2 v9.4s, v23.8h, v1.8h\n" - "ldr d1, [x3, #0x30]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "ldr x5, [x20, #0x70]\n" + "ldr x11, [x20, #0x78]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "ldr d30, [x15, x24]\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ldr d0, [x23, #0x28]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "ldr x12, [x20, #0x80]\n" + "ldr x26, [x20, #0x88]\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "ldr d27, [x0, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "ldr x0, [x25, #0xb8]\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "ldr x15, [x25, #0xc0]\n" - "smlal v11.4s, v23.4h, v2.4h\n" - "ldr x9, [x25, #0xc8]\n" - "smlal2 v8.4s, v23.8h, v2.8h\n" - "ldr x27, [x25, #0xd0]\n" - "smlal v10.4s, v31.4h, v2.4h\n" - "ldr x28, [x25, #0xd8]\n" - "smlal2 v9.4s, v31.8h, v2.8h\n" - "ldr d2, [x3, #0x38]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "ldr x14, [x20, #0x90]\n" + "ldr x15, [x20, #0x98]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "ldr d1, [x23, #0x30]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "ldr x21, [x20, #0xa0]\n" + "ldr x2, [x20, #0xa8]\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr q6, [x2, #0x0]\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x7, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "ldr x12, [x25, #0xe0]\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "ldr q21, [x5, #0x0]\n" - "smlal v11.4s, v31.4h, v3.4h\n" - "ldr q17, [x2, #0x10]\n" - "add x2, x2, #0x20\n" - "smlal2 v8.4s, v31.8h, v3.8h\n" - "ldr q14, [x5, #0x10]\n" - "add x5, x5, #0x20\n" - "smlal v10.4s, v30.4h, v3.4h\n" - "smlal2 v9.4s, v30.8h, v3.8h\n" - "ldr d3, [x3, #0x40]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "ldr x13, [x20, #0xb0]\n" + "ldr x9, [x20, #0xb8]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v31.8h, v2.8h\n" + "ldr d25, [x27, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "ldr d2, [x23, #0x38]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "ldr x19, [x20, #0xc0]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x26, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "ldr x7, [x25, #0xe8]\n" - "smlal2 v5.4s, v27.8h, 
v4.8h\n" - "ldr d27, [x23, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v11.4s, v30.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "smlal2 v8.4s, v30.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0x48]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "ldr x6, [x20, #0xd0]\n" + "ldr x27, [x20, #0xd8]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v30.8h, v3.8h\n" + "ldr d24, [x5, x24]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "ldr d3, [x23, #0x40]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "ldr d27, [x11, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v20.4s, v29.8h, v0.8h\n" - "smlal v18.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v11.4s, v22.4h, v0.4h\n" - "smlal2 v8.4s, v22.8h, v0.8h\n" - "smlal v10.4s, v25.4h, v0.4h\n" - "smlal2 v9.4s, v25.8h, v0.8h\n" - "ldr d0, [x3, #0x50]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "ldr x11, [x20, #0xe0]\n" + "ldr x17, [x20, #0xe8]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0x48]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "ldr x5, [x20, #0xf0]\n" + "ldr q12, [x10, #0x0]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "ldr q19, [x1, #0x0]\n" + "ldr q20, [x10, #0x10]\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x10]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v1.4h\n" - "ldr x23, [x25, #0xf8]\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "smlal v11.4s, v25.4h, v1.4h\n" - "smlal2 v8.4s, v25.8h, v1.8h\n" - "smlal v10.4s, v24.4h, v1.4h\n" - "smlal2 v9.4s, v24.8h, v1.8h\n" - "ldr d1, [x3, #0x58]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "ldr q29, [x1, #0x10]\n" + "subs x3, x3, #0x1\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "ldr d28, [x26, x24]\n" + "ldr d0, [x23, #0x50]\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "add x10, x10, #0x20\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v20.4s, v23.8h, v2.8h\n" - "ldr d23, [x20, x10]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v31.4h, v2.4h\n" - "ldr x22, [x25, #0x100]\n" - "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v24.4h, v2.4h\n" - "smlal2 v8.4s, v24.8h, v2.8h\n" - "smlal v10.4s, v27.4h, v2.4h\n" - "smlal2 v9.4s, v27.8h, v2.8h\n" - "ldr d2, [x3, #0x60]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "add x1, x1, #0x20\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "ldr d23, [x12, x24]\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "ldr d1, [x23, #0x58]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "ldr x26, [x20, #0x100]\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v20.4s, v31.8h, v3.8h\n" - "ldr d31, [x13, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v3.4h\n" - "ldr x20, [x25, #0x108]\n" - "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal v11.4s, v27.4h, v3.4h\n" - "smlal2 v8.4s, v27.8h, 
v3.8h\n" - "smlal v10.4s, v23.4h, v3.4h\n" - "smlal2 v9.4s, v23.8h, v3.8h\n" - "ldr d3, [x3, #0x68]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "ldr d31, [x14, x24]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "ldr d2, [x23, #0x60]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "ldr x12, [x20, #0x108]\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "ldr x13, [x25, #0x110]\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d26, [x14, x10]\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal v11.4s, v23.4h, v4.4h\n" - "ldr x21, [x25, #0x118]\n" - "smlal2 v8.4s, v23.8h, v4.8h\n" - "smlal v10.4s, v28.4h, v4.4h\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "ldr d4, [x3, #0x70]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "ldr d30, [x15, x24]\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "ldr d3, [x23, #0x68]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "ldr d26, [x21, x24]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v20.4s, v22.8h, v0.8h\n" - "ldr d22, [x0, x10]\n" - "ssubl v22.8h, v22.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "smlal v11.4s, v31.4h, v0.4h\n" - "smlal2 v8.4s, v31.8h, v0.8h\n" - "smlal v10.4s, v30.4h, v0.4h\n" - "smlal2 v9.4s, v30.8h, v0.8h\n" - "ldr d0, [x3, #0x78]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "ldr x14, [x20, #0x110]\n" + "ldr x21, [x20, #0x118]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "ldr d4, [x23, #0x70]\n" + "ldr d22, [x9, x24]\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "ssubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v20.4s, v25.8h, v1.8h\n" - "ldr d25, [x11, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "smlal v11.4s, v30.4h, v1.4h\n" - "smlal2 v8.4s, v30.8h, v1.8h\n" - "smlal v10.4s, v26.4h, v1.4h\n" - "smlal2 v9.4s, v26.8h, v1.8h\n" - "ldr d1, [x3, #0x80]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "ldr d25, [x2, x24]\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "ldr d0, [x23, #0x78]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v20.4s, v24.8h, v2.8h\n" - "ldr d24, [x24, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "smlal v11.4s, v26.4h, v2.4h\n" - "smlal2 v8.4s, v26.8h, v2.8h\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "ldr d2, [x3, #0x88]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "ldr d24, [x13, x24]\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v2.4h\n" + 
"smlal v10.4s, v26.4h, v2.4h\n" + "ldr d1, [x23, #0x80]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "ldr d27, [x15, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "ldr d3, [x3, #0x90]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "ldr d27, [x19, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "ldr d2, [x23, #0x88]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v20.4s, v23.8h, v4.8h\n" - "ldr d23, [x9, x10]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "ldr d28, [x12, x10]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v22.4h, v4.4h\n" - "smlal2 v9.4s, v22.8h, v4.8h\n" - "ldr d4, [x3, #0x98]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "ldr d23, [x28, x24]\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "ldr d3, [x23, #0x90]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "ldr d28, [x11, x24]\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x27, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "smlal v11.4s, v27.4h, v0.4h\n" - "smlal2 v8.4s, v27.8h, v0.8h\n" - "smlal v10.4s, v23.4h, v0.4h\n" - "smlal2 v9.4s, v23.8h, v0.8h\n" - "ldr d0, [x3, #0xa0]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr d31, [x6, x24]\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "ldr d4, [x23, #0x98]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "smlal v11.4s, v23.4h, v1.4h\n" - "smlal2 v8.4s, v23.8h, v1.8h\n" - "smlal v10.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "ldr d1, [x3, #0xa8]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x24]\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "ldr d0, [x23, #0xa0]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v20.4s, v26.8h, v2.8h\n" - "ldr d26, [x7, x10]\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v8.4s, 
v31.8h, v2.8h\n" - "smlal v10.4s, v30.4h, v2.4h\n" - "smlal2 v9.4s, v30.8h, v2.8h\n" - "ldr d2, [x3, #0xb0]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "ldr d26, [x17, x24]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "ldr d1, [x23, #0xa8]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x26, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v30.4h, v3.4h\n" - "smlal2 v8.4s, v30.8h, v3.8h\n" - "smlal v10.4s, v28.4h, v3.4h\n" - "smlal2 v9.4s, v28.8h, v3.8h\n" - "ldr d3, [x3, #0xb8]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "ldr d25, [x5, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "ldr d2, [x23, #0xb0]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x23, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "smlal v11.4s, v28.4h, v4.4h\n" - "smlal2 v8.4s, v28.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0xc0]\n" - "add x3, x3, #0xc8\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "ldr d24, [x25, x24]\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "ldr d3, [x23, #0xb8]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" "smlal v15.4s, v27.4h, v0.4h\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v20.4s, v27.8h, v0.8h\n" - "ldr d27, [x22, x10]\n" - "smlal v18.4s, v23.4h, v0.4h\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v8.4s, v25.8h, v0.8h\n" - "ldr d25, [x20, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v0.4h\n" - "smlal2 v9.4s, v24.8h, v0.8h\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "ldr d27, [x26, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0xc0]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "add x23, x23, #0xc8\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "ldr d25, [x12, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v20.4s, v23.8h, v1.8h\n" - "smlal v18.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v8.4s, v24.8h, v1.8h\n" - "ldr d24, [x13, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v1.4h\n" - "smlal2 v9.4s, v27.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "ldr d24, [x14, x24]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" "smlal v15.4s, v31.4h, v2.4h\n" - 
"smlal2 v20.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "smlal v11.4s, v27.4h, v2.4h\n" - "smlal2 v8.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x10]\n" - "add x10, x10, #0x8\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x24]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x24, x24, #0x8\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v20.4s, v30.8h, v3.8h\n" - "smlal v18.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v20.4s, v28.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v27.4h, v4.4h\n" - "smlal2 v9.4s, v27.8h, v4.8h\n" - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "sqrdmulh v20.4s, v20.4s, v17.4s\n" - "sqrdmulh v18.4s, v18.4s, v6.4s\n" - "sqrdmulh v5.4s, v5.4s, v17.4s\n" - "and v1.16b, v15.16b, v21.16b\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "and v29.16b, v20.16b, v14.16b\n" - "and v3.16b, v18.16b, v21.16b\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "and v2.16b, v5.16b, v14.16b\n" - "sqrdmulh v11.4s, v11.4s, v6.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v8.4s, v8.4s, v17.4s\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v27.8h, v4.8h\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v22.4s\n" "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v1.4s\n" - "sqrdmulh v10.4s, v10.4s, v6.4s\n" - "and v0.16b, v11.16b, v21.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v21.4s\n" - "sqadd v20.4s, v20.4s, v29.4s\n" - "sqadd v18.4s, v18.4s, v3.4s\n" - "sqadd v5.4s, v5.4s, v2.4s\n" - "and v27.16b, v8.16b, v14.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "add v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v21.4s\n" - "srshl v5.4s, v5.4s, v14.4s\n" - "smin v15.4s, v15.4s, v12.4s\n" - "add v20.4s, v20.4s, v19.4s\n" - "add v18.4s, v18.4s, v19.4s\n" - "smax v15.4s, v15.4s, 
v16.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smin v18.4s, v18.4s, v12.4s\n" - "add v5.4s, v5.4s, v19.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "smin v5.4s, v5.4s, v12.4s\n" - "uzp1 v15.16b, v15.16b, v20.16b\n" - "sqadd v11.4s, v11.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x17, x1]\n" - "smax v5.4s, v5.4s, v16.4s\n" - "sqadd v8.4s, v8.4s, v27.4s\n" - "srshl v11.4s, v11.4s, v21.4s\n" - "and v30.16b, v10.16b, v21.16b\n" - "sshr v30.4s, v30.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v5.16b\n" - "add v11.4s, v11.4s, v19.4s\n" - "srshl v8.4s, v8.4s, v14.4s\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "str d18, [x16, x1]\n" - "smin v11.4s, v11.4s, v12.4s\n" - "sqrdmulh v9.4s, v9.4s, v17.4s\n" - "add v8.4s, v8.4s, v19.4s\n" - "sqadd v10.4s, v10.4s, v30.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "smin v8.4s, v8.4s, v12.4s\n" - "and v6.16b, v9.16b, v14.16b\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "smax v8.4s, v8.4s, v16.4s\n" - "srshl v10.4s, v10.4s, v21.4s\n" - "uzp1 v11.16b, v11.16b, v8.16b\n" - "add v10.4s, v10.4s, v19.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x6, x1]\n" - "smin v10.4s, v10.4s, v12.4s\n" - "sqadd v9.4s, v9.4s, v6.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "srshl v9.4s, v9.4s, v14.4s\n" - "add v9.4s, v9.4s, v19.4s\n" - "smin v9.4s, v9.4s, v12.4s\n" - "smax v9.4s, v9.4s, v16.4s\n" - "uzp1 v10.16b, v10.16b, v9.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "str d15, [x16, x22]\n" "uzp1 v10.16b, v10.16b, v10.16b\n" - "str d10, [x8, x1]\n" - "add x1, x1, #0x8\n" - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q15, [x12, #0x0]\n" - "mov v18.16b, v15.16b\n" - "ldr q20, [x12, #0x10]\n" - "add x12, x12, #0x20\n" - "mov v11.16b, v15.16b\n" - "str x12, [%x[params], %[offsetof_Params_bias]]\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "str d17, [x8, x22]\n" + "str d10, [x4, x22]\n" + "str d6, [x7, x22]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "add x22, x22, #0x8\n" + "ldr q16, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "ldr d2, [x23, #0x10]\n" + "mov v17.16b, v15.16b\n" + "mov v8.16b, v16.16b\n" + "ldr d3, [x23, #0x18]\n" + "ldr d4, [x23, #0x20]\n" "mov v10.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "mov v5.16b, 
v20.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v8.16b, v20.16b\n" - "ldr d2, [x3, #0x10]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "mov v9.16b, v20.16b\n" - "ldr d3, [x3, #0x18]\n" - "ldr d4, [x3, #0x20]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "ldp x28, x27, [x25, #0x0]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "ldp x26, x13, [x25, #0x10]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "ldp x24, x23, [x25, #0x20]\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "ldr d31, [x28, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "ldr d30, [x27, x10]\n" - "ldr d29, [x26, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "ldr d28, [x13, x10]\n" - "ldr d27, [x24, x10]\n" - "ssubl v29.8h, v29.8b, v7.8b\n" - "ldr d23, [x23, x10]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "ldr d25, [x22, x10]\n" - "ldr d24, [x21, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "ldr d26, [x20, x10]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "ldr d22, [x0, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "ssubl v22.8h, v22.8b, v7.8b\n" + "mov v7.16b, v16.16b\n" + "ldp x28, x6, [x20, #0x0]\n" + "ldp x26, x25, [x20, #0x10]\n" + "mov v6.16b, v15.16b\n" + "mov v5.16b, v16.16b\n" + "ldp x5, x2, [x20, #0x20]\n" + "ldp x27, x21, [x20, #0x30]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ldp x12, x19, [x20, #0x40]\n" + "ldr d31, [x28, x24]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr d30, [x6, x24]\n" + "ldr d29, [x26, x24]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "ldr d28, [x25, x24]\n" + "ldr d27, [x5, x24]\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "ssubl v29.8h, v29.8b, v9.8b\n" + "ldr d23, [x2, x24]\n" + "ldr d25, [x27, x24]\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "ldr d24, [x21, x24]\n" + "ldr d26, [x12, x24]\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "ldr d22, [x19, x24]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "ssubl v22.8h, v22.8b, v9.8b\n" "bgt 1b\n" "2:" // Tail "smlal v15.4s, v31.4h, v0.4h\n" - "ldr x20, [x25, #0x50]\n" - "tst x4, #0x7\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr x28, [x25, #0x58]\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "ldr x0, [x25, #0x60]\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ldr d31, [x20, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v11.4s, v29.4h, v0.4h\n" - "ldr x7, [x25, #0x68]\n" - "smlal2 v8.4s, v29.8h, v0.8h\n" - "ldr x26, [x25, #0x70]\n" - "smlal v10.4s, v28.4h, v0.4h\n" - "ldr x23, [x25, #0x78]\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ldr d0, [x3, #0x28]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr x19, [x20, #0x50]\n" + "ldr d31, [x19, x24]\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v29.4h, v0.4h\n" + "ldr x15, [x20, #0x58]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "ldr x19, [x20, #0x60]\n" + "ldr x27, [x20, #0x68]\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "ldr x20, [x25, #0x80]\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v1.4h\n" - "ldr x22, [x25, #0x88]\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" - "ldr x13, [x25, #0x90]\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "ldr x21, [x25, #0x98]\n" - "smlal2 v8.4s, v28.8h, v1.8h\n" - "ldr x14, [x25, #0xa0]\n" - "smlal v10.4s, v23.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "smlal2 v9.4s, v23.8h, v1.8h\n" - "ldr d1, [x3, 
#0x30]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "ldr x5, [x20, #0x70]\n" + "ldr x11, [x20, #0x78]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "ldr d30, [x15, x24]\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ldr d0, [x23, #0x28]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "ldr x12, [x20, #0x80]\n" + "ldr x26, [x20, #0x88]\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "ldr d27, [x0, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "ldr x0, [x25, #0xb8]\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "ldr x15, [x25, #0xc0]\n" - "smlal v11.4s, v23.4h, v2.4h\n" - "ldr x9, [x25, #0xc8]\n" - "smlal2 v8.4s, v23.8h, v2.8h\n" - "ldr x27, [x25, #0xd0]\n" - "smlal v10.4s, v31.4h, v2.4h\n" - "ldr x28, [x25, #0xd8]\n" - "smlal2 v9.4s, v31.8h, v2.8h\n" - "ldr d2, [x3, #0x38]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "ldr x14, [x20, #0x90]\n" + "ldr x15, [x20, #0x98]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "ldr d1, [x23, #0x30]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "ldr x21, [x20, #0xa0]\n" + "ldr x2, [x20, #0xa8]\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr x12, [x25, #0xe0]\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x7, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "ldr x7, [x25, #0xe8]\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "ldr q6, [x2, #0x0]\n" - "smlal v11.4s, v31.4h, v3.4h\n" - "ldr q21, [x5, #0x0]\n" - "smlal2 v8.4s, v31.8h, v3.8h\n" - "ldr q17, [x2, #0x10]\n" - "add x2, x2, #0x20\n" - "smlal v10.4s, v30.4h, v3.4h\n" - "ldr q14, [x5, #0x10]\n" - "add x5, x5, #0x20\n" - "smlal2 v9.4s, v30.8h, v3.8h\n" - "ldr d3, [x3, #0x40]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "ldr x13, [x20, #0xb0]\n" + "ldr x9, [x20, #0xb8]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v31.8h, v2.8h\n" + "ldr d25, [x27, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "ldr d2, [x23, #0x38]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "ldr x19, [x20, #0xc0]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x26, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "smlal2 v5.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v11.4s, v30.4h, v4.4h\n" - "ldr x23, [x25, #0xf8]\n" - "smlal2 v8.4s, v30.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0x48]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "ldr x6, [x20, #0xd0]\n" + "ldr x27, [x20, #0xd8]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v30.8h, v3.8h\n" + "ldr d24, [x5, x24]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "ldr d3, [x23, #0x40]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "ldr d27, [x11, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + 
"smlal2 v7.4s, v30.8h, v4.8h\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v20.4s, v29.8h, v0.8h\n" - "smlal v18.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v11.4s, v22.4h, v0.4h\n" - "smlal2 v8.4s, v22.8h, v0.8h\n" - "smlal v10.4s, v25.4h, v0.4h\n" - "smlal2 v9.4s, v25.8h, v0.8h\n" - "ldr d0, [x3, #0x50]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "ldr x11, [x20, #0xe0]\n" + "ldr x17, [x20, #0xe8]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0x48]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "ldr x5, [x20, #0xf0]\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "ldr q12, [x10, #0x0]\n" + "ldr q19, [x1, #0x0]\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x10]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v1.4h\n" - "ldr x22, [x25, #0x100]\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "smlal v11.4s, v25.4h, v1.4h\n" - "smlal2 v8.4s, v25.8h, v1.8h\n" - "smlal v10.4s, v24.4h, v1.4h\n" - "smlal2 v9.4s, v24.8h, v1.8h\n" - "ldr d1, [x3, #0x58]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "ldr q20, [x10, #0x10]\n" + "ldr q29, [x1, #0x10]\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "ldr d28, [x26, x24]\n" + "ldr d0, [x23, #0x50]\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "ldr x26, [x20, #0x100]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "tst x0, #0x7\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v20.4s, v23.8h, v2.8h\n" - "ldr d23, [x20, x10]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v31.4h, v2.4h\n" - "ldr x20, [x25, #0x108]\n" - "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v24.4h, v2.4h\n" - "smlal2 v8.4s, v24.8h, v2.8h\n" - "smlal v10.4s, v27.4h, v2.4h\n" - "smlal2 v9.4s, v27.8h, v2.8h\n" - "ldr d2, [x3, #0x60]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "add x10, x10, #0x20\n" + "add x1, x1, #0x20\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "ldr d23, [x12, x24]\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "ldr d1, [x23, #0x58]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "ldr x12, [x20, #0x108]\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v20.4s, v31.8h, v3.8h\n" - "ldr d31, [x13, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v3.4h\n" - "ldr x13, [x25, #0x110]\n" - "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal v11.4s, v27.4h, v3.4h\n" - "smlal2 v8.4s, v27.8h, v3.8h\n" - "smlal v10.4s, v23.4h, v3.4h\n" - "smlal2 v9.4s, v23.8h, v3.8h\n" - "ldr d3, [x3, #0x68]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "ldr d31, [x14, x24]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "ldr d2, [x23, #0x60]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "ldr x14, [x20, #0x110]\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "ldr x21, [x25, 
#0x118]\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d26, [x14, x10]\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal v11.4s, v23.4h, v4.4h\n" - "smlal2 v8.4s, v23.8h, v4.8h\n" - "smlal v10.4s, v28.4h, v4.4h\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "ldr d4, [x3, #0x70]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "ldr d30, [x15, x24]\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "ldr d3, [x23, #0x68]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "ldr d26, [x21, x24]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v20.4s, v22.8h, v0.8h\n" - "ldr d22, [x0, x10]\n" - "ssubl v22.8h, v22.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "smlal v11.4s, v31.4h, v0.4h\n" - "smlal2 v8.4s, v31.8h, v0.8h\n" - "smlal v10.4s, v30.4h, v0.4h\n" - "smlal2 v9.4s, v30.8h, v0.8h\n" - "ldr d0, [x3, #0x78]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "ldr x21, [x20, #0x118]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "ldr d4, [x23, #0x70]\n" + "ldr d22, [x9, x24]\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "ssubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v20.4s, v25.8h, v1.8h\n" - "ldr d25, [x11, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "smlal v11.4s, v30.4h, v1.4h\n" - "smlal2 v8.4s, v30.8h, v1.8h\n" - "smlal v10.4s, v26.4h, v1.4h\n" - "smlal2 v9.4s, v26.8h, v1.8h\n" - "ldr d1, [x3, #0x80]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "ldr d25, [x2, x24]\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "ldr d0, [x23, #0x78]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v20.4s, v24.8h, v2.8h\n" - "ldr d24, [x24, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "smlal v11.4s, v26.4h, v2.4h\n" - "smlal2 v8.4s, v26.8h, v2.8h\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "ldr d2, [x3, #0x88]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "ldr d24, [x13, x24]\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "ldr d1, [x23, #0x80]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "ldr d27, [x15, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "ldr d3, [x3, #0x90]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "ldr d27, [x19, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal 
v10.4s, v25.4h, v3.4h\n" + "ldr d2, [x23, #0x88]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v20.4s, v23.8h, v4.8h\n" - "ldr d23, [x9, x10]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "ldr d28, [x12, x10]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v22.4h, v4.4h\n" - "smlal2 v9.4s, v22.8h, v4.8h\n" - "ldr d4, [x3, #0x98]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "ldr d23, [x28, x24]\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "ldr d3, [x23, #0x90]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "ldr d28, [x11, x24]\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x27, x10]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "smlal v11.4s, v27.4h, v0.4h\n" - "smlal2 v8.4s, v27.8h, v0.8h\n" - "smlal v10.4s, v23.4h, v0.4h\n" - "smlal2 v9.4s, v23.8h, v0.8h\n" - "ldr d0, [x3, #0xa0]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr d31, [x6, x24]\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "ldr d4, [x23, #0x98]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "smlal v11.4s, v23.4h, v1.4h\n" - "smlal2 v8.4s, v23.8h, v1.8h\n" - "smlal v10.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "ldr d1, [x3, #0xa8]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x24]\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "ldr d0, [x23, #0xa0]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v20.4s, v26.8h, v2.8h\n" - "ldr d26, [x7, x10]\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v8.4s, v31.8h, v2.8h\n" - "smlal v10.4s, v30.4h, v2.4h\n" - "smlal2 v9.4s, v30.8h, v2.8h\n" - "ldr d2, [x3, #0xb0]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "ldr d26, [x17, x24]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "ldr d1, [x23, #0xa8]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x26, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v30.4h, v3.4h\n" - "smlal2 v8.4s, v30.8h, 
v3.8h\n" - "smlal v10.4s, v28.4h, v3.4h\n" - "smlal2 v9.4s, v28.8h, v3.8h\n" - "ldr d3, [x3, #0xb8]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "ldr d25, [x5, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "ldr d2, [x23, #0xb0]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x23, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "smlal v11.4s, v28.4h, v4.4h\n" - "smlal2 v8.4s, v28.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0xc0]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "ldr d24, [x25, x24]\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "ldr d3, [x23, #0xb8]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v20.4s, v27.8h, v0.8h\n" - "ldr d27, [x22, x10]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v0.4h\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v8.4s, v25.8h, v0.8h\n" - "ldr d25, [x20, x10]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v0.4h\n" - "smlal2 v9.4s, v24.8h, v0.8h\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "ldr d27, [x26, x24]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0xc0]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "ldr d25, [x12, x24]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v20.4s, v23.8h, v1.8h\n" - "smlal v18.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v8.4s, v24.8h, v1.8h\n" - "ldr d24, [x13, x10]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v1.4h\n" - "smlal2 v9.4s, v27.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "ldr d24, [x14, x24]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v20.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "smlal v11.4s, v27.4h, v2.4h\n" - "smlal2 v8.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x10]\n" - "add x10, x10, #0x8\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x24]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x24, x24, #0x8\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v20.4s, v30.8h, v3.8h\n" - "smlal v18.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "smlal v11.4s, v25.4h, 
v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v20.4s, v28.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v27.4h, v4.4h\n" - "smlal2 v9.4s, v27.8h, v4.8h\n" - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "sqrdmulh v20.4s, v20.4s, v17.4s\n" - "sqrdmulh v18.4s, v18.4s, v6.4s\n" - "sqrdmulh v5.4s, v5.4s, v17.4s\n" - "and v1.16b, v15.16b, v21.16b\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "and v29.16b, v20.16b, v14.16b\n" - "and v3.16b, v18.16b, v21.16b\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "and v2.16b, v5.16b, v14.16b\n" - "sqrdmulh v11.4s, v11.4s, v6.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v8.4s, v8.4s, v17.4s\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v27.8h, v4.8h\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v22.4s\n" "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v1.4s\n" - "sqrdmulh v10.4s, v10.4s, v6.4s\n" - "and v0.16b, v11.16b, v21.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v21.4s\n" - "sqadd v20.4s, v20.4s, v29.4s\n" - "sqadd v18.4s, v18.4s, v3.4s\n" - "sqadd v5.4s, v5.4s, v2.4s\n" - "and v27.16b, v8.16b, v14.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "add v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v21.4s\n" - "srshl v5.4s, v5.4s, v14.4s\n" - "smin v15.4s, v15.4s, v12.4s\n" - "add v20.4s, v20.4s, v19.4s\n" - "add v18.4s, v18.4s, v19.4s\n" - "smax v15.4s, v15.4s, v16.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smin v18.4s, v18.4s, v12.4s\n" - "add v5.4s, v5.4s, v19.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "smin v5.4s, v5.4s, v12.4s\n" - "uzp1 v15.16b, v15.16b, v20.16b\n" - "sqadd v11.4s, v11.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + 
"srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x17, x1]\n" - "smax v5.4s, v5.4s, v16.4s\n" - "sqadd v8.4s, v8.4s, v27.4s\n" - "srshl v11.4s, v11.4s, v21.4s\n" - "and v30.16b, v10.16b, v21.16b\n" - "sshr v30.4s, v30.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v5.16b\n" - "add v11.4s, v11.4s, v19.4s\n" - "srshl v8.4s, v8.4s, v14.4s\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "str d18, [x16, x1]\n" - "smin v11.4s, v11.4s, v12.4s\n" - "sqrdmulh v9.4s, v9.4s, v17.4s\n" - "add v8.4s, v8.4s, v19.4s\n" - "sqadd v10.4s, v10.4s, v30.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "smin v8.4s, v8.4s, v12.4s\n" - "and v6.16b, v9.16b, v14.16b\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "smax v8.4s, v8.4s, v16.4s\n" - "srshl v10.4s, v10.4s, v21.4s\n" - "uzp1 v11.16b, v11.16b, v8.16b\n" - "add v10.4s, v10.4s, v19.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x6, x1]\n" - "smin v10.4s, v10.4s, v12.4s\n" - "sqadd v9.4s, v9.4s, v6.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "srshl v9.4s, v9.4s, v14.4s\n" - "add v9.4s, v9.4s, v19.4s\n" - "smin v9.4s, v9.4s, v12.4s\n" - "smax v9.4s, v9.4s, v16.4s\n" - "uzp1 v10.16b, v10.16b, v9.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "str d15, [x16, x22]\n" "uzp1 v10.16b, v10.16b, v10.16b\n" - "str d10, [x8, x1]\n" - "add x1, x1, #0x8\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "str d17, [x8, x22]\n" + "str d10, [x4, x22]\n" + "str d6, [x7, x22]\n" + "add x22, x22, #0x8\n" "beq 124f\n" - "add x3, x3, #0xc8\n" + "add x23, x23, #0xc8\n" "3:" // Oddments - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x4, #2, 5f\n" - "ld1 { v15.4s }, [x12], #0x10\n" - "tbz x4, #1, 4f\n" - "ld1 { v20.d }[0], [x12], #0x8\n" - "tbz x4, #0, 7f\n" - "ld1 { v20.s }[2], [x12]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x0, #2, 5f\n" + "ld1 { v15.4s }, [x19], #0x10\n" + "tbz x0, #1, 4f\n" + "ld1 { v16.d }[0], [x19], #0x8\n" + "tbz x0, #0, 7f\n" + "ld1 { v16.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x4, #0, 7f\n" - "ld1 { v20.s }[0], [x12]\n" + "tbz x0, #0, 7f\n" + "ld1 { v16.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x4, #1, 6f\n" - "ld1 { v15.d }[0], [x12], #0x8\n" - "tbz x4, #0, 7f\n" - "ld1 { v15.s }[2], [x12]\n" + "tbz x0, #1, 6f\n" + "ld1 { v15.d }[0], [x19], #0x8\n" + "tbz x0, #0, 7f\n" + "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 7f\n" - "ld1 { v15.s }[0], [x12]\n" + "tbz x0, #0, 7f\n" + "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v18.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "mov v5.16b, v20.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v11.16b, v15.16b\n" - "ldr d2, [x3, #0x10]\n" - "mov v8.16b, v20.16b\n" - "ldr d3, [x3, #0x18]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "mov v17.16b, v15.16b\n" + "mov v8.16b, v16.16b\n" + "ldr d2, [x23, #0x10]\n" + "ldr d3, [x23, #0x18]\n" "mov 
v10.16b, v15.16b\n" - "ldr d4, [x3, #0x20]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "mov v9.16b, v20.16b\n" - "ldp x28, x27, [x25, #0x0]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "ldp x26, x13, [x25, #0x10]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "ldp x24, x23, [x25, #0x20]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "add x28, x28, x10\n" - "add x27, x27, x10\n" - "add x26, x26, x10\n" - "add x13, x13, x10\n" - "add x24, x24, x10\n" - "add x23, x23, x10\n" - "add x22, x22, x10\n" - "add x21, x21, x10\n" - "add x20, x20, x10\n" - "add x0, x0, x10\n" - "tbz x4, #2, 9f\n" + "mov v7.16b, v16.16b\n" + "ldr d4, [x23, #0x20]\n" + "ldp x28, x6, [x20, #0x0]\n" + "mov v6.16b, v15.16b\n" + "mov v5.16b, v16.16b\n" + "ldp x26, x25, [x20, #0x10]\n" + "ldp x5, x2, [x20, #0x20]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ldp x27, x21, [x20, #0x30]\n" + "ldp x12, x19, [x20, #0x40]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "add x28, x28, x24\n" + "add x6, x6, x24\n" + "add x26, x26, x24\n" + "add x25, x25, x24\n" + "add x5, x5, x24\n" + "add x2, x2, x24\n" + "add x27, x27, x24\n" + "add x21, x21, x24\n" + "add x12, x12, x24\n" + "add x19, x19, x24\n" + "tbz x0, #2, 9f\n" "ld1 { v31.s }[0], [x28], #0x4\n" - "ld1 { v30.s }[0], [x27], #0x4\n" + "ld1 { v30.s }[0], [x6], #0x4\n" "ld1 { v29.s }[0], [x26], #0x4\n" - "ld1 { v28.s }[0], [x13], #0x4\n" - "ld1 { v27.s }[0], [x24], #0x4\n" - "ld1 { v23.s }[0], [x23], #0x4\n" - "ld1 { v25.s }[0], [x22], #0x4\n" + "ld1 { v28.s }[0], [x25], #0x4\n" + "ld1 { v27.s }[0], [x5], #0x4\n" + "ld1 { v23.s }[0], [x2], #0x4\n" + "ld1 { v25.s }[0], [x27], #0x4\n" "ld1 { v24.s }[0], [x21], #0x4\n" - "ld1 { v26.s }[0], [x20], #0x4\n" - "ld1 { v22.s }[0], [x0], #0x4\n" - "tbz x4, #1, 8f\n" + "ld1 { v26.s }[0], [x12], #0x4\n" + "ld1 { v22.s }[0], [x19], #0x4\n" + "tbz x0, #1, 8f\n" "ld1 { v31.h }[2], [x28], #0x2\n" - "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v30.h }[2], [x6], #0x2\n" "ld1 { v29.h }[2], [x26], #0x2\n" - "ld1 { v28.h }[2], [x13], #0x2\n" - "ld1 { v27.h }[2], [x24], #0x2\n" - "ld1 { v23.h }[2], [x23], #0x2\n" - "ld1 { v25.h }[2], [x22], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x5], #0x2\n" + "ld1 { v23.h }[2], [x2], #0x2\n" + "ld1 { v25.h }[2], [x27], #0x2\n" "ld1 { v24.h }[2], [x21], #0x2\n" - "ld1 { v26.h }[2], [x20], #0x2\n" - "ld1 { v22.h }[2], [x0], #0x2\n" - "tbz x4, #0, 11f\n" + "ld1 { v26.h }[2], [x12], #0x2\n" + "ld1 { v22.h }[2], [x19], #0x2\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[6], [x28]\n" - "ld1 { v30.b }[6], [x27]\n" + "ld1 { v30.b }[6], [x6]\n" "ld1 { v29.b }[6], [x26]\n" - "ld1 { v28.b }[6], [x13]\n" - "ld1 { v27.b }[6], [x24]\n" - "ld1 { v23.b }[6], [x23]\n" - "ld1 { v25.b }[6], [x22]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x5]\n" + "ld1 { v23.b }[6], [x2]\n" + "ld1 { v25.b }[6], [x27]\n" "ld1 { v24.b }[6], [x21]\n" - "ld1 { v26.b }[6], [x20]\n" - "ld1 { v22.b }[6], [x0]\n" + "ld1 { v26.b }[6], [x12]\n" + "ld1 { v22.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[4], [x28]\n" - "ld1 { v30.b }[4], [x27]\n" + "ld1 { v30.b }[4], [x6]\n" "ld1 { v29.b }[4], [x26]\n" - "ld1 { v28.b }[4], [x13]\n" - "ld1 { v27.b }[4], [x24]\n" - "ld1 { v23.b }[4], [x23]\n" - "ld1 { v25.b }[4], [x22]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x5]\n" + "ld1 { v23.b 
}[4], [x2]\n" + "ld1 { v25.b }[4], [x27]\n" "ld1 { v24.b }[4], [x21]\n" - "ld1 { v26.b }[4], [x20]\n" - "ld1 { v22.b }[4], [x0]\n" + "ld1 { v26.b }[4], [x12]\n" + "ld1 { v22.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x4, #1, 10f\n" + "tbz x0, #1, 10f\n" "ld1 { v31.h }[0], [x28], #0x2\n" - "ld1 { v30.h }[0], [x27], #0x2\n" + "ld1 { v30.h }[0], [x6], #0x2\n" "ld1 { v29.h }[0], [x26], #0x2\n" - "ld1 { v28.h }[0], [x13], #0x2\n" - "ld1 { v27.h }[0], [x24], #0x2\n" - "ld1 { v23.h }[0], [x23], #0x2\n" - "ld1 { v25.h }[0], [x22], #0x2\n" + "ld1 { v28.h }[0], [x25], #0x2\n" + "ld1 { v27.h }[0], [x5], #0x2\n" + "ld1 { v23.h }[0], [x2], #0x2\n" + "ld1 { v25.h }[0], [x27], #0x2\n" "ld1 { v24.h }[0], [x21], #0x2\n" - "ld1 { v26.h }[0], [x20], #0x2\n" - "ld1 { v22.h }[0], [x0], #0x2\n" - "tbz x4, #0, 11f\n" + "ld1 { v26.h }[0], [x12], #0x2\n" + "ld1 { v22.h }[0], [x19], #0x2\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[2], [x28]\n" - "ld1 { v30.b }[2], [x27]\n" + "ld1 { v30.b }[2], [x6]\n" "ld1 { v29.b }[2], [x26]\n" - "ld1 { v28.b }[2], [x13]\n" - "ld1 { v27.b }[2], [x24]\n" - "ld1 { v23.b }[2], [x23]\n" - "ld1 { v25.b }[2], [x22]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x5]\n" + "ld1 { v23.b }[2], [x2]\n" + "ld1 { v25.b }[2], [x27]\n" "ld1 { v24.b }[2], [x21]\n" - "ld1 { v26.b }[2], [x20]\n" - "ld1 { v22.b }[2], [x0]\n" + "ld1 { v26.b }[2], [x12]\n" + "ld1 { v22.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[0], [x28]\n" - "ld1 { v30.b }[0], [x27]\n" + "ld1 { v30.b }[0], [x6]\n" "ld1 { v29.b }[0], [x26]\n" - "ld1 { v28.b }[0], [x13]\n" - "ld1 { v27.b }[0], [x24]\n" - "ld1 { v23.b }[0], [x23]\n" - "ld1 { v25.b }[0], [x22]\n" + "ld1 { v28.b }[0], [x25]\n" + "ld1 { v27.b }[0], [x5]\n" + "ld1 { v23.b }[0], [x2]\n" + "ld1 { v25.b }[0], [x27]\n" "ld1 { v24.b }[0], [x21]\n" - "ld1 { v26.b }[0], [x20]\n" - "ld1 { v22.b }[0], [x0]\n" + "ld1 { v26.b }[0], [x12]\n" + "ld1 { v22.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x20, [x25, #0x50]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "ssubl v30.8h, v30.8b, v9.8b\n" "smlal v15.4s, v31.4h, v0.4h\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ssubl v29.8h, v29.8b, v7.8b\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v11.4s, v29.4h, v0.4h\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal2 v8.4s, v29.8h, v0.8h\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v28.4h, v0.4h\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ssubl v22.8h, v22.8b, v7.8b\n" + "ldr x19, [x20, #0x50]\n" + "ssubl v29.8h, v29.8b, v9.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal v10.4s, v29.4h, v0.4h\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "add x19, x19, x24\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "add x20, x20, x10\n" - "smlal v18.4s, v27.4h, v1.4h\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "smlal2 v8.4s, v28.8h, v1.8h\n" - "smlal v10.4s, v23.4h, v1.4h\n" - "smlal2 v9.4s, v23.8h, v1.8h\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "smlal2 v16.4s, v30.8h, 
v1.8h\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "ssubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v23.4h, v2.4h\n" - "smlal2 v8.4s, v23.8h, v2.8h\n" - "tbz x4, #2, 13f\n" - "ld1 { v31.s }[0], [x20], #0x4\n" - "tbz x4, #1, 12f\n" - "ld1 { v31.h }[2], [x20], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[6], [x20]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" + "tbz x0, #2, 13f\n" + "ld1 { v31.s }[0], [x19], #0x4\n" + "tbz x0, #1, 12f\n" + "ld1 { v31.h }[2], [x19], #0x2\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[6], [x19]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[4], [x20]\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[4], [x19]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - "tbz x4, #1, 14f\n" - "ld1 { v31.h }[0], [x20], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[2], [x20]\n" + "tbz x0, #1, 14f\n" + "ld1 { v31.h }[0], [x19], #0x2\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[2], [x19]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[0], [x20]\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[0], [x19]\n" "15:" // Oddments: Load (1, 3): Bit 2: End + "ssubl v31.8h, v31.8b, v9.8b\n" + "ldr x15, [x20, #0x58]\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v5.4s, v31.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr x28, [x25, #0x58]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "add x28, x28, x10\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v10.4s, v31.4h, v2.4h\n" - "smlal2 v9.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v3.4h\n" - "smlal2 v8.4s, v31.8h, v3.8h\n" - "tbz x4, #2, 17f\n" - "ld1 { v30.s }[0], [x28], #0x4\n" - "tbz x4, #1, 16f\n" - "ld1 { v30.h }[2], [x28], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[6], [x28]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "add x15, x15, x24\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" + "tbz x0, #2, 17f\n" + "ld1 { v30.s }[0], [x15], #0x4\n" + "tbz x0, #1, 16f\n" + "ld1 { v30.h }[2], [x15], #0x2\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[6], [x15]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[4], [x28]\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[4], [x15]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x4, #1, 18f\n" - "ld1 { v30.h }[0], [x28], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[2], [x28]\n" + "tbz x0, #1, 18f\n" + "ld1 { v30.h }[0], [x15], #0x2\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[2], [x15]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[0], [x28]\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[0], [x15]\n" "19:" // Oddments: Load (1, 4): Bit 2: End + "ssubl v30.8h, v30.8b, v9.8b\n" + "ldr x19, [x20, #0x60]\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v5.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - 
"ldr x0, [x25, #0x60]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "add x0, x0, x10\n" - "smlal v10.4s, v30.4h, v3.4h\n" - "smlal2 v9.4s, v30.8h, v3.8h\n" - "tbz x4, #2, 21f\n" - "ld1 { v27.s }[0], [x0], #0x4\n" - "tbz x4, #1, 20f\n" - "ld1 { v27.h }[2], [x0], #0x2\n" - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[6], [x0]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "add x19, x19, x24\n" + "tbz x0, #2, 21f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x0, #1, 20f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[6], [x19]\n" "b 23f\n" "20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[4], [x0]\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[4], [x19]\n" "b 23f\n" "21:" // Oddments: Load (0, 5): Bit 2: Unset - "tbz x4, #1, 22f\n" - "ld1 { v27.h }[0], [x0], #0x2\n" - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[2], [x0]\n" + "tbz x0, #1, 22f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[2], [x19]\n" "b 23f\n" "22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[0], [x0]\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[0], [x19]\n" "23:" // Oddments: Load (0, 5): Bit 2: End - "smlal v11.4s, v30.4h, v4.4h\n" - "ldr d0, [x3, #0x28]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v8.4s, v30.8h, v4.8h\n" - "ldr x7, [x25, #0x68]\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "add x7, x7, x10\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "smlal2 v5.4s, v27.8h, v4.8h\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "ldr d0, [x23, #0x28]\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x27, [x20, #0x68]\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "add x27, x27, x24\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v20.4s, v29.8h, v0.8h\n" - "smlal v18.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v11.4s, v22.4h, v0.4h\n" - "smlal2 v8.4s, v22.8h, v0.8h\n" - "tbz x4, #2, 25f\n" - "ld1 { v25.s }[0], [x7], #0x4\n" - "tbz x4, #1, 24f\n" - "ld1 { v25.h }[2], [x7], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[6], [x7]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" + "tbz x0, #2, 25f\n" + "ld1 { v25.s }[0], [x27], #0x4\n" + "tbz x0, #1, 24f\n" + "ld1 { v25.h }[2], [x27], #0x2\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[6], [x27]\n" "b 27f\n" "24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[4], [x7]\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[4], [x27]\n" "b 27f\n" "25:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x4, #1, 26f\n" - "ld1 { v25.h }[0], [x7], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[2], [x7]\n" + "tbz x0, #1, 26f\n" + "ld1 { v25.h }[0], [x27], #0x2\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[2], [x27]\n" "b 27f\n" "26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[0], [x7]\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[0], [x27]\n" "27:" // Oddments: Load (2, 1): Bit 2: End - "ldr d1, [x3, #0x30]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v25.4h, v0.4h\n" - "ldr x26, [x25, #0x70]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v25.8h, v0.8h\n" - "add x26, x26, x10\n" + "ldr d1, [x23, #0x30]\n" + "ssubl v25.8h, v25.8b, 
v9.8b\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x5, [x20, #0x70]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "add x5, x5, x24\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "smlal v18.4s, v23.4h, v1.4h\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "smlal v11.4s, v25.4h, v1.4h\n" - "smlal2 v8.4s, v25.8h, v1.8h\n" - "tbz x4, #2, 29f\n" - "ld1 { v24.s }[0], [x26], #0x4\n" - "tbz x4, #1, 28f\n" - "ld1 { v24.h }[2], [x26], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[6], [x26]\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" + "tbz x0, #2, 29f\n" + "ld1 { v24.s }[0], [x5], #0x4\n" + "tbz x0, #1, 28f\n" + "ld1 { v24.h }[2], [x5], #0x2\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[6], [x5]\n" "b 31f\n" "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[4], [x26]\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[4], [x5]\n" "b 31f\n" "29:" // Oddments: Load (2, 2): Bit 2: Unset - "tbz x4, #1, 30f\n" - "ld1 { v24.h }[0], [x26], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[2], [x26]\n" + "tbz x0, #1, 30f\n" + "ld1 { v24.h }[0], [x5], #0x2\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[2], [x5]\n" "b 31f\n" "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[0], [x26]\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[0], [x5]\n" "31:" // Oddments: Load (2, 2): Bit 2: End - "ldr d2, [x3, #0x38]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v1.4h\n" - "ldr x23, [x25, #0x78]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v1.8h\n" - "add x23, x23, x10\n" + "ldr d2, [x23, #0x38]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x11, [x20, #0x78]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "add x11, x11, x24\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v20.4s, v23.8h, v2.8h\n" - "smlal v18.4s, v31.4h, v2.4h\n" - "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v24.4h, v2.4h\n" - "smlal2 v8.4s, v24.8h, v2.8h\n" - "tbz x4, #2, 33f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x4, #1, 32f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[6], [x23]\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" + "tbz x0, #2, 33f\n" + "ld1 { v27.s }[0], [x11], #0x4\n" + "tbz x0, #1, 32f\n" + "ld1 { v27.h }[2], [x11], #0x2\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[6], [x11]\n" "b 35f\n" "32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[4], [x11]\n" "b 35f\n" "33:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x4, #1, 34f\n" - "ld1 { v27.h }[0], [x23], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x0, #1, 34f\n" + "ld1 { v27.h }[0], [x11], #0x2\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[2], [x11]\n" "b 35f\n" "34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[0], [x11]\n" "35:" // Oddments: Load (2, 3): Bit 2: End - "ldr d3, [x3, #0x40]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v2.4h\n" - "ldr x20, [x25, #0x80]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v27.8h, v2.8h\n" - "add x20, x20, x10\n" + "ldr d3, [x23, #0x40]\n" + 
"ssubl v27.8h, v27.8b, v9.8b\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x12, [x20, #0x80]\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "add x12, x12, x24\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v20.4s, v31.8h, v3.8h\n" - "smlal v18.4s, v30.4h, v3.4h\n" - "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal v11.4s, v27.4h, v3.4h\n" - "smlal2 v8.4s, v27.8h, v3.8h\n" - "tbz x4, #2, 37f\n" - "ld1 { v23.s }[0], [x20], #0x4\n" - "tbz x4, #1, 36f\n" - "ld1 { v23.h }[2], [x20], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[6], [x20]\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" + "tbz x0, #2, 37f\n" + "ld1 { v23.s }[0], [x12], #0x4\n" + "tbz x0, #1, 36f\n" + "ld1 { v23.h }[2], [x12], #0x2\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[6], [x12]\n" "b 39f\n" "36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[4], [x20]\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[4], [x12]\n" "b 39f\n" "37:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x4, #1, 38f\n" - "ld1 { v23.h }[0], [x20], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[2], [x20]\n" + "tbz x0, #1, 38f\n" + "ld1 { v23.h }[0], [x12], #0x2\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[2], [x12]\n" "b 39f\n" "38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[0], [x20]\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[0], [x12]\n" "39:" // Oddments: Load (2, 4): Bit 2: End - "ldr d4, [x3, #0x48]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v10.4s, v23.4h, v3.4h\n" - "ldr x22, [x25, #0x88]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v23.8h, v3.8h\n" - "add x22, x22, x10\n" + "ldr d4, [x23, #0x48]\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x26, [x20, #0x88]\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "add x26, x26, x24\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v23.4h, v4.4h\n" - "smlal2 v8.4s, v23.8h, v4.8h\n" - "tbz x4, #2, 41f\n" - "ld1 { v28.s }[0], [x22], #0x4\n" - "tbz x4, #1, 40f\n" - "ld1 { v28.h }[2], [x22], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[6], [x22]\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" + "tbz x0, #2, 41f\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "tbz x0, #1, 40f\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[6], [x26]\n" "b 43f\n" "40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[4], [x22]\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[4], [x26]\n" "b 43f\n" "41:" // Oddments: Load (2, 5): Bit 2: Unset - "tbz x4, #1, 42f\n" - "ld1 { v28.h }[0], [x22], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[2], [x22]\n" + "tbz x0, #1, 42f\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[2], [x26]\n" "b 43f\n" "42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[0], [x22]\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[0], [x26]\n" "43:" // Oddments: Load (2, 5): Bit 2: End - "ldr d0, [x3, #0x50]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v10.4s, v28.4h, v4.4h\n" - "ldr x13, [x25, #0x90]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "add x13, x13, 
x10\n" + "ldr d0, [x23, #0x50]\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x14, [x20, #0x90]\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "add x14, x14, x24\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v20.4s, v22.8h, v0.8h\n" - "smlal v18.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 45f\n" - "ld1 { v31.s }[0], [x13], #0x4\n" - "tbz x4, #1, 44f\n" - "ld1 { v31.h }[2], [x13], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[6], [x13]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "tbz x0, #2, 45f\n" + "ld1 { v31.s }[0], [x14], #0x4\n" + "tbz x0, #1, 44f\n" + "ld1 { v31.h }[2], [x14], #0x2\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[6], [x14]\n" "b 47f\n" "44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[4], [x13]\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[4], [x14]\n" "b 47f\n" "45:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x4, #1, 46f\n" - "ld1 { v31.h }[0], [x13], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[2], [x13]\n" + "tbz x0, #1, 46f\n" + "ld1 { v31.h }[0], [x14], #0x2\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[2], [x14]\n" "b 47f\n" "46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[0], [x13]\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[0], [x14]\n" "47:" // Oddments: Load (3, 0): Bit 2: End - "ldr x21, [x25, #0x98]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v11.4s, v31.4h, v0.4h\n" - "smlal2 v8.4s, v31.8h, v0.8h\n" - "add x21, x21, x10\n" - "tbz x4, #2, 49f\n" - "ld1 { v30.s }[0], [x21], #0x4\n" - "tbz x4, #1, 48f\n" - "ld1 { v30.h }[2], [x21], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[6], [x21]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "ldr x15, [x20, #0x98]\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" + "add x15, x15, x24\n" + "tbz x0, #2, 49f\n" + "ld1 { v30.s }[0], [x15], #0x4\n" + "tbz x0, #1, 48f\n" + "ld1 { v30.h }[2], [x15], #0x2\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[6], [x15]\n" "b 51f\n" "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[4], [x21]\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[4], [x15]\n" "b 51f\n" "49:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x4, #1, 50f\n" - "ld1 { v30.h }[0], [x21], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[2], [x21]\n" + "tbz x0, #1, 50f\n" + "ld1 { v30.h }[0], [x15], #0x2\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[2], [x15]\n" "b 51f\n" "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[0], [x21]\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[0], [x15]\n" "51:" // Oddments: Load (3, 1): Bit 2: End - "ldr d1, [x3, #0x58]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v10.4s, v30.4h, v0.4h\n" - "ldr x14, [x25, #0xa0]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v30.8h, v0.8h\n" - "add x14, x14, x10\n" + "ldr d1, [x23, #0x58]\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x21, [x20, #0xa0]\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "add x21, x21, x24\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v20.4s, v25.8h, v1.8h\n" - "smlal v18.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "smlal v11.4s, v30.4h, v1.4h\n" - "smlal2 v8.4s, v30.8h, v1.8h\n" - "tbz x4, #2, 53f\n" - "ld1 { v26.s }[0], [x14], #0x4\n" - "tbz x4, #1, 52f\n" - "ld1 { v26.h }[2], [x14], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[6], [x14]\n" + 
"smlal2 v16.4s, v25.8h, v1.8h\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" + "tbz x0, #2, 53f\n" + "ld1 { v26.s }[0], [x21], #0x4\n" + "tbz x0, #1, 52f\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[6], [x21]\n" "b 55f\n" "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[4], [x14]\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[4], [x21]\n" "b 55f\n" "53:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x4, #1, 54f\n" - "ld1 { v26.h }[0], [x14], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[2], [x14]\n" + "tbz x0, #1, 54f\n" + "ld1 { v26.h }[0], [x21], #0x2\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[2], [x21]\n" "b 55f\n" "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[0], [x14]\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[0], [x21]\n" "55:" // Oddments: Load (3, 2): Bit 2: End - "ldr d2, [x3, #0x60]\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal v10.4s, v26.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v26.8h, v1.8h\n" - "add x11, x11, x10\n" + "ldr d2, [x23, #0x60]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x2, [x20, #0xa8]\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "add x2, x2, x24\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v20.4s, v24.8h, v2.8h\n" - "smlal v18.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "smlal v11.4s, v26.4h, v2.4h\n" - "smlal2 v8.4s, v26.8h, v2.8h\n" - "tbz x4, #2, 57f\n" - "ld1 { v25.s }[0], [x11], #0x4\n" - "tbz x4, #1, 56f\n" - "ld1 { v25.h }[2], [x11], #0x2\n" - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[6], [x11]\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" + "tbz x0, #2, 57f\n" + "ld1 { v25.s }[0], [x2], #0x4\n" + "tbz x0, #1, 56f\n" + "ld1 { v25.h }[2], [x2], #0x2\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[6], [x2]\n" "b 59f\n" "56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[4], [x11]\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[4], [x2]\n" "b 59f\n" "57:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x4, #1, 58f\n" - "ld1 { v25.h }[0], [x11], #0x2\n" - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[2], [x11]\n" + "tbz x0, #1, 58f\n" + "ld1 { v25.h }[0], [x2], #0x2\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[2], [x2]\n" "b 59f\n" "58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[0], [x11]\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[0], [x2]\n" "59:" // Oddments: Load (3, 3): Bit 2: End - "ldr d3, [x3, #0x68]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "add x24, x24, x10\n" + "ldr d3, [x23, #0x68]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x13, [x20, #0xb0]\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x13, x13, x24\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "smlal v18.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "tbz x4, #2, 61f\n" - "ld1 { v24.s }[0], [x24], #0x4\n" - "tbz x4, #1, 60f\n" - "ld1 { v24.h }[2], [x24], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { 
v24.b }[6], [x24]\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "tbz x0, #2, 61f\n" + "ld1 { v24.s }[0], [x13], #0x4\n" + "tbz x0, #1, 60f\n" + "ld1 { v24.h }[2], [x13], #0x2\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[6], [x13]\n" "b 63f\n" "60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[4], [x24]\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[4], [x13]\n" "b 63f\n" "61:" // Oddments: Load (3, 4): Bit 2: Unset - "tbz x4, #1, 62f\n" - "ld1 { v24.h }[0], [x24], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[2], [x24]\n" + "tbz x0, #1, 62f\n" + "ld1 { v24.h }[0], [x13], #0x2\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[2], [x13]\n" "b 63f\n" "62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[0], [x24]\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[0], [x13]\n" "63:" // Oddments: Load (3, 4): Bit 2: End - "ldr d4, [x3, #0x70]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "ldr x0, [x25, #0xb8]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "add x0, x0, x10\n" + "ldr d4, [x23, #0x70]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x9, [x20, #0xb8]\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "add x9, x9, x24\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v20.4s, v23.8h, v4.8h\n" - "smlal v18.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 65f\n" - "ld1 { v22.s }[0], [x0], #0x4\n" - "tbz x4, #1, 64f\n" - "ld1 { v22.h }[2], [x0], #0x2\n" - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[6], [x0]\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "tbz x0, #2, 65f\n" + "ld1 { v22.s }[0], [x9], #0x4\n" + "tbz x0, #1, 64f\n" + "ld1 { v22.h }[2], [x9], #0x2\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[6], [x9]\n" "b 67f\n" "64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[4], [x0]\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[4], [x9]\n" "b 67f\n" "65:" // Oddments: Load (3, 5): Bit 2: Unset - "tbz x4, #1, 66f\n" - "ld1 { v22.h }[0], [x0], #0x2\n" - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[2], [x0]\n" + "tbz x0, #1, 66f\n" + "ld1 { v22.h }[0], [x9], #0x2\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[2], [x9]\n" "b 67f\n" "66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[0], [x0]\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[0], [x9]\n" "67:" // Oddments: Load (3, 5): Bit 2: End - "ldr d0, [x3, #0x78]\n" - "ssubl v22.8h, v22.8b, v7.8b\n" - "smlal v10.4s, v22.4h, v4.4h\n" - "ldr x15, [x25, #0xc0]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v22.8h, v4.8h\n" - "add x15, x15, x10\n" + "ldr d0, [x23, #0x78]\n" + "ssubl v22.8h, v22.8b, v9.8b\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x19, [x20, #0xc0]\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "add x19, x19, x24\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "tbz x4, #2, 69f\n" - "ld1 { v27.s }[0], [x15], #0x4\n" - "tbz x4, #1, 68f\n" - "ld1 { v27.h }[2], [x15], #0x2\n" - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[6], [x15]\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" 
+ "smlal v17.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "tbz x0, #2, 69f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x0, #1, 68f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[6], [x19]\n" "b 71f\n" "68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[4], [x15]\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[4], [x19]\n" "b 71f\n" "69:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x4, #1, 70f\n" - "ld1 { v27.h }[0], [x15], #0x2\n" - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[2], [x15]\n" + "tbz x0, #1, 70f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[2], [x19]\n" "b 71f\n" "70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[0], [x15]\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[0], [x19]\n" "71:" // Oddments: Load (4, 0): Bit 2: End - "ldr x9, [x25, #0xc8]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v11.4s, v27.4h, v0.4h\n" - "smlal2 v8.4s, v27.8h, v0.8h\n" - "add x9, x9, x10\n" - "tbz x4, #2, 73f\n" - "ld1 { v23.s }[0], [x9], #0x4\n" - "tbz x4, #1, 72f\n" - "ld1 { v23.h }[2], [x9], #0x2\n" - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[6], [x9]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "ldr x28, [x20, #0xc8]\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" + "add x28, x28, x24\n" + "tbz x0, #2, 73f\n" + "ld1 { v23.s }[0], [x28], #0x4\n" + "tbz x0, #1, 72f\n" + "ld1 { v23.h }[2], [x28], #0x2\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[6], [x28]\n" "b 75f\n" "72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[4], [x9]\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[4], [x28]\n" "b 75f\n" "73:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x4, #1, 74f\n" - "ld1 { v23.h }[0], [x9], #0x2\n" - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[2], [x9]\n" + "tbz x0, #1, 74f\n" + "ld1 { v23.h }[0], [x28], #0x2\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[2], [x28]\n" "b 75f\n" "74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[0], [x9]\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[0], [x28]\n" "75:" // Oddments: Load (4, 1): Bit 2: End - "ldr d1, [x3, #0x80]\n" - "ssubl v23.8h, v23.8b, v7.8b\n" - "smlal v10.4s, v23.4h, v0.4h\n" - "ldr x27, [x25, #0xd0]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v23.8h, v0.8h\n" - "add x27, x27, x10\n" + "ldr d1, [x23, #0x80]\n" + "ssubl v23.8h, v23.8b, v9.8b\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x6, [x20, #0xd0]\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "add x6, x6, x24\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "smlal v18.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "smlal v11.4s, v23.4h, v1.4h\n" - "smlal2 v8.4s, v23.8h, v1.8h\n" - "tbz x4, #2, 77f\n" - "ld1 { v31.s }[0], [x27], #0x4\n" - "tbz x4, #1, 76f\n" - "ld1 { v31.h }[2], [x27], #0x2\n" - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[6], [x27]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" + "tbz x0, #2, 77f\n" + "ld1 { v31.s }[0], [x6], #0x4\n" + "tbz x0, #1, 76f\n" + "ld1 { v31.h }[2], [x6], #0x2\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[6], [x6]\n" "b 79f\n" "76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[4], [x27]\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[4], [x6]\n" "b 79f\n" "77:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x4, #1, 
78f\n" - "ld1 { v31.h }[0], [x27], #0x2\n" - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[2], [x27]\n" + "tbz x0, #1, 78f\n" + "ld1 { v31.h }[0], [x6], #0x2\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[2], [x6]\n" "b 79f\n" "78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[0], [x27]\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[0], [x6]\n" "79:" // Oddments: Load (4, 2): Bit 2: End - "ldr d2, [x3, #0x88]\n" - "ssubl v31.8h, v31.8b, v7.8b\n" - "smlal v10.4s, v31.4h, v1.4h\n" - "ldr x28, [x25, #0xd8]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "add x28, x28, x10\n" + "ldr d2, [x23, #0x88]\n" + "ssubl v31.8h, v31.8b, v9.8b\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x27, [x20, #0xd8]\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "add x27, x27, x24\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v20.4s, v26.8h, v2.8h\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v8.4s, v31.8h, v2.8h\n" - "tbz x4, #2, 81f\n" - "ld1 { v30.s }[0], [x28], #0x4\n" - "tbz x4, #1, 80f\n" - "ld1 { v30.h }[2], [x28], #0x2\n" - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[6], [x28]\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" + "tbz x0, #2, 81f\n" + "ld1 { v30.s }[0], [x27], #0x4\n" + "tbz x0, #1, 80f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[6], [x27]\n" "b 83f\n" "80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[4], [x28]\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[4], [x27]\n" "b 83f\n" "81:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x4, #1, 82f\n" - "ld1 { v30.h }[0], [x28], #0x2\n" - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[2], [x28]\n" + "tbz x0, #1, 82f\n" + "ld1 { v30.h }[0], [x27], #0x2\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[2], [x27]\n" "b 83f\n" "82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[0], [x28]\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[0], [x27]\n" "83:" // Oddments: Load (4, 3): Bit 2: End - "ldr d3, [x3, #0x90]\n" - "ssubl v30.8h, v30.8b, v7.8b\n" - "smlal v10.4s, v30.4h, v2.4h\n" - "ldr x12, [x25, #0xe0]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v30.8h, v2.8h\n" - "add x12, x12, x10\n" + "ldr d3, [x23, #0x90]\n" + "ssubl v30.8h, v30.8b, v9.8b\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x11, [x20, #0xe0]\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "add x11, x11, x24\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v30.4h, v3.4h\n" - "smlal2 v8.4s, v30.8h, v3.8h\n" - "tbz x4, #2, 85f\n" - "ld1 { v28.s }[0], [x12], #0x4\n" - "tbz x4, #1, 84f\n" - "ld1 { v28.h }[2], [x12], #0x2\n" - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[6], [x12]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" + "tbz x0, #2, 85f\n" + "ld1 { v28.s }[0], [x11], #0x4\n" + "tbz x0, #1, 84f\n" + "ld1 { v28.h }[2], [x11], #0x2\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[6], [x11]\n" "b 87f\n" "84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[4], [x12]\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[4], [x11]\n" "b 87f\n" "85:" // Oddments: Load (4, 4): 
Bit 2: Unset - "tbz x4, #1, 86f\n" - "ld1 { v28.h }[0], [x12], #0x2\n" - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[2], [x12]\n" + "tbz x0, #1, 86f\n" + "ld1 { v28.h }[0], [x11], #0x2\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[2], [x11]\n" "b 87f\n" "86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[0], [x12]\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[0], [x11]\n" "87:" // Oddments: Load (4, 4): Bit 2: End - "ldr d4, [x3, #0x98]\n" - "ssubl v28.8h, v28.8b, v7.8b\n" - "smlal v10.4s, v28.4h, v3.4h\n" - "ldr x7, [x25, #0xe8]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v28.8h, v3.8h\n" - "add x7, x7, x10\n" + "ldr d4, [x23, #0x98]\n" + "ssubl v28.8h, v28.8b, v9.8b\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x17, [x20, #0xe8]\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "add x17, x17, x24\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "smlal v18.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "smlal v11.4s, v28.4h, v4.4h\n" - "smlal2 v8.4s, v28.8h, v4.8h\n" - "tbz x4, #2, 89f\n" - "ld1 { v26.s }[0], [x7], #0x4\n" - "tbz x4, #1, 88f\n" - "ld1 { v26.h }[2], [x7], #0x2\n" - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[6], [x7]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" + "tbz x0, #2, 89f\n" + "ld1 { v26.s }[0], [x17], #0x4\n" + "tbz x0, #1, 88f\n" + "ld1 { v26.h }[2], [x17], #0x2\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[6], [x17]\n" "b 91f\n" "88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[4], [x7]\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[4], [x17]\n" "b 91f\n" "89:" // Oddments: Load (4, 5): Bit 2: Unset - "tbz x4, #1, 90f\n" - "ld1 { v26.h }[0], [x7], #0x2\n" - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[2], [x7]\n" + "tbz x0, #1, 90f\n" + "ld1 { v26.h }[0], [x17], #0x2\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[2], [x17]\n" "b 91f\n" "90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[0], [x7]\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[0], [x17]\n" "91:" // Oddments: Load (4, 5): Bit 2: End - "ldr d0, [x3, #0xa0]\n" - "ssubl v26.8h, v26.8b, v7.8b\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "ssubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "add x26, x26, x10\n" + "ldr d0, [x23, #0xa0]\n" + "ssubl v26.8h, v26.8b, v9.8b\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x5, [x20, #0xf0]\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "add x5, x5, x24\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v20.4s, v27.8h, v0.8h\n" - "smlal v18.4s, v23.4h, v0.4h\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "tbz x4, #2, 93f\n" - "ld1 { v25.s }[0], [x26], #0x4\n" - "tbz x4, #1, 92f\n" - "ld1 { v25.h }[2], [x26], #0x2\n" - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[6], [x26]\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "tbz x0, #2, 93f\n" + "ld1 { v25.s }[0], [x5], #0x4\n" + "tbz x0, #1, 92f\n" + "ld1 { v25.h }[2], [x5], #0x2\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[6], [x5]\n" "b 95f\n" "92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[4], [x26]\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[4], [x5]\n" "b 95f\n" "93:" // Oddments: Load (5, 0): Bit 2: Unset - "tbz x4, #1, 94f\n" - "ld1 { v25.h }[0], [x26], #0x2\n" - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[2], 
[x26]\n" + "tbz x0, #1, 94f\n" + "ld1 { v25.h }[0], [x5], #0x2\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[2], [x5]\n" "b 95f\n" "94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[0], [x26]\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[0], [x5]\n" "95:" // Oddments: Load (5, 0): Bit 2: End - "ldr x23, [x25, #0xf8]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v8.4s, v25.8h, v0.8h\n" - "add x23, x23, x10\n" - "tbz x4, #2, 97f\n" - "ld1 { v24.s }[0], [x23], #0x4\n" - "tbz x4, #1, 96f\n" - "ld1 { v24.h }[2], [x23], #0x2\n" - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[6], [x23]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "add x25, x25, x24\n" + "tbz x0, #2, 97f\n" + "ld1 { v24.s }[0], [x25], #0x4\n" + "tbz x0, #1, 96f\n" + "ld1 { v24.h }[2], [x25], #0x2\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[6], [x25]\n" "b 99f\n" "96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[4], [x23]\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[4], [x25]\n" "b 99f\n" "97:" // Oddments: Load (5, 1): Bit 2: Unset - "tbz x4, #1, 98f\n" - "ld1 { v24.h }[0], [x23], #0x2\n" - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[2], [x23]\n" + "tbz x0, #1, 98f\n" + "ld1 { v24.h }[0], [x25], #0x2\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[2], [x25]\n" "b 99f\n" "98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[0], [x23]\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[0], [x25]\n" "99:" // Oddments: Load (5, 1): Bit 2: End - "ldr d1, [x3, #0xa8]\n" - "ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v0.4h\n" - "ldr x22, [x25, #0x100]\n" - "ssubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v0.8h\n" - "add x22, x22, x10\n" + "ldr d1, [x23, #0xa8]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x26, [x20, #0x100]\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" + "add x26, x26, x24\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v20.4s, v23.8h, v1.8h\n" - "smlal v18.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v8.4s, v24.8h, v1.8h\n" - "tbz x4, #2, 101f\n" - "ld1 { v27.s }[0], [x22], #0x4\n" - "tbz x4, #1, 100f\n" - "ld1 { v27.h }[2], [x22], #0x2\n" - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[6], [x22]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "tbz x0, #2, 101f\n" + "ld1 { v27.s }[0], [x26], #0x4\n" + "tbz x0, #1, 100f\n" + "ld1 { v27.h }[2], [x26], #0x2\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[6], [x26]\n" "b 103f\n" "100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[4], [x22]\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[4], [x26]\n" "b 103f\n" "101:" // Oddments: Load (5, 2): Bit 2: Unset - "tbz x4, #1, 102f\n" - "ld1 { v27.h }[0], [x22], #0x2\n" - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[2], [x22]\n" + "tbz x0, #1, 102f\n" + "ld1 { v27.h }[0], [x26], #0x2\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[2], [x26]\n" "b 103f\n" "102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[0], [x22]\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[0], [x26]\n" "103:" // Oddments: Load (5, 2): Bit 2: End - "ldr d2, [x3, #0xb0]\n" - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v1.4h\n" - "ldr x20, 
[x25, #0x108]\n" - "ssubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v27.8h, v1.8h\n" - "add x20, x20, x10\n" + "ldr d2, [x23, #0xb0]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x12, [x20, #0x108]\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" + "add x12, x12, x24\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v20.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "smlal v11.4s, v27.4h, v2.4h\n" - "smlal2 v8.4s, v27.8h, v2.8h\n" - "tbz x4, #2, 105f\n" - "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x4, #1, 104f\n" - "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[6], [x20]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "tbz x0, #2, 105f\n" + "ld1 { v25.s }[0], [x12], #0x4\n" + "tbz x0, #1, 104f\n" + "ld1 { v25.h }[2], [x12], #0x2\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[6], [x12]\n" "b 107f\n" "104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[4], [x20]\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[4], [x12]\n" "b 107f\n" "105:" // Oddments: Load (5, 3): Bit 2: Unset - "tbz x4, #1, 106f\n" - "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[2], [x20]\n" + "tbz x0, #1, 106f\n" + "ld1 { v25.h }[0], [x12], #0x2\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[2], [x12]\n" "b 107f\n" "106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[0], [x20]\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[0], [x12]\n" "107:" // Oddments: Load (5, 3): Bit 2: End - "ldr d3, [x3, #0xb8]\n" - "ssubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "ldr x13, [x25, #0x110]\n" - "ssubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "add x13, x13, x10\n" + "ldr d3, [x23, #0xb8]\n" + "ssubl v25.8h, v25.8b, v9.8b\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x14, [x20, #0x110]\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x14, x14, x24\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v20.4s, v30.8h, v3.8h\n" - "smlal v18.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "tbz x4, #2, 109f\n" - "ld1 { v24.s }[0], [x13], #0x4\n" - "tbz x4, #1, 108f\n" - "ld1 { v24.h }[2], [x13], #0x2\n" - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[6], [x13]\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "tbz x0, #2, 109f\n" + "ld1 { v24.s }[0], [x14], #0x4\n" + "tbz x0, #1, 108f\n" + "ld1 { v24.h }[2], [x14], #0x2\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[6], [x14]\n" "b 111f\n" "108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[4], [x13]\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[4], [x14]\n" "b 111f\n" "109:" // Oddments: Load (5, 4): Bit 2: Unset - "tbz x4, #1, 110f\n" - "ld1 { v24.h }[0], [x13], #0x2\n" - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[2], [x13]\n" + "tbz x0, #1, 110f\n" + "ld1 { v24.h }[0], [x14], #0x2\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[2], [x14]\n" "b 111f\n" "110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[0], [x13]\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[0], [x14]\n" "111:" // Oddments: Load (5, 4): Bit 2: End - "ldr d4, [x3, #0xc0]\n" - 
"ssubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "ldr x21, [x25, #0x118]\n" - "ssubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "add x21, x21, x10\n" + "ldr d4, [x23, #0xc0]\n" + "ssubl v24.8h, v24.8b, v9.8b\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x21, [x20, #0x118]\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "add x21, x21, x24\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v20.4s, v28.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 113f\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "tbz x0, #2, 113f\n" "ld1 { v27.s }[0], [x21], #0x4\n" - "tbz x4, #1, 112f\n" + "tbz x0, #1, 112f\n" "ld1 { v27.h }[2], [x21], #0x2\n" - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[6], [x21]\n" "b 115f\n" "112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[4], [x21]\n" "b 115f\n" "113:" // Oddments: Load (5, 5): Bit 2: Unset - "tbz x4, #1, 114f\n" + "tbz x0, #1, 114f\n" "ld1 { v27.h }[0], [x21], #0x2\n" - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[2], [x21]\n" "b 115f\n" "114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[0], [x21]\n" "115:" // Oddments: Load (5, 5): Bit 2: End - "ssubl v27.8h, v27.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v4.4h\n" - "smlal2 v9.4s, v27.8h, v4.8h\n" - "tbz x4, #2, 117f\n" - "ld1 { v6.4s }, [x2], #0x10\n" - "ld1 { v21.4s }, [x5], #0x10\n" - "tbz x4, #1, 116f\n" - "ld1 { v17.d }[0], [x2], #0x8\n" - "ld1 { v14.d }[0], [x5], #0x8\n" - "tbz x4, #0, 119f\n" - "ld1 { v17.s }[2], [x2]\n" - "ld1 { v14.s }[2], [x5]\n" + "ssubl v27.8h, v27.8b, v9.8b\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "smlal2 v5.4s, v27.8h, v4.8h\n" + "tbz x0, #2, 117f\n" + "ld1 { v12.4s }, [x10], #0x10\n" + "ld1 { v19.4s }, [x1], #0x10\n" + "tbz x0, #1, 116f\n" + "ld1 { v20.d }[0], [x10], #0x8\n" + "ld1 { v29.d }[0], [x1], #0x8\n" + "tbz x0, #0, 119f\n" + "ld1 { v20.s }[2], [x10]\n" + "ld1 { v29.s }[2], [x1]\n" "b 119f\n" "116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x4, #0, 119f\n" - "ld1 { v17.s }[0], [x2]\n" - "ld1 { v14.s }[0], [x5]\n" + "tbz x0, #0, 119f\n" + "ld1 { v20.s }[0], [x10]\n" + "ld1 { v29.s }[0], [x1]\n" "b 119f\n" "117:" // Oddments: Load requant params: Bit 2: Unset - "tbz x4, #1, 118f\n" - "ld1 { v6.d }[0], [x2], #0x8\n" - "ld1 { v21.d }[0], [x5], #0x8\n" - "tbz x4, #0, 119f\n" - "ld1 { v6.s }[2], [x2]\n" - "ld1 { v21.s }[2], [x5]\n" + "tbz x0, #1, 118f\n" + "ld1 { v12.d }[0], [x10], #0x8\n" + "ld1 { v19.d }[0], [x1], #0x8\n" + "tbz x0, #0, 119f\n" + "ld1 { v12.s }[2], [x10]\n" + "ld1 { v19.s }[2], [x1]\n" "b 119f\n" "118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 119f\n" - "ld1 { v6.s }[0], [x2]\n" - "ld1 { v21.s }[0], [x5]\n" + "tbz x0, #0, 119f\n" + "ld1 { v12.s }[0], [x10]\n" + "ld1 { v19.s }[0], [x1]\n" "119:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "add x17, x17, x1\n" - "sqrdmulh v20.4s, v20.4s, v17.4s\n" - "add x16, x16, x1\n" - "sqrdmulh v18.4s, v18.4s, v6.4s\n" - "add x6, x6, x1\n" - "sqrdmulh v5.4s, v5.4s, v17.4s\n" - "add x8, x8, x1\n" - "sqrdmulh v11.4s, v11.4s, v6.4s\n" - "and v1.16b, v15.16b, v21.16b\n" - "sshr v1.4s, v1.4s, 
#0x1f\n" - "and v29.16b, v20.16b, v14.16b\n" - "and v3.16b, v18.16b, v21.16b\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "and v2.16b, v5.16b, v14.16b\n" - "and v0.16b, v11.16b, v21.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v8.4s, v8.4s, v17.4s\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "add x16, x16, x22\n" + "add x8, x8, x22\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "add x4, x4, x22\n" + "add x7, x7, x22\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v22.4s\n" "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v1.4s\n" - "sqrdmulh v10.4s, v10.4s, v6.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "sqrdmulh v9.4s, v9.4s, v17.4s\n" - "sqadd v20.4s, v20.4s, v29.4s\n" - "sqadd v18.4s, v18.4s, v3.4s\n" - "srshl v15.4s, v15.4s, v21.4s\n" - "sqadd v5.4s, v5.4s, v2.4s\n" - "srshl v20.4s, v20.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v21.4s\n" - "add v15.4s, v15.4s, v19.4s\n" - "srshl v5.4s, v5.4s, v14.4s\n" - "add v20.4s, v20.4s, v19.4s\n" - "smin v15.4s, v15.4s, v12.4s\n" - "add v18.4s, v18.4s, v19.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v15.4s, v15.4s, v16.4s\n" - "smin v18.4s, v18.4s, v12.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "add v5.4s, v5.4s, v19.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "uzp1 v15.16b, v15.16b, v20.16b\n" - "smin v5.4s, v5.4s, v12.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "sqadd v11.4s, v11.4s, v0.4s\n" - "smax v5.4s, v5.4s, v16.4s\n" - "and v27.16b, v8.16b, v14.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v5.16b\n" - "srshl v11.4s, v11.4s, v21.4s\n" - "and v30.16b, v10.16b, v21.16b\n" - "sshr v30.4s, v30.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "add v11.4s, v11.4s, v19.4s\n" - "sqadd v8.4s, v8.4s, v27.4s\n" - "and v6.16b, v9.16b, v14.16b\n" - 
"sshr v6.4s, v6.4s, #0x1f\n" - "smin v11.4s, v11.4s, v12.4s\n" - "srshl v8.4s, v8.4s, v14.4s\n" - "sqadd v10.4s, v10.4s, v30.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "add v8.4s, v8.4s, v19.4s\n" - "srshl v10.4s, v10.4s, v21.4s\n" - "sqadd v9.4s, v9.4s, v6.4s\n" - "smin v8.4s, v8.4s, v12.4s\n" - "add v10.4s, v10.4s, v19.4s\n" - "srshl v9.4s, v9.4s, v14.4s\n" - "smax v8.4s, v8.4s, v16.4s\n" - "smin v10.4s, v10.4s, v12.4s\n" - "uzp1 v11.16b, v11.16b, v8.16b\n" - "add v9.4s, v9.4s, v19.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "smax v10.4s, v10.4s, v16.4s\n" - "smin v9.4s, v9.4s, v12.4s\n" - "smax v9.4s, v9.4s, v16.4s\n" - "uzp1 v10.16b, v10.16b, v9.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" "uzp1 v10.16b, v10.16b, v10.16b\n" - "tbz x4, #2, 121f\n" - "st1 { v15.s }[0], [x17], #0x4\n" - "st1 { v18.s }[0], [x16], #0x4\n" - "st1 { v11.s }[0], [x6], #0x4\n" - "st1 { v10.s }[0], [x8], #0x4\n" - "tbz x4, #1, 120f\n" - "st1 { v15.h }[2], [x17], #0x2\n" - "st1 { v18.h }[2], [x16], #0x2\n" - "st1 { v11.h }[2], [x6], #0x2\n" - "st1 { v10.h }[2], [x8], #0x2\n" - "tbz x4, #0, 123f\n" - "st1 { v15.b }[6], [x17], #0x1\n" - "st1 { v18.b }[6], [x16], #0x1\n" - "st1 { v11.b }[6], [x6], #0x1\n" - "st1 { v10.b }[6], [x8], #0x1\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "tbz x0, #2, 121f\n" + "st1 { v15.s }[0], [x16], #0x4\n" + "st1 { v17.s }[0], [x8], #0x4\n" + "st1 { v10.s }[0], [x4], #0x4\n" + "st1 { v6.s }[0], [x7], #0x4\n" + "tbz x0, #1, 120f\n" + "st1 { v15.h }[2], [x16], #0x2\n" + "st1 { v17.h }[2], [x8], #0x2\n" + "st1 { v10.h }[2], [x4], #0x2\n" + "st1 { v6.h }[2], [x7], #0x2\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[6], [x16], #0x1\n" + "st1 { v17.b }[6], [x8], #0x1\n" + "st1 { v10.b }[6], [x4], #0x1\n" + "st1 { v6.b }[6], [x7], #0x1\n" "b 123f\n" "120:" // Oddments: Bit 2: Bit 1: Unset - "tbz x4, #0, 123f\n" - "st1 { v15.b }[4], [x17], #0x1\n" - "st1 { v18.b }[4], [x16], #0x1\n" - "st1 { v11.b }[4], [x6], #0x1\n" - "st1 { v10.b }[4], [x8], #0x1\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[4], [x16], #0x1\n" + "st1 { v17.b }[4], [x8], #0x1\n" + "st1 { v10.b }[4], [x4], #0x1\n" + "st1 { v6.b }[4], [x7], #0x1\n" "b 123f\n" "121:" // Oddments: Bit 2: Unset - "tbz x4, #1, 122f\n" - "st1 { v15.h }[0], [x17], #0x2\n" - "st1 { v18.h }[0], [x16], #0x2\n" - "st1 { v11.h }[0], [x6], #0x2\n" - "st1 { v10.h }[0], [x8], #0x2\n" - "tbz x4, #0, 123f\n" - "st1 { v15.b }[2], [x17], #0x1\n" - "st1 { v18.b }[2], [x16], #0x1\n" - "st1 { v11.b }[2], [x6], #0x1\n" - "st1 { v10.b }[2], [x8], #0x1\n" + "tbz x0, #1, 122f\n" + "st1 { v15.h }[0], [x16], #0x2\n" + "st1 { v17.h }[0], [x8], #0x2\n" + "st1 { v10.h }[0], [x4], #0x2\n" + "st1 { v6.h }[0], [x7], #0x2\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[2], [x16], #0x1\n" + "st1 { v17.b }[2], [x8], #0x1\n" + "st1 { v10.b }[2], [x4], #0x1\n" + "st1 { v6.b }[2], [x7], #0x1\n" "b 123f\n" "122:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 123f\n" - "st1 { v15.b }[0], [x17], #0x1\n" - "st1 { v18.b }[0], [x16], #0x1\n" - "st1 { v11.b }[0], [x6], #0x1\n" - "st1 { v10.b }[0], [x8], #0x1\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[0], [x16], #0x1\n" + "st1 { v17.b }[0], [x8], #0x1\n" + "st1 { v10.b }[0], [x4], #0x1\n" + "st1 { v6.b }[0], [x7], #0x1\n" "123:" // Oddments: Bit 2: End - "124:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, 
requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst.hpp index 4e845cceaf..9b1f7c239f 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,28 +28,23 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(const int8_t *const *const, int8_t *const *const, const void *, const arm_gemm::Requantize32&, const unsigned int, const unsigned int); -struct a64_s8q_nhwc_generic_output9_mla_depthfirst +class a64_s8q_nhwc_generic_output9_mla_depthfirst : public GenericDepthfirstKernelStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - typedef void (*kern_type)(const int8_t *const *const, int8_t *const *const, const void *, const arm_gemm::Requantize32&, const unsigned int, const unsigned int); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int n_output_points = 9; + KernelType kernel = a64_s8q_nhwc_generic_output9_mla_depthfirst_impl; - kern_type kernel = a64_s8q_nhwc_generic_output9_mla_depthfirst_impl; + public: + a64_s8q_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) : GenericDepthfirstKernelStrategy<int8_t, int8_t, int8_t, int32_t>(9, arm_gemm::VLType::None) {} - a64_s8q_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) {} + KernelType get_kernel() const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp index b9fef4f9ab..5ca3ccd4bf 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp +++ 
b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,39 +28,34 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); -struct a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst +struct a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst : DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - typedef void (*kern_type)(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - + using Parent = DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t>; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 4; - - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 9; - constexpr static unsigned int input_col_quads = 1; + a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) + : Parent(2, 4, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::None; } - a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp index 9a3eed47fb..0641229aa7 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
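The two header diffs above show the conversion pattern that runs through the whole patch: a plain struct of typedefs and constexpr constants becomes a class deriving from a templated strategy base, with the tile shape passed to the base constructor and the kernel pointer returned from a virtual accessor. A compilable sketch of the shape of that change, using a deliberately minimal stand-in base (MiniStrategy and its members are illustrative, not the real DepthfirstMultiplierStrategy interface):

#include <cstdint>

using KernelFn = void (*)(const int8_t *const *, int8_t *const *);

class MiniStrategy  // stand-in for the templated strategy base classes
{
public:
    MiniStrategy(unsigned int out_rows, unsigned int out_cols)
    : m_out_rows(out_rows), m_out_cols(out_cols) {}
    virtual ~MiniStrategy() = default;
    virtual KernelFn get_kernel() const = 0;  // replaces the public kern_type member
    unsigned int get_output_rows() const { return m_out_rows; }
    unsigned int get_output_cols() const { return m_out_cols; }
private:
    unsigned int m_out_rows, m_out_cols;
};

void example_impl(const int8_t *const *, int8_t *const *) {}  // stub kernel body

struct ExampleStrategy : MiniStrategy
{
    ExampleStrategy() : MiniStrategy(2, 4) {}  // output tile now set in one place
    KernelFn get_kernel() const override { return example_impl; }
};

Holding the shape in the base is what lets the per-kernel output_rows/output_cols (and, in the next hunks, input_rows/input_cols) constants disappear, and a caller holding a base-class pointer no longer needs the concrete kernel type to schedule work.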
* * SPDX-License-Identifier: MIT * @@ -26,6 +26,8 @@ #include <cstdint> +#if defined(__aarch64__) + #pragma once namespace arm_conv { @@ -33,34 +35,26 @@ namespace depthwise { void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); -struct a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst +struct a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst : DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - typedef void (*kern_type)(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - + using Parent = DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t>; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 4; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 8; - constexpr static unsigned int input_cols = 6; - constexpr static unsigned int input_col_quads = 1; + a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) + : Parent(4, 2, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::None; } - a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp index d0ae00d260..3dad8d5604 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
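Both packed_to_nhwc multiplier headers above also drop their input_rows/input_cols/input_col_quads constants. For a dense depthfirst kernel the input tile is fully determined by output tile, kernel size and stride, so the base class can derive it; the deleted values check out against that formula. A sketch of the arithmetic (the function name is illustrative):

// input extent = (output extent - 1) * stride + kernel extent
constexpr unsigned int input_extent(unsigned int out, unsigned int kern, unsigned int stride)
{
    return (out - 1) * stride + kern;
}

// 3x3 kernel, stride 2, 2x4 output tile -> 5x9 input (the removed constants)
static_assert(input_extent(2, 3, 2) == 5 && input_extent(4, 3, 2) == 9, "3x3 s2 tile");
// 5x5 kernel, stride 1, 4x2 output tile -> 8x6 input (the removed constants)
static_assert(input_extent(4, 5, 1) == 8 && input_extent(2, 5, 1) == 6, "5x5 s1 tile");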
* * SPDX-License-Identifier: MIT * @@ -28,31 +28,25 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl(const int8_t *const *const, int8_t *const *const, const int8_t *, const int32_t *, const unsigned int, const unsigned int, const int32_t *, const int32_t *, const int32_t *, const arm_gemm::Requantize32&); -struct a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst +struct a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst : GenericDepthfirstMultiplierKernelStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - typedef void (*kern_type)(const int8_t *const *const, int8_t *const *const, const int8_t *, const int32_t *, const unsigned int, const unsigned int, const int32_t *, const int32_t *, const int32_t *, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int output_rows(void) { return 2; }; - constexpr static unsigned int output_cols(void) { return 8; }; - - constexpr static unsigned int output_col_regs(void) { return 2; }; - - kern_type kernel = a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; - - a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) {} + using Parent = GenericDepthfirstMultiplierKernelStrategy<int8_t, int8_t, int8_t, int32_t>; + a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) + : Parent(2, 8, arm_gemm::VLType::None) + { + } + Parent::KernelType kernel = a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp index 0fde00ba37..22b6b657a5 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,39 +34,40 @@ namespace arm_conv { namespace depthwise { -void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); +void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32&, const int32_t *, const int32_t *, int8_t *const *); -struct a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst +class a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_3x3_dot::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_3x3_dot::get_packed_size; - - kern_type kernel = a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; - - a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) {} + a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } + + Parent::KernelType kernel = a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + size_t get_storage_size(const DepthwiseArgs &args) const override + { + return interleave_a64_s8q_3x3_dot::get_packed_size(args); + } + + void pack_parameters( + const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, + const void *weights, size_t ld_weight_col, size_t ld_weight_row + ) const override + { + interleave_a64_s8q_3x3_dot::pack_parameters( + args.input_channels, buffer, reinterpret_cast<const int32_t *>(biases), + reinterpret_cast<const int8_t *>(weights), qp, ld_weight_col, ld_weight_row + ); + } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp index bdbda178b3..761c7ec86e 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
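In the s8qs header above, parameter packing moves from constexpr function-pointer members to virtual overrides that forward to the same interleave_a64_s8q_3x3_dot helpers; the base now passes biases and weights as type-erased const void *, and each kernel casts back to its concrete element types before calling its interleave routine. A trimmed sketch of the forwarding pattern (MiniBase, Args and pack_s8_3x3 are illustrative stand-ins):

#include <cstddef>
#include <cstdint>

struct Args { unsigned int input_channels; };

// Stand-in for an interleave helper such as interleave_a64_s8q_3x3_dot::pack_parameters.
void pack_s8_3x3(unsigned int, void *, const int32_t *, const int8_t *) {}

struct MiniBase
{
    virtual ~MiniBase() = default;
    virtual size_t get_storage_size(const Args &) const = 0;
    virtual void pack_parameters(const Args &, void *buffer,
                                 const void *biases, const void *weights) const = 0;
};

struct S8Q3x3Dot : MiniBase
{
    size_t get_storage_size(const Args &args) const override
    {
        // Illustrative only: the real size comes from the interleave helper.
        return args.input_channels * (9 + sizeof(int32_t));
    }
    void pack_parameters(const Args &args, void *buffer,
                         const void *biases, const void *weights) const override
    {
        pack_s8_3x3(args.input_channels, buffer,
                    reinterpret_cast<const int32_t *>(biases),
                    reinterpret_cast<const int8_t *>(weights));
    }
};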
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,15 @@ namespace arm_conv { namespace depthwise { -void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *const inptrs, int8_t *const *const outptrs, const void *params, const uint64_t n_channels, const arm_gemm::Requantize32& qp) +void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl( + const unsigned int n_channels, + const int8_t *const *const inptrs, + const int8_t *params, + const int32_t *, // Bias, should be wrapped into the parameters + const arm_gemm::Requantize32& qp, + const int32_t *, const int32_t *, // Requant parameters, also wrapped + int8_t *const *const outptrs +) { __asm__ __volatile__( "ldp x15, x14, [%x[inptrs], #0x0]\n" @@ -1173,7 +1181,7 @@ void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *con "34:" // End "add SP, SP, #0x80\n" : [params] "+&r" (params) - : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) + : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp index 05eddd1853..00c8a3cde2 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
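The generic.cpp diff above moves the implementation onto the patch's common kernel signature: n_channels first, then input pointers, a packed parameter blob, and explicitly ignored bias/requant pointers (those now travel inside the blob, as the comments note). It also wraps n_channels in a (long unsigned int) cast in the asm operand list. The cast matters because the assembly consumes the operand as a full 64-bit X register, while a 32-bit unsigned int operand only pins the low half; on AArch64 a 64-to-32-bit truncation costs no instruction, so the upper bits could be stale. A minimal illustration (AArch64 GCC/Clang; the function is hypothetical):

#include <cstdint>

uint64_t read_full_register(unsigned int n)
{
    uint64_t out;
    __asm__(
        "mov %x[out], %x[n]\n"             // consumes n as a full X register
        : [out] "=r" (out)
        : [n] "r" ((long unsigned int) n)  // widening first guarantees zero-extension
    );
    return out;
}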
* * SPDX-License-Identifier: MIT * @@ -34,39 +34,40 @@ namespace arm_conv { namespace depthwise { -void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const uint8_t *const *, uint8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); +void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32&, const int32_t *, const int32_t *, uint8_t *const *); -struct a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst +class a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef uint32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(const uint8_t *const *, uint8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int32_t *, const uint8_t *, const arm_gemm::Requantize32 &, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_u8q_3x3_dot::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_u8q_3x3_dot::get_packed_size; - - kern_type kernel = a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; - - a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) {} + a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } + + Parent::KernelType kernel = a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + size_t get_storage_size(const DepthwiseArgs &args) const override + { + return interleave_a64_u8q_3x3_dot::get_packed_size(args); + } + + void pack_parameters( + const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, + const void *weights, size_t ld_weight_col, size_t ld_weight_row + ) const override + { + interleave_a64_u8q_3x3_dot::pack_parameters( + args.input_channels, buffer, reinterpret_cast<const int32_t *>(biases), + reinterpret_cast<const uint8_t *>(weights), qp, ld_weight_col, ld_weight_row + ); + } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp index 22c584f8e7..64b305c21d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,15 @@ namespace arm_conv { namespace depthwise { -void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const uint8_t *const *const inptrs, uint8_t *const *const outptrs, const void *params, const uint64_t n_channels, const arm_gemm::Requantize32& qp) +void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl( + const unsigned int n_channels, + const uint8_t *const *const inptrs, + const uint8_t *params, + const int32_t *, // Bias, should be wrapped into the parameters + const arm_gemm::Requantize32& qp, + const int32_t *, const int32_t *, // Requant parameters, also wrapped + uint8_t *const *const outptrs +) { __asm__ __volatile__( "ldp x13, x12, [%x[inptrs], #0x0]\n" @@ -1307,7 +1315,7 @@ void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const uint8_t *const *con "34:" // End "add SP, SP, #0x80\n" : [params] "+&r" (params) - : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) + : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index 09ba75f685..b55055fc93 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -36,37 +36,24 @@ namespace depthwise { void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); -struct a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst +class a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const uint8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_u8q_3x3_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_u8q_3x3_mla::get_packed_size; + a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} - kern_type kernel = a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } - a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp index b62ebb1687..453f9cfa92 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
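In the generic.cpp hunk just below, the Params struct loses its typed weights pointer in favour of const void *: per the comments in the new kernel signatures, the packed parameter blob produced by the interleave helpers carries biases, weights and (for quantized kernels) requant data together, so no single element type fits. A sketch of consuming such a blob; the layout here is invented for illustration, the real one is defined by the packing routine:

#include <cstdint>

struct Params
{
    unsigned long n_channels;
    const void *weights;  // packed blob: biases + weights (+ requant data)
};

inline const int32_t *blob_biases(const Params &p)
{
    return static_cast<const int32_t *>(p.weights);   // hypothetical: biases first
}

inline const uint8_t *blob_weights(const Params &p)
{
    const auto *base = static_cast<const uint8_t *>(p.weights);
    return base + p.n_channels * sizeof(int32_t);     // weights after the biases
}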
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const uint8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const uint8_t *const *inptrs_raw, - const uint8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -91,513 +91,497 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x17, #0x0\n" - "ldr x16, [%x[params], %[offsetof_Params_weights]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v22.16b }, [x24]\n" + "ld1r { v12.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v14.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" "mov x15, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x14, %x[params], %[offsetof_Params_inptrs]\n" + "mov x14, #0x0\n" + "ld1r { v15.8h }, [x19]\n" "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x12, x8, #0x3\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v14.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v9.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v15.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v24.4s }, [x20]\n" - "ld1r { v12.4s }, [x19]\n" - "ldp x10, x9, [x21, #0x0]\n" - "ldp x28, x27, [x21, #0x10]\n" - "cbz x12, 3f\n" - "subs x12, x12, #0x1\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" + "cbz x16, 3f\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q13, [x19, #0x0]\n" - "mov v17.16b, v13.16b\n" - "ldr q19, [x19, #0x10]\n" + "subs x16, x16, #0x1\n" + "mov v19.16b, v13.16b\n" + "ldr q26, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v13.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v23.16b, v13.16b\n" - "ldr d0, [x16, #0x0]\n" - "usubl v0.8h, v0.8b, v9.8b\n" - "mov v25.16b, v19.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v21.16b, v19.16b\n" - "ldr d2, [x16, #0x10]\n" - "usubl v1.8h, v1.8b, v9.8b\n" - "mov v20.16b, v19.16b\n" - "ldr d3, [x16, #0x18]\n" - "ldr d4, [x16, #0x20]\n" - "usubl v2.8h, v2.8b, v9.8b\n" - "ldr d5, [x16, #0x28]\n" - "usubl v3.8h, v3.8b, v9.8b\n" - "ldr d6, [x16, #0x30]\n" - "ldr d7, [x16, #0x38]\n" - "usubl v4.8h, v4.8b, v9.8b\n" - "ldr d8, [x16, #0x40]\n" - "usubl v5.8h, v5.8b, v9.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "usubl v6.8h, v6.8b, v9.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "usubl v7.8h, v7.8b, v9.8b\n" - "usubl v8.8h, v8.8b, v9.8b\n" - "ldr x19, [x14, #0x20]\n" - "ldr d31, [x23, x17]\n" - 
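The rescheduled prologue above hoists the five Requantize32 constants (a_offset, b_offset, c_offset, minval, maxval) into broadcast registers once, then removes the input and weight offsets with usubl before any multiply-accumulate. A scalar model of the requantisation pipeline these kernels implement, in the gemmlowp style; this is a model of the vector code (the and/sshr/sqadd fixup in the assembly realises the same rounding with slightly different tie-breaking), not a drop-in replacement:

#include <algorithm>
#include <cstdint>

int32_t sat_rounding_doubling_high_mul(int32_t a, int32_t b)  // models sqrdmulh
{
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;   // the only saturating case
    int64_t ab = (int64_t)a * b;
    int32_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
    return (int32_t)((ab + nudge) / (1LL << 31));
}

int32_t rounding_shift(int32_t x, int shift)                  // models srshl
{
    if (shift >= 0) return x << shift;
    int n = -shift;
    return (int32_t)(((int64_t)x + (1LL << (n - 1))) >> n);   // rounding right shift
}

uint8_t requantize(int32_t acc, int32_t mul, int shift,
                   int32_t c_offset, int32_t minval, int32_t maxval)
{
    // acc already has a_offset/b_offset removed (the usubl steps above).
    int32_t v = rounding_shift(sat_rounding_doubling_high_mul(acc, mul), shift) + c_offset;
    return (uint8_t)std::min(std::max(v, minval), maxval);   // smax/smin clamp, then narrow
}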
"usubl v31.8h, v31.8b, v14.8b\n" - "ldr d30, [x22, x17]\n" - "ldr d29, [x21, x17]\n" - "usubl v30.8h, v30.8b, v14.8b\n" - "ldr d28, [x20, x17]\n" - "ldr d27, [x19, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "usubl v27.8h, v27.8b, v14.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v11.16b, v26.16b\n" + "mov v18.16b, v13.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v24.16b, v26.16b\n" + "mov v9.16b, v13.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v26.16b\n" + "usubl v0.8h, v0.8b, v12.8b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "usubl v1.8h, v1.8b, v12.8b\n" + "usubl v2.8h, v2.8b, v12.8b\n" + "ldp x23, x22, [x12, #0x0]\n" + "ldp x21, x20, [x12, #0x10]\n" + "usubl v3.8h, v3.8b, v12.8b\n" + "usubl v4.8h, v4.8b, v12.8b\n" + "ldr x19, [x12, #0x20]\n" + "ldr d31, [x23, x15]\n" + "usubl v5.8h, v5.8b, v12.8b\n" + "usubl v6.8h, v6.8b, v12.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "usubl v7.8h, v7.8b, v12.8b\n" + "usubl v8.8h, v8.8b, v12.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "usubl v27.8h, v27.8b, v22.8b\n" "beq 2f\n" "1:" // Loop "smlal v13.4s, v31.4h, v4.4h\n" - "ldr x21, [x14, #0x28]\n" - "add x16, x16, #0x48\n" - "smlal2 v19.4s, v31.8h, v4.8h\n" - "ldr x20, [x14, #0x30]\n" - "subs x12, x12, #0x1\n" - "smlal v17.4s, v31.4h, v3.4h\n" - "ldr x26, [x14, #0x38]\n" - "smlal2 v25.4s, v31.8h, v3.8h\n" - "ldr x25, [x14, #0x40]\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "ldr x19, [x14, #0x48]\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "ldr x24, [x14, #0x50]\n" - "smlal v23.4s, v31.4h, v0.4h\n" - "ldr x23, [x14, #0x58]\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x21, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x21, [x12, #0x28]\n" + "ldr x26, [x12, #0x38]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ldr x20, [x12, #0x30]\n" + "ldr x25, [x12, #0x40]\n" "smlal v13.4s, v30.4h, v0.4h\n" - "ldr x22, [x14, #0x60]\n" - "smlal2 v19.4s, v30.8h, v0.8h\n" - "ldr d30, [x19, x17]\n" - "usubl v30.8h, v30.8b, v14.8b\n" - "smlal v17.4s, v29.4h, v2.4h\n" - "ldr x21, [x14, #0x68]\n" - "smlal2 v25.4s, v29.8h, v2.8h\n" - "ldr d29, [x20, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ldr x19, [x12, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "ldr x24, [x12, #0x50]\n" + "ldr x23, [x12, #0x58]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" "smlal v13.4s, v28.4h, v5.4h\n" - "ldr x20, [x14, #0x70]\n" - "smlal2 v19.4s, v28.8h, v5.8h\n" - "ldr x19, [x14, #0x78]\n" - "smlal v17.4s, v28.4h, v4.4h\n" - "ldr q26, [x13, #0x0]\n" - "smlal2 v25.4s, v28.8h, v4.8h\n" - "ldr q10, [x11, #0x0]\n" - "smlal v16.4s, v28.4h, v2.4h\n" - "ldr q11, [x13, #0x10]\n" - "add x13, x13, #0x20\n" - "smlal2 v21.4s, v28.8h, v2.8h\n" - "ldr q18, [x11, #0x10]\n" - "add x11, x11, #0x20\n" - "smlal v23.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x26, x17]\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal v16.4s, v31.4h, v6.4h\n" - "smlal2 
v21.4s, v31.8h, v6.8h\n" - "ldr d31, [x25, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "ldr x21, [x12, #0x68]\n" + "ldr x20, [x12, #0x70]\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "ldr x19, [x12, #0x78]\n" + "ldr q21, [x13, #0x0]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" "smlal v13.4s, v27.4h, v7.4h\n" - "smlal2 v19.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v27.4h, v6.4h\n" - "smlal2 v25.4s, v27.8h, v6.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "smlal v23.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "ldr q25, [x11, #0x0]\n" + "ldr q10, [x13, #0x10]\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "ldr q16, [x11, #0x10]\n" + "add x17, x17, #0x48\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" + "subs x16, x16, #0x1\n" + "add x13, x13, #0x20\n" "smlal v13.4s, v28.4h, v1.4h\n" - "smlal2 v19.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "ldr d29, [x24, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v0.4h\n" - "smlal2 v25.4s, v28.8h, v0.8h\n" - "ldr d28, [x23, x17]\n" - "usubl v28.8h, v28.8b, v14.8b\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "add x11, x11, #0x20\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" "smlal v13.4s, v31.4h, v2.4h\n" - "smlal2 v19.4s, v31.8h, v2.8h\n" - "smlal v17.4s, v31.4h, v1.4h\n" - "smlal2 v25.4s, v31.8h, v1.8h\n" - "ldr d31, [x22, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" "smlal v13.4s, v30.4h, v8.4h\n" - "smlal2 v19.4s, v30.8h, v8.8h\n" - "smlal v17.4s, v30.4h, v7.4h\n" - "smlal2 v25.4s, v30.8h, v7.8h\n" - "smlal v16.4s, v30.4h, v5.4h\n" - "smlal2 v21.4s, v30.8h, v5.8h\n" - "smlal v23.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x17]\n" - "usubl v30.8h, v30.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal v9.4s, v28.4h, v2.4h\n" "smlal v13.4s, v29.4h, v3.4h\n" - "smlal2 v19.4s, v29.8h, v3.8h\n" - "smlal v16.4s, v29.4h, v0.4h\n" - "smlal2 v21.4s, v29.8h, v0.8h\n" - "ldr d29, [x20, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v5.4h\n" - "smlal2 v25.4s, v28.8h, v5.8h\n" - "smlal v23.4s, v28.4h, v2.4h\n" - "smlal2 v20.4s, v28.8h, v2.8h\n" - "ldr d28, [x19, x17]\n" - "add x17, x17, #0x8\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, 
x15]\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x15, x15, #0x8\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" "smlal v13.4s, v31.4h, v6.4h\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal2 v19.4s, v31.8h, v6.8h\n" - "smlal v16.4s, v31.4h, v3.4h\n" - "smlal2 v21.4s, v31.8h, v3.8h\n" - "smlal v17.4s, v30.4h, v8.4h\n" - "smlal2 v25.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v5.4h\n" - "smlal2 v20.4s, v30.8h, v5.8h\n" - "smlal v16.4s, v29.4h, v7.4h\n" - "smlal2 v21.4s, v29.8h, v7.8h\n" - "smlal v23.4s, v29.4h, v6.4h\n" - "smlal2 v20.4s, v29.8h, v6.8h\n" - "smlal v16.4s, v28.4h, v8.4h\n" - "smlal2 v21.4s, v28.8h, v8.8h\n" - "smlal v23.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "sqrdmulh v13.4s, v13.4s, v26.4s\n" - "sqrdmulh v19.4s, v19.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v26.4s\n" - "sqrdmulh v25.4s, v25.4s, v11.4s\n" - "and v22.16b, v13.16b, v10.16b\n" - "sshr v22.4s, v22.4s, #0x1f\n" - "and v28.16b, v19.16b, v18.16b\n" - "and v3.16b, v17.16b, v10.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "and v6.16b, v25.16b, v18.16b\n" - "sqrdmulh v16.4s, v16.4s, v26.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v11.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v22.4s\n" - "sqrdmulh v23.4s, v23.4s, v26.4s\n" - "and v0.16b, v16.16b, v10.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v13.4s, v13.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v28.4s\n" - "sqadd v17.4s, v17.4s, v3.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "and v29.16b, v21.16b, v18.16b\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" - "add v13.4s, v13.4s, v15.4s\n" - "srshl v19.4s, v19.4s, v18.4s\n" - "srshl v17.4s, v17.4s, v10.4s\n" - "srshl v25.4s, v25.4s, v18.4s\n" - "smin v13.4s, v13.4s, v12.4s\n" - "add v19.4s, v19.4s, v15.4s\n" - "add v17.4s, v17.4s, v15.4s\n" - "smax v13.4s, v13.4s, v24.4s\n" - "smin v19.4s, v19.4s, v12.4s\n" - "smin v17.4s, v17.4s, v12.4s\n" - "add v25.4s, v25.4s, v15.4s\n" - "smax v19.4s, v19.4s, v24.4s\n" - "smax v17.4s, v17.4s, v24.4s\n" - "smin v25.4s, v25.4s, v12.4s\n" - "uzp1 v13.16b, v13.16b, v19.16b\n" - "sqadd v16.4s, v16.4s, v0.4s\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, 
v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" - "str d13, [x10, x15]\n" - "smax v25.4s, v25.4s, v24.4s\n" - "sqadd v21.4s, v21.4s, v29.4s\n" - "srshl v16.4s, v16.4s, v10.4s\n" - "and v3.16b, v23.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v25.16b\n" - "add v16.4s, v16.4s, v15.4s\n" - "srshl v21.4s, v21.4s, v18.4s\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x9, x15]\n" - "smin v16.4s, v16.4s, v12.4s\n" - "sqrdmulh v20.4s, v20.4s, v11.4s\n" - "add v21.4s, v21.4s, v15.4s\n" - "sqadd v23.4s, v23.4s, v3.4s\n" - "smax v16.4s, v16.4s, v24.4s\n" - "smin v21.4s, v21.4s, v12.4s\n" - "and v25.16b, v20.16b, v18.16b\n" - "sshr v25.4s, v25.4s, #0x1f\n" - "smax v21.4s, v21.4s, v24.4s\n" - "srshl v23.4s, v23.4s, v10.4s\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v23.4s, v23.4s, v15.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x28, x15]\n" - "smin v23.4s, v23.4s, v12.4s\n" - "sqadd v20.4s, v20.4s, v25.4s\n" - "smax v23.4s, v23.4s, v24.4s\n" - "srshl v20.4s, v20.4s, v18.4s\n" - "add v20.4s, v20.4s, v15.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v20.4s, v20.4s, v24.4s\n" - "uzp1 v23.16b, v23.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "str d23, [x27, x15]\n" - "add x15, x15, #0x8\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "str d13, [x10, x14]\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "str d19, [x9, x14]\n" + "str d18, [x28, x14]\n" + "str d9, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q13, [x19, #0x0]\n" - "mov v17.16b, v13.16b\n" - "ldr q19, [x19, #0x10]\n" + "add x14, x14, #0x8\n" + "ldr q26, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v13.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v23.16b, v13.16b\n" - "ldr d0, [x16, #0x0]\n" - "usubl v0.8h, v0.8b, v9.8b\n" - "mov v25.16b, v19.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v21.16b, v19.16b\n" - "ldr d2, [x16, #0x10]\n" - "usubl v1.8h, v1.8b, v9.8b\n" - "mov v20.16b, v19.16b\n" - "ldr d3, [x16, #0x18]\n" - "ldr d4, [x16, #0x20]\n" - "usubl v2.8h, v2.8b, v9.8b\n" - "ldr d5, [x16, #0x28]\n" - "usubl v3.8h, v3.8b, v9.8b\n" - "ldr d6, [x16, #0x30]\n" - "ldr d7, [x16, #0x38]\n" - "usubl v4.8h, v4.8b, v9.8b\n" - "ldr d8, [x16, #0x40]\n" - "usubl v5.8h, v5.8b, v9.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "usubl v6.8h, v6.8b, v9.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "usubl v7.8h, v7.8b, 
v9.8b\n" - "usubl v8.8h, v8.8b, v9.8b\n" - "ldr x19, [x14, #0x20]\n" - "ldr d31, [x23, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" - "ldr d30, [x22, x17]\n" - "ldr d29, [x21, x17]\n" - "usubl v30.8h, v30.8b, v14.8b\n" - "ldr d28, [x20, x17]\n" - "ldr d27, [x19, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "usubl v27.8h, v27.8b, v14.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v19.16b, v13.16b\n" + "mov v11.16b, v26.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v18.16b, v13.16b\n" + "mov v24.16b, v26.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v9.16b, v13.16b\n" + "mov v23.16b, v26.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "usubl v0.8h, v0.8b, v12.8b\n" + "usubl v1.8h, v1.8b, v12.8b\n" + "ldp x23, x22, [x12, #0x0]\n" + "ldp x21, x20, [x12, #0x10]\n" + "usubl v2.8h, v2.8b, v12.8b\n" + "usubl v3.8h, v3.8b, v12.8b\n" + "ldr x19, [x12, #0x20]\n" + "ldr d31, [x23, x15]\n" + "usubl v4.8h, v4.8b, v12.8b\n" + "usubl v5.8h, v5.8b, v12.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "usubl v6.8h, v6.8b, v12.8b\n" + "usubl v7.8h, v7.8b, v12.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "usubl v8.8h, v8.8b, v12.8b\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "usubl v27.8h, v27.8b, v22.8b\n" "bgt 1b\n" "2:" // Tail "smlal v13.4s, v31.4h, v4.4h\n" - "ldr x21, [x14, #0x28]\n" - "tst x8, #0x7\n" - "smlal2 v19.4s, v31.8h, v4.8h\n" - "ldr x20, [x14, #0x30]\n" - "smlal v17.4s, v31.4h, v3.4h\n" - "ldr x26, [x14, #0x38]\n" - "smlal2 v25.4s, v31.8h, v3.8h\n" - "ldr x25, [x14, #0x40]\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "ldr x19, [x14, #0x48]\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "ldr x24, [x14, #0x50]\n" - "smlal v23.4s, v31.4h, v0.4h\n" - "ldr x23, [x14, #0x58]\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x21, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x21, [x12, #0x28]\n" + "ldr x26, [x12, #0x38]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ldr x20, [x12, #0x30]\n" + "ldr x25, [x12, #0x40]\n" "smlal v13.4s, v30.4h, v0.4h\n" - "ldr x22, [x14, #0x60]\n" - "smlal2 v19.4s, v30.8h, v0.8h\n" - "ldr d30, [x19, x17]\n" - "usubl v30.8h, v30.8b, v14.8b\n" - "smlal v17.4s, v29.4h, v2.4h\n" - "ldr x21, [x14, #0x68]\n" - "smlal2 v25.4s, v29.8h, v2.8h\n" - "ldr d29, [x20, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "ldr x19, [x12, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "ldr x24, [x12, #0x50]\n" + "ldr x23, [x12, #0x58]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" "smlal v13.4s, v28.4h, v5.4h\n" - "ldr x20, [x14, #0x70]\n" - "smlal2 v19.4s, v28.8h, v5.8h\n" - "ldr x19, [x14, #0x78]\n" - "smlal v17.4s, v28.4h, v4.4h\n" - "ldr q26, [x13, #0x0]\n" - "smlal2 v25.4s, v28.8h, v4.8h\n" - "ldr q10, [x11, #0x0]\n" - "smlal v16.4s, v28.4h, v2.4h\n" - "ldr q11, [x13, #0x10]\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "ldr x21, [x12, #0x68]\n" + 
"ldr x20, [x12, #0x70]\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "ldr x19, [x12, #0x78]\n" + "ldr q21, [x13, #0x0]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v13.4s, v27.4h, v7.4h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "ldr q25, [x11, #0x0]\n" + "ldr q10, [x13, #0x10]\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "ldr q16, [x11, #0x10]\n" + "tst x8, #0x7\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" "add x13, x13, #0x20\n" - "smlal2 v21.4s, v28.8h, v2.8h\n" - "ldr q18, [x11, #0x10]\n" "add x11, x11, #0x20\n" - "smlal v23.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x26, x17]\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal v16.4s, v31.4h, v6.4h\n" - "smlal2 v21.4s, v31.8h, v6.8h\n" - "ldr d31, [x25, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" - "smlal v13.4s, v27.4h, v7.4h\n" - "smlal2 v19.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v27.4h, v6.4h\n" - "smlal2 v25.4s, v27.8h, v6.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "smlal v23.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" "smlal v13.4s, v28.4h, v1.4h\n" - "smlal2 v19.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "ldr d29, [x24, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v0.4h\n" - "smlal2 v25.4s, v28.8h, v0.8h\n" - "ldr d28, [x23, x17]\n" - "usubl v28.8h, v28.8b, v14.8b\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" "smlal v13.4s, v31.4h, v2.4h\n" - "smlal2 v19.4s, v31.8h, v2.8h\n" - "smlal v17.4s, v31.4h, v1.4h\n" - "smlal2 v25.4s, v31.8h, v1.8h\n" - "ldr d31, [x22, x17]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" "smlal v13.4s, v30.4h, v8.4h\n" - "smlal2 v19.4s, v30.8h, v8.8h\n" - "smlal v17.4s, v30.4h, v7.4h\n" - "smlal2 v25.4s, v30.8h, v7.8h\n" - "smlal v16.4s, v30.4h, v5.4h\n" - "smlal2 v21.4s, v30.8h, v5.8h\n" - "smlal v23.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x17]\n" - "usubl v30.8h, v30.8b, v14.8b\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal v9.4s, v28.4h, v2.4h\n" "smlal v13.4s, v29.4h, v3.4h\n" - "smlal2 v19.4s, v29.8h, v3.8h\n" - "smlal v16.4s, v29.4h, v0.4h\n" - "smlal2 v21.4s, v29.8h, v0.8h\n" - "ldr d29, [x20, x17]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v5.4h\n" - "smlal2 v25.4s, v28.8h, v5.8h\n" - "smlal v23.4s, v28.4h, v2.4h\n" - "smlal2 v20.4s, v28.8h, v2.8h\n" - "ldr d28, [x19, x17]\n" - "add x17, x17, #0x8\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal2 
v24.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x15]\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x15, x15, #0x8\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" "smlal v13.4s, v31.4h, v6.4h\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal2 v19.4s, v31.8h, v6.8h\n" - "smlal v16.4s, v31.4h, v3.4h\n" - "smlal2 v21.4s, v31.8h, v3.8h\n" - "smlal v17.4s, v30.4h, v8.4h\n" - "smlal2 v25.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v5.4h\n" - "smlal2 v20.4s, v30.8h, v5.8h\n" - "smlal v16.4s, v29.4h, v7.4h\n" - "smlal2 v21.4s, v29.8h, v7.8h\n" - "smlal v23.4s, v29.4h, v6.4h\n" - "smlal2 v20.4s, v29.8h, v6.8h\n" - "smlal v16.4s, v28.4h, v8.4h\n" - "smlal2 v21.4s, v28.8h, v8.8h\n" - "smlal v23.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "sqrdmulh v13.4s, v13.4s, v26.4s\n" - "sqrdmulh v19.4s, v19.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v26.4s\n" - "sqrdmulh v25.4s, v25.4s, v11.4s\n" - "and v22.16b, v13.16b, v10.16b\n" - "sshr v22.4s, v22.4s, #0x1f\n" - "and v28.16b, v19.16b, v18.16b\n" - "and v3.16b, v17.16b, v10.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "and v6.16b, v25.16b, v18.16b\n" - "sqrdmulh v16.4s, v16.4s, v26.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v11.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v22.4s\n" - "sqrdmulh v23.4s, v23.4s, v26.4s\n" - "and v0.16b, v16.16b, v10.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v13.4s, v13.4s, v10.4s\n" - "sqadd v19.4s, v19.4s, v28.4s\n" - "sqadd v17.4s, v17.4s, v3.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "and v29.16b, v21.16b, v18.16b\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" - "add v13.4s, v13.4s, v15.4s\n" - "srshl v19.4s, v19.4s, v18.4s\n" - "srshl v17.4s, v17.4s, v10.4s\n" - "srshl v25.4s, v25.4s, v18.4s\n" - "smin v13.4s, v13.4s, v12.4s\n" - "add v19.4s, v19.4s, v15.4s\n" - "add v17.4s, v17.4s, v15.4s\n" - "smax v13.4s, v13.4s, v24.4s\n" - "smin v19.4s, v19.4s, v12.4s\n" - "smin v17.4s, v17.4s, v12.4s\n" - "add v25.4s, v25.4s, v15.4s\n" - "smax v19.4s, v19.4s, v24.4s\n" - "smax v17.4s, v17.4s, v24.4s\n" - "smin v25.4s, v25.4s, v12.4s\n" - "uzp1 v13.16b, v13.16b, v19.16b\n" - "sqadd v16.4s, v16.4s, v0.4s\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, 
v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" "uzp1 v13.16b, v13.16b, v13.16b\n" - "str d13, [x10, x15]\n" - "smax v25.4s, v25.4s, v24.4s\n" - "sqadd v21.4s, v21.4s, v29.4s\n" - "srshl v16.4s, v16.4s, v10.4s\n" - "and v3.16b, v23.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v25.16b\n" - "add v16.4s, v16.4s, v15.4s\n" - "srshl v21.4s, v21.4s, v18.4s\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x9, x15]\n" - "smin v16.4s, v16.4s, v12.4s\n" - "sqrdmulh v20.4s, v20.4s, v11.4s\n" - "add v21.4s, v21.4s, v15.4s\n" - "sqadd v23.4s, v23.4s, v3.4s\n" - "smax v16.4s, v16.4s, v24.4s\n" - "smin v21.4s, v21.4s, v12.4s\n" - "and v25.16b, v20.16b, v18.16b\n" - "sshr v25.4s, v25.4s, #0x1f\n" - "smax v21.4s, v21.4s, v24.4s\n" - "srshl v23.4s, v23.4s, v10.4s\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v23.4s, v23.4s, v15.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x28, x15]\n" - "smin v23.4s, v23.4s, v12.4s\n" - "sqadd v20.4s, v20.4s, v25.4s\n" - "smax v23.4s, v23.4s, v24.4s\n" - "srshl v20.4s, v20.4s, v18.4s\n" - "add v20.4s, v20.4s, v15.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v20.4s, v20.4s, v24.4s\n" - "uzp1 v23.16b, v23.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "str d23, [x27, x15]\n" - "add x15, x15, #0x8\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "str d13, [x10, x14]\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "str d19, [x9, x14]\n" + "str d18, [x28, x14]\n" + "str d9, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 64f\n" - "add x16, x16, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "tbz x8, #2, 5f\n" "ld1 { v13.4s }, [x19], #0x10\n" "tbz x8, #1, 4f\n" - "ld1 { v19.d }[0], [x19], #0x8\n" + "ld1 { v26.d }[0], [x19], #0x8\n" "tbz x8, #0, 7f\n" - "ld1 { v19.s }[2], [x19]\n" + "ld1 { v26.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset "tbz x8, #0, 7f\n" - "ld1 { v19.s }[0], [x19]\n" + "ld1 { v26.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset "tbz x8, #1, 6f\n" @@ -609,38 +593,38 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 7f\n" "ld1 { v13.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v17.16b, v13.16b\n" - "ldr d0, [x16, #0x0]\n" - "mov v25.16b, v19.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v16.16b, v13.16b\n" - "ldr d2, [x16, #0x10]\n" - 
"mov v21.16b, v19.16b\n" - "ldr d3, [x16, #0x18]\n" - "mov v23.16b, v13.16b\n" - "ldr d4, [x16, #0x20]\n" - "usubl v0.8h, v0.8b, v9.8b\n" - "mov v20.16b, v19.16b\n" - "ldr d5, [x16, #0x28]\n" - "usubl v1.8h, v1.8b, v9.8b\n" - "ldr d6, [x16, #0x30]\n" - "usubl v2.8h, v2.8b, v9.8b\n" - "ldr d7, [x16, #0x38]\n" - "usubl v3.8h, v3.8b, v9.8b\n" - "ldr d8, [x16, #0x40]\n" - "usubl v4.8h, v4.8b, v9.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "usubl v5.8h, v5.8b, v9.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "usubl v6.8h, v6.8b, v9.8b\n" - "usubl v7.8h, v7.8b, v9.8b\n" - "ldr x19, [x14, #0x20]\n" - "usubl v8.8h, v8.8b, v9.8b\n" - "add x23, x23, x17\n" - "add x22, x22, x17\n" - "add x21, x21, x17\n" - "add x20, x20, x17\n" - "add x19, x19, x17\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v19.16b, v13.16b\n" + "mov v11.16b, v26.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v18.16b, v13.16b\n" + "mov v24.16b, v26.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v9.16b, v13.16b\n" + "mov v23.16b, v26.16b\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "usubl v0.8h, v0.8b, v12.8b\n" + "usubl v1.8h, v1.8b, v12.8b\n" + "ldr d8, [x17, #0x40]\n" + "ldp x23, x22, [x12, #0x0]\n" + "usubl v2.8h, v2.8b, v12.8b\n" + "usubl v3.8h, v3.8b, v12.8b\n" + "ldp x21, x20, [x12, #0x10]\n" + "ldr x19, [x12, #0x20]\n" + "usubl v4.8h, v4.8b, v12.8b\n" + "usubl v5.8h, v5.8b, v12.8b\n" + "usubl v6.8h, v6.8b, v12.8b\n" + "usubl v7.8h, v7.8b, v12.8b\n" + "usubl v8.8h, v8.8b, v12.8b\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x23], #0x4\n" "ld1 { v30.s }[0], [x22], #0x4\n" @@ -690,33 +674,33 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "ld1 { v28.b }[0], [x20]\n" "ld1 { v27.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x21, [x14, #0x28]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "usubl v31.8h, v31.8b, v22.8b\n" "smlal v13.4s, v31.4h, v4.4h\n" - "usubl v30.8h, v30.8b, v14.8b\n" - "smlal2 v19.4s, v31.8h, v4.8h\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v17.4s, v31.4h, v3.4h\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal2 v25.4s, v31.8h, v3.8h\n" - "usubl v27.8h, v27.8b, v14.8b\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "add x21, x21, x17\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "smlal v23.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" + "smlal2 v26.4s, v31.8h, v4.8h\n" + "ldr x21, [x12, #0x28]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "add x21, x21, x15\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "usubl v28.8h, v28.8b, v22.8b\n" "smlal v13.4s, v30.4h, v0.4h\n" - "smlal2 v19.4s, v30.8h, v0.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" - "smlal2 v25.4s, v29.8h, v2.8h\n" + "smlal2 v26.4s, v30.8h, v0.8h\n" + "usubl v27.8h, v27.8b, v22.8b\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" "smlal v13.4s, v28.4h, v5.4h\n" - "smlal2 v19.4s, v28.8h, v5.8h\n" - "smlal v17.4s, v28.4h, v4.4h\n" - "smlal2 v25.4s, v28.8h, v4.8h\n" - "smlal v16.4s, v28.4h, v2.4h\n" - "smlal2 v21.4s, v28.8h, v2.8h\n" - "smlal v23.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" + "smlal2 v26.4s, v28.8h, v5.8h\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 
v24.4s, v28.8h, v2.8h\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" "tbz x8, #2, 13f\n" "ld1 { v31.s }[0], [x21], #0x4\n" "tbz x8, #1, 12f\n" @@ -738,19 +722,19 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 15f\n" "ld1 { v31.b }[0], [x21]\n" "15:" // Oddments: Load (3, 0): Bit 2: End + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr x20, [x12, #0x30]\n" "smlal v13.4s, v27.4h, v7.4h\n" - "ldr x20, [x14, #0x30]\n" - "usubl v31.8h, v31.8b, v14.8b\n" - "smlal2 v19.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v27.4h, v6.4h\n" - "add x20, x20, x17\n" - "smlal2 v25.4s, v27.8h, v6.8h\n" - "smlal v23.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "smlal v16.4s, v31.4h, v6.4h\n" - "smlal2 v21.4s, v31.8h, v6.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "add x20, x20, x15\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" "tbz x8, #2, 17f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x8, #1, 16f\n" @@ -772,11 +756,11 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 19f\n" "ld1 { v29.b }[0], [x20]\n" "19:" // Oddments: Load (3, 3): Bit 2: End - "ldr x26, [x14, #0x38]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v23.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "add x26, x26, x17\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "ldr x26, [x12, #0x38]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "add x26, x26, x15\n" "tbz x8, #2, 21f\n" "ld1 { v28.s }[0], [x26], #0x4\n" "tbz x8, #1, 20f\n" @@ -798,13 +782,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 23f\n" "ld1 { v28.b }[0], [x26]\n" "23:" // Oddments: Load (0, 1): Bit 2: End - "ldr x25, [x14, #0x40]\n" - "usubl v28.8h, v28.8b, v14.8b\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "ldr x25, [x12, #0x40]\n" "smlal v13.4s, v28.4h, v1.4h\n" - "smlal2 v19.4s, v28.8h, v1.8h\n" - "add x25, x25, x17\n" - "smlal v17.4s, v28.4h, v0.4h\n" - "smlal2 v25.4s, v28.8h, v0.8h\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "add x25, x25, x15\n" "tbz x8, #2, 25f\n" "ld1 { v31.s }[0], [x25], #0x4\n" "tbz x8, #1, 24f\n" @@ -826,13 +810,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 27f\n" "ld1 { v31.b }[0], [x25]\n" "27:" // Oddments: Load (0, 2): Bit 2: End - "ldr x19, [x14, #0x48]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "ldr x19, [x12, #0x48]\n" "smlal v13.4s, v31.4h, v2.4h\n" - "smlal2 v19.4s, v31.8h, v2.8h\n" - "add x19, x19, x17\n" - "smlal v17.4s, v31.4h, v1.4h\n" - "smlal2 v25.4s, v31.8h, v1.8h\n" + "smlal2 v26.4s, v31.8h, v2.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "add x19, x19, x15\n" "tbz x8, #2, 29f\n" "ld1 { v30.s }[0], [x19], #0x4\n" "tbz x8, #1, 28f\n" @@ -854,17 +838,17 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 31f\n" "ld1 { v30.b }[0], [x19]\n" "31:" // Oddments: Load (2, 2): Bit 2: End - "ldr x24, [x14, #0x50]\n" - "usubl v30.8h, v30.8b, v14.8b\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x24, [x12, #0x50]\n" "smlal v13.4s, v30.4h, v8.4h\n" - "smlal2 v19.4s, v30.8h, v8.8h\n" - "add x24, x24, x17\n" - "smlal v17.4s, v30.4h, v7.4h\n" - "smlal2 v25.4s, v30.8h, 
v7.8h\n" - "smlal v16.4s, v30.4h, v5.4h\n" - "smlal2 v21.4s, v30.8h, v5.8h\n" - "smlal v23.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" + "smlal2 v26.4s, v30.8h, v8.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "add x24, x24, x15\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" "tbz x8, #2, 33f\n" "ld1 { v29.s }[0], [x24], #0x4\n" "tbz x8, #1, 32f\n" @@ -886,13 +870,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 35f\n" "ld1 { v29.b }[0], [x24]\n" "35:" // Oddments: Load (1, 0): Bit 2: End - "ldr x23, [x14, #0x58]\n" - "usubl v29.8h, v29.8b, v14.8b\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "ldr x23, [x12, #0x58]\n" "smlal v13.4s, v29.4h, v3.4h\n" - "smlal2 v19.4s, v29.8h, v3.8h\n" - "add x23, x23, x17\n" - "smlal v16.4s, v29.4h, v0.4h\n" - "smlal2 v21.4s, v29.8h, v0.8h\n" + "smlal2 v26.4s, v29.8h, v3.8h\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "add x23, x23, x15\n" "tbz x8, #2, 37f\n" "ld1 { v28.s }[0], [x23], #0x4\n" "tbz x8, #1, 36f\n" @@ -914,13 +898,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 39f\n" "ld1 { v28.b }[0], [x23]\n" "39:" // Oddments: Load (1, 3): Bit 2: End - "ldr x22, [x14, #0x60]\n" - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal v17.4s, v28.4h, v5.4h\n" - "smlal2 v25.4s, v28.8h, v5.8h\n" - "add x22, x22, x17\n" - "smlal v23.4s, v28.4h, v2.4h\n" - "smlal2 v20.4s, v28.8h, v2.8h\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "smlal v9.4s, v28.4h, v2.4h\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "add x22, x22, x15\n" "tbz x8, #2, 41f\n" "ld1 { v31.s }[0], [x22], #0x4\n" "tbz x8, #1, 40f\n" @@ -942,13 +926,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 43f\n" "ld1 { v31.b }[0], [x22]\n" "43:" // Oddments: Load (2, 0): Bit 2: End - "ldr x21, [x14, #0x68]\n" - "usubl v31.8h, v31.8b, v14.8b\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "ldr x21, [x12, #0x68]\n" "smlal v13.4s, v31.4h, v6.4h\n" - "smlal2 v19.4s, v31.8h, v6.8h\n" - "add x21, x21, x17\n" - "smlal v16.4s, v31.4h, v3.4h\n" - "smlal2 v21.4s, v31.8h, v3.8h\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "add x21, x21, x15\n" "tbz x8, #2, 45f\n" "ld1 { v30.s }[0], [x21], #0x4\n" "tbz x8, #1, 44f\n" @@ -970,13 +954,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 47f\n" "ld1 { v30.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "ldr x20, [x14, #0x70]\n" - "usubl v30.8h, v30.8b, v14.8b\n" - "smlal v17.4s, v30.4h, v8.4h\n" - "smlal2 v25.4s, v30.8h, v8.8h\n" - "add x20, x20, x17\n" - "smlal v23.4s, v30.4h, v5.4h\n" - "smlal2 v20.4s, v30.8h, v5.8h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x20, [x12, #0x70]\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x20, x20, x15\n" "tbz x8, #2, 49f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x8, #1, 48f\n" @@ -998,13 +982,13 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 51f\n" "ld1 { v29.b }[0], [x20]\n" "51:" // Oddments: Load (3, 1): Bit 2: End - "ldr x19, [x14, #0x78]\n" - "usubl v29.8h, v29.8b, v14.8b\n" - "smlal v16.4s, v29.4h, v7.4h\n" - "smlal2 v21.4s, v29.8h, v7.8h\n" - "add x19, x19, x17\n" - "smlal v23.4s, v29.4h, v6.4h\n" - "smlal2 
v20.4s, v29.8h, v6.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "ldr x19, [x12, #0x78]\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" + "add x19, x19, x15\n" "tbz x8, #2, 53f\n" "ld1 { v28.s }[0], [x19], #0x4\n" "tbz x8, #1, 52f\n" @@ -1026,160 +1010,150 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 55f\n" "ld1 { v28.b }[0], [x19]\n" "55:" // Oddments: Load (3, 2): Bit 2: End - "usubl v28.8h, v28.8b, v14.8b\n" - "smlal v16.4s, v28.4h, v8.4h\n" - "smlal2 v21.4s, v28.8h, v8.8h\n" - "smlal v23.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" "tbz x8, #2, 57f\n" - "ld1 { v26.4s }, [x13], #0x10\n" - "ld1 { v10.4s }, [x11], #0x10\n" + "ld1 { v21.4s }, [x13], #0x10\n" + "ld1 { v25.4s }, [x11], #0x10\n" "tbz x8, #1, 56f\n" - "ld1 { v11.d }[0], [x13], #0x8\n" - "ld1 { v18.d }[0], [x11], #0x8\n" + "ld1 { v10.d }[0], [x13], #0x8\n" + "ld1 { v16.d }[0], [x11], #0x8\n" "tbz x8, #0, 59f\n" - "ld1 { v11.s }[2], [x13]\n" - "ld1 { v18.s }[2], [x11]\n" + "ld1 { v10.s }[2], [x13]\n" + "ld1 { v16.s }[2], [x11]\n" "b 59f\n" "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset "tbz x8, #0, 59f\n" - "ld1 { v11.s }[0], [x13]\n" - "ld1 { v18.s }[0], [x11]\n" + "ld1 { v10.s }[0], [x13]\n" + "ld1 { v16.s }[0], [x11]\n" "b 59f\n" "57:" // Oddments: Load requant params: Bit 2: Unset "tbz x8, #1, 58f\n" - "ld1 { v26.d }[0], [x13], #0x8\n" - "ld1 { v10.d }[0], [x11], #0x8\n" + "ld1 { v21.d }[0], [x13], #0x8\n" + "ld1 { v25.d }[0], [x11], #0x8\n" "tbz x8, #0, 59f\n" - "ld1 { v26.s }[2], [x13]\n" - "ld1 { v10.s }[2], [x11]\n" + "ld1 { v21.s }[2], [x13]\n" + "ld1 { v25.s }[2], [x11]\n" "b 59f\n" "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 59f\n" - "ld1 { v26.s }[0], [x13]\n" - "ld1 { v10.s }[0], [x11]\n" + "ld1 { v21.s }[0], [x13]\n" + "ld1 { v25.s }[0], [x11]\n" "59:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v13.4s, v13.4s, v26.4s\n" - "add x10, x10, x15\n" - "sqrdmulh v19.4s, v19.4s, v11.4s\n" - "add x9, x9, x15\n" - "sqrdmulh v17.4s, v17.4s, v26.4s\n" - "add x28, x28, x15\n" - "sqrdmulh v25.4s, v25.4s, v11.4s\n" - "add x27, x27, x15\n" - "sqrdmulh v16.4s, v16.4s, v26.4s\n" - "and v22.16b, v13.16b, v10.16b\n" - "sshr v22.4s, v22.4s, #0x1f\n" - "and v28.16b, v19.16b, v18.16b\n" - "and v3.16b, v17.16b, v10.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "and v6.16b, v25.16b, v18.16b\n" - "and v0.16b, v16.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v11.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v22.4s\n" - "sqrdmulh v23.4s, v23.4s, v26.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "sqrdmulh v20.4s, v20.4s, v11.4s\n" - "sqadd v19.4s, v19.4s, v28.4s\n" - "sqadd v17.4s, v17.4s, v3.4s\n" - "srshl v13.4s, v13.4s, v10.4s\n" - "sqadd v25.4s, v25.4s, v6.4s\n" - "srshl v19.4s, v19.4s, v18.4s\n" - "srshl v17.4s, v17.4s, v10.4s\n" - "add v13.4s, v13.4s, v15.4s\n" - "srshl v25.4s, v25.4s, v18.4s\n" - "add v19.4s, v19.4s, v15.4s\n" - "smin v13.4s, v13.4s, v12.4s\n" - "add v17.4s, v17.4s, v15.4s\n" - "smin v19.4s, v19.4s, v12.4s\n" - "smax v13.4s, v13.4s, v24.4s\n" - "smin v17.4s, v17.4s, v12.4s\n" - "smax v19.4s, v19.4s, v24.4s\n" - "add v25.4s, v25.4s, v15.4s\n" - "smax v17.4s, v17.4s, v24.4s\n" - "uzp1 v13.16b, v13.16b, v19.16b\n" - "smin v25.4s, 
v25.4s, v12.4s\n" - "uzp1 v13.16b, v13.16b, v13.16b\n" - "sqadd v16.4s, v16.4s, v0.4s\n" - "smax v25.4s, v25.4s, v24.4s\n" - "and v29.16b, v21.16b, v18.16b\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" "sshr v29.4s, v29.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v25.16b\n" - "srshl v16.4s, v16.4s, v10.4s\n" - "and v3.16b, v23.16b, v10.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "add v16.4s, v16.4s, v15.4s\n" - "sqadd v21.4s, v21.4s, v29.4s\n" - "and v25.16b, v20.16b, v18.16b\n" - "sshr v25.4s, v25.4s, #0x1f\n" - "smin v16.4s, v16.4s, v12.4s\n" - "srshl v21.4s, v21.4s, v18.4s\n" - "sqadd v23.4s, v23.4s, v3.4s\n" - "smax v16.4s, v16.4s, v24.4s\n" - "add v21.4s, v21.4s, v15.4s\n" - "srshl v23.4s, v23.4s, v10.4s\n" - "sqadd v20.4s, v20.4s, v25.4s\n" - "smin v21.4s, v21.4s, v12.4s\n" - "add v23.4s, v23.4s, v15.4s\n" - "srshl v20.4s, v20.4s, v18.4s\n" - "smax v21.4s, v21.4s, v24.4s\n" - "smin v23.4s, v23.4s, v12.4s\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v20.4s, v20.4s, v15.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "smax v23.4s, v23.4s, v24.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v20.4s, v20.4s, v24.4s\n" - "uzp1 v23.16b, v23.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" "tbz x8, #2, 61f\n" "st1 { v13.s }[0], [x10], #0x4\n" - "st1 { v17.s }[0], [x9], #0x4\n" - "st1 { v16.s }[0], [x28], #0x4\n" - "st1 { 
v23.s }[0], [x27], #0x4\n" + "st1 { v19.s }[0], [x9], #0x4\n" + "st1 { v18.s }[0], [x28], #0x4\n" + "st1 { v9.s }[0], [x27], #0x4\n" "tbz x8, #1, 60f\n" "st1 { v13.h }[2], [x10], #0x2\n" - "st1 { v17.h }[2], [x9], #0x2\n" - "st1 { v16.h }[2], [x28], #0x2\n" - "st1 { v23.h }[2], [x27], #0x2\n" + "st1 { v19.h }[2], [x9], #0x2\n" + "st1 { v18.h }[2], [x28], #0x2\n" + "st1 { v9.h }[2], [x27], #0x2\n" "tbz x8, #0, 63f\n" "st1 { v13.b }[6], [x10], #0x1\n" - "st1 { v17.b }[6], [x9], #0x1\n" - "st1 { v16.b }[6], [x28], #0x1\n" - "st1 { v23.b }[6], [x27], #0x1\n" + "st1 { v19.b }[6], [x9], #0x1\n" + "st1 { v18.b }[6], [x28], #0x1\n" + "st1 { v9.b }[6], [x27], #0x1\n" "b 63f\n" "60:" // Oddments: Bit 2: Bit 1: Unset "tbz x8, #0, 63f\n" "st1 { v13.b }[4], [x10], #0x1\n" - "st1 { v17.b }[4], [x9], #0x1\n" - "st1 { v16.b }[4], [x28], #0x1\n" - "st1 { v23.b }[4], [x27], #0x1\n" + "st1 { v19.b }[4], [x9], #0x1\n" + "st1 { v18.b }[4], [x28], #0x1\n" + "st1 { v9.b }[4], [x27], #0x1\n" "b 63f\n" "61:" // Oddments: Bit 2: Unset "tbz x8, #1, 62f\n" "st1 { v13.h }[0], [x10], #0x2\n" - "st1 { v17.h }[0], [x9], #0x2\n" - "st1 { v16.h }[0], [x28], #0x2\n" - "st1 { v23.h }[0], [x27], #0x2\n" + "st1 { v19.h }[0], [x9], #0x2\n" + "st1 { v18.h }[0], [x28], #0x2\n" + "st1 { v9.h }[0], [x27], #0x2\n" "tbz x8, #0, 63f\n" "st1 { v13.b }[2], [x10], #0x1\n" - "st1 { v17.b }[2], [x9], #0x1\n" - "st1 { v16.b }[2], [x28], #0x1\n" - "st1 { v23.b }[2], [x27], #0x1\n" + "st1 { v19.b }[2], [x9], #0x1\n" + "st1 { v18.b }[2], [x28], #0x1\n" + "st1 { v9.b }[2], [x27], #0x1\n" "b 63f\n" "62:" // Oddments: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 63f\n" "st1 { v13.b }[0], [x10], #0x1\n" - "st1 { v17.b }[0], [x9], #0x1\n" - "st1 { v16.b }[0], [x28], #0x1\n" - "st1 { v23.b }[0], [x27], #0x1\n" + "st1 { v19.b }[0], [x9], #0x1\n" + "st1 { v18.b }[0], [x28], #0x1\n" + "st1 { v9.b }[0], [x27], #0x1\n" "63:" // Oddments: Bit 2: End - "64:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index 44817dbccf..00d1c5e868 100644 ---
a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,37 +36,24 @@ namespace depthwise { void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); -struct a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst +class a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const uint8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_u8q_3x3_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_u8q_3x3_mla::get_packed_size; + a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {} - kern_type kernel = a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } - a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp index 8d22836e5b..872f665b40 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const uint8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const uint8_t *const *inptrs_raw, - const uint8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -100,75 +100,75 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x5, #0x0\n" - "ldr x6, [%x[params], %[offsetof_Params_weights]]\n" - "mov x7, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x8, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x17, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x16, x4, #0x3\n" - "ldr x15, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v12.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v13.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v11.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v19.4s }, [x20]\n" - "ld1r { v14.4s }, [x19]\n" - "ldp x14, x13, [x21, #0x0]\n" - "ldp x12, x11, [x21, #0x10]\n" + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v12.16b }, [x24]\n" + "ld1r { v13.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v11.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" + "mov x15, #0x0\n" + "mov x14, #0x0\n" + "ld1r { v14.8h }, [x19]\n" + "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" "cbz x16, 3f\n" - "subs x16, x16, #0x1\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q15, [x19, #0x0]\n" - "mov v20.16b, v15.16b\n" + "subs x16, x16, #0x1\n" + "mov v9.16b, v15.16b\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v15.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v17.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v16.16b, v10.16b\n" + "mov v22.16b, v15.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v21.16b, v10.16b\n" + "mov v23.16b, v15.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v18.16b, v10.16b\n" "usubl v0.8h, v0.8b, v13.8b\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v22.16b, v10.16b\n" - "ldr d2, [x6, #0x10]\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" "usubl v1.8h, v1.8b, v13.8b\n" - "mov 
v18.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "ldr d4, [x6, #0x20]\n" "usubl v2.8h, v2.8b, v13.8b\n" - "ldr d5, [x6, #0x28]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" "usubl v3.8h, v3.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" - "ldr d7, [x6, #0x38]\n" "usubl v4.8h, v4.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" "usubl v5.8h, v5.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" "usubl v6.8h, v6.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" "usubl v7.8h, v7.8b, v13.8b\n" "usubl v8.8h, v8.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" - "ldp x20, x19, [x8, #0x30]\n" - "ldr d31, [x26, x5]\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" "usubl v31.8h, v31.8b, v12.8b\n" - "ldr d30, [x25, x5]\n" - "ldr d29, [x24, x5]\n" "usubl v30.8h, v30.8b, v12.8b\n" - "ldr d28, [x23, x5]\n" - "ldr d27, [x22, x5]\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "ldr d26, [x21, x5]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "ldr d25, [x20, x5]\n" - "ldr d24, [x19, x5]\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" "usubl v26.8h, v26.8b, v12.8b\n" "usubl v25.8h, v25.8b, v12.8b\n" @@ -176,259 +176,251 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "beq 2f\n" "1:" // Loop "smlal v15.4s, v31.4h, v8.4h\n" - "ldr x23, [x8, #0x40]\n" - "add x6, x6, #0x48\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x22, [x8, #0x48]\n" - "subs x16, x16, #0x1\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ldr x21, [x8, #0x50]\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ldr x20, [x8, #0x58]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x19, [x8, #0x60]\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ldr x10, [x8, #0x68]\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ldr x9, [x8, #0x70]\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x8, #0x78]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v30.4h, v0.4h\n" - "ldr x27, [x8, #0x80]\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x8, #0x88]\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "ldr x25, [x8, #0x90]\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x5]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" "usubl v28.8h, v28.8b, v12.8b\n" "smlal v15.4s, v29.4h, v1.4h\n" - "ldr x24, [x8, #0x98]\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x23, x5]\n" + "ldr d29, [x24, x15]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "ldr x23, [x8, #0xa0]\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x5]\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" "smlal v15.4s, v26.4h, v3.4h\n" - "ldr x22, [x8, #0xa8]\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x20, x5]\n" + "ldr d26, [x19, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v4.4h\n" - "ldr x21, [x8, #0xb0]\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x19, x5]\n" + "ldr d25, [x20, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, 
x15]\n" "smlal v15.4s, v24.4h, v2.4h\n" - "ldr x20, [x8, #0xb8]\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr x19, [x8, #0xc0]\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "ldr q21, [x17, #0x0]\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" - "ldr d24, [x9, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "ldr q30, [x15, #0x0]\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x5]\n" + "ldr x19, [x12, #0x70]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "ldr q31, [x17, #0x10]\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x5]\n" - "add x17, x17, #0x20\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" "smlal v15.4s, v27.4h, v5.4h\n" - "ldr q9, [x15, #0x10]\n" - "add x15, x15, #0x20\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x5]\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x5]\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x5]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x5]\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" - "smlal2 v18.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x5]\n" - "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "smlal v17.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x5]\n" - "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x5]\n" - "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "usubl v24.8h, 
v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x5]\n" - "add x5, x5, #0x8\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "usubl v29.8h, v29.8b, v12.8b\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v24.4h, v3.4h\n" - "smlal v16.4s, v24.4h, v5.4h\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "add x17, x17, #0x48\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "subs x16, x16, #0x1\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x13, x13, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "add x11, x11, #0x20\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "and v8.16b, v10.16b, v9.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "and v4.16b, v20.16b, v30.16b\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "add v15.4s, v15.4s, v11.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smax v20.4s, v20.4s, v19.4s\n" + "sqadd 
v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x14, x7]\n" - "smax v23.4s, v23.4s, v19.4s\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "str d20, [x13, x7]\n" - "smin v16.4s, v16.4s, v14.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "smax v16.4s, v16.4s, v19.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "add v22.4s, v22.4s, v11.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "uzp1 v16.16b, v16.16b, v22.16b\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x12, x7]\n" - "smin v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v11.4s\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x11, x7]\n" - "add x7, x7, #0x8\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" "ldr q15, [x19, #0x0]\n" - "mov v20.16b, v15.16b\n" + "add x14, x14, #0x8\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v16.16b, v15.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v17.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v22.16b, v15.16b\n" + "mov v21.16b, v10.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v15.16b\n" + "mov v18.16b, v10.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" "usubl v0.8h, v0.8b, v13.8b\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v22.16b, v10.16b\n" - "ldr d2, [x6, #0x10]\n" 
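The requantization tail is where this hunk earns most of its instruction-count drop. The old epilogue requantized each four-lane half entirely in 32 bits (sqrdmulh, rounding fixup, srshl, a 32-bit add of the output offset, then smin/smax) and only merged the halves at the final uzp1. The rewritten sequence multiplies high with sqdmulh, keeps the fixup and srshl in 32 bits, then narrows both halves to 16 bits with sqxtn/sqxtn2 so that a single sqadd/smax/smin triple covers eight channels at once; accordingly, the requantize constants are now broadcast as .8h vectors (v11 for the offset, v17 and v14 for the clamp bounds) instead of .4s. As a rough scalar model, each output lane goes through something like the sketch below, assuming gemmlowp-style fixed-point parameters; the function name is ours and the saturation points are simplified, so treat it as an illustration rather than code from this kernel.

    #include <algorithm>
    #include <cstdint>

    // Illustrative scalar model of the rewritten requantization epilogue
    // (hypothetical helper, not from the source). Assumes the per-lane shift
    // is stored as a non-positive value, as the srshl-based sequence implies.
    static inline uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                                          int16_t c_offset, int16_t minval, int16_t maxval)
    {
        // sqdmulh: high 32 bits of the doubling product 2 * acc * mul.
        int32_t res = static_cast<int32_t>((static_cast<int64_t>(acc) * mul) >> 31);

        // and/sshr/sqadd nudge plus srshl: rounding arithmetic shift right.
        if (shift < 0)
        {
            if (res < 0) res--;                 // fixup applied to negative lanes
            const int32_t s = -shift;
            res = (res + (1 << (s - 1))) >> s;  // assumes arithmetic >> on int32_t
        }

        // sqxtn/sqxtn2 narrow to 16 bits, then the shared 16-bit tail.
        res = std::clamp<int32_t>(res, INT16_MIN, INT16_MAX);  // sqxtn saturation
        res = res + c_offset;                   // sqadd v.8h, v.8h, v11.8h
        res = std::max<int32_t>(res, minval);   // smax v.8h, v.8h, v17.8h
        res = std::min<int32_t>(res, maxval);   // smin v.8h, v.8h, v14.8h
        return static_cast<uint8_t>(res);       // uzp1, then str d to the outptr
    }

A related change visible at the very end of the function is the shrunken clobber list: the rewritten kernel no longer touches x4 to x7, its bookkeeping having been repacked onto x8 through x17.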
"usubl v1.8h, v1.8b, v13.8b\n" - "mov v18.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "ldr d4, [x6, #0x20]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" "usubl v2.8h, v2.8b, v13.8b\n" - "ldr d5, [x6, #0x28]\n" "usubl v3.8h, v3.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" - "ldr d7, [x6, #0x38]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" "usubl v4.8h, v4.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" "usubl v5.8h, v5.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" "usubl v6.8h, v6.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" "usubl v7.8h, v7.8b, v13.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" "usubl v8.8h, v8.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" - "ldp x20, x19, [x8, #0x30]\n" - "ldr d31, [x26, x5]\n" "usubl v31.8h, v31.8b, v12.8b\n" - "ldr d30, [x25, x5]\n" - "ldr d29, [x24, x5]\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" "usubl v30.8h, v30.8b, v12.8b\n" - "ldr d28, [x23, x5]\n" - "ldr d27, [x22, x5]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "ldr d26, [x21, x5]\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "ldr d25, [x20, x5]\n" - "ldr d24, [x19, x5]\n" "usubl v27.8h, v27.8b, v12.8b\n" "usubl v26.8h, v26.8b, v12.8b\n" "usubl v25.8h, v25.8b, v12.8b\n" @@ -436,275 +428,267 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "bgt 1b\n" "2:" // Tail "smlal v15.4s, v31.4h, v8.4h\n" - "ldr x23, [x8, #0x40]\n" - "tst x4, #0x7\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x22, [x8, #0x48]\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "ldr x21, [x8, #0x50]\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "ldr x20, [x8, #0x58]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x19, [x8, #0x60]\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "ldr x10, [x8, #0x68]\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "ldr x9, [x8, #0x70]\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x8, #0x78]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v30.4h, v0.4h\n" - "ldr x27, [x8, #0x80]\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x8, #0x88]\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "ldr x25, [x8, #0x90]\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x5]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" "usubl v28.8h, v28.8b, v12.8b\n" "smlal v15.4s, v29.4h, v1.4h\n" - "ldr x24, [x8, #0x98]\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x23, x5]\n" + "ldr d29, [x24, x15]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "ldr x23, [x8, #0xa0]\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x5]\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" "smlal v15.4s, v26.4h, v3.4h\n" - "ldr x22, [x8, #0xa8]\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x20, x5]\n" + "ldr d26, [x19, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v4.4h\n" - "ldr x21, [x8, #0xb0]\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x19, x5]\n" + "ldr d25, [x20, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, 
x15]\n" "smlal v15.4s, v24.4h, v2.4h\n" - "ldr x20, [x8, #0xb8]\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr x19, [x8, #0xc0]\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "ldr q21, [x17, #0x0]\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" - "ldr d24, [x9, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "ldr q30, [x15, #0x0]\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x5]\n" + "ldr x19, [x12, #0x70]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "ldr q31, [x17, #0x10]\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x5]\n" - "add x17, x17, #0x20\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" "smlal v15.4s, v27.4h, v5.4h\n" - "ldr q9, [x15, #0x10]\n" - "add x15, x15, #0x20\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x5]\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x5]\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x5]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x5]\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x5]\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" - "smlal2 v18.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x5]\n" - "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "smlal v17.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x5]\n" - "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x5]\n" - "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "usubl v24.8h, 
v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x5]\n" - "add x5, x5, #0x8\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "usubl v29.8h, v29.8b, v12.8b\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "smlal v17.4s, v24.4h, v3.4h\n" - "smlal v16.4s, v24.4h, v5.4h\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "tst x8, #0x7\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "add x13, x13, #0x20\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x11, x11, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "and v8.16b, v10.16b, v9.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "and v4.16b, v20.16b, v30.16b\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "add v15.4s, v15.4s, v11.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smax v20.4s, v20.4s, v19.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr 
v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x14, x7]\n" - "smax v23.4s, v23.4s, v19.4s\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "str d20, [x13, x7]\n" - "smin v16.4s, v16.4s, v14.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "smax v16.4s, v16.4s, v19.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "add v22.4s, v22.4s, v11.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "uzp1 v16.16b, v16.16b, v22.16b\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x12, x7]\n" - "smin v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v11.4s\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "str d17, [x11, x7]\n" - "add x7, x7, #0x8\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 88f\n" - "add x6, x6, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x4, #2, 5f\n" + "tbz x8, #2, 5f\n" "ld1 { v15.4s }, [x19], #0x10\n" - "tbz x4, #1, 4f\n" + "tbz x8, #1, 4f\n" "ld1 { v10.d }[0], [x19], #0x8\n" - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x4, #1, 6f\n" + "tbz x8, #1, 6f\n" "ld1 { v15.d }[0], [x19], #0x8\n" - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - 
"mov v20.16b, v15.16b\n" - "ldr d0, [x6, #0x0]\n" - "mov v23.16b, v10.16b\n" - "ldr d1, [x6, #0x8]\n" - "mov v16.16b, v15.16b\n" - "ldr d2, [x6, #0x10]\n" - "mov v22.16b, v10.16b\n" - "ldr d3, [x6, #0x18]\n" - "mov v17.16b, v15.16b\n" - "ldr d4, [x6, #0x20]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v22.16b, v15.16b\n" + "mov v21.16b, v10.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v23.16b, v15.16b\n" "mov v18.16b, v10.16b\n" - "ldr d5, [x6, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "usubl v0.8h, v0.8b, v13.8b\n" "usubl v1.8h, v1.8b, v13.8b\n" - "ldr d6, [x6, #0x30]\n" + "ldr d8, [x17, #0x40]\n" + "ldp x26, x25, [x12, #0x0]\n" "usubl v2.8h, v2.8b, v13.8b\n" - "ldr d7, [x6, #0x38]\n" "usubl v3.8h, v3.8b, v13.8b\n" - "ldr d8, [x6, #0x40]\n" + "ldp x24, x23, [x12, #0x10]\n" + "ldp x22, x21, [x12, #0x20]\n" "usubl v4.8h, v4.8b, v13.8b\n" - "ldp x26, x25, [x8, #0x0]\n" "usubl v5.8h, v5.8b, v13.8b\n" - "ldp x24, x23, [x8, #0x10]\n" + "ldp x20, x19, [x12, #0x30]\n" "usubl v6.8h, v6.8b, v13.8b\n" "usubl v7.8h, v7.8b, v13.8b\n" - "ldp x22, x21, [x8, #0x20]\n" "usubl v8.8h, v8.8b, v13.8b\n" - "ldp x20, x19, [x8, #0x30]\n" - "add x26, x26, x5\n" - "add x25, x25, x5\n" - "add x24, x24, x5\n" - "add x23, x23, x5\n" - "add x22, x22, x5\n" - "add x21, x21, x5\n" - "add x20, x20, x5\n" - "add x19, x19, x5\n" - "tbz x4, #2, 9f\n" + "add x26, x26, x15\n" + "add x25, x25, x15\n" + "add x24, x24, x15\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" + "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x26], #0x4\n" "ld1 { v30.s }[0], [x25], #0x4\n" "ld1 { v29.s }[0], [x24], #0x4\n" @@ -713,7 +697,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.s }[0], [x21], #0x4\n" "ld1 { v25.s }[0], [x20], #0x4\n" "ld1 { v24.s }[0], [x19], #0x4\n" - "tbz x4, #1, 8f\n" + "tbz x8, #1, 8f\n" "ld1 { v31.h }[2], [x26], #0x2\n" "ld1 { v30.h }[2], [x25], #0x2\n" "ld1 { v29.h }[2], [x24], #0x2\n" @@ -722,7 +706,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[2], [x21], #0x2\n" "ld1 { v25.h }[2], [x20], #0x2\n" "ld1 { v24.h }[2], [x19], #0x2\n" - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[6], [x26]\n" "ld1 { v30.b }[6], [x25]\n" "ld1 { v29.b }[6], [x24]\n" @@ -733,7 +717,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[4], [x26]\n" "ld1 { v30.b }[4], [x25]\n" "ld1 { v29.b }[4], [x24]\n" @@ -744,7 +728,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x4, #1, 10f\n" + "tbz x8, #1, 10f\n" "ld1 { v31.h }[0], [x26], #0x2\n" "ld1 { v30.h }[0], [x25], #0x2\n" "ld1 { v29.h }[0], [x24], #0x2\n" @@ -753,7 +737,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[0], [x21], #0x2\n" "ld1 { v25.h }[0], [x20], #0x2\n" "ld1 { v24.h }[0], [x19], #0x2\n" - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[2], [x26]\n" "ld1 { v30.b }[2], [x25]\n" "ld1 { v29.b }[2], [x24]\n" @@ -764,7 +748,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: 
Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[0], [x26]\n" "ld1 { v30.b }[0], [x25]\n" "ld1 { v29.b }[0], [x24]\n" @@ -774,646 +758,636 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v25.b }[0], [x20]\n" "ld1 { v24.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x23, [x8, #0x40]\n" "usubl v31.8h, v31.8b, v12.8b\n" "smlal v15.4s, v31.4h, v8.4h\n" - "usubl v30.8h, v30.8b, v12.8b\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v31.4h, v6.4h\n" - "usubl v28.8h, v28.8b, v12.8b\n" - "smlal2 v23.4s, v31.8h, v6.8h\n" - "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "usubl v26.8h, v26.8b, v12.8b\n" - "smlal2 v22.4s, v31.8h, v2.8h\n" - "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v17.4s, v31.4h, v0.4h\n" - "usubl v24.8h, v24.8b, v12.8b\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "add x23, x23, x5\n" + "ldr x24, [x12, #0x40]\n" + "usubl v30.8h, v30.8b, v12.8b\n" "smlal v15.4s, v30.4h, v0.4h\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "smlal v20.4s, v28.4h, v1.4h\n" - "smlal2 v23.4s, v28.8h, v1.8h\n" + "add x24, x24, x15\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" "smlal v15.4s, v29.4h, v1.4h\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "smlal v20.4s, v27.4h, v2.4h\n" - "smlal2 v23.4s, v27.8h, v2.8h\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" "smlal v15.4s, v26.4h, v3.4h\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "smlal v20.4s, v24.4h, v0.4h\n" - "smlal2 v23.4s, v24.8h, v0.8h\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" "smlal v15.4s, v25.4h, v4.4h\n" "smlal2 v10.4s, v25.8h, v4.8h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "tbz x4, #2, 13f\n" - "ld1 { v29.s }[0], [x23], #0x4\n" - "tbz x4, #1, 12f\n" - "ld1 { v29.h }[2], [x23], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[6], [x23]\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[6], [x24]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[4], [x23]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[4], [x24]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - "tbz x4, #1, 14f\n" - "ld1 { v29.h }[0], [x23], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[2], [x23]\n" + "tbz x8, #1, 14f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[2], [x24]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v29.b }[0], [x23]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[0], [x24]\n" "15:" // Oddments: Load (1, 3): Bit 2: End - "ldr x22, [x8, #0x48]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v4.4h\n" - "smlal2 v23.4s, v29.8h, v4.8h\n" - "add x22, x22, x5\n" - "tbz x4, #2, 17f\n" - "ld1 { v28.s }[0], [x22], #0x4\n" - "tbz x4, #1, 16f\n" - "ld1 { v28.h }[2], [x22], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[6], [x22]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, 
v29.8h, v4.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 17f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[6], [x23]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[4], [x22]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[4], [x23]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x4, #1, 18f\n" - "ld1 { v28.h }[0], [x22], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[2], [x22]\n" + "tbz x8, #1, 18f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[2], [x23]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v28.b }[0], [x22]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[0], [x23]\n" "19:" // Oddments: Load (1, 4): Bit 2: End - "ldr x21, [x8, #0x50]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v5.4h\n" - "smlal2 v23.4s, v28.8h, v5.8h\n" - "add x21, x21, x5\n" - "tbz x4, #2, 21f\n" + "ldr x21, [x12, #0x50]\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 21f\n" "ld1 { v27.s }[0], [x21], #0x4\n" - "tbz x4, #1, 20f\n" + "tbz x8, #1, 20f\n" "ld1 { v27.h }[2], [x21], #0x2\n" - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[6], [x21]\n" "b 23f\n" "20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[4], [x21]\n" "b 23f\n" "21:" // Oddments: Load (1, 2): Bit 2: Unset - "tbz x4, #1, 22f\n" + "tbz x8, #1, 22f\n" "ld1 { v27.h }[0], [x21], #0x2\n" - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[2], [x21]\n" "b 23f\n" "22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 23f\n" + "tbz x8, #0, 23f\n" "ld1 { v27.b }[0], [x21]\n" "23:" // Oddments: Load (1, 2): Bit 2: End - "ldr x20, [x8, #0x58]\n" "usubl v27.8h, v27.8b, v12.8b\n" + "ldr x19, [x12, #0x58]\n" "smlal v15.4s, v27.4h, v5.4h\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "add x20, x20, x5\n" - "smlal v20.4s, v27.4h, v3.4h\n" - "smlal2 v23.4s, v27.8h, v3.8h\n" - "tbz x4, #2, 25f\n" - "ld1 { v26.s }[0], [x20], #0x4\n" - "tbz x4, #1, 24f\n" - "ld1 { v26.h }[2], [x20], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[6], [x20]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 25f\n" + "ld1 { v26.s }[0], [x19], #0x4\n" + "tbz x8, #1, 24f\n" + "ld1 { v26.h }[2], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[6], [x19]\n" "b 27f\n" "24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[4], [x20]\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[4], [x19]\n" "b 27f\n" "25:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x4, #1, 26f\n" - "ld1 { v26.h }[0], [x20], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[2], [x20]\n" + "tbz x8, #1, 26f\n" + "ld1 { v26.h }[0], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[2], [x19]\n" "b 27f\n" "26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v26.b }[0], [x20]\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[0], [x19]\n" "27:" // Oddments: Load (3, 0): Bit 2: End - "ldr x19, [x8, #0x60]\n" "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v16.4s, v26.4h, v3.4h\n" - "smlal2 v22.4s, v26.8h, v3.8h\n" - "add x19, x19, x5\n" - "tbz x4, #2, 29f\n" - "ld1 { v25.s }[0], [x19], #0x4\n" - "tbz x4, #1, 28f\n" - "ld1 { v25.h }[2], [x19], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[6], [x19]\n" + "ldr x20, 
[x12, #0x60]\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 29f\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[6], [x20]\n" "b 31f\n" "28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[4], [x19]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[4], [x20]\n" "b 31f\n" "29:" // Oddments: Load (2, 0): Bit 2: Unset - "tbz x4, #1, 30f\n" - "ld1 { v25.h }[0], [x19], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[2], [x19]\n" + "tbz x8, #1, 30f\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[2], [x20]\n" "b 31f\n" "30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v25.b }[0], [x19]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[0], [x20]\n" "31:" // Oddments: Load (2, 0): Bit 2: End - "ldr x10, [x8, #0x68]\n" "usubl v25.8h, v25.8b, v12.8b\n" + "ldr x19, [x12, #0x68]\n" "smlal v15.4s, v25.4h, v6.4h\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "add x10, x10, x5\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v22.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 33f\n" - "ld1 { v29.s }[0], [x10], #0x4\n" - "tbz x4, #1, 32f\n" - "ld1 { v29.h }[2], [x10], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[6], [x10]\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x19], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x19]\n" "b 35f\n" "32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[4], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x19]\n" "b 35f\n" "33:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x4, #1, 34f\n" - "ld1 { v29.h }[0], [x10], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[2], [x10]\n" + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[2], [x19]\n" "b 35f\n" "34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v29.b }[0], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x19]\n" "35:" // Oddments: Load (3, 1): Bit 2: End - "ldr x9, [x8, #0x70]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v16.4s, v29.4h, v4.4h\n" - "smlal2 v22.4s, v29.8h, v4.8h\n" - "add x9, x9, x5\n" - "tbz x4, #2, 37f\n" - "ld1 { v24.s }[0], [x9], #0x4\n" - "tbz x4, #1, 36f\n" - "ld1 { v24.h }[2], [x9], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[6], [x9]\n" + "ldr x19, [x12, #0x70]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 37f\n" + "ld1 { v24.s }[0], [x19], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v24.h }[2], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[6], [x19]\n" "b 39f\n" "36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[4], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[4], [x19]\n" "b 39f\n" "37:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x4, #1, 38f\n" - "ld1 { v24.h }[0], [x9], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[2], [x9]\n" + "tbz x8, #1, 38f\n" + "ld1 { v24.h }[0], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[2], [x19]\n" "b 39f\n" "38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v24.b }[0], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[0], [x19]\n" "39:" // Oddments: Load (2, 1): Bit 2: End - "ldr x28, [x8, #0x78]\n" "usubl 
v24.8h, v24.8b, v12.8b\n" + "ldr x22, [x12, #0x78]\n" "smlal v15.4s, v24.4h, v7.4h\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "add x28, x28, x5\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v22.4s, v24.8h, v1.8h\n" - "tbz x4, #2, 41f\n" - "ld1 { v27.s }[0], [x28], #0x4\n" - "tbz x4, #1, 40f\n" - "ld1 { v27.h }[2], [x28], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[6], [x28]\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 41f\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[6], [x22]\n" "b 43f\n" "40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[4], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[4], [x22]\n" "b 43f\n" "41:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x4, #1, 42f\n" - "ld1 { v27.h }[0], [x28], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[2], [x28]\n" + "tbz x8, #1, 42f\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[2], [x22]\n" "b 43f\n" "42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v27.b }[0], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[0], [x22]\n" "43:" // Oddments: Load (3, 3): Bit 2: End - "ldr x27, [x8, #0x80]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v17.4s, v27.4h, v4.4h\n" + "ldr x21, [x12, #0x80]\n" + "smlal v23.4s, v27.4h, v4.4h\n" "smlal2 v18.4s, v27.8h, v4.8h\n" - "add x27, x27, x5\n" - "tbz x4, #2, 45f\n" - "ld1 { v28.s }[0], [x27], #0x4\n" - "tbz x4, #1, 44f\n" - "ld1 { v28.h }[2], [x27], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[6], [x27]\n" + "add x21, x21, x15\n" + "tbz x8, #2, 45f\n" + "ld1 { v28.s }[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v28.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[6], [x21]\n" "b 47f\n" "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[4], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[4], [x21]\n" "b 47f\n" "45:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x4, #1, 46f\n" - "ld1 { v28.h }[0], [x27], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[2], [x27]\n" + "tbz x8, #1, 46f\n" + "ld1 { v28.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[2], [x21]\n" "b 47f\n" "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v28.b }[0], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "ldr x26, [x8, #0x88]\n" "usubl v28.8h, v28.8b, v12.8b\n" - "smlal v20.4s, v28.4h, v7.4h\n" - "smlal2 v23.4s, v28.8h, v7.8h\n" - "add x26, x26, x5\n" - "smlal v17.4s, v28.4h, v1.4h\n" + "ldr x20, [x12, #0x88]\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "smlal v23.4s, v28.4h, v1.4h\n" "smlal2 v18.4s, v28.8h, v1.8h\n" - "tbz x4, #2, 49f\n" - "ld1 { v26.s }[0], [x26], #0x4\n" - "tbz x4, #1, 48f\n" - "ld1 { v26.h }[2], [x26], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[6], [x26]\n" + "add x20, x20, x15\n" + "tbz x8, #2, 49f\n" + "ld1 { v26.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v26.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[6], [x20]\n" "b 51f\n" "48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[4], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[4], [x20]\n" "b 51f\n" "49:" // Oddments: Load (3, 4): Bit 2: Unset - "tbz x4, #1, 50f\n" - "ld1 { v26.h }[0], [x26], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v26.b 
}[2], [x26]\n" + "tbz x8, #1, 50f\n" + "ld1 { v26.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[2], [x20]\n" "b 51f\n" "50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v26.b }[0], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[0], [x20]\n" "51:" // Oddments: Load (3, 4): Bit 2: End - "ldr x25, [x8, #0x90]\n" "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v5.4h\n" + "ldr x23, [x12, #0x90]\n" + "smlal v23.4s, v26.4h, v5.4h\n" "smlal2 v18.4s, v26.8h, v5.8h\n" - "add x25, x25, x5\n" - "tbz x4, #2, 53f\n" - "ld1 { v25.s }[0], [x25], #0x4\n" - "tbz x4, #1, 52f\n" - "ld1 { v25.h }[2], [x25], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[6], [x25]\n" + "add x23, x23, x15\n" + "tbz x8, #2, 53f\n" + "ld1 { v25.s }[0], [x23], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v25.h }[2], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[6], [x23]\n" "b 55f\n" "52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[4], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[4], [x23]\n" "b 55f\n" "53:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x4, #1, 54f\n" - "ld1 { v25.h }[0], [x25], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[2], [x25]\n" + "tbz x8, #1, 54f\n" + "ld1 { v25.h }[0], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[2], [x23]\n" "b 55f\n" "54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v25.b }[0], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[0], [x23]\n" "55:" // Oddments: Load (4, 0): Bit 2: End - "ldr x24, [x8, #0x98]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v25.4h, v6.4h\n" - "smlal2 v22.4s, v25.8h, v6.8h\n" - "add x24, x24, x5\n" - "tbz x4, #2, 57f\n" + "ldr x24, [x12, #0x98]\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "add x24, x24, x15\n" + "tbz x8, #2, 57f\n" "ld1 { v29.s }[0], [x24], #0x4\n" - "tbz x4, #1, 56f\n" + "tbz x8, #1, 56f\n" "ld1 { v29.h }[2], [x24], #0x2\n" - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[6], [x24]\n" "b 59f\n" "56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[4], [x24]\n" "b 59f\n" "57:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x4, #1, 58f\n" + "tbz x8, #1, 58f\n" "ld1 { v29.h }[0], [x24], #0x2\n" - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[2], [x24]\n" "b 59f\n" "58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[0], [x24]\n" "59:" // Oddments: Load (2, 4): Bit 2: End - "ldr x23, [x8, #0xa0]\n" "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v20.4s, v29.4h, v8.4h\n" - "smlal2 v23.4s, v29.8h, v8.8h\n" - "add x23, x23, x5\n" - "smlal v17.4s, v29.4h, v2.4h\n" + "ldr x19, [x12, #0xa0]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "smlal v23.4s, v29.4h, v2.4h\n" "smlal2 v18.4s, v29.8h, v2.8h\n" - "tbz x4, #2, 61f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x4, #1, 60f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[6], [x23]\n" + "add x19, x19, x15\n" + "tbz x8, #2, 61f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 60f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[6], [x19]\n" "b 63f\n" "60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[4], [x19]\n" "b 63f\n" "61:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x4, #1, 62f\n" - "ld1 { 
v27.h }[0], [x23], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x8, #1, 62f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[2], [x19]\n" "b 63f\n" "62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[0], [x19]\n" "63:" // Oddments: Load (4, 1): Bit 2: End - "ldr x22, [x8, #0xa8]\n" "usubl v27.8h, v27.8b, v12.8b\n" - "smlal v16.4s, v27.4h, v7.4h\n" - "smlal2 v22.4s, v27.8h, v7.8h\n" - "add x22, x22, x5\n" - "tbz x4, #2, 65f\n" + "ldr x22, [x12, #0xa8]\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 65f\n" "ld1 { v24.s }[0], [x22], #0x4\n" - "tbz x4, #1, 64f\n" + "tbz x8, #1, 64f\n" "ld1 { v24.h }[2], [x22], #0x2\n" - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[6], [x22]\n" "b 67f\n" "64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[4], [x22]\n" "b 67f\n" "65:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x4, #1, 66f\n" + "tbz x8, #1, 66f\n" "ld1 { v24.h }[0], [x22], #0x2\n" - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[2], [x22]\n" "b 67f\n" "66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[0], [x22]\n" "67:" // Oddments: Load (3, 2): Bit 2: End - "ldr x21, [x8, #0xb0]\n" "usubl v24.8h, v24.8b, v12.8b\n" - "smlal v16.4s, v24.4h, v5.4h\n" - "smlal2 v22.4s, v24.8h, v5.8h\n" - "add x21, x21, x5\n" - "smlal v17.4s, v24.4h, v3.4h\n" + "ldr x21, [x12, #0xb0]\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" + "smlal v23.4s, v24.4h, v3.4h\n" "smlal2 v18.4s, v24.8h, v3.8h\n" - "tbz x4, #2, 69f\n" + "add x21, x21, x15\n" + "tbz x8, #2, 69f\n" "ld1 { v26.s }[0], [x21], #0x4\n" - "tbz x4, #1, 68f\n" + "tbz x8, #1, 68f\n" "ld1 { v26.h }[2], [x21], #0x2\n" - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[6], [x21]\n" "b 71f\n" "68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[4], [x21]\n" "b 71f\n" "69:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x4, #1, 70f\n" + "tbz x8, #1, 70f\n" "ld1 { v26.h }[0], [x21], #0x2\n" - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[2], [x21]\n" "b 71f\n" "70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[0], [x21]\n" "71:" // Oddments: Load (4, 3): Bit 2: End - "ldr x20, [x8, #0xb8]\n" "usubl v26.8h, v26.8b, v12.8b\n" - "smlal v17.4s, v26.4h, v7.4h\n" + "ldr x20, [x12, #0xb8]\n" + "smlal v23.4s, v26.4h, v7.4h\n" "smlal2 v18.4s, v26.8h, v7.8h\n" - "add x20, x20, x5\n" - "tbz x4, #2, 73f\n" + "add x20, x20, x15\n" + "tbz x8, #2, 73f\n" "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x4, #1, 72f\n" + "tbz x8, #1, 72f\n" "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[6], [x20]\n" "b 75f\n" "72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[4], [x20]\n" "b 75f\n" "73:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x4, #1, 74f\n" + "tbz x8, #1, 74f\n" "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[2], [x20]\n" "b 75f\n" "74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[0], [x20]\n" "75:" // Oddments: Load (4, 2): Bit 2: End - "ldr x19, 
[x8, #0xc0]\n" "usubl v25.8h, v25.8b, v12.8b\n" - "smlal v16.4s, v25.4h, v8.4h\n" - "smlal2 v22.4s, v25.8h, v8.8h\n" - "add x19, x19, x5\n" - "smlal v17.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" + "smlal v23.4s, v25.4h, v6.4h\n" "smlal2 v18.4s, v25.8h, v6.8h\n" - "tbz x4, #2, 77f\n" + "add x19, x19, x15\n" + "tbz x8, #2, 77f\n" "ld1 { v29.s }[0], [x19], #0x4\n" - "tbz x4, #1, 76f\n" + "tbz x8, #1, 76f\n" "ld1 { v29.h }[2], [x19], #0x2\n" - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[6], [x19]\n" "b 79f\n" "76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[4], [x19]\n" "b 79f\n" "77:" // Oddments: Load (4, 4): Bit 2: Unset - "tbz x4, #1, 78f\n" + "tbz x8, #1, 78f\n" "ld1 { v29.h }[0], [x19], #0x2\n" - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[2], [x19]\n" "b 79f\n" "78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[0], [x19]\n" "79:" // Oddments: Load (4, 4): Bit 2: End "usubl v29.8h, v29.8b, v12.8b\n" - "smlal v17.4s, v29.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" "smlal2 v18.4s, v29.8h, v8.8h\n" - "tbz x4, #2, 81f\n" - "ld1 { v21.4s }, [x17], #0x10\n" - "ld1 { v30.4s }, [x15], #0x10\n" - "tbz x4, #1, 80f\n" - "ld1 { v31.d }[0], [x17], #0x8\n" - "ld1 { v9.d }[0], [x15], #0x8\n" - "tbz x4, #0, 83f\n" - "ld1 { v31.s }[2], [x17]\n" - "ld1 { v9.s }[2], [x15]\n" + "tbz x8, #2, 81f\n" + "ld1 { v19.4s }, [x13], #0x10\n" + "ld1 { v0.4s }, [x11], #0x10\n" + "tbz x8, #1, 80f\n" + "ld1 { v4.d }[0], [x13], #0x8\n" + "ld1 { v31.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[2], [x13]\n" + "ld1 { v31.s }[2], [x11]\n" "b 83f\n" "80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v31.s }[0], [x17]\n" - "ld1 { v9.s }[0], [x15]\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[0], [x13]\n" + "ld1 { v31.s }[0], [x11]\n" "b 83f\n" "81:" // Oddments: Load requant params: Bit 2: Unset - "tbz x4, #1, 82f\n" - "ld1 { v21.d }[0], [x17], #0x8\n" - "ld1 { v30.d }[0], [x15], #0x8\n" - "tbz x4, #0, 83f\n" - "ld1 { v21.s }[2], [x17]\n" - "ld1 { v30.s }[2], [x15]\n" + "tbz x8, #1, 82f\n" + "ld1 { v19.d }[0], [x13], #0x8\n" + "ld1 { v0.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[2], [x13]\n" + "ld1 { v0.s }[2], [x11]\n" "b 83f\n" "82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v21.s }[0], [x17]\n" - "ld1 { v30.s }[0], [x15]\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[0], [x13]\n" + "ld1 { v0.s }[0], [x11]\n" "83:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v15.4s, v15.4s, v21.4s\n" - "add x14, x14, x7\n" - "sqrdmulh v10.4s, v10.4s, v31.4s\n" - "add x13, x13, x7\n" - "sqrdmulh v20.4s, v20.4s, v21.4s\n" - "add x12, x12, x7\n" - "sqrdmulh v23.4s, v23.4s, v31.4s\n" - "add x11, x11, x7\n" - "sqrdmulh v16.4s, v16.4s, v21.4s\n" - "and v26.16b, v15.16b, v30.16b\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "and v8.16b, v10.16b, v9.16b\n" - "and v4.16b, v20.16b, v30.16b\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "and v2.16b, v23.16b, v9.16b\n" - "and v1.16b, v16.16b, v30.16b\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v30.16b, v15.16b, v0.16b\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "and 
v28.16b, v9.16b, v0.16b\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "and v19.16b, v10.16b, v31.16b\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v31.4s\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v26.4s\n" - "sqrdmulh v17.4s, v17.4s, v21.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "sqadd v10.4s, v10.4s, v8.4s\n" - "sqadd v20.4s, v20.4s, v4.4s\n" - "srshl v15.4s, v15.4s, v30.4s\n" - "sqadd v23.4s, v23.4s, v2.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "srshl v20.4s, v20.4s, v30.4s\n" - "add v15.4s, v15.4s, v11.4s\n" - "srshl v23.4s, v23.4s, v9.4s\n" - "add v10.4s, v10.4s, v11.4s\n" - "smin v15.4s, v15.4s, v14.4s\n" - "add v20.4s, v20.4s, v11.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "smax v15.4s, v15.4s, v19.4s\n" - "smin v20.4s, v20.4s, v14.4s\n" - "smax v10.4s, v10.4s, v19.4s\n" - "add v23.4s, v23.4s, v11.4s\n" - "smax v20.4s, v20.4s, v19.4s\n" - "uzp1 v15.16b, v15.16b, v10.16b\n" - "smin v23.4s, v23.4s, v14.4s\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "sqadd v16.4s, v16.4s, v1.4s\n" - "smax v23.4s, v23.4s, v19.4s\n" - "and v24.16b, v22.16b, v9.16b\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v23.16b\n" - "srshl v16.4s, v16.4s, v30.4s\n" - "and v2.16b, v17.16b, v30.16b\n" - "sshr v2.4s, v2.4s, #0x1f\n" - "uzp1 v20.16b, v20.16b, v20.16b\n" - "add v16.4s, v16.4s, v11.4s\n" - "sqadd v22.4s, v22.4s, v24.4s\n" - "and v31.16b, v18.16b, v9.16b\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "smin v16.4s, v16.4s, v14.4s\n" - "srshl v22.4s, v22.4s, v9.4s\n" - "sqadd v17.4s, v17.4s, v2.4s\n" - "smax v16.4s, v16.4s, v19.4s\n" - "add v22.4s, v22.4s, v11.4s\n" - "srshl v17.4s, v17.4s, v30.4s\n" - "sqadd v18.4s, v18.4s, v31.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "add v17.4s, v17.4s, v11.4s\n" - "srshl v18.4s, v18.4s, v9.4s\n" - "smax v22.4s, v22.4s, v19.4s\n" - "smin v17.4s, v17.4s, v14.4s\n" - "uzp1 v16.16b, v16.16b, 
v22.16b\n" - "add v18.4s, v18.4s, v11.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "smax v17.4s, v17.4s, v19.4s\n" - "smin v18.4s, v18.4s, v14.4s\n" - "smax v18.4s, v18.4s, v19.4s\n" - "uzp1 v17.16b, v17.16b, v18.16b\n" - "uzp1 v17.16b, v17.16b, v17.16b\n" - "tbz x4, #2, 85f\n" - "st1 { v15.s }[0], [x14], #0x4\n" - "st1 { v20.s }[0], [x13], #0x4\n" - "st1 { v16.s }[0], [x12], #0x4\n" - "st1 { v17.s }[0], [x11], #0x4\n" - "tbz x4, #1, 84f\n" - "st1 { v15.h }[2], [x14], #0x2\n" - "st1 { v20.h }[2], [x13], #0x2\n" - "st1 { v16.h }[2], [x12], #0x2\n" - "st1 { v17.h }[2], [x11], #0x2\n" - "tbz x4, #0, 87f\n" - "st1 { v15.b }[6], [x14], #0x1\n" - "st1 { v20.b }[6], [x13], #0x1\n" - "st1 { v16.b }[6], [x12], #0x1\n" - "st1 { v17.b }[6], [x11], #0x1\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "tbz x8, #2, 85f\n" + "st1 { v15.s }[0], [x10], #0x4\n" + "st1 { v9.s }[0], [x9], #0x4\n" + "st1 { v22.s }[0], [x28], #0x4\n" + "st1 { v23.s }[0], [x27], #0x4\n" + "tbz x8, #1, 84f\n" + "st1 { v15.h }[2], [x10], #0x2\n" + "st1 { v9.h }[2], [x9], #0x2\n" + "st1 { v22.h }[2], [x28], #0x2\n" + "st1 { v23.h }[2], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[6], [x10], #0x1\n" + "st1 { v9.b }[6], [x9], #0x1\n" + "st1 { v22.b }[6], [x28], #0x1\n" + "st1 { v23.b }[6], [x27], #0x1\n" "b 87f\n" "84:" // Oddments: Bit 2: Bit 1: Unset - "tbz x4, #0, 87f\n" - "st1 { v15.b }[4], [x14], #0x1\n" - "st1 { v20.b }[4], [x13], #0x1\n" - "st1 { v16.b }[4], [x12], #0x1\n" - "st1 { v17.b }[4], [x11], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[4], [x10], #0x1\n" + "st1 { v9.b }[4], [x9], #0x1\n" + "st1 { v22.b }[4], [x28], #0x1\n" + "st1 { v23.b }[4], [x27], #0x1\n" "b 87f\n" "85:" // Oddments: Bit 2: Unset - "tbz x4, #1, 86f\n" - "st1 { v15.h }[0], [x14], #0x2\n" - "st1 { v20.h }[0], [x13], #0x2\n" - "st1 { v16.h }[0], [x12], #0x2\n" - "st1 { v17.h }[0], [x11], #0x2\n" - "tbz x4, #0, 87f\n" - "st1 { v15.b }[2], [x14], #0x1\n" - "st1 { v20.b }[2], [x13], #0x1\n" - "st1 { v16.b }[2], [x12], #0x1\n" - "st1 { v17.b }[2], [x11], #0x1\n" + "tbz x8, #1, 86f\n" + "st1 { v15.h }[0], [x10], #0x2\n" + "st1 { v9.h }[0], [x9], #0x2\n" + "st1 { v22.h }[0], [x28], #0x2\n" + "st1 { v23.h }[0], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[2], [x10], #0x1\n" + "st1 { v9.b }[2], [x9], #0x1\n" + "st1 { v22.b }[2], [x28], #0x1\n" + "st1 { v23.b }[2], [x27], #0x1\n" "b 87f\n" "86:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 87f\n" - "st1 { v15.b }[0], [x14], #0x1\n" - "st1 { v20.b }[0], [x13], #0x1\n" - "st1 { v16.b }[0], [x12], #0x1\n" - "st1 { v17.b }[0], [x11], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[0], [x10], #0x1\n" + "st1 { v9.b }[0], [x9], #0x1\n" + "st1 { v22.b }[0], [x28], #0x1\n" + "st1 { v23.b }[0], [x27], #0x1\n" "87:" // Oddments: Bit 2: End - "88:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), 
[offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (¶ms) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index 73de9650c3..11e993c51c 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -36,37 +36,24 @@ namespace depthwise { void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); -struct a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst +class a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - typedef void (*kern_type)(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const uint8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - constexpr static parameter_packing_fn pack_parameters = interleave_a64_u8q_5x5_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_a64_u8q_5x5_mla::get_packed_size; + a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {} - kern_type kernel = a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } - 
a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp index b42f29ab0d..6934dffc98 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const uint8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const uint8_t *const *inptrs_raw, - const uint8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -111,2096 +111,2070 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x10, #0x0\n" - "ldr x3, [%x[params], %[offsetof_Params_weights]]\n" - "mov x1, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x25, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x2, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x19, x4, #0x3\n" - "ldr x5, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x13, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v7.16b }, [x13]\n" - "add x8, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v13.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v19.4s }, [x8]\n" - "add x8, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v16.4s }, [x20]\n" - "ld1r { v12.4s }, [x8]\n" - "ldp x17, x16, [x21, #0x0]\n" - "ldp x6, x8, [x21, #0x10]\n" - "cbz x19, 3f\n" - "subs x19, x19, #0x1\n" - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q15, [x12, #0x0]\n" - "mov v18.16b, v15.16b\n" - "ldr q20, [x12, #0x10]\n" - "add x12, x12, #0x20\n" - "mov v11.16b, v15.16b\n" - "str x12, [%x[params], %[offsetof_Params_bias]]\n" + "ldr x10, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x17, x10, %[offsetof_Requantize32_a_offset]\n" + "add x9, x10, %[offsetof_Requantize32_b_offset]\n" + "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x4, x10, %[offsetof_Requantize32_c_offset]\n" + "add x14, x10, %[offsetof_Requantize32_minval]\n" + "ldr x23, [%x[params], %[offsetof_Params_weights]]\n" + "add x5, x10, %[offsetof_Requantize32_maxval]\n" + "ld1r { v9.16b }, [x17]\n" + "ld1r { v14.16b }, [x9]\n" + "lsr x3, x0, #0x3\n" + "ld1r { v18.8h }, [x4]\n" + "ld1r { v11.8h }, [x14]\n" + "mov x24, #0x0\n" + "mov x22, #0x0\n" + "ld1r { v13.8h 
}, [x5]\n" + "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x20, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x1, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x16, x8, [x25, #0x0]\n" + "ldp x4, x7, [x25, #0x10]\n" + "cbz x3, 3f\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "subs x3, x3, #0x1\n" + "mov v17.16b, v15.16b\n" + "ldr q16, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "ldr d2, [x23, #0x10]\n" + "mov v8.16b, v16.16b\n" "mov v10.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "mov v5.16b, v20.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v8.16b, v20.16b\n" - "ldr d2, [x3, #0x10]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "mov v9.16b, v20.16b\n" - "ldr d3, [x3, #0x18]\n" - "ldr d4, [x3, #0x20]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "ldp x28, x27, [x25, #0x0]\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "ldp x26, x13, [x25, #0x10]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "ldp x24, x23, [x25, #0x20]\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "ldr d31, [x28, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "ldr d30, [x27, x10]\n" - "ldr d29, [x26, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "ldr d28, [x13, x10]\n" - "ldr d27, [x24, x10]\n" - "usubl v29.8h, v29.8b, v7.8b\n" - "ldr d23, [x23, x10]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "ldr d25, [x22, x10]\n" - "ldr d24, [x21, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "ldr d26, [x20, x10]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "ldr d22, [x0, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "usubl v22.8h, v22.8b, v7.8b\n" + "ldr d3, [x23, #0x18]\n" + "ldr d4, [x23, #0x20]\n" + "mov v7.16b, v16.16b\n" + "mov v6.16b, v15.16b\n" + "ldp x28, x6, [x20, #0x0]\n" + "ldp x26, x25, [x20, #0x10]\n" + "mov v5.16b, v16.16b\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "ldp x5, x2, [x20, #0x20]\n" + "ldp x27, x21, [x20, #0x30]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "ldp x12, x19, [x20, #0x40]\n" + "ldr d31, [x28, x24]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "ldr d30, [x6, x24]\n" + "ldr d29, [x26, x24]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "ldr d28, [x25, x24]\n" + "ldr d27, [x5, x24]\n" + "usubl v29.8h, v29.8b, v9.8b\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "ldr d23, [x2, x24]\n" + "ldr d25, [x27, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "ldr d24, [x21, x24]\n" + "ldr d26, [x12, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "ldr d22, [x19, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "usubl v22.8h, v22.8b, v9.8b\n" "beq 2f\n" "1:" // Loop "smlal v15.4s, v31.4h, v0.4h\n" - "ldr x20, [x25, #0x50]\n" - "subs x19, x19, #0x1\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr x28, [x25, #0x58]\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "ldr x0, [x25, #0x60]\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ldr d31, [x20, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v11.4s, v29.4h, v0.4h\n" - "ldr x7, [x25, #0x68]\n" - "smlal2 v8.4s, v29.8h, v0.8h\n" - "ldr x26, [x25, #0x70]\n" - "smlal v10.4s, v28.4h, v0.4h\n" - "ldr x23, [x25, #0x78]\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ldr d0, [x3, #0x28]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr x19, [x20, #0x50]\n" + "ldr d31, [x19, x24]\n" + "smlal v17.4s, v30.4h, v0.4h\n" + 
"smlal v10.4s, v29.4h, v0.4h\n" + "ldr x15, [x20, #0x58]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "ldr x19, [x20, #0x60]\n" + "ldr x27, [x20, #0x68]\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "ldr x20, [x25, #0x80]\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v1.4h\n" - "ldr x22, [x25, #0x88]\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" - "ldr x13, [x25, #0x90]\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "ldr x21, [x25, #0x98]\n" - "smlal2 v8.4s, v28.8h, v1.8h\n" - "ldr x14, [x25, #0xa0]\n" - "smlal v10.4s, v23.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "smlal2 v9.4s, v23.8h, v1.8h\n" - "ldr d1, [x3, #0x30]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "ldr x5, [x20, #0x70]\n" + "ldr x11, [x20, #0x78]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "ldr d30, [x15, x24]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ldr d0, [x23, #0x28]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "ldr x12, [x20, #0x80]\n" + "ldr x26, [x20, #0x88]\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "ldr d27, [x0, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "ldr x0, [x25, #0xb8]\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "ldr x15, [x25, #0xc0]\n" - "smlal v11.4s, v23.4h, v2.4h\n" - "ldr x9, [x25, #0xc8]\n" - "smlal2 v8.4s, v23.8h, v2.8h\n" - "ldr x27, [x25, #0xd0]\n" - "smlal v10.4s, v31.4h, v2.4h\n" - "ldr x28, [x25, #0xd8]\n" - "smlal2 v9.4s, v31.8h, v2.8h\n" - "ldr d2, [x3, #0x38]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "ldr x14, [x20, #0x90]\n" + "ldr x15, [x20, #0x98]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "ldr d1, [x23, #0x30]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "ldr x21, [x20, #0xa0]\n" + "ldr x2, [x20, #0xa8]\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr q6, [x2, #0x0]\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x7, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "ldr x12, [x25, #0xe0]\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "ldr q21, [x5, #0x0]\n" - "smlal v11.4s, v31.4h, v3.4h\n" - "ldr q17, [x2, #0x10]\n" - "add x2, x2, #0x20\n" - "smlal2 v8.4s, v31.8h, v3.8h\n" - "ldr q14, [x5, #0x10]\n" - "add x5, x5, #0x20\n" - "smlal v10.4s, v30.4h, v3.4h\n" - "smlal2 v9.4s, v30.8h, v3.8h\n" - "ldr d3, [x3, #0x40]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "ldr x13, [x20, #0xb0]\n" + "ldr x9, [x20, #0xb8]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v31.8h, v2.8h\n" + "ldr d25, [x27, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "ldr d2, [x23, #0x38]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "ldr x19, [x20, #0xc0]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x26, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "ldr x7, [x25, #0xe8]\n" - "smlal2 v5.4s, v27.8h, 
v4.8h\n" - "ldr d27, [x23, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v11.4s, v30.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "smlal2 v8.4s, v30.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0x48]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "ldr x6, [x20, #0xd0]\n" + "ldr x27, [x20, #0xd8]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v30.8h, v3.8h\n" + "ldr d24, [x5, x24]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "ldr d3, [x23, #0x40]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "ldr d27, [x11, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v20.4s, v29.8h, v0.8h\n" - "smlal v18.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v11.4s, v22.4h, v0.4h\n" - "smlal2 v8.4s, v22.8h, v0.8h\n" - "smlal v10.4s, v25.4h, v0.4h\n" - "smlal2 v9.4s, v25.8h, v0.8h\n" - "ldr d0, [x3, #0x50]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "ldr x11, [x20, #0xe0]\n" + "ldr x17, [x20, #0xe8]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0x48]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "ldr x5, [x20, #0xf0]\n" + "ldr q12, [x10, #0x0]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "ldr q19, [x1, #0x0]\n" + "ldr q20, [x10, #0x10]\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x10]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v1.4h\n" - "ldr x23, [x25, #0xf8]\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "smlal v11.4s, v25.4h, v1.4h\n" - "smlal2 v8.4s, v25.8h, v1.8h\n" - "smlal v10.4s, v24.4h, v1.4h\n" - "smlal2 v9.4s, v24.8h, v1.8h\n" - "ldr d1, [x3, #0x58]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "ldr q29, [x1, #0x10]\n" + "subs x3, x3, #0x1\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "ldr d28, [x26, x24]\n" + "ldr d0, [x23, #0x50]\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "add x10, x10, #0x20\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v20.4s, v23.8h, v2.8h\n" - "ldr d23, [x20, x10]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v31.4h, v2.4h\n" - "ldr x22, [x25, #0x100]\n" - "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v24.4h, v2.4h\n" - "smlal2 v8.4s, v24.8h, v2.8h\n" - "smlal v10.4s, v27.4h, v2.4h\n" - "smlal2 v9.4s, v27.8h, v2.8h\n" - "ldr d2, [x3, #0x60]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "add x1, x1, #0x20\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "ldr d23, [x12, x24]\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "ldr d1, [x23, #0x58]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "ldr x26, [x20, #0x100]\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v20.4s, v31.8h, v3.8h\n" - "ldr d31, [x13, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v3.4h\n" - "ldr x20, [x25, #0x108]\n" - "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal v11.4s, v27.4h, v3.4h\n" - "smlal2 v8.4s, v27.8h, 
v3.8h\n" - "smlal v10.4s, v23.4h, v3.4h\n" - "smlal2 v9.4s, v23.8h, v3.8h\n" - "ldr d3, [x3, #0x68]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "ldr d31, [x14, x24]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "ldr d2, [x23, #0x60]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "ldr x12, [x20, #0x108]\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "ldr x13, [x25, #0x110]\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d26, [x14, x10]\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal v11.4s, v23.4h, v4.4h\n" - "ldr x21, [x25, #0x118]\n" - "smlal2 v8.4s, v23.8h, v4.8h\n" - "smlal v10.4s, v28.4h, v4.4h\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "ldr d4, [x3, #0x70]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "ldr d30, [x15, x24]\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "ldr d3, [x23, #0x68]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "ldr d26, [x21, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v20.4s, v22.8h, v0.8h\n" - "ldr d22, [x0, x10]\n" - "usubl v22.8h, v22.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "smlal v11.4s, v31.4h, v0.4h\n" - "smlal2 v8.4s, v31.8h, v0.8h\n" - "smlal v10.4s, v30.4h, v0.4h\n" - "smlal2 v9.4s, v30.8h, v0.8h\n" - "ldr d0, [x3, #0x78]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "ldr x14, [x20, #0x110]\n" + "ldr x21, [x20, #0x118]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "ldr d4, [x23, #0x70]\n" + "ldr d22, [x9, x24]\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "usubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v20.4s, v25.8h, v1.8h\n" - "ldr d25, [x11, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "smlal v11.4s, v30.4h, v1.4h\n" - "smlal2 v8.4s, v30.8h, v1.8h\n" - "smlal v10.4s, v26.4h, v1.4h\n" - "smlal2 v9.4s, v26.8h, v1.8h\n" - "ldr d1, [x3, #0x80]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "ldr d25, [x2, x24]\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "ldr d0, [x23, #0x78]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v20.4s, v24.8h, v2.8h\n" - "ldr d24, [x24, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "smlal v11.4s, v26.4h, v2.4h\n" - "smlal2 v8.4s, v26.8h, v2.8h\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "ldr d2, [x3, #0x88]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "ldr d24, [x13, x24]\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v2.4h\n" + 
"smlal v10.4s, v26.4h, v2.4h\n" + "ldr d1, [x23, #0x80]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "ldr d27, [x15, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "ldr d3, [x3, #0x90]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "ldr d27, [x19, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "ldr d2, [x23, #0x88]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v20.4s, v23.8h, v4.8h\n" - "ldr d23, [x9, x10]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "ldr d28, [x12, x10]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v22.4h, v4.4h\n" - "smlal2 v9.4s, v22.8h, v4.8h\n" - "ldr d4, [x3, #0x98]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "ldr d23, [x28, x24]\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "ldr d3, [x23, #0x90]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "ldr d28, [x11, x24]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x27, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "smlal v11.4s, v27.4h, v0.4h\n" - "smlal2 v8.4s, v27.8h, v0.8h\n" - "smlal v10.4s, v23.4h, v0.4h\n" - "smlal2 v9.4s, v23.8h, v0.8h\n" - "ldr d0, [x3, #0xa0]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr d31, [x6, x24]\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "ldr d4, [x23, #0x98]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "smlal v11.4s, v23.4h, v1.4h\n" - "smlal2 v8.4s, v23.8h, v1.8h\n" - "smlal v10.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "ldr d1, [x3, #0xa8]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x24]\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "ldr d0, [x23, #0xa0]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v20.4s, v26.8h, v2.8h\n" - "ldr d26, [x7, x10]\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v8.4s, 
v31.8h, v2.8h\n" - "smlal v10.4s, v30.4h, v2.4h\n" - "smlal2 v9.4s, v30.8h, v2.8h\n" - "ldr d2, [x3, #0xb0]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "ldr d26, [x17, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "ldr d1, [x23, #0xa8]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x26, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v30.4h, v3.4h\n" - "smlal2 v8.4s, v30.8h, v3.8h\n" - "smlal v10.4s, v28.4h, v3.4h\n" - "smlal2 v9.4s, v28.8h, v3.8h\n" - "ldr d3, [x3, #0xb8]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "ldr d25, [x5, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "ldr d2, [x23, #0xb0]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x23, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "smlal v11.4s, v28.4h, v4.4h\n" - "smlal2 v8.4s, v28.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0xc0]\n" - "add x3, x3, #0xc8\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "ldr d24, [x25, x24]\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "ldr d3, [x23, #0xb8]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" "smlal v15.4s, v27.4h, v0.4h\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v20.4s, v27.8h, v0.8h\n" - "ldr d27, [x22, x10]\n" - "smlal v18.4s, v23.4h, v0.4h\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v8.4s, v25.8h, v0.8h\n" - "ldr d25, [x20, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v0.4h\n" - "smlal2 v9.4s, v24.8h, v0.8h\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "ldr d27, [x26, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0xc0]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "add x23, x23, #0xc8\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "ldr d25, [x12, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v20.4s, v23.8h, v1.8h\n" - "smlal v18.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v8.4s, v24.8h, v1.8h\n" - "ldr d24, [x13, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v1.4h\n" - "smlal2 v9.4s, v27.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "ldr d24, [x14, x24]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" "smlal v15.4s, v31.4h, v2.4h\n" - 
"smlal2 v20.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "smlal v11.4s, v27.4h, v2.4h\n" - "smlal2 v8.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x10]\n" - "add x10, x10, #0x8\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x24]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x24, x24, #0x8\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v20.4s, v30.8h, v3.8h\n" - "smlal v18.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v20.4s, v28.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v27.4h, v4.4h\n" - "smlal2 v9.4s, v27.8h, v4.8h\n" - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "sqrdmulh v20.4s, v20.4s, v17.4s\n" - "sqrdmulh v18.4s, v18.4s, v6.4s\n" - "sqrdmulh v5.4s, v5.4s, v17.4s\n" - "and v1.16b, v15.16b, v21.16b\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "and v29.16b, v20.16b, v14.16b\n" - "and v3.16b, v18.16b, v21.16b\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "and v2.16b, v5.16b, v14.16b\n" - "sqrdmulh v11.4s, v11.4s, v6.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v8.4s, v8.4s, v17.4s\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v27.8h, v4.8h\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v22.4s\n" "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v1.4s\n" - "sqrdmulh v10.4s, v10.4s, v6.4s\n" - "and v0.16b, v11.16b, v21.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v21.4s\n" - "sqadd v20.4s, v20.4s, v29.4s\n" - "sqadd v18.4s, v18.4s, v3.4s\n" - "sqadd v5.4s, v5.4s, v2.4s\n" - "and v27.16b, v8.16b, v14.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "add v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v21.4s\n" - "srshl v5.4s, v5.4s, v14.4s\n" - "smin v15.4s, v15.4s, v12.4s\n" - "add v20.4s, v20.4s, v19.4s\n" - "add v18.4s, v18.4s, v19.4s\n" - "smax v15.4s, v15.4s, 
v16.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smin v18.4s, v18.4s, v12.4s\n" - "add v5.4s, v5.4s, v19.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "smin v5.4s, v5.4s, v12.4s\n" - "uzp1 v15.16b, v15.16b, v20.16b\n" - "sqadd v11.4s, v11.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x17, x1]\n" - "smax v5.4s, v5.4s, v16.4s\n" - "sqadd v8.4s, v8.4s, v27.4s\n" - "srshl v11.4s, v11.4s, v21.4s\n" - "and v30.16b, v10.16b, v21.16b\n" - "sshr v30.4s, v30.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v5.16b\n" - "add v11.4s, v11.4s, v19.4s\n" - "srshl v8.4s, v8.4s, v14.4s\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "str d18, [x16, x1]\n" - "smin v11.4s, v11.4s, v12.4s\n" - "sqrdmulh v9.4s, v9.4s, v17.4s\n" - "add v8.4s, v8.4s, v19.4s\n" - "sqadd v10.4s, v10.4s, v30.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "smin v8.4s, v8.4s, v12.4s\n" - "and v6.16b, v9.16b, v14.16b\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "smax v8.4s, v8.4s, v16.4s\n" - "srshl v10.4s, v10.4s, v21.4s\n" - "uzp1 v11.16b, v11.16b, v8.16b\n" - "add v10.4s, v10.4s, v19.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x6, x1]\n" - "smin v10.4s, v10.4s, v12.4s\n" - "sqadd v9.4s, v9.4s, v6.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "srshl v9.4s, v9.4s, v14.4s\n" - "add v9.4s, v9.4s, v19.4s\n" - "smin v9.4s, v9.4s, v12.4s\n" - "smax v9.4s, v9.4s, v16.4s\n" - "uzp1 v10.16b, v10.16b, v9.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "str d15, [x16, x22]\n" "uzp1 v10.16b, v10.16b, v10.16b\n" - "str d10, [x8, x1]\n" - "add x1, x1, #0x8\n" - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q15, [x12, #0x0]\n" - "mov v18.16b, v15.16b\n" - "ldr q20, [x12, #0x10]\n" - "add x12, x12, #0x20\n" - "mov v11.16b, v15.16b\n" - "str x12, [%x[params], %[offsetof_Params_bias]]\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "str d17, [x8, x22]\n" + "str d10, [x4, x22]\n" + "str d6, [x7, x22]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "add x22, x22, #0x8\n" + "ldr q16, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "ldr d2, [x23, #0x10]\n" + "mov v17.16b, v15.16b\n" + "mov v8.16b, v16.16b\n" + "ldr d3, [x23, #0x18]\n" + "ldr d4, [x23, #0x20]\n" "mov v10.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "mov v5.16b, 
v20.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v8.16b, v20.16b\n" - "ldr d2, [x3, #0x10]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "mov v9.16b, v20.16b\n" - "ldr d3, [x3, #0x18]\n" - "ldr d4, [x3, #0x20]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "ldp x28, x27, [x25, #0x0]\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "ldp x26, x13, [x25, #0x10]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "ldp x24, x23, [x25, #0x20]\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "ldr d31, [x28, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "ldr d30, [x27, x10]\n" - "ldr d29, [x26, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "ldr d28, [x13, x10]\n" - "ldr d27, [x24, x10]\n" - "usubl v29.8h, v29.8b, v7.8b\n" - "ldr d23, [x23, x10]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "ldr d25, [x22, x10]\n" - "ldr d24, [x21, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "ldr d26, [x20, x10]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "ldr d22, [x0, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "usubl v22.8h, v22.8b, v7.8b\n" + "mov v7.16b, v16.16b\n" + "ldp x28, x6, [x20, #0x0]\n" + "ldp x26, x25, [x20, #0x10]\n" + "mov v6.16b, v15.16b\n" + "mov v5.16b, v16.16b\n" + "ldp x5, x2, [x20, #0x20]\n" + "ldp x27, x21, [x20, #0x30]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "ldp x12, x19, [x20, #0x40]\n" + "ldr d31, [x28, x24]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "ldr d30, [x6, x24]\n" + "ldr d29, [x26, x24]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "ldr d28, [x25, x24]\n" + "ldr d27, [x5, x24]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "usubl v29.8h, v29.8b, v9.8b\n" + "ldr d23, [x2, x24]\n" + "ldr d25, [x27, x24]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "ldr d24, [x21, x24]\n" + "ldr d26, [x12, x24]\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "ldr d22, [x19, x24]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "usubl v22.8h, v22.8b, v9.8b\n" "bgt 1b\n" "2:" // Tail "smlal v15.4s, v31.4h, v0.4h\n" - "ldr x20, [x25, #0x50]\n" - "tst x4, #0x7\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr x28, [x25, #0x58]\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "ldr x0, [x25, #0x60]\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ldr d31, [x20, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v11.4s, v29.4h, v0.4h\n" - "ldr x7, [x25, #0x68]\n" - "smlal2 v8.4s, v29.8h, v0.8h\n" - "ldr x26, [x25, #0x70]\n" - "smlal v10.4s, v28.4h, v0.4h\n" - "ldr x23, [x25, #0x78]\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ldr d0, [x3, #0x28]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr x19, [x20, #0x50]\n" + "ldr d31, [x19, x24]\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v29.4h, v0.4h\n" + "ldr x15, [x20, #0x58]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "ldr x19, [x20, #0x60]\n" + "ldr x27, [x20, #0x68]\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "ldr x20, [x25, #0x80]\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v1.4h\n" - "ldr x22, [x25, #0x88]\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" - "ldr x13, [x25, #0x90]\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "ldr x21, [x25, #0x98]\n" - "smlal2 v8.4s, v28.8h, v1.8h\n" - "ldr x14, [x25, #0xa0]\n" - "smlal v10.4s, v23.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "smlal2 v9.4s, v23.8h, v1.8h\n" - "ldr d1, [x3, 
#0x30]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "ldr x5, [x20, #0x70]\n" + "ldr x11, [x20, #0x78]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "ldr d30, [x15, x24]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ldr d0, [x23, #0x28]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "ldr x12, [x20, #0x80]\n" + "ldr x26, [x20, #0x88]\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "ldr d27, [x0, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "ldr x0, [x25, #0xb8]\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "ldr x15, [x25, #0xc0]\n" - "smlal v11.4s, v23.4h, v2.4h\n" - "ldr x9, [x25, #0xc8]\n" - "smlal2 v8.4s, v23.8h, v2.8h\n" - "ldr x27, [x25, #0xd0]\n" - "smlal v10.4s, v31.4h, v2.4h\n" - "ldr x28, [x25, #0xd8]\n" - "smlal2 v9.4s, v31.8h, v2.8h\n" - "ldr d2, [x3, #0x38]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "ldr x14, [x20, #0x90]\n" + "ldr x15, [x20, #0x98]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "ldr d1, [x23, #0x30]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "ldr x21, [x20, #0xa0]\n" + "ldr x2, [x20, #0xa8]\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr x12, [x25, #0xe0]\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x7, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "ldr x7, [x25, #0xe8]\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "ldr q6, [x2, #0x0]\n" - "smlal v11.4s, v31.4h, v3.4h\n" - "ldr q21, [x5, #0x0]\n" - "smlal2 v8.4s, v31.8h, v3.8h\n" - "ldr q17, [x2, #0x10]\n" - "add x2, x2, #0x20\n" - "smlal v10.4s, v30.4h, v3.4h\n" - "ldr q14, [x5, #0x10]\n" - "add x5, x5, #0x20\n" - "smlal2 v9.4s, v30.8h, v3.8h\n" - "ldr d3, [x3, #0x40]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "ldr x13, [x20, #0xb0]\n" + "ldr x9, [x20, #0xb8]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v31.8h, v2.8h\n" + "ldr d25, [x27, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "ldr d2, [x23, #0x38]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "ldr x19, [x20, #0xc0]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x26, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "smlal2 v5.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v11.4s, v30.4h, v4.4h\n" - "ldr x23, [x25, #0xf8]\n" - "smlal2 v8.4s, v30.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0x48]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "ldr x6, [x20, #0xd0]\n" + "ldr x27, [x20, #0xd8]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v30.8h, v3.8h\n" + "ldr d24, [x5, x24]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "ldr d3, [x23, #0x40]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "ldr d27, [x11, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + 
"smlal2 v7.4s, v30.8h, v4.8h\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v20.4s, v29.8h, v0.8h\n" - "smlal v18.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v11.4s, v22.4h, v0.4h\n" - "smlal2 v8.4s, v22.8h, v0.8h\n" - "smlal v10.4s, v25.4h, v0.4h\n" - "smlal2 v9.4s, v25.8h, v0.8h\n" - "ldr d0, [x3, #0x50]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "ldr x11, [x20, #0xe0]\n" + "ldr x17, [x20, #0xe8]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0x48]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "ldr x5, [x20, #0xf0]\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "ldr q12, [x10, #0x0]\n" + "ldr q19, [x1, #0x0]\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x10]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v1.4h\n" - "ldr x22, [x25, #0x100]\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "smlal v11.4s, v25.4h, v1.4h\n" - "smlal2 v8.4s, v25.8h, v1.8h\n" - "smlal v10.4s, v24.4h, v1.4h\n" - "smlal2 v9.4s, v24.8h, v1.8h\n" - "ldr d1, [x3, #0x58]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "ldr q20, [x10, #0x10]\n" + "ldr q29, [x1, #0x10]\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "ldr d28, [x26, x24]\n" + "ldr d0, [x23, #0x50]\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "ldr x26, [x20, #0x100]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "tst x0, #0x7\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v20.4s, v23.8h, v2.8h\n" - "ldr d23, [x20, x10]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v31.4h, v2.4h\n" - "ldr x20, [x25, #0x108]\n" - "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v24.4h, v2.4h\n" - "smlal2 v8.4s, v24.8h, v2.8h\n" - "smlal v10.4s, v27.4h, v2.4h\n" - "smlal2 v9.4s, v27.8h, v2.8h\n" - "ldr d2, [x3, #0x60]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "add x10, x10, #0x20\n" + "add x1, x1, #0x20\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "ldr d23, [x12, x24]\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "ldr d1, [x23, #0x58]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "ldr x12, [x20, #0x108]\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v20.4s, v31.8h, v3.8h\n" - "ldr d31, [x13, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v3.4h\n" - "ldr x13, [x25, #0x110]\n" - "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal v11.4s, v27.4h, v3.4h\n" - "smlal2 v8.4s, v27.8h, v3.8h\n" - "smlal v10.4s, v23.4h, v3.4h\n" - "smlal2 v9.4s, v23.8h, v3.8h\n" - "ldr d3, [x3, #0x68]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "ldr d31, [x14, x24]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "ldr d2, [x23, #0x60]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "ldr x14, [x20, #0x110]\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "ldr x21, [x25, 
#0x118]\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d26, [x14, x10]\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal v11.4s, v23.4h, v4.4h\n" - "smlal2 v8.4s, v23.8h, v4.8h\n" - "smlal v10.4s, v28.4h, v4.4h\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "ldr d4, [x3, #0x70]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "ldr d30, [x15, x24]\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "ldr d3, [x23, #0x68]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "ldr d26, [x21, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v20.4s, v22.8h, v0.8h\n" - "ldr d22, [x0, x10]\n" - "usubl v22.8h, v22.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "smlal v11.4s, v31.4h, v0.4h\n" - "smlal2 v8.4s, v31.8h, v0.8h\n" - "smlal v10.4s, v30.4h, v0.4h\n" - "smlal2 v9.4s, v30.8h, v0.8h\n" - "ldr d0, [x3, #0x78]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "ldr x21, [x20, #0x118]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "ldr d4, [x23, #0x70]\n" + "ldr d22, [x9, x24]\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "usubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v20.4s, v25.8h, v1.8h\n" - "ldr d25, [x11, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "smlal v11.4s, v30.4h, v1.4h\n" - "smlal2 v8.4s, v30.8h, v1.8h\n" - "smlal v10.4s, v26.4h, v1.4h\n" - "smlal2 v9.4s, v26.8h, v1.8h\n" - "ldr d1, [x3, #0x80]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "ldr d25, [x2, x24]\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "ldr d0, [x23, #0x78]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v20.4s, v24.8h, v2.8h\n" - "ldr d24, [x24, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "smlal v11.4s, v26.4h, v2.4h\n" - "smlal2 v8.4s, v26.8h, v2.8h\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "ldr d2, [x3, #0x88]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "ldr d24, [x13, x24]\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "ldr d1, [x23, #0x80]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "ldr d27, [x15, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "ldr d3, [x3, #0x90]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "ldr d27, [x19, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal 
v10.4s, v25.4h, v3.4h\n" + "ldr d2, [x23, #0x88]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v20.4s, v23.8h, v4.8h\n" - "ldr d23, [x9, x10]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v18.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "ldr d28, [x12, x10]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v22.4h, v4.4h\n" - "smlal2 v9.4s, v22.8h, v4.8h\n" - "ldr d4, [x3, #0x98]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "ldr d23, [x28, x24]\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "ldr d3, [x23, #0x90]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "ldr d28, [x11, x24]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "ldr d31, [x27, x10]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "smlal v11.4s, v27.4h, v0.4h\n" - "smlal2 v8.4s, v27.8h, v0.8h\n" - "smlal v10.4s, v23.4h, v0.4h\n" - "smlal2 v9.4s, v23.8h, v0.8h\n" - "ldr d0, [x3, #0xa0]\n" - "usubl v0.8h, v0.8b, v13.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr d31, [x6, x24]\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "ldr d4, [x23, #0x98]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v18.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "smlal v11.4s, v23.4h, v1.4h\n" - "smlal2 v8.4s, v23.8h, v1.8h\n" - "smlal v10.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "ldr d1, [x3, #0xa8]\n" - "usubl v1.8h, v1.8b, v13.8b\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x24]\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "ldr d0, [x23, #0xa0]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v20.4s, v26.8h, v2.8h\n" - "ldr d26, [x7, x10]\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v8.4s, v31.8h, v2.8h\n" - "smlal v10.4s, v30.4h, v2.4h\n" - "smlal2 v9.4s, v30.8h, v2.8h\n" - "ldr d2, [x3, #0xb0]\n" - "usubl v2.8h, v2.8b, v13.8b\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "ldr d26, [x17, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "ldr d1, [x23, #0xa8]\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "ldr d25, [x26, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v30.4h, v3.4h\n" - "smlal2 v8.4s, v30.8h, 
v3.8h\n" - "smlal v10.4s, v28.4h, v3.4h\n" - "smlal2 v9.4s, v28.8h, v3.8h\n" - "ldr d3, [x3, #0xb8]\n" - "usubl v3.8h, v3.8b, v13.8b\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "ldr d25, [x5, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "ldr d2, [x23, #0xb0]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "ldr d24, [x23, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v18.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "smlal v11.4s, v28.4h, v4.4h\n" - "smlal2 v8.4s, v28.8h, v4.8h\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0xc0]\n" - "usubl v4.8h, v4.8b, v13.8b\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "ldr d24, [x25, x24]\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "ldr d3, [x23, #0xb8]\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v20.4s, v27.8h, v0.8h\n" - "ldr d27, [x22, x10]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v18.4s, v23.4h, v0.4h\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v8.4s, v25.8h, v0.8h\n" - "ldr d25, [x20, x10]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v0.4h\n" - "smlal2 v9.4s, v24.8h, v0.8h\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "ldr d27, [x26, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0xc0]\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "ldr d25, [x12, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v20.4s, v23.8h, v1.8h\n" - "smlal v18.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v8.4s, v24.8h, v1.8h\n" - "ldr d24, [x13, x10]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v1.4h\n" - "smlal2 v9.4s, v27.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "ldr d24, [x14, x24]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v20.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "smlal v11.4s, v27.4h, v2.4h\n" - "smlal2 v8.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x10]\n" - "add x10, x10, #0x8\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x24]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x24, x24, #0x8\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v20.4s, v30.8h, v3.8h\n" - "smlal v18.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "smlal v11.4s, v25.4h, 
v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v20.4s, v28.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "smlal v10.4s, v27.4h, v4.4h\n" - "smlal2 v9.4s, v27.8h, v4.8h\n" - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "sqrdmulh v20.4s, v20.4s, v17.4s\n" - "sqrdmulh v18.4s, v18.4s, v6.4s\n" - "sqrdmulh v5.4s, v5.4s, v17.4s\n" - "and v1.16b, v15.16b, v21.16b\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "and v29.16b, v20.16b, v14.16b\n" - "and v3.16b, v18.16b, v21.16b\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "and v2.16b, v5.16b, v14.16b\n" - "sqrdmulh v11.4s, v11.4s, v6.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v8.4s, v8.4s, v17.4s\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "smlal2 v5.4s, v27.8h, v4.8h\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v22.4s\n" "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v1.4s\n" - "sqrdmulh v10.4s, v10.4s, v6.4s\n" - "and v0.16b, v11.16b, v21.16b\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "srshl v15.4s, v15.4s, v21.4s\n" - "sqadd v20.4s, v20.4s, v29.4s\n" - "sqadd v18.4s, v18.4s, v3.4s\n" - "sqadd v5.4s, v5.4s, v2.4s\n" - "and v27.16b, v8.16b, v14.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "add v15.4s, v15.4s, v19.4s\n" - "srshl v20.4s, v20.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v21.4s\n" - "srshl v5.4s, v5.4s, v14.4s\n" - "smin v15.4s, v15.4s, v12.4s\n" - "add v20.4s, v20.4s, v19.4s\n" - "add v18.4s, v18.4s, v19.4s\n" - "smax v15.4s, v15.4s, v16.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smin v18.4s, v18.4s, v12.4s\n" - "add v5.4s, v5.4s, v19.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "smin v5.4s, v5.4s, v12.4s\n" - "uzp1 v15.16b, v15.16b, v20.16b\n" - "sqadd v11.4s, v11.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + 
"srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x17, x1]\n" - "smax v5.4s, v5.4s, v16.4s\n" - "sqadd v8.4s, v8.4s, v27.4s\n" - "srshl v11.4s, v11.4s, v21.4s\n" - "and v30.16b, v10.16b, v21.16b\n" - "sshr v30.4s, v30.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v5.16b\n" - "add v11.4s, v11.4s, v19.4s\n" - "srshl v8.4s, v8.4s, v14.4s\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "str d18, [x16, x1]\n" - "smin v11.4s, v11.4s, v12.4s\n" - "sqrdmulh v9.4s, v9.4s, v17.4s\n" - "add v8.4s, v8.4s, v19.4s\n" - "sqadd v10.4s, v10.4s, v30.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "smin v8.4s, v8.4s, v12.4s\n" - "and v6.16b, v9.16b, v14.16b\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "smax v8.4s, v8.4s, v16.4s\n" - "srshl v10.4s, v10.4s, v21.4s\n" - "uzp1 v11.16b, v11.16b, v8.16b\n" - "add v10.4s, v10.4s, v19.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x6, x1]\n" - "smin v10.4s, v10.4s, v12.4s\n" - "sqadd v9.4s, v9.4s, v6.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "srshl v9.4s, v9.4s, v14.4s\n" - "add v9.4s, v9.4s, v19.4s\n" - "smin v9.4s, v9.4s, v12.4s\n" - "smax v9.4s, v9.4s, v16.4s\n" - "uzp1 v10.16b, v10.16b, v9.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "str d15, [x16, x22]\n" "uzp1 v10.16b, v10.16b, v10.16b\n" - "str d10, [x8, x1]\n" - "add x1, x1, #0x8\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "str d17, [x8, x22]\n" + "str d10, [x4, x22]\n" + "str d6, [x7, x22]\n" + "add x22, x22, #0x8\n" "beq 124f\n" - "add x3, x3, #0xc8\n" + "add x23, x23, #0xc8\n" "3:" // Oddments - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x4, #2, 5f\n" - "ld1 { v15.4s }, [x12], #0x10\n" - "tbz x4, #1, 4f\n" - "ld1 { v20.d }[0], [x12], #0x8\n" - "tbz x4, #0, 7f\n" - "ld1 { v20.s }[2], [x12]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x0, #2, 5f\n" + "ld1 { v15.4s }, [x19], #0x10\n" + "tbz x0, #1, 4f\n" + "ld1 { v16.d }[0], [x19], #0x8\n" + "tbz x0, #0, 7f\n" + "ld1 { v16.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x4, #0, 7f\n" - "ld1 { v20.s }[0], [x12]\n" + "tbz x0, #0, 7f\n" + "ld1 { v16.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x4, #1, 6f\n" - "ld1 { v15.d }[0], [x12], #0x8\n" - "tbz x4, #0, 7f\n" - "ld1 { v15.s }[2], [x12]\n" + "tbz x0, #1, 6f\n" + "ld1 { v15.d }[0], [x19], #0x8\n" + "tbz x0, #0, 7f\n" + "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 7f\n" - "ld1 { v15.s }[0], [x12]\n" + "tbz x0, #0, 7f\n" + "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v18.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "mov v5.16b, v20.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v11.16b, v15.16b\n" - "ldr d2, [x3, #0x10]\n" - "mov v8.16b, v20.16b\n" - "ldr d3, [x3, #0x18]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "mov v17.16b, v15.16b\n" + "mov v8.16b, v16.16b\n" + "ldr d2, [x23, #0x10]\n" + "ldr d3, [x23, #0x18]\n" "mov 
v10.16b, v15.16b\n" - "ldr d4, [x3, #0x20]\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "mov v9.16b, v20.16b\n" - "ldp x28, x27, [x25, #0x0]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "ldp x26, x13, [x25, #0x10]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "ldp x24, x23, [x25, #0x20]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "add x28, x28, x10\n" - "add x27, x27, x10\n" - "add x26, x26, x10\n" - "add x13, x13, x10\n" - "add x24, x24, x10\n" - "add x23, x23, x10\n" - "add x22, x22, x10\n" - "add x21, x21, x10\n" - "add x20, x20, x10\n" - "add x0, x0, x10\n" - "tbz x4, #2, 9f\n" + "mov v7.16b, v16.16b\n" + "ldr d4, [x23, #0x20]\n" + "ldp x28, x6, [x20, #0x0]\n" + "mov v6.16b, v15.16b\n" + "mov v5.16b, v16.16b\n" + "ldp x26, x25, [x20, #0x10]\n" + "ldp x5, x2, [x20, #0x20]\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "ldp x27, x21, [x20, #0x30]\n" + "ldp x12, x19, [x20, #0x40]\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "add x28, x28, x24\n" + "add x6, x6, x24\n" + "add x26, x26, x24\n" + "add x25, x25, x24\n" + "add x5, x5, x24\n" + "add x2, x2, x24\n" + "add x27, x27, x24\n" + "add x21, x21, x24\n" + "add x12, x12, x24\n" + "add x19, x19, x24\n" + "tbz x0, #2, 9f\n" "ld1 { v31.s }[0], [x28], #0x4\n" - "ld1 { v30.s }[0], [x27], #0x4\n" + "ld1 { v30.s }[0], [x6], #0x4\n" "ld1 { v29.s }[0], [x26], #0x4\n" - "ld1 { v28.s }[0], [x13], #0x4\n" - "ld1 { v27.s }[0], [x24], #0x4\n" - "ld1 { v23.s }[0], [x23], #0x4\n" - "ld1 { v25.s }[0], [x22], #0x4\n" + "ld1 { v28.s }[0], [x25], #0x4\n" + "ld1 { v27.s }[0], [x5], #0x4\n" + "ld1 { v23.s }[0], [x2], #0x4\n" + "ld1 { v25.s }[0], [x27], #0x4\n" "ld1 { v24.s }[0], [x21], #0x4\n" - "ld1 { v26.s }[0], [x20], #0x4\n" - "ld1 { v22.s }[0], [x0], #0x4\n" - "tbz x4, #1, 8f\n" + "ld1 { v26.s }[0], [x12], #0x4\n" + "ld1 { v22.s }[0], [x19], #0x4\n" + "tbz x0, #1, 8f\n" "ld1 { v31.h }[2], [x28], #0x2\n" - "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v30.h }[2], [x6], #0x2\n" "ld1 { v29.h }[2], [x26], #0x2\n" - "ld1 { v28.h }[2], [x13], #0x2\n" - "ld1 { v27.h }[2], [x24], #0x2\n" - "ld1 { v23.h }[2], [x23], #0x2\n" - "ld1 { v25.h }[2], [x22], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x5], #0x2\n" + "ld1 { v23.h }[2], [x2], #0x2\n" + "ld1 { v25.h }[2], [x27], #0x2\n" "ld1 { v24.h }[2], [x21], #0x2\n" - "ld1 { v26.h }[2], [x20], #0x2\n" - "ld1 { v22.h }[2], [x0], #0x2\n" - "tbz x4, #0, 11f\n" + "ld1 { v26.h }[2], [x12], #0x2\n" + "ld1 { v22.h }[2], [x19], #0x2\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[6], [x28]\n" - "ld1 { v30.b }[6], [x27]\n" + "ld1 { v30.b }[6], [x6]\n" "ld1 { v29.b }[6], [x26]\n" - "ld1 { v28.b }[6], [x13]\n" - "ld1 { v27.b }[6], [x24]\n" - "ld1 { v23.b }[6], [x23]\n" - "ld1 { v25.b }[6], [x22]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x5]\n" + "ld1 { v23.b }[6], [x2]\n" + "ld1 { v25.b }[6], [x27]\n" "ld1 { v24.b }[6], [x21]\n" - "ld1 { v26.b }[6], [x20]\n" - "ld1 { v22.b }[6], [x0]\n" + "ld1 { v26.b }[6], [x12]\n" + "ld1 { v22.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[4], [x28]\n" - "ld1 { v30.b }[4], [x27]\n" + "ld1 { v30.b }[4], [x6]\n" "ld1 { v29.b }[4], [x26]\n" - "ld1 { v28.b }[4], [x13]\n" - "ld1 { v27.b }[4], [x24]\n" - "ld1 { v23.b }[4], [x23]\n" - "ld1 { v25.b }[4], [x22]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x5]\n" + "ld1 { v23.b 
}[4], [x2]\n" + "ld1 { v25.b }[4], [x27]\n" "ld1 { v24.b }[4], [x21]\n" - "ld1 { v26.b }[4], [x20]\n" - "ld1 { v22.b }[4], [x0]\n" + "ld1 { v26.b }[4], [x12]\n" + "ld1 { v22.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x4, #1, 10f\n" + "tbz x0, #1, 10f\n" "ld1 { v31.h }[0], [x28], #0x2\n" - "ld1 { v30.h }[0], [x27], #0x2\n" + "ld1 { v30.h }[0], [x6], #0x2\n" "ld1 { v29.h }[0], [x26], #0x2\n" - "ld1 { v28.h }[0], [x13], #0x2\n" - "ld1 { v27.h }[0], [x24], #0x2\n" - "ld1 { v23.h }[0], [x23], #0x2\n" - "ld1 { v25.h }[0], [x22], #0x2\n" + "ld1 { v28.h }[0], [x25], #0x2\n" + "ld1 { v27.h }[0], [x5], #0x2\n" + "ld1 { v23.h }[0], [x2], #0x2\n" + "ld1 { v25.h }[0], [x27], #0x2\n" "ld1 { v24.h }[0], [x21], #0x2\n" - "ld1 { v26.h }[0], [x20], #0x2\n" - "ld1 { v22.h }[0], [x0], #0x2\n" - "tbz x4, #0, 11f\n" + "ld1 { v26.h }[0], [x12], #0x2\n" + "ld1 { v22.h }[0], [x19], #0x2\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[2], [x28]\n" - "ld1 { v30.b }[2], [x27]\n" + "ld1 { v30.b }[2], [x6]\n" "ld1 { v29.b }[2], [x26]\n" - "ld1 { v28.b }[2], [x13]\n" - "ld1 { v27.b }[2], [x24]\n" - "ld1 { v23.b }[2], [x23]\n" - "ld1 { v25.b }[2], [x22]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x5]\n" + "ld1 { v23.b }[2], [x2]\n" + "ld1 { v25.b }[2], [x27]\n" "ld1 { v24.b }[2], [x21]\n" - "ld1 { v26.b }[2], [x20]\n" - "ld1 { v22.b }[2], [x0]\n" + "ld1 { v26.b }[2], [x12]\n" + "ld1 { v22.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[0], [x28]\n" - "ld1 { v30.b }[0], [x27]\n" + "ld1 { v30.b }[0], [x6]\n" "ld1 { v29.b }[0], [x26]\n" - "ld1 { v28.b }[0], [x13]\n" - "ld1 { v27.b }[0], [x24]\n" - "ld1 { v23.b }[0], [x23]\n" - "ld1 { v25.b }[0], [x22]\n" + "ld1 { v28.b }[0], [x25]\n" + "ld1 { v27.b }[0], [x5]\n" + "ld1 { v23.b }[0], [x2]\n" + "ld1 { v25.b }[0], [x27]\n" "ld1 { v24.b }[0], [x21]\n" - "ld1 { v26.b }[0], [x20]\n" - "ld1 { v22.b }[0], [x0]\n" + "ld1 { v26.b }[0], [x12]\n" + "ld1 { v22.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "ldr x20, [x25, #0x50]\n" - "usubl v31.8h, v31.8b, v7.8b\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "usubl v30.8h, v30.8b, v9.8b\n" "smlal v15.4s, v31.4h, v0.4h\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "usubl v29.8h, v29.8b, v7.8b\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v11.4s, v29.4h, v0.4h\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal2 v8.4s, v29.8h, v0.8h\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v28.4h, v0.4h\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "usubl v22.8h, v22.8b, v7.8b\n" + "ldr x19, [x20, #0x50]\n" + "usubl v29.8h, v29.8b, v9.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal v10.4s, v29.4h, v0.4h\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "add x19, x19, x24\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "add x20, x20, x10\n" - "smlal v18.4s, v27.4h, v1.4h\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "smlal2 v8.4s, v28.8h, v1.8h\n" - "smlal v10.4s, v23.4h, v1.4h\n" - "smlal2 v9.4s, v23.8h, v1.8h\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "smlal2 v16.4s, v30.8h, 
v1.8h\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "usubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v23.4h, v2.4h\n" - "smlal2 v8.4s, v23.8h, v2.8h\n" - "tbz x4, #2, 13f\n" - "ld1 { v31.s }[0], [x20], #0x4\n" - "tbz x4, #1, 12f\n" - "ld1 { v31.h }[2], [x20], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[6], [x20]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" + "tbz x0, #2, 13f\n" + "ld1 { v31.s }[0], [x19], #0x4\n" + "tbz x0, #1, 12f\n" + "ld1 { v31.h }[2], [x19], #0x2\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[6], [x19]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[4], [x20]\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[4], [x19]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - "tbz x4, #1, 14f\n" - "ld1 { v31.h }[0], [x20], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[2], [x20]\n" + "tbz x0, #1, 14f\n" + "ld1 { v31.h }[0], [x19], #0x2\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[2], [x19]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[0], [x20]\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[0], [x19]\n" "15:" // Oddments: Load (1, 3): Bit 2: End + "usubl v31.8h, v31.8b, v9.8b\n" + "ldr x15, [x20, #0x58]\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v5.4s, v31.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr x28, [x25, #0x58]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "add x28, x28, x10\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v10.4s, v31.4h, v2.4h\n" - "smlal2 v9.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v3.4h\n" - "smlal2 v8.4s, v31.8h, v3.8h\n" - "tbz x4, #2, 17f\n" - "ld1 { v30.s }[0], [x28], #0x4\n" - "tbz x4, #1, 16f\n" - "ld1 { v30.h }[2], [x28], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[6], [x28]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "add x15, x15, x24\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" + "tbz x0, #2, 17f\n" + "ld1 { v30.s }[0], [x15], #0x4\n" + "tbz x0, #1, 16f\n" + "ld1 { v30.h }[2], [x15], #0x2\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[6], [x15]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[4], [x28]\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[4], [x15]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x4, #1, 18f\n" - "ld1 { v30.h }[0], [x28], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[2], [x28]\n" + "tbz x0, #1, 18f\n" + "ld1 { v30.h }[0], [x15], #0x2\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[2], [x15]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[0], [x28]\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[0], [x15]\n" "19:" // Oddments: Load (1, 4): Bit 2: End + "usubl v30.8h, v30.8b, v9.8b\n" + "ldr x19, [x20, #0x60]\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v5.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - 
"ldr x0, [x25, #0x60]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "add x0, x0, x10\n" - "smlal v10.4s, v30.4h, v3.4h\n" - "smlal2 v9.4s, v30.8h, v3.8h\n" - "tbz x4, #2, 21f\n" - "ld1 { v27.s }[0], [x0], #0x4\n" - "tbz x4, #1, 20f\n" - "ld1 { v27.h }[2], [x0], #0x2\n" - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[6], [x0]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "add x19, x19, x24\n" + "tbz x0, #2, 21f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x0, #1, 20f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[6], [x19]\n" "b 23f\n" "20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[4], [x0]\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[4], [x19]\n" "b 23f\n" "21:" // Oddments: Load (0, 5): Bit 2: Unset - "tbz x4, #1, 22f\n" - "ld1 { v27.h }[0], [x0], #0x2\n" - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[2], [x0]\n" + "tbz x0, #1, 22f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[2], [x19]\n" "b 23f\n" "22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[0], [x0]\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[0], [x19]\n" "23:" // Oddments: Load (0, 5): Bit 2: End - "smlal v11.4s, v30.4h, v4.4h\n" - "ldr d0, [x3, #0x28]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal2 v8.4s, v30.8h, v4.8h\n" - "ldr x7, [x25, #0x68]\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "add x7, x7, x10\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "smlal2 v5.4s, v27.8h, v4.8h\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "ldr d0, [x23, #0x28]\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "ldr x27, [x20, #0x68]\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "add x27, x27, x24\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v20.4s, v29.8h, v0.8h\n" - "smlal v18.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v11.4s, v22.4h, v0.4h\n" - "smlal2 v8.4s, v22.8h, v0.8h\n" - "tbz x4, #2, 25f\n" - "ld1 { v25.s }[0], [x7], #0x4\n" - "tbz x4, #1, 24f\n" - "ld1 { v25.h }[2], [x7], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[6], [x7]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" + "tbz x0, #2, 25f\n" + "ld1 { v25.s }[0], [x27], #0x4\n" + "tbz x0, #1, 24f\n" + "ld1 { v25.h }[2], [x27], #0x2\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[6], [x27]\n" "b 27f\n" "24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[4], [x7]\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[4], [x27]\n" "b 27f\n" "25:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x4, #1, 26f\n" - "ld1 { v25.h }[0], [x7], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[2], [x7]\n" + "tbz x0, #1, 26f\n" + "ld1 { v25.h }[0], [x27], #0x2\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[2], [x27]\n" "b 27f\n" "26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[0], [x7]\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[0], [x27]\n" "27:" // Oddments: Load (2, 1): Bit 2: End - "ldr d1, [x3, #0x30]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v25.4h, v0.4h\n" - "ldr x26, [x25, #0x70]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v25.8h, v0.8h\n" - "add x26, x26, x10\n" + "ldr d1, [x23, #0x30]\n" + "usubl v25.8h, v25.8b, 
v9.8b\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "ldr x5, [x20, #0x70]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "add x5, x5, x24\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "smlal v18.4s, v23.4h, v1.4h\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "smlal v11.4s, v25.4h, v1.4h\n" - "smlal2 v8.4s, v25.8h, v1.8h\n" - "tbz x4, #2, 29f\n" - "ld1 { v24.s }[0], [x26], #0x4\n" - "tbz x4, #1, 28f\n" - "ld1 { v24.h }[2], [x26], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[6], [x26]\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" + "tbz x0, #2, 29f\n" + "ld1 { v24.s }[0], [x5], #0x4\n" + "tbz x0, #1, 28f\n" + "ld1 { v24.h }[2], [x5], #0x2\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[6], [x5]\n" "b 31f\n" "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[4], [x26]\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[4], [x5]\n" "b 31f\n" "29:" // Oddments: Load (2, 2): Bit 2: Unset - "tbz x4, #1, 30f\n" - "ld1 { v24.h }[0], [x26], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[2], [x26]\n" + "tbz x0, #1, 30f\n" + "ld1 { v24.h }[0], [x5], #0x2\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[2], [x5]\n" "b 31f\n" "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[0], [x26]\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[0], [x5]\n" "31:" // Oddments: Load (2, 2): Bit 2: End - "ldr d2, [x3, #0x38]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v1.4h\n" - "ldr x23, [x25, #0x78]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v1.8h\n" - "add x23, x23, x10\n" + "ldr d2, [x23, #0x38]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "ldr x11, [x20, #0x78]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "add x11, x11, x24\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v20.4s, v23.8h, v2.8h\n" - "smlal v18.4s, v31.4h, v2.4h\n" - "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal v11.4s, v24.4h, v2.4h\n" - "smlal2 v8.4s, v24.8h, v2.8h\n" - "tbz x4, #2, 33f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x4, #1, 32f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[6], [x23]\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" + "tbz x0, #2, 33f\n" + "ld1 { v27.s }[0], [x11], #0x4\n" + "tbz x0, #1, 32f\n" + "ld1 { v27.h }[2], [x11], #0x2\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[6], [x11]\n" "b 35f\n" "32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[4], [x11]\n" "b 35f\n" "33:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x4, #1, 34f\n" - "ld1 { v27.h }[0], [x23], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x0, #1, 34f\n" + "ld1 { v27.h }[0], [x11], #0x2\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[2], [x11]\n" "b 35f\n" "34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[0], [x11]\n" "35:" // Oddments: Load (2, 3): Bit 2: End - "ldr d3, [x3, #0x40]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v2.4h\n" - "ldr x20, [x25, #0x80]\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v27.8h, v2.8h\n" - "add x20, x20, x10\n" + "ldr d3, [x23, #0x40]\n" + 
"usubl v27.8h, v27.8b, v9.8b\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "ldr x12, [x20, #0x80]\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "add x12, x12, x24\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v20.4s, v31.8h, v3.8h\n" - "smlal v18.4s, v30.4h, v3.4h\n" - "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal v11.4s, v27.4h, v3.4h\n" - "smlal2 v8.4s, v27.8h, v3.8h\n" - "tbz x4, #2, 37f\n" - "ld1 { v23.s }[0], [x20], #0x4\n" - "tbz x4, #1, 36f\n" - "ld1 { v23.h }[2], [x20], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[6], [x20]\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" + "tbz x0, #2, 37f\n" + "ld1 { v23.s }[0], [x12], #0x4\n" + "tbz x0, #1, 36f\n" + "ld1 { v23.h }[2], [x12], #0x2\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[6], [x12]\n" "b 39f\n" "36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[4], [x20]\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[4], [x12]\n" "b 39f\n" "37:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x4, #1, 38f\n" - "ld1 { v23.h }[0], [x20], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[2], [x20]\n" + "tbz x0, #1, 38f\n" + "ld1 { v23.h }[0], [x12], #0x2\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[2], [x12]\n" "b 39f\n" "38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[0], [x20]\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[0], [x12]\n" "39:" // Oddments: Load (2, 4): Bit 2: End - "ldr d4, [x3, #0x48]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v10.4s, v23.4h, v3.4h\n" - "ldr x22, [x25, #0x88]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v23.8h, v3.8h\n" - "add x22, x22, x10\n" + "ldr d4, [x23, #0x48]\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "ldr x26, [x20, #0x88]\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "add x26, x26, x24\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v20.4s, v30.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v23.4h, v4.4h\n" - "smlal2 v8.4s, v23.8h, v4.8h\n" - "tbz x4, #2, 41f\n" - "ld1 { v28.s }[0], [x22], #0x4\n" - "tbz x4, #1, 40f\n" - "ld1 { v28.h }[2], [x22], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[6], [x22]\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" + "tbz x0, #2, 41f\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "tbz x0, #1, 40f\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[6], [x26]\n" "b 43f\n" "40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[4], [x22]\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[4], [x26]\n" "b 43f\n" "41:" // Oddments: Load (2, 5): Bit 2: Unset - "tbz x4, #1, 42f\n" - "ld1 { v28.h }[0], [x22], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[2], [x22]\n" + "tbz x0, #1, 42f\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[2], [x26]\n" "b 43f\n" "42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[0], [x22]\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[0], [x26]\n" "43:" // Oddments: Load (2, 5): Bit 2: End - "ldr d0, [x3, #0x50]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v10.4s, v28.4h, v4.4h\n" - "ldr x13, [x25, #0x90]\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "add x13, x13, 
x10\n" + "ldr d0, [x23, #0x50]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "ldr x14, [x20, #0x90]\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "add x14, x14, x24\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v20.4s, v22.8h, v0.8h\n" - "smlal v18.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 45f\n" - "ld1 { v31.s }[0], [x13], #0x4\n" - "tbz x4, #1, 44f\n" - "ld1 { v31.h }[2], [x13], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[6], [x13]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "tbz x0, #2, 45f\n" + "ld1 { v31.s }[0], [x14], #0x4\n" + "tbz x0, #1, 44f\n" + "ld1 { v31.h }[2], [x14], #0x2\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[6], [x14]\n" "b 47f\n" "44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[4], [x13]\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[4], [x14]\n" "b 47f\n" "45:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x4, #1, 46f\n" - "ld1 { v31.h }[0], [x13], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[2], [x13]\n" + "tbz x0, #1, 46f\n" + "ld1 { v31.h }[0], [x14], #0x2\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[2], [x14]\n" "b 47f\n" "46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[0], [x13]\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[0], [x14]\n" "47:" // Oddments: Load (3, 0): Bit 2: End - "ldr x21, [x25, #0x98]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v11.4s, v31.4h, v0.4h\n" - "smlal2 v8.4s, v31.8h, v0.8h\n" - "add x21, x21, x10\n" - "tbz x4, #2, 49f\n" - "ld1 { v30.s }[0], [x21], #0x4\n" - "tbz x4, #1, 48f\n" - "ld1 { v30.h }[2], [x21], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[6], [x21]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "ldr x15, [x20, #0x98]\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" + "add x15, x15, x24\n" + "tbz x0, #2, 49f\n" + "ld1 { v30.s }[0], [x15], #0x4\n" + "tbz x0, #1, 48f\n" + "ld1 { v30.h }[2], [x15], #0x2\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[6], [x15]\n" "b 51f\n" "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[4], [x21]\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[4], [x15]\n" "b 51f\n" "49:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x4, #1, 50f\n" - "ld1 { v30.h }[0], [x21], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[2], [x21]\n" + "tbz x0, #1, 50f\n" + "ld1 { v30.h }[0], [x15], #0x2\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[2], [x15]\n" "b 51f\n" "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[0], [x21]\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[0], [x15]\n" "51:" // Oddments: Load (3, 1): Bit 2: End - "ldr d1, [x3, #0x58]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v10.4s, v30.4h, v0.4h\n" - "ldr x14, [x25, #0xa0]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v30.8h, v0.8h\n" - "add x14, x14, x10\n" + "ldr d1, [x23, #0x58]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "ldr x21, [x20, #0xa0]\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "add x21, x21, x24\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v20.4s, v25.8h, v1.8h\n" - "smlal v18.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "smlal v11.4s, v30.4h, v1.4h\n" - "smlal2 v8.4s, v30.8h, v1.8h\n" - "tbz x4, #2, 53f\n" - "ld1 { v26.s }[0], [x14], #0x4\n" - "tbz x4, #1, 52f\n" - "ld1 { v26.h }[2], [x14], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[6], [x14]\n" + 
"smlal2 v16.4s, v25.8h, v1.8h\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" + "tbz x0, #2, 53f\n" + "ld1 { v26.s }[0], [x21], #0x4\n" + "tbz x0, #1, 52f\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[6], [x21]\n" "b 55f\n" "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[4], [x14]\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[4], [x21]\n" "b 55f\n" "53:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x4, #1, 54f\n" - "ld1 { v26.h }[0], [x14], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[2], [x14]\n" + "tbz x0, #1, 54f\n" + "ld1 { v26.h }[0], [x21], #0x2\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[2], [x21]\n" "b 55f\n" "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[0], [x14]\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[0], [x21]\n" "55:" // Oddments: Load (3, 2): Bit 2: End - "ldr d2, [x3, #0x60]\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal v10.4s, v26.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v26.8h, v1.8h\n" - "add x11, x11, x10\n" + "ldr d2, [x23, #0x60]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "ldr x2, [x20, #0xa8]\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "add x2, x2, x24\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v20.4s, v24.8h, v2.8h\n" - "smlal v18.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "smlal v11.4s, v26.4h, v2.4h\n" - "smlal2 v8.4s, v26.8h, v2.8h\n" - "tbz x4, #2, 57f\n" - "ld1 { v25.s }[0], [x11], #0x4\n" - "tbz x4, #1, 56f\n" - "ld1 { v25.h }[2], [x11], #0x2\n" - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[6], [x11]\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" + "tbz x0, #2, 57f\n" + "ld1 { v25.s }[0], [x2], #0x4\n" + "tbz x0, #1, 56f\n" + "ld1 { v25.h }[2], [x2], #0x2\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[6], [x2]\n" "b 59f\n" "56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[4], [x11]\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[4], [x2]\n" "b 59f\n" "57:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x4, #1, 58f\n" - "ld1 { v25.h }[0], [x11], #0x2\n" - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[2], [x11]\n" + "tbz x0, #1, 58f\n" + "ld1 { v25.h }[0], [x2], #0x2\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[2], [x2]\n" "b 59f\n" "58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[0], [x11]\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[0], [x2]\n" "59:" // Oddments: Load (3, 3): Bit 2: End - "ldr d3, [x3, #0x68]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "add x24, x24, x10\n" + "ldr d3, [x23, #0x68]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "ldr x13, [x20, #0xb0]\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x13, x13, x24\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "smlal v18.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "tbz x4, #2, 61f\n" - "ld1 { v24.s }[0], [x24], #0x4\n" - "tbz x4, #1, 60f\n" - "ld1 { v24.h }[2], [x24], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { 
v24.b }[6], [x24]\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "tbz x0, #2, 61f\n" + "ld1 { v24.s }[0], [x13], #0x4\n" + "tbz x0, #1, 60f\n" + "ld1 { v24.h }[2], [x13], #0x2\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[6], [x13]\n" "b 63f\n" "60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[4], [x24]\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[4], [x13]\n" "b 63f\n" "61:" // Oddments: Load (3, 4): Bit 2: Unset - "tbz x4, #1, 62f\n" - "ld1 { v24.h }[0], [x24], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[2], [x24]\n" + "tbz x0, #1, 62f\n" + "ld1 { v24.h }[0], [x13], #0x2\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[2], [x13]\n" "b 63f\n" "62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[0], [x24]\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[0], [x13]\n" "63:" // Oddments: Load (3, 4): Bit 2: End - "ldr d4, [x3, #0x70]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "ldr x0, [x25, #0xb8]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "add x0, x0, x10\n" + "ldr d4, [x23, #0x70]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "ldr x9, [x20, #0xb8]\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "add x9, x9, x24\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v20.4s, v23.8h, v4.8h\n" - "smlal v18.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 65f\n" - "ld1 { v22.s }[0], [x0], #0x4\n" - "tbz x4, #1, 64f\n" - "ld1 { v22.h }[2], [x0], #0x2\n" - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[6], [x0]\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "tbz x0, #2, 65f\n" + "ld1 { v22.s }[0], [x9], #0x4\n" + "tbz x0, #1, 64f\n" + "ld1 { v22.h }[2], [x9], #0x2\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[6], [x9]\n" "b 67f\n" "64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[4], [x0]\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[4], [x9]\n" "b 67f\n" "65:" // Oddments: Load (3, 5): Bit 2: Unset - "tbz x4, #1, 66f\n" - "ld1 { v22.h }[0], [x0], #0x2\n" - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[2], [x0]\n" + "tbz x0, #1, 66f\n" + "ld1 { v22.h }[0], [x9], #0x2\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[2], [x9]\n" "b 67f\n" "66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[0], [x0]\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[0], [x9]\n" "67:" // Oddments: Load (3, 5): Bit 2: End - "ldr d0, [x3, #0x78]\n" - "usubl v22.8h, v22.8b, v7.8b\n" - "smlal v10.4s, v22.4h, v4.4h\n" - "ldr x15, [x25, #0xc0]\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v22.8h, v4.8h\n" - "add x15, x15, x10\n" + "ldr d0, [x23, #0x78]\n" + "usubl v22.8h, v22.8b, v9.8b\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "ldr x19, [x20, #0xc0]\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "add x19, x19, x24\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v20.4s, v31.8h, v0.8h\n" - "smlal v18.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "tbz x4, #2, 69f\n" - "ld1 { v27.s }[0], [x15], #0x4\n" - "tbz x4, #1, 68f\n" - "ld1 { v27.h }[2], [x15], #0x2\n" - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[6], [x15]\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" 
+ "smlal v17.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "tbz x0, #2, 69f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x0, #1, 68f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[6], [x19]\n" "b 71f\n" "68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[4], [x15]\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[4], [x19]\n" "b 71f\n" "69:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x4, #1, 70f\n" - "ld1 { v27.h }[0], [x15], #0x2\n" - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[2], [x15]\n" + "tbz x0, #1, 70f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[2], [x19]\n" "b 71f\n" "70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[0], [x15]\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[0], [x19]\n" "71:" // Oddments: Load (4, 0): Bit 2: End - "ldr x9, [x25, #0xc8]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v11.4s, v27.4h, v0.4h\n" - "smlal2 v8.4s, v27.8h, v0.8h\n" - "add x9, x9, x10\n" - "tbz x4, #2, 73f\n" - "ld1 { v23.s }[0], [x9], #0x4\n" - "tbz x4, #1, 72f\n" - "ld1 { v23.h }[2], [x9], #0x2\n" - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[6], [x9]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "ldr x28, [x20, #0xc8]\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" + "add x28, x28, x24\n" + "tbz x0, #2, 73f\n" + "ld1 { v23.s }[0], [x28], #0x4\n" + "tbz x0, #1, 72f\n" + "ld1 { v23.h }[2], [x28], #0x2\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[6], [x28]\n" "b 75f\n" "72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[4], [x9]\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[4], [x28]\n" "b 75f\n" "73:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x4, #1, 74f\n" - "ld1 { v23.h }[0], [x9], #0x2\n" - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[2], [x9]\n" + "tbz x0, #1, 74f\n" + "ld1 { v23.h }[0], [x28], #0x2\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[2], [x28]\n" "b 75f\n" "74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[0], [x9]\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[0], [x28]\n" "75:" // Oddments: Load (4, 1): Bit 2: End - "ldr d1, [x3, #0x80]\n" - "usubl v23.8h, v23.8b, v7.8b\n" - "smlal v10.4s, v23.4h, v0.4h\n" - "ldr x27, [x25, #0xd0]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v23.8h, v0.8h\n" - "add x27, x27, x10\n" + "ldr d1, [x23, #0x80]\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "ldr x6, [x20, #0xd0]\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "add x6, x6, x24\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v20.4s, v30.8h, v1.8h\n" - "smlal v18.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "smlal v11.4s, v23.4h, v1.4h\n" - "smlal2 v8.4s, v23.8h, v1.8h\n" - "tbz x4, #2, 77f\n" - "ld1 { v31.s }[0], [x27], #0x4\n" - "tbz x4, #1, 76f\n" - "ld1 { v31.h }[2], [x27], #0x2\n" - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[6], [x27]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" + "tbz x0, #2, 77f\n" + "ld1 { v31.s }[0], [x6], #0x4\n" + "tbz x0, #1, 76f\n" + "ld1 { v31.h }[2], [x6], #0x2\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[6], [x6]\n" "b 79f\n" "76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[4], [x27]\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[4], [x6]\n" "b 79f\n" "77:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x4, #1, 
78f\n" - "ld1 { v31.h }[0], [x27], #0x2\n" - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[2], [x27]\n" + "tbz x0, #1, 78f\n" + "ld1 { v31.h }[0], [x6], #0x2\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[2], [x6]\n" "b 79f\n" "78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[0], [x27]\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[0], [x6]\n" "79:" // Oddments: Load (4, 2): Bit 2: End - "ldr d2, [x3, #0x88]\n" - "usubl v31.8h, v31.8b, v7.8b\n" - "smlal v10.4s, v31.4h, v1.4h\n" - "ldr x28, [x25, #0xd8]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "add x28, x28, x10\n" + "ldr d2, [x23, #0x88]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "ldr x27, [x20, #0xd8]\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "add x27, x27, x24\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v20.4s, v26.8h, v2.8h\n" - "smlal v18.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v8.4s, v31.8h, v2.8h\n" - "tbz x4, #2, 81f\n" - "ld1 { v30.s }[0], [x28], #0x4\n" - "tbz x4, #1, 80f\n" - "ld1 { v30.h }[2], [x28], #0x2\n" - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[6], [x28]\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" + "tbz x0, #2, 81f\n" + "ld1 { v30.s }[0], [x27], #0x4\n" + "tbz x0, #1, 80f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[6], [x27]\n" "b 83f\n" "80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[4], [x28]\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[4], [x27]\n" "b 83f\n" "81:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x4, #1, 82f\n" - "ld1 { v30.h }[0], [x28], #0x2\n" - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[2], [x28]\n" + "tbz x0, #1, 82f\n" + "ld1 { v30.h }[0], [x27], #0x2\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[2], [x27]\n" "b 83f\n" "82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[0], [x28]\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[0], [x27]\n" "83:" // Oddments: Load (4, 3): Bit 2: End - "ldr d3, [x3, #0x90]\n" - "usubl v30.8h, v30.8b, v7.8b\n" - "smlal v10.4s, v30.4h, v2.4h\n" - "ldr x12, [x25, #0xe0]\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v30.8h, v2.8h\n" - "add x12, x12, x10\n" + "ldr d3, [x23, #0x90]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "ldr x11, [x20, #0xe0]\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "add x11, x11, x24\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v20.4s, v25.8h, v3.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v30.4h, v3.4h\n" - "smlal2 v8.4s, v30.8h, v3.8h\n" - "tbz x4, #2, 85f\n" - "ld1 { v28.s }[0], [x12], #0x4\n" - "tbz x4, #1, 84f\n" - "ld1 { v28.h }[2], [x12], #0x2\n" - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[6], [x12]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" + "tbz x0, #2, 85f\n" + "ld1 { v28.s }[0], [x11], #0x4\n" + "tbz x0, #1, 84f\n" + "ld1 { v28.h }[2], [x11], #0x2\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[6], [x11]\n" "b 87f\n" "84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[4], [x12]\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[4], [x11]\n" "b 87f\n" "85:" // Oddments: Load (4, 4): 
Bit 2: Unset - "tbz x4, #1, 86f\n" - "ld1 { v28.h }[0], [x12], #0x2\n" - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[2], [x12]\n" + "tbz x0, #1, 86f\n" + "ld1 { v28.h }[0], [x11], #0x2\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[2], [x11]\n" "b 87f\n" "86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[0], [x12]\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[0], [x11]\n" "87:" // Oddments: Load (4, 4): Bit 2: End - "ldr d4, [x3, #0x98]\n" - "usubl v28.8h, v28.8b, v7.8b\n" - "smlal v10.4s, v28.4h, v3.4h\n" - "ldr x7, [x25, #0xe8]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v28.8h, v3.8h\n" - "add x7, x7, x10\n" + "ldr d4, [x23, #0x98]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "ldr x17, [x20, #0xe8]\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "add x17, x17, x24\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v20.4s, v24.8h, v4.8h\n" - "smlal v18.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "smlal v11.4s, v28.4h, v4.4h\n" - "smlal2 v8.4s, v28.8h, v4.8h\n" - "tbz x4, #2, 89f\n" - "ld1 { v26.s }[0], [x7], #0x4\n" - "tbz x4, #1, 88f\n" - "ld1 { v26.h }[2], [x7], #0x2\n" - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[6], [x7]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" + "tbz x0, #2, 89f\n" + "ld1 { v26.s }[0], [x17], #0x4\n" + "tbz x0, #1, 88f\n" + "ld1 { v26.h }[2], [x17], #0x2\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[6], [x17]\n" "b 91f\n" "88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[4], [x7]\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[4], [x17]\n" "b 91f\n" "89:" // Oddments: Load (4, 5): Bit 2: Unset - "tbz x4, #1, 90f\n" - "ld1 { v26.h }[0], [x7], #0x2\n" - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[2], [x7]\n" + "tbz x0, #1, 90f\n" + "ld1 { v26.h }[0], [x17], #0x2\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[2], [x17]\n" "b 91f\n" "90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[0], [x7]\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[0], [x17]\n" "91:" // Oddments: Load (4, 5): Bit 2: End - "ldr d0, [x3, #0xa0]\n" - "usubl v26.8h, v26.8b, v7.8b\n" - "smlal v10.4s, v26.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "usubl v0.8h, v0.8b, v13.8b\n" - "smlal2 v9.4s, v26.8h, v4.8h\n" - "add x26, x26, x10\n" + "ldr d0, [x23, #0xa0]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "usubl v0.8h, v0.8b, v14.8b\n" + "ldr x5, [x20, #0xf0]\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "add x5, x5, x24\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v20.4s, v27.8h, v0.8h\n" - "smlal v18.4s, v23.4h, v0.4h\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "tbz x4, #2, 93f\n" - "ld1 { v25.s }[0], [x26], #0x4\n" - "tbz x4, #1, 92f\n" - "ld1 { v25.h }[2], [x26], #0x2\n" - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[6], [x26]\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "tbz x0, #2, 93f\n" + "ld1 { v25.s }[0], [x5], #0x4\n" + "tbz x0, #1, 92f\n" + "ld1 { v25.h }[2], [x5], #0x2\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[6], [x5]\n" "b 95f\n" "92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[4], [x26]\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[4], [x5]\n" "b 95f\n" "93:" // Oddments: Load (5, 0): Bit 2: Unset - "tbz x4, #1, 94f\n" - "ld1 { v25.h }[0], [x26], #0x2\n" - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[2], 
[x26]\n" + "tbz x0, #1, 94f\n" + "ld1 { v25.h }[0], [x5], #0x2\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[2], [x5]\n" "b 95f\n" "94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[0], [x26]\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[0], [x5]\n" "95:" // Oddments: Load (5, 0): Bit 2: End - "ldr x23, [x25, #0xf8]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v8.4s, v25.8h, v0.8h\n" - "add x23, x23, x10\n" - "tbz x4, #2, 97f\n" - "ld1 { v24.s }[0], [x23], #0x4\n" - "tbz x4, #1, 96f\n" - "ld1 { v24.h }[2], [x23], #0x2\n" - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[6], [x23]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "add x25, x25, x24\n" + "tbz x0, #2, 97f\n" + "ld1 { v24.s }[0], [x25], #0x4\n" + "tbz x0, #1, 96f\n" + "ld1 { v24.h }[2], [x25], #0x2\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[6], [x25]\n" "b 99f\n" "96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[4], [x23]\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[4], [x25]\n" "b 99f\n" "97:" // Oddments: Load (5, 1): Bit 2: Unset - "tbz x4, #1, 98f\n" - "ld1 { v24.h }[0], [x23], #0x2\n" - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[2], [x23]\n" + "tbz x0, #1, 98f\n" + "ld1 { v24.h }[0], [x25], #0x2\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[2], [x25]\n" "b 99f\n" "98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[0], [x23]\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[0], [x25]\n" "99:" // Oddments: Load (5, 1): Bit 2: End - "ldr d1, [x3, #0xa8]\n" - "usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v0.4h\n" - "ldr x22, [x25, #0x100]\n" - "usubl v1.8h, v1.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v0.8h\n" - "add x22, x22, x10\n" + "ldr d1, [x23, #0xa8]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "usubl v1.8h, v1.8b, v14.8b\n" + "ldr x26, [x20, #0x100]\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" + "add x26, x26, x24\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v20.4s, v23.8h, v1.8h\n" - "smlal v18.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v8.4s, v24.8h, v1.8h\n" - "tbz x4, #2, 101f\n" - "ld1 { v27.s }[0], [x22], #0x4\n" - "tbz x4, #1, 100f\n" - "ld1 { v27.h }[2], [x22], #0x2\n" - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[6], [x22]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "tbz x0, #2, 101f\n" + "ld1 { v27.s }[0], [x26], #0x4\n" + "tbz x0, #1, 100f\n" + "ld1 { v27.h }[2], [x26], #0x2\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[6], [x26]\n" "b 103f\n" "100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[4], [x22]\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[4], [x26]\n" "b 103f\n" "101:" // Oddments: Load (5, 2): Bit 2: Unset - "tbz x4, #1, 102f\n" - "ld1 { v27.h }[0], [x22], #0x2\n" - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[2], [x22]\n" + "tbz x0, #1, 102f\n" + "ld1 { v27.h }[0], [x26], #0x2\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[2], [x26]\n" "b 103f\n" "102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[0], [x22]\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[0], [x26]\n" "103:" // Oddments: Load (5, 2): Bit 2: End - "ldr d2, [x3, #0xb0]\n" - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v1.4h\n" - "ldr x20, 
[x25, #0x108]\n" - "usubl v2.8h, v2.8b, v13.8b\n" - "smlal2 v9.4s, v27.8h, v1.8h\n" - "add x20, x20, x10\n" + "ldr d2, [x23, #0xb0]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "usubl v2.8h, v2.8b, v14.8b\n" + "ldr x12, [x20, #0x108]\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" + "add x12, x12, x24\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v20.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "smlal v11.4s, v27.4h, v2.4h\n" - "smlal2 v8.4s, v27.8h, v2.8h\n" - "tbz x4, #2, 105f\n" - "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x4, #1, 104f\n" - "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[6], [x20]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "tbz x0, #2, 105f\n" + "ld1 { v25.s }[0], [x12], #0x4\n" + "tbz x0, #1, 104f\n" + "ld1 { v25.h }[2], [x12], #0x2\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[6], [x12]\n" "b 107f\n" "104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[4], [x20]\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[4], [x12]\n" "b 107f\n" "105:" // Oddments: Load (5, 3): Bit 2: Unset - "tbz x4, #1, 106f\n" - "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[2], [x20]\n" + "tbz x0, #1, 106f\n" + "ld1 { v25.h }[0], [x12], #0x2\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[2], [x12]\n" "b 107f\n" "106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[0], [x20]\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[0], [x12]\n" "107:" // Oddments: Load (5, 3): Bit 2: End - "ldr d3, [x3, #0xb8]\n" - "usubl v25.8h, v25.8b, v7.8b\n" - "smlal v10.4s, v25.4h, v2.4h\n" - "ldr x13, [x25, #0x110]\n" - "usubl v3.8h, v3.8b, v13.8b\n" - "smlal2 v9.4s, v25.8h, v2.8h\n" - "add x13, x13, x10\n" + "ldr d3, [x23, #0xb8]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "usubl v3.8h, v3.8b, v14.8b\n" + "ldr x14, [x20, #0x110]\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x14, x14, x24\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v20.4s, v30.8h, v3.8h\n" - "smlal v18.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "smlal v11.4s, v25.4h, v3.4h\n" - "smlal2 v8.4s, v25.8h, v3.8h\n" - "tbz x4, #2, 109f\n" - "ld1 { v24.s }[0], [x13], #0x4\n" - "tbz x4, #1, 108f\n" - "ld1 { v24.h }[2], [x13], #0x2\n" - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[6], [x13]\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "tbz x0, #2, 109f\n" + "ld1 { v24.s }[0], [x14], #0x4\n" + "tbz x0, #1, 108f\n" + "ld1 { v24.h }[2], [x14], #0x2\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[6], [x14]\n" "b 111f\n" "108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[4], [x13]\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[4], [x14]\n" "b 111f\n" "109:" // Oddments: Load (5, 4): Bit 2: Unset - "tbz x4, #1, 110f\n" - "ld1 { v24.h }[0], [x13], #0x2\n" - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[2], [x13]\n" + "tbz x0, #1, 110f\n" + "ld1 { v24.h }[0], [x14], #0x2\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[2], [x14]\n" "b 111f\n" "110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[0], [x13]\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[0], [x14]\n" "111:" // Oddments: Load (5, 4): Bit 2: End - "ldr d4, [x3, #0xc0]\n" - 
"usubl v24.8h, v24.8b, v7.8b\n" - "smlal v10.4s, v24.4h, v3.4h\n" - "ldr x21, [x25, #0x118]\n" - "usubl v4.8h, v4.8b, v13.8b\n" - "smlal2 v9.4s, v24.8h, v3.8h\n" - "add x21, x21, x10\n" + "ldr d4, [x23, #0xc0]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "usubl v4.8h, v4.8b, v14.8b\n" + "ldr x21, [x20, #0x118]\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "add x21, x21, x24\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v20.4s, v28.8h, v4.8h\n" - "smlal v18.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "smlal v11.4s, v24.4h, v4.4h\n" - "smlal2 v8.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 113f\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "tbz x0, #2, 113f\n" "ld1 { v27.s }[0], [x21], #0x4\n" - "tbz x4, #1, 112f\n" + "tbz x0, #1, 112f\n" "ld1 { v27.h }[2], [x21], #0x2\n" - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[6], [x21]\n" "b 115f\n" "112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[4], [x21]\n" "b 115f\n" "113:" // Oddments: Load (5, 5): Bit 2: Unset - "tbz x4, #1, 114f\n" + "tbz x0, #1, 114f\n" "ld1 { v27.h }[0], [x21], #0x2\n" - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[2], [x21]\n" "b 115f\n" "114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[0], [x21]\n" "115:" // Oddments: Load (5, 5): Bit 2: End - "usubl v27.8h, v27.8b, v7.8b\n" - "smlal v10.4s, v27.4h, v4.4h\n" - "smlal2 v9.4s, v27.8h, v4.8h\n" - "tbz x4, #2, 117f\n" - "ld1 { v6.4s }, [x2], #0x10\n" - "ld1 { v21.4s }, [x5], #0x10\n" - "tbz x4, #1, 116f\n" - "ld1 { v17.d }[0], [x2], #0x8\n" - "ld1 { v14.d }[0], [x5], #0x8\n" - "tbz x4, #0, 119f\n" - "ld1 { v17.s }[2], [x2]\n" - "ld1 { v14.s }[2], [x5]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "smlal2 v5.4s, v27.8h, v4.8h\n" + "tbz x0, #2, 117f\n" + "ld1 { v12.4s }, [x10], #0x10\n" + "ld1 { v19.4s }, [x1], #0x10\n" + "tbz x0, #1, 116f\n" + "ld1 { v20.d }[0], [x10], #0x8\n" + "ld1 { v29.d }[0], [x1], #0x8\n" + "tbz x0, #0, 119f\n" + "ld1 { v20.s }[2], [x10]\n" + "ld1 { v29.s }[2], [x1]\n" "b 119f\n" "116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x4, #0, 119f\n" - "ld1 { v17.s }[0], [x2]\n" - "ld1 { v14.s }[0], [x5]\n" + "tbz x0, #0, 119f\n" + "ld1 { v20.s }[0], [x10]\n" + "ld1 { v29.s }[0], [x1]\n" "b 119f\n" "117:" // Oddments: Load requant params: Bit 2: Unset - "tbz x4, #1, 118f\n" - "ld1 { v6.d }[0], [x2], #0x8\n" - "ld1 { v21.d }[0], [x5], #0x8\n" - "tbz x4, #0, 119f\n" - "ld1 { v6.s }[2], [x2]\n" - "ld1 { v21.s }[2], [x5]\n" + "tbz x0, #1, 118f\n" + "ld1 { v12.d }[0], [x10], #0x8\n" + "ld1 { v19.d }[0], [x1], #0x8\n" + "tbz x0, #0, 119f\n" + "ld1 { v12.s }[2], [x10]\n" + "ld1 { v19.s }[2], [x1]\n" "b 119f\n" "118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 119f\n" - "ld1 { v6.s }[0], [x2]\n" - "ld1 { v21.s }[0], [x5]\n" + "tbz x0, #0, 119f\n" + "ld1 { v12.s }[0], [x10]\n" + "ld1 { v19.s }[0], [x1]\n" "119:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "add x17, x17, x1\n" - "sqrdmulh v20.4s, v20.4s, v17.4s\n" - "add x16, x16, x1\n" - "sqrdmulh v18.4s, v18.4s, v6.4s\n" - "add x6, x6, x1\n" - "sqrdmulh v5.4s, v5.4s, v17.4s\n" - "add x8, x8, x1\n" - "sqrdmulh v11.4s, v11.4s, v6.4s\n" - "and v1.16b, v15.16b, v21.16b\n" - "sshr v1.4s, v1.4s, 
#0x1f\n" - "and v29.16b, v20.16b, v14.16b\n" - "and v3.16b, v18.16b, v21.16b\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "and v2.16b, v5.16b, v14.16b\n" - "and v0.16b, v11.16b, v21.16b\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "sqrdmulh v8.4s, v8.4s, v17.4s\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "add x16, x16, x22\n" + "add x8, x8, x22\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "add x4, x4, x22\n" + "add x7, x7, x22\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v17.4s, v17.4s, v22.4s\n" "sshr v2.4s, v2.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v1.4s\n" - "sqrdmulh v10.4s, v10.4s, v6.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "sqrdmulh v9.4s, v9.4s, v17.4s\n" - "sqadd v20.4s, v20.4s, v29.4s\n" - "sqadd v18.4s, v18.4s, v3.4s\n" - "srshl v15.4s, v15.4s, v21.4s\n" - "sqadd v5.4s, v5.4s, v2.4s\n" - "srshl v20.4s, v20.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v21.4s\n" - "add v15.4s, v15.4s, v19.4s\n" - "srshl v5.4s, v5.4s, v14.4s\n" - "add v20.4s, v20.4s, v19.4s\n" - "smin v15.4s, v15.4s, v12.4s\n" - "add v18.4s, v18.4s, v19.4s\n" - "smin v20.4s, v20.4s, v12.4s\n" - "smax v15.4s, v15.4s, v16.4s\n" - "smin v18.4s, v18.4s, v12.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "add v5.4s, v5.4s, v19.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "uzp1 v15.16b, v15.16b, v20.16b\n" - "smin v5.4s, v5.4s, v12.4s\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" "uzp1 v15.16b, v15.16b, v15.16b\n" - "sqadd v11.4s, v11.4s, v0.4s\n" - "smax v5.4s, v5.4s, v16.4s\n" - "and v27.16b, v8.16b, v14.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v5.16b\n" - "srshl v11.4s, v11.4s, v21.4s\n" - "and v30.16b, v10.16b, v21.16b\n" - "sshr v30.4s, v30.4s, #0x1f\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "add v11.4s, v11.4s, v19.4s\n" - "sqadd v8.4s, v8.4s, v27.4s\n" - "and v6.16b, v9.16b, v14.16b\n" - 
"sshr v6.4s, v6.4s, #0x1f\n" - "smin v11.4s, v11.4s, v12.4s\n" - "srshl v8.4s, v8.4s, v14.4s\n" - "sqadd v10.4s, v10.4s, v30.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "add v8.4s, v8.4s, v19.4s\n" - "srshl v10.4s, v10.4s, v21.4s\n" - "sqadd v9.4s, v9.4s, v6.4s\n" - "smin v8.4s, v8.4s, v12.4s\n" - "add v10.4s, v10.4s, v19.4s\n" - "srshl v9.4s, v9.4s, v14.4s\n" - "smax v8.4s, v8.4s, v16.4s\n" - "smin v10.4s, v10.4s, v12.4s\n" - "uzp1 v11.16b, v11.16b, v8.16b\n" - "add v9.4s, v9.4s, v19.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "smax v10.4s, v10.4s, v16.4s\n" - "smin v9.4s, v9.4s, v12.4s\n" - "smax v9.4s, v9.4s, v16.4s\n" - "uzp1 v10.16b, v10.16b, v9.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" "uzp1 v10.16b, v10.16b, v10.16b\n" - "tbz x4, #2, 121f\n" - "st1 { v15.s }[0], [x17], #0x4\n" - "st1 { v18.s }[0], [x16], #0x4\n" - "st1 { v11.s }[0], [x6], #0x4\n" - "st1 { v10.s }[0], [x8], #0x4\n" - "tbz x4, #1, 120f\n" - "st1 { v15.h }[2], [x17], #0x2\n" - "st1 { v18.h }[2], [x16], #0x2\n" - "st1 { v11.h }[2], [x6], #0x2\n" - "st1 { v10.h }[2], [x8], #0x2\n" - "tbz x4, #0, 123f\n" - "st1 { v15.b }[6], [x17], #0x1\n" - "st1 { v18.b }[6], [x16], #0x1\n" - "st1 { v11.b }[6], [x6], #0x1\n" - "st1 { v10.b }[6], [x8], #0x1\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "tbz x0, #2, 121f\n" + "st1 { v15.s }[0], [x16], #0x4\n" + "st1 { v17.s }[0], [x8], #0x4\n" + "st1 { v10.s }[0], [x4], #0x4\n" + "st1 { v6.s }[0], [x7], #0x4\n" + "tbz x0, #1, 120f\n" + "st1 { v15.h }[2], [x16], #0x2\n" + "st1 { v17.h }[2], [x8], #0x2\n" + "st1 { v10.h }[2], [x4], #0x2\n" + "st1 { v6.h }[2], [x7], #0x2\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[6], [x16], #0x1\n" + "st1 { v17.b }[6], [x8], #0x1\n" + "st1 { v10.b }[6], [x4], #0x1\n" + "st1 { v6.b }[6], [x7], #0x1\n" "b 123f\n" "120:" // Oddments: Bit 2: Bit 1: Unset - "tbz x4, #0, 123f\n" - "st1 { v15.b }[4], [x17], #0x1\n" - "st1 { v18.b }[4], [x16], #0x1\n" - "st1 { v11.b }[4], [x6], #0x1\n" - "st1 { v10.b }[4], [x8], #0x1\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[4], [x16], #0x1\n" + "st1 { v17.b }[4], [x8], #0x1\n" + "st1 { v10.b }[4], [x4], #0x1\n" + "st1 { v6.b }[4], [x7], #0x1\n" "b 123f\n" "121:" // Oddments: Bit 2: Unset - "tbz x4, #1, 122f\n" - "st1 { v15.h }[0], [x17], #0x2\n" - "st1 { v18.h }[0], [x16], #0x2\n" - "st1 { v11.h }[0], [x6], #0x2\n" - "st1 { v10.h }[0], [x8], #0x2\n" - "tbz x4, #0, 123f\n" - "st1 { v15.b }[2], [x17], #0x1\n" - "st1 { v18.b }[2], [x16], #0x1\n" - "st1 { v11.b }[2], [x6], #0x1\n" - "st1 { v10.b }[2], [x8], #0x1\n" + "tbz x0, #1, 122f\n" + "st1 { v15.h }[0], [x16], #0x2\n" + "st1 { v17.h }[0], [x8], #0x2\n" + "st1 { v10.h }[0], [x4], #0x2\n" + "st1 { v6.h }[0], [x7], #0x2\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[2], [x16], #0x1\n" + "st1 { v17.b }[2], [x8], #0x1\n" + "st1 { v10.b }[2], [x4], #0x1\n" + "st1 { v6.b }[2], [x7], #0x1\n" "b 123f\n" "122:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 123f\n" - "st1 { v15.b }[0], [x17], #0x1\n" - "st1 { v18.b }[0], [x16], #0x1\n" - "st1 { v11.b }[0], [x6], #0x1\n" - "st1 { v10.b }[0], [x8], #0x1\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[0], [x16], #0x1\n" + "st1 { v17.b }[0], [x8], #0x1\n" + "st1 { v10.b }[0], [x4], #0x1\n" + "st1 { v6.b }[0], [x7], #0x1\n" "123:" // Oddments: Bit 2: End - "124:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, 
requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst.hpp index f5459c2ac1..b859978b1e 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,28 +28,23 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const void *, const arm_gemm::Requantize32&, const unsigned int, const unsigned int); -struct a64_u8q_nhwc_generic_output9_mla_depthfirst +class a64_u8q_nhwc_generic_output9_mla_depthfirst : public GenericDepthfirstKernelStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const void *, const arm_gemm::Requantize32&, const unsigned int, const unsigned int); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int n_output_points = 9; + KernelType kernel = a64_u8q_nhwc_generic_output9_mla_depthfirst_impl; - kern_type kernel = a64_u8q_nhwc_generic_output9_mla_depthfirst_impl; + public: + a64_u8q_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) : GenericDepthfirstKernelStrategy<uint8_t, uint8_t, uint8_t, int32_t>(9, arm_gemm::VLType::None) {} - a64_u8q_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) {} + KernelType get_kernel() const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp index e8ac603928..134f657fb8 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp +++ 
b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,39 +28,33 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); -struct a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst +struct a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst : DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef uint32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - + using Parent = DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t>; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 4; - - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 9; - constexpr static unsigned int input_col_quads = 1; + a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) + : Parent(2, 4, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::None; } - a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp index c5e0417c20..b575a5d169 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,39 +28,33 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); -struct a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst +struct a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst : DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef uint32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - + using Parent = DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t>; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 4; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 8; - constexpr static unsigned int input_cols = 6; - constexpr static unsigned int input_col_quads = 1; + a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) + : Parent(4, 2, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::None; } - a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp index 6b52017ce1..13f903b95d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,31 +28,25 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const uint8_t *, const int32_t *, const unsigned int, const unsigned int, const int32_t *, const int32_t *, const int32_t *, const arm_gemm::Requantize32&); -struct a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst +struct a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst : GenericDepthfirstMultiplierKernelStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const uint8_t *, const int32_t *, const unsigned int, const unsigned int, const int32_t *, const int32_t *, const int32_t *, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int output_rows(void) { return 2; }; - constexpr static unsigned int output_cols(void) { return 8; }; - - constexpr static unsigned int output_col_regs(void) { return 2; }; - - kern_type kernel = a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; - - a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) {} + using Parent = GenericDepthfirstMultiplierKernelStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) + : Parent(2, 8, arm_gemm::VLType::None) + { + } + Parent::KernelType kernel = a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp new file mode 100644 index 0000000000..2d2b452630 --- /dev/null +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "src/core/NEON/kernels/arm_gemm/utils.hpp" + +#include "src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp" + +#include <cstdint> + +#pragma once + +#if defined(__aarch64__) + +namespace arm_conv { +namespace depthwise { + +void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); + +class a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> +{ + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + + public: + constexpr static unsigned int kernel_rows = 3; + constexpr static unsigned int kernel_cols = 3; + + constexpr static unsigned int stride_rows = 1; + constexpr static unsigned int stride_cols = 1; + + a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } + + Parent::KernelType kernel = a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } +}; + +} // namespace depthwise +} // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp new file mode 100644 index 0000000000..2410d38512 --- /dev/null +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_gemm.hpp" + +#include <cstddef> +#include <cstdint> + +#if defined(__aarch64__) + +namespace arm_conv { +namespace depthwise { + +void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( + const unsigned int n_channels, + const uint8_t *const *const inptrs, + const uint8_t *const weights, + const int32_t *const bias, + const arm_gemm::Requantize32 &qp, + const int32_t *const requant_muls, + const int32_t *const requant_shifts, + uint8_t *const *const outptrs +) +{ + struct Params + { + long unsigned int n_channels; + const void *weights; + const int32_t *bias; + const arm_gemm::Requantize32 *requant; + const int32_t *const requant_muls; + const int32_t *const requant_shifts; + uint8_t *const *const outptrs; + const uint8_t *inptrs[16]; + + Params( + long unsigned int n_channels, + const uint8_t *const *inptrs_raw, + const void *const weights, + const int32_t *const bias, + const arm_gemm::Requantize32 &qp, + const int32_t *const requant_muls, + const int32_t *const requant_shifts, + uint8_t *const *outptrs + ) : n_channels(n_channels), weights(weights), bias(bias), + requant(&qp), requant_muls(requant_muls), + requant_shifts(requant_shifts), outptrs(outptrs) + { + inptrs[0] = inptrs_raw[5]; + inptrs[1] = inptrs_raw[0]; + inptrs[2] = inptrs_raw[3]; + inptrs[3] = inptrs_raw[6]; + inptrs[4] = inptrs_raw[9]; + inptrs[5] = inptrs_raw[12]; + inptrs[6] = inptrs_raw[15]; + inptrs[7] = inptrs_raw[1]; + inptrs[8] = inptrs_raw[2]; + inptrs[9] = inptrs_raw[10]; + inptrs[10] = inptrs_raw[4]; + inptrs[11] = inptrs_raw[7]; + inptrs[12] = inptrs_raw[8]; + inptrs[13] = inptrs_raw[11]; + inptrs[14] = inptrs_raw[13]; + inptrs[15] = inptrs_raw[14]; + + } + }; + + const Params params(n_channels, inptrs, weights, bias, qp, + requant_muls, requant_shifts, outptrs); + + __asm__ __volatile__( + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "add x22, x19, %[offsetof_Requantize32_c_offset]\n" + "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "ld1r { v15.16b }, [x23]\n" + "ld1r { v13.8h }, [x22]\n" + "lsr x16, x8, #0x3\n" + "mov x15, #0x0\n" + "ld1r { v11.8h }, [x20]\n" + "ld1r { v25.8h }, [x19]\n" + "mov x14, #0x0\n" + "add x13, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x12, [%x[params], %[offsetof_Params_requant_muls]]\n" + "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x10, x9, [x21, #0x0]\n" + "ldp x28, x27, [x21, #0x10]\n" + "cbz x16, 3f\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q12, [x19, #0x0]\n" + "subs x16, x16, #0x1\n" + "mov v14.16b, v12.16b\n" + "ldr q17, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v9.16b, v17.16b\n" + "mov v16.16b, v12.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v10.16b, v17.16b\n" + "mov v18.16b, v12.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v26.16b, v17.16b\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "ldp x23, x22, [x13, #0x0]\n" + "ldp x21, x20, [x13, #0x10]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ldr 
x19, [x13, #0x20]\n" + "ldr d31, [x23, x15]\n" + "usubl v5.8h, v5.8b, v15.8b\n" + "usubl v6.8h, v6.8b, v15.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "usubl v7.8h, v7.8b, v15.8b\n" + "usubl v8.8h, v8.8b, v15.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v27.8h, v27.8b, #0x0\n" + "beq 2f\n" + "1:" // Loop + "smlal v12.4s, v31.4h, v4.4h\n" + "smlal2 v17.4s, v31.8h, v4.8h\n" + "ldr x21, [x13, #0x28]\n" + "ldr x26, [x13, #0x38]\n" + "smlal v14.4s, v31.4h, v3.4h\n" + "smlal2 v9.4s, v31.8h, v3.8h\n" + "ldr x20, [x13, #0x30]\n" + "ldr x25, [x13, #0x40]\n" + "smlal v12.4s, v30.4h, v0.4h\n" + "smlal2 v17.4s, v30.8h, v0.8h\n" + "ldr x19, [x13, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v14.4s, v29.4h, v2.4h\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v16.4s, v31.4h, v1.4h\n" + "smlal2 v10.4s, v31.8h, v1.8h\n" + "ldr x24, [x13, #0x50]\n" + "ldr x23, [x13, #0x58]\n" + "smlal v18.4s, v31.4h, v0.4h\n" + "smlal2 v26.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v12.4s, v28.4h, v5.4h\n" + "smlal2 v17.4s, v28.8h, v5.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ldr x22, [x13, #0x60]\n" + "smlal v14.4s, v28.4h, v4.4h\n" + "smlal2 v9.4s, v28.8h, v4.8h\n" + "ldr x21, [x13, #0x68]\n" + "ldr x20, [x13, #0x70]\n" + "smlal v16.4s, v28.4h, v2.4h\n" + "smlal2 v10.4s, v28.8h, v2.8h\n" + "ldr x19, [x13, #0x78]\n" + "ldr q21, [x12, #0x0]\n" + "smlal v18.4s, v28.4h, v1.4h\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v12.4s, v27.4h, v7.4h\n" + "smlal2 v17.4s, v27.8h, v7.8h\n" + "ldr q24, [x11, #0x0]\n" + "ldr q19, [x12, #0x10]\n" + "smlal v14.4s, v27.4h, v6.4h\n" + "smlal2 v9.4s, v27.8h, v6.8h\n" + "ldr q23, [x11, #0x10]\n" + "add x17, x17, #0x48\n" + "smlal v16.4s, v31.4h, v6.4h\n" + "smlal2 v10.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v18.4s, v27.4h, v3.4h\n" + "smlal2 v26.4s, v27.8h, v3.8h\n" + "subs x16, x16, #0x1\n" + "add x12, x12, #0x20\n" + "smlal v12.4s, v28.4h, v1.4h\n" + "smlal2 v17.4s, v28.8h, v1.8h\n" + "add x11, x11, #0x20\n" + "smlal v14.4s, v28.4h, v0.4h\n" + "smlal2 v9.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v16.4s, v27.4h, v4.4h\n" + "smlal v18.4s, v29.4h, v8.4h\n" + "smlal2 v10.4s, v27.8h, v4.8h\n" + "smlal2 v26.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v12.4s, v31.4h, v2.4h\n" + "smlal2 v17.4s, v31.8h, v2.8h\n" + "smlal v14.4s, v31.4h, v1.4h\n" + "smlal2 v9.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v16.4s, v30.4h, v5.4h\n" + "smlal v18.4s, v30.4h, v4.4h\n" + "smlal v12.4s, v30.4h, v8.4h\n" + "smlal2 v17.4s, v30.8h, v8.8h\n" + "smlal v14.4s, v30.4h, v7.4h\n" + "smlal2 v9.4s, v30.8h, v7.8h\n" + "smlal2 v10.4s, v30.8h, v5.8h\n" + "smlal2 v26.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v16.4s, v29.4h, v0.4h\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal v12.4s, v29.4h, v3.4h\n" + "smlal2 v17.4s, v29.8h, v3.8h\n" + "smlal2 v10.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x15]\n" + "smlal2 v26.4s, v28.8h, v2.8h\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v16.4s, v31.4h, v3.4h\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v14.4s, v28.4h, v5.4h\n" + 
"smlal2 v9.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal2 v10.4s, v31.8h, v3.8h\n" + "smlal2 v26.4s, v30.8h, v5.8h\n" + "add x15, x15, #0x8\n" + "smlal v16.4s, v29.4h, v7.4h\n" + "smlal v18.4s, v29.4h, v6.4h\n" + "smlal2 v10.4s, v29.8h, v7.8h\n" + "smlal2 v26.4s, v29.8h, v6.8h\n" + "smlal v12.4s, v31.4h, v6.4h\n" + "smlal v14.4s, v30.4h, v8.4h\n" + "sqdmulh v12.4s, v12.4s, v21.4s\n" + "smlal v16.4s, v28.4h, v8.4h\n" + "smlal v18.4s, v28.4h, v7.4h\n" + "sqdmulh v14.4s, v14.4s, v21.4s\n" + "smlal2 v17.4s, v31.8h, v6.8h\n" + "smlal2 v9.4s, v30.8h, v8.8h\n" + "sqdmulh v16.4s, v16.4s, v21.4s\n" + "smlal2 v10.4s, v28.8h, v8.8h\n" + "smlal2 v26.4s, v28.8h, v7.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "and v29.16b, v12.16b, v24.16b\n" + "sqdmulh v17.4s, v17.4s, v19.4s\n" + "and v22.16b, v14.16b, v24.16b\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "and v21.16b, v16.16b, v24.16b\n" + "sqdmulh v10.4s, v10.4s, v19.4s\n" + "and v20.16b, v18.16b, v24.16b\n" + "sqdmulh v26.4s, v26.4s, v19.4s\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v19.16b, v17.16b, v23.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v30.16b, v9.16b, v23.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v10.16b, v23.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v28.16b, v26.16b, v23.16b\n" + "sqadd v12.4s, v12.4s, v29.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v14.4s, v14.4s, v22.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v20.4s\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "srshl v12.4s, v12.4s, v24.4s\n" + "sqadd v17.4s, v17.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v24.4s\n" + "sqadd v9.4s, v9.4s, v30.4s\n" + "srshl v16.4s, v16.4s, v24.4s\n" + "sqadd v10.4s, v10.4s, v3.4s\n" + "srshl v18.4s, v18.4s, v24.4s\n" + "sqadd v26.4s, v26.4s, v28.4s\n" + "srshl v17.4s, v17.4s, v23.4s\n" + "sqxtn v12.4h, v12.4s\n" + "srshl v9.4s, v9.4s, v23.4s\n" + "sqxtn v14.4h, v14.4s\n" + "srshl v10.4s, v10.4s, v23.4s\n" + "sqxtn v16.4h, v16.4s\n" + "srshl v26.4s, v26.4s, v23.4s\n" + "sqxtn v18.4h, v18.4s\n" + "sqxtn2 v12.8h, v17.4s\n" + "sqxtn2 v14.8h, v9.4s\n" + "sqxtn2 v16.8h, v10.4s\n" + "sqxtn2 v18.8h, v26.4s\n" + "sqadd v12.8h, v12.8h, v13.8h\n" + "sqadd v14.8h, v14.8h, v13.8h\n" + "sqadd v16.8h, v16.8h, v13.8h\n" + "sqadd v18.8h, v18.8h, v13.8h\n" + "smax v12.8h, v12.8h, v11.8h\n" + "smax v14.8h, v14.8h, v11.8h\n" + "smax v16.8h, v16.8h, v11.8h\n" + "smax v18.8h, v18.8h, v11.8h\n" + "smin v12.8h, v12.8h, v25.8h\n" + "smin v14.8h, v14.8h, v25.8h\n" + "smin v16.8h, v16.8h, v25.8h\n" + "smin v18.8h, v18.8h, v25.8h\n" + "uzp1 v12.16b, v12.16b, v12.16b\n" + "uzp1 v14.16b, v14.16b, v14.16b\n" + "str d12, [x10, x14]\n" + "uzp1 v16.16b, v16.16b, v16.16b\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "str d14, [x9, x14]\n" + "str d16, [x28, x14]\n" + "str d18, [x27, x14]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q12, [x19, #0x0]\n" + "add x14, x14, #0x8\n" + "ldr q17, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v14.16b, v12.16b\n" + "mov v9.16b, v17.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v16.16b, v12.16b\n" + "mov v10.16b, v17.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v18.16b, v12.16b\n" + "mov v26.16b, v17.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "usubl v1.8h, v1.8b, 
v15.8b\n" + "ldp x23, x22, [x13, #0x0]\n" + "ldp x21, x20, [x13, #0x10]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldr x19, [x13, #0x20]\n" + "ldr d31, [x23, x15]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "usubl v5.8h, v5.8b, v15.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "usubl v6.8h, v6.8b, v15.8b\n" + "usubl v7.8h, v7.8b, v15.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "usubl v8.8h, v8.8b, v15.8b\n" + "ushll v31.8h, v31.8b, #0x0\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v27.8h, v27.8b, #0x0\n" + "bgt 1b\n" + "2:" // Tail + "smlal v12.4s, v31.4h, v4.4h\n" + "smlal2 v17.4s, v31.8h, v4.8h\n" + "ldr x21, [x13, #0x28]\n" + "ldr x26, [x13, #0x38]\n" + "smlal v14.4s, v31.4h, v3.4h\n" + "smlal2 v9.4s, v31.8h, v3.8h\n" + "ldr x20, [x13, #0x30]\n" + "ldr x25, [x13, #0x40]\n" + "smlal v12.4s, v30.4h, v0.4h\n" + "smlal2 v17.4s, v30.8h, v0.8h\n" + "ldr x19, [x13, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v14.4s, v29.4h, v2.4h\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v16.4s, v31.4h, v1.4h\n" + "smlal2 v10.4s, v31.8h, v1.8h\n" + "ldr x24, [x13, #0x50]\n" + "ldr x23, [x13, #0x58]\n" + "smlal v18.4s, v31.4h, v0.4h\n" + "smlal2 v26.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v12.4s, v28.4h, v5.4h\n" + "smlal2 v17.4s, v28.8h, v5.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ldr x22, [x13, #0x60]\n" + "smlal v14.4s, v28.4h, v4.4h\n" + "smlal2 v9.4s, v28.8h, v4.8h\n" + "ldr x21, [x13, #0x68]\n" + "ldr x20, [x13, #0x70]\n" + "smlal v16.4s, v28.4h, v2.4h\n" + "smlal2 v10.4s, v28.8h, v2.8h\n" + "ldr x19, [x13, #0x78]\n" + "ldr q21, [x12, #0x0]\n" + "smlal v18.4s, v28.4h, v1.4h\n" + "smlal2 v26.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v12.4s, v27.4h, v7.4h\n" + "smlal2 v17.4s, v27.8h, v7.8h\n" + "ldr q24, [x11, #0x0]\n" + "ldr q19, [x12, #0x10]\n" + "smlal v14.4s, v27.4h, v6.4h\n" + "smlal2 v9.4s, v27.8h, v6.8h\n" + "ldr q23, [x11, #0x10]\n" + "tst x8, #0x7\n" + "smlal v16.4s, v31.4h, v6.4h\n" + "smlal2 v10.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v18.4s, v27.4h, v3.4h\n" + "smlal2 v26.4s, v27.8h, v3.8h\n" + "add x12, x12, #0x20\n" + "add x11, x11, #0x20\n" + "smlal v12.4s, v28.4h, v1.4h\n" + "smlal2 v17.4s, v28.8h, v1.8h\n" + "smlal v14.4s, v28.4h, v0.4h\n" + "smlal2 v9.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v16.4s, v27.4h, v4.4h\n" + "smlal v18.4s, v29.4h, v8.4h\n" + "smlal2 v10.4s, v27.8h, v4.8h\n" + "smlal2 v26.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v12.4s, v31.4h, v2.4h\n" + "smlal2 v17.4s, v31.8h, v2.8h\n" + "smlal v14.4s, v31.4h, v1.4h\n" + "smlal2 v9.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v16.4s, v30.4h, v5.4h\n" + "smlal v18.4s, v30.4h, v4.4h\n" + "smlal v12.4s, v30.4h, v8.4h\n" + "smlal2 v17.4s, v30.8h, v8.8h\n" + "smlal v14.4s, v30.4h, v7.4h\n" + "smlal2 v9.4s, v30.8h, v7.8h\n" + "smlal2 v10.4s, v30.8h, v5.8h\n" + "smlal2 v26.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v16.4s, v29.4h, v0.4h\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal v12.4s, v29.4h, v3.4h\n" + "smlal2 v17.4s, v29.8h, v3.8h\n" + "smlal2 v10.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x15]\n" + "smlal2 v26.4s, v28.8h, 
v2.8h\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v16.4s, v31.4h, v3.4h\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v14.4s, v28.4h, v5.4h\n" + "smlal2 v9.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal2 v10.4s, v31.8h, v3.8h\n" + "smlal2 v26.4s, v30.8h, v5.8h\n" + "add x15, x15, #0x8\n" + "smlal v16.4s, v29.4h, v7.4h\n" + "smlal v18.4s, v29.4h, v6.4h\n" + "smlal2 v10.4s, v29.8h, v7.8h\n" + "smlal2 v26.4s, v29.8h, v6.8h\n" + "smlal v12.4s, v31.4h, v6.4h\n" + "smlal v14.4s, v30.4h, v8.4h\n" + "sqdmulh v12.4s, v12.4s, v21.4s\n" + "smlal v16.4s, v28.4h, v8.4h\n" + "smlal v18.4s, v28.4h, v7.4h\n" + "sqdmulh v14.4s, v14.4s, v21.4s\n" + "smlal2 v17.4s, v31.8h, v6.8h\n" + "smlal2 v9.4s, v30.8h, v8.8h\n" + "sqdmulh v16.4s, v16.4s, v21.4s\n" + "smlal2 v10.4s, v28.8h, v8.8h\n" + "smlal2 v26.4s, v28.8h, v7.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "and v29.16b, v12.16b, v24.16b\n" + "sqdmulh v17.4s, v17.4s, v19.4s\n" + "and v22.16b, v14.16b, v24.16b\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "and v21.16b, v16.16b, v24.16b\n" + "sqdmulh v10.4s, v10.4s, v19.4s\n" + "and v20.16b, v18.16b, v24.16b\n" + "sqdmulh v26.4s, v26.4s, v19.4s\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v19.16b, v17.16b, v23.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v30.16b, v9.16b, v23.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v10.16b, v23.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v28.16b, v26.16b, v23.16b\n" + "sqadd v12.4s, v12.4s, v29.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v14.4s, v14.4s, v22.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v20.4s\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "srshl v12.4s, v12.4s, v24.4s\n" + "sqadd v17.4s, v17.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v24.4s\n" + "sqadd v9.4s, v9.4s, v30.4s\n" + "srshl v16.4s, v16.4s, v24.4s\n" + "sqadd v10.4s, v10.4s, v3.4s\n" + "srshl v18.4s, v18.4s, v24.4s\n" + "sqadd v26.4s, v26.4s, v28.4s\n" + "srshl v17.4s, v17.4s, v23.4s\n" + "sqxtn v12.4h, v12.4s\n" + "srshl v9.4s, v9.4s, v23.4s\n" + "sqxtn v14.4h, v14.4s\n" + "srshl v10.4s, v10.4s, v23.4s\n" + "sqxtn v16.4h, v16.4s\n" + "srshl v26.4s, v26.4s, v23.4s\n" + "sqxtn v18.4h, v18.4s\n" + "sqxtn2 v12.8h, v17.4s\n" + "sqxtn2 v14.8h, v9.4s\n" + "sqxtn2 v16.8h, v10.4s\n" + "sqxtn2 v18.8h, v26.4s\n" + "sqadd v12.8h, v12.8h, v13.8h\n" + "sqadd v14.8h, v14.8h, v13.8h\n" + "sqadd v16.8h, v16.8h, v13.8h\n" + "sqadd v18.8h, v18.8h, v13.8h\n" + "smax v12.8h, v12.8h, v11.8h\n" + "smax v14.8h, v14.8h, v11.8h\n" + "smax v16.8h, v16.8h, v11.8h\n" + "smax v18.8h, v18.8h, v11.8h\n" + "smin v12.8h, v12.8h, v25.8h\n" + "smin v14.8h, v14.8h, v25.8h\n" + "smin v16.8h, v16.8h, v25.8h\n" + "smin v18.8h, v18.8h, v25.8h\n" + "uzp1 v12.16b, v12.16b, v12.16b\n" + "uzp1 v14.16b, v14.16b, v14.16b\n" + "str d12, [x10, x14]\n" + "uzp1 v16.16b, v16.16b, v16.16b\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "str d14, [x9, x14]\n" + "str d16, [x28, x14]\n" + "str d18, [x27, x14]\n" + "add x14, x14, #0x8\n" + "beq 64f\n" + "add x17, x17, #0x48\n" + "3:" // Oddments + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x8, #2, 5f\n" + "ld1 { v12.4s }, [x19], #0x10\n" + "tbz x8, #1, 4f\n" + "ld1 { v17.d }[0], [x19], #0x8\n" + "tbz x8, #0, 7f\n" + "ld1 { v17.s }[2], [x19]\n" + "b 7f\n" + "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset + "tbz x8, #0, 7f\n" + "ld1 { v17.s }[0], [x19]\n" + "b 7f\n" + "5:" // Oddments: Load bias: Bit 2: Unset + "tbz x8, #1, 6f\n" + "ld1 { v12.d }[0], [x19], #0x8\n" + 
"tbz x8, #0, 7f\n" + "ld1 { v12.s }[2], [x19]\n" + "b 7f\n" + "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 7f\n" + "ld1 { v12.s }[0], [x19]\n" + "7:" // Oddments: Load bias: Bit 2: End + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v14.16b, v12.16b\n" + "mov v9.16b, v17.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v16.16b, v12.16b\n" + "mov v10.16b, v17.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v18.16b, v12.16b\n" + "mov v26.16b, v17.16b\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldr d8, [x17, #0x40]\n" + "ldp x23, x22, [x13, #0x0]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldp x21, x20, [x13, #0x10]\n" + "ldr x19, [x13, #0x20]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "usubl v5.8h, v5.8b, v15.8b\n" + "usubl v6.8h, v6.8b, v15.8b\n" + "usubl v7.8h, v7.8b, v15.8b\n" + "usubl v8.8h, v8.8b, v15.8b\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" + "tbz x8, #2, 9f\n" + "ld1 { v31.s }[0], [x23], #0x4\n" + "ld1 { v30.s }[0], [x22], #0x4\n" + "ld1 { v29.s }[0], [x21], #0x4\n" + "ld1 { v28.s }[0], [x20], #0x4\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 8f\n" + "ld1 { v31.h }[2], [x23], #0x2\n" + "ld1 { v30.h }[2], [x22], #0x2\n" + "ld1 { v29.h }[2], [x21], #0x2\n" + "ld1 { v28.h }[2], [x20], #0x2\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[6], [x23]\n" + "ld1 { v30.b }[6], [x22]\n" + "ld1 { v29.b }[6], [x21]\n" + "ld1 { v28.b }[6], [x20]\n" + "ld1 { v27.b }[6], [x19]\n" + "b 11f\n" + "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[4], [x23]\n" + "ld1 { v30.b }[4], [x22]\n" + "ld1 { v29.b }[4], [x21]\n" + "ld1 { v28.b }[4], [x20]\n" + "ld1 { v27.b }[4], [x19]\n" + "b 11f\n" + "9:" // Oddments: Initial loads: Bit 2: Unset + "tbz x8, #1, 10f\n" + "ld1 { v31.h }[0], [x23], #0x2\n" + "ld1 { v30.h }[0], [x22], #0x2\n" + "ld1 { v29.h }[0], [x21], #0x2\n" + "ld1 { v28.h }[0], [x20], #0x2\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[2], [x23]\n" + "ld1 { v30.b }[2], [x22]\n" + "ld1 { v29.b }[2], [x21]\n" + "ld1 { v28.b }[2], [x20]\n" + "ld1 { v27.b }[2], [x19]\n" + "b 11f\n" + "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[0], [x23]\n" + "ld1 { v30.b }[0], [x22]\n" + "ld1 { v29.b }[0], [x21]\n" + "ld1 { v28.b }[0], [x20]\n" + "ld1 { v27.b }[0], [x19]\n" + "11:" // Oddments: Initial loads: Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v12.4s, v31.4h, v4.4h\n" + "smlal2 v17.4s, v31.8h, v4.8h\n" + "ldr x21, [x13, #0x28]\n" + "smlal v14.4s, v31.4h, v3.4h\n" + "smlal2 v9.4s, v31.8h, v3.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "add x21, x21, x15\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v16.4s, v31.4h, v1.4h\n" + "smlal2 v10.4s, v31.8h, v1.8h\n" + "smlal v18.4s, v31.4h, v0.4h\n" + "smlal2 v26.4s, v31.8h, v0.8h\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v12.4s, v30.4h, v0.4h\n" + "smlal2 v17.4s, v30.8h, v0.8h\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v14.4s, v29.4h, v2.4h\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "smlal v12.4s, v28.4h, v5.4h\n" + "smlal2 v17.4s, v28.8h, v5.8h\n" + "smlal v14.4s, v28.4h, v4.4h\n" + "smlal2 v9.4s, v28.8h, v4.8h\n" + "smlal v16.4s, v28.4h, v2.4h\n" + "smlal2 v10.4s, v28.8h, v2.8h\n" + "smlal v18.4s, v28.4h, v1.4h\n" + "smlal2 v26.4s, 
v28.8h, v1.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v31.s }[0], [x21], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v31.h }[2], [x21], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[6], [x21]\n" + "b 15f\n" + "12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[4], [x21]\n" + "b 15f\n" + "13:" // Oddments: Load (3, 0): Bit 2: Unset + "tbz x8, #1, 14f\n" + "ld1 { v31.h }[0], [x21], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[2], [x21]\n" + "b 15f\n" + "14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 15f\n" + "ld1 { v31.b }[0], [x21]\n" + "15:" // Oddments: Load (3, 0): Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v16.4s, v31.4h, v6.4h\n" + "smlal2 v10.4s, v31.8h, v6.8h\n" + "ldr x20, [x13, #0x30]\n" + "smlal v12.4s, v27.4h, v7.4h\n" + "smlal2 v17.4s, v27.8h, v7.8h\n" + "add x20, x20, x15\n" + "smlal v14.4s, v27.4h, v6.4h\n" + "smlal2 v9.4s, v27.8h, v6.8h\n" + "smlal v16.4s, v27.4h, v4.4h\n" + "smlal2 v10.4s, v27.8h, v4.8h\n" + "smlal v18.4s, v27.4h, v3.4h\n" + "smlal2 v26.4s, v27.8h, v3.8h\n" + "tbz x8, #2, 17f\n" + "ld1 { v29.s }[0], [x20], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v29.h }[2], [x20], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[6], [x20]\n" + "b 19f\n" + "16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[4], [x20]\n" + "b 19f\n" + "17:" // Oddments: Load (3, 3): Bit 2: Unset + "tbz x8, #1, 18f\n" + "ld1 { v29.h }[0], [x20], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[2], [x20]\n" + "b 19f\n" + "18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 19f\n" + "ld1 { v29.b }[0], [x20]\n" + "19:" // Oddments: Load (3, 3): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "ldr x26, [x13, #0x38]\n" + "smlal v18.4s, v29.4h, v8.4h\n" + "smlal2 v26.4s, v29.8h, v8.8h\n" + "add x26, x26, x15\n" + "tbz x8, #2, 21f\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "tbz x8, #1, 20f\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[6], [x26]\n" + "b 23f\n" + "20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[4], [x26]\n" + "b 23f\n" + "21:" // Oddments: Load (0, 1): Bit 2: Unset + "tbz x8, #1, 22f\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[2], [x26]\n" + "b 23f\n" + "22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 23f\n" + "ld1 { v28.b }[0], [x26]\n" + "23:" // Oddments: Load (0, 1): Bit 2: End + "ushll v28.8h, v28.8b, #0x0\n" + "ldr x25, [x13, #0x40]\n" + "smlal v12.4s, v28.4h, v1.4h\n" + "smlal2 v17.4s, v28.8h, v1.8h\n" + "smlal v14.4s, v28.4h, v0.4h\n" + "smlal2 v9.4s, v28.8h, v0.8h\n" + "add x25, x25, x15\n" + "tbz x8, #2, 25f\n" + "ld1 { v31.s }[0], [x25], #0x4\n" + "tbz x8, #1, 24f\n" + "ld1 { v31.h }[2], [x25], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[6], [x25]\n" + "b 27f\n" + "24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[4], [x25]\n" + "b 27f\n" + "25:" // Oddments: Load (0, 2): Bit 2: Unset + "tbz x8, #1, 26f\n" + "ld1 { v31.h }[0], [x25], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[2], [x25]\n" + "b 27f\n" + "26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 27f\n" + "ld1 { v31.b }[0], [x25]\n" + "27:" // Oddments: Load (0, 2): Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "ldr x19, [x13, #0x48]\n" + "smlal v12.4s, v31.4h, v2.4h\n" + "smlal2 v17.4s, v31.8h, v2.8h\n" + "smlal v14.4s, v31.4h, v1.4h\n" + "smlal2 v9.4s, v31.8h, v1.8h\n" + "add x19, x19, x15\n" + "tbz x8, 
#2, 29f\n" + "ld1 { v30.s }[0], [x19], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v30.h }[2], [x19], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[6], [x19]\n" + "b 31f\n" + "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[4], [x19]\n" + "b 31f\n" + "29:" // Oddments: Load (2, 2): Bit 2: Unset + "tbz x8, #1, 30f\n" + "ld1 { v30.h }[0], [x19], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[2], [x19]\n" + "b 31f\n" + "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 31f\n" + "ld1 { v30.b }[0], [x19]\n" + "31:" // Oddments: Load (2, 2): Bit 2: End + "ushll v30.8h, v30.8b, #0x0\n" + "ldr x24, [x13, #0x50]\n" + "smlal v12.4s, v30.4h, v8.4h\n" + "smlal2 v17.4s, v30.8h, v8.8h\n" + "smlal v14.4s, v30.4h, v7.4h\n" + "smlal2 v9.4s, v30.8h, v7.8h\n" + "add x24, x24, x15\n" + "smlal v16.4s, v30.4h, v5.4h\n" + "smlal2 v10.4s, v30.8h, v5.8h\n" + "smlal v18.4s, v30.4h, v4.4h\n" + "smlal2 v26.4s, v30.8h, v4.8h\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x24]\n" + "b 35f\n" + "32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x24]\n" + "b 35f\n" + "33:" // Oddments: Load (1, 0): Bit 2: Unset + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[2], [x24]\n" + "b 35f\n" + "34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x24]\n" + "35:" // Oddments: Load (1, 0): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "ldr x23, [x13, #0x58]\n" + "smlal v12.4s, v29.4h, v3.4h\n" + "smlal2 v17.4s, v29.8h, v3.8h\n" + "smlal v16.4s, v29.4h, v0.4h\n" + "smlal2 v10.4s, v29.8h, v0.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 37f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[6], [x23]\n" + "b 39f\n" + "36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[4], [x23]\n" + "b 39f\n" + "37:" // Oddments: Load (1, 3): Bit 2: Unset + "tbz x8, #1, 38f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[2], [x23]\n" + "b 39f\n" + "38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 39f\n" + "ld1 { v28.b }[0], [x23]\n" + "39:" // Oddments: Load (1, 3): Bit 2: End + "ushll v28.8h, v28.8b, #0x0\n" + "ldr x22, [x13, #0x60]\n" + "smlal v14.4s, v28.4h, v5.4h\n" + "smlal2 v9.4s, v28.8h, v5.8h\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v26.4s, v28.8h, v2.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 41f\n" + "ld1 { v31.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + "ld1 { v31.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[6], [x22]\n" + "b 43f\n" + "40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[4], [x22]\n" + "b 43f\n" + "41:" // Oddments: Load (2, 0): Bit 2: Unset + "tbz x8, #1, 42f\n" + "ld1 { v31.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[2], [x22]\n" + "b 43f\n" + "42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 43f\n" + "ld1 { v31.b }[0], [x22]\n" + "43:" // Oddments: Load (2, 0): Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "ldr x21, [x13, #0x68]\n" + "smlal v12.4s, v31.4h, v6.4h\n" + "smlal2 v17.4s, v31.8h, v6.8h\n" + "smlal v16.4s, v31.4h, v3.4h\n" + "smlal2 v10.4s, v31.8h, v3.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 45f\n" + "ld1 { v30.s 
}[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v30.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[6], [x21]\n" + "b 47f\n" + "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[4], [x21]\n" + "b 47f\n" + "45:" // Oddments: Load (2, 3): Bit 2: Unset + "tbz x8, #1, 46f\n" + "ld1 { v30.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[2], [x21]\n" + "b 47f\n" + "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 47f\n" + "ld1 { v30.b }[0], [x21]\n" + "47:" // Oddments: Load (2, 3): Bit 2: End + "ushll v30.8h, v30.8b, #0x0\n" + "ldr x20, [x13, #0x70]\n" + "smlal v14.4s, v30.4h, v8.4h\n" + "smlal2 v9.4s, v30.8h, v8.8h\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal2 v26.4s, v30.8h, v5.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 49f\n" + "ld1 { v29.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v29.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[6], [x20]\n" + "b 51f\n" + "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[4], [x20]\n" + "b 51f\n" + "49:" // Oddments: Load (3, 1): Bit 2: Unset + "tbz x8, #1, 50f\n" + "ld1 { v29.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[2], [x20]\n" + "b 51f\n" + "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 51f\n" + "ld1 { v29.b }[0], [x20]\n" + "51:" // Oddments: Load (3, 1): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "ldr x19, [x13, #0x78]\n" + "smlal v16.4s, v29.4h, v7.4h\n" + "smlal2 v10.4s, v29.8h, v7.8h\n" + "smlal v18.4s, v29.4h, v6.4h\n" + "smlal2 v26.4s, v29.8h, v6.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 53f\n" + "ld1 { v28.s }[0], [x19], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v28.h }[2], [x19], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[6], [x19]\n" + "b 55f\n" + "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[4], [x19]\n" + "b 55f\n" + "53:" // Oddments: Load (3, 2): Bit 2: Unset + "tbz x8, #1, 54f\n" + "ld1 { v28.h }[0], [x19], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[2], [x19]\n" + "b 55f\n" + "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 55f\n" + "ld1 { v28.b }[0], [x19]\n" + "55:" // Oddments: Load (3, 2): Bit 2: End + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v16.4s, v28.4h, v8.4h\n" + "smlal2 v10.4s, v28.8h, v8.8h\n" + "smlal v18.4s, v28.4h, v7.4h\n" + "smlal2 v26.4s, v28.8h, v7.8h\n" + "tbz x8, #2, 57f\n" + "ld1 { v21.4s }, [x12], #0x10\n" + "ld1 { v24.4s }, [x11], #0x10\n" + "tbz x8, #1, 56f\n" + "ld1 { v19.d }[0], [x12], #0x8\n" + "ld1 { v23.d }[0], [x11], #0x8\n" + "tbz x8, #0, 59f\n" + "ld1 { v19.s }[2], [x12]\n" + "ld1 { v23.s }[2], [x11]\n" + "b 59f\n" + "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset + "tbz x8, #0, 59f\n" + "ld1 { v19.s }[0], [x12]\n" + "ld1 { v23.s }[0], [x11]\n" + "b 59f\n" + "57:" // Oddments: Load requant params: Bit 2: Unset + "tbz x8, #1, 58f\n" + "ld1 { v21.d }[0], [x12], #0x8\n" + "ld1 { v24.d }[0], [x11], #0x8\n" + "tbz x8, #0, 59f\n" + "ld1 { v21.s }[2], [x12]\n" + "ld1 { v24.s }[2], [x11]\n" + "b 59f\n" + "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 59f\n" + "ld1 { v21.s }[0], [x12]\n" + "ld1 { v24.s }[0], [x11]\n" + "59:" // Oddments: Load requant params: Bit 2: End + "sqrdmulh v12.4s, v12.4s, v21.4s\n" + "sqrdmulh v14.4s, v14.4s, v21.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqrdmulh v16.4s, v16.4s, v21.4s\n" + "sqrdmulh v18.4s, v18.4s, v21.4s\n" + "add x28, x28, x14\n"
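// Mid-sequence here is the requantization of the oddment accumulators; it follows the
// usual gemmlowp-style fixed-point scheme seen throughout these kernels. Each 32-bit
// lane has just been scaled with sqrdmulh (saturating rounding doubling multiply-high
// by the per-channel multipliers in v21/v19); the and/sshr/sqadd steps that follow
// compute the standard fix-up term, roughly:
//   acc = sqadd(acc, (acc & shift) >> 31);  // nudge negative lanes so the rounding
//                                           // shift below matches the reference code
//   acc = srshl(acc, shift);                // rounding shift; shifts (v24/v23) are
//                                           // held as negative values
// The results are then narrowed pairwise (sqxtn/sqxtn2), offset by c_offset (v13),
// clamped to [minval (v11), maxval (v25)], packed to bytes with uzp1, and written out
// by the tbz-guarded partial stores that handle the remaining 1-7 channels.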
+ "add x27, x27, x14\n" + "and v29.16b, v12.16b, v24.16b\n" + "sqdmulh v17.4s, v17.4s, v19.4s\n" + "and v22.16b, v14.16b, v24.16b\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "and v21.16b, v16.16b, v24.16b\n" + "sqdmulh v10.4s, v10.4s, v19.4s\n" + "and v20.16b, v18.16b, v24.16b\n" + "sqdmulh v26.4s, v26.4s, v19.4s\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v19.16b, v17.16b, v23.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v30.16b, v9.16b, v23.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v10.16b, v23.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v28.16b, v26.16b, v23.16b\n" + "sqadd v12.4s, v12.4s, v29.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v14.4s, v14.4s, v22.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "sqadd v16.4s, v16.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v20.4s\n" + "sshr v28.4s, v28.4s, #0x1f\n" + "srshl v12.4s, v12.4s, v24.4s\n" + "sqadd v17.4s, v17.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v24.4s\n" + "sqadd v9.4s, v9.4s, v30.4s\n" + "srshl v16.4s, v16.4s, v24.4s\n" + "sqadd v10.4s, v10.4s, v3.4s\n" + "srshl v18.4s, v18.4s, v24.4s\n" + "sqadd v26.4s, v26.4s, v28.4s\n" + "srshl v17.4s, v17.4s, v23.4s\n" + "sqxtn v12.4h, v12.4s\n" + "srshl v9.4s, v9.4s, v23.4s\n" + "sqxtn v14.4h, v14.4s\n" + "srshl v10.4s, v10.4s, v23.4s\n" + "sqxtn v16.4h, v16.4s\n" + "srshl v26.4s, v26.4s, v23.4s\n" + "sqxtn v18.4h, v18.4s\n" + "sqxtn2 v12.8h, v17.4s\n" + "sqxtn2 v14.8h, v9.4s\n" + "sqxtn2 v16.8h, v10.4s\n" + "sqxtn2 v18.8h, v26.4s\n" + "sqadd v12.8h, v12.8h, v13.8h\n" + "sqadd v14.8h, v14.8h, v13.8h\n" + "sqadd v16.8h, v16.8h, v13.8h\n" + "sqadd v18.8h, v18.8h, v13.8h\n" + "smax v12.8h, v12.8h, v11.8h\n" + "smax v14.8h, v14.8h, v11.8h\n" + "smax v16.8h, v16.8h, v11.8h\n" + "smax v18.8h, v18.8h, v11.8h\n" + "smin v12.8h, v12.8h, v25.8h\n" + "smin v14.8h, v14.8h, v25.8h\n" + "smin v16.8h, v16.8h, v25.8h\n" + "smin v18.8h, v18.8h, v25.8h\n" + "uzp1 v12.16b, v12.16b, v12.16b\n" + "uzp1 v14.16b, v14.16b, v14.16b\n" + "uzp1 v16.16b, v16.16b, v16.16b\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "tbz x8, #2, 61f\n" + "st1 { v12.s }[0], [x10], #0x4\n" + "st1 { v14.s }[0], [x9], #0x4\n" + "st1 { v16.s }[0], [x28], #0x4\n" + "st1 { v18.s }[0], [x27], #0x4\n" + "tbz x8, #1, 60f\n" + "st1 { v12.h }[2], [x10], #0x2\n" + "st1 { v14.h }[2], [x9], #0x2\n" + "st1 { v16.h }[2], [x28], #0x2\n" + "st1 { v18.h }[2], [x27], #0x2\n" + "tbz x8, #0, 63f\n" + "st1 { v12.b }[6], [x10], #0x1\n" + "st1 { v14.b }[6], [x9], #0x1\n" + "st1 { v16.b }[6], [x28], #0x1\n" + "st1 { v18.b }[6], [x27], #0x1\n" + "b 63f\n" + "60:" // Oddments: Bit 2: Bit 1: Unset + "tbz x8, #0, 63f\n" + "st1 { v12.b }[4], [x10], #0x1\n" + "st1 { v14.b }[4], [x9], #0x1\n" + "st1 { v16.b }[4], [x28], #0x1\n" + "st1 { v18.b }[4], [x27], #0x1\n" + "b 63f\n" + "61:" // Oddments: Bit 2: Unset + "tbz x8, #1, 62f\n" + "st1 { v12.h }[0], [x10], #0x2\n" + "st1 { v14.h }[0], [x9], #0x2\n" + "st1 { v16.h }[0], [x28], #0x2\n" + "st1 { v18.h }[0], [x27], #0x2\n" + "tbz x8, #0, 63f\n" + "st1 { v12.b }[2], [x10], #0x1\n" + "st1 { v14.b }[2], [x9], #0x1\n" + "st1 { v16.b }[2], [x28], #0x1\n" + "st1 { v18.b }[2], [x27], #0x1\n" + "b 63f\n" + "62:" // Oddments: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 63f\n" + "st1 { v12.b }[0], [x10], #0x1\n" + "st1 { v14.b }[0], [x9], #0x1\n" + "st1 { v16.b }[0], [x28], #0x1\n" + "st1 { v18.b }[0], [x27], #0x1\n" + "63:" // Oddments: Bit 2: End + "64:" // End + : + : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), 
[offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); +} + +} // namespace depthwise +} // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp new file mode 100644 index 0000000000..b479dbf7a4 --- /dev/null +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "src/core/NEON/kernels/arm_gemm/utils.hpp" + +#include "src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp" + +#include <cstdint> + +#pragma once + +#if defined(__aarch64__) + +namespace arm_conv { +namespace depthwise { + +void a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); + +class a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> +{ + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + + public: + constexpr static unsigned int kernel_rows = 3; + constexpr static unsigned int kernel_cols = 3; + + constexpr static unsigned int stride_rows = 2; + constexpr static unsigned int stride_cols = 2; + + a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } + + Parent::KernelType kernel = a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } +}; + +} // namespace depthwise +} // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp new file mode 100644 index 0000000000..49f69c4204 --- /dev/null +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp @@ -0,0 +1,1395 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_gemm.hpp" + +#include <cstddef> +#include <cstdint> + +#if defined(__aarch64__) + +namespace arm_conv { +namespace depthwise { + +void a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( + const unsigned int n_channels, + const uint8_t *const *const inptrs, + const uint8_t *const weights, + const int32_t *const bias, + const arm_gemm::Requantize32 &qp, + const int32_t *const requant_muls, + const int32_t *const requant_shifts, + uint8_t *const *const outptrs +) +{ + struct Params + { + long unsigned int n_channels; + const void *weights; + const int32_t *bias; + const arm_gemm::Requantize32 *requant; + const int32_t *const requant_muls; + const int32_t *const requant_shifts; + uint8_t *const *const outptrs; + const uint8_t *inptrs[25]; + + Params( + long unsigned int n_channels, + const uint8_t *const *inptrs_raw, + const void *const weights, + const int32_t *const bias, + const arm_gemm::Requantize32 &qp, + const int32_t *const requant_muls, + const int32_t *const requant_shifts, + uint8_t *const *outptrs + ) : n_channels(n_channels), weights(weights), bias(bias), + requant(&qp), requant_muls(requant_muls), + requant_shifts(requant_shifts), outptrs(outptrs) + { + inptrs[0] = inptrs_raw[12]; + inptrs[1] = inptrs_raw[0]; + inptrs[2] = inptrs_raw[1]; + inptrs[3] = inptrs_raw[3]; + inptrs[4] = inptrs_raw[4]; + inptrs[5] = inptrs_raw[5]; + inptrs[6] = inptrs_raw[6]; + inptrs[7] = inptrs_raw[2]; + inptrs[8] = inptrs_raw[8]; + inptrs[9] = inptrs_raw[9]; + inptrs[10] = inptrs_raw[7]; + inptrs[11] = inptrs_raw[15]; + inptrs[12] = inptrs_raw[10]; + inptrs[13] = inptrs_raw[16]; + inptrs[14] = inptrs_raw[11]; + inptrs[15] = inptrs_raw[18]; + inptrs[16] = inptrs_raw[13]; + inptrs[17] = inptrs_raw[19]; + inptrs[18] = inptrs_raw[20]; + inptrs[19] = inptrs_raw[14]; + inptrs[20] = inptrs_raw[21]; + inptrs[21] = inptrs_raw[17]; + inptrs[22] = inptrs_raw[23]; + inptrs[23] = inptrs_raw[22]; + inptrs[24] = inptrs_raw[24]; + + } + }; + + const Params params(n_channels, inptrs, weights, bias, qp, + requant_muls, requant_shifts, outptrs); + + __asm__ __volatile__( + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "add x22, x19, %[offsetof_Requantize32_c_offset]\n" + "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "ld1r { v16.16b }, [x23]\n" + "ld1r { v12.8h }, [x22]\n" + "lsr x16, x8, #0x3\n" + "mov x15, #0x0\n" + "ld1r { v14.8h }, [x20]\n" + "ld1r { v21.8h }, [x19]\n" + "mov x14, #0x0\n" + "add x13, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x12, [%x[params], %[offsetof_Params_requant_muls]]\n" + "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x10, x9, [x21, #0x0]\n" + "ldp x28, x27, [x21, #0x10]\n" + "cbz x16, 3f\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "subs x16, x16, #0x1\n" + "mov v13.16b, v15.16b\n" + "ldr q18, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v17.16b, v18.16b\n" + "mov v11.16b, v15.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v10.16b, v18.16b\n" + "mov v23.16b, v15.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v9.16b, v18.16b\n" + "usubl v0.8h, 
v0.8b, v16.8b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "usubl v1.8h, v1.8b, v16.8b\n" + "usubl v2.8h, v2.8b, v16.8b\n" + "ldp x26, x25, [x13, #0x0]\n" + "ldp x24, x23, [x13, #0x10]\n" + "usubl v3.8h, v3.8b, v16.8b\n" + "usubl v4.8h, v4.8b, v16.8b\n" + "ldp x22, x21, [x13, #0x20]\n" + "ldp x20, x19, [x13, #0x30]\n" + "usubl v5.8h, v5.8b, v16.8b\n" + "usubl v6.8h, v6.8b, v16.8b\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" + "usubl v7.8h, v7.8b, v16.8b\n" + "usubl v8.8h, v8.8b, v16.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v26.8h, v26.8b, #0x0\n" + "ushll v25.8h, v25.8b, #0x0\n" + "ushll v24.8h, v24.8b, #0x0\n" + "beq 2f\n" + "1:" // Loop + "smlal v15.4s, v31.4h, v8.4h\n" + "smlal2 v18.4s, v31.8h, v8.8h\n" + "ldr x24, [x13, #0x40]\n" + "ldr x23, [x13, #0x48]\n" + "smlal v13.4s, v31.4h, v6.4h\n" + "smlal2 v17.4s, v31.8h, v6.8h\n" + "ldr x21, [x13, #0x50]\n" + "ldr x19, [x13, #0x58]\n" + "smlal v15.4s, v30.4h, v0.4h\n" + "smlal2 v18.4s, v30.8h, v0.8h\n" + "ldr x22, [x13, #0x78]\n" + "ldr x20, [x13, #0x60]\n" + "smlal v13.4s, v28.4h, v1.4h\n" + "smlal2 v17.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v15.4s, v29.4h, v1.4h\n" + "smlal2 v18.4s, v29.8h, v1.8h\n" + "ldr d29, [x24, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v13.4s, v27.4h, v2.4h\n" + "smlal2 v17.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v15.4s, v26.4h, v3.4h\n" + "smlal2 v18.4s, v26.8h, v3.8h\n" + "ldr d26, [x19, x15]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v13.4s, v24.4h, v0.4h\n" + "smlal2 v17.4s, v24.8h, v0.8h\n" + "ldr x21, [x13, #0x80]\n" + "ldr x19, [x13, #0x68]\n" + "smlal v15.4s, v25.4h, v4.4h\n" + "smlal2 v18.4s, v25.8h, v4.8h\n" + "ldr d25, [x20, x15]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v13.4s, v29.4h, v4.4h\n" + "smlal2 v17.4s, v29.8h, v4.8h\n" + "ldr x20, [x13, #0x88]\n" + "ldr d29, [x19, x15]\n" + "smlal v15.4s, v24.4h, v2.4h\n" + "smlal2 v18.4s, v24.8h, v2.8h\n" + "ldr x19, [x13, #0x70]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v13.4s, v28.4h, v5.4h\n" + "smlal2 v17.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal2 v10.4s, v31.8h, v2.8h\n" + "ldr x24, [x13, #0x98]\n" + "ldr d24, [x19, x15]\n" + "smlal v15.4s, v27.4h, v5.4h\n" + "smlal2 v18.4s, v27.8h, v5.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "ldr x23, [x13, #0x90]\n" + "smlal v13.4s, v27.4h, v3.4h\n" + "smlal2 v17.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v11.4s, v26.4h, v3.4h\n" + "ldr x22, [x13, #0xa8]\n" + "ldr x19, [x13, #0xa0]\n" + "smlal2 v10.4s, v26.8h, v3.8h\n" + "smlal2 v9.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v11.4s, v25.4h, v0.4h\n" + "ldr x21, [x13, #0xb0]\n" + "ldr x20, [x13, #0xb8]\n" + "smlal2 v10.4s, v25.8h, v0.8h\n" + "smlal2 v9.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v23.4s, v28.4h, v1.4h\n" + "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x13, #0xc0]\n" + "ldr q22, [x12, #0x0]\n" + "smlal2 v18.4s, v25.8h, v6.8h\n" + "smlal v11.4s, v29.4h, 
v4.4h\n" + "ldr d25, [x23, x15]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal2 v10.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v9.4s, v28.8h, v1.8h\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v23.4s, v26.4h, v5.4h\n" + "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q31, [x11, #0x0]\n" + "ldr q19, [x12, #0x10]\n" + "smlal2 v18.4s, v24.8h, v7.8h\n" + "smlal v11.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v22.4s\n" + "ldr q30, [x11, #0x10]\n" + "smlal2 v10.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" + "smlal2 v9.4s, v26.8h, v5.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v11.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v4.16b, v15.16b, v31.16b\n" + "add x17, x17, #0x48\n" + "smlal v13.4s, v28.4h, v7.4h\n" + "smlal2 v17.4s, v28.8h, v7.8h\n" + "sqdmulh v18.4s, v18.4s, v19.4s\n" + "subs x16, x16, #0x1\n" + "smlal2 v10.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" + "smlal2 v9.4s, v24.8h, v3.8h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v11.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "add x12, x12, #0x20\n" + "smlal v13.4s, v29.4h, v8.4h\n" + "smlal2 v17.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal2 v10.4s, v27.8h, v7.8h\n" + "smlal2 v9.4s, v26.8h, v7.8h\n" + "sqdmulh v13.4s, v13.4s, v22.4s\n" + "add x15, x15, #0x8\n" + "smlal v11.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v1.16b, v13.16b, v31.16b\n" + "add x11, x11, #0x20\n" + "smlal2 v10.4s, v24.8h, v5.8h\n" + "smlal2 v9.4s, v25.8h, v6.8h\n" + "sqdmulh v17.4s, v17.4s, v19.4s\n" + "smlal v11.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v11.4s, v11.4s, v22.4s\n" + "smlal2 v10.4s, v25.8h, v8.8h\n" + "smlal2 v9.4s, v29.8h, v8.8h\n" + "sqdmulh v23.4s, v23.4s, v22.4s\n" + "and v22.16b, v11.16b, v31.16b\n" + "sqdmulh v10.4s, v10.4s, v19.4s\n" + "and v20.16b, v23.16b, v31.16b\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "and v19.16b, v18.16b, v30.16b\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "and v27.16b, v17.16b, v30.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v25.16b, v10.16b, v30.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v0.16b, v9.16b, v30.16b\n" + "sqadd v15.4s, v15.4s, v4.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v13.4s, v13.4s, v1.4s\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sqadd v11.4s, v11.4s, v22.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v0.4s, v0.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v31.4s\n" + "sqadd v18.4s, v18.4s, v19.4s\n" + "srshl v13.4s, v13.4s, v31.4s\n" + "sqadd v17.4s, v17.4s, v27.4s\n" + "srshl v11.4s, v11.4s, v31.4s\n" + "sqadd v10.4s, v10.4s, v25.4s\n" + "srshl v23.4s, v23.4s, v31.4s\n" + "sqadd v9.4s, v9.4s, v0.4s\n" + "srshl v18.4s, v18.4s, v30.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v17.4s, v17.4s, v30.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v10.4s, v10.4s, v30.4s\n" + "sqxtn v11.4h, v11.4s\n" + "srshl v9.4s, v9.4s, v30.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v18.4s\n" + "sqxtn2 v13.8h, v17.4s\n" + "sqxtn2 v11.8h, v10.4s\n" + "sqxtn2 v23.8h, v9.4s\n" + "sqadd v15.8h, v15.8h, v12.8h\n" + "sqadd v13.8h, v13.8h, v12.8h\n" + "sqadd v11.8h, v11.8h, v12.8h\n" + "sqadd v23.8h, v23.8h, v12.8h\n" + "smax v15.8h, v15.8h, v14.8h\n" + "smax v13.8h, v13.8h, v14.8h\n" + "smax v11.8h, v11.8h, v14.8h\n" + "smax v23.8h, v23.8h, v14.8h\n" + "smin v15.8h, v15.8h, v21.8h\n" + "smin v13.8h, v13.8h, 
v21.8h\n" + "smin v11.8h, v11.8h, v21.8h\n" + "smin v23.8h, v23.8h, v21.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "str d15, [x10, x14]\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "str d13, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d11, [x28, x14]\n" + "str d23, [x27, x14]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "add x14, x14, #0x8\n" + "ldr q18, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v13.16b, v15.16b\n" + "mov v17.16b, v18.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v11.16b, v15.16b\n" + "mov v10.16b, v18.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v15.16b\n" + "mov v9.16b, v18.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "usubl v0.8h, v0.8b, v16.8b\n" + "usubl v1.8h, v1.8b, v16.8b\n" + "ldp x26, x25, [x13, #0x0]\n" + "ldp x24, x23, [x13, #0x10]\n" + "usubl v2.8h, v2.8b, v16.8b\n" + "usubl v3.8h, v3.8b, v16.8b\n" + "ldp x22, x21, [x13, #0x20]\n" + "ldp x20, x19, [x13, #0x30]\n" + "usubl v4.8h, v4.8b, v16.8b\n" + "usubl v5.8h, v5.8b, v16.8b\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" + "usubl v6.8h, v6.8b, v16.8b\n" + "usubl v7.8h, v7.8b, v16.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" + "usubl v8.8h, v8.8b, v16.8b\n" + "ushll v31.8h, v31.8b, #0x0\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v26.8h, v26.8b, #0x0\n" + "ushll v25.8h, v25.8b, #0x0\n" + "ushll v24.8h, v24.8b, #0x0\n" + "bgt 1b\n" + "2:" // Tail + "smlal v15.4s, v31.4h, v8.4h\n" + "smlal2 v18.4s, v31.8h, v8.8h\n" + "ldr x24, [x13, #0x40]\n" + "ldr x23, [x13, #0x48]\n" + "smlal v13.4s, v31.4h, v6.4h\n" + "smlal2 v17.4s, v31.8h, v6.8h\n" + "ldr x21, [x13, #0x50]\n" + "ldr x19, [x13, #0x58]\n" + "smlal v15.4s, v30.4h, v0.4h\n" + "smlal2 v18.4s, v30.8h, v0.8h\n" + "ldr x22, [x13, #0x78]\n" + "ldr x20, [x13, #0x60]\n" + "smlal v13.4s, v28.4h, v1.4h\n" + "smlal2 v17.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v15.4s, v29.4h, v1.4h\n" + "smlal2 v18.4s, v29.8h, v1.8h\n" + "ldr d29, [x24, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v13.4s, v27.4h, v2.4h\n" + "smlal2 v17.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v15.4s, v26.4h, v3.4h\n" + "smlal2 v18.4s, v26.8h, v3.8h\n" + "ldr d26, [x19, x15]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v13.4s, v24.4h, v0.4h\n" + "smlal2 v17.4s, v24.8h, v0.8h\n" + "ldr x21, [x13, #0x80]\n" + "ldr x19, [x13, #0x68]\n" + "smlal v15.4s, v25.4h, v4.4h\n" + "smlal2 v18.4s, v25.8h, v4.8h\n" + "ldr d25, [x20, x15]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v13.4s, v29.4h, v4.4h\n" + "smlal2 v17.4s, v29.8h, v4.8h\n" + "ldr x20, [x13, #0x88]\n" + "ldr d29, [x19, x15]\n" + "smlal v15.4s, v24.4h, v2.4h\n" + "smlal2 v18.4s, v24.8h, v2.8h\n" + "ldr x19, [x13, #0x70]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v13.4s, v28.4h, v5.4h\n" + "smlal2 v17.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal2 v10.4s, v31.8h, v2.8h\n" + "ldr x24, [x13, #0x98]\n" + "ldr d24, [x19, x15]\n" + "smlal v15.4s, v27.4h, v5.4h\n" + 
"smlal2 v18.4s, v27.8h, v5.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "ldr x23, [x13, #0x90]\n" + "smlal v13.4s, v27.4h, v3.4h\n" + "smlal2 v17.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v11.4s, v26.4h, v3.4h\n" + "ldr x22, [x13, #0xa8]\n" + "ldr x19, [x13, #0xa0]\n" + "smlal2 v10.4s, v26.8h, v3.8h\n" + "smlal2 v9.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v11.4s, v25.4h, v0.4h\n" + "ldr x21, [x13, #0xb0]\n" + "ldr x20, [x13, #0xb8]\n" + "smlal2 v10.4s, v25.8h, v0.8h\n" + "smlal2 v9.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v23.4s, v28.4h, v1.4h\n" + "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x13, #0xc0]\n" + "ldr q22, [x12, #0x0]\n" + "smlal2 v18.4s, v25.8h, v6.8h\n" + "smlal v11.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal2 v10.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v9.4s, v28.8h, v1.8h\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v23.4s, v26.4h, v5.4h\n" + "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q31, [x11, #0x0]\n" + "ldr q19, [x12, #0x10]\n" + "smlal2 v18.4s, v24.8h, v7.8h\n" + "smlal v11.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v22.4s\n" + "ldr q30, [x11, #0x10]\n" + "smlal2 v10.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" + "smlal2 v9.4s, v26.8h, v5.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v11.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v4.16b, v15.16b, v31.16b\n" + "tst x8, #0x7\n" + "smlal v13.4s, v28.4h, v7.4h\n" + "smlal2 v17.4s, v28.8h, v7.8h\n" + "sqdmulh v18.4s, v18.4s, v19.4s\n" + "add x12, x12, #0x20\n" + "smlal2 v10.4s, v25.8h, v6.8h\n" + "ldr d25, [x20, x15]\n" + "smlal2 v9.4s, v24.8h, v3.8h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v11.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "add x11, x11, #0x20\n" + "smlal v13.4s, v29.4h, v8.4h\n" + "smlal2 v17.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal2 v10.4s, v27.8h, v7.8h\n" + "smlal2 v9.4s, v26.8h, v7.8h\n" + "sqdmulh v13.4s, v13.4s, v22.4s\n" + "add x15, x15, #0x8\n" + "smlal v11.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v1.16b, v13.16b, v31.16b\n" + "smlal2 v10.4s, v24.8h, v5.8h\n" + "smlal2 v9.4s, v25.8h, v6.8h\n" + "sqdmulh v17.4s, v17.4s, v19.4s\n" + "smlal v11.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v11.4s, v11.4s, v22.4s\n" + "smlal2 v10.4s, v25.8h, v8.8h\n" + "smlal2 v9.4s, v29.8h, v8.8h\n" + "sqdmulh v23.4s, v23.4s, v22.4s\n" + "and v22.16b, v11.16b, v31.16b\n" + "sqdmulh v10.4s, v10.4s, v19.4s\n" + "and v20.16b, v23.16b, v31.16b\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "and v19.16b, v18.16b, v30.16b\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "and v27.16b, v17.16b, v30.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v25.16b, v10.16b, v30.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v0.16b, v9.16b, v30.16b\n" + "sqadd v15.4s, v15.4s, v4.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v13.4s, v13.4s, v1.4s\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sqadd v11.4s, v11.4s, v22.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v0.4s, v0.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v31.4s\n" + "sqadd v18.4s, v18.4s, v19.4s\n" + "srshl v13.4s, v13.4s, 
v31.4s\n" + "sqadd v17.4s, v17.4s, v27.4s\n" + "srshl v11.4s, v11.4s, v31.4s\n" + "sqadd v10.4s, v10.4s, v25.4s\n" + "srshl v23.4s, v23.4s, v31.4s\n" + "sqadd v9.4s, v9.4s, v0.4s\n" + "srshl v18.4s, v18.4s, v30.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v17.4s, v17.4s, v30.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v10.4s, v10.4s, v30.4s\n" + "sqxtn v11.4h, v11.4s\n" + "srshl v9.4s, v9.4s, v30.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v18.4s\n" + "sqxtn2 v13.8h, v17.4s\n" + "sqxtn2 v11.8h, v10.4s\n" + "sqxtn2 v23.8h, v9.4s\n" + "sqadd v15.8h, v15.8h, v12.8h\n" + "sqadd v13.8h, v13.8h, v12.8h\n" + "sqadd v11.8h, v11.8h, v12.8h\n" + "sqadd v23.8h, v23.8h, v12.8h\n" + "smax v15.8h, v15.8h, v14.8h\n" + "smax v13.8h, v13.8h, v14.8h\n" + "smax v11.8h, v11.8h, v14.8h\n" + "smax v23.8h, v23.8h, v14.8h\n" + "smin v15.8h, v15.8h, v21.8h\n" + "smin v13.8h, v13.8h, v21.8h\n" + "smin v11.8h, v11.8h, v21.8h\n" + "smin v23.8h, v23.8h, v21.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "str d15, [x10, x14]\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "str d13, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d11, [x28, x14]\n" + "str d23, [x27, x14]\n" + "add x14, x14, #0x8\n" + "beq 88f\n" + "add x17, x17, #0x48\n" + "3:" // Oddments + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x8, #2, 5f\n" + "ld1 { v15.4s }, [x19], #0x10\n" + "tbz x8, #1, 4f\n" + "ld1 { v18.d }[0], [x19], #0x8\n" + "tbz x8, #0, 7f\n" + "ld1 { v18.s }[2], [x19]\n" + "b 7f\n" + "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset + "tbz x8, #0, 7f\n" + "ld1 { v18.s }[0], [x19]\n" + "b 7f\n" + "5:" // Oddments: Load bias: Bit 2: Unset + "tbz x8, #1, 6f\n" + "ld1 { v15.d }[0], [x19], #0x8\n" + "tbz x8, #0, 7f\n" + "ld1 { v15.s }[2], [x19]\n" + "b 7f\n" + "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 7f\n" + "ld1 { v15.s }[0], [x19]\n" + "7:" // Oddments: Load bias: Bit 2: End + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v13.16b, v15.16b\n" + "mov v17.16b, v18.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v11.16b, v15.16b\n" + "mov v10.16b, v18.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v23.16b, v15.16b\n" + "mov v9.16b, v18.16b\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "usubl v0.8h, v0.8b, v16.8b\n" + "usubl v1.8h, v1.8b, v16.8b\n" + "ldr d8, [x17, #0x40]\n" + "ldp x26, x25, [x13, #0x0]\n" + "usubl v2.8h, v2.8b, v16.8b\n" + "usubl v3.8h, v3.8b, v16.8b\n" + "ldp x24, x23, [x13, #0x10]\n" + "ldp x22, x21, [x13, #0x20]\n" + "usubl v4.8h, v4.8b, v16.8b\n" + "usubl v5.8h, v5.8b, v16.8b\n" + "ldp x20, x19, [x13, #0x30]\n" + "usubl v6.8h, v6.8b, v16.8b\n" + "usubl v7.8h, v7.8b, v16.8b\n" + "usubl v8.8h, v8.8b, v16.8b\n" + "add x26, x26, x15\n" + "add x25, x25, x15\n" + "add x24, x24, x15\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" + "tbz x8, #2, 9f\n" + "ld1 { v31.s }[0], [x26], #0x4\n" + "ld1 { v30.s }[0], [x25], #0x4\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "ld1 { v26.s }[0], [x21], #0x4\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "ld1 { v24.s }[0], [x19], #0x4\n" + "tbz x8, #1, 8f\n" + "ld1 { v31.h }[2], [x26], #0x2\n" + "ld1 { v30.h }[2], [x25], #0x2\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "ld1 { 
v24.h }[2], [x19], #0x2\n" + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[6], [x26]\n" + "ld1 { v30.b }[6], [x25]\n" + "ld1 { v29.b }[6], [x24]\n" + "ld1 { v28.b }[6], [x23]\n" + "ld1 { v27.b }[6], [x22]\n" + "ld1 { v26.b }[6], [x21]\n" + "ld1 { v25.b }[6], [x20]\n" + "ld1 { v24.b }[6], [x19]\n" + "b 11f\n" + "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[4], [x26]\n" + "ld1 { v30.b }[4], [x25]\n" + "ld1 { v29.b }[4], [x24]\n" + "ld1 { v28.b }[4], [x23]\n" + "ld1 { v27.b }[4], [x22]\n" + "ld1 { v26.b }[4], [x21]\n" + "ld1 { v25.b }[4], [x20]\n" + "ld1 { v24.b }[4], [x19]\n" + "b 11f\n" + "9:" // Oddments: Initial loads: Bit 2: Unset + "tbz x8, #1, 10f\n" + "ld1 { v31.h }[0], [x26], #0x2\n" + "ld1 { v30.h }[0], [x25], #0x2\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "ld1 { v26.h }[0], [x21], #0x2\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "ld1 { v24.h }[0], [x19], #0x2\n" + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[2], [x26]\n" + "ld1 { v30.b }[2], [x25]\n" + "ld1 { v29.b }[2], [x24]\n" + "ld1 { v28.b }[2], [x23]\n" + "ld1 { v27.b }[2], [x22]\n" + "ld1 { v26.b }[2], [x21]\n" + "ld1 { v25.b }[2], [x20]\n" + "ld1 { v24.b }[2], [x19]\n" + "b 11f\n" + "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 11f\n" + "ld1 { v31.b }[0], [x26]\n" + "ld1 { v30.b }[0], [x25]\n" + "ld1 { v29.b }[0], [x24]\n" + "ld1 { v28.b }[0], [x23]\n" + "ld1 { v27.b }[0], [x22]\n" + "ld1 { v26.b }[0], [x21]\n" + "ld1 { v25.b }[0], [x20]\n" + "ld1 { v24.b }[0], [x19]\n" + "11:" // Oddments: Initial loads: Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v15.4s, v31.4h, v8.4h\n" + "smlal2 v18.4s, v31.8h, v8.8h\n" + "ldr x24, [x13, #0x40]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v15.4s, v30.4h, v0.4h\n" + "smlal2 v18.4s, v30.8h, v0.8h\n" + "add x24, x24, x15\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v13.4s, v31.4h, v6.4h\n" + "smlal2 v17.4s, v31.8h, v6.8h\n" + "smlal v15.4s, v29.4h, v1.4h\n" + "smlal2 v18.4s, v29.8h, v1.8h\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v13.4s, v28.4h, v1.4h\n" + "smlal2 v17.4s, v28.8h, v1.8h\n" + "smlal v15.4s, v26.4h, v3.4h\n" + "smlal2 v18.4s, v26.8h, v3.8h\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v13.4s, v27.4h, v2.4h\n" + "smlal2 v17.4s, v27.8h, v2.8h\n" + "smlal v15.4s, v25.4h, v4.4h\n" + "smlal2 v18.4s, v25.8h, v4.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal2 v10.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal2 v9.4s, v31.8h, v0.8h\n" + "smlal v15.4s, v24.4h, v2.4h\n" + "smlal2 v18.4s, v24.8h, v2.8h\n" + "smlal v13.4s, v24.4h, v0.4h\n" + "smlal2 v17.4s, v24.8h, v0.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[6], [x24]\n" + "b 15f\n" + "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[4], [x24]\n" + "b 15f\n" + "13:" // Oddments: Load (1, 3): Bit 2: Unset + "tbz x8, #1, 14f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[2], [x24]\n" + "b 15f\n" + "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[0], [x24]\n" + "15:" // Oddments: Load (1, 3): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "ldr x23, [x13, #0x48]\n" + "smlal v13.4s, v29.4h, v4.4h\n" + "smlal2 v17.4s, v29.8h, v4.8h\n" + "add x23, x23, 
x15\n" + "tbz x8, #2, 17f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[6], [x23]\n" + "b 19f\n" + "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[4], [x23]\n" + "b 19f\n" + "17:" // Oddments: Load (1, 4): Bit 2: Unset + "tbz x8, #1, 18f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[2], [x23]\n" + "b 19f\n" + "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[0], [x23]\n" + "19:" // Oddments: Load (1, 4): Bit 2: End + "ushll v28.8h, v28.8b, #0x0\n" + "ldr x21, [x13, #0x50]\n" + "smlal v13.4s, v28.4h, v5.4h\n" + "smlal2 v17.4s, v28.8h, v5.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 21f\n" + "ld1 { v27.s }[0], [x21], #0x4\n" + "tbz x8, #1, 20f\n" + "ld1 { v27.h }[2], [x21], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[6], [x21]\n" + "b 23f\n" + "20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[4], [x21]\n" + "b 23f\n" + "21:" // Oddments: Load (1, 2): Bit 2: Unset + "tbz x8, #1, 22f\n" + "ld1 { v27.h }[0], [x21], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[2], [x21]\n" + "b 23f\n" + "22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[0], [x21]\n" + "23:" // Oddments: Load (1, 2): Bit 2: End + "ushll v27.8h, v27.8b, #0x0\n" + "ldr x19, [x13, #0x58]\n" + "smlal v15.4s, v27.4h, v5.4h\n" + "smlal2 v18.4s, v27.8h, v5.8h\n" + "smlal v13.4s, v27.4h, v3.4h\n" + "smlal2 v17.4s, v27.8h, v3.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 25f\n" + "ld1 { v26.s }[0], [x19], #0x4\n" + "tbz x8, #1, 24f\n" + "ld1 { v26.h }[2], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[6], [x19]\n" + "b 27f\n" + "24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[4], [x19]\n" + "b 27f\n" + "25:" // Oddments: Load (3, 0): Bit 2: Unset + "tbz x8, #1, 26f\n" + "ld1 { v26.h }[0], [x19], #0x2\n" + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[2], [x19]\n" + "b 27f\n" + "26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 27f\n" + "ld1 { v26.b }[0], [x19]\n" + "27:" // Oddments: Load (3, 0): Bit 2: End + "ushll v26.8h, v26.8b, #0x0\n" + "ldr x20, [x13, #0x60]\n" + "smlal v11.4s, v26.4h, v3.4h\n" + "smlal2 v10.4s, v26.8h, v3.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 29f\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[6], [x20]\n" + "b 31f\n" + "28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[4], [x20]\n" + "b 31f\n" + "29:" // Oddments: Load (2, 0): Bit 2: Unset + "tbz x8, #1, 30f\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[2], [x20]\n" + "b 31f\n" + "30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[0], [x20]\n" + "31:" // Oddments: Load (2, 0): Bit 2: End + "ushll v25.8h, v25.8b, #0x0\n" + "ldr x19, [x13, #0x68]\n" + "smlal v15.4s, v25.4h, v6.4h\n" + "smlal2 v18.4s, v25.8h, v6.8h\n" + "smlal v11.4s, v25.4h, v0.4h\n" + "smlal2 v10.4s, v25.8h, v0.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x19], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x19]\n" + "b 35f\n" + "32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x19]\n" + "b 
35f\n" + "33:" // Oddments: Load (3, 1): Bit 2: Unset + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[2], [x19]\n" + "b 35f\n" + "34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x19]\n" + "35:" // Oddments: Load (3, 1): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "ldr x19, [x13, #0x70]\n" + "smlal v11.4s, v29.4h, v4.4h\n" + "smlal2 v10.4s, v29.8h, v4.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 37f\n" + "ld1 { v24.s }[0], [x19], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v24.h }[2], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[6], [x19]\n" + "b 39f\n" + "36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[4], [x19]\n" + "b 39f\n" + "37:" // Oddments: Load (2, 1): Bit 2: Unset + "tbz x8, #1, 38f\n" + "ld1 { v24.h }[0], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[2], [x19]\n" + "b 39f\n" + "38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[0], [x19]\n" + "39:" // Oddments: Load (2, 1): Bit 2: End + "ushll v24.8h, v24.8b, #0x0\n" + "ldr x22, [x13, #0x78]\n" + "smlal v15.4s, v24.4h, v7.4h\n" + "smlal2 v18.4s, v24.8h, v7.8h\n" + "smlal v11.4s, v24.4h, v1.4h\n" + "smlal2 v10.4s, v24.8h, v1.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 41f\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[6], [x22]\n" + "b 43f\n" + "40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[4], [x22]\n" + "b 43f\n" + "41:" // Oddments: Load (3, 3): Bit 2: Unset + "tbz x8, #1, 42f\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[2], [x22]\n" + "b 43f\n" + "42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[0], [x22]\n" + "43:" // Oddments: Load (3, 3): Bit 2: End + "ushll v27.8h, v27.8b, #0x0\n" + "ldr x21, [x13, #0x80]\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal2 v9.4s, v27.8h, v4.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 45f\n" + "ld1 { v28.s }[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v28.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[6], [x21]\n" + "b 47f\n" + "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[4], [x21]\n" + "b 47f\n" + "45:" // Oddments: Load (2, 3): Bit 2: Unset + "tbz x8, #1, 46f\n" + "ld1 { v28.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[2], [x21]\n" + "b 47f\n" + "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[0], [x21]\n" + "47:" // Oddments: Load (2, 3): Bit 2: End + "ushll v28.8h, v28.8b, #0x0\n" + "ldr x20, [x13, #0x88]\n" + "smlal v13.4s, v28.4h, v7.4h\n" + "smlal2 v17.4s, v28.8h, v7.8h\n" + "smlal v23.4s, v28.4h, v1.4h\n" + "smlal2 v9.4s, v28.8h, v1.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 49f\n" + "ld1 { v26.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v26.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[6], [x20]\n" + "b 51f\n" + "48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[4], [x20]\n" + "b 51f\n" + "49:" // Oddments: Load (3, 4): Bit 2: Unset + "tbz x8, #1, 50f\n" + "ld1 { v26.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[2], [x20]\n" + "b 51f\n" + "50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[0], [x20]\n" + "51:" // 
Oddments: Load (3, 4): Bit 2: End + "ushll v26.8h, v26.8b, #0x0\n" + "ldr x23, [x13, #0x90]\n" + "smlal v23.4s, v26.4h, v5.4h\n" + "smlal2 v9.4s, v26.8h, v5.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 53f\n" + "ld1 { v25.s }[0], [x23], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v25.h }[2], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[6], [x23]\n" + "b 55f\n" + "52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[4], [x23]\n" + "b 55f\n" + "53:" // Oddments: Load (4, 0): Bit 2: Unset + "tbz x8, #1, 54f\n" + "ld1 { v25.h }[0], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[2], [x23]\n" + "b 55f\n" + "54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[0], [x23]\n" + "55:" // Oddments: Load (4, 0): Bit 2: End + "ushll v25.8h, v25.8b, #0x0\n" + "ldr x24, [x13, #0x98]\n" + "smlal v11.4s, v25.4h, v6.4h\n" + "smlal2 v10.4s, v25.8h, v6.8h\n" + "add x24, x24, x15\n" + "tbz x8, #2, 57f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 56f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 59f\n" + "ld1 { v29.b }[6], [x24]\n" + "b 59f\n" + "56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset + "tbz x8, #0, 59f\n" + "ld1 { v29.b }[4], [x24]\n" + "b 59f\n" + "57:" // Oddments: Load (2, 4): Bit 2: Unset + "tbz x8, #1, 58f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 59f\n" + "ld1 { v29.b }[2], [x24]\n" + "b 59f\n" + "58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 59f\n" + "ld1 { v29.b }[0], [x24]\n" + "59:" // Oddments: Load (2, 4): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "ldr x19, [x13, #0xa0]\n" + "smlal v13.4s, v29.4h, v8.4h\n" + "smlal2 v17.4s, v29.8h, v8.8h\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "smlal2 v9.4s, v29.8h, v2.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 61f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 60f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[6], [x19]\n" + "b 63f\n" + "60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[4], [x19]\n" + "b 63f\n" + "61:" // Oddments: Load (4, 1): Bit 2: Unset + "tbz x8, #1, 62f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[2], [x19]\n" + "b 63f\n" + "62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[0], [x19]\n" + "63:" // Oddments: Load (4, 1): Bit 2: End + "ushll v27.8h, v27.8b, #0x0\n" + "ldr x22, [x13, #0xa8]\n" + "smlal v11.4s, v27.4h, v7.4h\n" + "smlal2 v10.4s, v27.8h, v7.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 65f\n" + "ld1 { v24.s }[0], [x22], #0x4\n" + "tbz x8, #1, 64f\n" + "ld1 { v24.h }[2], [x22], #0x2\n" + "tbz x8, #0, 67f\n" + "ld1 { v24.b }[6], [x22]\n" + "b 67f\n" + "64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 67f\n" + "ld1 { v24.b }[4], [x22]\n" + "b 67f\n" + "65:" // Oddments: Load (3, 2): Bit 2: Unset + "tbz x8, #1, 66f\n" + "ld1 { v24.h }[0], [x22], #0x2\n" + "tbz x8, #0, 67f\n" + "ld1 { v24.b }[2], [x22]\n" + "b 67f\n" + "66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 67f\n" + "ld1 { v24.b }[0], [x22]\n" + "67:" // Oddments: Load (3, 2): Bit 2: End + "ushll v24.8h, v24.8b, #0x0\n" + "ldr x21, [x13, #0xb0]\n" + "smlal v11.4s, v24.4h, v5.4h\n" + "smlal2 v10.4s, v24.8h, v5.8h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "smlal2 v9.4s, v24.8h, v3.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 69f\n" + "ld1 { v26.s }[0], [x21], #0x4\n" + "tbz x8, #1, 68f\n" + "ld1 { v26.h }[2], [x21], #0x2\n" 
+ "tbz x8, #0, 71f\n" + "ld1 { v26.b }[6], [x21]\n" + "b 71f\n" + "68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset + "tbz x8, #0, 71f\n" + "ld1 { v26.b }[4], [x21]\n" + "b 71f\n" + "69:" // Oddments: Load (4, 3): Bit 2: Unset + "tbz x8, #1, 70f\n" + "ld1 { v26.h }[0], [x21], #0x2\n" + "tbz x8, #0, 71f\n" + "ld1 { v26.b }[2], [x21]\n" + "b 71f\n" + "70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 71f\n" + "ld1 { v26.b }[0], [x21]\n" + "71:" // Oddments: Load (4, 3): Bit 2: End + "ushll v26.8h, v26.8b, #0x0\n" + "ldr x20, [x13, #0xb8]\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "smlal2 v9.4s, v26.8h, v7.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 73f\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "tbz x8, #1, 72f\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "tbz x8, #0, 75f\n" + "ld1 { v25.b }[6], [x20]\n" + "b 75f\n" + "72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset + "tbz x8, #0, 75f\n" + "ld1 { v25.b }[4], [x20]\n" + "b 75f\n" + "73:" // Oddments: Load (4, 2): Bit 2: Unset + "tbz x8, #1, 74f\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "tbz x8, #0, 75f\n" + "ld1 { v25.b }[2], [x20]\n" + "b 75f\n" + "74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 75f\n" + "ld1 { v25.b }[0], [x20]\n" + "75:" // Oddments: Load (4, 2): Bit 2: End + "ushll v25.8h, v25.8b, #0x0\n" + "ldr x19, [x13, #0xc0]\n" + "smlal v11.4s, v25.4h, v8.4h\n" + "smlal2 v10.4s, v25.8h, v8.8h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "smlal2 v9.4s, v25.8h, v6.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 77f\n" + "ld1 { v29.s }[0], [x19], #0x4\n" + "tbz x8, #1, 76f\n" + "ld1 { v29.h }[2], [x19], #0x2\n" + "tbz x8, #0, 79f\n" + "ld1 { v29.b }[6], [x19]\n" + "b 79f\n" + "76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset + "tbz x8, #0, 79f\n" + "ld1 { v29.b }[4], [x19]\n" + "b 79f\n" + "77:" // Oddments: Load (4, 4): Bit 2: Unset + "tbz x8, #1, 78f\n" + "ld1 { v29.h }[0], [x19], #0x2\n" + "tbz x8, #0, 79f\n" + "ld1 { v29.b }[2], [x19]\n" + "b 79f\n" + "78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 79f\n" + "ld1 { v29.b }[0], [x19]\n" + "79:" // Oddments: Load (4, 4): Bit 2: End + "ushll v29.8h, v29.8b, #0x0\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "smlal2 v9.4s, v29.8h, v8.8h\n" + "tbz x8, #2, 81f\n" + "ld1 { v22.4s }, [x12], #0x10\n" + "ld1 { v31.4s }, [x11], #0x10\n" + "tbz x8, #1, 80f\n" + "ld1 { v19.d }[0], [x12], #0x8\n" + "ld1 { v30.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[2], [x12]\n" + "ld1 { v30.s }[2], [x11]\n" + "b 83f\n" + "80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[0], [x12]\n" + "ld1 { v30.s }[0], [x11]\n" + "b 83f\n" + "81:" // Oddments: Load requant params: Bit 2: Unset + "tbz x8, #1, 82f\n" + "ld1 { v22.d }[0], [x12], #0x8\n" + "ld1 { v31.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v22.s }[2], [x12]\n" + "ld1 { v31.s }[2], [x11]\n" + "b 83f\n" + "82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 83f\n" + "ld1 { v22.s }[0], [x12]\n" + "ld1 { v31.s }[0], [x11]\n" + "83:" // Oddments: Load requant params: Bit 2: End + "sqdmulh v15.4s, v15.4s, v22.4s\n" + "sqdmulh v13.4s, v13.4s, v22.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v11.4s, v11.4s, v22.4s\n" + "sqdmulh v23.4s, v23.4s, v22.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v4.16b, v15.16b, v31.16b\n" + "sqdmulh v18.4s, v18.4s, v19.4s\n" + "and v1.16b, v13.16b, v31.16b\n" + "sqdmulh v17.4s, v17.4s, v19.4s\n" + "and v22.16b, v11.16b, v31.16b\n" + "sqdmulh 
v10.4s, v10.4s, v19.4s\n" + "and v20.16b, v23.16b, v31.16b\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v19.16b, v18.16b, v30.16b\n" + "sshr v1.4s, v1.4s, #0x1f\n" + "and v27.16b, v17.16b, v30.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v25.16b, v10.16b, v30.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v0.16b, v9.16b, v30.16b\n" + "sqadd v15.4s, v15.4s, v4.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v13.4s, v13.4s, v1.4s\n" + "sshr v27.4s, v27.4s, #0x1f\n" + "sqadd v11.4s, v11.4s, v22.4s\n" + "sshr v25.4s, v25.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" + "sshr v0.4s, v0.4s, #0x1f\n" + "srshl v15.4s, v15.4s, v31.4s\n" + "sqadd v18.4s, v18.4s, v19.4s\n" + "srshl v13.4s, v13.4s, v31.4s\n" + "sqadd v17.4s, v17.4s, v27.4s\n" + "srshl v11.4s, v11.4s, v31.4s\n" + "sqadd v10.4s, v10.4s, v25.4s\n" + "srshl v23.4s, v23.4s, v31.4s\n" + "sqadd v9.4s, v9.4s, v0.4s\n" + "srshl v18.4s, v18.4s, v30.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v17.4s, v17.4s, v30.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v10.4s, v10.4s, v30.4s\n" + "sqxtn v11.4h, v11.4s\n" + "srshl v9.4s, v9.4s, v30.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v18.4s\n" + "sqxtn2 v13.8h, v17.4s\n" + "sqxtn2 v11.8h, v10.4s\n" + "sqxtn2 v23.8h, v9.4s\n" + "sqadd v15.8h, v15.8h, v12.8h\n" + "sqadd v13.8h, v13.8h, v12.8h\n" + "sqadd v11.8h, v11.8h, v12.8h\n" + "sqadd v23.8h, v23.8h, v12.8h\n" + "smax v15.8h, v15.8h, v14.8h\n" + "smax v13.8h, v13.8h, v14.8h\n" + "smax v11.8h, v11.8h, v14.8h\n" + "smax v23.8h, v23.8h, v14.8h\n" + "smin v15.8h, v15.8h, v21.8h\n" + "smin v13.8h, v13.8h, v21.8h\n" + "smin v11.8h, v11.8h, v21.8h\n" + "smin v23.8h, v23.8h, v21.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "tbz x8, #2, 85f\n" + "st1 { v15.s }[0], [x10], #0x4\n" + "st1 { v13.s }[0], [x9], #0x4\n" + "st1 { v11.s }[0], [x28], #0x4\n" + "st1 { v23.s }[0], [x27], #0x4\n" + "tbz x8, #1, 84f\n" + "st1 { v15.h }[2], [x10], #0x2\n" + "st1 { v13.h }[2], [x9], #0x2\n" + "st1 { v11.h }[2], [x28], #0x2\n" + "st1 { v23.h }[2], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[6], [x10], #0x1\n" + "st1 { v13.b }[6], [x9], #0x1\n" + "st1 { v11.b }[6], [x28], #0x1\n" + "st1 { v23.b }[6], [x27], #0x1\n" + "b 87f\n" + "84:" // Oddments: Bit 2: Bit 1: Unset + "tbz x8, #0, 87f\n" + "st1 { v15.b }[4], [x10], #0x1\n" + "st1 { v13.b }[4], [x9], #0x1\n" + "st1 { v11.b }[4], [x28], #0x1\n" + "st1 { v23.b }[4], [x27], #0x1\n" + "b 87f\n" + "85:" // Oddments: Bit 2: Unset + "tbz x8, #1, 86f\n" + "st1 { v15.h }[0], [x10], #0x2\n" + "st1 { v13.h }[0], [x9], #0x2\n" + "st1 { v11.h }[0], [x28], #0x2\n" + "st1 { v23.h }[0], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[2], [x10], #0x1\n" + "st1 { v13.b }[2], [x9], #0x1\n" + "st1 { v11.b }[2], [x28], #0x1\n" + "st1 { v23.b }[2], [x27], #0x1\n" + "b 87f\n" + "86:" // Oddments: Bit 2: Unset: Bit 1: Unset + "tbz x8, #0, 87f\n" + "st1 { v15.b }[0], [x10], #0x1\n" + "st1 { v13.b }[0], [x9], #0x1\n" + "st1 { v11.b }[0], [x28], #0x1\n" + "st1 { v23.b }[0], [x27], #0x1\n" + "87:" // Oddments: Bit 2: End + "88:" // End + : + : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, 
requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); +} + +} // namespace depthwise +} // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp new file mode 100644 index 0000000000..482d1af80c --- /dev/null +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#include "src/core/NEON/kernels/arm_gemm/utils.hpp" + +#include "src/core/NEON/kernels/arm_conv/depthwise/interleaves/list.hpp" + +#include <cstdint> + +#pragma once + +#if defined(__aarch64__) + +namespace arm_conv { +namespace depthwise { + +void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *); + +class a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> +{ + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + + public: + constexpr static unsigned int kernel_rows = 5; + constexpr static unsigned int kernel_cols = 5; + + constexpr static unsigned int stride_rows = 1; + constexpr static unsigned int stride_cols = 1; + + a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; } + + Parent::KernelType kernel = a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } +}; + +} // namespace depthwise +} // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp new file mode 100644 index 0000000000..7f1fd1d568 --- /dev/null +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp @@ -0,0 +1,2185 @@ +/* + * Copyright (c) 2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "arm_gemm.hpp" + +#include <cstddef> +#include <cstdint> + +#if defined(__aarch64__) + +namespace arm_conv { +namespace depthwise { + +void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( + const unsigned int n_channels, + const uint8_t *const *const inptrs, + const uint8_t *const weights, + const int32_t *const bias, + const arm_gemm::Requantize32 &qp, + const int32_t *const requant_muls, + const int32_t *const requant_shifts, + uint8_t *const *const outptrs +) +{ + struct Params + { + long unsigned int n_channels; + const void *weights; + const int32_t *bias; + const arm_gemm::Requantize32 *requant; + const int32_t *const requant_muls; + const int32_t *const requant_shifts; + uint8_t *const *const outptrs; + const uint8_t *inptrs[36]; + + Params( + long unsigned int n_channels, + const uint8_t *const *inptrs_raw, + const void *const weights, + const int32_t *const bias, + const arm_gemm::Requantize32 &qp, + const int32_t *const requant_muls, + const int32_t *const requant_shifts, + uint8_t *const *outptrs + ) : n_channels(n_channels), weights(weights), bias(bias), + requant(&qp), requant_muls(requant_muls), + requant_shifts(requant_shifts), outptrs(outptrs) + { + inptrs[0] = inptrs_raw[0]; + inptrs[1] = inptrs_raw[1]; + inptrs[2] = inptrs_raw[6]; + inptrs[3] = inptrs_raw[7]; + inptrs[4] = inptrs_raw[2]; + inptrs[5] = inptrs_raw[8]; + inptrs[6] = inptrs_raw[3]; + inptrs[7] = inptrs_raw[4]; + inptrs[8] = inptrs_raw[11]; + inptrs[9] = inptrs_raw[12]; + inptrs[10] = inptrs_raw[9]; + inptrs[11] = inptrs_raw[10]; + inptrs[12] = inptrs_raw[5]; + inptrs[13] = inptrs_raw[13]; + inptrs[14] = inptrs_raw[14]; + inptrs[15] = inptrs_raw[15]; + inptrs[16] = inptrs_raw[16]; + inptrs[17] = inptrs_raw[17]; + inptrs[18] = inptrs_raw[18]; + inptrs[19] = inptrs_raw[19]; + inptrs[20] = inptrs_raw[20]; + inptrs[21] = inptrs_raw[21]; + inptrs[22] = inptrs_raw[22]; + inptrs[23] = inptrs_raw[23]; + inptrs[24] = inptrs_raw[24]; + inptrs[25] = inptrs_raw[25]; + inptrs[26] = inptrs_raw[26]; + inptrs[27] = inptrs_raw[27]; + inptrs[28] = inptrs_raw[28]; + inptrs[29] = inptrs_raw[29]; + inptrs[30] = inptrs_raw[30]; + inptrs[31] = inptrs_raw[31]; + inptrs[32] = inptrs_raw[32]; + inptrs[33] = inptrs_raw[33]; + inptrs[34] = inptrs_raw[34]; + inptrs[35] = inptrs_raw[35]; + + } + }; + + const Params params(n_channels, inptrs, weights, bias, qp, + requant_muls, requant_shifts, outptrs); + + __asm__ __volatile__( + "ldr x16, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x9, x16, %[offsetof_Requantize32_b_offset]\n" + "add x19, x16, %[offsetof_Requantize32_c_offset]\n" + "ldr x10, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x24, x16, %[offsetof_Requantize32_minval]\n" + "add x2, x16, %[offsetof_Requantize32_maxval]\n" + "ldr x8, [%x[params], %[offsetof_Params_weights]]\n" + "ld1r { v15.16b }, [x9]\n" + "ld1r { v16.8h }, [x19]\n" + "lsr x3, x4, #0x3\n" + "mov x1, #0x0\n" + "ld1r { v12.8h }, [x24]\n" + "ld1r { v13.8h }, [x2]\n" + "mov x2, #0x0\n" + "add x0, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x5, [%x[params], %[offsetof_Params_requant_muls]]\n" + "ldr x6, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x21, x15, [x10, #0x0]\n" + "ldp x17, x16, [x10, #0x10]\n" + "cbz x3, 3f\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q11, [x19, #0x0]\n" + "subs x3, x3, #0x1\n" + "mov v14.16b, v11.16b\n" + "ldr q21, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr 
d0, [x8, #0x0]\n" + "ldr d1, [x8, #0x8]\n" + "ldr d2, [x8, #0x10]\n" + "mov v10.16b, v21.16b\n" + "mov v9.16b, v11.16b\n" + "ldr d3, [x8, #0x18]\n" + "ldr d4, [x8, #0x20]\n" + "mov v8.16b, v21.16b\n" + "mov v7.16b, v11.16b\n" + "ldp x28, x27, [x0, #0x0]\n" + "ldp x10, x26, [x0, #0x10]\n" + "mov v6.16b, v21.16b\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "ldp x24, x23, [x0, #0x20]\n" + "ldp x22, x25, [x0, #0x30]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "ldp x20, x19, [x0, #0x40]\n" + "ldr d31, [x28, x1]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ldr d30, [x27, x1]\n" + "ldr d29, [x10, x1]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ldr d28, [x26, x1]\n" + "ldr d27, [x24, x1]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ldr d23, [x23, x1]\n" + "ldr d25, [x22, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ushll v23.8h, v23.8b, #0x0\n" + "ldr d24, [x25, x1]\n" + "ldr d26, [x20, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "ushll v24.8h, v24.8b, #0x0\n" + "ldr d22, [x19, x1]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "ushll v22.8h, v22.8b, #0x0\n" + "beq 2f\n" + "1:" // Loop + "smlal v11.4s, v31.4h, v0.4h\n" + "smlal2 v21.4s, v31.8h, v0.8h\n" + "ldr x19, [x0, #0x50]\n" + "ldr d31, [x19, x1]\n" + "smlal v14.4s, v30.4h, v0.4h\n" + "smlal v9.4s, v29.4h, v0.4h\n" + "ldr x20, [x0, #0x58]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v7.4s, v28.4h, v0.4h\n" + "smlal2 v10.4s, v30.8h, v0.8h\n" + "ldr x19, [x0, #0x60]\n" + "ldr x24, [x0, #0x68]\n" + "smlal2 v8.4s, v29.8h, v0.8h\n" + "smlal v11.4s, v30.4h, v1.4h\n" + "ldr x23, [x0, #0x70]\n" + "ldr x26, [x0, #0x78]\n" + "smlal2 v21.4s, v30.8h, v1.8h\n" + "smlal2 v6.4s, v28.8h, v0.8h\n" + "ldr d30, [x20, x1]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v14.4s, v27.4h, v1.4h\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "ldr d0, [x8, #0x28]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "smlal v7.4s, v23.4h, v1.4h\n" + "smlal2 v10.4s, v27.8h, v1.8h\n" + "ldr x7, [x0, #0x80]\n" + "ldr x22, [x0, #0x88]\n" + "smlal2 v8.4s, v28.8h, v1.8h\n" + "smlal v11.4s, v27.4h, v2.4h\n" + "ldr x20, [x0, #0x90]\n" + "ldr x14, [x0, #0x98]\n" + "smlal2 v21.4s, v27.8h, v2.8h\n" + "smlal2 v6.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v14.4s, v25.4h, v2.4h\n" + "smlal v9.4s, v23.4h, v2.4h\n" + "ldr d1, [x8, #0x30]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v31.4h, v2.4h\n" + "smlal2 v10.4s, v25.8h, v2.8h\n" + "ldr x19, [x0, #0xa0]\n" + "ldr x13, [x0, #0xa8]\n" + "smlal2 v8.4s, v23.8h, v2.8h\n" + "smlal v11.4s, v25.4h, v3.4h\n" + "ldr x12, [x0, #0xb0]\n" + "ldr x11, [x0, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v3.8h\n" + "smlal2 v6.4s, v31.8h, v2.8h\n" + "ldr d25, [x24, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v14.4s, v24.4h, v3.4h\n" + "smlal v9.4s, v31.4h, v3.4h\n" + "ldr d2, [x8, #0x38]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v30.4h, v3.4h\n" + "smlal2 v10.4s, v24.8h, v3.8h\n" + "ldr x10, [x0, #0xc0]\n" + "ldr x9, [x0, #0xc8]\n" + "smlal2 v8.4s, v31.8h, v3.8h\n" + "smlal v11.4s, v24.4h, v4.4h\n" + "ldr x28, [x0, #0xd0]\n" + "ldr x27, [x0, #0xd8]\n" + "smlal2 v21.4s, v24.8h, v4.8h\n" + "smlal2 v6.4s, v30.8h, v3.8h\n" + "ldr d24, [x23, x1]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v14.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "ldr d3, [x8, #0x40]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v26.4h, v4.4h\n" + "smlal2 v10.4s, v27.8h, v4.8h\n" + "ldr d27, [x26, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal2 
v8.4s, v30.8h, v4.8h\n" + "smlal v11.4s, v29.4h, v0.4h\n" + "ldr x26, [x0, #0xe0]\n" + "ldr x25, [x0, #0xe8]\n" + "smlal2 v21.4s, v29.8h, v0.8h\n" + "smlal2 v6.4s, v26.8h, v4.8h\n" + "ldr d4, [x8, #0x48]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v14.4s, v28.4h, v0.4h\n" + "smlal v9.4s, v22.4h, v0.4h\n" + "ldr x24, [x0, #0xf0]\n" + "ldr q17, [x5, #0x0]\n" + "smlal v7.4s, v25.4h, v0.4h\n" + "smlal2 v10.4s, v28.8h, v0.8h\n" + "ldr q5, [x6, #0x0]\n" + "ldr q18, [x5, #0x10]\n" + "smlal2 v8.4s, v22.8h, v0.8h\n" + "smlal v11.4s, v28.4h, v1.4h\n" + "ldr q29, [x6, #0x10]\n" + "subs x3, x3, #0x1\n" + "smlal2 v21.4s, v28.8h, v1.8h\n" + "smlal2 v6.4s, v25.8h, v0.8h\n" + "ldr d28, [x22, x1]\n" + "ldr d0, [x8, #0x50]\n" + "smlal v14.4s, v23.4h, v1.4h\n" + "smlal v9.4s, v25.4h, v1.4h\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ldr x23, [x0, #0xf8]\n" + "smlal v7.4s, v24.4h, v1.4h\n" + "smlal2 v10.4s, v23.8h, v1.8h\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "add x5, x5, #0x20\n" + "smlal2 v8.4s, v25.8h, v1.8h\n" + "smlal v11.4s, v23.4h, v2.4h\n" + "add x6, x6, #0x20\n" + "smlal2 v21.4s, v23.8h, v2.8h\n" + "ldr d23, [x7, x1]\n" + "smlal2 v6.4s, v24.8h, v1.8h\n" + "ushll v23.8h, v23.8b, #0x0\n" + "smlal v14.4s, v31.4h, v2.4h\n" + "smlal v9.4s, v24.4h, v2.4h\n" + "ldr d1, [x8, #0x58]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v27.4h, v2.4h\n" + "smlal2 v10.4s, v31.8h, v2.8h\n" + "ldr x22, [x0, #0x100]\n" + "smlal2 v8.4s, v24.8h, v2.8h\n" + "smlal v11.4s, v31.4h, v3.4h\n" + "smlal2 v21.4s, v31.8h, v3.8h\n" + "smlal2 v6.4s, v27.8h, v2.8h\n" + "ldr d31, [x20, x1]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v14.4s, v30.4h, v3.4h\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "ldr d2, [x8, #0x60]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v23.4h, v3.4h\n" + "smlal2 v10.4s, v30.8h, v3.8h\n" + "ldr x7, [x0, #0x108]\n" + "smlal2 v8.4s, v27.8h, v3.8h\n" + "smlal v11.4s, v30.4h, v4.4h\n" + "smlal2 v21.4s, v30.8h, v4.8h\n" + "ldr d30, [x14, x1]\n" + "smlal2 v6.4s, v23.8h, v3.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v14.4s, v26.4h, v4.4h\n" + "smlal v9.4s, v23.4h, v4.4h\n" + "ldr d3, [x8, #0x68]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v28.4h, v4.4h\n" + "smlal2 v10.4s, v26.8h, v4.8h\n" + "ldr d26, [x19, x1]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal2 v8.4s, v23.8h, v4.8h\n" + "smlal v11.4s, v22.4h, v0.4h\n" + "ldr x20, [x0, #0x110]\n" + "ldr x19, [x0, #0x118]\n" + "smlal2 v21.4s, v22.8h, v0.8h\n" + "smlal2 v6.4s, v28.8h, v4.8h\n" + "ldr d4, [x8, #0x70]\n" + "ldr d22, [x11, x1]\n" + "smlal v14.4s, v25.4h, v0.4h\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v7.4s, v30.4h, v0.4h\n" + "smlal2 v10.4s, v25.8h, v0.8h\n" + "ushll v22.8h, v22.8b, #0x0\n" + "smlal2 v8.4s, v31.8h, v0.8h\n" + "smlal v11.4s, v25.4h, v1.4h\n" + "smlal2 v21.4s, v25.8h, v1.8h\n" + "ldr d25, [x13, x1]\n" + "smlal2 v6.4s, v30.8h, v0.8h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v14.4s, v24.4h, v1.4h\n" + "smlal v9.4s, v30.4h, v1.4h\n" + "ldr d0, [x8, #0x78]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "smlal v7.4s, v26.4h, v1.4h\n" + "smlal2 v10.4s, v24.8h, v1.8h\n" + "smlal2 v8.4s, v30.8h, v1.8h\n" + "smlal v11.4s, v24.4h, v2.4h\n" + "smlal2 v21.4s, v24.8h, v2.8h\n" + "ldr d24, [x12, x1]\n" + "smlal2 v6.4s, v26.8h, v1.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v14.4s, v27.4h, v2.4h\n" + "smlal v9.4s, v26.4h, v2.4h\n" + "ldr d1, [x8, #0x80]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v25.4h, v2.4h\n" + "smlal2 v10.4s, v27.8h, v2.8h\n" + "smlal2 v8.4s, v26.8h, v2.8h\n" + "smlal v11.4s, 
v27.4h, v3.4h\n" + "smlal2 v21.4s, v27.8h, v3.8h\n" + "smlal2 v6.4s, v25.8h, v2.8h\n" + "ldr d27, [x10, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v14.4s, v23.4h, v3.4h\n" + "smlal v9.4s, v25.4h, v3.4h\n" + "ldr d2, [x8, #0x88]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v24.4h, v3.4h\n" + "smlal2 v10.4s, v23.8h, v3.8h\n" + "smlal2 v8.4s, v25.8h, v3.8h\n" + "smlal v11.4s, v23.4h, v4.4h\n" + "smlal2 v21.4s, v23.8h, v4.8h\n" + "ldr d23, [x9, x1]\n" + "smlal2 v6.4s, v24.8h, v3.8h\n" + "ushll v23.8h, v23.8b, #0x0\n" + "smlal v14.4s, v28.4h, v4.4h\n" + "smlal v9.4s, v24.4h, v4.4h\n" + "ldr d3, [x8, #0x90]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v22.4h, v4.4h\n" + "smlal2 v10.4s, v28.8h, v4.8h\n" + "ldr d28, [x26, x1]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal2 v8.4s, v24.8h, v4.8h\n" + "smlal v11.4s, v31.4h, v0.4h\n" + "smlal2 v21.4s, v31.8h, v0.8h\n" + "ldr d31, [x28, x1]\n" + "smlal2 v6.4s, v22.8h, v4.8h\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v14.4s, v30.4h, v0.4h\n" + "smlal v9.4s, v27.4h, v0.4h\n" + "ldr d4, [x8, #0x98]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v7.4s, v23.4h, v0.4h\n" + "smlal2 v10.4s, v30.8h, v0.8h\n" + "smlal2 v8.4s, v27.8h, v0.8h\n" + "smlal v11.4s, v30.4h, v1.4h\n" + "smlal2 v21.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x1]\n" + "smlal2 v6.4s, v23.8h, v0.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v14.4s, v26.4h, v1.4h\n" + "smlal v9.4s, v23.4h, v1.4h\n" + "ldr d0, [x8, #0xa0]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "smlal v7.4s, v31.4h, v1.4h\n" + "smlal2 v10.4s, v26.8h, v1.8h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "smlal v11.4s, v26.4h, v2.4h\n" + "smlal2 v21.4s, v26.8h, v2.8h\n" + "smlal2 v6.4s, v31.8h, v1.8h\n" + "ldr d26, [x25, x1]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v14.4s, v25.4h, v2.4h\n" + "smlal v9.4s, v31.4h, v2.4h\n" + "ldr d1, [x8, #0xa8]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v30.4h, v2.4h\n" + "smlal2 v10.4s, v25.8h, v2.8h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "smlal v11.4s, v25.4h, v3.4h\n" + "smlal2 v21.4s, v25.8h, v3.8h\n" + "smlal2 v6.4s, v30.8h, v2.8h\n" + "ldr d25, [x24, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v14.4s, v24.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v3.4h\n" + "ldr d2, [x8, #0xb0]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v28.4h, v3.4h\n" + "smlal2 v10.4s, v24.8h, v3.8h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "smlal v11.4s, v24.4h, v4.4h\n" + "smlal2 v21.4s, v24.8h, v4.8h\n" + "ldr d24, [x23, x1]\n" + "smlal2 v6.4s, v28.8h, v3.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v14.4s, v22.4h, v4.4h\n" + "smlal v9.4s, v28.4h, v4.4h\n" + "ldr d3, [x8, #0xb8]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "smlal v11.4s, v27.4h, v0.4h\n" + "smlal2 v21.4s, v27.8h, v0.8h\n" + "ldr d27, [x22, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal2 v10.4s, v22.8h, v4.8h\n" + "smlal2 v6.4s, v26.8h, v4.8h\n" + "ldr d4, [x8, #0xc0]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v14.4s, v23.4h, v0.4h\n" + "smlal v9.4s, v25.4h, v0.4h\n" + "add x8, x8, #0xc8\n" + "smlal v7.4s, v24.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "ldr d25, [x7, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal2 v10.4s, v23.8h, v0.8h\n" + "smlal2 v6.4s, v24.8h, v0.8h\n" + "smlal v11.4s, v23.4h, v1.4h\n" + "smlal v14.4s, v31.4h, v1.4h\n" + "smlal v9.4s, v24.4h, v1.4h\n" + "smlal v7.4s, v27.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "ldr d24, [x20, x1]\n" + "smlal2 v21.4s, v23.8h, v1.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal2 v10.4s, v31.8h, 
v1.8h\n" + "smlal2 v6.4s, v27.8h, v1.8h\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal v14.4s, v30.4h, v2.4h\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal v7.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "ldr d27, [x19, x1]\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal2 v10.4s, v30.8h, v2.8h\n" + "smlal2 v6.4s, v25.8h, v2.8h\n" + "add x1, x1, #0x8\n" + "smlal v11.4s, v30.4h, v3.4h\n" + "smlal v14.4s, v28.4h, v3.4h\n" + "smlal v9.4s, v25.4h, v3.4h\n" + "smlal v7.4s, v24.4h, v3.4h\n" + "smlal2 v21.4s, v30.8h, v3.8h\n" + "smlal2 v10.4s, v28.8h, v3.8h\n" + "smlal2 v8.4s, v25.8h, v3.8h\n" + "smlal2 v6.4s, v24.8h, v3.8h\n" + "smlal v11.4s, v28.4h, v4.4h\n" + "smlal v14.4s, v26.4h, v4.4h\n" + "sqdmulh v11.4s, v11.4s, v17.4s\n" + "smlal v9.4s, v24.4h, v4.4h\n" + "smlal v7.4s, v27.4h, v4.4h\n" + "sqdmulh v14.4s, v14.4s, v17.4s\n" + "smlal2 v21.4s, v28.8h, v4.8h\n" + "smlal2 v10.4s, v26.8h, v4.8h\n" + "sqdmulh v9.4s, v9.4s, v17.4s\n" + "smlal2 v8.4s, v24.8h, v4.8h\n" + "smlal2 v6.4s, v27.8h, v4.8h\n" + "sqdmulh v7.4s, v7.4s, v17.4s\n" + "and v23.16b, v11.16b, v5.16b\n" + "sqdmulh v21.4s, v21.4s, v18.4s\n" + "and v22.16b, v14.16b, v5.16b\n" + "sqdmulh v10.4s, v10.4s, v18.4s\n" + "and v17.16b, v9.16b, v5.16b\n" + "sqdmulh v8.4s, v8.4s, v18.4s\n" + "and v20.16b, v7.16b, v5.16b\n" + "sqdmulh v6.4s, v6.4s, v18.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v19.16b, v21.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v18.16b, v10.16b, v29.16b\n" + "sshr v17.4s, v17.4s, #0x1f\n" + "and v26.16b, v8.16b, v29.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v4.16b, v6.16b, v29.16b\n" + "sqadd v11.4s, v11.4s, v23.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v14.4s, v14.4s, v22.4s\n" + "sshr v18.4s, v18.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v17.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sqadd v7.4s, v7.4s, v20.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "srshl v11.4s, v11.4s, v5.4s\n" + "sqadd v21.4s, v21.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v5.4s\n" + "sqadd v10.4s, v10.4s, v18.4s\n" + "srshl v9.4s, v9.4s, v5.4s\n" + "sqadd v8.4s, v8.4s, v26.4s\n" + "srshl v7.4s, v7.4s, v5.4s\n" + "sqadd v6.4s, v6.4s, v4.4s\n" + "srshl v21.4s, v21.4s, v29.4s\n" + "sqxtn v11.4h, v11.4s\n" + "srshl v10.4s, v10.4s, v29.4s\n" + "sqxtn v14.4h, v14.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v6.4s, v6.4s, v29.4s\n" + "sqxtn v7.4h, v7.4s\n" + "sqxtn2 v11.8h, v21.4s\n" + "sqxtn2 v14.8h, v10.4s\n" + "sqxtn2 v9.8h, v8.4s\n" + "sqxtn2 v7.8h, v6.4s\n" + "sqadd v11.8h, v11.8h, v16.8h\n" + "sqadd v14.8h, v14.8h, v16.8h\n" + "sqadd v9.8h, v9.8h, v16.8h\n" + "sqadd v7.8h, v7.8h, v16.8h\n" + "smax v11.8h, v11.8h, v12.8h\n" + "smax v14.8h, v14.8h, v12.8h\n" + "smax v9.8h, v9.8h, v12.8h\n" + "smax v7.8h, v7.8h, v12.8h\n" + "smin v11.8h, v11.8h, v13.8h\n" + "smin v14.8h, v14.8h, v13.8h\n" + "smin v9.8h, v9.8h, v13.8h\n" + "smin v7.8h, v7.8h, v13.8h\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "uzp1 v14.16b, v14.16b, v14.16b\n" + "str d11, [x21, x2]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v7.16b, v7.16b, v7.16b\n" + "str d14, [x15, x2]\n" + "str d9, [x17, x2]\n" + "str d7, [x16, x2]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q11, [x19, #0x0]\n" + "add x2, x2, #0x8\n" + "ldr q21, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x8, #0x0]\n" + "ldr d1, [x8, #0x8]\n" + "ldr d2, [x8, #0x10]\n" + "mov v14.16b, v11.16b\n" + "mov v10.16b, v21.16b\n" + "ldr d3, [x8, #0x18]\n" + "ldr d4, [x8, #0x20]\n" + 
"mov v9.16b, v11.16b\n" + "mov v8.16b, v21.16b\n" + "ldp x28, x27, [x0, #0x0]\n" + "ldp x10, x26, [x0, #0x10]\n" + "mov v7.16b, v11.16b\n" + "mov v6.16b, v21.16b\n" + "ldp x24, x23, [x0, #0x20]\n" + "ldp x22, x25, [x0, #0x30]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldp x20, x19, [x0, #0x40]\n" + "ldr d31, [x28, x1]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldr d30, [x27, x1]\n" + "ldr d29, [x10, x1]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ushll v31.8h, v31.8b, #0x0\n" + "ldr d28, [x26, x1]\n" + "ldr d27, [x24, x1]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "ushll v29.8h, v29.8b, #0x0\n" + "ldr d23, [x23, x1]\n" + "ldr d25, [x22, x1]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ushll v27.8h, v27.8b, #0x0\n" + "ldr d24, [x25, x1]\n" + "ldr d26, [x20, x1]\n" + "ushll v23.8h, v23.8b, #0x0\n" + "ushll v25.8h, v25.8b, #0x0\n" + "ldr d22, [x19, x1]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "ushll v26.8h, v26.8b, #0x0\n" + "ushll v22.8h, v22.8b, #0x0\n" + "bgt 1b\n" + "2:" // Tail + "smlal v11.4s, v31.4h, v0.4h\n" + "smlal2 v21.4s, v31.8h, v0.8h\n" + "ldr x19, [x0, #0x50]\n" + "ldr d31, [x19, x1]\n" + "smlal v14.4s, v30.4h, v0.4h\n" + "smlal v9.4s, v29.4h, v0.4h\n" + "ldr x20, [x0, #0x58]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v7.4s, v28.4h, v0.4h\n" + "smlal2 v10.4s, v30.8h, v0.8h\n" + "ldr x19, [x0, #0x60]\n" + "ldr x24, [x0, #0x68]\n" + "smlal2 v8.4s, v29.8h, v0.8h\n" + "smlal v11.4s, v30.4h, v1.4h\n" + "ldr x23, [x0, #0x70]\n" + "ldr x26, [x0, #0x78]\n" + "smlal2 v21.4s, v30.8h, v1.8h\n" + "smlal2 v6.4s, v28.8h, v0.8h\n" + "ldr d30, [x20, x1]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v14.4s, v27.4h, v1.4h\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "ldr d0, [x8, #0x28]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "smlal v7.4s, v23.4h, v1.4h\n" + "smlal2 v10.4s, v27.8h, v1.8h\n" + "ldr x7, [x0, #0x80]\n" + "ldr x22, [x0, #0x88]\n" + "smlal2 v8.4s, v28.8h, v1.8h\n" + "smlal v11.4s, v27.4h, v2.4h\n" + "ldr x20, [x0, #0x90]\n" + "ldr x14, [x0, #0x98]\n" + "smlal2 v21.4s, v27.8h, v2.8h\n" + "smlal2 v6.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v14.4s, v25.4h, v2.4h\n" + "smlal v9.4s, v23.4h, v2.4h\n" + "ldr d1, [x8, #0x30]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v31.4h, v2.4h\n" + "smlal2 v10.4s, v25.8h, v2.8h\n" + "ldr x19, [x0, #0xa0]\n" + "ldr x13, [x0, #0xa8]\n" + "smlal2 v8.4s, v23.8h, v2.8h\n" + "smlal v11.4s, v25.4h, v3.4h\n" + "ldr x12, [x0, #0xb0]\n" + "ldr x11, [x0, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v3.8h\n" + "smlal2 v6.4s, v31.8h, v2.8h\n" + "ldr d25, [x24, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v14.4s, v24.4h, v3.4h\n" + "smlal v9.4s, v31.4h, v3.4h\n" + "ldr d2, [x8, #0x38]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v30.4h, v3.4h\n" + "smlal2 v10.4s, v24.8h, v3.8h\n" + "ldr x10, [x0, #0xc0]\n" + "ldr x9, [x0, #0xc8]\n" + "smlal2 v8.4s, v31.8h, v3.8h\n" + "smlal v11.4s, v24.4h, v4.4h\n" + "ldr x28, [x0, #0xd0]\n" + "ldr x27, [x0, #0xd8]\n" + "smlal2 v21.4s, v24.8h, v4.8h\n" + "smlal2 v6.4s, v30.8h, v3.8h\n" + "ldr d24, [x23, x1]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v14.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "ldr d3, [x8, #0x40]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v26.4h, v4.4h\n" + "smlal2 v10.4s, v27.8h, v4.8h\n" + "ldr d27, [x26, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal2 v8.4s, v30.8h, v4.8h\n" + "smlal v11.4s, v29.4h, v0.4h\n" + "ldr x26, [x0, #0xe0]\n" + "ldr x25, [x0, #0xe8]\n" + "smlal2 v21.4s, v29.8h, v0.8h\n" + 
"smlal2 v6.4s, v26.8h, v4.8h\n" + "ldr d4, [x8, #0x48]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v14.4s, v28.4h, v0.4h\n" + "smlal v9.4s, v22.4h, v0.4h\n" + "ldr x24, [x0, #0xf0]\n" + "ldr x23, [x0, #0xf8]\n" + "smlal v7.4s, v25.4h, v0.4h\n" + "smlal2 v10.4s, v28.8h, v0.8h\n" + "ldr q17, [x5, #0x0]\n" + "ldr q5, [x6, #0x0]\n" + "smlal2 v8.4s, v22.8h, v0.8h\n" + "smlal v11.4s, v28.4h, v1.4h\n" + "ldr q18, [x5, #0x10]\n" + "ldr q29, [x6, #0x10]\n" + "smlal2 v21.4s, v28.8h, v1.8h\n" + "smlal2 v6.4s, v25.8h, v0.8h\n" + "ldr d28, [x22, x1]\n" + "ldr d0, [x8, #0x50]\n" + "smlal v14.4s, v23.4h, v1.4h\n" + "smlal v9.4s, v25.4h, v1.4h\n" + "ushll v28.8h, v28.8b, #0x0\n" + "ldr x22, [x0, #0x100]\n" + "smlal v7.4s, v24.4h, v1.4h\n" + "smlal2 v10.4s, v23.8h, v1.8h\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "tst x4, #0x7\n" + "smlal2 v8.4s, v25.8h, v1.8h\n" + "smlal v11.4s, v23.4h, v2.4h\n" + "add x5, x5, #0x20\n" + "add x6, x6, #0x20\n" + "smlal2 v21.4s, v23.8h, v2.8h\n" + "ldr d23, [x7, x1]\n" + "smlal2 v6.4s, v24.8h, v1.8h\n" + "ushll v23.8h, v23.8b, #0x0\n" + "smlal v14.4s, v31.4h, v2.4h\n" + "smlal v9.4s, v24.4h, v2.4h\n" + "ldr d1, [x8, #0x58]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v27.4h, v2.4h\n" + "smlal2 v10.4s, v31.8h, v2.8h\n" + "ldr x7, [x0, #0x108]\n" + "smlal2 v8.4s, v24.8h, v2.8h\n" + "smlal v11.4s, v31.4h, v3.4h\n" + "smlal2 v21.4s, v31.8h, v3.8h\n" + "smlal2 v6.4s, v27.8h, v2.8h\n" + "ldr d31, [x20, x1]\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v14.4s, v30.4h, v3.4h\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "ldr d2, [x8, #0x60]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v23.4h, v3.4h\n" + "smlal2 v10.4s, v30.8h, v3.8h\n" + "ldr x20, [x0, #0x110]\n" + "smlal2 v8.4s, v27.8h, v3.8h\n" + "smlal v11.4s, v30.4h, v4.4h\n" + "smlal2 v21.4s, v30.8h, v4.8h\n" + "ldr d30, [x14, x1]\n" + "smlal2 v6.4s, v23.8h, v3.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v14.4s, v26.4h, v4.4h\n" + "smlal v9.4s, v23.4h, v4.4h\n" + "ldr d3, [x8, #0x68]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v28.4h, v4.4h\n" + "smlal2 v10.4s, v26.8h, v4.8h\n" + "ldr d26, [x19, x1]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal2 v8.4s, v23.8h, v4.8h\n" + "smlal v11.4s, v22.4h, v0.4h\n" + "ldr x19, [x0, #0x118]\n" + "smlal2 v21.4s, v22.8h, v0.8h\n" + "smlal2 v6.4s, v28.8h, v4.8h\n" + "ldr d4, [x8, #0x70]\n" + "ldr d22, [x11, x1]\n" + "smlal v14.4s, v25.4h, v0.4h\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v7.4s, v30.4h, v0.4h\n" + "smlal2 v10.4s, v25.8h, v0.8h\n" + "ushll v22.8h, v22.8b, #0x0\n" + "smlal2 v8.4s, v31.8h, v0.8h\n" + "smlal v11.4s, v25.4h, v1.4h\n" + "smlal2 v21.4s, v25.8h, v1.8h\n" + "ldr d25, [x13, x1]\n" + "smlal2 v6.4s, v30.8h, v0.8h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v14.4s, v24.4h, v1.4h\n" + "smlal v9.4s, v30.4h, v1.4h\n" + "ldr d0, [x8, #0x78]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "smlal v7.4s, v26.4h, v1.4h\n" + "smlal2 v10.4s, v24.8h, v1.8h\n" + "smlal2 v8.4s, v30.8h, v1.8h\n" + "smlal v11.4s, v24.4h, v2.4h\n" + "smlal2 v21.4s, v24.8h, v2.8h\n" + "ldr d24, [x12, x1]\n" + "smlal2 v6.4s, v26.8h, v1.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v14.4s, v27.4h, v2.4h\n" + "smlal v9.4s, v26.4h, v2.4h\n" + "ldr d1, [x8, #0x80]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v25.4h, v2.4h\n" + "smlal2 v10.4s, v27.8h, v2.8h\n" + "smlal2 v8.4s, v26.8h, v2.8h\n" + "smlal v11.4s, v27.4h, v3.4h\n" + "smlal2 v21.4s, v27.8h, v3.8h\n" + "smlal2 v6.4s, v25.8h, v2.8h\n" + "ldr d27, [x10, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v14.4s, 
v23.4h, v3.4h\n" + "smlal v9.4s, v25.4h, v3.4h\n" + "ldr d2, [x8, #0x88]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v24.4h, v3.4h\n" + "smlal2 v10.4s, v23.8h, v3.8h\n" + "smlal2 v8.4s, v25.8h, v3.8h\n" + "smlal v11.4s, v23.4h, v4.4h\n" + "smlal2 v21.4s, v23.8h, v4.8h\n" + "ldr d23, [x9, x1]\n" + "smlal2 v6.4s, v24.8h, v3.8h\n" + "ushll v23.8h, v23.8b, #0x0\n" + "smlal v14.4s, v28.4h, v4.4h\n" + "smlal v9.4s, v24.4h, v4.4h\n" + "ldr d3, [x8, #0x90]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v22.4h, v4.4h\n" + "smlal2 v10.4s, v28.8h, v4.8h\n" + "ldr d28, [x26, x1]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "smlal2 v8.4s, v24.8h, v4.8h\n" + "smlal v11.4s, v31.4h, v0.4h\n" + "smlal2 v21.4s, v31.8h, v0.8h\n" + "ldr d31, [x28, x1]\n" + "smlal2 v6.4s, v22.8h, v4.8h\n" + "ushll v31.8h, v31.8b, #0x0\n" + "smlal v14.4s, v30.4h, v0.4h\n" + "smlal v9.4s, v27.4h, v0.4h\n" + "ldr d4, [x8, #0x98]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v7.4s, v23.4h, v0.4h\n" + "smlal2 v10.4s, v30.8h, v0.8h\n" + "smlal2 v8.4s, v27.8h, v0.8h\n" + "smlal v11.4s, v30.4h, v1.4h\n" + "smlal2 v21.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x1]\n" + "smlal2 v6.4s, v23.8h, v0.8h\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v14.4s, v26.4h, v1.4h\n" + "smlal v9.4s, v23.4h, v1.4h\n" + "ldr d0, [x8, #0xa0]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "smlal v7.4s, v31.4h, v1.4h\n" + "smlal2 v10.4s, v26.8h, v1.8h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "smlal v11.4s, v26.4h, v2.4h\n" + "smlal2 v21.4s, v26.8h, v2.8h\n" + "smlal2 v6.4s, v31.8h, v1.8h\n" + "ldr d26, [x25, x1]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v14.4s, v25.4h, v2.4h\n" + "smlal v9.4s, v31.4h, v2.4h\n" + "ldr d1, [x8, #0xa8]\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "smlal v7.4s, v30.4h, v2.4h\n" + "smlal2 v10.4s, v25.8h, v2.8h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "smlal v11.4s, v25.4h, v3.4h\n" + "smlal2 v21.4s, v25.8h, v3.8h\n" + "smlal2 v6.4s, v30.8h, v2.8h\n" + "ldr d25, [x24, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal v14.4s, v24.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v3.4h\n" + "ldr d2, [x8, #0xb0]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "smlal v7.4s, v28.4h, v3.4h\n" + "smlal2 v10.4s, v24.8h, v3.8h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "smlal v11.4s, v24.4h, v4.4h\n" + "smlal2 v21.4s, v24.8h, v4.8h\n" + "ldr d24, [x23, x1]\n" + "smlal2 v6.4s, v28.8h, v3.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal v14.4s, v22.4h, v4.4h\n" + "smlal v9.4s, v28.4h, v4.4h\n" + "ldr d3, [x8, #0xb8]\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "smlal v7.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "smlal v11.4s, v27.4h, v0.4h\n" + "smlal2 v21.4s, v27.8h, v0.8h\n" + "ldr d27, [x22, x1]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal2 v10.4s, v22.8h, v4.8h\n" + "smlal2 v6.4s, v26.8h, v4.8h\n" + "ldr d4, [x8, #0xc0]\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "smlal v14.4s, v23.4h, v0.4h\n" + "smlal v9.4s, v25.4h, v0.4h\n" + "smlal v7.4s, v24.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "ldr d25, [x7, x1]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal2 v10.4s, v23.8h, v0.8h\n" + "smlal2 v6.4s, v24.8h, v0.8h\n" + "smlal v11.4s, v23.4h, v1.4h\n" + "smlal v14.4s, v31.4h, v1.4h\n" + "smlal v9.4s, v24.4h, v1.4h\n" + "smlal v7.4s, v27.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "ldr d24, [x20, x1]\n" + "smlal2 v21.4s, v23.8h, v1.8h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal2 v10.4s, v31.8h, v1.8h\n" + "smlal2 v6.4s, v27.8h, v1.8h\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal v14.4s, v30.4h, v2.4h\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal v7.4s, v25.4h, v2.4h\n" + 
"smlal2 v8.4s, v27.8h, v2.8h\n" + "ldr d27, [x19, x1]\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal2 v10.4s, v30.8h, v2.8h\n" + "smlal2 v6.4s, v25.8h, v2.8h\n" + "add x1, x1, #0x8\n" + "smlal v11.4s, v30.4h, v3.4h\n" + "smlal v14.4s, v28.4h, v3.4h\n" + "smlal v9.4s, v25.4h, v3.4h\n" + "smlal v7.4s, v24.4h, v3.4h\n" + "smlal2 v21.4s, v30.8h, v3.8h\n" + "smlal2 v10.4s, v28.8h, v3.8h\n" + "smlal2 v8.4s, v25.8h, v3.8h\n" + "smlal2 v6.4s, v24.8h, v3.8h\n" + "smlal v11.4s, v28.4h, v4.4h\n" + "smlal v14.4s, v26.4h, v4.4h\n" + "sqdmulh v11.4s, v11.4s, v17.4s\n" + "smlal v9.4s, v24.4h, v4.4h\n" + "smlal v7.4s, v27.4h, v4.4h\n" + "sqdmulh v14.4s, v14.4s, v17.4s\n" + "smlal2 v21.4s, v28.8h, v4.8h\n" + "smlal2 v10.4s, v26.8h, v4.8h\n" + "sqdmulh v9.4s, v9.4s, v17.4s\n" + "smlal2 v8.4s, v24.8h, v4.8h\n" + "smlal2 v6.4s, v27.8h, v4.8h\n" + "sqdmulh v7.4s, v7.4s, v17.4s\n" + "and v23.16b, v11.16b, v5.16b\n" + "sqdmulh v21.4s, v21.4s, v18.4s\n" + "and v22.16b, v14.16b, v5.16b\n" + "sqdmulh v10.4s, v10.4s, v18.4s\n" + "and v17.16b, v9.16b, v5.16b\n" + "sqdmulh v8.4s, v8.4s, v18.4s\n" + "and v20.16b, v7.16b, v5.16b\n" + "sqdmulh v6.4s, v6.4s, v18.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v19.16b, v21.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v18.16b, v10.16b, v29.16b\n" + "sshr v17.4s, v17.4s, #0x1f\n" + "and v26.16b, v8.16b, v29.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v4.16b, v6.16b, v29.16b\n" + "sqadd v11.4s, v11.4s, v23.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v14.4s, v14.4s, v22.4s\n" + "sshr v18.4s, v18.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v17.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sqadd v7.4s, v7.4s, v20.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "srshl v11.4s, v11.4s, v5.4s\n" + "sqadd v21.4s, v21.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v5.4s\n" + "sqadd v10.4s, v10.4s, v18.4s\n" + "srshl v9.4s, v9.4s, v5.4s\n" + "sqadd v8.4s, v8.4s, v26.4s\n" + "srshl v7.4s, v7.4s, v5.4s\n" + "sqadd v6.4s, v6.4s, v4.4s\n" + "srshl v21.4s, v21.4s, v29.4s\n" + "sqxtn v11.4h, v11.4s\n" + "srshl v10.4s, v10.4s, v29.4s\n" + "sqxtn v14.4h, v14.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v6.4s, v6.4s, v29.4s\n" + "sqxtn v7.4h, v7.4s\n" + "sqxtn2 v11.8h, v21.4s\n" + "sqxtn2 v14.8h, v10.4s\n" + "sqxtn2 v9.8h, v8.4s\n" + "sqxtn2 v7.8h, v6.4s\n" + "sqadd v11.8h, v11.8h, v16.8h\n" + "sqadd v14.8h, v14.8h, v16.8h\n" + "sqadd v9.8h, v9.8h, v16.8h\n" + "sqadd v7.8h, v7.8h, v16.8h\n" + "smax v11.8h, v11.8h, v12.8h\n" + "smax v14.8h, v14.8h, v12.8h\n" + "smax v9.8h, v9.8h, v12.8h\n" + "smax v7.8h, v7.8h, v12.8h\n" + "smin v11.8h, v11.8h, v13.8h\n" + "smin v14.8h, v14.8h, v13.8h\n" + "smin v9.8h, v9.8h, v13.8h\n" + "smin v7.8h, v7.8h, v13.8h\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "uzp1 v14.16b, v14.16b, v14.16b\n" + "str d11, [x21, x2]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v7.16b, v7.16b, v7.16b\n" + "str d14, [x15, x2]\n" + "str d9, [x17, x2]\n" + "str d7, [x16, x2]\n" + "add x2, x2, #0x8\n" + "beq 124f\n" + "add x8, x8, #0xc8\n" + "3:" // Oddments + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x4, #2, 5f\n" + "ld1 { v11.4s }, [x19], #0x10\n" + "tbz x4, #1, 4f\n" + "ld1 { v21.d }[0], [x19], #0x8\n" + "tbz x4, #0, 7f\n" + "ld1 { v21.s }[2], [x19]\n" + "b 7f\n" + "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset + "tbz x4, #0, 7f\n" + "ld1 { v21.s }[0], [x19]\n" + "b 7f\n" + "5:" // Oddments: Load bias: Bit 2: Unset + "tbz x4, #1, 6f\n" + "ld1 { v11.d }[0], [x19], #0x8\n" + "tbz x4, #0, 7f\n" + "ld1 { v11.s }[2], [x19]\n" 
+ "b 7f\n" + "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 7f\n" + "ld1 { v11.s }[0], [x19]\n" + "7:" // Oddments: Load bias: Bit 2: End + "ldr d0, [x8, #0x0]\n" + "ldr d1, [x8, #0x8]\n" + "mov v14.16b, v11.16b\n" + "mov v10.16b, v21.16b\n" + "ldr d2, [x8, #0x10]\n" + "ldr d3, [x8, #0x18]\n" + "mov v9.16b, v11.16b\n" + "mov v8.16b, v21.16b\n" + "ldr d4, [x8, #0x20]\n" + "ldp x28, x27, [x0, #0x0]\n" + "mov v7.16b, v11.16b\n" + "mov v6.16b, v21.16b\n" + "ldp x10, x26, [x0, #0x10]\n" + "ldp x24, x23, [x0, #0x20]\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldp x22, x25, [x0, #0x30]\n" + "ldp x20, x19, [x0, #0x40]\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "add x28, x28, x1\n" + "add x27, x27, x1\n" + "add x10, x10, x1\n" + "add x26, x26, x1\n" + "add x24, x24, x1\n" + "add x23, x23, x1\n" + "add x22, x22, x1\n" + "add x25, x25, x1\n" + "add x20, x20, x1\n" + "add x19, x19, x1\n" + "tbz x4, #2, 9f\n" + "ld1 { v31.s }[0], [x28], #0x4\n" + "ld1 { v30.s }[0], [x27], #0x4\n" + "ld1 { v29.s }[0], [x10], #0x4\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "ld1 { v27.s }[0], [x24], #0x4\n" + "ld1 { v23.s }[0], [x23], #0x4\n" + "ld1 { v25.s }[0], [x22], #0x4\n" + "ld1 { v24.s }[0], [x25], #0x4\n" + "ld1 { v26.s }[0], [x20], #0x4\n" + "ld1 { v22.s }[0], [x19], #0x4\n" + "tbz x4, #1, 8f\n" + "ld1 { v31.h }[2], [x28], #0x2\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v29.h }[2], [x10], #0x2\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "ld1 { v27.h }[2], [x24], #0x2\n" + "ld1 { v23.h }[2], [x23], #0x2\n" + "ld1 { v25.h }[2], [x22], #0x2\n" + "ld1 { v24.h }[2], [x25], #0x2\n" + "ld1 { v26.h }[2], [x20], #0x2\n" + "ld1 { v22.h }[2], [x19], #0x2\n" + "tbz x4, #0, 11f\n" + "ld1 { v31.b }[6], [x28]\n" + "ld1 { v30.b }[6], [x27]\n" + "ld1 { v29.b }[6], [x10]\n" + "ld1 { v28.b }[6], [x26]\n" + "ld1 { v27.b }[6], [x24]\n" + "ld1 { v23.b }[6], [x23]\n" + "ld1 { v25.b }[6], [x22]\n" + "ld1 { v24.b }[6], [x25]\n" + "ld1 { v26.b }[6], [x20]\n" + "ld1 { v22.b }[6], [x19]\n" + "b 11f\n" + "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset + "tbz x4, #0, 11f\n" + "ld1 { v31.b }[4], [x28]\n" + "ld1 { v30.b }[4], [x27]\n" + "ld1 { v29.b }[4], [x10]\n" + "ld1 { v28.b }[4], [x26]\n" + "ld1 { v27.b }[4], [x24]\n" + "ld1 { v23.b }[4], [x23]\n" + "ld1 { v25.b }[4], [x22]\n" + "ld1 { v24.b }[4], [x25]\n" + "ld1 { v26.b }[4], [x20]\n" + "ld1 { v22.b }[4], [x19]\n" + "b 11f\n" + "9:" // Oddments: Initial loads: Bit 2: Unset + "tbz x4, #1, 10f\n" + "ld1 { v31.h }[0], [x28], #0x2\n" + "ld1 { v30.h }[0], [x27], #0x2\n" + "ld1 { v29.h }[0], [x10], #0x2\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "ld1 { v27.h }[0], [x24], #0x2\n" + "ld1 { v23.h }[0], [x23], #0x2\n" + "ld1 { v25.h }[0], [x22], #0x2\n" + "ld1 { v24.h }[0], [x25], #0x2\n" + "ld1 { v26.h }[0], [x20], #0x2\n" + "ld1 { v22.h }[0], [x19], #0x2\n" + "tbz x4, #0, 11f\n" + "ld1 { v31.b }[2], [x28]\n" + "ld1 { v30.b }[2], [x27]\n" + "ld1 { v29.b }[2], [x10]\n" + "ld1 { v28.b }[2], [x26]\n" + "ld1 { v27.b }[2], [x24]\n" + "ld1 { v23.b }[2], [x23]\n" + "ld1 { v25.b }[2], [x22]\n" + "ld1 { v24.b }[2], [x25]\n" + "ld1 { v26.b }[2], [x20]\n" + "ld1 { v22.b }[2], [x19]\n" + "b 11f\n" + "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 11f\n" + "ld1 { v31.b }[0], [x28]\n" + "ld1 { v30.b }[0], [x27]\n" + "ld1 { v29.b }[0], [x10]\n" + "ld1 { v28.b }[0], [x26]\n" + "ld1 { v27.b }[0], [x24]\n" + "ld1 { v23.b }[0], [x23]\n" + "ld1 { v25.b }[0], [x22]\n" + "ld1 { 
v24.b }[0], [x25]\n" + "ld1 { v26.b }[0], [x20]\n" + "ld1 { v22.b }[0], [x19]\n" + "11:" // Oddments: Initial loads: Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "ushll v30.8h, v30.8b, #0x0\n" + "smlal v11.4s, v31.4h, v0.4h\n" + "ldr x19, [x0, #0x50]\n" + "ushll v29.8h, v29.8b, #0x0\n" + "smlal2 v21.4s, v31.8h, v0.8h\n" + "smlal v14.4s, v30.4h, v0.4h\n" + "smlal2 v10.4s, v30.8h, v0.8h\n" + "smlal v9.4s, v29.4h, v0.4h\n" + "ushll v28.8h, v28.8b, #0x0\n" + "add x19, x19, x1\n" + "smlal2 v8.4s, v29.8h, v0.8h\n" + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v7.4s, v28.4h, v0.4h\n" + "smlal2 v6.4s, v28.8h, v0.8h\n" + "smlal v11.4s, v30.4h, v1.4h\n" + "ushll v23.8h, v23.8b, #0x0\n" + "smlal2 v21.4s, v30.8h, v1.8h\n" + "smlal v14.4s, v27.4h, v1.4h\n" + "ushll v25.8h, v25.8b, #0x0\n" + "smlal2 v10.4s, v27.8h, v1.8h\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "ushll v24.8h, v24.8b, #0x0\n" + "smlal2 v8.4s, v28.8h, v1.8h\n" + "ushll v26.8h, v26.8b, #0x0\n" + "smlal v7.4s, v23.4h, v1.4h\n" + "ushll v22.8h, v22.8b, #0x0\n" + "smlal2 v6.4s, v23.8h, v1.8h\n" + "smlal v11.4s, v27.4h, v2.4h\n" + "smlal2 v21.4s, v27.8h, v2.8h\n" + "smlal v14.4s, v25.4h, v2.4h\n" + "smlal2 v10.4s, v25.8h, v2.8h\n" + "smlal v9.4s, v23.4h, v2.4h\n" + "smlal2 v8.4s, v23.8h, v2.8h\n" + "tbz x4, #2, 13f\n" + "ld1 { v31.s }[0], [x19], #0x4\n" + "tbz x4, #1, 12f\n" + "ld1 { v31.h }[2], [x19], #0x2\n" + "tbz x4, #0, 15f\n" + "ld1 { v31.b }[6], [x19]\n" + "b 15f\n" + "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset + "tbz x4, #0, 15f\n" + "ld1 { v31.b }[4], [x19]\n" + "b 15f\n" + "13:" // Oddments: Load (1, 3): Bit 2: Unset + "tbz x4, #1, 14f\n" + "ld1 { v31.h }[0], [x19], #0x2\n" + "tbz x4, #0, 15f\n" + "ld1 { v31.b }[2], [x19]\n" + "b 15f\n" + "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 15f\n" + "ld1 { v31.b }[0], [x19]\n" + "15:" // Oddments: Load (1, 3): Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "ldr x20, [x0, #0x58]\n" + "smlal v7.4s, v31.4h, v2.4h\n" + "smlal2 v6.4s, v31.8h, v2.8h\n" + "smlal v11.4s, v25.4h, v3.4h\n" + "smlal2 v21.4s, v25.8h, v3.8h\n" + "add x20, x20, x1\n" + "smlal v14.4s, v24.4h, v3.4h\n" + "smlal2 v10.4s, v24.8h, v3.8h\n" + "smlal v9.4s, v31.4h, v3.4h\n" + "smlal2 v8.4s, v31.8h, v3.8h\n" + "tbz x4, #2, 17f\n" + "ld1 { v30.s }[0], [x20], #0x4\n" + "tbz x4, #1, 16f\n" + "ld1 { v30.h }[2], [x20], #0x2\n" + "tbz x4, #0, 19f\n" + "ld1 { v30.b }[6], [x20]\n" + "b 19f\n" + "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset + "tbz x4, #0, 19f\n" + "ld1 { v30.b }[4], [x20]\n" + "b 19f\n" + "17:" // Oddments: Load (1, 4): Bit 2: Unset + "tbz x4, #1, 18f\n" + "ld1 { v30.h }[0], [x20], #0x2\n" + "tbz x4, #0, 19f\n" + "ld1 { v30.b }[2], [x20]\n" + "b 19f\n" + "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 19f\n" + "ld1 { v30.b }[0], [x20]\n" + "19:" // Oddments: Load (1, 4): Bit 2: End + "ushll v30.8h, v30.8b, #0x0\n" + "ldr x19, [x0, #0x60]\n" + "smlal v7.4s, v30.4h, v3.4h\n" + "smlal2 v6.4s, v30.8h, v3.8h\n" + "smlal v11.4s, v24.4h, v4.4h\n" + "smlal2 v21.4s, v24.8h, v4.8h\n" + "add x19, x19, x1\n" + "tbz x4, #2, 21f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x4, #1, 20f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x4, #0, 23f\n" + "ld1 { v27.b }[6], [x19]\n" + "b 23f\n" + "20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset + "tbz x4, #0, 23f\n" + "ld1 { v27.b }[4], [x19]\n" + "b 23f\n" + "21:" // Oddments: Load (0, 5): Bit 2: Unset + "tbz x4, #1, 22f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x4, #0, 23f\n" + "ld1 { v27.b }[2], [x19]\n" + "b 23f\n" + 
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 23f\n" + "ld1 { v27.b }[0], [x19]\n" + "23:" // Oddments: Load (0, 5): Bit 2: End + "ushll v27.8h, v27.8b, #0x0\n" + "ldr d0, [x8, #0x28]\n" + "smlal v14.4s, v27.4h, v4.4h\n" + "smlal2 v10.4s, v27.8h, v4.8h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "smlal2 v8.4s, v30.8h, v4.8h\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "ldr x24, [x0, #0x68]\n" + "smlal v7.4s, v26.4h, v4.4h\n" + "smlal2 v6.4s, v26.8h, v4.8h\n" + "add x24, x24, x1\n" + "smlal v11.4s, v29.4h, v0.4h\n" + "smlal2 v21.4s, v29.8h, v0.8h\n" + "smlal v14.4s, v28.4h, v0.4h\n" + "smlal2 v10.4s, v28.8h, v0.8h\n" + "smlal v9.4s, v22.4h, v0.4h\n" + "smlal2 v8.4s, v22.8h, v0.8h\n" + "tbz x4, #2, 25f\n" + "ld1 { v25.s }[0], [x24], #0x4\n" + "tbz x4, #1, 24f\n" + "ld1 { v25.h }[2], [x24], #0x2\n" + "tbz x4, #0, 27f\n" + "ld1 { v25.b }[6], [x24]\n" + "b 27f\n" + "24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset + "tbz x4, #0, 27f\n" + "ld1 { v25.b }[4], [x24]\n" + "b 27f\n" + "25:" // Oddments: Load (2, 1): Bit 2: Unset + "tbz x4, #1, 26f\n" + "ld1 { v25.h }[0], [x24], #0x2\n" + "tbz x4, #0, 27f\n" + "ld1 { v25.b }[2], [x24]\n" + "b 27f\n" + "26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 27f\n" + "ld1 { v25.b }[0], [x24]\n" + "27:" // Oddments: Load (2, 1): Bit 2: End + "ldr d1, [x8, #0x30]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldr x23, [x0, #0x70]\n" + "smlal v7.4s, v25.4h, v0.4h\n" + "smlal2 v6.4s, v25.8h, v0.8h\n" + "add x23, x23, x1\n" + "smlal v11.4s, v28.4h, v1.4h\n" + "smlal2 v21.4s, v28.8h, v1.8h\n" + "smlal v14.4s, v23.4h, v1.4h\n" + "smlal2 v10.4s, v23.8h, v1.8h\n" + "smlal v9.4s, v25.4h, v1.4h\n" + "smlal2 v8.4s, v25.8h, v1.8h\n" + "tbz x4, #2, 29f\n" + "ld1 { v24.s }[0], [x23], #0x4\n" + "tbz x4, #1, 28f\n" + "ld1 { v24.h }[2], [x23], #0x2\n" + "tbz x4, #0, 31f\n" + "ld1 { v24.b }[6], [x23]\n" + "b 31f\n" + "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset + "tbz x4, #0, 31f\n" + "ld1 { v24.b }[4], [x23]\n" + "b 31f\n" + "29:" // Oddments: Load (2, 2): Bit 2: Unset + "tbz x4, #1, 30f\n" + "ld1 { v24.h }[0], [x23], #0x2\n" + "tbz x4, #0, 31f\n" + "ld1 { v24.b }[2], [x23]\n" + "b 31f\n" + "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 31f\n" + "ld1 { v24.b }[0], [x23]\n" + "31:" // Oddments: Load (2, 2): Bit 2: End + "ldr d2, [x8, #0x38]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "ldr x26, [x0, #0x78]\n" + "smlal v7.4s, v24.4h, v1.4h\n" + "smlal2 v6.4s, v24.8h, v1.8h\n" + "add x26, x26, x1\n" + "smlal v11.4s, v23.4h, v2.4h\n" + "smlal2 v21.4s, v23.8h, v2.8h\n" + "smlal v14.4s, v31.4h, v2.4h\n" + "smlal2 v10.4s, v31.8h, v2.8h\n" + "smlal v9.4s, v24.4h, v2.4h\n" + "smlal2 v8.4s, v24.8h, v2.8h\n" + "tbz x4, #2, 33f\n" + "ld1 { v27.s }[0], [x26], #0x4\n" + "tbz x4, #1, 32f\n" + "ld1 { v27.h }[2], [x26], #0x2\n" + "tbz x4, #0, 35f\n" + "ld1 { v27.b }[6], [x26]\n" + "b 35f\n" + "32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset + "tbz x4, #0, 35f\n" + "ld1 { v27.b }[4], [x26]\n" + "b 35f\n" + "33:" // Oddments: Load (2, 3): Bit 2: Unset + "tbz x4, #1, 34f\n" + "ld1 { v27.h }[0], [x26], #0x2\n" + "tbz x4, #0, 35f\n" + "ld1 { v27.b }[2], [x26]\n" + "b 35f\n" + "34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 35f\n" + "ld1 { v27.b }[0], [x26]\n" + "35:" // Oddments: Load (2, 3): Bit 2: End + "ldr d3, [x8, #0x40]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldr x7, [x0, #0x80]\n" + "smlal v7.4s, v27.4h, 
v2.4h\n" + "smlal2 v6.4s, v27.8h, v2.8h\n" + "add x7, x7, x1\n" + "smlal v11.4s, v31.4h, v3.4h\n" + "smlal2 v21.4s, v31.8h, v3.8h\n" + "smlal v14.4s, v30.4h, v3.4h\n" + "smlal2 v10.4s, v30.8h, v3.8h\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v8.4s, v27.8h, v3.8h\n" + "tbz x4, #2, 37f\n" + "ld1 { v23.s }[0], [x7], #0x4\n" + "tbz x4, #1, 36f\n" + "ld1 { v23.h }[2], [x7], #0x2\n" + "tbz x4, #0, 39f\n" + "ld1 { v23.b }[6], [x7]\n" + "b 39f\n" + "36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset + "tbz x4, #0, 39f\n" + "ld1 { v23.b }[4], [x7]\n" + "b 39f\n" + "37:" // Oddments: Load (2, 4): Bit 2: Unset + "tbz x4, #1, 38f\n" + "ld1 { v23.h }[0], [x7], #0x2\n" + "tbz x4, #0, 39f\n" + "ld1 { v23.b }[2], [x7]\n" + "b 39f\n" + "38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 39f\n" + "ld1 { v23.b }[0], [x7]\n" + "39:" // Oddments: Load (2, 4): Bit 2: End + "ldr d4, [x8, #0x48]\n" + "ushll v23.8h, v23.8b, #0x0\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ldr x22, [x0, #0x88]\n" + "smlal v7.4s, v23.4h, v3.4h\n" + "smlal2 v6.4s, v23.8h, v3.8h\n" + "add x22, x22, x1\n" + "smlal v11.4s, v30.4h, v4.4h\n" + "smlal2 v21.4s, v30.8h, v4.8h\n" + "smlal v14.4s, v26.4h, v4.4h\n" + "smlal2 v10.4s, v26.8h, v4.8h\n" + "smlal v9.4s, v23.4h, v4.4h\n" + "smlal2 v8.4s, v23.8h, v4.8h\n" + "tbz x4, #2, 41f\n" + "ld1 { v28.s }[0], [x22], #0x4\n" + "tbz x4, #1, 40f\n" + "ld1 { v28.h }[2], [x22], #0x2\n" + "tbz x4, #0, 43f\n" + "ld1 { v28.b }[6], [x22]\n" + "b 43f\n" + "40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset + "tbz x4, #0, 43f\n" + "ld1 { v28.b }[4], [x22]\n" + "b 43f\n" + "41:" // Oddments: Load (2, 5): Bit 2: Unset + "tbz x4, #1, 42f\n" + "ld1 { v28.h }[0], [x22], #0x2\n" + "tbz x4, #0, 43f\n" + "ld1 { v28.b }[2], [x22]\n" + "b 43f\n" + "42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 43f\n" + "ld1 { v28.b }[0], [x22]\n" + "43:" // Oddments: Load (2, 5): Bit 2: End + "ldr d0, [x8, #0x50]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "ldr x20, [x0, #0x90]\n" + "smlal v7.4s, v28.4h, v4.4h\n" + "smlal2 v6.4s, v28.8h, v4.8h\n" + "add x20, x20, x1\n" + "smlal v11.4s, v22.4h, v0.4h\n" + "smlal2 v21.4s, v22.8h, v0.8h\n" + "smlal v14.4s, v25.4h, v0.4h\n" + "smlal2 v10.4s, v25.8h, v0.8h\n" + "tbz x4, #2, 45f\n" + "ld1 { v31.s }[0], [x20], #0x4\n" + "tbz x4, #1, 44f\n" + "ld1 { v31.h }[2], [x20], #0x2\n" + "tbz x4, #0, 47f\n" + "ld1 { v31.b }[6], [x20]\n" + "b 47f\n" + "44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset + "tbz x4, #0, 47f\n" + "ld1 { v31.b }[4], [x20]\n" + "b 47f\n" + "45:" // Oddments: Load (3, 0): Bit 2: Unset + "tbz x4, #1, 46f\n" + "ld1 { v31.h }[0], [x20], #0x2\n" + "tbz x4, #0, 47f\n" + "ld1 { v31.b }[2], [x20]\n" + "b 47f\n" + "46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 47f\n" + "ld1 { v31.b }[0], [x20]\n" + "47:" // Oddments: Load (3, 0): Bit 2: End + "ushll v31.8h, v31.8b, #0x0\n" + "ldr x14, [x0, #0x98]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v8.4s, v31.8h, v0.8h\n" + "add x14, x14, x1\n" + "tbz x4, #2, 49f\n" + "ld1 { v30.s }[0], [x14], #0x4\n" + "tbz x4, #1, 48f\n" + "ld1 { v30.h }[2], [x14], #0x2\n" + "tbz x4, #0, 51f\n" + "ld1 { v30.b }[6], [x14]\n" + "b 51f\n" + "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset + "tbz x4, #0, 51f\n" + "ld1 { v30.b }[4], [x14]\n" + "b 51f\n" + "49:" // Oddments: Load (3, 1): Bit 2: Unset + "tbz x4, #1, 50f\n" + "ld1 { v30.h }[0], [x14], #0x2\n" + "tbz x4, #0, 51f\n" + "ld1 { v30.b }[2], [x14]\n" + "b 51f\n" + "50:" // Oddments: Load (3, 1): Bit 
2: Unset: Bit 1: Unset + "tbz x4, #0, 51f\n" + "ld1 { v30.b }[0], [x14]\n" + "51:" // Oddments: Load (3, 1): Bit 2: End + "ldr d1, [x8, #0x58]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldr x19, [x0, #0xa0]\n" + "smlal v7.4s, v30.4h, v0.4h\n" + "smlal2 v6.4s, v30.8h, v0.8h\n" + "add x19, x19, x1\n" + "smlal v11.4s, v25.4h, v1.4h\n" + "smlal2 v21.4s, v25.8h, v1.8h\n" + "smlal v14.4s, v24.4h, v1.4h\n" + "smlal2 v10.4s, v24.8h, v1.8h\n" + "smlal v9.4s, v30.4h, v1.4h\n" + "smlal2 v8.4s, v30.8h, v1.8h\n" + "tbz x4, #2, 53f\n" + "ld1 { v26.s }[0], [x19], #0x4\n" + "tbz x4, #1, 52f\n" + "ld1 { v26.h }[2], [x19], #0x2\n" + "tbz x4, #0, 55f\n" + "ld1 { v26.b }[6], [x19]\n" + "b 55f\n" + "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset + "tbz x4, #0, 55f\n" + "ld1 { v26.b }[4], [x19]\n" + "b 55f\n" + "53:" // Oddments: Load (3, 2): Bit 2: Unset + "tbz x4, #1, 54f\n" + "ld1 { v26.h }[0], [x19], #0x2\n" + "tbz x4, #0, 55f\n" + "ld1 { v26.b }[2], [x19]\n" + "b 55f\n" + "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 55f\n" + "ld1 { v26.b }[0], [x19]\n" + "55:" // Oddments: Load (3, 2): Bit 2: End + "ldr d2, [x8, #0x60]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "ldr x13, [x0, #0xa8]\n" + "smlal v7.4s, v26.4h, v1.4h\n" + "smlal2 v6.4s, v26.8h, v1.8h\n" + "add x13, x13, x1\n" + "smlal v11.4s, v24.4h, v2.4h\n" + "smlal2 v21.4s, v24.8h, v2.8h\n" + "smlal v14.4s, v27.4h, v2.4h\n" + "smlal2 v10.4s, v27.8h, v2.8h\n" + "smlal v9.4s, v26.4h, v2.4h\n" + "smlal2 v8.4s, v26.8h, v2.8h\n" + "tbz x4, #2, 57f\n" + "ld1 { v25.s }[0], [x13], #0x4\n" + "tbz x4, #1, 56f\n" + "ld1 { v25.h }[2], [x13], #0x2\n" + "tbz x4, #0, 59f\n" + "ld1 { v25.b }[6], [x13]\n" + "b 59f\n" + "56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset + "tbz x4, #0, 59f\n" + "ld1 { v25.b }[4], [x13]\n" + "b 59f\n" + "57:" // Oddments: Load (3, 3): Bit 2: Unset + "tbz x4, #1, 58f\n" + "ld1 { v25.h }[0], [x13], #0x2\n" + "tbz x4, #0, 59f\n" + "ld1 { v25.b }[2], [x13]\n" + "b 59f\n" + "58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 59f\n" + "ld1 { v25.b }[0], [x13]\n" + "59:" // Oddments: Load (3, 3): Bit 2: End + "ldr d3, [x8, #0x68]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldr x12, [x0, #0xb0]\n" + "smlal v7.4s, v25.4h, v2.4h\n" + "smlal2 v6.4s, v25.8h, v2.8h\n" + "add x12, x12, x1\n" + "smlal v11.4s, v27.4h, v3.4h\n" + "smlal2 v21.4s, v27.8h, v3.8h\n" + "smlal v14.4s, v23.4h, v3.4h\n" + "smlal2 v10.4s, v23.8h, v3.8h\n" + "smlal v9.4s, v25.4h, v3.4h\n" + "smlal2 v8.4s, v25.8h, v3.8h\n" + "tbz x4, #2, 61f\n" + "ld1 { v24.s }[0], [x12], #0x4\n" + "tbz x4, #1, 60f\n" + "ld1 { v24.h }[2], [x12], #0x2\n" + "tbz x4, #0, 63f\n" + "ld1 { v24.b }[6], [x12]\n" + "b 63f\n" + "60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset + "tbz x4, #0, 63f\n" + "ld1 { v24.b }[4], [x12]\n" + "b 63f\n" + "61:" // Oddments: Load (3, 4): Bit 2: Unset + "tbz x4, #1, 62f\n" + "ld1 { v24.h }[0], [x12], #0x2\n" + "tbz x4, #0, 63f\n" + "ld1 { v24.b }[2], [x12]\n" + "b 63f\n" + "62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 63f\n" + "ld1 { v24.b }[0], [x12]\n" + "63:" // Oddments: Load (3, 4): Bit 2: End + "ldr d4, [x8, #0x70]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ldr x11, [x0, #0xb8]\n" + "smlal v7.4s, v24.4h, v3.4h\n" + "smlal2 v6.4s, v24.8h, v3.8h\n" + "add x11, x11, x1\n" + "smlal v11.4s, v23.4h, v4.4h\n" + "smlal2 v21.4s, v23.8h, v4.8h\n" + "smlal v14.4s, v28.4h, v4.4h\n" + 
"smlal2 v10.4s, v28.8h, v4.8h\n" + "smlal v9.4s, v24.4h, v4.4h\n" + "smlal2 v8.4s, v24.8h, v4.8h\n" + "tbz x4, #2, 65f\n" + "ld1 { v22.s }[0], [x11], #0x4\n" + "tbz x4, #1, 64f\n" + "ld1 { v22.h }[2], [x11], #0x2\n" + "tbz x4, #0, 67f\n" + "ld1 { v22.b }[6], [x11]\n" + "b 67f\n" + "64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset + "tbz x4, #0, 67f\n" + "ld1 { v22.b }[4], [x11]\n" + "b 67f\n" + "65:" // Oddments: Load (3, 5): Bit 2: Unset + "tbz x4, #1, 66f\n" + "ld1 { v22.h }[0], [x11], #0x2\n" + "tbz x4, #0, 67f\n" + "ld1 { v22.b }[2], [x11]\n" + "b 67f\n" + "66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 67f\n" + "ld1 { v22.b }[0], [x11]\n" + "67:" // Oddments: Load (3, 5): Bit 2: End + "ldr d0, [x8, #0x78]\n" + "ushll v22.8h, v22.8b, #0x0\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "ldr x10, [x0, #0xc0]\n" + "smlal v7.4s, v22.4h, v4.4h\n" + "smlal2 v6.4s, v22.8h, v4.8h\n" + "add x10, x10, x1\n" + "smlal v11.4s, v31.4h, v0.4h\n" + "smlal2 v21.4s, v31.8h, v0.8h\n" + "smlal v14.4s, v30.4h, v0.4h\n" + "smlal2 v10.4s, v30.8h, v0.8h\n" + "tbz x4, #2, 69f\n" + "ld1 { v27.s }[0], [x10], #0x4\n" + "tbz x4, #1, 68f\n" + "ld1 { v27.h }[2], [x10], #0x2\n" + "tbz x4, #0, 71f\n" + "ld1 { v27.b }[6], [x10]\n" + "b 71f\n" + "68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset + "tbz x4, #0, 71f\n" + "ld1 { v27.b }[4], [x10]\n" + "b 71f\n" + "69:" // Oddments: Load (4, 0): Bit 2: Unset + "tbz x4, #1, 70f\n" + "ld1 { v27.h }[0], [x10], #0x2\n" + "tbz x4, #0, 71f\n" + "ld1 { v27.b }[2], [x10]\n" + "b 71f\n" + "70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 71f\n" + "ld1 { v27.b }[0], [x10]\n" + "71:" // Oddments: Load (4, 0): Bit 2: End + "ushll v27.8h, v27.8b, #0x0\n" + "ldr x9, [x0, #0xc8]\n" + "smlal v9.4s, v27.4h, v0.4h\n" + "smlal2 v8.4s, v27.8h, v0.8h\n" + "add x9, x9, x1\n" + "tbz x4, #2, 73f\n" + "ld1 { v23.s }[0], [x9], #0x4\n" + "tbz x4, #1, 72f\n" + "ld1 { v23.h }[2], [x9], #0x2\n" + "tbz x4, #0, 75f\n" + "ld1 { v23.b }[6], [x9]\n" + "b 75f\n" + "72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset + "tbz x4, #0, 75f\n" + "ld1 { v23.b }[4], [x9]\n" + "b 75f\n" + "73:" // Oddments: Load (4, 1): Bit 2: Unset + "tbz x4, #1, 74f\n" + "ld1 { v23.h }[0], [x9], #0x2\n" + "tbz x4, #0, 75f\n" + "ld1 { v23.b }[2], [x9]\n" + "b 75f\n" + "74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 75f\n" + "ld1 { v23.b }[0], [x9]\n" + "75:" // Oddments: Load (4, 1): Bit 2: End + "ldr d1, [x8, #0x80]\n" + "ushll v23.8h, v23.8b, #0x0\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldr x28, [x0, #0xd0]\n" + "smlal v7.4s, v23.4h, v0.4h\n" + "smlal2 v6.4s, v23.8h, v0.8h\n" + "add x28, x28, x1\n" + "smlal v11.4s, v30.4h, v1.4h\n" + "smlal2 v21.4s, v30.8h, v1.8h\n" + "smlal v14.4s, v26.4h, v1.4h\n" + "smlal2 v10.4s, v26.8h, v1.8h\n" + "smlal v9.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "tbz x4, #2, 77f\n" + "ld1 { v31.s }[0], [x28], #0x4\n" + "tbz x4, #1, 76f\n" + "ld1 { v31.h }[2], [x28], #0x2\n" + "tbz x4, #0, 79f\n" + "ld1 { v31.b }[6], [x28]\n" + "b 79f\n" + "76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset + "tbz x4, #0, 79f\n" + "ld1 { v31.b }[4], [x28]\n" + "b 79f\n" + "77:" // Oddments: Load (4, 2): Bit 2: Unset + "tbz x4, #1, 78f\n" + "ld1 { v31.h }[0], [x28], #0x2\n" + "tbz x4, #0, 79f\n" + "ld1 { v31.b }[2], [x28]\n" + "b 79f\n" + "78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 79f\n" + "ld1 { v31.b }[0], [x28]\n" + "79:" // Oddments: Load (4, 2): Bit 2: End + "ldr d2, [x8, #0x88]\n" + "ushll v31.8h, v31.8b, 
#0x0\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "ldr x27, [x0, #0xd8]\n" + "smlal v7.4s, v31.4h, v1.4h\n" + "smlal2 v6.4s, v31.8h, v1.8h\n" + "add x27, x27, x1\n" + "smlal v11.4s, v26.4h, v2.4h\n" + "smlal2 v21.4s, v26.8h, v2.8h\n" + "smlal v14.4s, v25.4h, v2.4h\n" + "smlal2 v10.4s, v25.8h, v2.8h\n" + "smlal v9.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "tbz x4, #2, 81f\n" + "ld1 { v30.s }[0], [x27], #0x4\n" + "tbz x4, #1, 80f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "tbz x4, #0, 83f\n" + "ld1 { v30.b }[6], [x27]\n" + "b 83f\n" + "80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset + "tbz x4, #0, 83f\n" + "ld1 { v30.b }[4], [x27]\n" + "b 83f\n" + "81:" // Oddments: Load (4, 3): Bit 2: Unset + "tbz x4, #1, 82f\n" + "ld1 { v30.h }[0], [x27], #0x2\n" + "tbz x4, #0, 83f\n" + "ld1 { v30.b }[2], [x27]\n" + "b 83f\n" + "82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 83f\n" + "ld1 { v30.b }[0], [x27]\n" + "83:" // Oddments: Load (4, 3): Bit 2: End + "ldr d3, [x8, #0x90]\n" + "ushll v30.8h, v30.8b, #0x0\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldr x26, [x0, #0xe0]\n" + "smlal v7.4s, v30.4h, v2.4h\n" + "smlal2 v6.4s, v30.8h, v2.8h\n" + "add x26, x26, x1\n" + "smlal v11.4s, v25.4h, v3.4h\n" + "smlal2 v21.4s, v25.8h, v3.8h\n" + "smlal v14.4s, v24.4h, v3.4h\n" + "smlal2 v10.4s, v24.8h, v3.8h\n" + "smlal v9.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "tbz x4, #2, 85f\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "tbz x4, #1, 84f\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "tbz x4, #0, 87f\n" + "ld1 { v28.b }[6], [x26]\n" + "b 87f\n" + "84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset + "tbz x4, #0, 87f\n" + "ld1 { v28.b }[4], [x26]\n" + "b 87f\n" + "85:" // Oddments: Load (4, 4): Bit 2: Unset + "tbz x4, #1, 86f\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "tbz x4, #0, 87f\n" + "ld1 { v28.b }[2], [x26]\n" + "b 87f\n" + "86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 87f\n" + "ld1 { v28.b }[0], [x26]\n" + "87:" // Oddments: Load (4, 4): Bit 2: End + "ldr d4, [x8, #0x98]\n" + "ushll v28.8h, v28.8b, #0x0\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ldr x25, [x0, #0xe8]\n" + "smlal v7.4s, v28.4h, v3.4h\n" + "smlal2 v6.4s, v28.8h, v3.8h\n" + "add x25, x25, x1\n" + "smlal v11.4s, v24.4h, v4.4h\n" + "smlal2 v21.4s, v24.8h, v4.8h\n" + "smlal v14.4s, v22.4h, v4.4h\n" + "smlal2 v10.4s, v22.8h, v4.8h\n" + "smlal v9.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "tbz x4, #2, 89f\n" + "ld1 { v26.s }[0], [x25], #0x4\n" + "tbz x4, #1, 88f\n" + "ld1 { v26.h }[2], [x25], #0x2\n" + "tbz x4, #0, 91f\n" + "ld1 { v26.b }[6], [x25]\n" + "b 91f\n" + "88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset + "tbz x4, #0, 91f\n" + "ld1 { v26.b }[4], [x25]\n" + "b 91f\n" + "89:" // Oddments: Load (4, 5): Bit 2: Unset + "tbz x4, #1, 90f\n" + "ld1 { v26.h }[0], [x25], #0x2\n" + "tbz x4, #0, 91f\n" + "ld1 { v26.b }[2], [x25]\n" + "b 91f\n" + "90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 91f\n" + "ld1 { v26.b }[0], [x25]\n" + "91:" // Oddments: Load (4, 5): Bit 2: End + "ldr d0, [x8, #0xa0]\n" + "ushll v26.8h, v26.8b, #0x0\n" + "usubl v0.8h, v0.8b, v15.8b\n" + "ldr x24, [x0, #0xf0]\n" + "smlal v7.4s, v26.4h, v4.4h\n" + "smlal2 v6.4s, v26.8h, v4.8h\n" + "add x24, x24, x1\n" + "smlal v11.4s, v27.4h, v0.4h\n" + "smlal2 v21.4s, v27.8h, v0.8h\n" + "smlal v14.4s, v23.4h, v0.4h\n" + "smlal2 v10.4s, v23.8h, v0.8h\n" + "tbz x4, #2, 93f\n" + "ld1 { v25.s }[0], [x24], #0x4\n" + "tbz x4, #1, 92f\n" + "ld1 { v25.h }[2], [x24], #0x2\n" + "tbz x4, #0, 95f\n" + 
"ld1 { v25.b }[6], [x24]\n" + "b 95f\n" + "92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset + "tbz x4, #0, 95f\n" + "ld1 { v25.b }[4], [x24]\n" + "b 95f\n" + "93:" // Oddments: Load (5, 0): Bit 2: Unset + "tbz x4, #1, 94f\n" + "ld1 { v25.h }[0], [x24], #0x2\n" + "tbz x4, #0, 95f\n" + "ld1 { v25.b }[2], [x24]\n" + "b 95f\n" + "94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 95f\n" + "ld1 { v25.b }[0], [x24]\n" + "95:" // Oddments: Load (5, 0): Bit 2: End + "ushll v25.8h, v25.8b, #0x0\n" + "ldr x23, [x0, #0xf8]\n" + "smlal v9.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "add x23, x23, x1\n" + "tbz x4, #2, 97f\n" + "ld1 { v24.s }[0], [x23], #0x4\n" + "tbz x4, #1, 96f\n" + "ld1 { v24.h }[2], [x23], #0x2\n" + "tbz x4, #0, 99f\n" + "ld1 { v24.b }[6], [x23]\n" + "b 99f\n" + "96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset + "tbz x4, #0, 99f\n" + "ld1 { v24.b }[4], [x23]\n" + "b 99f\n" + "97:" // Oddments: Load (5, 1): Bit 2: Unset + "tbz x4, #1, 98f\n" + "ld1 { v24.h }[0], [x23], #0x2\n" + "tbz x4, #0, 99f\n" + "ld1 { v24.b }[2], [x23]\n" + "b 99f\n" + "98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 99f\n" + "ld1 { v24.b }[0], [x23]\n" + "99:" // Oddments: Load (5, 1): Bit 2: End + "ldr d1, [x8, #0xa8]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "usubl v1.8h, v1.8b, v15.8b\n" + "ldr x22, [x0, #0x100]\n" + "smlal v7.4s, v24.4h, v0.4h\n" + "smlal2 v6.4s, v24.8h, v0.8h\n" + "add x22, x22, x1\n" + "smlal v11.4s, v23.4h, v1.4h\n" + "smlal2 v21.4s, v23.8h, v1.8h\n" + "smlal v14.4s, v31.4h, v1.4h\n" + "smlal2 v10.4s, v31.8h, v1.8h\n" + "smlal v9.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "tbz x4, #2, 101f\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "tbz x4, #1, 100f\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "tbz x4, #0, 103f\n" + "ld1 { v27.b }[6], [x22]\n" + "b 103f\n" + "100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset + "tbz x4, #0, 103f\n" + "ld1 { v27.b }[4], [x22]\n" + "b 103f\n" + "101:" // Oddments: Load (5, 2): Bit 2: Unset + "tbz x4, #1, 102f\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "tbz x4, #0, 103f\n" + "ld1 { v27.b }[2], [x22]\n" + "b 103f\n" + "102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 103f\n" + "ld1 { v27.b }[0], [x22]\n" + "103:" // Oddments: Load (5, 2): Bit 2: End + "ldr d2, [x8, #0xb0]\n" + "ushll v27.8h, v27.8b, #0x0\n" + "usubl v2.8h, v2.8b, v15.8b\n" + "ldr x7, [x0, #0x108]\n" + "smlal v7.4s, v27.4h, v1.4h\n" + "smlal2 v6.4s, v27.8h, v1.8h\n" + "add x7, x7, x1\n" + "smlal v11.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "smlal v14.4s, v30.4h, v2.4h\n" + "smlal2 v10.4s, v30.8h, v2.8h\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "tbz x4, #2, 105f\n" + "ld1 { v25.s }[0], [x7], #0x4\n" + "tbz x4, #1, 104f\n" + "ld1 { v25.h }[2], [x7], #0x2\n" + "tbz x4, #0, 107f\n" + "ld1 { v25.b }[6], [x7]\n" + "b 107f\n" + "104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset + "tbz x4, #0, 107f\n" + "ld1 { v25.b }[4], [x7]\n" + "b 107f\n" + "105:" // Oddments: Load (5, 3): Bit 2: Unset + "tbz x4, #1, 106f\n" + "ld1 { v25.h }[0], [x7], #0x2\n" + "tbz x4, #0, 107f\n" + "ld1 { v25.b }[2], [x7]\n" + "b 107f\n" + "106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 107f\n" + "ld1 { v25.b }[0], [x7]\n" + "107:" // Oddments: Load (5, 3): Bit 2: End + "ldr d3, [x8, #0xb8]\n" + "ushll v25.8h, v25.8b, #0x0\n" + "usubl v3.8h, v3.8b, v15.8b\n" + "ldr x20, [x0, #0x110]\n" + "smlal v7.4s, v25.4h, v2.4h\n" + "smlal2 v6.4s, v25.8h, v2.8h\n" + "add 
x20, x20, x1\n" + "smlal v11.4s, v30.4h, v3.4h\n" + "smlal2 v21.4s, v30.8h, v3.8h\n" + "smlal v14.4s, v28.4h, v3.4h\n" + "smlal2 v10.4s, v28.8h, v3.8h\n" + "smlal v9.4s, v25.4h, v3.4h\n" + "smlal2 v8.4s, v25.8h, v3.8h\n" + "tbz x4, #2, 109f\n" + "ld1 { v24.s }[0], [x20], #0x4\n" + "tbz x4, #1, 108f\n" + "ld1 { v24.h }[2], [x20], #0x2\n" + "tbz x4, #0, 111f\n" + "ld1 { v24.b }[6], [x20]\n" + "b 111f\n" + "108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset + "tbz x4, #0, 111f\n" + "ld1 { v24.b }[4], [x20]\n" + "b 111f\n" + "109:" // Oddments: Load (5, 4): Bit 2: Unset + "tbz x4, #1, 110f\n" + "ld1 { v24.h }[0], [x20], #0x2\n" + "tbz x4, #0, 111f\n" + "ld1 { v24.b }[2], [x20]\n" + "b 111f\n" + "110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 111f\n" + "ld1 { v24.b }[0], [x20]\n" + "111:" // Oddments: Load (5, 4): Bit 2: End + "ldr d4, [x8, #0xc0]\n" + "ushll v24.8h, v24.8b, #0x0\n" + "usubl v4.8h, v4.8b, v15.8b\n" + "ldr x19, [x0, #0x118]\n" + "smlal v7.4s, v24.4h, v3.4h\n" + "smlal2 v6.4s, v24.8h, v3.8h\n" + "add x19, x19, x1\n" + "smlal v11.4s, v28.4h, v4.4h\n" + "smlal2 v21.4s, v28.8h, v4.8h\n" + "smlal v14.4s, v26.4h, v4.4h\n" + "smlal2 v10.4s, v26.8h, v4.8h\n" + "smlal v9.4s, v24.4h, v4.4h\n" + "smlal2 v8.4s, v24.8h, v4.8h\n" + "tbz x4, #2, 113f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x4, #1, 112f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x4, #0, 115f\n" + "ld1 { v27.b }[6], [x19]\n" + "b 115f\n" + "112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset + "tbz x4, #0, 115f\n" + "ld1 { v27.b }[4], [x19]\n" + "b 115f\n" + "113:" // Oddments: Load (5, 5): Bit 2: Unset + "tbz x4, #1, 114f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x4, #0, 115f\n" + "ld1 { v27.b }[2], [x19]\n" + "b 115f\n" + "114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 115f\n" + "ld1 { v27.b }[0], [x19]\n" + "115:" // Oddments: Load (5, 5): Bit 2: End + "ushll v27.8h, v27.8b, #0x0\n" + "smlal v7.4s, v27.4h, v4.4h\n" + "smlal2 v6.4s, v27.8h, v4.8h\n" + "tbz x4, #2, 117f\n" + "ld1 { v17.4s }, [x5], #0x10\n" + "ld1 { v5.4s }, [x6], #0x10\n" + "tbz x4, #1, 116f\n" + "ld1 { v18.d }[0], [x5], #0x8\n" + "ld1 { v29.d }[0], [x6], #0x8\n" + "tbz x4, #0, 119f\n" + "ld1 { v18.s }[2], [x5]\n" + "ld1 { v29.s }[2], [x6]\n" + "b 119f\n" + "116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset + "tbz x4, #0, 119f\n" + "ld1 { v18.s }[0], [x5]\n" + "ld1 { v29.s }[0], [x6]\n" + "b 119f\n" + "117:" // Oddments: Load requant params: Bit 2: Unset + "tbz x4, #1, 118f\n" + "ld1 { v17.d }[0], [x5], #0x8\n" + "ld1 { v5.d }[0], [x6], #0x8\n" + "tbz x4, #0, 119f\n" + "ld1 { v17.s }[2], [x5]\n" + "ld1 { v5.s }[2], [x6]\n" + "b 119f\n" + "118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 119f\n" + "ld1 { v17.s }[0], [x5]\n" + "ld1 { v5.s }[0], [x6]\n" + "119:" // Oddments: Load requant params: Bit 2: End + "sqdmulh v11.4s, v11.4s, v17.4s\n" + "sqdmulh v14.4s, v14.4s, v17.4s\n" + "add x21, x21, x2\n" + "add x15, x15, x2\n" + "sqdmulh v9.4s, v9.4s, v17.4s\n" + "sqdmulh v7.4s, v7.4s, v17.4s\n" + "add x17, x17, x2\n" + "add x16, x16, x2\n" + "and v23.16b, v11.16b, v5.16b\n" + "sqdmulh v21.4s, v21.4s, v18.4s\n" + "and v22.16b, v14.16b, v5.16b\n" + "sqdmulh v10.4s, v10.4s, v18.4s\n" + "and v17.16b, v9.16b, v5.16b\n" + "sqdmulh v8.4s, v8.4s, v18.4s\n" + "and v20.16b, v7.16b, v5.16b\n" + "sqdmulh v6.4s, v6.4s, v18.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v19.16b, v21.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v18.16b, v10.16b, v29.16b\n" + 
"sshr v17.4s, v17.4s, #0x1f\n" + "and v26.16b, v8.16b, v29.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v4.16b, v6.16b, v29.16b\n" + "sqadd v11.4s, v11.4s, v23.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v14.4s, v14.4s, v22.4s\n" + "sshr v18.4s, v18.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v17.4s\n" + "sshr v26.4s, v26.4s, #0x1f\n" + "sqadd v7.4s, v7.4s, v20.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "srshl v11.4s, v11.4s, v5.4s\n" + "sqadd v21.4s, v21.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v5.4s\n" + "sqadd v10.4s, v10.4s, v18.4s\n" + "srshl v9.4s, v9.4s, v5.4s\n" + "sqadd v8.4s, v8.4s, v26.4s\n" + "srshl v7.4s, v7.4s, v5.4s\n" + "sqadd v6.4s, v6.4s, v4.4s\n" + "srshl v21.4s, v21.4s, v29.4s\n" + "sqxtn v11.4h, v11.4s\n" + "srshl v10.4s, v10.4s, v29.4s\n" + "sqxtn v14.4h, v14.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v6.4s, v6.4s, v29.4s\n" + "sqxtn v7.4h, v7.4s\n" + "sqxtn2 v11.8h, v21.4s\n" + "sqxtn2 v14.8h, v10.4s\n" + "sqxtn2 v9.8h, v8.4s\n" + "sqxtn2 v7.8h, v6.4s\n" + "sqadd v11.8h, v11.8h, v16.8h\n" + "sqadd v14.8h, v14.8h, v16.8h\n" + "sqadd v9.8h, v9.8h, v16.8h\n" + "sqadd v7.8h, v7.8h, v16.8h\n" + "smax v11.8h, v11.8h, v12.8h\n" + "smax v14.8h, v14.8h, v12.8h\n" + "smax v9.8h, v9.8h, v12.8h\n" + "smax v7.8h, v7.8h, v12.8h\n" + "smin v11.8h, v11.8h, v13.8h\n" + "smin v14.8h, v14.8h, v13.8h\n" + "smin v9.8h, v9.8h, v13.8h\n" + "smin v7.8h, v7.8h, v13.8h\n" + "uzp1 v11.16b, v11.16b, v11.16b\n" + "uzp1 v14.16b, v14.16b, v14.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v7.16b, v7.16b, v7.16b\n" + "tbz x4, #2, 121f\n" + "st1 { v11.s }[0], [x21], #0x4\n" + "st1 { v14.s }[0], [x15], #0x4\n" + "st1 { v9.s }[0], [x17], #0x4\n" + "st1 { v7.s }[0], [x16], #0x4\n" + "tbz x4, #1, 120f\n" + "st1 { v11.h }[2], [x21], #0x2\n" + "st1 { v14.h }[2], [x15], #0x2\n" + "st1 { v9.h }[2], [x17], #0x2\n" + "st1 { v7.h }[2], [x16], #0x2\n" + "tbz x4, #0, 123f\n" + "st1 { v11.b }[6], [x21], #0x1\n" + "st1 { v14.b }[6], [x15], #0x1\n" + "st1 { v9.b }[6], [x17], #0x1\n" + "st1 { v7.b }[6], [x16], #0x1\n" + "b 123f\n" + "120:" // Oddments: Bit 2: Bit 1: Unset + "tbz x4, #0, 123f\n" + "st1 { v11.b }[4], [x21], #0x1\n" + "st1 { v14.b }[4], [x15], #0x1\n" + "st1 { v9.b }[4], [x17], #0x1\n" + "st1 { v7.b }[4], [x16], #0x1\n" + "b 123f\n" + "121:" // Oddments: Bit 2: Unset + "tbz x4, #1, 122f\n" + "st1 { v11.h }[0], [x21], #0x2\n" + "st1 { v14.h }[0], [x15], #0x2\n" + "st1 { v9.h }[0], [x17], #0x2\n" + "st1 { v7.h }[0], [x16], #0x2\n" + "tbz x4, #0, 123f\n" + "st1 { v11.b }[2], [x21], #0x1\n" + "st1 { v14.b }[2], [x15], #0x1\n" + "st1 { v9.b }[2], [x17], #0x1\n" + "st1 { v7.b }[2], [x16], #0x1\n" + "b 123f\n" + "122:" // Oddments: Bit 2: Unset: Bit 1: Unset + "tbz x4, #0, 123f\n" + "st1 { v11.b }[0], [x21], #0x1\n" + "st1 { v14.b }[0], [x15], #0x1\n" + "st1 { v9.b }[0], [x17], #0x1\n" + "st1 { v7.b }[0], [x16], #0x1\n" + "123:" // Oddments: Bit 2: End + "124:" // End + : + : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" 
(offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
+    : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+  );
+}
+
+} // namespace depthwise
+} // namespace arm_conv
+
+#endif // defined(__aarch64__)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
index 1bacb5ffe7..281511a1fb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -36,37 +36,24 @@ namespace depthwise {

void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);

-struct a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst
+class a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>
{
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef int8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>;
+  public:
  constexpr static unsigned int kernel_rows = 3;
  constexpr static unsigned int kernel_cols = 3;
  constexpr static unsigned int stride_rows = 1;
  constexpr static unsigned int stride_cols = 1;
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 4;
-  constexpr static unsigned int input_cols = 4;
-
-  constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_3x3_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_3x3_mla::get_packed_size;
+  a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {}
-  kern_type kernel = a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; }
-  a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType kernel = a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl;
+  Parent::KernelType get_kernel(void) const override { return kernel; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
};

} // namespace depthwise
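The hunk above shows the refactoring pattern this patch applies to each quantized kernel header: the old standalone struct carried hard-coded input_rows/input_cols constants and free-standing parameter-packing function pointers, while the new class hands the tile geometry to the DepthwiseDepthfirstStrategy constructor (apparently output rows/cols, kernel rows/cols, then stride rows/cols) and lets the shared base derive the rest. A minimal sketch of that derivation, assuming the base class computes the input tile from the constructor arguments; input_size is an illustrative helper, not a function from the library:

#include <cstdio>

// Hypothetical helper: receptive field of one depth-first output tile.
// With dilation 1, a kernel of size k applied to n outputs at stride s
// reads k + (n - 1) * s input points along that dimension.
static unsigned int input_size(unsigned int outputs, unsigned int kernel, unsigned int stride)
{
  return kernel + (outputs - 1) * stride;
}

int main()
{
  // The s1 strategy above passes Parent(2, 2, 3, 3, 1, 1): a 4x4 input
  // tile, matching the removed input_rows = 4 / input_cols = 4 constants.
  std::printf("s1 tile: %ux%u\n", input_size(2, 3, 1), input_size(2, 3, 1));

  // The s2 strategy later in the patch passes Parent(2, 2, 3, 3, 2, 2):
  // a 5x5 input tile, matching the removed input_rows = 5 / input_cols = 5.
  std::printf("s2 tile: %ux%u\n", input_size(2, 3, 2), input_size(2, 3, 2));
  return 0;
}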
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 8cbbfae00d..22f95746fc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -46,7 +46,7 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
  struct Params
  {
    long unsigned int n_channels;
-    const int8_t *weights;
+    const void *weights;
    const int32_t *bias;
    const arm_gemm::Requantize32 *requant;
    const int32_t *const requant_muls;
@@ -57,7 +57,7 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
    Params(
      long unsigned int n_channels,
      const uint8_t *const *inptrs_raw,
-      const int8_t *const weights,
+      const void *const weights,
      const int32_t *const bias,
      const arm_gemm::Requantize32 &qp,
      const int32_t *const requant_muls,
@@ -91,505 +91,489 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
    requant_muls, requant_shifts, outptrs);
  __asm__ __volatile__(
+ "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x17, #0x0\n" - "ldr x16, [%x[params], %[offsetof_Params_weights]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v22.16b }, [x24]\n" + "ld1r { v12.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v14.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" "mov x15, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x14, %x[params], %[offsetof_Params_inptrs]\n" + "mov x14, #0x0\n" + "ld1r { v15.8h }, [x19]\n" "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x12, x8, #0x3\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v21.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v17.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v13.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v15.4s }, [x20]\n" - "ld1r { v14.4s }, [x19]\n" - "ldp x10, x9, [x21, #0x0]\n" - "ldp x28, x27, [x21, #0x10]\n" - "cbz x12, 3f\n" - "subs x12, x12, #0x1\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" + "cbz x16, 3f\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q11, [x19, #0x0]\n" - "mov v23.16b, v11.16b\n" + "ldr q13, [x19, #0x0]\n" + "subs x16, x16, #0x1\n" + "mov v19.16b, v13.16b\n" "ldr q26, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v12.16b, v11.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v24.16b, v11.16b\n" - "ldr d0, [x16, #0x0]\n" - "ldr d1, [x16, #0x8]\n" - "mov v9.16b, v26.16b\n" -
"ldr d2, [x16, #0x10]\n" - "mov v22.16b, v26.16b\n" - "ldr d3, [x16, #0x18]\n" - "mov v10.16b, v26.16b\n" - "ldr d4, [x16, #0x20]\n" - "ssubl v0.8h, v0.8b, v17.8b\n" - "ldr d5, [x16, #0x28]\n" - "ssubl v1.8h, v1.8b, v17.8b\n" - "ldr d6, [x16, #0x30]\n" - "ssubl v2.8h, v2.8b, v17.8b\n" - "ldr d7, [x16, #0x38]\n" - "ssubl v3.8h, v3.8b, v17.8b\n" - "ldr d8, [x16, #0x40]\n" - "ssubl v4.8h, v4.8b, v17.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "ssubl v5.8h, v5.8b, v17.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "ssubl v6.8h, v6.8b, v17.8b\n" - "ssubl v7.8h, v7.8b, v17.8b\n" - "ldr x19, [x14, #0x20]\n" - "ssubl v8.8h, v8.8b, v17.8b\n" - "ldr d31, [x23, x17]\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "ldr d30, [x22, x17]\n" - "ldr d29, [x21, x17]\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "ldr d28, [x20, x17]\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "ldr d27, [x19, x17]\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "usubl v27.8h, v27.8b, v21.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v11.16b, v26.16b\n" + "mov v18.16b, v13.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v24.16b, v26.16b\n" + "mov v9.16b, v13.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v26.16b\n" + "ssubl v0.8h, v0.8b, v12.8b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "ssubl v1.8h, v1.8b, v12.8b\n" + "ssubl v2.8h, v2.8b, v12.8b\n" + "ldp x23, x22, [x12, #0x0]\n" + "ldp x21, x20, [x12, #0x10]\n" + "ssubl v3.8h, v3.8b, v12.8b\n" + "ssubl v4.8h, v4.8b, v12.8b\n" + "ldr x19, [x12, #0x20]\n" + "ldr d31, [x23, x15]\n" + "ssubl v5.8h, v5.8b, v12.8b\n" + "ssubl v6.8h, v6.8b, v12.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "ssubl v7.8h, v7.8b, v12.8b\n" + "ssubl v8.8h, v8.8b, v12.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "usubl v27.8h, v27.8b, v22.8b\n" "beq 2f\n" "1:" // Loop - "smlal v11.4s, v31.4h, v4.4h\n" - "ldr x21, [x14, #0x28]\n" - "add x16, x16, #0x48\n" + "smlal v13.4s, v31.4h, v4.4h\n" "smlal2 v26.4s, v31.8h, v4.8h\n" - "ldr x20, [x14, #0x30]\n" - "subs x12, x12, #0x1\n" - "smlal v23.4s, v31.4h, v3.4h\n" - "ldr x26, [x14, #0x38]\n" - "smlal2 v9.4s, v31.8h, v3.8h\n" - "ldr x25, [x14, #0x40]\n" - "smlal v12.4s, v31.4h, v1.4h\n" - "ldr x19, [x14, #0x48]\n" - "smlal2 v22.4s, v31.8h, v1.8h\n" - "ldr x24, [x14, #0x50]\n" - "smlal v24.4s, v31.4h, v0.4h\n" - "ldr x23, [x14, #0x58]\n" - "smlal2 v10.4s, v31.8h, v0.8h\n" - "ldr d31, [x21, x17]\n" - "smlal v11.4s, v30.4h, v0.4h\n" - "ldr x22, [x14, #0x60]\n" + "ldr x21, [x12, #0x28]\n" + "ldr x26, [x12, #0x38]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ldr x20, [x12, #0x30]\n" + "ldr x25, [x12, #0x40]\n" + "smlal v13.4s, v30.4h, v0.4h\n" "smlal2 v26.4s, v30.8h, v0.8h\n" - "ldr d30, [x19, x17]\n" - "smlal v23.4s, v29.4h, v2.4h\n" - "ldr x21, [x14, #0x68]\n" - "smlal2 v9.4s, v29.8h, v2.8h\n" - "ldr d29, [x20, x17]\n" - "smlal v11.4s, v28.4h, v5.4h\n" - "ldr x20, [x14, #0x70]\n" + "ldr x19, [x12, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "ldr x24, [x12, #0x50]\n" + "ldr x23, [x12, #0x58]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "usubl v31.8h, 
v31.8b, v22.8b\n" + "smlal v13.4s, v28.4h, v5.4h\n" "smlal2 v26.4s, v28.8h, v5.8h\n" - "ldr x19, [x14, #0x78]\n" - "smlal v23.4s, v28.4h, v4.4h\n" - "ldr q25, [x13, #0x0]\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "ldr q18, [x11, #0x0]\n" - "smlal v12.4s, v28.4h, v2.4h\n" - "ldr q16, [x13, #0x10]\n" - "add x13, x13, #0x20\n" - "smlal2 v22.4s, v28.8h, v2.8h\n" - "ldr q20, [x11, #0x10]\n" - "add x11, x11, #0x20\n" - "smlal v24.4s, v28.4h, v1.4h\n" - "smlal2 v10.4s, v28.8h, v1.8h\n" - "ldr d28, [x26, x17]\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "smlal v11.4s, v27.4h, v7.4h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "ldr x21, [x12, #0x68]\n" + "ldr x20, [x12, #0x70]\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "ldr x19, [x12, #0x78]\n" + "ldr q21, [x13, #0x0]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v13.4s, v27.4h, v7.4h\n" "smlal2 v26.4s, v27.8h, v7.8h\n" - "smlal v12.4s, v31.4h, v6.4h\n" - "smlal2 v22.4s, v31.8h, v6.8h\n" - "ldr d31, [x25, x17]\n" - "smlal v23.4s, v27.4h, v6.4h\n" - "smlal2 v9.4s, v27.8h, v6.8h\n" - "smlal v12.4s, v27.4h, v4.4h\n" - "smlal2 v22.4s, v27.8h, v4.8h\n" - "smlal v24.4s, v27.4h, v3.4h\n" - "smlal2 v10.4s, v27.8h, v3.8h\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "smlal v24.4s, v29.4h, v8.4h\n" - "smlal2 v10.4s, v29.8h, v8.8h\n" - "ldr d29, [x24, x17]\n" - "smlal v11.4s, v28.4h, v1.4h\n" + "ldr q25, [x11, #0x0]\n" + "ldr q10, [x13, #0x10]\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "ldr q16, [x11, #0x10]\n" + "add x17, x17, #0x48\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" + "subs x16, x16, #0x1\n" + "add x13, x13, #0x20\n" + "smlal v13.4s, v28.4h, v1.4h\n" "smlal2 v26.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v28.4h, v0.4h\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ldr d28, [x23, x17]\n" - "smlal v11.4s, v31.4h, v2.4h\n" + "add x11, x11, #0x20\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v13.4s, v31.4h, v2.4h\n" "smlal2 v26.4s, v31.8h, v2.8h\n" - "smlal v23.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "ldr d31, [x22, x17]\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "smlal v11.4s, v30.4h, v8.4h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "smlal v13.4s, v30.4h, v8.4h\n" "smlal2 v26.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v7.4h\n" - "smlal2 v9.4s, v30.8h, v7.8h\n" - "smlal v12.4s, v30.4h, v5.4h\n" - "smlal2 v22.4s, v30.8h, v5.8h\n" - "smlal v24.4s, v30.4h, v4.4h\n" - "smlal2 v10.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x17]\n" - "smlal v11.4s, v29.4h, v3.4h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal2 v23.4s, 
v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal v9.4s, v28.4h, v2.4h\n" + "smlal v13.4s, v29.4h, v3.4h\n" "smlal2 v26.4s, v29.8h, v3.8h\n" - "smlal v12.4s, v29.4h, v0.4h\n" - "smlal2 v22.4s, v29.8h, v0.8h\n" - "ldr d29, [x20, x17]\n" - "smlal v23.4s, v28.4h, v5.4h\n" - "smlal2 v9.4s, v28.8h, v5.8h\n" - "smlal v24.4s, v28.4h, v2.4h\n" - "smlal2 v10.4s, v28.8h, v2.8h\n" - "ldr d28, [x19, x17]\n" - "add x17, x17, #0x8\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "smlal v11.4s, v31.4h, v6.4h\n" - "smlal2 v26.4s, v31.8h, v6.8h\n" - "smlal v12.4s, v31.4h, v3.4h\n" - "smlal2 v22.4s, v31.8h, v3.8h\n" - "smlal v23.4s, v30.4h, v8.4h\n" - "smlal2 v9.4s, v30.8h, v8.8h\n" - "smlal v24.4s, v30.4h, v5.4h\n" - "smlal2 v10.4s, v30.8h, v5.8h\n" - "smlal v12.4s, v29.4h, v7.4h\n" - "smlal2 v22.4s, v29.8h, v7.8h\n" - "smlal v24.4s, v29.4h, v6.4h\n" - "smlal2 v10.4s, v29.8h, v6.8h\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "sqrdmulh v11.4s, v11.4s, v25.4s\n" - "sqrdmulh v26.4s, v26.4s, v16.4s\n" - "smlal v12.4s, v28.4h, v8.4h\n" - "smlal2 v22.4s, v28.8h, v8.8h\n" - "smlal v24.4s, v28.4h, v7.4h\n" - "smlal2 v10.4s, v28.8h, v7.8h\n" - "and v19.16b, v11.16b, v18.16b\n" - "and v5.16b, v26.16b, v20.16b\n" - "sqrdmulh v23.4s, v23.4s, v25.4s\n" - "sshr v19.4s, v19.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sqrdmulh v9.4s, v9.4s, v16.4s\n" - "sqadd v11.4s, v11.4s, v19.4s\n" - "sqadd v26.4s, v26.4s, v5.4s\n" - "and v28.16b, v23.16b, v18.16b\n" - "and v8.16b, v9.16b, v20.16b\n" - "srshl v11.4s, v11.4s, v18.4s\n" - "srshl v26.4s, v26.4s, v20.4s\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "add v11.4s, v11.4s, v13.4s\n" - "add v26.4s, v26.4s, v13.4s\n" - "sqadd v23.4s, v23.4s, v28.4s\n" - "smin v11.4s, v11.4s, v14.4s\n" - "smin v26.4s, v26.4s, v14.4s\n" - "sqadd v9.4s, v9.4s, v8.4s\n" - "smax v11.4s, v11.4s, v15.4s\n" - "smax v26.4s, v26.4s, v15.4s\n" - "srshl v23.4s, v23.4s, v18.4s\n" - "srshl v9.4s, v9.4s, v20.4s\n" - "uzp1 v11.16b, v11.16b, v26.16b\n" - "sqrdmulh v12.4s, v12.4s, v25.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x10, x15]\n" - "add v23.4s, v23.4s, v13.4s\n" - "add v9.4s, v9.4s, v13.4s\n" - "and v1.16b, v12.16b, v18.16b\n" - "sqrdmulh v22.4s, v22.4s, v16.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "smin v9.4s, v9.4s, v14.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "smax v23.4s, v23.4s, v15.4s\n" - "smax v9.4s, v9.4s, v15.4s\n" - "sqadd v12.4s, v12.4s, v1.4s\n" - "and v0.16b, v22.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v9.16b\n" - "sqrdmulh v24.4s, v24.4s, v25.4s\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "str d23, [x9, x15]\n" - "srshl v12.4s, v12.4s, v18.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "and v26.16b, v24.16b, v18.16b\n" - "sqrdmulh v10.4s, v10.4s, v16.4s\n" - "sqadd v22.4s, v22.4s, v0.4s\n" - "add v12.4s, v12.4s, v13.4s\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "and v16.16b, v10.16b, v20.16b\n" - "smin v12.4s, v12.4s, v14.4s\n" - "srshl v22.4s, v22.4s, v20.4s\n" - "sqadd v24.4s, v24.4s, v26.4s\n" - "smax v12.4s, v12.4s, v15.4s\n" - "sshr v16.4s, v16.4s, #0x1f\n" - "add v22.4s, v22.4s, v13.4s\n" - "srshl v24.4s, v24.4s, v18.4s\n" - "sqadd v10.4s, v10.4s, v16.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "add v24.4s, v24.4s, v13.4s\n" - "smax v22.4s, v22.4s, v15.4s\n" - "srshl v10.4s, v10.4s, v20.4s\n" - "smin v24.4s, v24.4s, v14.4s\n" - "uzp1 v12.16b, v12.16b, v22.16b\n" - "add v10.4s, v10.4s, v13.4s\n" - "uzp1 v12.16b, v12.16b, v12.16b\n" - 
"str d12, [x28, x15]\n" - "smax v24.4s, v24.4s, v15.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "smax v10.4s, v10.4s, v15.4s\n" - "uzp1 v24.16b, v24.16b, v10.16b\n" - "uzp1 v24.16b, v24.16b, v24.16b\n" - "str d24, [x27, x15]\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x15]\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" "add x15, x15, #0x8\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" + "smlal v13.4s, v31.4h, v6.4h\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "str d13, [x10, x14]\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "str d19, [x9, x14]\n" + "str d18, [x28, x14]\n" + "str d9, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q11, [x19, #0x0]\n" - "mov v23.16b, v11.16b\n" + "ldr q13, [x19, #0x0]\n" + "add x14, x14, #0x8\n" "ldr q26, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v12.16b, 
v11.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v24.16b, v11.16b\n" - "ldr d0, [x16, #0x0]\n" - "ldr d1, [x16, #0x8]\n" - "mov v9.16b, v26.16b\n" - "ldr d2, [x16, #0x10]\n" - "mov v22.16b, v26.16b\n" - "ldr d3, [x16, #0x18]\n" - "mov v10.16b, v26.16b\n" - "ldr d4, [x16, #0x20]\n" - "ssubl v0.8h, v0.8b, v17.8b\n" - "ldr d5, [x16, #0x28]\n" - "ssubl v1.8h, v1.8b, v17.8b\n" - "ldr d6, [x16, #0x30]\n" - "ssubl v2.8h, v2.8b, v17.8b\n" - "ldr d7, [x16, #0x38]\n" - "ssubl v3.8h, v3.8b, v17.8b\n" - "ldr d8, [x16, #0x40]\n" - "ssubl v4.8h, v4.8b, v17.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "ssubl v5.8h, v5.8b, v17.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "ssubl v6.8h, v6.8b, v17.8b\n" - "ssubl v7.8h, v7.8b, v17.8b\n" - "ldr x19, [x14, #0x20]\n" - "ssubl v8.8h, v8.8b, v17.8b\n" - "ldr d31, [x23, x17]\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "ldr d30, [x22, x17]\n" - "ldr d29, [x21, x17]\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "ldr d28, [x20, x17]\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "ldr d27, [x19, x17]\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "usubl v27.8h, v27.8b, v21.8b\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v19.16b, v13.16b\n" + "mov v11.16b, v26.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v18.16b, v13.16b\n" + "mov v24.16b, v26.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v9.16b, v13.16b\n" + "mov v23.16b, v26.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "ssubl v0.8h, v0.8b, v12.8b\n" + "ssubl v1.8h, v1.8b, v12.8b\n" + "ldp x23, x22, [x12, #0x0]\n" + "ldp x21, x20, [x12, #0x10]\n" + "ssubl v2.8h, v2.8b, v12.8b\n" + "ssubl v3.8h, v3.8b, v12.8b\n" + "ldr x19, [x12, #0x20]\n" + "ldr d31, [x23, x15]\n" + "ssubl v4.8h, v4.8b, v12.8b\n" + "ssubl v5.8h, v5.8b, v12.8b\n" + "ldr d30, [x22, x15]\n" + "ldr d29, [x21, x15]\n" + "ssubl v6.8h, v6.8b, v12.8b\n" + "ssubl v7.8h, v7.8b, v12.8b\n" + "ldr d28, [x20, x15]\n" + "ldr d27, [x19, x15]\n" + "ssubl v8.8h, v8.8b, v12.8b\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "usubl v27.8h, v27.8b, v22.8b\n" "bgt 1b\n" "2:" // Tail - "smlal v11.4s, v31.4h, v4.4h\n" - "ldr x21, [x14, #0x28]\n" - "tst x8, #0x7\n" + "smlal v13.4s, v31.4h, v4.4h\n" "smlal2 v26.4s, v31.8h, v4.8h\n" - "ldr x20, [x14, #0x30]\n" - "smlal v23.4s, v31.4h, v3.4h\n" - "ldr x26, [x14, #0x38]\n" - "smlal2 v9.4s, v31.8h, v3.8h\n" - "ldr x25, [x14, #0x40]\n" - "smlal v12.4s, v31.4h, v1.4h\n" - "ldr x19, [x14, #0x48]\n" - "smlal2 v22.4s, v31.8h, v1.8h\n" - "ldr x24, [x14, #0x50]\n" - "smlal v24.4s, v31.4h, v0.4h\n" - "ldr x23, [x14, #0x58]\n" - "smlal2 v10.4s, v31.8h, v0.8h\n" - "ldr d31, [x21, x17]\n" - "smlal v11.4s, v30.4h, v0.4h\n" - "ldr x22, [x14, #0x60]\n" + "ldr x21, [x12, #0x28]\n" + "ldr x26, [x12, #0x38]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "ldr x20, [x12, #0x30]\n" + "ldr x25, [x12, #0x40]\n" + "smlal v13.4s, v30.4h, v0.4h\n" "smlal2 v26.4s, v30.8h, v0.8h\n" - "ldr d30, [x19, x17]\n" - "smlal v23.4s, v29.4h, v2.4h\n" - "ldr x21, [x14, #0x68]\n" - "smlal2 v9.4s, v29.8h, v2.8h\n" - "ldr d29, [x20, x17]\n" - "smlal v11.4s, v28.4h, v5.4h\n" - "ldr x20, [x14, #0x70]\n" + "ldr x19, [x12, #0x48]\n" + "ldr d30, [x19, x15]\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "ldr d29, [x20, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "ldr 
x24, [x12, #0x50]\n" + "ldr x23, [x12, #0x58]\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "ldr d31, [x21, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v13.4s, v28.4h, v5.4h\n" "smlal2 v26.4s, v28.8h, v5.8h\n" - "ldr x19, [x14, #0x78]\n" - "smlal v23.4s, v28.4h, v4.4h\n" - "ldr q25, [x13, #0x0]\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "ldr q18, [x11, #0x0]\n" - "smlal v12.4s, v28.4h, v2.4h\n" - "ldr q16, [x13, #0x10]\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "ldr x21, [x12, #0x68]\n" + "ldr x20, [x12, #0x70]\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "ldr x19, [x12, #0x78]\n" + "ldr q21, [x13, #0x0]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" + "ldr d28, [x26, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v13.4s, v27.4h, v7.4h\n" + "smlal2 v26.4s, v27.8h, v7.8h\n" + "ldr q25, [x11, #0x0]\n" + "ldr q10, [x13, #0x10]\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "ldr q16, [x11, #0x10]\n" + "tst x8, #0x7\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr d31, [x25, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" "add x13, x13, #0x20\n" - "smlal2 v22.4s, v28.8h, v2.8h\n" - "ldr q20, [x11, #0x10]\n" "add x11, x11, #0x20\n" - "smlal v24.4s, v28.4h, v1.4h\n" - "smlal2 v10.4s, v28.8h, v1.8h\n" - "ldr d28, [x26, x17]\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "smlal v11.4s, v27.4h, v7.4h\n" - "smlal2 v26.4s, v27.8h, v7.8h\n" - "smlal v12.4s, v31.4h, v6.4h\n" - "smlal2 v22.4s, v31.8h, v6.8h\n" - "ldr d31, [x25, x17]\n" - "smlal v23.4s, v27.4h, v6.4h\n" - "smlal2 v9.4s, v27.8h, v6.8h\n" - "smlal v12.4s, v27.4h, v4.4h\n" - "smlal2 v22.4s, v27.8h, v4.8h\n" - "smlal v24.4s, v27.4h, v3.4h\n" - "smlal2 v10.4s, v27.8h, v3.8h\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "smlal v24.4s, v29.4h, v8.4h\n" - "smlal2 v10.4s, v29.8h, v8.8h\n" - "ldr d29, [x24, x17]\n" - "smlal v11.4s, v28.4h, v1.4h\n" + "smlal v13.4s, v28.4h, v1.4h\n" "smlal2 v26.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v28.4h, v0.4h\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" - "ldr d28, [x23, x17]\n" - "smlal v11.4s, v31.4h, v2.4h\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "ldr d28, [x23, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "ldr d29, [x24, x15]\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v13.4s, v31.4h, v2.4h\n" "smlal2 v26.4s, v31.8h, v2.8h\n" - "smlal v23.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" - "ldr d31, [x22, x17]\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "smlal v11.4s, v30.4h, v8.4h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "ldr d31, [x22, x15]\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "smlal v13.4s, v30.4h, v8.4h\n" "smlal2 v26.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v7.4h\n" - "smlal2 v9.4s, v30.8h, v7.8h\n" - "smlal v12.4s, v30.4h, v5.4h\n" - "smlal2 v22.4s, v30.8h, v5.8h\n" - "smlal v24.4s, v30.4h, v4.4h\n" - "smlal2 v10.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x17]\n" - "smlal v11.4s, v29.4h, v3.4h\n" + "smlal v19.4s, 
v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" + "ldr d30, [x21, x15]\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal v9.4s, v28.4h, v2.4h\n" + "smlal v13.4s, v29.4h, v3.4h\n" "smlal2 v26.4s, v29.8h, v3.8h\n" - "smlal v12.4s, v29.4h, v0.4h\n" - "smlal2 v22.4s, v29.8h, v0.8h\n" - "ldr d29, [x20, x17]\n" - "smlal v23.4s, v28.4h, v5.4h\n" - "smlal2 v9.4s, v28.8h, v5.8h\n" - "smlal v24.4s, v28.4h, v2.4h\n" - "smlal2 v10.4s, v28.8h, v2.8h\n" - "ldr d28, [x19, x17]\n" - "add x17, x17, #0x8\n" - "usubl v31.8h, v31.8b, v21.8b\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "smlal v11.4s, v31.4h, v6.4h\n" - "smlal2 v26.4s, v31.8h, v6.8h\n" - "smlal v12.4s, v31.4h, v3.4h\n" - "smlal2 v22.4s, v31.8h, v3.8h\n" - "smlal v23.4s, v30.4h, v8.4h\n" - "smlal2 v9.4s, v30.8h, v8.8h\n" - "smlal v24.4s, v30.4h, v5.4h\n" - "smlal2 v10.4s, v30.8h, v5.8h\n" - "smlal v12.4s, v29.4h, v7.4h\n" - "smlal2 v22.4s, v29.8h, v7.8h\n" - "smlal v24.4s, v29.4h, v6.4h\n" - "smlal2 v10.4s, v29.8h, v6.8h\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "sqrdmulh v11.4s, v11.4s, v25.4s\n" - "sqrdmulh v26.4s, v26.4s, v16.4s\n" - "smlal v12.4s, v28.4h, v8.4h\n" - "smlal2 v22.4s, v28.8h, v8.8h\n" - "smlal v24.4s, v28.4h, v7.4h\n" - "smlal2 v10.4s, v28.8h, v7.8h\n" - "and v19.16b, v11.16b, v18.16b\n" - "and v5.16b, v26.16b, v20.16b\n" - "sqrdmulh v23.4s, v23.4s, v25.4s\n" - "sshr v19.4s, v19.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sqrdmulh v9.4s, v9.4s, v16.4s\n" - "sqadd v11.4s, v11.4s, v19.4s\n" - "sqadd v26.4s, v26.4s, v5.4s\n" - "and v28.16b, v23.16b, v18.16b\n" - "and v8.16b, v9.16b, v20.16b\n" - "srshl v11.4s, v11.4s, v18.4s\n" - "srshl v26.4s, v26.4s, v20.4s\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "add v11.4s, v11.4s, v13.4s\n" - "add v26.4s, v26.4s, v13.4s\n" - "sqadd v23.4s, v23.4s, v28.4s\n" - "smin v11.4s, v11.4s, v14.4s\n" - "smin v26.4s, v26.4s, v14.4s\n" - "sqadd v9.4s, v9.4s, v8.4s\n" - "smax v11.4s, v11.4s, v15.4s\n" - "smax v26.4s, v26.4s, v15.4s\n" - "srshl v23.4s, v23.4s, v18.4s\n" - "srshl v9.4s, v9.4s, v20.4s\n" - "uzp1 v11.16b, v11.16b, v26.16b\n" - "sqrdmulh v12.4s, v12.4s, v25.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x10, x15]\n" - "add v23.4s, v23.4s, v13.4s\n" - "add v9.4s, v9.4s, v13.4s\n" - "and v1.16b, v12.16b, v18.16b\n" - "sqrdmulh v22.4s, v22.4s, v16.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "smin v9.4s, v9.4s, v14.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "smax v23.4s, v23.4s, v15.4s\n" - "smax v9.4s, v9.4s, v15.4s\n" - "sqadd v12.4s, v12.4s, v1.4s\n" - "and v0.16b, v22.16b, v20.16b\n" - "uzp1 v23.16b, v23.16b, v9.16b\n" - "sqrdmulh v24.4s, v24.4s, v25.4s\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "str d23, [x9, x15]\n" - "srshl v12.4s, v12.4s, v18.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "and v26.16b, v24.16b, v18.16b\n" - "sqrdmulh v10.4s, v10.4s, v16.4s\n" - "sqadd v22.4s, v22.4s, v0.4s\n" - "add v12.4s, v12.4s, v13.4s\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "and v16.16b, v10.16b, v20.16b\n" - "smin v12.4s, v12.4s, v14.4s\n" - "srshl v22.4s, v22.4s, v20.4s\n" - "sqadd v24.4s, v24.4s, v26.4s\n" - "smax v12.4s, v12.4s, v15.4s\n" - "sshr v16.4s, v16.4s, #0x1f\n" - "add v22.4s, v22.4s, v13.4s\n" - "srshl v24.4s, v24.4s, v18.4s\n" - "sqadd v10.4s, v10.4s, v16.4s\n" - "smin v22.4s, v22.4s, v14.4s\n" - "add v24.4s, v24.4s, v13.4s\n" - "smax v22.4s, v22.4s, v15.4s\n" - "srshl v10.4s, v10.4s, v20.4s\n" - "smin v24.4s, v24.4s, v14.4s\n" - 
"uzp1 v12.16b, v12.16b, v22.16b\n" - "add v10.4s, v10.4s, v13.4s\n" - "uzp1 v12.16b, v12.16b, v12.16b\n" - "str d12, [x28, x15]\n" - "smax v24.4s, v24.4s, v15.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "smax v10.4s, v10.4s, v15.4s\n" - "uzp1 v24.16b, v24.16b, v10.16b\n" - "uzp1 v24.16b, v24.16b, v24.16b\n" - "str d24, [x27, x15]\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "ldr d29, [x20, x15]\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "ldr d28, [x19, x15]\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" "add x15, x15, #0x8\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" + "smlal v13.4s, v31.4h, v6.4h\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "smlal2 v26.4s, v31.8h, v6.8h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "str d13, [x10, x14]\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "str d19, [x9, x14]\n" + "str d18, [x28, x14]\n" + "str d9, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 64f\n" - "add x16, x16, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, 
[%x[params], %[offsetof_Params_bias]]\n" "tbz x8, #2, 5f\n" - "ld1 { v11.4s }, [x19], #0x10\n" + "ld1 { v13.4s }, [x19], #0x10\n" "tbz x8, #1, 4f\n" "ld1 { v26.d }[0], [x19], #0x8\n" "tbz x8, #0, 7f\n" @@ -601,46 +585,46 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset "tbz x8, #1, 6f\n" - "ld1 { v11.d }[0], [x19], #0x8\n" + "ld1 { v13.d }[0], [x19], #0x8\n" "tbz x8, #0, 7f\n" - "ld1 { v11.s }[2], [x19]\n" + "ld1 { v13.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 7f\n" - "ld1 { v11.s }[0], [x19]\n" + "ld1 { v13.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v23.16b, v11.16b\n" - "ldr d0, [x16, #0x0]\n" - "mov v9.16b, v26.16b\n" - "ldr d1, [x16, #0x8]\n" - "mov v12.16b, v11.16b\n" - "ldr d2, [x16, #0x10]\n" - "mov v22.16b, v26.16b\n" - "ldr d3, [x16, #0x18]\n" - "mov v24.16b, v11.16b\n" - "ldr d4, [x16, #0x20]\n" - "mov v10.16b, v26.16b\n" - "ldr d5, [x16, #0x28]\n" - "ssubl v0.8h, v0.8b, v17.8b\n" - "ldr d6, [x16, #0x30]\n" - "ssubl v1.8h, v1.8b, v17.8b\n" - "ldr d7, [x16, #0x38]\n" - "ssubl v2.8h, v2.8b, v17.8b\n" - "ldr d8, [x16, #0x40]\n" - "ssubl v3.8h, v3.8b, v17.8b\n" - "ldp x23, x22, [x14, #0x0]\n" - "add x23, x23, x17\n" - "ssubl v4.8h, v4.8b, v17.8b\n" - "ldp x21, x20, [x14, #0x10]\n" - "ssubl v5.8h, v5.8b, v17.8b\n" - "ldr x19, [x14, #0x20]\n" - "ssubl v6.8h, v6.8b, v17.8b\n" - "add x22, x22, x17\n" - "ssubl v7.8h, v7.8b, v17.8b\n" - "add x21, x21, x17\n" - "ssubl v8.8h, v8.8b, v17.8b\n" - "add x20, x20, x17\n" - "add x19, x19, x17\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v19.16b, v13.16b\n" + "mov v11.16b, v26.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v18.16b, v13.16b\n" + "mov v24.16b, v26.16b\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v9.16b, v13.16b\n" + "mov v23.16b, v26.16b\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "ssubl v0.8h, v0.8b, v12.8b\n" + "ssubl v1.8h, v1.8b, v12.8b\n" + "ldr d8, [x17, #0x40]\n" + "ldp x23, x22, [x12, #0x0]\n" + "ssubl v2.8h, v2.8b, v12.8b\n" + "ssubl v3.8h, v3.8b, v12.8b\n" + "ldp x21, x20, [x12, #0x10]\n" + "ldr x19, [x12, #0x20]\n" + "ssubl v4.8h, v4.8b, v12.8b\n" + "ssubl v5.8h, v5.8b, v12.8b\n" + "ssubl v6.8h, v6.8b, v12.8b\n" + "ssubl v7.8h, v7.8b, v12.8b\n" + "ssubl v8.8h, v8.8b, v12.8b\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x23], #0x4\n" "ld1 { v30.s }[0], [x22], #0x4\n" @@ -690,33 +674,33 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "ld1 { v28.b }[0], [x20]\n" "ld1 { v27.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "usubl v31.8h, v31.8b, v21.8b\n" - "ldr x21, [x14, #0x28]\n" - "add x21, x21, x17\n" - "usubl v30.8h, v30.8b, v21.8b\n" - "usubl v29.8h, v29.8b, v21.8b\n" - "usubl v28.8h, v28.8b, v21.8b\n" - "usubl v27.8h, v27.8b, v21.8b\n" - "smlal v11.4s, v31.4h, v4.4h\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v13.4s, v31.4h, v4.4h\n" "smlal2 v26.4s, v31.8h, v4.8h\n" - "smlal v23.4s, v31.4h, v3.4h\n" - "smlal2 v9.4s, v31.8h, v3.8h\n" - "smlal v12.4s, v31.4h, v1.4h\n" - "smlal2 v22.4s, v31.8h, v1.8h\n" - "smlal v24.4s, v31.4h, v0.4h\n" - "smlal2 v10.4s, v31.8h, v0.8h\n" - "smlal v11.4s, v30.4h, v0.4h\n" + "ldr x21, [x12, #0x28]\n" + "smlal v19.4s, v31.4h, v3.4h\n" + "smlal2 v11.4s, v31.8h, v3.8h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "add x21, x21, x15\n" + "usubl 
v29.8h, v29.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v1.4h\n" + "smlal2 v24.4s, v31.8h, v1.8h\n" + "smlal v9.4s, v31.4h, v0.4h\n" + "smlal2 v23.4s, v31.8h, v0.8h\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v13.4s, v30.4h, v0.4h\n" "smlal2 v26.4s, v30.8h, v0.8h\n" - "smlal v23.4s, v29.4h, v2.4h\n" - "smlal2 v9.4s, v29.8h, v2.8h\n" - "smlal v11.4s, v28.4h, v5.4h\n" + "usubl v27.8h, v27.8b, v22.8b\n" + "smlal v19.4s, v29.4h, v2.4h\n" + "smlal2 v11.4s, v29.8h, v2.8h\n" + "smlal v13.4s, v28.4h, v5.4h\n" "smlal2 v26.4s, v28.8h, v5.8h\n" - "smlal v23.4s, v28.4h, v4.4h\n" - "smlal2 v9.4s, v28.8h, v4.8h\n" - "smlal v12.4s, v28.4h, v2.4h\n" - "smlal2 v22.4s, v28.8h, v2.8h\n" - "smlal v24.4s, v28.4h, v1.4h\n" - "smlal2 v10.4s, v28.8h, v1.8h\n" + "smlal v19.4s, v28.4h, v4.4h\n" + "smlal2 v11.4s, v28.8h, v4.8h\n" + "smlal v18.4s, v28.4h, v2.4h\n" + "smlal2 v24.4s, v28.8h, v2.8h\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v23.4s, v28.8h, v1.8h\n" "tbz x8, #2, 13f\n" "ld1 { v31.s }[0], [x21], #0x4\n" "tbz x8, #1, 12f\n" @@ -738,19 +722,19 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 15f\n" "ld1 { v31.b }[0], [x21]\n" "15:" // Oddments: Load (3, 0): Bit 2: End - "usubl v31.8h, v31.8b, v21.8b\n" - "ldr x20, [x14, #0x30]\n" - "smlal v11.4s, v27.4h, v7.4h\n" - "add x20, x20, x17\n" - "smlal v12.4s, v31.4h, v6.4h\n" - "smlal2 v22.4s, v31.8h, v6.8h\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "smlal v18.4s, v31.4h, v6.4h\n" + "smlal2 v24.4s, v31.8h, v6.8h\n" + "ldr x20, [x12, #0x30]\n" + "smlal v13.4s, v27.4h, v7.4h\n" "smlal2 v26.4s, v27.8h, v7.8h\n" - "smlal v23.4s, v27.4h, v6.4h\n" - "smlal2 v9.4s, v27.8h, v6.8h\n" - "smlal v12.4s, v27.4h, v4.4h\n" - "smlal2 v22.4s, v27.8h, v4.8h\n" - "smlal v24.4s, v27.4h, v3.4h\n" - "smlal2 v10.4s, v27.8h, v3.8h\n" + "add x20, x20, x15\n" + "smlal v19.4s, v27.4h, v6.4h\n" + "smlal2 v11.4s, v27.8h, v6.8h\n" + "smlal v18.4s, v27.4h, v4.4h\n" + "smlal2 v24.4s, v27.8h, v4.8h\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v23.4s, v27.8h, v3.8h\n" "tbz x8, #2, 17f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x8, #1, 16f\n" @@ -772,11 +756,11 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 19f\n" "ld1 { v29.b }[0], [x20]\n" "19:" // Oddments: Load (3, 3): Bit 2: End - "usubl v29.8h, v29.8b, v21.8b\n" - "ldr x26, [x14, #0x38]\n" - "smlal v24.4s, v29.4h, v8.4h\n" - "add x26, x26, x17\n" - "smlal2 v10.4s, v29.8h, v8.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "ldr x26, [x12, #0x38]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v23.4s, v29.8h, v8.8h\n" + "add x26, x26, x15\n" "tbz x8, #2, 21f\n" "ld1 { v28.s }[0], [x26], #0x4\n" "tbz x8, #1, 20f\n" @@ -798,13 +782,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 23f\n" "ld1 { v28.b }[0], [x26]\n" "23:" // Oddments: Load (0, 1): Bit 2: End - "usubl v28.8h, v28.8b, v21.8b\n" - "ldr x25, [x14, #0x40]\n" - "smlal v11.4s, v28.4h, v1.4h\n" - "add x25, x25, x17\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "ldr x25, [x12, #0x40]\n" + "smlal v13.4s, v28.4h, v1.4h\n" "smlal2 v26.4s, v28.8h, v1.8h\n" - "smlal v23.4s, v28.4h, v0.4h\n" - "smlal2 v9.4s, v28.8h, v0.8h\n" + "smlal v19.4s, v28.4h, v0.4h\n" + "smlal2 v11.4s, v28.8h, v0.8h\n" + "add x25, x25, x15\n" "tbz x8, #2, 25f\n" "ld1 { v31.s }[0], [x25], #0x4\n" "tbz x8, #1, 24f\n" @@ -826,13 +810,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 27f\n" "ld1 { v31.b }[0], [x25]\n" "27:" // Oddments: Load (0, 2): Bit 2: End - "usubl v31.8h, v31.8b, v21.8b\n" - "ldr x19, [x14, #0x48]\n" - 
"smlal v11.4s, v31.4h, v2.4h\n" - "add x19, x19, x17\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "ldr x19, [x12, #0x48]\n" + "smlal v13.4s, v31.4h, v2.4h\n" "smlal2 v26.4s, v31.8h, v2.8h\n" - "smlal v23.4s, v31.4h, v1.4h\n" - "smlal2 v9.4s, v31.8h, v1.8h\n" + "smlal v19.4s, v31.4h, v1.4h\n" + "smlal2 v11.4s, v31.8h, v1.8h\n" + "add x19, x19, x15\n" "tbz x8, #2, 29f\n" "ld1 { v30.s }[0], [x19], #0x4\n" "tbz x8, #1, 28f\n" @@ -854,17 +838,17 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 31f\n" "ld1 { v30.b }[0], [x19]\n" "31:" // Oddments: Load (2, 2): Bit 2: End - "usubl v30.8h, v30.8b, v21.8b\n" - "ldr x24, [x14, #0x50]\n" - "smlal v11.4s, v30.4h, v8.4h\n" - "add x24, x24, x17\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x24, [x12, #0x50]\n" + "smlal v13.4s, v30.4h, v8.4h\n" "smlal2 v26.4s, v30.8h, v8.8h\n" - "smlal v23.4s, v30.4h, v7.4h\n" - "smlal2 v9.4s, v30.8h, v7.8h\n" - "smlal v12.4s, v30.4h, v5.4h\n" - "smlal2 v22.4s, v30.8h, v5.8h\n" - "smlal v24.4s, v30.4h, v4.4h\n" - "smlal2 v10.4s, v30.8h, v4.8h\n" + "smlal v19.4s, v30.4h, v7.4h\n" + "smlal2 v11.4s, v30.8h, v7.8h\n" + "add x24, x24, x15\n" + "smlal v18.4s, v30.4h, v5.4h\n" + "smlal2 v24.4s, v30.8h, v5.8h\n" + "smlal v9.4s, v30.4h, v4.4h\n" + "smlal2 v23.4s, v30.8h, v4.8h\n" "tbz x8, #2, 33f\n" "ld1 { v29.s }[0], [x24], #0x4\n" "tbz x8, #1, 32f\n" @@ -886,13 +870,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 35f\n" "ld1 { v29.b }[0], [x24]\n" "35:" // Oddments: Load (1, 0): Bit 2: End - "usubl v29.8h, v29.8b, v21.8b\n" - "ldr x23, [x14, #0x58]\n" - "smlal v11.4s, v29.4h, v3.4h\n" - "add x23, x23, x17\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "ldr x23, [x12, #0x58]\n" + "smlal v13.4s, v29.4h, v3.4h\n" "smlal2 v26.4s, v29.8h, v3.8h\n" - "smlal v12.4s, v29.4h, v0.4h\n" - "smlal2 v22.4s, v29.8h, v0.8h\n" + "smlal v18.4s, v29.4h, v0.4h\n" + "smlal2 v24.4s, v29.8h, v0.8h\n" + "add x23, x23, x15\n" "tbz x8, #2, 37f\n" "ld1 { v28.s }[0], [x23], #0x4\n" "tbz x8, #1, 36f\n" @@ -914,13 +898,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 39f\n" "ld1 { v28.b }[0], [x23]\n" "39:" // Oddments: Load (1, 3): Bit 2: End - "usubl v28.8h, v28.8b, v21.8b\n" - "ldr x22, [x14, #0x60]\n" - "smlal v23.4s, v28.4h, v5.4h\n" - "add x22, x22, x17\n" - "smlal2 v9.4s, v28.8h, v5.8h\n" - "smlal v24.4s, v28.4h, v2.4h\n" - "smlal2 v10.4s, v28.8h, v2.8h\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "ldr x22, [x12, #0x60]\n" + "smlal v19.4s, v28.4h, v5.4h\n" + "smlal2 v11.4s, v28.8h, v5.8h\n" + "smlal v9.4s, v28.4h, v2.4h\n" + "smlal2 v23.4s, v28.8h, v2.8h\n" + "add x22, x22, x15\n" "tbz x8, #2, 41f\n" "ld1 { v31.s }[0], [x22], #0x4\n" "tbz x8, #1, 40f\n" @@ -942,13 +926,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 43f\n" "ld1 { v31.b }[0], [x22]\n" "43:" // Oddments: Load (2, 0): Bit 2: End - "usubl v31.8h, v31.8b, v21.8b\n" - "ldr x21, [x14, #0x68]\n" - "smlal v11.4s, v31.4h, v6.4h\n" - "add x21, x21, x17\n" + "usubl v31.8h, v31.8b, v22.8b\n" + "ldr x21, [x12, #0x68]\n" + "smlal v13.4s, v31.4h, v6.4h\n" "smlal2 v26.4s, v31.8h, v6.8h\n" - "smlal v12.4s, v31.4h, v3.4h\n" - "smlal2 v22.4s, v31.8h, v3.8h\n" + "smlal v18.4s, v31.4h, v3.4h\n" + "smlal2 v24.4s, v31.8h, v3.8h\n" + "add x21, x21, x15\n" "tbz x8, #2, 45f\n" "ld1 { v30.s }[0], [x21], #0x4\n" "tbz x8, #1, 44f\n" @@ -970,13 +954,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 47f\n" "ld1 { v30.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "usubl 
v30.8h, v30.8b, v21.8b\n" - "ldr x20, [x14, #0x70]\n" - "smlal v23.4s, v30.4h, v8.4h\n" - "add x20, x20, x17\n" - "smlal2 v9.4s, v30.8h, v8.8h\n" - "smlal v24.4s, v30.4h, v5.4h\n" - "smlal2 v10.4s, v30.8h, v5.8h\n" + "usubl v30.8h, v30.8b, v22.8b\n" + "ldr x20, [x12, #0x70]\n" + "smlal v19.4s, v30.4h, v8.4h\n" + "smlal2 v11.4s, v30.8h, v8.8h\n" + "smlal v9.4s, v30.4h, v5.4h\n" + "smlal2 v23.4s, v30.8h, v5.8h\n" + "add x20, x20, x15\n" "tbz x8, #2, 49f\n" "ld1 { v29.s }[0], [x20], #0x4\n" "tbz x8, #1, 48f\n" @@ -998,13 +982,13 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 51f\n" "ld1 { v29.b }[0], [x20]\n" "51:" // Oddments: Load (3, 1): Bit 2: End - "usubl v29.8h, v29.8b, v21.8b\n" - "ldr x19, [x14, #0x78]\n" - "smlal v12.4s, v29.4h, v7.4h\n" - "add x19, x19, x17\n" - "smlal2 v22.4s, v29.8h, v7.8h\n" - "smlal v24.4s, v29.4h, v6.4h\n" - "smlal2 v10.4s, v29.8h, v6.8h\n" + "usubl v29.8h, v29.8b, v22.8b\n" + "ldr x19, [x12, #0x78]\n" + "smlal v18.4s, v29.4h, v7.4h\n" + "smlal2 v24.4s, v29.8h, v7.8h\n" + "smlal v9.4s, v29.4h, v6.4h\n" + "smlal2 v23.4s, v29.8h, v6.8h\n" + "add x19, x19, x15\n" "tbz x8, #2, 53f\n" "ld1 { v28.s }[0], [x19], #0x4\n" "tbz x8, #1, 52f\n" @@ -1026,160 +1010,150 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( "tbz x8, #0, 55f\n" "ld1 { v28.b }[0], [x19]\n" "55:" // Oddments: Load (3, 2): Bit 2: End - "usubl v28.8h, v28.8b, v21.8b\n" - "smlal v12.4s, v28.4h, v8.4h\n" - "smlal2 v22.4s, v28.8h, v8.8h\n" - "smlal v24.4s, v28.4h, v7.4h\n" - "smlal2 v10.4s, v28.8h, v7.8h\n" + "usubl v28.8h, v28.8b, v22.8b\n" + "smlal v18.4s, v28.4h, v8.4h\n" + "smlal2 v24.4s, v28.8h, v8.8h\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v23.4s, v28.8h, v7.8h\n" "tbz x8, #2, 57f\n" - "ld1 { v25.4s }, [x13], #0x10\n" - "ld1 { v18.4s }, [x11], #0x10\n" + "ld1 { v21.4s }, [x13], #0x10\n" + "ld1 { v25.4s }, [x11], #0x10\n" "tbz x8, #1, 56f\n" - "ld1 { v16.d }[0], [x13], #0x8\n" - "ld1 { v20.d }[0], [x11], #0x8\n" + "ld1 { v10.d }[0], [x13], #0x8\n" + "ld1 { v16.d }[0], [x11], #0x8\n" "tbz x8, #0, 59f\n" - "ld1 { v16.s }[2], [x13]\n" - "ld1 { v20.s }[2], [x11]\n" + "ld1 { v10.s }[2], [x13]\n" + "ld1 { v16.s }[2], [x11]\n" "b 59f\n" "56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset "tbz x8, #0, 59f\n" - "ld1 { v16.s }[0], [x13]\n" - "ld1 { v20.s }[0], [x11]\n" + "ld1 { v10.s }[0], [x13]\n" + "ld1 { v16.s }[0], [x11]\n" "b 59f\n" "57:" // Oddments: Load requant params: Bit 2: Unset "tbz x8, #1, 58f\n" - "ld1 { v25.d }[0], [x13], #0x8\n" - "ld1 { v18.d }[0], [x11], #0x8\n" + "ld1 { v21.d }[0], [x13], #0x8\n" + "ld1 { v25.d }[0], [x11], #0x8\n" "tbz x8, #0, 59f\n" - "ld1 { v25.s }[2], [x13]\n" - "ld1 { v18.s }[2], [x11]\n" + "ld1 { v21.s }[2], [x13]\n" + "ld1 { v25.s }[2], [x11]\n" "b 59f\n" "58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 59f\n" - "ld1 { v25.s }[0], [x13]\n" - "ld1 { v18.s }[0], [x11]\n" + "ld1 { v21.s }[0], [x13]\n" + "ld1 { v25.s }[0], [x11]\n" "59:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v11.4s, v11.4s, v25.4s\n" - "add x10, x10, x15\n" - "sqrdmulh v26.4s, v26.4s, v16.4s\n" - "add x9, x9, x15\n" - "sqrdmulh v23.4s, v23.4s, v25.4s\n" - "add x28, x28, x15\n" - "sqrdmulh v9.4s, v9.4s, v16.4s\n" - "add x27, x27, x15\n" - "sqrdmulh v12.4s, v12.4s, v25.4s\n" - "and v19.16b, v11.16b, v18.16b\n" - "and v5.16b, v26.16b, v20.16b\n" - "and v28.16b, v23.16b, v18.16b\n" - "sshr v19.4s, v19.4s, #0x1f\n" - "sshr v5.4s, v5.4s, #0x1f\n" - "sshr v28.4s, v28.4s, #0x1f\n" - "sqadd v11.4s, v11.4s, 
v19.4s\n" - "sqadd v26.4s, v26.4s, v5.4s\n" - "sqadd v23.4s, v23.4s, v28.4s\n" - "and v8.16b, v9.16b, v20.16b\n" - "srshl v11.4s, v11.4s, v18.4s\n" - "srshl v26.4s, v26.4s, v20.4s\n" - "srshl v23.4s, v23.4s, v18.4s\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "add v11.4s, v11.4s, v13.4s\n" - "add v26.4s, v26.4s, v13.4s\n" - "add v23.4s, v23.4s, v13.4s\n" - "smin v11.4s, v11.4s, v14.4s\n" - "smin v26.4s, v26.4s, v14.4s\n" - "smin v23.4s, v23.4s, v14.4s\n" - "smax v11.4s, v11.4s, v15.4s\n" - "smax v26.4s, v26.4s, v15.4s\n" - "smax v23.4s, v23.4s, v15.4s\n" - "sqadd v9.4s, v9.4s, v8.4s\n" - "uzp1 v11.16b, v11.16b, v26.16b\n" - "and v1.16b, v12.16b, v18.16b\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "srshl v9.4s, v9.4s, v20.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "sqrdmulh v22.4s, v22.4s, v16.4s\n" - "sqrdmulh v24.4s, v24.4s, v25.4s\n" - "add v9.4s, v9.4s, v13.4s\n" - "sqadd v12.4s, v12.4s, v1.4s\n" - "and v0.16b, v22.16b, v20.16b\n" - "smin v9.4s, v9.4s, v14.4s\n" - "and v26.16b, v24.16b, v18.16b\n" - "srshl v12.4s, v12.4s, v18.4s\n" - "smax v9.4s, v9.4s, v15.4s\n" - "sshr v0.4s, v0.4s, #0x1f\n" - "sshr v26.4s, v26.4s, #0x1f\n" - "uzp1 v23.16b, v23.16b, v9.16b\n" - "add v12.4s, v12.4s, v13.4s\n" - "uzp1 v23.16b, v23.16b, v23.16b\n" - "sqadd v22.4s, v22.4s, v0.4s\n" - "smin v12.4s, v12.4s, v14.4s\n" - "sqadd v24.4s, v24.4s, v26.4s\n" - "sqrdmulh v10.4s, v10.4s, v16.4s\n" - "smax v12.4s, v12.4s, v15.4s\n" - "srshl v22.4s, v22.4s, v20.4s\n" - "srshl v24.4s, v24.4s, v18.4s\n" - "and v16.16b, v10.16b, v20.16b\n" - "add v22.4s, v22.4s, v13.4s\n" - "add v24.4s, v24.4s, v13.4s\n" - "sshr v16.4s, v16.4s, #0x1f\n" - "smin v22.4s, v22.4s, v14.4s\n" - "smin v24.4s, v24.4s, v14.4s\n" - "sqadd v10.4s, v10.4s, v16.4s\n" - "smax v22.4s, v22.4s, v15.4s\n" - "smax v24.4s, v24.4s, v15.4s\n" - "srshl v10.4s, v10.4s, v20.4s\n" - "uzp1 v12.16b, v12.16b, v22.16b\n" - "uzp1 v12.16b, v12.16b, v12.16b\n" - "add v10.4s, v10.4s, v13.4s\n" - "smin v10.4s, v10.4s, v14.4s\n" - "smax v10.4s, v10.4s, v15.4s\n" - "uzp1 v24.16b, v24.16b, v10.16b\n" - "uzp1 v24.16b, v24.16b, v24.16b\n" + "sqdmulh v13.4s, v13.4s, v21.4s\n" + "sqdmulh v19.4s, v19.4s, v21.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v18.4s, v18.4s, v21.4s\n" + "sqdmulh v9.4s, v9.4s, v21.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v7.16b, v13.16b, v25.16b\n" + "sqdmulh v26.4s, v26.4s, v10.4s\n" + "and v4.16b, v19.16b, v25.16b\n" + "sqdmulh v11.4s, v11.4s, v10.4s\n" + "and v21.16b, v18.16b, v25.16b\n" + "sqdmulh v24.4s, v24.4s, v10.4s\n" + "and v20.16b, v9.16b, v25.16b\n" + "sqdmulh v23.4s, v23.4s, v10.4s\n" + "sshr v7.4s, v7.4s, #0x1f\n" + "and v29.16b, v26.16b, v16.16b\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "and v10.16b, v11.16b, v16.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v31.16b, v24.16b, v16.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v30.16b, v23.16b, v16.16b\n" + "sqadd v13.4s, v13.4s, v7.4s\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "sqadd v19.4s, v19.4s, v4.4s\n" + "sshr v10.4s, v10.4s, #0x1f\n" + "sqadd v18.4s, v18.4s, v21.4s\n" + "sshr v31.4s, v31.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v20.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "srshl v13.4s, v13.4s, v25.4s\n" + "sqadd v26.4s, v26.4s, v29.4s\n" + "srshl v19.4s, v19.4s, v25.4s\n" + "sqadd v11.4s, v11.4s, v10.4s\n" + "srshl v18.4s, v18.4s, v25.4s\n" + "sqadd v24.4s, v24.4s, v31.4s\n" + "srshl v9.4s, v9.4s, v25.4s\n" + "sqadd v23.4s, v23.4s, v30.4s\n" + "srshl v26.4s, v26.4s, v16.4s\n" + "sqxtn v13.4h, v13.4s\n" + "srshl v11.4s, v11.4s, v16.4s\n" + "sqxtn v19.4h, v19.4s\n" + "srshl 
v24.4s, v24.4s, v16.4s\n" + "sqxtn v18.4h, v18.4s\n" + "srshl v23.4s, v23.4s, v16.4s\n" + "sqxtn v9.4h, v9.4s\n" + "sqxtn2 v13.8h, v26.4s\n" + "sqxtn2 v19.8h, v11.4s\n" + "sqxtn2 v18.8h, v24.4s\n" + "sqxtn2 v9.8h, v23.4s\n" + "sqadd v13.8h, v13.8h, v14.8h\n" + "sqadd v19.8h, v19.8h, v14.8h\n" + "sqadd v18.8h, v18.8h, v14.8h\n" + "sqadd v9.8h, v9.8h, v14.8h\n" + "smax v13.8h, v13.8h, v17.8h\n" + "smax v19.8h, v19.8h, v17.8h\n" + "smax v18.8h, v18.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smin v13.8h, v13.8h, v15.8h\n" + "smin v19.8h, v19.8h, v15.8h\n" + "smin v18.8h, v18.8h, v15.8h\n" + "smin v9.8h, v9.8h, v15.8h\n" + "uzp1 v13.16b, v13.16b, v13.16b\n" + "uzp1 v19.16b, v19.16b, v19.16b\n" + "uzp1 v18.16b, v18.16b, v18.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" "tbz x8, #2, 61f\n" - "st1 { v11.s }[0], [x10], #0x4\n" - "st1 { v23.s }[0], [x9], #0x4\n" - "st1 { v12.s }[0], [x28], #0x4\n" - "st1 { v24.s }[0], [x27], #0x4\n" + "st1 { v13.s }[0], [x10], #0x4\n" + "st1 { v19.s }[0], [x9], #0x4\n" + "st1 { v18.s }[0], [x28], #0x4\n" + "st1 { v9.s }[0], [x27], #0x4\n" "tbz x8, #1, 60f\n" - "st1 { v11.h }[2], [x10], #0x2\n" - "st1 { v23.h }[2], [x9], #0x2\n" - "st1 { v12.h }[2], [x28], #0x2\n" - "st1 { v24.h }[2], [x27], #0x2\n" + "st1 { v13.h }[2], [x10], #0x2\n" + "st1 { v19.h }[2], [x9], #0x2\n" + "st1 { v18.h }[2], [x28], #0x2\n" + "st1 { v9.h }[2], [x27], #0x2\n" "tbz x8, #0, 63f\n" - "st1 { v11.b }[6], [x10], #0x1\n" - "st1 { v23.b }[6], [x9], #0x1\n" - "st1 { v12.b }[6], [x28], #0x1\n" - "st1 { v24.b }[6], [x27], #0x1\n" + "st1 { v13.b }[6], [x10], #0x1\n" + "st1 { v19.b }[6], [x9], #0x1\n" + "st1 { v18.b }[6], [x28], #0x1\n" + "st1 { v9.b }[6], [x27], #0x1\n" "b 63f\n" "60:" // Oddments: Bit 2: Bit 1: Unset "tbz x8, #0, 63f\n" - "st1 { v11.b }[4], [x10], #0x1\n" - "st1 { v23.b }[4], [x9], #0x1\n" - "st1 { v12.b }[4], [x28], #0x1\n" - "st1 { v24.b }[4], [x27], #0x1\n" + "st1 { v13.b }[4], [x10], #0x1\n" + "st1 { v19.b }[4], [x9], #0x1\n" + "st1 { v18.b }[4], [x28], #0x1\n" + "st1 { v9.b }[4], [x27], #0x1\n" "b 63f\n" "61:" // Oddments: Bit 2: Unset "tbz x8, #1, 62f\n" - "st1 { v11.h }[0], [x10], #0x2\n" - "st1 { v23.h }[0], [x9], #0x2\n" - "st1 { v12.h }[0], [x28], #0x2\n" - "st1 { v24.h }[0], [x27], #0x2\n" + "st1 { v13.h }[0], [x10], #0x2\n" + "st1 { v19.h }[0], [x9], #0x2\n" + "st1 { v18.h }[0], [x28], #0x2\n" + "st1 { v9.h }[0], [x27], #0x2\n" "tbz x8, #0, 63f\n" - "st1 { v11.b }[2], [x10], #0x1\n" - "st1 { v23.b }[2], [x9], #0x1\n" - "st1 { v12.b }[2], [x28], #0x1\n" - "st1 { v24.b }[2], [x27], #0x1\n" + "st1 { v13.b }[2], [x10], #0x1\n" + "st1 { v19.b }[2], [x9], #0x1\n" + "st1 { v18.b }[2], [x28], #0x1\n" + "st1 { v9.b }[2], [x27], #0x1\n" "b 63f\n" "62:" // Oddments: Bit 2: Unset: Bit 1: Unset "tbz x8, #0, 63f\n" - "st1 { v11.b }[0], [x10], #0x1\n" - "st1 { v23.b }[0], [x9], #0x1\n" - "st1 { v12.b }[0], [x28], #0x1\n" - "st1 { v24.b }[0], [x27], #0x1\n" + "st1 { v13.b }[0], [x10], #0x1\n" + "st1 { v19.b }[0], [x9], #0x1\n" + "st1 { v18.b }[0], [x28], #0x1\n" + "st1 { v9.b }[0], [x27], #0x1\n" "63:" // Oddments: Bit 2: End - "64:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), 
[offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
index 77861e94f0..9a1b64ed7b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -36,37 +36,24 @@ namespace depthwise {

void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);

-struct a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst
+class a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>
{
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef int8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>;
+  public:
  constexpr static unsigned int kernel_rows = 3;
  constexpr static unsigned int kernel_cols = 3;
  constexpr static unsigned int stride_rows = 2;
  constexpr static unsigned int stride_cols = 2;
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 5;
-  constexpr static unsigned int input_cols = 5;
-
-  constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_3x3_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_3x3_mla::get_packed_size;
+  a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {}
-  kern_type kernel = a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; }
-  a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType kernel = a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl;
+
Parent::KernelType get_kernel(void) const override { return kernel; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp index 4e1586b033..790d26bc6d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const int8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const uint8_t *const *inptrs_raw, - const int8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -100,611 +100,595 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x4, #0x0\n" - "ldr x5, [%x[params], %[offsetof_Params_weights]]\n" - "mov x6, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x7, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x8, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x17, x3, #0x3\n" - "ldr x16, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x19, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v22.16b }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v12.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v14.4s }, [x19]\n" - "add x19, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v16.4s }, [x20]\n" - "ld1r { v15.4s }, [x19]\n" - "ldp x15, x14, [x21, #0x0]\n" - "ldp x13, x12, [x21, #0x10]\n" - "cbz x17, 3f\n" - "subs x17, x17, #0x1\n" + "ldr x19, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x24, x19, %[offsetof_Requantize32_a_offset]\n" + "add x23, x19, %[offsetof_Requantize32_b_offset]\n" + "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x21, x19, %[offsetof_Requantize32_c_offset]\n" + "add x20, x19, %[offsetof_Requantize32_minval]\n" + "ldr x17, [%x[params], %[offsetof_Params_weights]]\n" + "add x19, x19, %[offsetof_Requantize32_maxval]\n" + "ld1r { v12.16b }, [x24]\n" + "ld1r { v13.16b }, [x23]\n" + "lsr x16, x8, #0x3\n" + "ld1r { v11.8h }, [x21]\n" + "ld1r { v17.8h }, [x20]\n" + "mov x15, #0x0\n" + "mov x14, #0x0\n" + "ld1r { v14.8h }, [x19]\n" + "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x12, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x10, x9, [x22, #0x0]\n" + "ldp x28, x27, [x22, #0x10]\n" + "cbz x16, 3f\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q13, [x19, #0x0]\n" - 
"mov v19.16b, v13.16b\n" + "ldr q15, [x19, #0x0]\n" + "subs x16, x16, #0x1\n" + "mov v9.16b, v15.16b\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v11.16b, v13.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v18.16b, v13.16b\n" - "ldr d0, [x5, #0x0]\n" - "ldr d1, [x5, #0x8]\n" - "mov v20.16b, v10.16b\n" - "ldr d2, [x5, #0x10]\n" - "mov v17.16b, v10.16b\n" - "ldr d3, [x5, #0x18]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v16.16b, v10.16b\n" + "mov v22.16b, v15.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" "mov v21.16b, v10.16b\n" - "ldr d4, [x5, #0x20]\n" - "ssubl v0.8h, v0.8b, v12.8b\n" - "ldr d5, [x5, #0x28]\n" - "ssubl v1.8h, v1.8b, v12.8b\n" - "ldr d6, [x5, #0x30]\n" - "ssubl v2.8h, v2.8b, v12.8b\n" - "ldr d7, [x5, #0x38]\n" - "ssubl v3.8h, v3.8b, v12.8b\n" - "ldr d8, [x5, #0x40]\n" - "ssubl v4.8h, v4.8b, v12.8b\n" - "ldp x26, x25, [x7, #0x0]\n" - "ssubl v5.8h, v5.8b, v12.8b\n" - "ldp x24, x23, [x7, #0x10]\n" - "ssubl v6.8h, v6.8b, v12.8b\n" - "ssubl v7.8h, v7.8b, v12.8b\n" - "ldp x22, x21, [x7, #0x20]\n" - "ssubl v8.8h, v8.8b, v12.8b\n" - "ldp x20, x19, [x7, #0x30]\n" - "ldr d31, [x26, x4]\n" - "usubl v31.8h, v31.8b, v22.8b\n" - "ldr d30, [x25, x4]\n" - "ldr d29, [x24, x4]\n" - "usubl v30.8h, v30.8b, v22.8b\n" - "ldr d28, [x23, x4]\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr d27, [x22, x4]\n" - "ldr d26, [x21, x4]\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "ldr d25, [x20, x4]\n" - "ldr d24, [x19, x4]\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" + "mov v23.16b, v15.16b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v18.16b, v10.16b\n" + "ssubl v0.8h, v0.8b, v13.8b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "ssubl v1.8h, v1.8b, v13.8b\n" + "ssubl v2.8h, v2.8b, v13.8b\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" + "ssubl v3.8h, v3.8b, v13.8b\n" + "ssubl v4.8h, v4.8b, v13.8b\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, #0x30]\n" + "ssubl v5.8h, v5.8b, v13.8b\n" + "ssubl v6.8h, v6.8b, v13.8b\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" + "ssubl v7.8h, v7.8b, v13.8b\n" + "ssubl v8.8h, v8.8b, v13.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" + "usubl v31.8h, v31.8b, v12.8b\n" + "usubl v30.8h, v30.8b, v12.8b\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "usubl v24.8h, v24.8b, v12.8b\n" "beq 2f\n" "1:" // Loop - "smlal v13.4s, v31.4h, v8.4h\n" - "ldr x22, [x7, #0x40]\n" - "add x5, x5, #0x48\n" + "smlal v15.4s, v31.4h, v8.4h\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x21, [x7, #0x48]\n" - "subs x17, x17, #0x1\n" - "smlal v19.4s, v31.4h, v6.4h\n" - "ldr x20, [x7, #0x50]\n" - "smlal2 v20.4s, v31.8h, v6.8h\n" - "ldr x19, [x7, #0x58]\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "ldr x11, [x7, #0x60]\n" - "smlal2 v17.4s, v31.8h, v2.8h\n" - "ldr x10, [x7, #0x68]\n" - "smlal v18.4s, v31.4h, v0.4h\n" - "ldr x9, [x7, #0x70]\n" - "smlal2 v21.4s, v31.8h, v0.8h\n" - "ldr x28, [x7, #0x78]\n" - "smlal v13.4s, v30.4h, v0.4h\n" - "ldr x27, [x7, #0x80]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + 
"ldr x19, [x12, #0x58]\n" + "smlal v15.4s, v30.4h, v0.4h\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x7, #0x88]\n" - "smlal v19.4s, v28.4h, v1.4h\n" - "ldr x25, [x7, #0x90]\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x21, x4]\n" - "smlal v13.4s, v29.4h, v1.4h\n" - "ldr x24, [x7, #0x98]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v15.4s, v29.4h, v1.4h\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x22, x4]\n" - "smlal v19.4s, v27.4h, v2.4h\n" - "ldr x23, [x7, #0xa0]\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "ldr d27, [x20, x4]\n" - "smlal v13.4s, v26.4h, v3.4h\n" - "ldr x22, [x7, #0xa8]\n" + "ldr d29, [x24, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v15.4s, v26.4h, v3.4h\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x19, x4]\n" - "smlal v19.4s, v24.4h, v0.4h\n" - "ldr x21, [x7, #0xb0]\n" - "smlal2 v20.4s, v24.8h, v0.8h\n" - "ldr x20, [x7, #0xb8]\n" - "smlal v13.4s, v25.4h, v4.4h\n" - "ldr x19, [x7, #0xc0]\n" + "ldr d26, [x19, x15]\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" + "smlal v15.4s, v25.4h, v4.4h\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x11, x4]\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr q31, [x8, #0x0]\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "ldr q30, [x16, #0x0]\n" - "smlal v13.4s, v24.4h, v2.4h\n" - "ldr q23, [x8, #0x10]\n" - "add x8, x8, #0x20\n" + "ldr d25, [x20, x15]\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, x15]\n" + "smlal v15.4s, v24.4h, v2.4h\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr d24, [x9, x4]\n" - "smlal v19.4s, v29.4h, v4.4h\n" - "ldr q9, [x16, #0x10]\n" - "add x16, x16, #0x20\n" - "smlal2 v20.4s, v29.8h, v4.8h\n" - "ldr d29, [x10, x4]\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "smlal v19.4s, v28.4h, v5.4h\n" - "smlal v13.4s, v27.4h, v5.4h\n" - "smlal2 v20.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x4]\n" + "ldr x19, [x12, #0x70]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" + "smlal v15.4s, v27.4h, v5.4h\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "smlal v19.4s, v27.4h, v3.4h\n" - "smlal v11.4s, v26.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x4]\n" - "smlal2 v17.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x4]\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" - "smlal v13.4s, v25.4h, v6.4h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" 
+ "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" + "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v17.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x4]\n" - "smlal v13.4s, v24.4h, v7.4h\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" + "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v11.4s, v29.4h, v4.4h\n" - "smlal2 v17.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x4]\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v17.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x4]\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x4]\n" - "smlal v19.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "smlal v18.4s, v28.4h, v1.4h\n" - "smlal2 v21.4s, v28.8h, v1.8h\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "smlal v18.4s, v26.4h, v5.4h\n" - "smlal2 v21.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x4]\n" - "smlal v11.4s, v25.4h, v6.4h\n" - "smlal2 v17.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x4]\n" - "smlal v19.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "smlal v18.4s, v29.4h, v2.4h\n" - "smlal2 v21.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x4]\n" - "add x4, x4, #0x8\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "smlal v11.4s, v27.4h, v7.4h\n" - "smlal2 v17.4s, v27.8h, v7.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "smlal v11.4s, v24.4h, v5.4h\n" - "smlal2 v17.4s, v24.8h, v5.8h\n" - "smlal v18.4s, v26.4h, v7.4h\n" - "smlal2 v21.4s, v26.8h, v7.8h\n" - "smlal v11.4s, v25.4h, v8.4h\n" - "smlal2 v17.4s, v25.8h, v8.8h\n" - "smlal v18.4s, v25.4h, v6.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" + "smlal2 v18.4s, v26.8h, v5.8h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" + "smlal2 v18.4s, v29.8h, v2.8h\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "add x17, x17, #0x48\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "subs x16, x16, #0x1\n" "smlal2 v21.4s, v25.8h, v6.8h\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "sqrdmulh v13.4s, v13.4s, v31.4s\n" - "sqrdmulh v10.4s, v10.4s, v23.4s\n" - "smlal v18.4s, v29.4h, v8.4h\n" - "smlal2 v21.4s, v29.8h, v8.8h\n" - "and v27.16b, v13.16b, v30.16b\n" - "and v7.16b, v10.16b, v9.16b\n" - "sqrdmulh v19.4s, v19.4s, v31.4s\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqrdmulh v20.4s, v20.4s, v23.4s\n" - "sqadd v13.4s, v13.4s, v27.4s\n" - "sqadd v10.4s, v10.4s, v7.4s\n" - "and v6.16b, v19.16b, v30.16b\n" - "and v3.16b, v20.16b, v9.16b\n" - "srshl v13.4s, v13.4s, v30.4s\n" 
- "srshl v10.4s, v10.4s, v9.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "add v13.4s, v13.4s, v14.4s\n" - "add v10.4s, v10.4s, v14.4s\n" - "sqadd v19.4s, v19.4s, v6.4s\n" - "smin v13.4s, v13.4s, v15.4s\n" - "smin v10.4s, v10.4s, v15.4s\n" - "sqadd v20.4s, v20.4s, v3.4s\n" - "smax v13.4s, v13.4s, v16.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "srshl v19.4s, v19.4s, v30.4s\n" - "srshl v20.4s, v20.4s, v9.4s\n" - "uzp1 v13.16b, v13.16b, v10.16b\n" - "sqrdmulh v11.4s, v11.4s, v31.4s\n" - "uzp1 v13.16b, v13.16b, v13.16b\n" - "str d13, [x15, x6]\n" - "add v19.4s, v19.4s, v14.4s\n" - "add v20.4s, v20.4s, v14.4s\n" - "and v28.16b, v11.16b, v30.16b\n" - "sqrdmulh v17.4s, v17.4s, v23.4s\n" - "smin v19.4s, v19.4s, v15.4s\n" - "smin v20.4s, v20.4s, v15.4s\n" + "ldr d25, [x20, x15]\n" + "smlal2 v18.4s, v24.8h, v3.8h\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x13, x13, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" + "smlal2 v18.4s, v26.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "add x11, x11, #0x20\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" + "smlal2 v18.4s, v25.8h, v6.8h\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" + "smlal2 v18.4s, v29.8h, v8.8h\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" "sshr v28.4s, v28.4s, #0x1f\n" - "smax v19.4s, v19.4s, v16.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "sqadd v11.4s, v11.4s, v28.4s\n" - "and v26.16b, v17.16b, v9.16b\n" - "uzp1 v19.16b, v19.16b, v20.16b\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "uzp1 v19.16b, v19.16b, v19.16b\n" - "str d19, [x14, x6]\n" - "srshl v11.4s, v11.4s, v30.4s\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" "sshr v26.4s, v26.4s, #0x1f\n" - "and v8.16b, v18.16b, v30.16b\n" - "sqrdmulh v21.4s, v21.4s, v23.4s\n" - "sqadd v17.4s, v17.4s, v26.4s\n" - "add v11.4s, v11.4s, v14.4s\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "and v27.16b, v21.16b, v9.16b\n" - "smin v11.4s, v11.4s, v15.4s\n" - "srshl v17.4s, v17.4s, v9.4s\n" - "sqadd v18.4s, v18.4s, v8.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "add v17.4s, v17.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v30.4s\n" - "sqadd v21.4s, v21.4s, v27.4s\n" - "smin v17.4s, v17.4s, v15.4s\n" - "add v18.4s, v18.4s, v14.4s\n" - "smax v17.4s, v17.4s, v16.4s\n" - "srshl v21.4s, v21.4s, v9.4s\n" - "smin v18.4s, v18.4s, v15.4s\n" - "uzp1 v11.16b, v11.16b, v17.16b\n" - "add v21.4s, v21.4s, v14.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x13, x6]\n" - "smax v18.4s, v18.4s, v16.4s\n" - "smin v21.4s, v21.4s, v15.4s\n" - "smax v21.4s, v21.4s, v16.4s\n" - "uzp1 
v18.16b, v18.16b, v21.16b\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "str d18, [x12, x6]\n" - "add x6, x6, #0x8\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q13, [x19, #0x0]\n" - "mov v19.16b, v13.16b\n" + "ldr q15, [x19, #0x0]\n" + "add x14, x14, #0x8\n" "ldr q10, [x19, #0x10]\n" "add x19, x19, #0x20\n" - "mov v11.16b, v13.16b\n" "str x19, [%x[params], %[offsetof_Params_bias]]\n" - "mov v18.16b, v13.16b\n" - "ldr d0, [x5, #0x0]\n" - "ldr d1, [x5, #0x8]\n" - "mov v20.16b, v10.16b\n" - "ldr d2, [x5, #0x10]\n" - "mov v17.16b, v10.16b\n" - "ldr d3, [x5, #0x18]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "ldr d2, [x17, #0x10]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d3, [x17, #0x18]\n" + "ldr d4, [x17, #0x20]\n" + "mov v22.16b, v15.16b\n" "mov v21.16b, v10.16b\n" - "ldr d4, [x5, #0x20]\n" - "ssubl v0.8h, v0.8b, v12.8b\n" - "ldr d5, [x5, #0x28]\n" - "ssubl v1.8h, v1.8b, v12.8b\n" - "ldr d6, [x5, #0x30]\n" - "ssubl v2.8h, v2.8b, v12.8b\n" - "ldr d7, [x5, #0x38]\n" - "ssubl v3.8h, v3.8b, v12.8b\n" - "ldr d8, [x5, #0x40]\n" - "ssubl v4.8h, v4.8b, v12.8b\n" - "ldp x26, x25, [x7, #0x0]\n" - "ssubl v5.8h, v5.8b, v12.8b\n" - "ldp x24, x23, [x7, #0x10]\n" - "ssubl v6.8h, v6.8b, v12.8b\n" - "ssubl v7.8h, v7.8b, v12.8b\n" - "ldp x22, x21, [x7, #0x20]\n" - "ssubl v8.8h, v8.8b, v12.8b\n" - "ldp x20, x19, [x7, #0x30]\n" - "ldr d31, [x26, x4]\n" - "usubl v31.8h, v31.8b, v22.8b\n" - "ldr d30, [x25, x4]\n" - "ldr d29, [x24, x4]\n" - "usubl v30.8h, v30.8b, v22.8b\n" - "ldr d28, [x23, x4]\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr d27, [x22, x4]\n" - "ldr d26, [x21, x4]\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "ldr d25, [x20, x4]\n" - "ldr d24, [x19, x4]\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" + "ldr d5, [x17, #0x28]\n" + "ldr d6, [x17, #0x30]\n" + "mov v23.16b, v15.16b\n" + "mov v18.16b, v10.16b\n" + "ldr d7, [x17, #0x38]\n" + "ldr d8, [x17, #0x40]\n" + "ssubl v0.8h, v0.8b, v13.8b\n" + "ssubl v1.8h, v1.8b, v13.8b\n" + "ldp x26, x25, [x12, #0x0]\n" + "ldp x24, x23, [x12, #0x10]\n" + "ssubl v2.8h, v2.8b, v13.8b\n" + "ssubl v3.8h, v3.8b, v13.8b\n" + "ldp x22, x21, [x12, #0x20]\n" + "ldp x20, x19, [x12, 
#0x30]\n" + "ssubl v4.8h, v4.8b, v13.8b\n" + "ssubl v5.8h, v5.8b, v13.8b\n" + "ldr d31, [x26, x15]\n" + "ldr d30, [x25, x15]\n" + "ssubl v6.8h, v6.8b, v13.8b\n" + "ssubl v7.8h, v7.8b, v13.8b\n" + "ldr d29, [x24, x15]\n" + "ldr d28, [x23, x15]\n" + "ssubl v8.8h, v8.8b, v13.8b\n" + "usubl v31.8h, v31.8b, v12.8b\n" + "ldr d27, [x22, x15]\n" + "ldr d26, [x21, x15]\n" + "usubl v30.8h, v30.8b, v12.8b\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "ldr d25, [x20, x15]\n" + "ldr d24, [x19, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "usubl v24.8h, v24.8b, v12.8b\n" "bgt 1b\n" "2:" // Tail - "smlal v13.4s, v31.4h, v8.4h\n" - "ldr x22, [x7, #0x40]\n" - "tst x3, #0x7\n" + "smlal v15.4s, v31.4h, v8.4h\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "ldr x21, [x7, #0x48]\n" - "smlal v19.4s, v31.4h, v6.4h\n" - "ldr x20, [x7, #0x50]\n" - "smlal2 v20.4s, v31.8h, v6.8h\n" - "ldr x19, [x7, #0x58]\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "ldr x11, [x7, #0x60]\n" - "smlal2 v17.4s, v31.8h, v2.8h\n" - "ldr x10, [x7, #0x68]\n" - "smlal v18.4s, v31.4h, v0.4h\n" - "ldr x9, [x7, #0x70]\n" - "smlal2 v21.4s, v31.8h, v0.8h\n" - "ldr x28, [x7, #0x78]\n" - "smlal v13.4s, v30.4h, v0.4h\n" - "ldr x27, [x7, #0x80]\n" + "ldr x24, [x12, #0x40]\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "ldr x21, [x12, #0x50]\n" + "ldr x19, [x12, #0x58]\n" + "smlal v15.4s, v30.4h, v0.4h\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "ldr x26, [x7, #0x88]\n" - "smlal v19.4s, v28.4h, v1.4h\n" - "ldr x25, [x7, #0x90]\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "ldr d28, [x21, x4]\n" - "smlal v13.4s, v29.4h, v1.4h\n" - "ldr x24, [x7, #0x98]\n" + "ldr x22, [x12, #0x78]\n" + "ldr x20, [x12, #0x60]\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "ldr d28, [x23, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v15.4s, v29.4h, v1.4h\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "ldr d29, [x22, x4]\n" - "smlal v19.4s, v27.4h, v2.4h\n" - "ldr x23, [x7, #0xa0]\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "ldr d27, [x20, x4]\n" - "smlal v13.4s, v26.4h, v3.4h\n" - "ldr x22, [x7, #0xa8]\n" + "ldr d29, [x24, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v15.4s, v26.4h, v3.4h\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "ldr d26, [x19, x4]\n" - "smlal v19.4s, v24.4h, v0.4h\n" - "ldr x21, [x7, #0xb0]\n" - "smlal2 v20.4s, v24.8h, v0.8h\n" - "ldr x20, [x7, #0xb8]\n" - "smlal v13.4s, v25.4h, v4.4h\n" - "ldr x19, [x7, #0xc0]\n" + "ldr d26, [x19, x15]\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "ldr x21, [x12, #0x80]\n" + "ldr x19, [x12, #0x68]\n" + "smlal v15.4s, v25.4h, v4.4h\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "ldr d25, [x11, x4]\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr q31, [x8, #0x0]\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "ldr q30, [x16, #0x0]\n" - "smlal v13.4s, v24.4h, v2.4h\n" - "ldr q23, [x8, #0x10]\n" - "add x8, x8, #0x20\n" + "ldr d25, [x20, x15]\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "ldr x20, [x12, #0x88]\n" + "ldr d29, [x19, x15]\n" + "smlal v15.4s, v24.4h, v2.4h\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "ldr d24, [x9, x4]\n" - "smlal v19.4s, v29.4h, v4.4h\n" - "ldr q9, [x16, #0x10]\n" - "add x16, x16, #0x20\n" - "smlal2 v20.4s, v29.8h, v4.8h\n" - 
"ldr d29, [x10, x4]\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "smlal v19.4s, v28.4h, v5.4h\n" - "smlal v13.4s, v27.4h, v5.4h\n" - "smlal2 v20.4s, v28.8h, v5.8h\n" - "ldr d28, [x27, x4]\n" + "ldr x19, [x12, #0x70]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "ldr d28, [x21, x15]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "ldr x24, [x12, #0x98]\n" + "ldr d24, [x19, x15]\n" + "smlal v15.4s, v27.4h, v5.4h\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "smlal v19.4s, v27.4h, v3.4h\n" - "smlal v11.4s, v26.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "ldr d27, [x28, x4]\n" - "smlal2 v17.4s, v26.8h, v3.8h\n" - "ldr d26, [x26, x4]\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" - "smlal v13.4s, v25.4h, v6.4h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "ldr d27, [x22, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "ldr x22, [x12, #0xa8]\n" + "ldr x19, [x12, #0xa0]\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "ldr d26, [x20, x15]\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "ldr x21, [x12, #0xb0]\n" + "ldr x20, [x12, #0xb8]\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "ldr d27, [x19, x15]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "smlal v23.4s, v28.4h, v1.4h\n" + "smlal v15.4s, v25.4h, v6.4h\n" + "ldr x19, [x12, #0xc0]\n" + "ldr q19, [x13, #0x0]\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v17.4s, v25.8h, v0.8h\n" - "ldr d25, [x25, x4]\n" - "smlal v13.4s, v24.4h, v7.4h\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "ldr d25, [x23, x15]\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "ldr d29, [x24, x15]\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v26.4h, v5.4h\n" + "smlal v15.4s, v24.4h, v7.4h\n" + "ldr q0, [x11, #0x0]\n" + "ldr q4, [x13, #0x10]\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v11.4s, v29.4h, v4.4h\n" - "smlal2 v17.4s, v29.8h, v4.8h\n" - "ldr d29, [x24, x4]\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v17.4s, v24.8h, v1.8h\n" - "ldr d24, [x22, x4]\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x4]\n" - "smlal v19.4s, v28.4h, v7.4h\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "smlal v18.4s, v28.4h, v1.4h\n" - "smlal2 v21.4s, v28.8h, v1.8h\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "smlal v18.4s, v26.4h, v5.4h\n" - "smlal2 v21.4s, v26.8h, v5.8h\n" - "ldr d26, [x21, x4]\n" - "smlal v11.4s, v25.4h, v6.4h\n" - "smlal2 v17.4s, v25.8h, v6.8h\n" - "ldr d25, [x20, x4]\n" - "smlal v19.4s, v29.4h, v8.4h\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "smlal v18.4s, v29.4h, v2.4h\n" - "smlal2 v21.4s, v29.8h, v2.8h\n" - "ldr d29, [x19, x4]\n" - "add x4, x4, #0x8\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "smlal v11.4s, v27.4h, v7.4h\n" - "smlal2 v17.4s, v27.8h, v7.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "smlal 
v11.4s, v24.4h, v5.4h\n" - "smlal2 v17.4s, v24.8h, v5.8h\n" - "smlal v18.4s, v26.4h, v7.4h\n" - "smlal2 v21.4s, v26.8h, v7.8h\n" - "smlal v11.4s, v25.4h, v8.4h\n" - "smlal2 v17.4s, v25.8h, v8.8h\n" - "smlal v18.4s, v25.4h, v6.4h\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "ldr q31, [x11, #0x10]\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "ldr d24, [x22, x15]\n" + "smlal2 v18.4s, v26.8h, v5.8h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "ldr d26, [x21, x15]\n" + "smlal2 v18.4s, v29.8h, v2.8h\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "and v30.16b, v15.16b, v0.16b\n" + "tst x8, #0x7\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "add x13, x13, #0x20\n" "smlal2 v21.4s, v25.8h, v6.8h\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "sqrdmulh v13.4s, v13.4s, v31.4s\n" - "sqrdmulh v10.4s, v10.4s, v23.4s\n" - "smlal v18.4s, v29.4h, v8.4h\n" - "smlal2 v21.4s, v29.8h, v8.8h\n" - "and v27.16b, v13.16b, v30.16b\n" - "and v7.16b, v10.16b, v9.16b\n" - "sqrdmulh v19.4s, v19.4s, v31.4s\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sqrdmulh v20.4s, v20.4s, v23.4s\n" - "sqadd v13.4s, v13.4s, v27.4s\n" - "sqadd v10.4s, v10.4s, v7.4s\n" - "and v6.16b, v19.16b, v30.16b\n" - "and v3.16b, v20.16b, v9.16b\n" - "srshl v13.4s, v13.4s, v30.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "add v13.4s, v13.4s, v14.4s\n" - "add v10.4s, v10.4s, v14.4s\n" - "sqadd v19.4s, v19.4s, v6.4s\n" - "smin v13.4s, v13.4s, v15.4s\n" - "smin v10.4s, v10.4s, v15.4s\n" - "sqadd v20.4s, v20.4s, v3.4s\n" - "smax v13.4s, v13.4s, v16.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "srshl v19.4s, v19.4s, v30.4s\n" - "srshl v20.4s, v20.4s, v9.4s\n" - "uzp1 v13.16b, v13.16b, v10.16b\n" - "sqrdmulh v11.4s, v11.4s, v31.4s\n" - "uzp1 v13.16b, v13.16b, v13.16b\n" - "str d13, [x15, x6]\n" - "add v19.4s, v19.4s, v14.4s\n" - "add v20.4s, v20.4s, v14.4s\n" - "and v28.16b, v11.16b, v30.16b\n" - "sqrdmulh v17.4s, v17.4s, v23.4s\n" - "smin v19.4s, v19.4s, v15.4s\n" - "smin v20.4s, v20.4s, v15.4s\n" + "ldr d25, [x20, x15]\n" + "smlal2 v18.4s, v24.8h, v3.8h\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "add x11, x11, #0x20\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "ldr d29, [x19, x15]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" + "smlal2 v18.4s, v26.8h, v7.8h\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x15, x15, #0x8\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "and v28.16b, v9.16b, v0.16b\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" + "smlal2 v18.4s, v25.8h, v6.8h\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" + "smlal2 v18.4s, v29.8h, v8.8h\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "and v19.16b, v10.16b, v31.16b\n" "sshr v28.4s, v28.4s, #0x1f\n" - "smax v19.4s, v19.4s, v16.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" - "sqadd v11.4s, v11.4s, v28.4s\n" - "and v26.16b, v17.16b, v9.16b\n" - "uzp1 v19.16b, v19.16b, v20.16b\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "uzp1 
v19.16b, v19.16b, v19.16b\n" - "str d19, [x14, x6]\n" - "srshl v11.4s, v11.4s, v30.4s\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" "sshr v26.4s, v26.4s, #0x1f\n" - "and v8.16b, v18.16b, v30.16b\n" - "sqrdmulh v21.4s, v21.4s, v23.4s\n" - "sqadd v17.4s, v17.4s, v26.4s\n" - "add v11.4s, v11.4s, v14.4s\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "and v27.16b, v21.16b, v9.16b\n" - "smin v11.4s, v11.4s, v15.4s\n" - "srshl v17.4s, v17.4s, v9.4s\n" - "sqadd v18.4s, v18.4s, v8.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "add v17.4s, v17.4s, v14.4s\n" - "srshl v18.4s, v18.4s, v30.4s\n" - "sqadd v21.4s, v21.4s, v27.4s\n" - "smin v17.4s, v17.4s, v15.4s\n" - "add v18.4s, v18.4s, v14.4s\n" - "smax v17.4s, v17.4s, v16.4s\n" - "srshl v21.4s, v21.4s, v9.4s\n" - "smin v18.4s, v18.4s, v15.4s\n" - "uzp1 v11.16b, v11.16b, v17.16b\n" - "add v21.4s, v21.4s, v14.4s\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "str d11, [x13, x6]\n" - "smax v18.4s, v18.4s, v16.4s\n" - "smin v21.4s, v21.4s, v15.4s\n" - "smax v21.4s, v21.4s, v16.4s\n" - "uzp1 v18.16b, v18.16b, v21.16b\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "str d18, [x12, x6]\n" - "add x6, x6, #0x8\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "str d15, [x10, x14]\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "str d9, [x9, x14]\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "str d22, [x28, x14]\n" + "str d23, [x27, x14]\n" + "add x14, x14, #0x8\n" "beq 88f\n" - "add x5, x5, #0x48\n" + "add x17, x17, #0x48\n" "3:" // Oddments "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x3, #2, 5f\n" - "ld1 { v13.4s }, [x19], #0x10\n" - "tbz x3, #1, 4f\n" + "tbz x8, #2, 5f\n" + "ld1 { v15.4s }, [x19], #0x10\n" + "tbz x8, #1, 4f\n" "ld1 { v10.d }[0], [x19], #0x8\n" - "tbz x3, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x3, #0, 7f\n" + "tbz x8, #0, 7f\n" "ld1 { v10.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x3, #1, 6f\n" - "ld1 { v13.d }[0], [x19], #0x8\n" - "tbz x3, #0, 7f\n" - "ld1 { v13.s }[2], [x19]\n" + "tbz x8, #1, 6f\n" + "ld1 { v15.d }[0], 
[x19], #0x8\n" + "tbz x8, #0, 7f\n" + "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 7f\n" - "ld1 { v13.s }[0], [x19]\n" + "tbz x8, #0, 7f\n" + "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v19.16b, v13.16b\n" - "ldr d0, [x5, #0x0]\n" - "mov v20.16b, v10.16b\n" - "ldr d1, [x5, #0x8]\n" - "mov v11.16b, v13.16b\n" - "ldr d2, [x5, #0x10]\n" - "mov v17.16b, v10.16b\n" - "ldr d3, [x5, #0x18]\n" - "mov v18.16b, v13.16b\n" - "ldr d4, [x5, #0x20]\n" + "ldr d0, [x17, #0x0]\n" + "ldr d1, [x17, #0x8]\n" + "mov v9.16b, v15.16b\n" + "mov v16.16b, v10.16b\n" + "ldr d2, [x17, #0x10]\n" + "ldr d3, [x17, #0x18]\n" + "mov v22.16b, v15.16b\n" "mov v21.16b, v10.16b\n" - "ldr d5, [x5, #0x28]\n" - "ssubl v0.8h, v0.8b, v12.8b\n" - "ldr d6, [x5, #0x30]\n" - "ssubl v1.8h, v1.8b, v12.8b\n" - "ldr d7, [x5, #0x38]\n" - "ssubl v2.8h, v2.8b, v12.8b\n" - "ldr d8, [x5, #0x40]\n" - "ssubl v3.8h, v3.8b, v12.8b\n" - "ldp x26, x25, [x7, #0x0]\n" - "add x26, x26, x4\n" - "ssubl v4.8h, v4.8b, v12.8b\n" - "ldp x24, x23, [x7, #0x10]\n" - "ssubl v5.8h, v5.8b, v12.8b\n" - "ldp x22, x21, [x7, #0x20]\n" - "ssubl v6.8h, v6.8b, v12.8b\n" - "add x25, x25, x4\n" - "ssubl v7.8h, v7.8b, v12.8b\n" - "ldp x20, x19, [x7, #0x30]\n" - "ssubl v8.8h, v8.8b, v12.8b\n" - "add x24, x24, x4\n" - "add x23, x23, x4\n" - "add x22, x22, x4\n" - "add x21, x21, x4\n" - "add x20, x20, x4\n" - "add x19, x19, x4\n" - "tbz x3, #2, 9f\n" + "ldr d4, [x17, #0x20]\n" + "ldr d5, [x17, #0x28]\n" + "mov v23.16b, v15.16b\n" + "mov v18.16b, v10.16b\n" + "ldr d6, [x17, #0x30]\n" + "ldr d7, [x17, #0x38]\n" + "ssubl v0.8h, v0.8b, v13.8b\n" + "ssubl v1.8h, v1.8b, v13.8b\n" + "ldr d8, [x17, #0x40]\n" + "ldp x26, x25, [x12, #0x0]\n" + "ssubl v2.8h, v2.8b, v13.8b\n" + "ssubl v3.8h, v3.8b, v13.8b\n" + "ldp x24, x23, [x12, #0x10]\n" + "ldp x22, x21, [x12, #0x20]\n" + "ssubl v4.8h, v4.8b, v13.8b\n" + "ssubl v5.8h, v5.8b, v13.8b\n" + "ldp x20, x19, [x12, #0x30]\n" + "ssubl v6.8h, v6.8b, v13.8b\n" + "ssubl v7.8h, v7.8b, v13.8b\n" + "ssubl v8.8h, v8.8b, v13.8b\n" + "add x26, x26, x15\n" + "add x25, x25, x15\n" + "add x24, x24, x15\n" + "add x23, x23, x15\n" + "add x22, x22, x15\n" + "add x21, x21, x15\n" + "add x20, x20, x15\n" + "add x19, x19, x15\n" + "tbz x8, #2, 9f\n" "ld1 { v31.s }[0], [x26], #0x4\n" "ld1 { v30.s }[0], [x25], #0x4\n" "ld1 { v29.s }[0], [x24], #0x4\n" @@ -713,7 +697,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.s }[0], [x21], #0x4\n" "ld1 { v25.s }[0], [x20], #0x4\n" "ld1 { v24.s }[0], [x19], #0x4\n" - "tbz x3, #1, 8f\n" + "tbz x8, #1, 8f\n" "ld1 { v31.h }[2], [x26], #0x2\n" "ld1 { v30.h }[2], [x25], #0x2\n" "ld1 { v29.h }[2], [x24], #0x2\n" @@ -722,7 +706,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[2], [x21], #0x2\n" "ld1 { v25.h }[2], [x20], #0x2\n" "ld1 { v24.h }[2], [x19], #0x2\n" - "tbz x3, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[6], [x26]\n" "ld1 { v30.b }[6], [x25]\n" "ld1 { v29.b }[6], [x24]\n" @@ -733,7 +717,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x3, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[4], [x26]\n" "ld1 { v30.b }[4], [x25]\n" "ld1 { v29.b }[4], [x24]\n" @@ -744,7 +728,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x3, #1, 10f\n" + "tbz x8, #1, 10f\n" 
"ld1 { v31.h }[0], [x26], #0x2\n" "ld1 { v30.h }[0], [x25], #0x2\n" "ld1 { v29.h }[0], [x24], #0x2\n" @@ -753,7 +737,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v26.h }[0], [x21], #0x2\n" "ld1 { v25.h }[0], [x20], #0x2\n" "ld1 { v24.h }[0], [x19], #0x2\n" - "tbz x3, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[2], [x26]\n" "ld1 { v30.b }[2], [x25]\n" "ld1 { v29.b }[2], [x24]\n" @@ -764,7 +748,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v24.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 11f\n" + "tbz x8, #0, 11f\n" "ld1 { v31.b }[0], [x26]\n" "ld1 { v30.b }[0], [x25]\n" "ld1 { v29.b }[0], [x24]\n" @@ -774,646 +758,636 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( "ld1 { v25.b }[0], [x20]\n" "ld1 { v24.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End - "usubl v31.8h, v31.8b, v22.8b\n" - "ldr x22, [x7, #0x40]\n" - "add x22, x22, x4\n" - "usubl v30.8h, v30.8b, v22.8b\n" - "usubl v29.8h, v29.8b, v22.8b\n" - "usubl v28.8h, v28.8b, v22.8b\n" - "usubl v27.8h, v27.8b, v22.8b\n" - "usubl v26.8h, v26.8b, v22.8b\n" - "usubl v25.8h, v25.8b, v22.8b\n" - "usubl v24.8h, v24.8b, v22.8b\n" - "smlal v13.4s, v31.4h, v8.4h\n" + "usubl v31.8h, v31.8b, v12.8b\n" + "smlal v15.4s, v31.4h, v8.4h\n" "smlal2 v10.4s, v31.8h, v8.8h\n" - "smlal v19.4s, v31.4h, v6.4h\n" - "smlal2 v20.4s, v31.8h, v6.8h\n" - "smlal v11.4s, v31.4h, v2.4h\n" - "smlal2 v17.4s, v31.8h, v2.8h\n" - "smlal v18.4s, v31.4h, v0.4h\n" - "smlal2 v21.4s, v31.8h, v0.8h\n" - "smlal v13.4s, v30.4h, v0.4h\n" + "ldr x24, [x12, #0x40]\n" + "usubl v30.8h, v30.8b, v12.8b\n" + "smlal v15.4s, v30.4h, v0.4h\n" "smlal2 v10.4s, v30.8h, v0.8h\n" - "smlal v19.4s, v28.4h, v1.4h\n" - "smlal2 v20.4s, v28.8h, v1.8h\n" - "smlal v13.4s, v29.4h, v1.4h\n" + "add x24, x24, x15\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v9.4s, v31.4h, v6.4h\n" + "smlal2 v16.4s, v31.8h, v6.8h\n" + "smlal v15.4s, v29.4h, v1.4h\n" "smlal2 v10.4s, v29.8h, v1.8h\n" - "smlal v19.4s, v27.4h, v2.4h\n" - "smlal2 v20.4s, v27.8h, v2.8h\n" - "smlal v13.4s, v26.4h, v3.4h\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "smlal v9.4s, v28.4h, v1.4h\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal v15.4s, v26.4h, v3.4h\n" "smlal2 v10.4s, v26.8h, v3.8h\n" - "smlal v19.4s, v24.4h, v0.4h\n" - "smlal2 v20.4s, v24.8h, v0.8h\n" - "smlal v13.4s, v25.4h, v4.4h\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "smlal v9.4s, v27.4h, v2.4h\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal v15.4s, v25.4h, v4.4h\n" "smlal2 v10.4s, v25.8h, v4.8h\n" - "smlal v13.4s, v24.4h, v2.4h\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "smlal v22.4s, v31.4h, v2.4h\n" + "smlal2 v21.4s, v31.8h, v2.8h\n" + "smlal v23.4s, v31.4h, v0.4h\n" + "smlal2 v18.4s, v31.8h, v0.8h\n" + "smlal v15.4s, v24.4h, v2.4h\n" "smlal2 v10.4s, v24.8h, v2.8h\n" - "tbz x3, #2, 13f\n" - "ld1 { v29.s }[0], [x22], #0x4\n" - "tbz x3, #1, 12f\n" - "ld1 { v29.h }[2], [x22], #0x2\n" - "tbz x3, #0, 15f\n" - "ld1 { v29.b }[6], [x22]\n" + "smlal v9.4s, v24.4h, v0.4h\n" + "smlal2 v16.4s, v24.8h, v0.8h\n" + "tbz x8, #2, 13f\n" + "ld1 { v29.s }[0], [x24], #0x4\n" + "tbz x8, #1, 12f\n" + "ld1 { v29.h }[2], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[6], [x24]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x3, #0, 15f\n" - "ld1 { v29.b }[4], [x22]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[4], [x24]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - 
"tbz x3, #1, 14f\n" - "ld1 { v29.h }[0], [x22], #0x2\n" - "tbz x3, #0, 15f\n" - "ld1 { v29.b }[2], [x22]\n" + "tbz x8, #1, 14f\n" + "ld1 { v29.h }[0], [x24], #0x2\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[2], [x24]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 15f\n" - "ld1 { v29.b }[0], [x22]\n" + "tbz x8, #0, 15f\n" + "ld1 { v29.b }[0], [x24]\n" "15:" // Oddments: Load (1, 3): Bit 2: End - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr x21, [x7, #0x48]\n" - "smlal v19.4s, v29.4h, v4.4h\n" - "add x21, x21, x4\n" - "smlal2 v20.4s, v29.8h, v4.8h\n" - "tbz x3, #2, 17f\n" - "ld1 { v28.s }[0], [x21], #0x4\n" - "tbz x3, #1, 16f\n" - "ld1 { v28.h }[2], [x21], #0x2\n" - "tbz x3, #0, 19f\n" - "ld1 { v28.b }[6], [x21]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "ldr x23, [x12, #0x48]\n" + "smlal v9.4s, v29.4h, v4.4h\n" + "smlal2 v16.4s, v29.8h, v4.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 17f\n" + "ld1 { v28.s }[0], [x23], #0x4\n" + "tbz x8, #1, 16f\n" + "ld1 { v28.h }[2], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[6], [x23]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x3, #0, 19f\n" - "ld1 { v28.b }[4], [x21]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[4], [x23]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x3, #1, 18f\n" - "ld1 { v28.h }[0], [x21], #0x2\n" - "tbz x3, #0, 19f\n" - "ld1 { v28.b }[2], [x21]\n" + "tbz x8, #1, 18f\n" + "ld1 { v28.h }[0], [x23], #0x2\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[2], [x23]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 19f\n" - "ld1 { v28.b }[0], [x21]\n" + "tbz x8, #0, 19f\n" + "ld1 { v28.b }[0], [x23]\n" "19:" // Oddments: Load (1, 4): Bit 2: End - "usubl v28.8h, v28.8b, v22.8b\n" - "ldr x20, [x7, #0x50]\n" - "smlal v19.4s, v28.4h, v5.4h\n" - "add x20, x20, x4\n" - "smlal2 v20.4s, v28.8h, v5.8h\n" - "tbz x3, #2, 21f\n" - "ld1 { v27.s }[0], [x20], #0x4\n" - "tbz x3, #1, 20f\n" - "ld1 { v27.h }[2], [x20], #0x2\n" - "tbz x3, #0, 23f\n" - "ld1 { v27.b }[6], [x20]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "ldr x21, [x12, #0x50]\n" + "smlal v9.4s, v28.4h, v5.4h\n" + "smlal2 v16.4s, v28.8h, v5.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 21f\n" + "ld1 { v27.s }[0], [x21], #0x4\n" + "tbz x8, #1, 20f\n" + "ld1 { v27.h }[2], [x21], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[6], [x21]\n" "b 23f\n" "20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset - "tbz x3, #0, 23f\n" - "ld1 { v27.b }[4], [x20]\n" + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[4], [x21]\n" "b 23f\n" "21:" // Oddments: Load (1, 2): Bit 2: Unset - "tbz x3, #1, 22f\n" - "ld1 { v27.h }[0], [x20], #0x2\n" - "tbz x3, #0, 23f\n" - "ld1 { v27.b }[2], [x20]\n" + "tbz x8, #1, 22f\n" + "ld1 { v27.h }[0], [x21], #0x2\n" + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[2], [x21]\n" "b 23f\n" "22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 23f\n" - "ld1 { v27.b }[0], [x20]\n" + "tbz x8, #0, 23f\n" + "ld1 { v27.b }[0], [x21]\n" "23:" // Oddments: Load (1, 2): Bit 2: End - "usubl v27.8h, v27.8b, v22.8b\n" - "ldr x19, [x7, #0x58]\n" - "smlal v13.4s, v27.4h, v5.4h\n" - "add x19, x19, x4\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "ldr x19, [x12, #0x58]\n" + "smlal v15.4s, v27.4h, v5.4h\n" "smlal2 v10.4s, v27.8h, v5.8h\n" - "smlal v19.4s, v27.4h, v3.4h\n" - "smlal2 v20.4s, v27.8h, v3.8h\n" - "tbz x3, #2, 25f\n" + "smlal v9.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 25f\n" "ld1 { v26.s }[0], [x19], #0x4\n" - "tbz x3, #1, 24f\n" + "tbz 
x8, #1, 24f\n" "ld1 { v26.h }[2], [x19], #0x2\n" - "tbz x3, #0, 27f\n" + "tbz x8, #0, 27f\n" "ld1 { v26.b }[6], [x19]\n" "b 27f\n" "24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x3, #0, 27f\n" + "tbz x8, #0, 27f\n" "ld1 { v26.b }[4], [x19]\n" "b 27f\n" "25:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x3, #1, 26f\n" + "tbz x8, #1, 26f\n" "ld1 { v26.h }[0], [x19], #0x2\n" - "tbz x3, #0, 27f\n" + "tbz x8, #0, 27f\n" "ld1 { v26.b }[2], [x19]\n" "b 27f\n" "26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 27f\n" + "tbz x8, #0, 27f\n" "ld1 { v26.b }[0], [x19]\n" "27:" // Oddments: Load (3, 0): Bit 2: End - "usubl v26.8h, v26.8b, v22.8b\n" - "ldr x11, [x7, #0x60]\n" - "smlal v11.4s, v26.4h, v3.4h\n" - "add x11, x11, x4\n" - "smlal2 v17.4s, v26.8h, v3.8h\n" - "tbz x3, #2, 29f\n" - "ld1 { v25.s }[0], [x11], #0x4\n" - "tbz x3, #1, 28f\n" - "ld1 { v25.h }[2], [x11], #0x2\n" - "tbz x3, #0, 31f\n" - "ld1 { v25.b }[6], [x11]\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "ldr x20, [x12, #0x60]\n" + "smlal v22.4s, v26.4h, v3.4h\n" + "smlal2 v21.4s, v26.8h, v3.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 29f\n" + "ld1 { v25.s }[0], [x20], #0x4\n" + "tbz x8, #1, 28f\n" + "ld1 { v25.h }[2], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[6], [x20]\n" "b 31f\n" "28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset - "tbz x3, #0, 31f\n" - "ld1 { v25.b }[4], [x11]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[4], [x20]\n" "b 31f\n" "29:" // Oddments: Load (2, 0): Bit 2: Unset - "tbz x3, #1, 30f\n" - "ld1 { v25.h }[0], [x11], #0x2\n" - "tbz x3, #0, 31f\n" - "ld1 { v25.b }[2], [x11]\n" + "tbz x8, #1, 30f\n" + "ld1 { v25.h }[0], [x20], #0x2\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[2], [x20]\n" "b 31f\n" "30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 31f\n" - "ld1 { v25.b }[0], [x11]\n" + "tbz x8, #0, 31f\n" + "ld1 { v25.b }[0], [x20]\n" "31:" // Oddments: Load (2, 0): Bit 2: End - "usubl v25.8h, v25.8b, v22.8b\n" - "ldr x10, [x7, #0x68]\n" - "smlal v13.4s, v25.4h, v6.4h\n" - "add x10, x10, x4\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "ldr x19, [x12, #0x68]\n" + "smlal v15.4s, v25.4h, v6.4h\n" "smlal2 v10.4s, v25.8h, v6.8h\n" - "smlal v11.4s, v25.4h, v0.4h\n" - "smlal2 v17.4s, v25.8h, v0.8h\n" - "tbz x3, #2, 33f\n" - "ld1 { v29.s }[0], [x10], #0x4\n" - "tbz x3, #1, 32f\n" - "ld1 { v29.h }[2], [x10], #0x2\n" - "tbz x3, #0, 35f\n" - "ld1 { v29.b }[6], [x10]\n" + "smlal v22.4s, v25.4h, v0.4h\n" + "smlal2 v21.4s, v25.8h, v0.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 33f\n" + "ld1 { v29.s }[0], [x19], #0x4\n" + "tbz x8, #1, 32f\n" + "ld1 { v29.h }[2], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[6], [x19]\n" "b 35f\n" "32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x3, #0, 35f\n" - "ld1 { v29.b }[4], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[4], [x19]\n" "b 35f\n" "33:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x3, #1, 34f\n" - "ld1 { v29.h }[0], [x10], #0x2\n" - "tbz x3, #0, 35f\n" - "ld1 { v29.b }[2], [x10]\n" + "tbz x8, #1, 34f\n" + "ld1 { v29.h }[0], [x19], #0x2\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[2], [x19]\n" "b 35f\n" "34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 35f\n" - "ld1 { v29.b }[0], [x10]\n" + "tbz x8, #0, 35f\n" + "ld1 { v29.b }[0], [x19]\n" "35:" // Oddments: Load (3, 1): Bit 2: End - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr x9, [x7, #0x70]\n" - "smlal v11.4s, v29.4h, v4.4h\n" - "add x9, x9, x4\n" - "smlal2 v17.4s, v29.8h, v4.8h\n" - "tbz x3, #2, 37f\n" - "ld1 { v24.s }[0], [x9], 
#0x4\n" - "tbz x3, #1, 36f\n" - "ld1 { v24.h }[2], [x9], #0x2\n" - "tbz x3, #0, 39f\n" - "ld1 { v24.b }[6], [x9]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "ldr x19, [x12, #0x70]\n" + "smlal v22.4s, v29.4h, v4.4h\n" + "smlal2 v21.4s, v29.8h, v4.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 37f\n" + "ld1 { v24.s }[0], [x19], #0x4\n" + "tbz x8, #1, 36f\n" + "ld1 { v24.h }[2], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[6], [x19]\n" "b 39f\n" "36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x3, #0, 39f\n" - "ld1 { v24.b }[4], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[4], [x19]\n" "b 39f\n" "37:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x3, #1, 38f\n" - "ld1 { v24.h }[0], [x9], #0x2\n" - "tbz x3, #0, 39f\n" - "ld1 { v24.b }[2], [x9]\n" + "tbz x8, #1, 38f\n" + "ld1 { v24.h }[0], [x19], #0x2\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[2], [x19]\n" "b 39f\n" "38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 39f\n" - "ld1 { v24.b }[0], [x9]\n" + "tbz x8, #0, 39f\n" + "ld1 { v24.b }[0], [x19]\n" "39:" // Oddments: Load (2, 1): Bit 2: End - "usubl v24.8h, v24.8b, v22.8b\n" - "ldr x28, [x7, #0x78]\n" - "smlal v13.4s, v24.4h, v7.4h\n" - "add x28, x28, x4\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x22, [x12, #0x78]\n" + "smlal v15.4s, v24.4h, v7.4h\n" "smlal2 v10.4s, v24.8h, v7.8h\n" - "smlal v11.4s, v24.4h, v1.4h\n" - "smlal2 v17.4s, v24.8h, v1.8h\n" - "tbz x3, #2, 41f\n" - "ld1 { v27.s }[0], [x28], #0x4\n" - "tbz x3, #1, 40f\n" - "ld1 { v27.h }[2], [x28], #0x2\n" - "tbz x3, #0, 43f\n" - "ld1 { v27.b }[6], [x28]\n" + "smlal v22.4s, v24.4h, v1.4h\n" + "smlal2 v21.4s, v24.8h, v1.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 41f\n" + "ld1 { v27.s }[0], [x22], #0x4\n" + "tbz x8, #1, 40f\n" + "ld1 { v27.h }[2], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[6], [x22]\n" "b 43f\n" "40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x3, #0, 43f\n" - "ld1 { v27.b }[4], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[4], [x22]\n" "b 43f\n" "41:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x3, #1, 42f\n" - "ld1 { v27.h }[0], [x28], #0x2\n" - "tbz x3, #0, 43f\n" - "ld1 { v27.b }[2], [x28]\n" + "tbz x8, #1, 42f\n" + "ld1 { v27.h }[0], [x22], #0x2\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[2], [x22]\n" "b 43f\n" "42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 43f\n" - "ld1 { v27.b }[0], [x28]\n" + "tbz x8, #0, 43f\n" + "ld1 { v27.b }[0], [x22]\n" "43:" // Oddments: Load (3, 3): Bit 2: End - "usubl v27.8h, v27.8b, v22.8b\n" - "ldr x27, [x7, #0x80]\n" - "smlal v18.4s, v27.4h, v4.4h\n" - "add x27, x27, x4\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "tbz x3, #2, 45f\n" - "ld1 { v28.s }[0], [x27], #0x4\n" - "tbz x3, #1, 44f\n" - "ld1 { v28.h }[2], [x27], #0x2\n" - "tbz x3, #0, 47f\n" - "ld1 { v28.b }[6], [x27]\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "ldr x21, [x12, #0x80]\n" + "smlal v23.4s, v27.4h, v4.4h\n" + "smlal2 v18.4s, v27.8h, v4.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 45f\n" + "ld1 { v28.s }[0], [x21], #0x4\n" + "tbz x8, #1, 44f\n" + "ld1 { v28.h }[2], [x21], #0x2\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[6], [x21]\n" "b 47f\n" "44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x3, #0, 47f\n" - "ld1 { v28.b }[4], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[4], [x21]\n" "b 47f\n" "45:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x3, #1, 46f\n" - "ld1 { v28.h }[0], [x27], #0x2\n" - "tbz x3, #0, 47f\n" - "ld1 { v28.b }[2], [x27]\n" + "tbz x8, #1, 46f\n" + "ld1 { v28.h }[0], [x21], #0x2\n" + "tbz x8, #0, 47f\n" 
+ "ld1 { v28.b }[2], [x21]\n" "b 47f\n" "46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 47f\n" - "ld1 { v28.b }[0], [x27]\n" + "tbz x8, #0, 47f\n" + "ld1 { v28.b }[0], [x21]\n" "47:" // Oddments: Load (2, 3): Bit 2: End - "usubl v28.8h, v28.8b, v22.8b\n" - "ldr x26, [x7, #0x88]\n" - "smlal v19.4s, v28.4h, v7.4h\n" - "add x26, x26, x4\n" - "smlal2 v20.4s, v28.8h, v7.8h\n" - "smlal v18.4s, v28.4h, v1.4h\n" - "smlal2 v21.4s, v28.8h, v1.8h\n" - "tbz x3, #2, 49f\n" - "ld1 { v26.s }[0], [x26], #0x4\n" - "tbz x3, #1, 48f\n" - "ld1 { v26.h }[2], [x26], #0x2\n" - "tbz x3, #0, 51f\n" - "ld1 { v26.b }[6], [x26]\n" + "usubl v28.8h, v28.8b, v12.8b\n" + "ldr x20, [x12, #0x88]\n" + "smlal v9.4s, v28.4h, v7.4h\n" + "smlal2 v16.4s, v28.8h, v7.8h\n" + "smlal v23.4s, v28.4h, v1.4h\n" + "smlal2 v18.4s, v28.8h, v1.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 49f\n" + "ld1 { v26.s }[0], [x20], #0x4\n" + "tbz x8, #1, 48f\n" + "ld1 { v26.h }[2], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[6], [x20]\n" "b 51f\n" "48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x3, #0, 51f\n" - "ld1 { v26.b }[4], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[4], [x20]\n" "b 51f\n" "49:" // Oddments: Load (3, 4): Bit 2: Unset - "tbz x3, #1, 50f\n" - "ld1 { v26.h }[0], [x26], #0x2\n" - "tbz x3, #0, 51f\n" - "ld1 { v26.b }[2], [x26]\n" + "tbz x8, #1, 50f\n" + "ld1 { v26.h }[0], [x20], #0x2\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[2], [x20]\n" "b 51f\n" "50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 51f\n" - "ld1 { v26.b }[0], [x26]\n" + "tbz x8, #0, 51f\n" + "ld1 { v26.b }[0], [x20]\n" "51:" // Oddments: Load (3, 4): Bit 2: End - "usubl v26.8h, v26.8b, v22.8b\n" - "ldr x25, [x7, #0x90]\n" - "smlal v18.4s, v26.4h, v5.4h\n" - "add x25, x25, x4\n" - "smlal2 v21.4s, v26.8h, v5.8h\n" - "tbz x3, #2, 53f\n" - "ld1 { v25.s }[0], [x25], #0x4\n" - "tbz x3, #1, 52f\n" - "ld1 { v25.h }[2], [x25], #0x2\n" - "tbz x3, #0, 55f\n" - "ld1 { v25.b }[6], [x25]\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "ldr x23, [x12, #0x90]\n" + "smlal v23.4s, v26.4h, v5.4h\n" + "smlal2 v18.4s, v26.8h, v5.8h\n" + "add x23, x23, x15\n" + "tbz x8, #2, 53f\n" + "ld1 { v25.s }[0], [x23], #0x4\n" + "tbz x8, #1, 52f\n" + "ld1 { v25.h }[2], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[6], [x23]\n" "b 55f\n" "52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x3, #0, 55f\n" - "ld1 { v25.b }[4], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[4], [x23]\n" "b 55f\n" "53:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x3, #1, 54f\n" - "ld1 { v25.h }[0], [x25], #0x2\n" - "tbz x3, #0, 55f\n" - "ld1 { v25.b }[2], [x25]\n" + "tbz x8, #1, 54f\n" + "ld1 { v25.h }[0], [x23], #0x2\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[2], [x23]\n" "b 55f\n" "54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 55f\n" - "ld1 { v25.b }[0], [x25]\n" + "tbz x8, #0, 55f\n" + "ld1 { v25.b }[0], [x23]\n" "55:" // Oddments: Load (4, 0): Bit 2: End - "usubl v25.8h, v25.8b, v22.8b\n" - "ldr x24, [x7, #0x98]\n" - "smlal v11.4s, v25.4h, v6.4h\n" - "add x24, x24, x4\n" - "smlal2 v17.4s, v25.8h, v6.8h\n" - "tbz x3, #2, 57f\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "ldr x24, [x12, #0x98]\n" + "smlal v22.4s, v25.4h, v6.4h\n" + "smlal2 v21.4s, v25.8h, v6.8h\n" + "add x24, x24, x15\n" + "tbz x8, #2, 57f\n" "ld1 { v29.s }[0], [x24], #0x4\n" - "tbz x3, #1, 56f\n" + "tbz x8, #1, 56f\n" "ld1 { v29.h }[2], [x24], #0x2\n" - "tbz x3, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[6], [x24]\n" "b 59f\n" "56:" // 
Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x3, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[4], [x24]\n" "b 59f\n" "57:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x3, #1, 58f\n" + "tbz x8, #1, 58f\n" "ld1 { v29.h }[0], [x24], #0x2\n" - "tbz x3, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[2], [x24]\n" "b 59f\n" "58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 59f\n" + "tbz x8, #0, 59f\n" "ld1 { v29.b }[0], [x24]\n" "59:" // Oddments: Load (2, 4): Bit 2: End - "usubl v29.8h, v29.8b, v22.8b\n" - "ldr x23, [x7, #0xa0]\n" - "smlal v19.4s, v29.4h, v8.4h\n" - "add x23, x23, x4\n" - "smlal2 v20.4s, v29.8h, v8.8h\n" - "smlal v18.4s, v29.4h, v2.4h\n" - "smlal2 v21.4s, v29.8h, v2.8h\n" - "tbz x3, #2, 61f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x3, #1, 60f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x3, #0, 63f\n" - "ld1 { v27.b }[6], [x23]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "ldr x19, [x12, #0xa0]\n" + "smlal v9.4s, v29.4h, v8.4h\n" + "smlal2 v16.4s, v29.8h, v8.8h\n" + "smlal v23.4s, v29.4h, v2.4h\n" + "smlal2 v18.4s, v29.8h, v2.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 61f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x8, #1, 60f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[6], [x19]\n" "b 63f\n" "60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x3, #0, 63f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[4], [x19]\n" "b 63f\n" "61:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x3, #1, 62f\n" - "ld1 { v27.h }[0], [x23], #0x2\n" - "tbz x3, #0, 63f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x8, #1, 62f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[2], [x19]\n" "b 63f\n" "62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 63f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x8, #0, 63f\n" + "ld1 { v27.b }[0], [x19]\n" "63:" // Oddments: Load (4, 1): Bit 2: End - "usubl v27.8h, v27.8b, v22.8b\n" - "ldr x22, [x7, #0xa8]\n" - "smlal v11.4s, v27.4h, v7.4h\n" - "add x22, x22, x4\n" - "smlal2 v17.4s, v27.8h, v7.8h\n" - "tbz x3, #2, 65f\n" + "usubl v27.8h, v27.8b, v12.8b\n" + "ldr x22, [x12, #0xa8]\n" + "smlal v22.4s, v27.4h, v7.4h\n" + "smlal2 v21.4s, v27.8h, v7.8h\n" + "add x22, x22, x15\n" + "tbz x8, #2, 65f\n" "ld1 { v24.s }[0], [x22], #0x4\n" - "tbz x3, #1, 64f\n" + "tbz x8, #1, 64f\n" "ld1 { v24.h }[2], [x22], #0x2\n" - "tbz x3, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[6], [x22]\n" "b 67f\n" "64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x3, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[4], [x22]\n" "b 67f\n" "65:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x3, #1, 66f\n" + "tbz x8, #1, 66f\n" "ld1 { v24.h }[0], [x22], #0x2\n" - "tbz x3, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[2], [x22]\n" "b 67f\n" "66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 67f\n" + "tbz x8, #0, 67f\n" "ld1 { v24.b }[0], [x22]\n" "67:" // Oddments: Load (3, 2): Bit 2: End - "usubl v24.8h, v24.8b, v22.8b\n" - "ldr x21, [x7, #0xb0]\n" - "smlal v11.4s, v24.4h, v5.4h\n" - "add x21, x21, x4\n" - "smlal2 v17.4s, v24.8h, v5.8h\n" - "smlal v18.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "tbz x3, #2, 69f\n" + "usubl v24.8h, v24.8b, v12.8b\n" + "ldr x21, [x12, #0xb0]\n" + "smlal v22.4s, v24.4h, v5.4h\n" + "smlal2 v21.4s, v24.8h, v5.8h\n" + "smlal v23.4s, v24.4h, v3.4h\n" + "smlal2 v18.4s, v24.8h, v3.8h\n" + "add x21, x21, x15\n" + "tbz x8, #2, 69f\n" "ld1 { v26.s }[0], [x21], #0x4\n" - "tbz x3, #1, 68f\n" + 
"tbz x8, #1, 68f\n" "ld1 { v26.h }[2], [x21], #0x2\n" - "tbz x3, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[6], [x21]\n" "b 71f\n" "68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x3, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[4], [x21]\n" "b 71f\n" "69:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x3, #1, 70f\n" + "tbz x8, #1, 70f\n" "ld1 { v26.h }[0], [x21], #0x2\n" - "tbz x3, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[2], [x21]\n" "b 71f\n" "70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 71f\n" + "tbz x8, #0, 71f\n" "ld1 { v26.b }[0], [x21]\n" "71:" // Oddments: Load (4, 3): Bit 2: End - "usubl v26.8h, v26.8b, v22.8b\n" - "ldr x20, [x7, #0xb8]\n" - "smlal v18.4s, v26.4h, v7.4h\n" - "add x20, x20, x4\n" - "smlal2 v21.4s, v26.8h, v7.8h\n" - "tbz x3, #2, 73f\n" + "usubl v26.8h, v26.8b, v12.8b\n" + "ldr x20, [x12, #0xb8]\n" + "smlal v23.4s, v26.4h, v7.4h\n" + "smlal2 v18.4s, v26.8h, v7.8h\n" + "add x20, x20, x15\n" + "tbz x8, #2, 73f\n" "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x3, #1, 72f\n" + "tbz x8, #1, 72f\n" "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x3, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[6], [x20]\n" "b 75f\n" "72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x3, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[4], [x20]\n" "b 75f\n" "73:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x3, #1, 74f\n" + "tbz x8, #1, 74f\n" "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x3, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[2], [x20]\n" "b 75f\n" "74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 75f\n" + "tbz x8, #0, 75f\n" "ld1 { v25.b }[0], [x20]\n" "75:" // Oddments: Load (4, 2): Bit 2: End - "usubl v25.8h, v25.8b, v22.8b\n" - "ldr x19, [x7, #0xc0]\n" - "smlal v11.4s, v25.4h, v8.4h\n" - "add x19, x19, x4\n" - "smlal2 v17.4s, v25.8h, v8.8h\n" - "smlal v18.4s, v25.4h, v6.4h\n" - "smlal2 v21.4s, v25.8h, v6.8h\n" - "tbz x3, #2, 77f\n" + "usubl v25.8h, v25.8b, v12.8b\n" + "ldr x19, [x12, #0xc0]\n" + "smlal v22.4s, v25.4h, v8.4h\n" + "smlal2 v21.4s, v25.8h, v8.8h\n" + "smlal v23.4s, v25.4h, v6.4h\n" + "smlal2 v18.4s, v25.8h, v6.8h\n" + "add x19, x19, x15\n" + "tbz x8, #2, 77f\n" "ld1 { v29.s }[0], [x19], #0x4\n" - "tbz x3, #1, 76f\n" + "tbz x8, #1, 76f\n" "ld1 { v29.h }[2], [x19], #0x2\n" - "tbz x3, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[6], [x19]\n" "b 79f\n" "76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x3, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[4], [x19]\n" "b 79f\n" "77:" // Oddments: Load (4, 4): Bit 2: Unset - "tbz x3, #1, 78f\n" + "tbz x8, #1, 78f\n" "ld1 { v29.h }[0], [x19], #0x2\n" - "tbz x3, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[2], [x19]\n" "b 79f\n" "78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 79f\n" + "tbz x8, #0, 79f\n" "ld1 { v29.b }[0], [x19]\n" "79:" // Oddments: Load (4, 4): Bit 2: End - "usubl v29.8h, v29.8b, v22.8b\n" - "smlal v18.4s, v29.4h, v8.4h\n" - "smlal2 v21.4s, v29.8h, v8.8h\n" - "tbz x3, #2, 81f\n" - "ld1 { v31.4s }, [x8], #0x10\n" - "ld1 { v30.4s }, [x16], #0x10\n" - "tbz x3, #1, 80f\n" - "ld1 { v23.d }[0], [x8], #0x8\n" - "ld1 { v9.d }[0], [x16], #0x8\n" - "tbz x3, #0, 83f\n" - "ld1 { v23.s }[2], [x8]\n" - "ld1 { v9.s }[2], [x16]\n" + "usubl v29.8h, v29.8b, v12.8b\n" + "smlal v23.4s, v29.4h, v8.4h\n" + "smlal2 v18.4s, v29.8h, v8.8h\n" + "tbz x8, #2, 81f\n" + "ld1 { v19.4s }, [x13], #0x10\n" + "ld1 { v0.4s }, [x11], #0x10\n" + "tbz x8, #1, 80f\n" + "ld1 { v4.d }[0], [x13], #0x8\n" + "ld1 { v31.d }[0], 
[x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[2], [x13]\n" + "ld1 { v31.s }[2], [x11]\n" "b 83f\n" "80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x3, #0, 83f\n" - "ld1 { v23.s }[0], [x8]\n" - "ld1 { v9.s }[0], [x16]\n" + "tbz x8, #0, 83f\n" + "ld1 { v4.s }[0], [x13]\n" + "ld1 { v31.s }[0], [x11]\n" "b 83f\n" "81:" // Oddments: Load requant params: Bit 2: Unset - "tbz x3, #1, 82f\n" - "ld1 { v31.d }[0], [x8], #0x8\n" - "ld1 { v30.d }[0], [x16], #0x8\n" - "tbz x3, #0, 83f\n" - "ld1 { v31.s }[2], [x8]\n" - "ld1 { v30.s }[2], [x16]\n" + "tbz x8, #1, 82f\n" + "ld1 { v19.d }[0], [x13], #0x8\n" + "ld1 { v0.d }[0], [x11], #0x8\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[2], [x13]\n" + "ld1 { v0.s }[2], [x11]\n" "b 83f\n" "82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 83f\n" - "ld1 { v31.s }[0], [x8]\n" - "ld1 { v30.s }[0], [x16]\n" + "tbz x8, #0, 83f\n" + "ld1 { v19.s }[0], [x13]\n" + "ld1 { v0.s }[0], [x11]\n" "83:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v13.4s, v13.4s, v31.4s\n" - "add x15, x15, x6\n" - "sqrdmulh v10.4s, v10.4s, v23.4s\n" - "add x14, x14, x6\n" - "sqrdmulh v19.4s, v19.4s, v31.4s\n" - "add x13, x13, x6\n" - "sqrdmulh v20.4s, v20.4s, v23.4s\n" - "add x12, x12, x6\n" - "sqrdmulh v11.4s, v11.4s, v31.4s\n" - "and v27.16b, v13.16b, v30.16b\n" - "and v7.16b, v10.16b, v9.16b\n" - "and v6.16b, v19.16b, v30.16b\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "sshr v7.4s, v7.4s, #0x1f\n" - "sshr v6.4s, v6.4s, #0x1f\n" - "sqadd v13.4s, v13.4s, v27.4s\n" - "sqadd v10.4s, v10.4s, v7.4s\n" - "sqadd v19.4s, v19.4s, v6.4s\n" - "and v3.16b, v20.16b, v9.16b\n" - "srshl v13.4s, v13.4s, v30.4s\n" - "srshl v10.4s, v10.4s, v9.4s\n" - "srshl v19.4s, v19.4s, v30.4s\n" - "sshr v3.4s, v3.4s, #0x1f\n" - "add v13.4s, v13.4s, v14.4s\n" - "add v10.4s, v10.4s, v14.4s\n" - "add v19.4s, v19.4s, v14.4s\n" - "smin v13.4s, v13.4s, v15.4s\n" - "smin v10.4s, v10.4s, v15.4s\n" - "smin v19.4s, v19.4s, v15.4s\n" - "smax v13.4s, v13.4s, v16.4s\n" - "smax v10.4s, v10.4s, v16.4s\n" - "smax v19.4s, v19.4s, v16.4s\n" - "sqadd v20.4s, v20.4s, v3.4s\n" - "uzp1 v13.16b, v13.16b, v10.16b\n" - "and v28.16b, v11.16b, v30.16b\n" - "uzp1 v13.16b, v13.16b, v13.16b\n" - "srshl v20.4s, v20.4s, v9.4s\n" + "sqdmulh v15.4s, v15.4s, v19.4s\n" + "sqdmulh v9.4s, v9.4s, v19.4s\n" + "add x10, x10, x14\n" + "add x9, x9, x14\n" + "sqdmulh v22.4s, v22.4s, v19.4s\n" + "sqdmulh v23.4s, v23.4s, v19.4s\n" + "add x28, x28, x14\n" + "add x27, x27, x14\n" + "and v30.16b, v15.16b, v0.16b\n" + "sqdmulh v10.4s, v10.4s, v4.4s\n" + "and v28.16b, v9.16b, v0.16b\n" + "sqdmulh v16.4s, v16.4s, v4.4s\n" + "and v29.16b, v22.16b, v0.16b\n" + "sqdmulh v21.4s, v21.4s, v4.4s\n" + "and v20.16b, v23.16b, v0.16b\n" + "sqdmulh v18.4s, v18.4s, v4.4s\n" + "sshr v30.4s, v30.4s, #0x1f\n" + "and v19.16b, v10.16b, v31.16b\n" "sshr v28.4s, v28.4s, #0x1f\n" - "sqrdmulh v17.4s, v17.4s, v23.4s\n" - "sqrdmulh v18.4s, v18.4s, v31.4s\n" - "add v20.4s, v20.4s, v14.4s\n" - "sqadd v11.4s, v11.4s, v28.4s\n" - "and v26.16b, v17.16b, v9.16b\n" - "smin v20.4s, v20.4s, v15.4s\n" - "and v8.16b, v18.16b, v30.16b\n" - "srshl v11.4s, v11.4s, v30.4s\n" - "smax v20.4s, v20.4s, v16.4s\n" + "and v4.16b, v16.16b, v31.16b\n" + "sshr v29.4s, v29.4s, #0x1f\n" + "and v5.16b, v21.16b, v31.16b\n" + "sshr v20.4s, v20.4s, #0x1f\n" + "and v26.16b, v18.16b, v31.16b\n" + "sqadd v15.4s, v15.4s, v30.4s\n" + "sshr v19.4s, v19.4s, #0x1f\n" + "sqadd v9.4s, v9.4s, v28.4s\n" + "sshr v4.4s, v4.4s, #0x1f\n" + "sqadd v22.4s, v22.4s, v29.4s\n" + "sshr 
v5.4s, v5.4s, #0x1f\n" + "sqadd v23.4s, v23.4s, v20.4s\n" "sshr v26.4s, v26.4s, #0x1f\n" - "sshr v8.4s, v8.4s, #0x1f\n" - "uzp1 v19.16b, v19.16b, v20.16b\n" - "add v11.4s, v11.4s, v14.4s\n" - "uzp1 v19.16b, v19.16b, v19.16b\n" - "sqadd v17.4s, v17.4s, v26.4s\n" - "smin v11.4s, v11.4s, v15.4s\n" - "sqadd v18.4s, v18.4s, v8.4s\n" - "sqrdmulh v21.4s, v21.4s, v23.4s\n" - "smax v11.4s, v11.4s, v16.4s\n" - "srshl v17.4s, v17.4s, v9.4s\n" - "srshl v18.4s, v18.4s, v30.4s\n" - "and v27.16b, v21.16b, v9.16b\n" - "add v17.4s, v17.4s, v14.4s\n" - "add v18.4s, v18.4s, v14.4s\n" - "sshr v27.4s, v27.4s, #0x1f\n" - "smin v17.4s, v17.4s, v15.4s\n" - "smin v18.4s, v18.4s, v15.4s\n" - "sqadd v21.4s, v21.4s, v27.4s\n" - "smax v17.4s, v17.4s, v16.4s\n" - "smax v18.4s, v18.4s, v16.4s\n" - "srshl v21.4s, v21.4s, v9.4s\n" - "uzp1 v11.16b, v11.16b, v17.16b\n" - "uzp1 v11.16b, v11.16b, v11.16b\n" - "add v21.4s, v21.4s, v14.4s\n" - "smin v21.4s, v21.4s, v15.4s\n" - "smax v21.4s, v21.4s, v16.4s\n" - "uzp1 v18.16b, v18.16b, v21.16b\n" - "uzp1 v18.16b, v18.16b, v18.16b\n" - "tbz x3, #2, 85f\n" - "st1 { v13.s }[0], [x15], #0x4\n" - "st1 { v19.s }[0], [x14], #0x4\n" - "st1 { v11.s }[0], [x13], #0x4\n" - "st1 { v18.s }[0], [x12], #0x4\n" - "tbz x3, #1, 84f\n" - "st1 { v13.h }[2], [x15], #0x2\n" - "st1 { v19.h }[2], [x14], #0x2\n" - "st1 { v11.h }[2], [x13], #0x2\n" - "st1 { v18.h }[2], [x12], #0x2\n" - "tbz x3, #0, 87f\n" - "st1 { v13.b }[6], [x15], #0x1\n" - "st1 { v19.b }[6], [x14], #0x1\n" - "st1 { v11.b }[6], [x13], #0x1\n" - "st1 { v18.b }[6], [x12], #0x1\n" + "srshl v15.4s, v15.4s, v0.4s\n" + "sqadd v10.4s, v10.4s, v19.4s\n" + "srshl v9.4s, v9.4s, v0.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v22.4s, v22.4s, v0.4s\n" + "sqadd v21.4s, v21.4s, v5.4s\n" + "srshl v23.4s, v23.4s, v0.4s\n" + "sqadd v18.4s, v18.4s, v26.4s\n" + "srshl v10.4s, v10.4s, v31.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v16.4s, v16.4s, v31.4s\n" + "sqxtn v9.4h, v9.4s\n" + "srshl v21.4s, v21.4s, v31.4s\n" + "sqxtn v22.4h, v22.4s\n" + "srshl v18.4s, v18.4s, v31.4s\n" + "sqxtn v23.4h, v23.4s\n" + "sqxtn2 v15.8h, v10.4s\n" + "sqxtn2 v9.8h, v16.4s\n" + "sqxtn2 v22.8h, v21.4s\n" + "sqxtn2 v23.8h, v18.4s\n" + "sqadd v15.8h, v15.8h, v11.8h\n" + "sqadd v9.8h, v9.8h, v11.8h\n" + "sqadd v22.8h, v22.8h, v11.8h\n" + "sqadd v23.8h, v23.8h, v11.8h\n" + "smax v15.8h, v15.8h, v17.8h\n" + "smax v9.8h, v9.8h, v17.8h\n" + "smax v22.8h, v22.8h, v17.8h\n" + "smax v23.8h, v23.8h, v17.8h\n" + "smin v15.8h, v15.8h, v14.8h\n" + "smin v9.8h, v9.8h, v14.8h\n" + "smin v22.8h, v22.8h, v14.8h\n" + "smin v23.8h, v23.8h, v14.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "uzp1 v9.16b, v9.16b, v9.16b\n" + "uzp1 v22.16b, v22.16b, v22.16b\n" + "uzp1 v23.16b, v23.16b, v23.16b\n" + "tbz x8, #2, 85f\n" + "st1 { v15.s }[0], [x10], #0x4\n" + "st1 { v9.s }[0], [x9], #0x4\n" + "st1 { v22.s }[0], [x28], #0x4\n" + "st1 { v23.s }[0], [x27], #0x4\n" + "tbz x8, #1, 84f\n" + "st1 { v15.h }[2], [x10], #0x2\n" + "st1 { v9.h }[2], [x9], #0x2\n" + "st1 { v22.h }[2], [x28], #0x2\n" + "st1 { v23.h }[2], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[6], [x10], #0x1\n" + "st1 { v9.b }[6], [x9], #0x1\n" + "st1 { v22.b }[6], [x28], #0x1\n" + "st1 { v23.b }[6], [x27], #0x1\n" "b 87f\n" "84:" // Oddments: Bit 2: Bit 1: Unset - "tbz x3, #0, 87f\n" - "st1 { v13.b }[4], [x15], #0x1\n" - "st1 { v19.b }[4], [x14], #0x1\n" - "st1 { v11.b }[4], [x13], #0x1\n" - "st1 { v18.b }[4], [x12], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[4], [x10], #0x1\n" + "st1 { v9.b }[4], [x9], #0x1\n" + "st1 { v22.b 
}[4], [x28], #0x1\n" + "st1 { v23.b }[4], [x27], #0x1\n" "b 87f\n" "85:" // Oddments: Bit 2: Unset - "tbz x3, #1, 86f\n" - "st1 { v13.h }[0], [x15], #0x2\n" - "st1 { v19.h }[0], [x14], #0x2\n" - "st1 { v11.h }[0], [x13], #0x2\n" - "st1 { v18.h }[0], [x12], #0x2\n" - "tbz x3, #0, 87f\n" - "st1 { v13.b }[2], [x15], #0x1\n" - "st1 { v19.b }[2], [x14], #0x1\n" - "st1 { v11.b }[2], [x13], #0x1\n" - "st1 { v18.b }[2], [x12], #0x1\n" + "tbz x8, #1, 86f\n" + "st1 { v15.h }[0], [x10], #0x2\n" + "st1 { v9.h }[0], [x9], #0x2\n" + "st1 { v22.h }[0], [x28], #0x2\n" + "st1 { v23.h }[0], [x27], #0x2\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[2], [x10], #0x1\n" + "st1 { v9.b }[2], [x9], #0x1\n" + "st1 { v22.b }[2], [x28], #0x1\n" + "st1 { v23.b }[2], [x27], #0x1\n" "b 87f\n" "86:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x3, #0, 87f\n" - "st1 { v13.b }[0], [x15], #0x1\n" - "st1 { v19.b }[0], [x14], #0x1\n" - "st1 { v11.b }[0], [x13], #0x1\n" - "st1 { v18.b }[0], [x12], #0x1\n" + "tbz x8, #0, 87f\n" + "st1 { v15.b }[0], [x10], #0x1\n" + "st1 { v9.b }[0], [x9], #0x1\n" + "st1 { v22.b }[0], [x28], #0x1\n" + "st1 { v23.b }[0], [x27], #0x1\n" "87:" // Oddments: Bit 2: End - "88:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (¶ms) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index d3d5000d4c..ea70b56349 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
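
Aside from the register renaming visible throughout the hunks above (the channel-count register moves from x3 to x8, and x0-x7 drop out of the clobber list), the "Oddments" blocks keep their structure: when the number of channels is not a multiple of eight, bits 2, 1 and 0 of the remaining count are tested with tbz to select 4-, 2- and/or 1-element ld1/st1 accesses, so a tail of up to seven channels is moved with at most three memory operations. A minimal C++ sketch of that bit-test ladder (the helper name and byte-wise copies are illustrative, not part of the library):

#include <cstdint>
#include <cstring>

// Move an n-byte tail (0 <= n < 8) with at most one 4-byte, one 2-byte
// and one 1-byte access, mirroring the tbz #2 / tbz #1 / tbz #0 ladder
// used by the "Oddments" blocks in the assembly above.
static void copy_tail(uint8_t *dst, const uint8_t *src, unsigned int n)
{
    if (n & 4) { std::memcpy(dst, src, 4); dst += 4; src += 4; }  // tbz xN, #2, ...
    if (n & 2) { std::memcpy(dst, src, 2); dst += 2; src += 2; }  // tbz xN, #1, ...
    if (n & 1) { *dst = *src; }                                   // tbz xN, #0, ...
}
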
 *
 * SPDX-License-Identifier: MIT
 *
@@ -36,37 +36,24 @@ namespace depthwise {

void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);

-struct a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst
+class a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef int8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>;

+  public:
   constexpr static unsigned int kernel_rows = 5;
   constexpr static unsigned int kernel_cols = 5;

   constexpr static unsigned int stride_rows = 1;
   constexpr static unsigned int stride_cols = 1;

-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 6;
-  constexpr static unsigned int input_cols = 6;
-
-  constexpr static parameter_packing_fn pack_parameters = interleave_a64_s8q_5x5_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_a64_s8q_5x5_mla::get_packed_size;
+  a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {}

-  kern_type kernel = a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::None; }

-  a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType kernel = a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl;
+  Parent::KernelType get_kernel(void) const override { return kernel; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
};

}  // namespace depthwise
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 97156137bf..291ffec0bb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
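
The header hunk just above is the refactor applied kernel by kernel across this patch: the old plain struct carried hand-maintained typedefs and tile-size constants, while the new class derives from DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t> and hands the geometry to the base constructor (Parent(2, 2, 5, 5, 1, 1)), so the deleted output_rows/output_cols and input_rows/input_cols constants can be derived once in the base class instead of duplicated per kernel. The removed values are consistent with the usual relation input = kernel + (output - 1) * stride; a small sketch of that relationship (the helper below is hypothetical, the real computation lives in the new base class):

// Input tile extent implied by the output tile, kernel size and stride.
constexpr unsigned int input_extent(unsigned int output, unsigned int kernel, unsigned int stride)
{
    return kernel + (output - 1) * stride;
}

// A 2x2 output tile of a 5x5 stride-1 kernel reads the 6x6 input tile
// that the removed input_rows/input_cols = 6 constants spelled out by hand.
static_assert(input_extent(2, 5, 1) == 6, "matches the deleted constants");
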
* * SPDX-License-Identifier: MIT * @@ -46,7 +46,7 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( struct Params { long unsigned int n_channels; - const int8_t *weights; + const void *weights; const int32_t *bias; const arm_gemm::Requantize32 *requant; const int32_t *const requant_muls; @@ -57,7 +57,7 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( Params( long unsigned int n_channels, const uint8_t *const *inptrs_raw, - const int8_t *const weights, + const void *const weights, const int32_t *const bias, const arm_gemm::Requantize32 &qp, const int32_t *const requant_muls, @@ -111,2096 +111,2070 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( requant_muls, requant_shifts, outptrs); __asm__ __volatile__( - "ldr x4, [%x[params], %[offsetof_Params_n_channels]]\n" - "mov x10, #0x0\n" - "ldr x3, [%x[params], %[offsetof_Params_weights]]\n" - "mov x1, #0x0\n" - "ldr x22, [%x[params], %[offsetof_Params_requant]]\n" - "add x25, %x[params], %[offsetof_Params_inptrs]\n" - "ldr x2, [%x[params], %[offsetof_Params_requant_muls]]\n" - "lsr x19, x4, #0x3\n" - "ldr x5, [%x[params], %[offsetof_Params_requant_shifts]]\n" - "add x13, x22, %[offsetof_Requantize32_a_offset]\n" - "ldr x21, [%x[params], %[offsetof_Params_outptrs]]\n" - "add x20, x22, %[offsetof_Requantize32_b_offset]\n" - "ld1r { v9.16b }, [x13]\n" - "add x8, x22, %[offsetof_Requantize32_c_offset]\n" - "ld1r { v14.16b }, [x20]\n" - "add x20, x22, %[offsetof_Requantize32_minval]\n" - "ld1r { v10.4s }, [x8]\n" - "add x8, x22, %[offsetof_Requantize32_maxval]\n" - "ld1r { v11.4s }, [x20]\n" - "ld1r { v13.4s }, [x8]\n" - "ldp x17, x16, [x21, #0x0]\n" - "ldp x6, x8, [x21, #0x10]\n" - "cbz x19, 3f\n" - "subs x19, x19, #0x1\n" - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q15, [x12, #0x0]\n" - "mov v16.16b, v15.16b\n" - "ldr q18, [x12, #0x10]\n" - "add x12, x12, #0x20\n" - "mov v7.16b, v15.16b\n" - "str x12, [%x[params], %[offsetof_Params_bias]]\n" - "mov v8.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "ldr d1, [x3, #0x8]\n" - "mov v21.16b, v18.16b\n" - "ldr d2, [x3, #0x10]\n" - "mov v17.16b, v18.16b\n" - "ldr d3, [x3, #0x18]\n" - "mov v5.16b, v18.16b\n" - "ldr d4, [x3, #0x20]\n" + "ldr x10, [%x[params], %[offsetof_Params_requant]]\n" + "ldr x0, [%x[params], %[offsetof_Params_n_channels]]\n" + "add x17, x10, %[offsetof_Requantize32_a_offset]\n" + "add x9, x10, %[offsetof_Requantize32_b_offset]\n" + "ldr x25, [%x[params], %[offsetof_Params_outptrs]]\n" + "add x4, x10, %[offsetof_Requantize32_c_offset]\n" + "add x14, x10, %[offsetof_Requantize32_minval]\n" + "ldr x23, [%x[params], %[offsetof_Params_weights]]\n" + "add x5, x10, %[offsetof_Requantize32_maxval]\n" + "ld1r { v9.16b }, [x17]\n" + "ld1r { v14.16b }, [x9]\n" + "lsr x3, x0, #0x3\n" + "ld1r { v18.8h }, [x4]\n" + "ld1r { v11.8h }, [x14]\n" + "mov x24, #0x0\n" + "mov x22, #0x0\n" + "ld1r { v13.8h }, [x5]\n" + "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n" + "add x20, %x[params], %[offsetof_Params_inptrs]\n" + "ldr x1, [%x[params], %[offsetof_Params_requant_shifts]]\n" + "ldp x16, x8, [x25, #0x0]\n" + "ldp x4, x7, [x25, #0x10]\n" + "cbz x3, 3f\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "subs x3, x3, #0x1\n" + "mov v17.16b, v15.16b\n" + "ldr q16, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "ldr d2, [x23, #0x10]\n" + "mov v8.16b, v16.16b\n" + "mov v10.16b, v15.16b\n" + "ldr d3, [x23, #0x18]\n" 
+ "ldr d4, [x23, #0x20]\n" + "mov v7.16b, v16.16b\n" + "mov v6.16b, v15.16b\n" + "ldp x28, x6, [x20, #0x0]\n" + "ldp x26, x25, [x20, #0x10]\n" + "mov v5.16b, v16.16b\n" "ssubl v0.8h, v0.8b, v14.8b\n" - "ldp x28, x27, [x25, #0x0]\n" + "ldp x5, x2, [x20, #0x20]\n" + "ldp x27, x21, [x20, #0x30]\n" "ssubl v1.8h, v1.8b, v14.8b\n" - "ldp x26, x13, [x25, #0x10]\n" "ssubl v2.8h, v2.8b, v14.8b\n" + "ldp x12, x19, [x20, #0x40]\n" + "ldr d31, [x28, x24]\n" "ssubl v3.8h, v3.8b, v14.8b\n" - "ldp x24, x23, [x25, #0x20]\n" "ssubl v4.8h, v4.8b, v14.8b\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "ldr d31, [x28, x10]\n" + "ldr d30, [x6, x24]\n" + "ldr d29, [x26, x24]\n" "usubl v31.8h, v31.8b, v9.8b\n" - "ldr d30, [x27, x10]\n" - "ldr d29, [x26, x10]\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ldr d28, [x13, x10]\n" + "ldr d28, [x25, x24]\n" + "ldr d27, [x5, x24]\n" "usubl v29.8h, v29.8b, v9.8b\n" - "ldr d27, [x24, x10]\n" - "ldr d23, [x23, x10]\n" "usubl v28.8h, v28.8b, v9.8b\n" - "ldr d25, [x22, x10]\n" - "ldr d24, [x21, x10]\n" + "ldr d23, [x2, x24]\n" + "ldr d25, [x27, x24]\n" "usubl v27.8h, v27.8b, v9.8b\n" "usubl v23.8h, v23.8b, v9.8b\n" - "ldr d26, [x20, x10]\n" - "ldr d22, [x0, x10]\n" + "ldr d24, [x21, x24]\n" + "ldr d26, [x12, x24]\n" "usubl v25.8h, v25.8b, v9.8b\n" "usubl v24.8h, v24.8b, v9.8b\n" + "ldr d22, [x19, x24]\n" "usubl v26.8h, v26.8b, v9.8b\n" "usubl v22.8h, v22.8b, v9.8b\n" "beq 2f\n" "1:" // Loop "smlal v15.4s, v31.4h, v0.4h\n" - "ldr x20, [x25, #0x50]\n" - "subs x19, x19, #0x1\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x25, #0x58]\n" - "smlal v16.4s, v30.4h, v0.4h\n" - "ldr x0, [x25, #0x60]\n" - "smlal2 v21.4s, v30.8h, v0.8h\n" - "ldr d31, [x20, x10]\n" - "smlal v7.4s, v29.4h, v0.4h\n" - "ldr x7, [x25, #0x68]\n" - "smlal2 v17.4s, v29.8h, v0.8h\n" - "ldr x26, [x25, #0x70]\n" - "smlal v8.4s, v28.4h, v0.4h\n" - "ldr x23, [x25, #0x78]\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "ldr d0, [x3, #0x28]\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr x19, [x20, #0x50]\n" + "ldr d31, [x19, x24]\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v29.4h, v0.4h\n" + "ldr x15, [x20, #0x58]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "ldr x19, [x20, #0x60]\n" + "ldr x27, [x20, #0x68]\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "ldr x20, [x25, #0x80]\n" - "smlal2 v18.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "smlal v16.4s, v27.4h, v1.4h\n" - "ldr x22, [x25, #0x88]\n" - "smlal2 v21.4s, v27.8h, v1.8h\n" - "ldr x13, [x25, #0x90]\n" - "smlal v7.4s, v28.4h, v1.4h\n" - "ldr x21, [x25, #0x98]\n" - "smlal2 v17.4s, v28.8h, v1.8h\n" - "ldr x14, [x25, #0xa0]\n" - "smlal v8.4s, v23.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "ldr d1, [x3, #0x30]\n" + "ldr x5, [x20, #0x70]\n" + "ldr x11, [x20, #0x78]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "ldr d30, [x15, x24]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ldr d0, [x23, #0x28]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "ldr x12, [x20, #0x80]\n" + "ldr x26, [x20, #0x88]\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "smlal2 v18.4s, v27.8h, v2.8h\n" - "ldr d27, [x0, x10]\n" - "smlal v16.4s, v25.4h, v2.4h\n" - "ldr x0, [x25, #0xb8]\n" - "smlal2 v21.4s, v25.8h, v2.8h\n" - "ldr x15, [x25, #0xc0]\n" - "smlal v7.4s, 
v23.4h, v2.4h\n" - "ldr x9, [x25, #0xc8]\n" - "smlal2 v17.4s, v23.8h, v2.8h\n" - "ldr x27, [x25, #0xd0]\n" - "usubl v31.8h, v31.8b, v9.8b\n" - "ldr x28, [x25, #0xd8]\n" + "ldr x14, [x20, #0x90]\n" + "ldr x15, [x20, #0x98]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "ldr d1, [x23, #0x30]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "ldr x21, [x20, #0xa0]\n" + "ldr x2, [x20, #0xa8]\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr q6, [x2, #0x0]\n" - "smlal2 v18.4s, v25.8h, v3.8h\n" - "ldr d25, [x7, x10]\n" - "smlal v8.4s, v31.4h, v2.4h\n" - "ldr x12, [x25, #0xe0]\n" + "ldr x13, [x20, #0xb0]\n" + "ldr x9, [x20, #0xb8]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" "smlal2 v5.4s, v31.8h, v2.8h\n" - "ldr d2, [x3, #0x38]\n" - "smlal v16.4s, v24.4h, v3.4h\n" - "ldr q19, [x5, #0x0]\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "ldr q20, [x2, #0x10]\n" - "add x2, x2, #0x20\n" - "smlal v7.4s, v31.4h, v3.4h\n" - "ldr q12, [x5, #0x10]\n" - "add x5, x5, #0x20\n" - "smlal2 v17.4s, v31.8h, v3.8h\n" - "usubl v30.8h, v30.8b, v9.8b\n" + "ldr d25, [x27, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "ldr d2, [x23, #0x38]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "ldr x19, [x20, #0xc0]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v18.4s, v24.8h, v4.8h\n" - "ldr d24, [x26, x10]\n" - "smlal v8.4s, v30.4h, v3.4h\n" - "ldr x7, [x25, #0xe8]\n" + "ldr x6, [x20, #0xd0]\n" + "ldr x27, [x20, #0xd8]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" "smlal2 v5.4s, v30.8h, v3.8h\n" - "ldr d3, [x3, #0x40]\n" + "ldr d24, [x5, x24]\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "ldr d3, [x23, #0x40]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "ldr d27, [x11, x24]\n" "usubl v27.8h, v27.8b, v9.8b\n" - "smlal v7.4s, v30.4h, v4.4h\n" - "smlal2 v17.4s, v30.8h, v4.8h\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x10]\n" - "smlal v8.4s, v26.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0x48]\n" - "ssubl v0.8h, v0.8b, v14.8b\n" - "usubl v25.8h, v25.8b, v9.8b\n" - "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v18.4s, v29.8h, v0.8h\n" - "smlal v16.4s, v28.4h, v0.4h\n" - "smlal2 v21.4s, v28.8h, v0.8h\n" - "smlal v7.4s, v22.4h, v0.4h\n" - "smlal2 v17.4s, v22.8h, v0.8h\n" - "smlal v8.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "ldr d0, [x3, #0x50]\n" + "ldr x11, [x20, #0xe0]\n" + "ldr x17, [x20, #0xe8]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0x48]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "ldr x5, [x20, #0xf0]\n" + "ldr q12, [x10, #0x0]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "ldr q19, [x1, #0x0]\n" + "ldr q20, [x10, #0x10]\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x10]\n" - "smlal v16.4s, v23.4h, v1.4h\n" - "ldr x23, [x25, #0xf8]\n" - 
"smlal2 v21.4s, v23.8h, v1.8h\n" - "smlal v7.4s, v25.4h, v1.4h\n" - "smlal2 v17.4s, v25.8h, v1.8h\n" - "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v2.8h, v2.8b, v14.8b\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "smlal v8.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "ldr d1, [x3, #0x58]\n" + "ldr q29, [x1, #0x10]\n" + "subs x3, x3, #0x1\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "ldr d28, [x26, x24]\n" + "ldr d0, [x23, #0x50]\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "add x10, x10, #0x20\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v18.4s, v23.8h, v2.8h\n" - "ldr d23, [x20, x10]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x22, [x25, #0x100]\n" - "smlal2 v21.4s, v31.8h, v2.8h\n" - "smlal v7.4s, v24.4h, v2.4h\n" - "smlal2 v17.4s, v24.8h, v2.8h\n" - "smlal v8.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "ldr d2, [x3, #0x60]\n" - "ssubl v3.8h, v3.8b, v14.8b\n" + "add x1, x1, #0x20\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "ldr d23, [x12, x24]\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" "usubl v23.8h, v23.8b, v9.8b\n" - "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "ldr d1, [x23, #0x58]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "ldr x26, [x20, #0x100]\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v18.4s, v31.8h, v3.8h\n" - "ldr d31, [x13, x10]\n" - "smlal v16.4s, v30.4h, v3.4h\n" - "ldr x20, [x25, #0x108]\n" - "smlal2 v21.4s, v30.8h, v3.8h\n" - "smlal v7.4s, v27.4h, v3.4h\n" - "smlal2 v17.4s, v27.8h, v3.8h\n" - "smlal v8.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "ldr d3, [x3, #0x68]\n" - "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v18.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x10]\n" - "smlal v16.4s, v26.4h, v4.4h\n" - "ldr x13, [x25, #0x110]\n" - "smlal2 v21.4s, v26.8h, v4.8h\n" - "ldr d26, [x14, x10]\n" - "smlal v7.4s, v23.4h, v4.4h\n" - "ldr x21, [x25, #0x118]\n" - "smlal2 v17.4s, v23.8h, v4.8h\n" - "usubl v28.8h, v28.8b, v9.8b\n" - "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "ldr d31, [x14, x24]\n" "usubl v31.8h, v31.8b, v9.8b\n" - "smlal v8.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "ldr d4, [x3, #0x70]\n" - "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v18.4s, v22.8h, v0.8h\n" - "ldr d22, [x0, x10]\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v21.4s, v25.8h, v0.8h\n" - "smlal v7.4s, v31.4h, v0.4h\n" - "smlal2 v17.4s, v31.8h, v0.8h\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "ldr d2, [x23, #0x60]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "ldr x12, [x20, #0x108]\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" + "smlal v15.4s, v30.4h, v4.4h\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "ldr d30, [x15, x24]\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "ldr d3, [x23, #0x68]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "ldr d26, [x21, x24]\n" "usubl v26.8h, v26.8b, v9.8b\n" - "smlal v8.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ldr d0, 
[x3, #0x78]\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" + "smlal v15.4s, v22.4h, v0.4h\n" + "ldr x14, [x20, #0x110]\n" + "ldr x21, [x20, #0x118]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "ldr d4, [x23, #0x70]\n" + "ldr d22, [x9, x24]\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "usubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v18.4s, v25.8h, v1.8h\n" - "ldr d25, [x11, x10]\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v21.4s, v24.8h, v1.8h\n" - "smlal v7.4s, v30.4h, v1.4h\n" - "smlal2 v17.4s, v30.8h, v1.8h\n" - "smlal v8.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "ldr d1, [x3, #0x80]\n" - "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "ldr d25, [x2, x24]\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" "usubl v25.8h, v25.8b, v9.8b\n" - "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "ldr d0, [x23, #0x78]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v18.4s, v24.8h, v2.8h\n" - "ldr d24, [x24, x10]\n" - "smlal v16.4s, v27.4h, v2.4h\n" - "smlal2 v21.4s, v27.8h, v2.8h\n" - "smlal v7.4s, v26.4h, v2.4h\n" - "smlal2 v17.4s, v26.8h, v2.8h\n" - "smlal v8.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "ldr d2, [x3, #0x88]\n" - "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v18.4s, v27.8h, v3.8h\n" - "ldr d27, [x15, x10]\n" - "smlal v16.4s, v23.4h, v3.4h\n" - "smlal2 v21.4s, v23.8h, v3.8h\n" - "smlal v7.4s, v25.4h, v3.4h\n" - "smlal2 v17.4s, v25.8h, v3.8h\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "ldr d24, [x13, x24]\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v4.8h, v4.8b, v14.8b\n" - "usubl v22.8h, v22.8b, v9.8b\n" - "smlal v8.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "ldr d3, [x3, #0x90]\n" - "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v18.4s, v23.8h, v4.8h\n" - "ldr d23, [x9, x10]\n" - "smlal v16.4s, v28.4h, v4.4h\n" - "smlal2 v21.4s, v28.8h, v4.8h\n" - "ldr d28, [x12, x10]\n" - "smlal v7.4s, v24.4h, v4.4h\n" - "smlal2 v17.4s, v24.8h, v4.8h\n" - "smlal v8.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "ldr d4, [x3, #0x98]\n" - "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "ldr d1, [x23, #0x80]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" + "smlal v15.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "ldr d27, [x19, x24]\n" "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "ldr d2, [x23, #0x88]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "smlal v15.4s, v23.4h, v4.4h\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "ldr d23, [x28, x24]\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "ldr d3, [x23, #0x90]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "ldr d28, [x11, x24]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal v15.4s, 
v31.4h, v0.4h\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr d31, [x27, x10]\n" - "smlal v16.4s, v30.4h, v0.4h\n" - "smlal2 v21.4s, v30.8h, v0.8h\n" - "smlal v7.4s, v27.4h, v0.4h\n" - "smlal2 v17.4s, v27.8h, v0.8h\n" - "smlal v8.4s, v23.4h, v0.4h\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "ldr d0, [x3, #0xa0]\n" - "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr d31, [x6, x24]\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" "usubl v31.8h, v31.8b, v9.8b\n" - "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "ldr d4, [x23, #0x98]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v18.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "smlal v16.4s, v26.4h, v1.4h\n" - "smlal2 v21.4s, v26.8h, v1.8h\n" - "smlal v7.4s, v23.4h, v1.4h\n" - "smlal2 v17.4s, v23.8h, v1.8h\n" - "smlal v8.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "ldr d1, [x3, #0xa8]\n" - "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v18.4s, v26.8h, v2.8h\n" - "ldr d26, [x7, x10]\n" - "smlal v16.4s, v25.4h, v2.4h\n" - "smlal2 v21.4s, v25.8h, v2.8h\n" - "smlal v7.4s, v31.4h, v2.4h\n" - "smlal2 v17.4s, v31.8h, v2.8h\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x24]\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ssubl v3.8h, v3.8b, v14.8b\n" - "usubl v28.8h, v28.8b, v9.8b\n" - "smlal v8.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "ldr d2, [x3, #0xb0]\n" - "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v18.4s, v25.8h, v3.8h\n" - "ldr d25, [x26, x10]\n" - "smlal v16.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "smlal v7.4s, v30.4h, v3.4h\n" - "smlal2 v17.4s, v30.8h, v3.8h\n" - "smlal v8.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "ldr d3, [x3, #0xb8]\n" - "ssubl v4.8h, v4.8b, v14.8b\n" - "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "ldr d0, [x23, #0xa0]\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" + "smlal v15.4s, v26.4h, v2.4h\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "ldr d26, [x17, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "ldr d1, [x23, #0xa8]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" + "smlal v15.4s, v25.4h, v3.4h\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "ldr d25, [x5, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "ldr d2, [x23, #0xb0]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v18.4s, v24.8h, v4.8h\n" - "ldr d24, [x23, x10]\n" - "smlal v16.4s, v22.4h, v4.4h\n" - "smlal2 v21.4s, v22.8h, v4.8h\n" - "smlal v7.4s, v28.4h, v4.4h\n" - "smlal2 v17.4s, v28.8h, v4.8h\n" - "smlal v8.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0xc0]\n" - "add x3, x3, #0xc8\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "ldr d24, [x25, x24]\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "ldr d3, [x23, #0xb8]\n" + "ssubl 
v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v18.4s, v27.8h, v0.8h\n" - "ldr d27, [x22, x10]\n" - "smlal v16.4s, v23.4h, v0.4h\n" - "smlal2 v21.4s, v23.8h, v0.8h\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "ldr d27, [x26, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0xc0]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "add x23, x23, #0xc8\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "ldr d25, [x12, x24]\n" "usubl v25.8h, v25.8b, v9.8b\n" - "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v1.8h, v1.8b, v14.8b\n" - "smlal v7.4s, v25.4h, v0.4h\n" - "smlal2 v17.4s, v25.8h, v0.8h\n" - "ldr d25, [x20, x10]\n" - "smlal v8.4s, v24.4h, v0.4h\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" "smlal2 v5.4s, v24.8h, v0.8h\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v18.4s, v23.8h, v1.8h\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "smlal v7.4s, v24.4h, v1.4h\n" - "smlal2 v17.4s, v24.8h, v1.8h\n" - "ldr d24, [x13, x10]\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "ssubl v2.8h, v2.8b, v14.8b\n" - "usubl v25.8h, v25.8b, v9.8b\n" - "smlal v8.4s, v27.4h, v1.4h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "ldr d24, [x14, x24]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" "smlal2 v5.4s, v27.8h, v1.8h\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v18.4s, v31.8h, v2.8h\n" - "smlal v16.4s, v30.4h, v2.4h\n" - "smlal2 v21.4s, v30.8h, v2.8h\n" - "smlal v7.4s, v27.4h, v2.4h\n" - "smlal2 v17.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x10]\n" - "add x10, x10, #0x8\n" - "smlal v8.4s, v25.4h, v2.4h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x24]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" "smlal2 v5.4s, v25.8h, v2.8h\n" - "ssubl v3.8h, v3.8b, v14.8b\n" - "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v4.8h, v4.8b, v14.8b\n" + "add x24, x24, #0x8\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v18.4s, v30.8h, v3.8h\n" - "smlal v16.4s, v28.4h, v3.4h\n" - "smlal2 v21.4s, v28.8h, v3.8h\n" - "smlal v7.4s, v25.4h, v3.4h\n" - "smlal2 v17.4s, v25.8h, v3.8h\n" - "smlal v8.4s, v24.4h, v3.4h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" "smlal2 v5.4s, v24.8h, v3.8h\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v18.4s, v28.8h, v4.8h\n" - "smlal v16.4s, v26.4h, v4.4h\n" - "smlal2 v21.4s, v26.8h, v4.8h\n" - "smlal v7.4s, v24.4h, v4.4h\n" - "smlal2 v17.4s, v24.8h, v4.8h\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "sqrdmulh v18.4s, v18.4s, v20.4s\n" - "smlal v8.4s, v27.4h, v4.4h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal2 v5.4s, v27.8h, v4.8h\n" - "and v28.16b, v15.16b, v19.16b\n" - "and v26.16b, v18.16b, 
v12.16b\n" - "sqrdmulh v16.4s, v16.4s, v6.4s\n" - "sshr v28.4s, v28.4s, #0x1f\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" "sshr v26.4s, v26.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v20.4s\n" - "sqadd v15.4s, v15.4s, v28.4s\n" - "sqadd v18.4s, v18.4s, v26.4s\n" - "and v29.16b, v16.16b, v19.16b\n" - "and v4.16b, v21.16b, v12.16b\n" - "srshl v15.4s, v15.4s, v19.4s\n" - "srshl v18.4s, v18.4s, v12.4s\n" - "sshr v29.4s, v29.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "add v15.4s, v15.4s, v10.4s\n" - "add v18.4s, v18.4s, v10.4s\n" - "sqadd v16.4s, v16.4s, v29.4s\n" - "smin v15.4s, v15.4s, v13.4s\n" - "smin v18.4s, v18.4s, v13.4s\n" - "sqadd v21.4s, v21.4s, v4.4s\n" - "smax v15.4s, v15.4s, v11.4s\n" - "smax v18.4s, v18.4s, v11.4s\n" - "srshl v16.4s, v16.4s, v19.4s\n" - "srshl v21.4s, v21.4s, v12.4s\n" - "uzp1 v15.16b, v15.16b, v18.16b\n" - "sqrdmulh v7.4s, v7.4s, v6.4s\n" - "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x17, x1]\n" - "add v16.4s, v16.4s, v10.4s\n" - "add v21.4s, v21.4s, v10.4s\n" - "and v25.16b, v7.16b, v19.16b\n" - "sqrdmulh v17.4s, v17.4s, v20.4s\n" - "smin v16.4s, v16.4s, v13.4s\n" - "smin v21.4s, v21.4s, v13.4s\n" + "sqadd v17.4s, v17.4s, v22.4s\n" + "sshr v2.4s, v2.4s, #0x1f\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" "sshr v25.4s, v25.4s, #0x1f\n" - "smax v16.4s, v16.4s, v11.4s\n" - "smax v21.4s, v21.4s, v11.4s\n" - "sqadd v7.4s, v7.4s, v25.4s\n" - "and v31.16b, v17.16b, v12.16b\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "sqrdmulh v8.4s, v8.4s, v6.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x16, x1]\n" - "srshl v7.4s, v7.4s, v19.4s\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "and v24.16b, v8.16b, v19.16b\n" - "sqrdmulh v5.4s, v5.4s, v20.4s\n" - "sqadd v17.4s, v17.4s, v31.4s\n" - "add v7.4s, v7.4s, v10.4s\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "and v1.16b, v5.16b, v12.16b\n" - "smin v7.4s, v7.4s, v13.4s\n" - "srshl v17.4s, v17.4s, v12.4s\n" - "sqadd v8.4s, v8.4s, v24.4s\n" - "smax v7.4s, v7.4s, v11.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "add v17.4s, v17.4s, v10.4s\n" - "srshl v8.4s, v8.4s, v19.4s\n" - "sqadd v5.4s, v5.4s, v1.4s\n" - "smin v17.4s, v17.4s, v13.4s\n" - "add v8.4s, v8.4s, v10.4s\n" - "smax v17.4s, v17.4s, v11.4s\n" - "srshl v5.4s, v5.4s, v12.4s\n" - "smin v8.4s, v8.4s, v13.4s\n" - "uzp1 v7.16b, v7.16b, v17.16b\n" - "add v5.4s, v5.4s, v10.4s\n" - "uzp1 v7.16b, v7.16b, v7.16b\n" - "str d7, [x6, x1]\n" - "smax v8.4s, v8.4s, v11.4s\n" - "smin v5.4s, v5.4s, v13.4s\n" - "smax v5.4s, v5.4s, v11.4s\n" - "uzp1 v8.16b, v8.16b, v5.16b\n" - "uzp1 v8.16b, v8.16b, v8.16b\n" - "str d8, [x8, x1]\n" - "add x1, x1, #0x8\n" - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "ldr q15, [x12, #0x0]\n" - "mov v16.16b, v15.16b\n" - "ldr q18, [x12, #0x10]\n" - "add x12, x12, #0x20\n" - "mov v7.16b, v15.16b\n" - "str x12, [%x[params], %[offsetof_Params_bias]]\n" - "mov v8.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "ldr d1, [x3, #0x8]\n" - "mov v21.16b, v18.16b\n" - "ldr d2, [x3, 
#0x10]\n" - "mov v17.16b, v18.16b\n" - "ldr d3, [x3, #0x18]\n" - "mov v5.16b, v18.16b\n" - "ldr d4, [x3, #0x20]\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "str d15, [x16, x22]\n" + "uzp1 v10.16b, v10.16b, v10.16b\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "str d17, [x8, x22]\n" + "str d10, [x4, x22]\n" + "str d6, [x7, x22]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr q15, [x19, #0x0]\n" + "add x22, x22, #0x8\n" + "ldr q16, [x19, #0x10]\n" + "add x19, x19, #0x20\n" + "str x19, [%x[params], %[offsetof_Params_bias]]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "ldr d2, [x23, #0x10]\n" + "mov v17.16b, v15.16b\n" + "mov v8.16b, v16.16b\n" + "ldr d3, [x23, #0x18]\n" + "ldr d4, [x23, #0x20]\n" + "mov v10.16b, v15.16b\n" + "mov v7.16b, v16.16b\n" + "ldp x28, x6, [x20, #0x0]\n" + "ldp x26, x25, [x20, #0x10]\n" + "mov v6.16b, v15.16b\n" + "mov v5.16b, v16.16b\n" + "ldp x5, x2, [x20, #0x20]\n" + "ldp x27, x21, [x20, #0x30]\n" "ssubl v0.8h, v0.8b, v14.8b\n" - "ldp x28, x27, [x25, #0x0]\n" "ssubl v1.8h, v1.8b, v14.8b\n" - "ldp x26, x13, [x25, #0x10]\n" + "ldp x12, x19, [x20, #0x40]\n" + "ldr d31, [x28, x24]\n" "ssubl v2.8h, v2.8b, v14.8b\n" "ssubl v3.8h, v3.8b, v14.8b\n" - "ldp x24, x23, [x25, #0x20]\n" + "ldr d30, [x6, x24]\n" + "ldr d29, [x26, x24]\n" "ssubl v4.8h, v4.8b, v14.8b\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "ldr d31, [x28, x10]\n" "usubl v31.8h, v31.8b, v9.8b\n" - "ldr d30, [x27, x10]\n" - "ldr d29, [x26, x10]\n" + "ldr d28, [x25, x24]\n" + "ldr d27, [x5, x24]\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ldr d28, [x13, x10]\n" "usubl v29.8h, v29.8b, v9.8b\n" - "ldr d27, [x24, x10]\n" - "ldr d23, [x23, x10]\n" + "ldr d23, [x2, x24]\n" + "ldr d25, [x27, x24]\n" "usubl v28.8h, v28.8b, v9.8b\n" - "ldr d25, [x22, x10]\n" - "ldr d24, [x21, x10]\n" "usubl v27.8h, v27.8b, v9.8b\n" + "ldr d24, [x21, x24]\n" + "ldr d26, [x12, x24]\n" "usubl v23.8h, v23.8b, v9.8b\n" - "ldr d26, [x20, x10]\n" - "ldr d22, [x0, x10]\n" "usubl v25.8h, v25.8b, v9.8b\n" + "ldr d22, [x19, x24]\n" "usubl v24.8h, v24.8b, v9.8b\n" "usubl v26.8h, v26.8b, v9.8b\n" "usubl v22.8h, v22.8b, v9.8b\n" "bgt 1b\n" "2:" // Tail "smlal v15.4s, v31.4h, v0.4h\n" - "ldr x20, [x25, #0x50]\n" - "tst x4, #0x7\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr x28, [x25, #0x58]\n" - "smlal v16.4s, v30.4h, v0.4h\n" - "ldr x0, [x25, #0x60]\n" - "smlal2 v21.4s, v30.8h, v0.8h\n" - "ldr d31, [x20, x10]\n" - "smlal v7.4s, v29.4h, v0.4h\n" - "ldr x7, [x25, #0x68]\n" - 
"smlal2 v17.4s, v29.8h, v0.8h\n" - "ldr x26, [x25, #0x70]\n" - "smlal v8.4s, v28.4h, v0.4h\n" - "ldr x23, [x25, #0x78]\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "ldr d0, [x3, #0x28]\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr x19, [x20, #0x50]\n" + "ldr d31, [x19, x24]\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v29.4h, v0.4h\n" + "ldr x15, [x20, #0x58]\n" + "usubl v31.8h, v31.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "ldr x19, [x20, #0x60]\n" + "ldr x27, [x20, #0x68]\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "ldr x20, [x25, #0x80]\n" - "smlal2 v18.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "smlal v16.4s, v27.4h, v1.4h\n" - "ldr x22, [x25, #0x88]\n" - "smlal2 v21.4s, v27.8h, v1.8h\n" - "ldr x13, [x25, #0x90]\n" - "smlal v7.4s, v28.4h, v1.4h\n" - "ldr x21, [x25, #0x98]\n" - "smlal2 v17.4s, v28.8h, v1.8h\n" - "ldr x14, [x25, #0xa0]\n" - "smlal v8.4s, v23.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "smlal2 v5.4s, v23.8h, v1.8h\n" - "ldr d1, [x3, #0x30]\n" + "ldr x5, [x20, #0x70]\n" + "ldr x11, [x20, #0x78]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "ldr d30, [x15, x24]\n" + "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v17.4s, v27.4h, v1.4h\n" + "smlal v10.4s, v28.4h, v1.4h\n" + "ldr d0, [x23, #0x28]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "ldr x12, [x20, #0x80]\n" + "ldr x26, [x20, #0x88]\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "smlal2 v18.4s, v27.8h, v2.8h\n" - "ldr d27, [x0, x10]\n" - "smlal v16.4s, v25.4h, v2.4h\n" - "ldr x0, [x25, #0xb8]\n" - "smlal2 v21.4s, v25.8h, v2.8h\n" - "ldr x15, [x25, #0xc0]\n" - "smlal v7.4s, v23.4h, v2.4h\n" - "ldr x9, [x25, #0xc8]\n" - "smlal2 v17.4s, v23.8h, v2.8h\n" - "ldr x27, [x25, #0xd0]\n" - "usubl v31.8h, v31.8b, v9.8b\n" - "ldr x28, [x25, #0xd8]\n" + "ldr x14, [x20, #0x90]\n" + "ldr x15, [x20, #0x98]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal2 v5.4s, v23.8h, v1.8h\n" + "ldr d27, [x19, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "ldr d1, [x23, #0x30]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "ldr x21, [x20, #0xa0]\n" + "ldr x2, [x20, #0xa8]\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" "smlal v15.4s, v25.4h, v3.4h\n" - "ldr x12, [x25, #0xe0]\n" - "smlal2 v18.4s, v25.8h, v3.8h\n" - "ldr d25, [x7, x10]\n" - "smlal v8.4s, v31.4h, v2.4h\n" - "ldr x7, [x25, #0xe8]\n" + "ldr x13, [x20, #0xb0]\n" + "ldr x9, [x20, #0xb8]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" "smlal2 v5.4s, v31.8h, v2.8h\n" - "ldr d2, [x3, #0x38]\n" - "smlal v16.4s, v24.4h, v3.4h\n" - "ldr q6, [x2, #0x0]\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "ldr q19, [x5, #0x0]\n" - "smlal v7.4s, v31.4h, v3.4h\n" - "ldr q20, [x2, #0x10]\n" - "add x2, x2, #0x20\n" - "smlal2 v17.4s, v31.8h, v3.8h\n" - "ldr q12, [x5, #0x10]\n" - "add x5, x5, #0x20\n" - "usubl v30.8h, v30.8b, v9.8b\n" + "ldr d25, [x27, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "ldr d2, [x23, #0x38]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "ldr x19, [x20, #0xc0]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v18.4s, v24.8h, v4.8h\n" - "ldr d24, [x26, x10]\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "ldr x26, 
[x25, #0xf0]\n" - "smlal v8.4s, v30.4h, v3.4h\n" + "ldr x6, [x20, #0xd0]\n" + "ldr x27, [x20, #0xd8]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" "smlal2 v5.4s, v30.8h, v3.8h\n" - "ldr d3, [x3, #0x40]\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "ldr d27, [x23, x10]\n" - "smlal v7.4s, v30.4h, v4.4h\n" - "ldr x23, [x25, #0xf8]\n" - "smlal2 v17.4s, v30.8h, v4.8h\n" - "smlal v8.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0x48]\n" - "ssubl v0.8h, v0.8b, v14.8b\n" - "usubl v25.8h, v25.8b, v9.8b\n" - "ssubl v1.8h, v1.8b, v14.8b\n" - "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v18.4s, v29.8h, v0.8h\n" - "smlal v16.4s, v28.4h, v0.4h\n" - "smlal2 v21.4s, v28.8h, v0.8h\n" - "smlal v7.4s, v22.4h, v0.4h\n" - "smlal2 v17.4s, v22.8h, v0.8h\n" - "smlal v8.4s, v25.4h, v0.4h\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" - "ldr d0, [x3, #0x50]\n" - "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "ldr d28, [x22, x10]\n" - "smlal v16.4s, v23.4h, v1.4h\n" - "ldr x22, [x25, #0x100]\n" - "smlal2 v21.4s, v23.8h, v1.8h\n" - "smlal v7.4s, v25.4h, v1.4h\n" - "smlal2 v17.4s, v25.8h, v1.8h\n" + "ldr d24, [x5, x24]\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v2.8h, v2.8b, v14.8b\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "smlal v8.4s, v24.4h, v1.4h\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" - "ldr d1, [x3, #0x58]\n" - "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v18.4s, v23.8h, v2.8h\n" - "ldr d23, [x20, x10]\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "ldr x20, [x25, #0x108]\n" - "smlal2 v21.4s, v31.8h, v2.8h\n" - "smlal v7.4s, v24.4h, v2.4h\n" - "smlal2 v17.4s, v24.8h, v2.8h\n" - "smlal v8.4s, v27.4h, v2.4h\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" - "ldr d2, [x3, #0x60]\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "ldr d3, [x23, #0x40]\n" "ssubl v3.8h, v3.8b, v14.8b\n" - "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "ldr d27, [x11, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" + "smlal v15.4s, v29.4h, v0.4h\n" + "ldr x11, [x20, #0xe0]\n" + "ldr x17, [x20, #0xe8]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0x48]\n" "ssubl v4.8h, v4.8b, v14.8b\n" - "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v18.4s, v31.8h, v3.8h\n" - "ldr d31, [x13, x10]\n" - "smlal v16.4s, v30.4h, v3.4h\n" - "ldr x13, [x25, #0x110]\n" - "smlal2 v21.4s, v30.8h, v3.8h\n" - "smlal v7.4s, v27.4h, v3.4h\n" - "smlal2 v17.4s, v27.8h, v3.8h\n" - "smlal v8.4s, v23.4h, v3.4h\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" - "ldr d3, [x3, #0x68]\n" - "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v18.4s, v30.8h, v4.8h\n" - "ldr d30, [x21, x10]\n" - "smlal v16.4s, v26.4h, v4.4h\n" - "ldr x21, [x25, #0x118]\n" - "smlal2 v21.4s, v26.8h, v4.8h\n" - "ldr d26, [x14, x10]\n" - "smlal v7.4s, v23.4h, v4.4h\n" - "smlal2 v17.4s, v23.8h, v4.8h\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "ldr x5, [x20, #0xf0]\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "ldr q12, [x10, #0x0]\n" + "ldr q19, [x1, #0x0]\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" + "smlal v15.4s, v28.4h, v1.4h\n" + "ldr q20, [x10, #0x10]\n" + "ldr q29, [x1, #0x10]\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "ldr d28, [x26, x24]\n" + "ldr d0, [x23, #0x50]\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal v10.4s, v25.4h, v1.4h\n" "usubl v28.8h, v28.8b, v9.8b\n" + "ldr x26, [x20, #0x100]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 
v8.4s, v23.8h, v1.8h\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "tst x0, #0x7\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" + "smlal v15.4s, v23.4h, v2.4h\n" + "add x10, x10, #0x20\n" + "add x1, x1, #0x20\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "ldr d23, [x12, x24]\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "ldr d1, [x23, #0x58]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "ldr x12, [x20, #0x108]\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" + "smlal v15.4s, v31.4h, v3.4h\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "ldr d31, [x14, x24]\n" "usubl v31.8h, v31.8b, v9.8b\n" - "smlal v8.4s, v28.4h, v4.4h\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" - "ldr d4, [x3, #0x70]\n" - "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v18.4s, v22.8h, v0.8h\n" - "ldr d22, [x0, x10]\n" - "smlal v16.4s, v25.4h, v0.4h\n" - "smlal2 v21.4s, v25.8h, v0.8h\n" - "smlal v7.4s, v31.4h, v0.4h\n" - "smlal2 v17.4s, v31.8h, v0.8h\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "ldr d2, [x23, #0x60]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "ldr x14, [x20, #0x110]\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" + "smlal v15.4s, v30.4h, v4.4h\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "ldr d30, [x15, x24]\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "ldr d3, [x23, #0x68]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "ldr d26, [x21, x24]\n" "usubl v26.8h, v26.8b, v9.8b\n" - "smlal v8.4s, v30.4h, v0.4h\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" - "ldr d0, [x3, #0x78]\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" + "smlal v15.4s, v22.4h, v0.4h\n" + "ldr x21, [x20, #0x118]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "ldr d4, [x23, #0x70]\n" + "ldr d22, [x9, x24]\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "usubl v22.8h, v22.8b, v9.8b\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v18.4s, v25.8h, v1.8h\n" - "ldr d25, [x11, x10]\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v21.4s, v24.8h, v1.8h\n" - "smlal v7.4s, v30.4h, v1.4h\n" - "smlal2 v17.4s, v30.8h, v1.8h\n" - "smlal v8.4s, v26.4h, v1.4h\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" - "ldr d1, [x3, #0x80]\n" - "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "ldr d25, [x2, x24]\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" "usubl v25.8h, v25.8b, v9.8b\n" - "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "ldr d0, [x23, #0x78]\n" + "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v18.4s, v24.8h, v2.8h\n" - "ldr d24, [x24, x10]\n" - "smlal v16.4s, v27.4h, v2.4h\n" - "smlal2 v21.4s, v27.8h, v2.8h\n" - "smlal v7.4s, v26.4h, v2.4h\n" - "smlal2 v17.4s, v26.8h, v2.8h\n" - "smlal v8.4s, v25.4h, v2.4h\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" - "ldr d2, [x3, #0x88]\n" - "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v18.4s, v27.8h, v3.8h\n" - "ldr d27, [x15, x10]\n" - "smlal v16.4s, v23.4h, v3.4h\n" - "smlal2 v21.4s, v23.8h, v3.8h\n" - 
"smlal v7.4s, v25.4h, v3.4h\n" - "smlal2 v17.4s, v25.8h, v3.8h\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "ldr d24, [x13, x24]\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v4.8h, v4.8b, v14.8b\n" - "usubl v22.8h, v22.8b, v9.8b\n" - "smlal v8.4s, v24.4h, v3.4h\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" - "ldr d3, [x3, #0x90]\n" - "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v18.4s, v23.8h, v4.8h\n" - "ldr d23, [x9, x10]\n" - "smlal v16.4s, v28.4h, v4.4h\n" - "smlal2 v21.4s, v28.8h, v4.8h\n" - "ldr d28, [x12, x10]\n" - "smlal v7.4s, v24.4h, v4.4h\n" - "smlal2 v17.4s, v24.8h, v4.8h\n" - "smlal v8.4s, v22.4h, v4.4h\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" - "ldr d4, [x3, #0x98]\n" - "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "ldr d1, [x23, #0x80]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" + "smlal v15.4s, v27.4h, v3.4h\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "ldr d27, [x19, x24]\n" "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "ldr d2, [x23, #0x88]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "smlal v15.4s, v23.4h, v4.4h\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "ldr d23, [x28, x24]\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" "usubl v23.8h, v23.8b, v9.8b\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "ldr d3, [x23, #0x90]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "ldr d28, [x11, x24]\n" + "usubl v28.8h, v28.8b, v9.8b\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "ldr d31, [x27, x10]\n" - "smlal v16.4s, v30.4h, v0.4h\n" - "smlal2 v21.4s, v30.8h, v0.8h\n" - "smlal v7.4s, v27.4h, v0.4h\n" - "smlal2 v17.4s, v27.8h, v0.8h\n" - "smlal v8.4s, v23.4h, v0.4h\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" - "ldr d0, [x3, #0xa0]\n" - "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "ldr d31, [x6, x24]\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" "usubl v31.8h, v31.8b, v9.8b\n" - "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "ldr d4, [x23, #0x98]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v18.4s, v30.8h, v1.8h\n" - "ldr d30, [x28, x10]\n" - "smlal v16.4s, v26.4h, v1.4h\n" - "smlal2 v21.4s, v26.8h, v1.8h\n" - "smlal v7.4s, v23.4h, v1.4h\n" - "smlal2 v17.4s, v23.8h, v1.8h\n" - "smlal v8.4s, v31.4h, v1.4h\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" - "ldr d1, [x3, #0xa8]\n" - "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 v18.4s, v26.8h, v2.8h\n" - "ldr d26, [x7, x10]\n" - "smlal v16.4s, v25.4h, v2.4h\n" - "smlal2 v21.4s, v25.8h, v2.8h\n" - "smlal v7.4s, v31.4h, v2.4h\n" - "smlal2 v17.4s, v31.8h, v2.8h\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "ldr d30, [x27, x24]\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ssubl v3.8h, v3.8b, v14.8b\n" - "usubl v28.8h, v28.8b, v9.8b\n" - "smlal v8.4s, v30.4h, v2.4h\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" - "ldr d2, [x3, #0xb0]\n" - "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v18.4s, v25.8h, v3.8h\n" - "ldr d25, [x26, x10]\n" - "smlal v16.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" 
- "smlal v7.4s, v30.4h, v3.4h\n" - "smlal2 v17.4s, v30.8h, v3.8h\n" - "smlal v8.4s, v28.4h, v3.4h\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" - "ldr d3, [x3, #0xb8]\n" - "ssubl v4.8h, v4.8b, v14.8b\n" - "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "ldr d0, [x23, #0xa0]\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" + "smlal v15.4s, v26.4h, v2.4h\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "ldr d26, [x17, x24]\n" + "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "ldr d1, [x23, #0xa8]\n" + "ssubl v1.8h, v1.8b, v14.8b\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" + "smlal v15.4s, v25.4h, v3.4h\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "ldr d25, [x5, x24]\n" + "usubl v25.8h, v25.8b, v9.8b\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "ldr d2, [x23, #0xb0]\n" + "ssubl v2.8h, v2.8b, v14.8b\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v18.4s, v24.8h, v4.8h\n" - "ldr d24, [x23, x10]\n" - "smlal v16.4s, v22.4h, v4.4h\n" - "smlal2 v21.4s, v22.8h, v4.8h\n" - "smlal v7.4s, v28.4h, v4.4h\n" - "smlal2 v17.4s, v28.8h, v4.8h\n" - "smlal v8.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" - "ldr d4, [x3, #0xc0]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "ldr d24, [x25, x24]\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "ldr d3, [x23, #0xb8]\n" + "ssubl v3.8h, v3.8b, v14.8b\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v18.4s, v27.8h, v0.8h\n" - "ldr d27, [x22, x10]\n" - "smlal v16.4s, v23.4h, v0.4h\n" - "smlal2 v21.4s, v23.8h, v0.8h\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "ldr d27, [x26, x24]\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d4, [x23, #0xc0]\n" + "ssubl v4.8h, v4.8b, v14.8b\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "ldr d25, [x12, x24]\n" "usubl v25.8h, v25.8b, v9.8b\n" - "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v1.8h, v1.8b, v14.8b\n" - "smlal v7.4s, v25.4h, v0.4h\n" - "smlal2 v17.4s, v25.8h, v0.8h\n" - "ldr d25, [x20, x10]\n" - "smlal v8.4s, v24.4h, v0.4h\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" "smlal2 v5.4s, v24.8h, v0.8h\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v18.4s, v23.8h, v1.8h\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "smlal v7.4s, v24.4h, v1.4h\n" - "smlal2 v17.4s, v24.8h, v1.8h\n" - "ldr d24, [x13, x10]\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "ssubl v2.8h, v2.8b, v14.8b\n" - "usubl v25.8h, v25.8b, v9.8b\n" - "smlal v8.4s, v27.4h, v1.4h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "ldr d24, [x14, x24]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "usubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" "smlal2 v5.4s, v27.8h, v1.8h\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v18.4s, v31.8h, v2.8h\n" - "smlal v16.4s, v30.4h, v2.4h\n" - "smlal2 v21.4s, v30.8h, v2.8h\n" - "smlal v7.4s, v27.4h, v2.4h\n" - 
"smlal2 v17.4s, v27.8h, v2.8h\n" - "ldr d27, [x21, x10]\n" - "add x10, x10, #0x8\n" - "smlal v8.4s, v25.4h, v2.4h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "ldr d27, [x21, x24]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "usubl v27.8h, v27.8b, v9.8b\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" "smlal2 v5.4s, v25.8h, v2.8h\n" - "ssubl v3.8h, v3.8b, v14.8b\n" - "usubl v24.8h, v24.8b, v9.8b\n" - "ssubl v4.8h, v4.8b, v14.8b\n" + "add x24, x24, #0x8\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v18.4s, v30.8h, v3.8h\n" - "smlal v16.4s, v28.4h, v3.4h\n" - "smlal2 v21.4s, v28.8h, v3.8h\n" - "smlal v7.4s, v25.4h, v3.4h\n" - "smlal2 v17.4s, v25.8h, v3.8h\n" - "smlal v8.4s, v24.4h, v3.4h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" "smlal2 v5.4s, v24.8h, v3.8h\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v18.4s, v28.8h, v4.8h\n" - "smlal v16.4s, v26.4h, v4.4h\n" - "smlal2 v21.4s, v26.8h, v4.8h\n" - "smlal v7.4s, v24.4h, v4.4h\n" - "smlal2 v17.4s, v24.8h, v4.8h\n" - "usubl v27.8h, v27.8b, v9.8b\n" - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "sqrdmulh v18.4s, v18.4s, v20.4s\n" - "smlal v8.4s, v27.4h, v4.4h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" "smlal2 v5.4s, v27.8h, v4.8h\n" - "and v28.16b, v15.16b, v19.16b\n" - "and v26.16b, v18.16b, v12.16b\n" - "sqrdmulh v16.4s, v16.4s, v6.4s\n" - "sshr v28.4s, v28.4s, #0x1f\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" "sshr v26.4s, v26.4s, #0x1f\n" - "sqrdmulh v21.4s, v21.4s, v20.4s\n" - "sqadd v15.4s, v15.4s, v28.4s\n" - "sqadd v18.4s, v18.4s, v26.4s\n" - "and v29.16b, v16.16b, v19.16b\n" - "and v4.16b, v21.16b, v12.16b\n" - "srshl v15.4s, v15.4s, v19.4s\n" - "srshl v18.4s, v18.4s, v12.4s\n" - "sshr v29.4s, v29.4s, #0x1f\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "add v15.4s, v15.4s, v10.4s\n" - "add v18.4s, v18.4s, v10.4s\n" - "sqadd v16.4s, v16.4s, v29.4s\n" - "smin v15.4s, v15.4s, v13.4s\n" - "smin v18.4s, v18.4s, v13.4s\n" - "sqadd v21.4s, v21.4s, v4.4s\n" - "smax v15.4s, v15.4s, v11.4s\n" - "smax v18.4s, v18.4s, v11.4s\n" - "srshl v16.4s, v16.4s, v19.4s\n" - "srshl v21.4s, v21.4s, v12.4s\n" - "uzp1 v15.16b, v15.16b, v18.16b\n" - "sqrdmulh v7.4s, v7.4s, v6.4s\n" - "uzp1 v15.16b, v15.16b, v15.16b\n" - "str d15, [x17, x1]\n" - "add v16.4s, v16.4s, v10.4s\n" - "add v21.4s, v21.4s, v10.4s\n" - "and v25.16b, v7.16b, v19.16b\n" - "sqrdmulh v17.4s, v17.4s, v20.4s\n" - "smin v16.4s, v16.4s, v13.4s\n" - "smin v21.4s, v21.4s, v13.4s\n" + "sqadd v17.4s, v17.4s, v22.4s\n" + "sshr v2.4s, v2.4s, #0x1f\n" + "sqadd v10.4s, v10.4s, 
v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" "sshr v25.4s, v25.4s, #0x1f\n" - "smax v16.4s, v16.4s, v11.4s\n" - "smax v21.4s, v21.4s, v11.4s\n" - "sqadd v7.4s, v7.4s, v25.4s\n" - "and v31.16b, v17.16b, v12.16b\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "sqrdmulh v8.4s, v8.4s, v6.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "str d16, [x16, x1]\n" - "srshl v7.4s, v7.4s, v19.4s\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "and v24.16b, v8.16b, v19.16b\n" - "sqrdmulh v5.4s, v5.4s, v20.4s\n" - "sqadd v17.4s, v17.4s, v31.4s\n" - "add v7.4s, v7.4s, v10.4s\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "and v1.16b, v5.16b, v12.16b\n" - "smin v7.4s, v7.4s, v13.4s\n" - "srshl v17.4s, v17.4s, v12.4s\n" - "sqadd v8.4s, v8.4s, v24.4s\n" - "smax v7.4s, v7.4s, v11.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "add v17.4s, v17.4s, v10.4s\n" - "srshl v8.4s, v8.4s, v19.4s\n" - "sqadd v5.4s, v5.4s, v1.4s\n" - "smin v17.4s, v17.4s, v13.4s\n" - "add v8.4s, v8.4s, v10.4s\n" - "smax v17.4s, v17.4s, v11.4s\n" - "srshl v5.4s, v5.4s, v12.4s\n" - "smin v8.4s, v8.4s, v13.4s\n" - "uzp1 v7.16b, v7.16b, v17.16b\n" - "add v5.4s, v5.4s, v10.4s\n" - "uzp1 v7.16b, v7.16b, v7.16b\n" - "str d7, [x6, x1]\n" - "smax v8.4s, v8.4s, v11.4s\n" - "smin v5.4s, v5.4s, v13.4s\n" - "smax v5.4s, v5.4s, v11.4s\n" - "uzp1 v8.16b, v8.16b, v5.16b\n" - "uzp1 v8.16b, v8.16b, v8.16b\n" - "str d8, [x8, x1]\n" - "add x1, x1, #0x8\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "str d15, [x16, x22]\n" + "uzp1 v10.16b, v10.16b, v10.16b\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "str d17, [x8, x22]\n" + "str d10, [x4, x22]\n" + "str d6, [x7, x22]\n" + "add x22, x22, #0x8\n" "beq 124f\n" - "add x3, x3, #0xc8\n" + "add x23, x23, #0xc8\n" "3:" // Oddments - "ldr x12, [%x[params], %[offsetof_Params_bias]]\n" - "tbz x4, #2, 5f\n" - "ld1 { v15.4s }, [x12], #0x10\n" - "tbz x4, #1, 4f\n" - "ld1 { v18.d }[0], [x12], #0x8\n" - "tbz x4, #0, 7f\n" - "ld1 { v18.s }[2], [x12]\n" + "ldr x19, [%x[params], %[offsetof_Params_bias]]\n" + "tbz x0, #2, 5f\n" + "ld1 { v15.4s }, [x19], #0x10\n" + "tbz x0, #1, 4f\n" + "ld1 { v16.d }[0], [x19], #0x8\n" + "tbz x0, #0, 7f\n" + "ld1 { v16.s }[2], [x19]\n" "b 7f\n" "4:" // Oddments: Load bias: Bit 2: Bit 1: Unset - "tbz x4, #0, 7f\n" - "ld1 { v18.s }[0], [x12]\n" + "tbz x0, #0, 7f\n" + "ld1 { v16.s }[0], [x19]\n" "b 7f\n" "5:" // Oddments: Load bias: Bit 2: Unset - "tbz x4, #1, 6f\n" - "ld1 { v15.d }[0], [x12], #0x8\n" - "tbz x4, #0, 7f\n" - "ld1 { v15.s }[2], [x12]\n" + "tbz x0, #1, 6f\n" + 
"ld1 { v15.d }[0], [x19], #0x8\n" + "tbz x0, #0, 7f\n" + "ld1 { v15.s }[2], [x19]\n" "b 7f\n" "6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 7f\n" - "ld1 { v15.s }[0], [x12]\n" + "tbz x0, #0, 7f\n" + "ld1 { v15.s }[0], [x19]\n" "7:" // Oddments: Load bias: Bit 2: End - "mov v16.16b, v15.16b\n" - "ldr d0, [x3, #0x0]\n" - "mov v21.16b, v18.16b\n" - "ldr d1, [x3, #0x8]\n" - "mov v7.16b, v15.16b\n" - "ldr d2, [x3, #0x10]\n" - "mov v17.16b, v18.16b\n" - "ldr d3, [x3, #0x18]\n" - "mov v8.16b, v15.16b\n" - "ldr d4, [x3, #0x20]\n" - "mov v5.16b, v18.16b\n" - "ldp x28, x27, [x25, #0x0]\n" + "ldr d0, [x23, #0x0]\n" + "ldr d1, [x23, #0x8]\n" + "mov v17.16b, v15.16b\n" + "mov v8.16b, v16.16b\n" + "ldr d2, [x23, #0x10]\n" + "ldr d3, [x23, #0x18]\n" + "mov v10.16b, v15.16b\n" + "mov v7.16b, v16.16b\n" + "ldr d4, [x23, #0x20]\n" + "ldp x28, x6, [x20, #0x0]\n" + "mov v6.16b, v15.16b\n" + "mov v5.16b, v16.16b\n" + "ldp x26, x25, [x20, #0x10]\n" + "ldp x5, x2, [x20, #0x20]\n" "ssubl v0.8h, v0.8b, v14.8b\n" - "ldp x26, x13, [x25, #0x10]\n" "ssubl v1.8h, v1.8b, v14.8b\n" + "ldp x27, x21, [x20, #0x30]\n" + "ldp x12, x19, [x20, #0x40]\n" "ssubl v2.8h, v2.8b, v14.8b\n" - "ldp x24, x23, [x25, #0x20]\n" "ssubl v3.8h, v3.8b, v14.8b\n" "ssubl v4.8h, v4.8b, v14.8b\n" - "ldp x22, x21, [x25, #0x30]\n" - "ldp x20, x0, [x25, #0x40]\n" - "add x28, x28, x10\n" - "add x27, x27, x10\n" - "add x26, x26, x10\n" - "add x13, x13, x10\n" - "add x24, x24, x10\n" - "add x23, x23, x10\n" - "add x22, x22, x10\n" - "add x21, x21, x10\n" - "add x20, x20, x10\n" - "add x0, x0, x10\n" - "tbz x4, #2, 9f\n" + "add x28, x28, x24\n" + "add x6, x6, x24\n" + "add x26, x26, x24\n" + "add x25, x25, x24\n" + "add x5, x5, x24\n" + "add x2, x2, x24\n" + "add x27, x27, x24\n" + "add x21, x21, x24\n" + "add x12, x12, x24\n" + "add x19, x19, x24\n" + "tbz x0, #2, 9f\n" "ld1 { v31.s }[0], [x28], #0x4\n" - "ld1 { v30.s }[0], [x27], #0x4\n" + "ld1 { v30.s }[0], [x6], #0x4\n" "ld1 { v29.s }[0], [x26], #0x4\n" - "ld1 { v28.s }[0], [x13], #0x4\n" - "ld1 { v27.s }[0], [x24], #0x4\n" - "ld1 { v23.s }[0], [x23], #0x4\n" - "ld1 { v25.s }[0], [x22], #0x4\n" + "ld1 { v28.s }[0], [x25], #0x4\n" + "ld1 { v27.s }[0], [x5], #0x4\n" + "ld1 { v23.s }[0], [x2], #0x4\n" + "ld1 { v25.s }[0], [x27], #0x4\n" "ld1 { v24.s }[0], [x21], #0x4\n" - "ld1 { v26.s }[0], [x20], #0x4\n" - "ld1 { v22.s }[0], [x0], #0x4\n" - "tbz x4, #1, 8f\n" + "ld1 { v26.s }[0], [x12], #0x4\n" + "ld1 { v22.s }[0], [x19], #0x4\n" + "tbz x0, #1, 8f\n" "ld1 { v31.h }[2], [x28], #0x2\n" - "ld1 { v30.h }[2], [x27], #0x2\n" + "ld1 { v30.h }[2], [x6], #0x2\n" "ld1 { v29.h }[2], [x26], #0x2\n" - "ld1 { v28.h }[2], [x13], #0x2\n" - "ld1 { v27.h }[2], [x24], #0x2\n" - "ld1 { v23.h }[2], [x23], #0x2\n" - "ld1 { v25.h }[2], [x22], #0x2\n" + "ld1 { v28.h }[2], [x25], #0x2\n" + "ld1 { v27.h }[2], [x5], #0x2\n" + "ld1 { v23.h }[2], [x2], #0x2\n" + "ld1 { v25.h }[2], [x27], #0x2\n" "ld1 { v24.h }[2], [x21], #0x2\n" - "ld1 { v26.h }[2], [x20], #0x2\n" - "ld1 { v22.h }[2], [x0], #0x2\n" - "tbz x4, #0, 11f\n" + "ld1 { v26.h }[2], [x12], #0x2\n" + "ld1 { v22.h }[2], [x19], #0x2\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[6], [x28]\n" - "ld1 { v30.b }[6], [x27]\n" + "ld1 { v30.b }[6], [x6]\n" "ld1 { v29.b }[6], [x26]\n" - "ld1 { v28.b }[6], [x13]\n" - "ld1 { v27.b }[6], [x24]\n" - "ld1 { v23.b }[6], [x23]\n" - "ld1 { v25.b }[6], [x22]\n" + "ld1 { v28.b }[6], [x25]\n" + "ld1 { v27.b }[6], [x5]\n" + "ld1 { v23.b }[6], [x2]\n" + "ld1 { v25.b }[6], [x27]\n" "ld1 { v24.b }[6], [x21]\n" - "ld1 { v26.b }[6], 
[x20]\n" - "ld1 { v22.b }[6], [x0]\n" + "ld1 { v26.b }[6], [x12]\n" + "ld1 { v22.b }[6], [x19]\n" "b 11f\n" "8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[4], [x28]\n" - "ld1 { v30.b }[4], [x27]\n" + "ld1 { v30.b }[4], [x6]\n" "ld1 { v29.b }[4], [x26]\n" - "ld1 { v28.b }[4], [x13]\n" - "ld1 { v27.b }[4], [x24]\n" - "ld1 { v23.b }[4], [x23]\n" - "ld1 { v25.b }[4], [x22]\n" + "ld1 { v28.b }[4], [x25]\n" + "ld1 { v27.b }[4], [x5]\n" + "ld1 { v23.b }[4], [x2]\n" + "ld1 { v25.b }[4], [x27]\n" "ld1 { v24.b }[4], [x21]\n" - "ld1 { v26.b }[4], [x20]\n" - "ld1 { v22.b }[4], [x0]\n" + "ld1 { v26.b }[4], [x12]\n" + "ld1 { v22.b }[4], [x19]\n" "b 11f\n" "9:" // Oddments: Initial loads: Bit 2: Unset - "tbz x4, #1, 10f\n" + "tbz x0, #1, 10f\n" "ld1 { v31.h }[0], [x28], #0x2\n" - "ld1 { v30.h }[0], [x27], #0x2\n" + "ld1 { v30.h }[0], [x6], #0x2\n" "ld1 { v29.h }[0], [x26], #0x2\n" - "ld1 { v28.h }[0], [x13], #0x2\n" - "ld1 { v27.h }[0], [x24], #0x2\n" - "ld1 { v23.h }[0], [x23], #0x2\n" - "ld1 { v25.h }[0], [x22], #0x2\n" + "ld1 { v28.h }[0], [x25], #0x2\n" + "ld1 { v27.h }[0], [x5], #0x2\n" + "ld1 { v23.h }[0], [x2], #0x2\n" + "ld1 { v25.h }[0], [x27], #0x2\n" "ld1 { v24.h }[0], [x21], #0x2\n" - "ld1 { v26.h }[0], [x20], #0x2\n" - "ld1 { v22.h }[0], [x0], #0x2\n" - "tbz x4, #0, 11f\n" + "ld1 { v26.h }[0], [x12], #0x2\n" + "ld1 { v22.h }[0], [x19], #0x2\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[2], [x28]\n" - "ld1 { v30.b }[2], [x27]\n" + "ld1 { v30.b }[2], [x6]\n" "ld1 { v29.b }[2], [x26]\n" - "ld1 { v28.b }[2], [x13]\n" - "ld1 { v27.b }[2], [x24]\n" - "ld1 { v23.b }[2], [x23]\n" - "ld1 { v25.b }[2], [x22]\n" + "ld1 { v28.b }[2], [x25]\n" + "ld1 { v27.b }[2], [x5]\n" + "ld1 { v23.b }[2], [x2]\n" + "ld1 { v25.b }[2], [x27]\n" "ld1 { v24.b }[2], [x21]\n" - "ld1 { v26.b }[2], [x20]\n" - "ld1 { v22.b }[2], [x0]\n" + "ld1 { v26.b }[2], [x12]\n" + "ld1 { v22.b }[2], [x19]\n" "b 11f\n" "10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 11f\n" + "tbz x0, #0, 11f\n" "ld1 { v31.b }[0], [x28]\n" - "ld1 { v30.b }[0], [x27]\n" + "ld1 { v30.b }[0], [x6]\n" "ld1 { v29.b }[0], [x26]\n" - "ld1 { v28.b }[0], [x13]\n" - "ld1 { v27.b }[0], [x24]\n" - "ld1 { v23.b }[0], [x23]\n" - "ld1 { v25.b }[0], [x22]\n" + "ld1 { v28.b }[0], [x25]\n" + "ld1 { v27.b }[0], [x5]\n" + "ld1 { v23.b }[0], [x2]\n" + "ld1 { v25.b }[0], [x27]\n" "ld1 { v24.b }[0], [x21]\n" - "ld1 { v26.b }[0], [x20]\n" - "ld1 { v22.b }[0], [x0]\n" + "ld1 { v26.b }[0], [x12]\n" + "ld1 { v22.b }[0], [x19]\n" "11:" // Oddments: Initial loads: Bit 2: End "usubl v31.8h, v31.8b, v9.8b\n" - "ldr x20, [x25, #0x50]\n" - "add x20, x20, x10\n" "usubl v30.8h, v30.8b, v9.8b\n" + "smlal v15.4s, v31.4h, v0.4h\n" + "ldr x19, [x20, #0x50]\n" "usubl v29.8h, v29.8b, v9.8b\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "smlal v10.4s, v29.4h, v0.4h\n" "usubl v28.8h, v28.8b, v9.8b\n" + "add x19, x19, x24\n" + "smlal2 v7.4s, v29.8h, v0.8h\n" "usubl v27.8h, v27.8b, v9.8b\n" + "smlal v6.4s, v28.4h, v0.4h\n" + "smlal2 v5.4s, v28.8h, v0.8h\n" + "smlal v15.4s, v30.4h, v1.4h\n" "usubl v23.8h, v23.8b, v9.8b\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal v17.4s, v27.4h, v1.4h\n" "usubl v25.8h, v25.8b, v9.8b\n" + "smlal2 v8.4s, v27.8h, v1.8h\n" + "smlal v10.4s, v28.4h, v1.4h\n" "usubl v24.8h, v24.8b, v9.8b\n" + "smlal2 v7.4s, v28.8h, v1.8h\n" "usubl v26.8h, v26.8b, v9.8b\n" + "smlal v6.4s, v23.4h, v1.4h\n" "usubl v22.8h, v22.8b, v9.8b\n" - "smlal 
v15.4s, v31.4h, v0.4h\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "smlal v16.4s, v30.4h, v0.4h\n" - "smlal2 v21.4s, v30.8h, v0.8h\n" - "smlal v7.4s, v29.4h, v0.4h\n" - "smlal2 v17.4s, v29.8h, v0.8h\n" - "smlal v8.4s, v28.4h, v0.4h\n" - "smlal2 v5.4s, v28.8h, v0.8h\n" - "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v18.4s, v30.8h, v1.8h\n" - "smlal v16.4s, v27.4h, v1.4h\n" - "smlal2 v21.4s, v27.8h, v1.8h\n" - "smlal v7.4s, v28.4h, v1.4h\n" - "smlal2 v17.4s, v28.8h, v1.8h\n" - "smlal v8.4s, v23.4h, v1.4h\n" "smlal2 v5.4s, v23.8h, v1.8h\n" "smlal v15.4s, v27.4h, v2.4h\n" - "smlal2 v18.4s, v27.8h, v2.8h\n" - "smlal v16.4s, v25.4h, v2.4h\n" - "smlal2 v21.4s, v25.8h, v2.8h\n" - "smlal v7.4s, v23.4h, v2.4h\n" - "smlal2 v17.4s, v23.8h, v2.8h\n" - "tbz x4, #2, 13f\n" - "ld1 { v31.s }[0], [x20], #0x4\n" - "tbz x4, #1, 12f\n" - "ld1 { v31.h }[2], [x20], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[6], [x20]\n" + "smlal2 v16.4s, v27.8h, v2.8h\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal v10.4s, v23.4h, v2.4h\n" + "smlal2 v7.4s, v23.8h, v2.8h\n" + "tbz x0, #2, 13f\n" + "ld1 { v31.s }[0], [x19], #0x4\n" + "tbz x0, #1, 12f\n" + "ld1 { v31.h }[2], [x19], #0x2\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[6], [x19]\n" "b 15f\n" "12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[4], [x20]\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[4], [x19]\n" "b 15f\n" "13:" // Oddments: Load (1, 3): Bit 2: Unset - "tbz x4, #1, 14f\n" - "ld1 { v31.h }[0], [x20], #0x2\n" - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[2], [x20]\n" + "tbz x0, #1, 14f\n" + "ld1 { v31.h }[0], [x19], #0x2\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[2], [x19]\n" "b 15f\n" "14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 15f\n" - "ld1 { v31.b }[0], [x20]\n" + "tbz x0, #0, 15f\n" + "ld1 { v31.b }[0], [x19]\n" "15:" // Oddments: Load (1, 3): Bit 2: End "usubl v31.8h, v31.8b, v9.8b\n" - "ldr x28, [x25, #0x58]\n" - "smlal v15.4s, v25.4h, v3.4h\n" - "add x28, x28, x10\n" - "smlal v8.4s, v31.4h, v2.4h\n" + "ldr x15, [x20, #0x58]\n" + "smlal v6.4s, v31.4h, v2.4h\n" "smlal2 v5.4s, v31.8h, v2.8h\n" - "smlal2 v18.4s, v25.8h, v3.8h\n" - "smlal v16.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "smlal v7.4s, v31.4h, v3.4h\n" - "smlal2 v17.4s, v31.8h, v3.8h\n" - "tbz x4, #2, 17f\n" - "ld1 { v30.s }[0], [x28], #0x4\n" - "tbz x4, #1, 16f\n" - "ld1 { v30.h }[2], [x28], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[6], [x28]\n" + "smlal v15.4s, v25.4h, v3.4h\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "add x15, x15, x24\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal v10.4s, v31.4h, v3.4h\n" + "smlal2 v7.4s, v31.8h, v3.8h\n" + "tbz x0, #2, 17f\n" + "ld1 { v30.s }[0], [x15], #0x4\n" + "tbz x0, #1, 16f\n" + "ld1 { v30.h }[2], [x15], #0x2\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[6], [x15]\n" "b 19f\n" "16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[4], [x28]\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[4], [x15]\n" "b 19f\n" "17:" // Oddments: Load (1, 4): Bit 2: Unset - "tbz x4, #1, 18f\n" - "ld1 { v30.h }[0], [x28], #0x2\n" - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[2], [x28]\n" + "tbz x0, #1, 18f\n" + "ld1 { v30.h }[0], [x15], #0x2\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[2], [x15]\n" "b 19f\n" "18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 19f\n" - "ld1 { v30.b }[0], [x28]\n" + "tbz x0, #0, 19f\n" + "ld1 { v30.b }[0], [x15]\n" "19:" // Oddments: Load (1, 4): Bit 2: End "usubl 
v30.8h, v30.8b, v9.8b\n" - "ldr x0, [x25, #0x60]\n" - "smlal v15.4s, v24.4h, v4.4h\n" - "add x0, x0, x10\n" - "smlal v8.4s, v30.4h, v3.4h\n" + "ldr x19, [x20, #0x60]\n" + "smlal v6.4s, v30.4h, v3.4h\n" "smlal2 v5.4s, v30.8h, v3.8h\n" - "smlal2 v18.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 21f\n" - "ld1 { v27.s }[0], [x0], #0x4\n" - "tbz x4, #1, 20f\n" - "ld1 { v27.h }[2], [x0], #0x2\n" - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[6], [x0]\n" + "smlal v15.4s, v24.4h, v4.4h\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "add x19, x19, x24\n" + "tbz x0, #2, 21f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x0, #1, 20f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[6], [x19]\n" "b 23f\n" "20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[4], [x0]\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[4], [x19]\n" "b 23f\n" "21:" // Oddments: Load (0, 5): Bit 2: Unset - "tbz x4, #1, 22f\n" - "ld1 { v27.h }[0], [x0], #0x2\n" - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[2], [x0]\n" + "tbz x0, #1, 22f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[2], [x19]\n" "b 23f\n" "22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 23f\n" - "ld1 { v27.b }[0], [x0]\n" + "tbz x0, #0, 23f\n" + "ld1 { v27.b }[0], [x19]\n" "23:" // Oddments: Load (0, 5): Bit 2: End "usubl v27.8h, v27.8b, v9.8b\n" - "ldr d0, [x3, #0x28]\n" - "smlal v7.4s, v30.4h, v4.4h\n" - "ldr x7, [x25, #0x68]\n" - "add x7, x7, x10\n" - "smlal v16.4s, v27.4h, v4.4h\n" - "smlal2 v21.4s, v27.8h, v4.8h\n" - "smlal2 v17.4s, v30.8h, v4.8h\n" - "smlal v8.4s, v26.4h, v4.4h\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" + "ldr d0, [x23, #0x28]\n" + "smlal v17.4s, v27.4h, v4.4h\n" + "smlal2 v8.4s, v27.8h, v4.8h\n" + "smlal v10.4s, v30.4h, v4.4h\n" + "smlal2 v7.4s, v30.8h, v4.8h\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x27, [x20, #0x68]\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "add x27, x27, x24\n" "smlal v15.4s, v29.4h, v0.4h\n" - "smlal2 v18.4s, v29.8h, v0.8h\n" - "smlal v16.4s, v28.4h, v0.4h\n" - "smlal2 v21.4s, v28.8h, v0.8h\n" - "smlal v7.4s, v22.4h, v0.4h\n" - "smlal2 v17.4s, v22.8h, v0.8h\n" - "tbz x4, #2, 25f\n" - "ld1 { v25.s }[0], [x7], #0x4\n" - "tbz x4, #1, 24f\n" - "ld1 { v25.h }[2], [x7], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[6], [x7]\n" + "smlal2 v16.4s, v29.8h, v0.8h\n" + "smlal v17.4s, v28.4h, v0.4h\n" + "smlal2 v8.4s, v28.8h, v0.8h\n" + "smlal v10.4s, v22.4h, v0.4h\n" + "smlal2 v7.4s, v22.8h, v0.8h\n" + "tbz x0, #2, 25f\n" + "ld1 { v25.s }[0], [x27], #0x4\n" + "tbz x0, #1, 24f\n" + "ld1 { v25.h }[2], [x27], #0x2\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[6], [x27]\n" "b 27f\n" "24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[4], [x7]\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[4], [x27]\n" "b 27f\n" "25:" // Oddments: Load (2, 1): Bit 2: Unset - "tbz x4, #1, 26f\n" - "ld1 { v25.h }[0], [x7], #0x2\n" - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[2], [x7]\n" + "tbz x0, #1, 26f\n" + "ld1 { v25.h }[0], [x27], #0x2\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[2], [x27]\n" "b 27f\n" "26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 27f\n" - "ld1 { v25.b }[0], [x7]\n" + "tbz x0, #0, 27f\n" + "ld1 { v25.b }[0], [x27]\n" "27:" // Oddments: Load (2, 1): Bit 2: End + "ldr d1, [x23, #0x30]\n" "usubl v25.8h, v25.8b, v9.8b\n" - "ldr d1, [x3, #0x30]\n" - "smlal v8.4s, v25.4h, v0.4h\n" - "ldr x26, [x25, #0x70]\n" - "add x26, x26, x10\n" - "smlal2 v5.4s, v25.8h, v0.8h\n" "ssubl v1.8h, 
v1.8b, v14.8b\n" + "ldr x5, [x20, #0x70]\n" + "smlal v6.4s, v25.4h, v0.4h\n" + "smlal2 v5.4s, v25.8h, v0.8h\n" + "add x5, x5, x24\n" "smlal v15.4s, v28.4h, v1.4h\n" - "smlal2 v18.4s, v28.8h, v1.8h\n" - "smlal v16.4s, v23.4h, v1.4h\n" - "smlal2 v21.4s, v23.8h, v1.8h\n" - "smlal v7.4s, v25.4h, v1.4h\n" - "smlal2 v17.4s, v25.8h, v1.8h\n" - "tbz x4, #2, 29f\n" - "ld1 { v24.s }[0], [x26], #0x4\n" - "tbz x4, #1, 28f\n" - "ld1 { v24.h }[2], [x26], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[6], [x26]\n" + "smlal2 v16.4s, v28.8h, v1.8h\n" + "smlal v17.4s, v23.4h, v1.4h\n" + "smlal2 v8.4s, v23.8h, v1.8h\n" + "smlal v10.4s, v25.4h, v1.4h\n" + "smlal2 v7.4s, v25.8h, v1.8h\n" + "tbz x0, #2, 29f\n" + "ld1 { v24.s }[0], [x5], #0x4\n" + "tbz x0, #1, 28f\n" + "ld1 { v24.h }[2], [x5], #0x2\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[6], [x5]\n" "b 31f\n" "28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[4], [x26]\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[4], [x5]\n" "b 31f\n" "29:" // Oddments: Load (2, 2): Bit 2: Unset - "tbz x4, #1, 30f\n" - "ld1 { v24.h }[0], [x26], #0x2\n" - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[2], [x26]\n" + "tbz x0, #1, 30f\n" + "ld1 { v24.h }[0], [x5], #0x2\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[2], [x5]\n" "b 31f\n" "30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 31f\n" - "ld1 { v24.b }[0], [x26]\n" + "tbz x0, #0, 31f\n" + "ld1 { v24.b }[0], [x5]\n" "31:" // Oddments: Load (2, 2): Bit 2: End + "ldr d2, [x23, #0x38]\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ldr d2, [x3, #0x38]\n" - "smlal v8.4s, v24.4h, v1.4h\n" - "ldr x23, [x25, #0x78]\n" - "add x23, x23, x10\n" - "smlal2 v5.4s, v24.8h, v1.8h\n" "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x11, [x20, #0x78]\n" + "smlal v6.4s, v24.4h, v1.4h\n" + "smlal2 v5.4s, v24.8h, v1.8h\n" + "add x11, x11, x24\n" "smlal v15.4s, v23.4h, v2.4h\n" - "smlal2 v18.4s, v23.8h, v2.8h\n" - "smlal v16.4s, v31.4h, v2.4h\n" - "smlal2 v21.4s, v31.8h, v2.8h\n" - "smlal v7.4s, v24.4h, v2.4h\n" - "smlal2 v17.4s, v24.8h, v2.8h\n" - "tbz x4, #2, 33f\n" - "ld1 { v27.s }[0], [x23], #0x4\n" - "tbz x4, #1, 32f\n" - "ld1 { v27.h }[2], [x23], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[6], [x23]\n" + "smlal2 v16.4s, v23.8h, v2.8h\n" + "smlal v17.4s, v31.4h, v2.4h\n" + "smlal2 v8.4s, v31.8h, v2.8h\n" + "smlal v10.4s, v24.4h, v2.4h\n" + "smlal2 v7.4s, v24.8h, v2.8h\n" + "tbz x0, #2, 33f\n" + "ld1 { v27.s }[0], [x11], #0x4\n" + "tbz x0, #1, 32f\n" + "ld1 { v27.h }[2], [x11], #0x2\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[6], [x11]\n" "b 35f\n" "32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[4], [x23]\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[4], [x11]\n" "b 35f\n" "33:" // Oddments: Load (2, 3): Bit 2: Unset - "tbz x4, #1, 34f\n" - "ld1 { v27.h }[0], [x23], #0x2\n" - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[2], [x23]\n" + "tbz x0, #1, 34f\n" + "ld1 { v27.h }[0], [x11], #0x2\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[2], [x11]\n" "b 35f\n" "34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 35f\n" - "ld1 { v27.b }[0], [x23]\n" + "tbz x0, #0, 35f\n" + "ld1 { v27.b }[0], [x11]\n" "35:" // Oddments: Load (2, 3): Bit 2: End + "ldr d3, [x23, #0x40]\n" "usubl v27.8h, v27.8b, v9.8b\n" - "ldr d3, [x3, #0x40]\n" - "smlal v8.4s, v27.4h, v2.4h\n" - "ldr x20, [x25, #0x80]\n" - "add x20, x20, x10\n" - "smlal2 v5.4s, v27.8h, v2.8h\n" "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x12, [x20, #0x80]\n" + "smlal v6.4s, v27.4h, v2.4h\n" + "smlal2 v5.4s, v27.8h, v2.8h\n" + "add 
x12, x12, x24\n" "smlal v15.4s, v31.4h, v3.4h\n" - "smlal2 v18.4s, v31.8h, v3.8h\n" - "smlal v16.4s, v30.4h, v3.4h\n" - "smlal2 v21.4s, v30.8h, v3.8h\n" - "smlal v7.4s, v27.4h, v3.4h\n" - "smlal2 v17.4s, v27.8h, v3.8h\n" - "tbz x4, #2, 37f\n" - "ld1 { v23.s }[0], [x20], #0x4\n" - "tbz x4, #1, 36f\n" - "ld1 { v23.h }[2], [x20], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[6], [x20]\n" + "smlal2 v16.4s, v31.8h, v3.8h\n" + "smlal v17.4s, v30.4h, v3.4h\n" + "smlal2 v8.4s, v30.8h, v3.8h\n" + "smlal v10.4s, v27.4h, v3.4h\n" + "smlal2 v7.4s, v27.8h, v3.8h\n" + "tbz x0, #2, 37f\n" + "ld1 { v23.s }[0], [x12], #0x4\n" + "tbz x0, #1, 36f\n" + "ld1 { v23.h }[2], [x12], #0x2\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[6], [x12]\n" "b 39f\n" "36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[4], [x20]\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[4], [x12]\n" "b 39f\n" "37:" // Oddments: Load (2, 4): Bit 2: Unset - "tbz x4, #1, 38f\n" - "ld1 { v23.h }[0], [x20], #0x2\n" - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[2], [x20]\n" + "tbz x0, #1, 38f\n" + "ld1 { v23.h }[0], [x12], #0x2\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[2], [x12]\n" "b 39f\n" "38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 39f\n" - "ld1 { v23.b }[0], [x20]\n" + "tbz x0, #0, 39f\n" + "ld1 { v23.b }[0], [x12]\n" "39:" // Oddments: Load (2, 4): Bit 2: End + "ldr d4, [x23, #0x48]\n" "usubl v23.8h, v23.8b, v9.8b\n" - "ldr d4, [x3, #0x48]\n" - "smlal v8.4s, v23.4h, v3.4h\n" - "ldr x22, [x25, #0x88]\n" - "add x22, x22, x10\n" - "smlal2 v5.4s, v23.8h, v3.8h\n" "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x26, [x20, #0x88]\n" + "smlal v6.4s, v23.4h, v3.4h\n" + "smlal2 v5.4s, v23.8h, v3.8h\n" + "add x26, x26, x24\n" "smlal v15.4s, v30.4h, v4.4h\n" - "smlal2 v18.4s, v30.8h, v4.8h\n" - "smlal v16.4s, v26.4h, v4.4h\n" - "smlal2 v21.4s, v26.8h, v4.8h\n" - "smlal v7.4s, v23.4h, v4.4h\n" - "smlal2 v17.4s, v23.8h, v4.8h\n" - "tbz x4, #2, 41f\n" - "ld1 { v28.s }[0], [x22], #0x4\n" - "tbz x4, #1, 40f\n" - "ld1 { v28.h }[2], [x22], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[6], [x22]\n" + "smlal2 v16.4s, v30.8h, v4.8h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "smlal v10.4s, v23.4h, v4.4h\n" + "smlal2 v7.4s, v23.8h, v4.8h\n" + "tbz x0, #2, 41f\n" + "ld1 { v28.s }[0], [x26], #0x4\n" + "tbz x0, #1, 40f\n" + "ld1 { v28.h }[2], [x26], #0x2\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[6], [x26]\n" "b 43f\n" "40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[4], [x22]\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[4], [x26]\n" "b 43f\n" "41:" // Oddments: Load (2, 5): Bit 2: Unset - "tbz x4, #1, 42f\n" - "ld1 { v28.h }[0], [x22], #0x2\n" - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[2], [x22]\n" + "tbz x0, #1, 42f\n" + "ld1 { v28.h }[0], [x26], #0x2\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[2], [x26]\n" "b 43f\n" "42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 43f\n" - "ld1 { v28.b }[0], [x22]\n" + "tbz x0, #0, 43f\n" + "ld1 { v28.b }[0], [x26]\n" "43:" // Oddments: Load (2, 5): Bit 2: End + "ldr d0, [x23, #0x50]\n" "usubl v28.8h, v28.8b, v9.8b\n" - "ldr d0, [x3, #0x50]\n" - "smlal v8.4s, v28.4h, v4.4h\n" - "ldr x13, [x25, #0x90]\n" - "add x13, x13, x10\n" - "smlal2 v5.4s, v28.8h, v4.8h\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x14, [x20, #0x90]\n" + "smlal v6.4s, v28.4h, v4.4h\n" + "smlal2 v5.4s, v28.8h, v4.8h\n" + "add x14, x14, x24\n" "smlal v15.4s, v22.4h, v0.4h\n" - "smlal2 v18.4s, v22.8h, v0.8h\n" - "smlal v16.4s, v25.4h, 
v0.4h\n" - "smlal2 v21.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 45f\n" - "ld1 { v31.s }[0], [x13], #0x4\n" - "tbz x4, #1, 44f\n" - "ld1 { v31.h }[2], [x13], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[6], [x13]\n" + "smlal2 v16.4s, v22.8h, v0.8h\n" + "smlal v17.4s, v25.4h, v0.4h\n" + "smlal2 v8.4s, v25.8h, v0.8h\n" + "tbz x0, #2, 45f\n" + "ld1 { v31.s }[0], [x14], #0x4\n" + "tbz x0, #1, 44f\n" + "ld1 { v31.h }[2], [x14], #0x2\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[6], [x14]\n" "b 47f\n" "44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[4], [x13]\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[4], [x14]\n" "b 47f\n" "45:" // Oddments: Load (3, 0): Bit 2: Unset - "tbz x4, #1, 46f\n" - "ld1 { v31.h }[0], [x13], #0x2\n" - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[2], [x13]\n" + "tbz x0, #1, 46f\n" + "ld1 { v31.h }[0], [x14], #0x2\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[2], [x14]\n" "b 47f\n" "46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 47f\n" - "ld1 { v31.b }[0], [x13]\n" + "tbz x0, #0, 47f\n" + "ld1 { v31.b }[0], [x14]\n" "47:" // Oddments: Load (3, 0): Bit 2: End "usubl v31.8h, v31.8b, v9.8b\n" - "ldr x21, [x25, #0x98]\n" - "smlal v7.4s, v31.4h, v0.4h\n" - "add x21, x21, x10\n" - "smlal2 v17.4s, v31.8h, v0.8h\n" - "tbz x4, #2, 49f\n" - "ld1 { v30.s }[0], [x21], #0x4\n" - "tbz x4, #1, 48f\n" - "ld1 { v30.h }[2], [x21], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[6], [x21]\n" + "ldr x15, [x20, #0x98]\n" + "smlal v10.4s, v31.4h, v0.4h\n" + "smlal2 v7.4s, v31.8h, v0.8h\n" + "add x15, x15, x24\n" + "tbz x0, #2, 49f\n" + "ld1 { v30.s }[0], [x15], #0x4\n" + "tbz x0, #1, 48f\n" + "ld1 { v30.h }[2], [x15], #0x2\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[6], [x15]\n" "b 51f\n" "48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[4], [x21]\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[4], [x15]\n" "b 51f\n" "49:" // Oddments: Load (3, 1): Bit 2: Unset - "tbz x4, #1, 50f\n" - "ld1 { v30.h }[0], [x21], #0x2\n" - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[2], [x21]\n" + "tbz x0, #1, 50f\n" + "ld1 { v30.h }[0], [x15], #0x2\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[2], [x15]\n" "b 51f\n" "50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 51f\n" - "ld1 { v30.b }[0], [x21]\n" + "tbz x0, #0, 51f\n" + "ld1 { v30.b }[0], [x15]\n" "51:" // Oddments: Load (3, 1): Bit 2: End + "ldr d1, [x23, #0x58]\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ldr d1, [x3, #0x58]\n" - "smlal v8.4s, v30.4h, v0.4h\n" - "ldr x14, [x25, #0xa0]\n" - "add x14, x14, x10\n" - "smlal2 v5.4s, v30.8h, v0.8h\n" "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x21, [x20, #0xa0]\n" + "smlal v6.4s, v30.4h, v0.4h\n" + "smlal2 v5.4s, v30.8h, v0.8h\n" + "add x21, x21, x24\n" "smlal v15.4s, v25.4h, v1.4h\n" - "smlal2 v18.4s, v25.8h, v1.8h\n" - "smlal v16.4s, v24.4h, v1.4h\n" - "smlal2 v21.4s, v24.8h, v1.8h\n" - "smlal v7.4s, v30.4h, v1.4h\n" - "smlal2 v17.4s, v30.8h, v1.8h\n" - "tbz x4, #2, 53f\n" - "ld1 { v26.s }[0], [x14], #0x4\n" - "tbz x4, #1, 52f\n" - "ld1 { v26.h }[2], [x14], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[6], [x14]\n" + "smlal2 v16.4s, v25.8h, v1.8h\n" + "smlal v17.4s, v24.4h, v1.4h\n" + "smlal2 v8.4s, v24.8h, v1.8h\n" + "smlal v10.4s, v30.4h, v1.4h\n" + "smlal2 v7.4s, v30.8h, v1.8h\n" + "tbz x0, #2, 53f\n" + "ld1 { v26.s }[0], [x21], #0x4\n" + "tbz x0, #1, 52f\n" + "ld1 { v26.h }[2], [x21], #0x2\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[6], [x21]\n" "b 55f\n" "52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 55f\n" 
- "ld1 { v26.b }[4], [x14]\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[4], [x21]\n" "b 55f\n" "53:" // Oddments: Load (3, 2): Bit 2: Unset - "tbz x4, #1, 54f\n" - "ld1 { v26.h }[0], [x14], #0x2\n" - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[2], [x14]\n" + "tbz x0, #1, 54f\n" + "ld1 { v26.h }[0], [x21], #0x2\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[2], [x21]\n" "b 55f\n" "54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 55f\n" - "ld1 { v26.b }[0], [x14]\n" + "tbz x0, #0, 55f\n" + "ld1 { v26.b }[0], [x21]\n" "55:" // Oddments: Load (3, 2): Bit 2: End + "ldr d2, [x23, #0x60]\n" "usubl v26.8h, v26.8b, v9.8b\n" - "ldr d2, [x3, #0x60]\n" - "smlal v8.4s, v26.4h, v1.4h\n" - "ldr x11, [x25, #0xa8]\n" - "add x11, x11, x10\n" - "smlal2 v5.4s, v26.8h, v1.8h\n" "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x2, [x20, #0xa8]\n" + "smlal v6.4s, v26.4h, v1.4h\n" + "smlal2 v5.4s, v26.8h, v1.8h\n" + "add x2, x2, x24\n" "smlal v15.4s, v24.4h, v2.4h\n" - "smlal2 v18.4s, v24.8h, v2.8h\n" - "smlal v16.4s, v27.4h, v2.4h\n" - "smlal2 v21.4s, v27.8h, v2.8h\n" - "smlal v7.4s, v26.4h, v2.4h\n" - "smlal2 v17.4s, v26.8h, v2.8h\n" - "tbz x4, #2, 57f\n" - "ld1 { v25.s }[0], [x11], #0x4\n" - "tbz x4, #1, 56f\n" - "ld1 { v25.h }[2], [x11], #0x2\n" - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[6], [x11]\n" + "smlal2 v16.4s, v24.8h, v2.8h\n" + "smlal v17.4s, v27.4h, v2.4h\n" + "smlal2 v8.4s, v27.8h, v2.8h\n" + "smlal v10.4s, v26.4h, v2.4h\n" + "smlal2 v7.4s, v26.8h, v2.8h\n" + "tbz x0, #2, 57f\n" + "ld1 { v25.s }[0], [x2], #0x4\n" + "tbz x0, #1, 56f\n" + "ld1 { v25.h }[2], [x2], #0x2\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[6], [x2]\n" "b 59f\n" "56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[4], [x11]\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[4], [x2]\n" "b 59f\n" "57:" // Oddments: Load (3, 3): Bit 2: Unset - "tbz x4, #1, 58f\n" - "ld1 { v25.h }[0], [x11], #0x2\n" - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[2], [x11]\n" + "tbz x0, #1, 58f\n" + "ld1 { v25.h }[0], [x2], #0x2\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[2], [x2]\n" "b 59f\n" "58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 59f\n" - "ld1 { v25.b }[0], [x11]\n" + "tbz x0, #0, 59f\n" + "ld1 { v25.b }[0], [x2]\n" "59:" // Oddments: Load (3, 3): Bit 2: End + "ldr d3, [x23, #0x68]\n" "usubl v25.8h, v25.8b, v9.8b\n" - "ldr d3, [x3, #0x68]\n" - "smlal v8.4s, v25.4h, v2.4h\n" - "ldr x24, [x25, #0xb0]\n" - "add x24, x24, x10\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x13, [x20, #0xb0]\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x13, x13, x24\n" "smlal v15.4s, v27.4h, v3.4h\n" - "smlal2 v18.4s, v27.8h, v3.8h\n" - "smlal v16.4s, v23.4h, v3.4h\n" - "smlal2 v21.4s, v23.8h, v3.8h\n" - "smlal v7.4s, v25.4h, v3.4h\n" - "smlal2 v17.4s, v25.8h, v3.8h\n" - "tbz x4, #2, 61f\n" - "ld1 { v24.s }[0], [x24], #0x4\n" - "tbz x4, #1, 60f\n" - "ld1 { v24.h }[2], [x24], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[6], [x24]\n" + "smlal2 v16.4s, v27.8h, v3.8h\n" + "smlal v17.4s, v23.4h, v3.4h\n" + "smlal2 v8.4s, v23.8h, v3.8h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "tbz x0, #2, 61f\n" + "ld1 { v24.s }[0], [x13], #0x4\n" + "tbz x0, #1, 60f\n" + "ld1 { v24.h }[2], [x13], #0x2\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[6], [x13]\n" "b 63f\n" "60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[4], [x24]\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[4], [x13]\n" "b 63f\n" "61:" // Oddments: Load (3, 
4): Bit 2: Unset - "tbz x4, #1, 62f\n" - "ld1 { v24.h }[0], [x24], #0x2\n" - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[2], [x24]\n" + "tbz x0, #1, 62f\n" + "ld1 { v24.h }[0], [x13], #0x2\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[2], [x13]\n" "b 63f\n" "62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 63f\n" - "ld1 { v24.b }[0], [x24]\n" + "tbz x0, #0, 63f\n" + "ld1 { v24.b }[0], [x13]\n" "63:" // Oddments: Load (3, 4): Bit 2: End + "ldr d4, [x23, #0x70]\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ldr d4, [x3, #0x70]\n" - "smlal v8.4s, v24.4h, v3.4h\n" - "ldr x0, [x25, #0xb8]\n" - "add x0, x0, x10\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x9, [x20, #0xb8]\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "add x9, x9, x24\n" "smlal v15.4s, v23.4h, v4.4h\n" - "smlal2 v18.4s, v23.8h, v4.8h\n" - "smlal v16.4s, v28.4h, v4.4h\n" - "smlal2 v21.4s, v28.8h, v4.8h\n" - "smlal v7.4s, v24.4h, v4.4h\n" - "smlal2 v17.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 65f\n" - "ld1 { v22.s }[0], [x0], #0x4\n" - "tbz x4, #1, 64f\n" - "ld1 { v22.h }[2], [x0], #0x2\n" - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[6], [x0]\n" + "smlal2 v16.4s, v23.8h, v4.8h\n" + "smlal v17.4s, v28.4h, v4.4h\n" + "smlal2 v8.4s, v28.8h, v4.8h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "tbz x0, #2, 65f\n" + "ld1 { v22.s }[0], [x9], #0x4\n" + "tbz x0, #1, 64f\n" + "ld1 { v22.h }[2], [x9], #0x2\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[6], [x9]\n" "b 67f\n" "64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[4], [x0]\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[4], [x9]\n" "b 67f\n" "65:" // Oddments: Load (3, 5): Bit 2: Unset - "tbz x4, #1, 66f\n" - "ld1 { v22.h }[0], [x0], #0x2\n" - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[2], [x0]\n" + "tbz x0, #1, 66f\n" + "ld1 { v22.h }[0], [x9], #0x2\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[2], [x9]\n" "b 67f\n" "66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 67f\n" - "ld1 { v22.b }[0], [x0]\n" + "tbz x0, #0, 67f\n" + "ld1 { v22.b }[0], [x9]\n" "67:" // Oddments: Load (3, 5): Bit 2: End + "ldr d0, [x23, #0x78]\n" "usubl v22.8h, v22.8b, v9.8b\n" - "ldr d0, [x3, #0x78]\n" - "smlal v8.4s, v22.4h, v4.4h\n" - "ldr x15, [x25, #0xc0]\n" - "add x15, x15, x10\n" - "smlal2 v5.4s, v22.8h, v4.8h\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x19, [x20, #0xc0]\n" + "smlal v6.4s, v22.4h, v4.4h\n" + "smlal2 v5.4s, v22.8h, v4.8h\n" + "add x19, x19, x24\n" "smlal v15.4s, v31.4h, v0.4h\n" - "smlal2 v18.4s, v31.8h, v0.8h\n" - "smlal v16.4s, v30.4h, v0.4h\n" - "smlal2 v21.4s, v30.8h, v0.8h\n" - "tbz x4, #2, 69f\n" - "ld1 { v27.s }[0], [x15], #0x4\n" - "tbz x4, #1, 68f\n" - "ld1 { v27.h }[2], [x15], #0x2\n" - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[6], [x15]\n" + "smlal2 v16.4s, v31.8h, v0.8h\n" + "smlal v17.4s, v30.4h, v0.4h\n" + "smlal2 v8.4s, v30.8h, v0.8h\n" + "tbz x0, #2, 69f\n" + "ld1 { v27.s }[0], [x19], #0x4\n" + "tbz x0, #1, 68f\n" + "ld1 { v27.h }[2], [x19], #0x2\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[6], [x19]\n" "b 71f\n" "68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[4], [x15]\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[4], [x19]\n" "b 71f\n" "69:" // Oddments: Load (4, 0): Bit 2: Unset - "tbz x4, #1, 70f\n" - "ld1 { v27.h }[0], [x15], #0x2\n" - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[2], [x15]\n" + "tbz x0, #1, 70f\n" + "ld1 { v27.h }[0], [x19], #0x2\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[2], [x19]\n" "b 71f\n" "70:" // Oddments: 
Load (4, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 71f\n" - "ld1 { v27.b }[0], [x15]\n" + "tbz x0, #0, 71f\n" + "ld1 { v27.b }[0], [x19]\n" "71:" // Oddments: Load (4, 0): Bit 2: End "usubl v27.8h, v27.8b, v9.8b\n" - "ldr x9, [x25, #0xc8]\n" - "smlal v7.4s, v27.4h, v0.4h\n" - "add x9, x9, x10\n" - "smlal2 v17.4s, v27.8h, v0.8h\n" - "tbz x4, #2, 73f\n" - "ld1 { v23.s }[0], [x9], #0x4\n" - "tbz x4, #1, 72f\n" - "ld1 { v23.h }[2], [x9], #0x2\n" - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[6], [x9]\n" + "ldr x28, [x20, #0xc8]\n" + "smlal v10.4s, v27.4h, v0.4h\n" + "smlal2 v7.4s, v27.8h, v0.8h\n" + "add x28, x28, x24\n" + "tbz x0, #2, 73f\n" + "ld1 { v23.s }[0], [x28], #0x4\n" + "tbz x0, #1, 72f\n" + "ld1 { v23.h }[2], [x28], #0x2\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[6], [x28]\n" "b 75f\n" "72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[4], [x9]\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[4], [x28]\n" "b 75f\n" "73:" // Oddments: Load (4, 1): Bit 2: Unset - "tbz x4, #1, 74f\n" - "ld1 { v23.h }[0], [x9], #0x2\n" - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[2], [x9]\n" + "tbz x0, #1, 74f\n" + "ld1 { v23.h }[0], [x28], #0x2\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[2], [x28]\n" "b 75f\n" "74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 75f\n" - "ld1 { v23.b }[0], [x9]\n" + "tbz x0, #0, 75f\n" + "ld1 { v23.b }[0], [x28]\n" "75:" // Oddments: Load (4, 1): Bit 2: End + "ldr d1, [x23, #0x80]\n" "usubl v23.8h, v23.8b, v9.8b\n" - "ldr d1, [x3, #0x80]\n" - "smlal v8.4s, v23.4h, v0.4h\n" - "ldr x27, [x25, #0xd0]\n" - "add x27, x27, x10\n" - "smlal2 v5.4s, v23.8h, v0.8h\n" "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x6, [x20, #0xd0]\n" + "smlal v6.4s, v23.4h, v0.4h\n" + "smlal2 v5.4s, v23.8h, v0.8h\n" + "add x6, x6, x24\n" "smlal v15.4s, v30.4h, v1.4h\n" - "smlal2 v18.4s, v30.8h, v1.8h\n" - "smlal v16.4s, v26.4h, v1.4h\n" - "smlal2 v21.4s, v26.8h, v1.8h\n" - "smlal v7.4s, v23.4h, v1.4h\n" - "smlal2 v17.4s, v23.8h, v1.8h\n" - "tbz x4, #2, 77f\n" - "ld1 { v31.s }[0], [x27], #0x4\n" - "tbz x4, #1, 76f\n" - "ld1 { v31.h }[2], [x27], #0x2\n" - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[6], [x27]\n" + "smlal2 v16.4s, v30.8h, v1.8h\n" + "smlal v17.4s, v26.4h, v1.4h\n" + "smlal2 v8.4s, v26.8h, v1.8h\n" + "smlal v10.4s, v23.4h, v1.4h\n" + "smlal2 v7.4s, v23.8h, v1.8h\n" + "tbz x0, #2, 77f\n" + "ld1 { v31.s }[0], [x6], #0x4\n" + "tbz x0, #1, 76f\n" + "ld1 { v31.h }[2], [x6], #0x2\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[6], [x6]\n" "b 79f\n" "76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[4], [x27]\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[4], [x6]\n" "b 79f\n" "77:" // Oddments: Load (4, 2): Bit 2: Unset - "tbz x4, #1, 78f\n" - "ld1 { v31.h }[0], [x27], #0x2\n" - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[2], [x27]\n" + "tbz x0, #1, 78f\n" + "ld1 { v31.h }[0], [x6], #0x2\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[2], [x6]\n" "b 79f\n" "78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 79f\n" - "ld1 { v31.b }[0], [x27]\n" + "tbz x0, #0, 79f\n" + "ld1 { v31.b }[0], [x6]\n" "79:" // Oddments: Load (4, 2): Bit 2: End + "ldr d2, [x23, #0x88]\n" "usubl v31.8h, v31.8b, v9.8b\n" - "ldr d2, [x3, #0x88]\n" - "smlal v8.4s, v31.4h, v1.4h\n" - "ldr x28, [x25, #0xd8]\n" - "add x28, x28, x10\n" - "smlal2 v5.4s, v31.8h, v1.8h\n" "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x27, [x20, #0xd8]\n" + "smlal v6.4s, v31.4h, v1.4h\n" + "smlal2 v5.4s, v31.8h, v1.8h\n" + "add x27, x27, x24\n" "smlal v15.4s, v26.4h, v2.4h\n" - "smlal2 
v18.4s, v26.8h, v2.8h\n" - "smlal v16.4s, v25.4h, v2.4h\n" - "smlal2 v21.4s, v25.8h, v2.8h\n" - "smlal v7.4s, v31.4h, v2.4h\n" - "smlal2 v17.4s, v31.8h, v2.8h\n" - "tbz x4, #2, 81f\n" - "ld1 { v30.s }[0], [x28], #0x4\n" - "tbz x4, #1, 80f\n" - "ld1 { v30.h }[2], [x28], #0x2\n" - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[6], [x28]\n" + "smlal2 v16.4s, v26.8h, v2.8h\n" + "smlal v17.4s, v25.4h, v2.4h\n" + "smlal2 v8.4s, v25.8h, v2.8h\n" + "smlal v10.4s, v31.4h, v2.4h\n" + "smlal2 v7.4s, v31.8h, v2.8h\n" + "tbz x0, #2, 81f\n" + "ld1 { v30.s }[0], [x27], #0x4\n" + "tbz x0, #1, 80f\n" + "ld1 { v30.h }[2], [x27], #0x2\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[6], [x27]\n" "b 83f\n" "80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[4], [x28]\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[4], [x27]\n" "b 83f\n" "81:" // Oddments: Load (4, 3): Bit 2: Unset - "tbz x4, #1, 82f\n" - "ld1 { v30.h }[0], [x28], #0x2\n" - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[2], [x28]\n" + "tbz x0, #1, 82f\n" + "ld1 { v30.h }[0], [x27], #0x2\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[2], [x27]\n" "b 83f\n" "82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 83f\n" - "ld1 { v30.b }[0], [x28]\n" + "tbz x0, #0, 83f\n" + "ld1 { v30.b }[0], [x27]\n" "83:" // Oddments: Load (4, 3): Bit 2: End + "ldr d3, [x23, #0x90]\n" "usubl v30.8h, v30.8b, v9.8b\n" - "ldr d3, [x3, #0x90]\n" - "smlal v8.4s, v30.4h, v2.4h\n" - "ldr x12, [x25, #0xe0]\n" - "add x12, x12, x10\n" - "smlal2 v5.4s, v30.8h, v2.8h\n" "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x11, [x20, #0xe0]\n" + "smlal v6.4s, v30.4h, v2.4h\n" + "smlal2 v5.4s, v30.8h, v2.8h\n" + "add x11, x11, x24\n" "smlal v15.4s, v25.4h, v3.4h\n" - "smlal2 v18.4s, v25.8h, v3.8h\n" - "smlal v16.4s, v24.4h, v3.4h\n" - "smlal2 v21.4s, v24.8h, v3.8h\n" - "smlal v7.4s, v30.4h, v3.4h\n" - "smlal2 v17.4s, v30.8h, v3.8h\n" - "tbz x4, #2, 85f\n" - "ld1 { v28.s }[0], [x12], #0x4\n" - "tbz x4, #1, 84f\n" - "ld1 { v28.h }[2], [x12], #0x2\n" - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[6], [x12]\n" + "smlal2 v16.4s, v25.8h, v3.8h\n" + "smlal v17.4s, v24.4h, v3.4h\n" + "smlal2 v8.4s, v24.8h, v3.8h\n" + "smlal v10.4s, v30.4h, v3.4h\n" + "smlal2 v7.4s, v30.8h, v3.8h\n" + "tbz x0, #2, 85f\n" + "ld1 { v28.s }[0], [x11], #0x4\n" + "tbz x0, #1, 84f\n" + "ld1 { v28.h }[2], [x11], #0x2\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[6], [x11]\n" "b 87f\n" "84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[4], [x12]\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[4], [x11]\n" "b 87f\n" "85:" // Oddments: Load (4, 4): Bit 2: Unset - "tbz x4, #1, 86f\n" - "ld1 { v28.h }[0], [x12], #0x2\n" - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[2], [x12]\n" + "tbz x0, #1, 86f\n" + "ld1 { v28.h }[0], [x11], #0x2\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[2], [x11]\n" "b 87f\n" "86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 87f\n" - "ld1 { v28.b }[0], [x12]\n" + "tbz x0, #0, 87f\n" + "ld1 { v28.b }[0], [x11]\n" "87:" // Oddments: Load (4, 4): Bit 2: End + "ldr d4, [x23, #0x98]\n" "usubl v28.8h, v28.8b, v9.8b\n" - "ldr d4, [x3, #0x98]\n" - "smlal v8.4s, v28.4h, v3.4h\n" - "ldr x7, [x25, #0xe8]\n" - "add x7, x7, x10\n" - "smlal2 v5.4s, v28.8h, v3.8h\n" "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x17, [x20, #0xe8]\n" + "smlal v6.4s, v28.4h, v3.4h\n" + "smlal2 v5.4s, v28.8h, v3.8h\n" + "add x17, x17, x24\n" "smlal v15.4s, v24.4h, v4.4h\n" - "smlal2 v18.4s, v24.8h, v4.8h\n" - "smlal v16.4s, v22.4h, v4.4h\n" - "smlal2 v21.4s, v22.8h, v4.8h\n" - "smlal v7.4s, 
v28.4h, v4.4h\n" - "smlal2 v17.4s, v28.8h, v4.8h\n" - "tbz x4, #2, 89f\n" - "ld1 { v26.s }[0], [x7], #0x4\n" - "tbz x4, #1, 88f\n" - "ld1 { v26.h }[2], [x7], #0x2\n" - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[6], [x7]\n" + "smlal2 v16.4s, v24.8h, v4.8h\n" + "smlal v17.4s, v22.4h, v4.4h\n" + "smlal2 v8.4s, v22.8h, v4.8h\n" + "smlal v10.4s, v28.4h, v4.4h\n" + "smlal2 v7.4s, v28.8h, v4.8h\n" + "tbz x0, #2, 89f\n" + "ld1 { v26.s }[0], [x17], #0x4\n" + "tbz x0, #1, 88f\n" + "ld1 { v26.h }[2], [x17], #0x2\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[6], [x17]\n" "b 91f\n" "88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[4], [x7]\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[4], [x17]\n" "b 91f\n" "89:" // Oddments: Load (4, 5): Bit 2: Unset - "tbz x4, #1, 90f\n" - "ld1 { v26.h }[0], [x7], #0x2\n" - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[2], [x7]\n" + "tbz x0, #1, 90f\n" + "ld1 { v26.h }[0], [x17], #0x2\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[2], [x17]\n" "b 91f\n" "90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 91f\n" - "ld1 { v26.b }[0], [x7]\n" + "tbz x0, #0, 91f\n" + "ld1 { v26.b }[0], [x17]\n" "91:" // Oddments: Load (4, 5): Bit 2: End + "ldr d0, [x23, #0xa0]\n" "usubl v26.8h, v26.8b, v9.8b\n" - "ldr d0, [x3, #0xa0]\n" - "smlal v8.4s, v26.4h, v4.4h\n" - "ldr x26, [x25, #0xf0]\n" - "add x26, x26, x10\n" - "smlal2 v5.4s, v26.8h, v4.8h\n" "ssubl v0.8h, v0.8b, v14.8b\n" + "ldr x5, [x20, #0xf0]\n" + "smlal v6.4s, v26.4h, v4.4h\n" + "smlal2 v5.4s, v26.8h, v4.8h\n" + "add x5, x5, x24\n" "smlal v15.4s, v27.4h, v0.4h\n" - "smlal2 v18.4s, v27.8h, v0.8h\n" - "smlal v16.4s, v23.4h, v0.4h\n" - "smlal2 v21.4s, v23.8h, v0.8h\n" - "tbz x4, #2, 93f\n" - "ld1 { v25.s }[0], [x26], #0x4\n" - "tbz x4, #1, 92f\n" - "ld1 { v25.h }[2], [x26], #0x2\n" - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[6], [x26]\n" + "smlal2 v16.4s, v27.8h, v0.8h\n" + "smlal v17.4s, v23.4h, v0.4h\n" + "smlal2 v8.4s, v23.8h, v0.8h\n" + "tbz x0, #2, 93f\n" + "ld1 { v25.s }[0], [x5], #0x4\n" + "tbz x0, #1, 92f\n" + "ld1 { v25.h }[2], [x5], #0x2\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[6], [x5]\n" "b 95f\n" "92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[4], [x26]\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[4], [x5]\n" "b 95f\n" "93:" // Oddments: Load (5, 0): Bit 2: Unset - "tbz x4, #1, 94f\n" - "ld1 { v25.h }[0], [x26], #0x2\n" - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[2], [x26]\n" + "tbz x0, #1, 94f\n" + "ld1 { v25.h }[0], [x5], #0x2\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[2], [x5]\n" "b 95f\n" "94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 95f\n" - "ld1 { v25.b }[0], [x26]\n" + "tbz x0, #0, 95f\n" + "ld1 { v25.b }[0], [x5]\n" "95:" // Oddments: Load (5, 0): Bit 2: End "usubl v25.8h, v25.8b, v9.8b\n" - "ldr x23, [x25, #0xf8]\n" - "smlal v7.4s, v25.4h, v0.4h\n" - "add x23, x23, x10\n" - "smlal2 v17.4s, v25.8h, v0.8h\n" - "tbz x4, #2, 97f\n" - "ld1 { v24.s }[0], [x23], #0x4\n" - "tbz x4, #1, 96f\n" - "ld1 { v24.h }[2], [x23], #0x2\n" - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[6], [x23]\n" + "ldr x25, [x20, #0xf8]\n" + "smlal v10.4s, v25.4h, v0.4h\n" + "smlal2 v7.4s, v25.8h, v0.8h\n" + "add x25, x25, x24\n" + "tbz x0, #2, 97f\n" + "ld1 { v24.s }[0], [x25], #0x4\n" + "tbz x0, #1, 96f\n" + "ld1 { v24.h }[2], [x25], #0x2\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[6], [x25]\n" "b 99f\n" "96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[4], [x23]\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[4], 
[x25]\n" "b 99f\n" "97:" // Oddments: Load (5, 1): Bit 2: Unset - "tbz x4, #1, 98f\n" - "ld1 { v24.h }[0], [x23], #0x2\n" - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[2], [x23]\n" + "tbz x0, #1, 98f\n" + "ld1 { v24.h }[0], [x25], #0x2\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[2], [x25]\n" "b 99f\n" "98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 99f\n" - "ld1 { v24.b }[0], [x23]\n" + "tbz x0, #0, 99f\n" + "ld1 { v24.b }[0], [x25]\n" "99:" // Oddments: Load (5, 1): Bit 2: End + "ldr d1, [x23, #0xa8]\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ldr d1, [x3, #0xa8]\n" - "smlal v8.4s, v24.4h, v0.4h\n" - "ldr x22, [x25, #0x100]\n" - "add x22, x22, x10\n" - "smlal2 v5.4s, v24.8h, v0.8h\n" "ssubl v1.8h, v1.8b, v14.8b\n" + "ldr x26, [x20, #0x100]\n" + "smlal v6.4s, v24.4h, v0.4h\n" + "smlal2 v5.4s, v24.8h, v0.8h\n" + "add x26, x26, x24\n" "smlal v15.4s, v23.4h, v1.4h\n" - "smlal2 v18.4s, v23.8h, v1.8h\n" - "smlal v16.4s, v31.4h, v1.4h\n" - "smlal2 v21.4s, v31.8h, v1.8h\n" - "smlal v7.4s, v24.4h, v1.4h\n" - "smlal2 v17.4s, v24.8h, v1.8h\n" - "tbz x4, #2, 101f\n" - "ld1 { v27.s }[0], [x22], #0x4\n" - "tbz x4, #1, 100f\n" - "ld1 { v27.h }[2], [x22], #0x2\n" - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[6], [x22]\n" + "smlal2 v16.4s, v23.8h, v1.8h\n" + "smlal v17.4s, v31.4h, v1.4h\n" + "smlal2 v8.4s, v31.8h, v1.8h\n" + "smlal v10.4s, v24.4h, v1.4h\n" + "smlal2 v7.4s, v24.8h, v1.8h\n" + "tbz x0, #2, 101f\n" + "ld1 { v27.s }[0], [x26], #0x4\n" + "tbz x0, #1, 100f\n" + "ld1 { v27.h }[2], [x26], #0x2\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[6], [x26]\n" "b 103f\n" "100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[4], [x22]\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[4], [x26]\n" "b 103f\n" "101:" // Oddments: Load (5, 2): Bit 2: Unset - "tbz x4, #1, 102f\n" - "ld1 { v27.h }[0], [x22], #0x2\n" - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[2], [x22]\n" + "tbz x0, #1, 102f\n" + "ld1 { v27.h }[0], [x26], #0x2\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[2], [x26]\n" "b 103f\n" "102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 103f\n" - "ld1 { v27.b }[0], [x22]\n" + "tbz x0, #0, 103f\n" + "ld1 { v27.b }[0], [x26]\n" "103:" // Oddments: Load (5, 2): Bit 2: End + "ldr d2, [x23, #0xb0]\n" "usubl v27.8h, v27.8b, v9.8b\n" - "ldr d2, [x3, #0xb0]\n" - "smlal v8.4s, v27.4h, v1.4h\n" - "ldr x20, [x25, #0x108]\n" - "add x20, x20, x10\n" - "smlal2 v5.4s, v27.8h, v1.8h\n" "ssubl v2.8h, v2.8b, v14.8b\n" + "ldr x12, [x20, #0x108]\n" + "smlal v6.4s, v27.4h, v1.4h\n" + "smlal2 v5.4s, v27.8h, v1.8h\n" + "add x12, x12, x24\n" "smlal v15.4s, v31.4h, v2.4h\n" - "smlal2 v18.4s, v31.8h, v2.8h\n" - "smlal v16.4s, v30.4h, v2.4h\n" - "smlal2 v21.4s, v30.8h, v2.8h\n" - "smlal v7.4s, v27.4h, v2.4h\n" - "smlal2 v17.4s, v27.8h, v2.8h\n" - "tbz x4, #2, 105f\n" - "ld1 { v25.s }[0], [x20], #0x4\n" - "tbz x4, #1, 104f\n" - "ld1 { v25.h }[2], [x20], #0x2\n" - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[6], [x20]\n" + "smlal2 v16.4s, v31.8h, v2.8h\n" + "smlal v17.4s, v30.4h, v2.4h\n" + "smlal2 v8.4s, v30.8h, v2.8h\n" + "smlal v10.4s, v27.4h, v2.4h\n" + "smlal2 v7.4s, v27.8h, v2.8h\n" + "tbz x0, #2, 105f\n" + "ld1 { v25.s }[0], [x12], #0x4\n" + "tbz x0, #1, 104f\n" + "ld1 { v25.h }[2], [x12], #0x2\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[6], [x12]\n" "b 107f\n" "104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[4], [x20]\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[4], [x12]\n" "b 107f\n" "105:" // Oddments: Load (5, 3): Bit 2: Unset - "tbz 
x4, #1, 106f\n" - "ld1 { v25.h }[0], [x20], #0x2\n" - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[2], [x20]\n" + "tbz x0, #1, 106f\n" + "ld1 { v25.h }[0], [x12], #0x2\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[2], [x12]\n" "b 107f\n" "106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 107f\n" - "ld1 { v25.b }[0], [x20]\n" + "tbz x0, #0, 107f\n" + "ld1 { v25.b }[0], [x12]\n" "107:" // Oddments: Load (5, 3): Bit 2: End + "ldr d3, [x23, #0xb8]\n" "usubl v25.8h, v25.8b, v9.8b\n" - "ldr d3, [x3, #0xb8]\n" - "smlal v8.4s, v25.4h, v2.4h\n" - "ldr x13, [x25, #0x110]\n" - "add x13, x13, x10\n" - "smlal2 v5.4s, v25.8h, v2.8h\n" "ssubl v3.8h, v3.8b, v14.8b\n" + "ldr x14, [x20, #0x110]\n" + "smlal v6.4s, v25.4h, v2.4h\n" + "smlal2 v5.4s, v25.8h, v2.8h\n" + "add x14, x14, x24\n" "smlal v15.4s, v30.4h, v3.4h\n" - "smlal2 v18.4s, v30.8h, v3.8h\n" - "smlal v16.4s, v28.4h, v3.4h\n" - "smlal2 v21.4s, v28.8h, v3.8h\n" - "smlal v7.4s, v25.4h, v3.4h\n" - "smlal2 v17.4s, v25.8h, v3.8h\n" - "tbz x4, #2, 109f\n" - "ld1 { v24.s }[0], [x13], #0x4\n" - "tbz x4, #1, 108f\n" - "ld1 { v24.h }[2], [x13], #0x2\n" - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[6], [x13]\n" + "smlal2 v16.4s, v30.8h, v3.8h\n" + "smlal v17.4s, v28.4h, v3.4h\n" + "smlal2 v8.4s, v28.8h, v3.8h\n" + "smlal v10.4s, v25.4h, v3.4h\n" + "smlal2 v7.4s, v25.8h, v3.8h\n" + "tbz x0, #2, 109f\n" + "ld1 { v24.s }[0], [x14], #0x4\n" + "tbz x0, #1, 108f\n" + "ld1 { v24.h }[2], [x14], #0x2\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[6], [x14]\n" "b 111f\n" "108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[4], [x13]\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[4], [x14]\n" "b 111f\n" "109:" // Oddments: Load (5, 4): Bit 2: Unset - "tbz x4, #1, 110f\n" - "ld1 { v24.h }[0], [x13], #0x2\n" - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[2], [x13]\n" + "tbz x0, #1, 110f\n" + "ld1 { v24.h }[0], [x14], #0x2\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[2], [x14]\n" "b 111f\n" "110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 111f\n" - "ld1 { v24.b }[0], [x13]\n" + "tbz x0, #0, 111f\n" + "ld1 { v24.b }[0], [x14]\n" "111:" // Oddments: Load (5, 4): Bit 2: End + "ldr d4, [x23, #0xc0]\n" "usubl v24.8h, v24.8b, v9.8b\n" - "ldr d4, [x3, #0xc0]\n" - "smlal v8.4s, v24.4h, v3.4h\n" - "ldr x21, [x25, #0x118]\n" - "add x21, x21, x10\n" - "smlal2 v5.4s, v24.8h, v3.8h\n" "ssubl v4.8h, v4.8b, v14.8b\n" + "ldr x21, [x20, #0x118]\n" + "smlal v6.4s, v24.4h, v3.4h\n" + "smlal2 v5.4s, v24.8h, v3.8h\n" + "add x21, x21, x24\n" "smlal v15.4s, v28.4h, v4.4h\n" - "smlal2 v18.4s, v28.8h, v4.8h\n" - "smlal v16.4s, v26.4h, v4.4h\n" - "smlal2 v21.4s, v26.8h, v4.8h\n" - "smlal v7.4s, v24.4h, v4.4h\n" - "smlal2 v17.4s, v24.8h, v4.8h\n" - "tbz x4, #2, 113f\n" + "smlal2 v16.4s, v28.8h, v4.8h\n" + "smlal v17.4s, v26.4h, v4.4h\n" + "smlal2 v8.4s, v26.8h, v4.8h\n" + "smlal v10.4s, v24.4h, v4.4h\n" + "smlal2 v7.4s, v24.8h, v4.8h\n" + "tbz x0, #2, 113f\n" "ld1 { v27.s }[0], [x21], #0x4\n" - "tbz x4, #1, 112f\n" + "tbz x0, #1, 112f\n" "ld1 { v27.h }[2], [x21], #0x2\n" - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[6], [x21]\n" "b 115f\n" "112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[4], [x21]\n" "b 115f\n" "113:" // Oddments: Load (5, 5): Bit 2: Unset - "tbz x4, #1, 114f\n" + "tbz x0, #1, 114f\n" "ld1 { v27.h }[0], [x21], #0x2\n" - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[2], [x21]\n" "b 115f\n" "114:" // Oddments: Load (5, 5): Bit 2: Unset: 
Bit 1: Unset - "tbz x4, #0, 115f\n" + "tbz x0, #0, 115f\n" "ld1 { v27.b }[0], [x21]\n" "115:" // Oddments: Load (5, 5): Bit 2: End "usubl v27.8h, v27.8b, v9.8b\n" - "smlal v8.4s, v27.4h, v4.4h\n" + "smlal v6.4s, v27.4h, v4.4h\n" "smlal2 v5.4s, v27.8h, v4.8h\n" - "tbz x4, #2, 117f\n" - "ld1 { v6.4s }, [x2], #0x10\n" - "ld1 { v19.4s }, [x5], #0x10\n" - "tbz x4, #1, 116f\n" - "ld1 { v20.d }[0], [x2], #0x8\n" - "ld1 { v12.d }[0], [x5], #0x8\n" - "tbz x4, #0, 119f\n" - "ld1 { v20.s }[2], [x2]\n" - "ld1 { v12.s }[2], [x5]\n" + "tbz x0, #2, 117f\n" + "ld1 { v12.4s }, [x10], #0x10\n" + "ld1 { v19.4s }, [x1], #0x10\n" + "tbz x0, #1, 116f\n" + "ld1 { v20.d }[0], [x10], #0x8\n" + "ld1 { v29.d }[0], [x1], #0x8\n" + "tbz x0, #0, 119f\n" + "ld1 { v20.s }[2], [x10]\n" + "ld1 { v29.s }[2], [x1]\n" "b 119f\n" "116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset - "tbz x4, #0, 119f\n" - "ld1 { v20.s }[0], [x2]\n" - "ld1 { v12.s }[0], [x5]\n" + "tbz x0, #0, 119f\n" + "ld1 { v20.s }[0], [x10]\n" + "ld1 { v29.s }[0], [x1]\n" "b 119f\n" "117:" // Oddments: Load requant params: Bit 2: Unset - "tbz x4, #1, 118f\n" - "ld1 { v6.d }[0], [x2], #0x8\n" - "ld1 { v19.d }[0], [x5], #0x8\n" - "tbz x4, #0, 119f\n" - "ld1 { v6.s }[2], [x2]\n" - "ld1 { v19.s }[2], [x5]\n" + "tbz x0, #1, 118f\n" + "ld1 { v12.d }[0], [x10], #0x8\n" + "ld1 { v19.d }[0], [x1], #0x8\n" + "tbz x0, #0, 119f\n" + "ld1 { v12.s }[2], [x10]\n" + "ld1 { v19.s }[2], [x1]\n" "b 119f\n" "118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 119f\n" - "ld1 { v6.s }[0], [x2]\n" - "ld1 { v19.s }[0], [x5]\n" + "tbz x0, #0, 119f\n" + "ld1 { v12.s }[0], [x10]\n" + "ld1 { v19.s }[0], [x1]\n" "119:" // Oddments: Load requant params: Bit 2: End - "sqrdmulh v15.4s, v15.4s, v6.4s\n" - "add x17, x17, x1\n" - "sqrdmulh v18.4s, v18.4s, v20.4s\n" - "add x16, x16, x1\n" - "sqrdmulh v16.4s, v16.4s, v6.4s\n" - "add x6, x6, x1\n" - "sqrdmulh v21.4s, v21.4s, v20.4s\n" - "add x8, x8, x1\n" - "sqrdmulh v7.4s, v7.4s, v6.4s\n" - "and v28.16b, v15.16b, v19.16b\n" - "and v26.16b, v18.16b, v12.16b\n" - "and v29.16b, v16.16b, v19.16b\n" - "sshr v28.4s, v28.4s, #0x1f\n" + "sqdmulh v15.4s, v15.4s, v12.4s\n" + "sqdmulh v17.4s, v17.4s, v12.4s\n" + "add x16, x16, x22\n" + "add x8, x8, x22\n" + "sqdmulh v10.4s, v10.4s, v12.4s\n" + "sqdmulh v6.4s, v6.4s, v12.4s\n" + "add x4, x4, x22\n" + "add x7, x7, x22\n" + "and v23.16b, v15.16b, v19.16b\n" + "sqdmulh v16.4s, v16.4s, v20.4s\n" + "and v22.16b, v17.16b, v19.16b\n" + "sqdmulh v8.4s, v8.4s, v20.4s\n" + "and v21.16b, v10.16b, v19.16b\n" + "sqdmulh v7.4s, v7.4s, v20.4s\n" + "and v26.16b, v6.16b, v19.16b\n" + "sqdmulh v5.4s, v5.4s, v20.4s\n" + "sshr v23.4s, v23.4s, #0x1f\n" + "and v4.16b, v16.16b, v29.16b\n" + "sshr v22.4s, v22.4s, #0x1f\n" + "and v2.16b, v8.16b, v29.16b\n" + "sshr v21.4s, v21.4s, #0x1f\n" + "and v3.16b, v7.16b, v29.16b\n" "sshr v26.4s, v26.4s, #0x1f\n" - "sshr v29.4s, v29.4s, #0x1f\n" - "sqadd v15.4s, v15.4s, v28.4s\n" - "sqadd v18.4s, v18.4s, v26.4s\n" - "sqadd v16.4s, v16.4s, v29.4s\n" - "and v4.16b, v21.16b, v12.16b\n" - "srshl v15.4s, v15.4s, v19.4s\n" - "srshl v18.4s, v18.4s, v12.4s\n" - "srshl v16.4s, v16.4s, v19.4s\n" + "and v25.16b, v5.16b, v29.16b\n" + "sqadd v15.4s, v15.4s, v23.4s\n" "sshr v4.4s, v4.4s, #0x1f\n" - "add v15.4s, v15.4s, v10.4s\n" - "add v18.4s, v18.4s, v10.4s\n" - "add v16.4s, v16.4s, v10.4s\n" - "smin v15.4s, v15.4s, v13.4s\n" - "smin v18.4s, v18.4s, v13.4s\n" - "smin v16.4s, v16.4s, v13.4s\n" - "smax v15.4s, v15.4s, v11.4s\n" - "smax v18.4s, v18.4s, v11.4s\n" - 
"smax v16.4s, v16.4s, v11.4s\n" - "sqadd v21.4s, v21.4s, v4.4s\n" - "uzp1 v15.16b, v15.16b, v18.16b\n" - "and v25.16b, v7.16b, v19.16b\n" - "uzp1 v15.16b, v15.16b, v15.16b\n" - "srshl v21.4s, v21.4s, v12.4s\n" + "sqadd v17.4s, v17.4s, v22.4s\n" + "sshr v2.4s, v2.4s, #0x1f\n" + "sqadd v10.4s, v10.4s, v21.4s\n" + "sshr v3.4s, v3.4s, #0x1f\n" + "sqadd v6.4s, v6.4s, v26.4s\n" "sshr v25.4s, v25.4s, #0x1f\n" - "sqrdmulh v17.4s, v17.4s, v20.4s\n" - "sqrdmulh v8.4s, v8.4s, v6.4s\n" - "add v21.4s, v21.4s, v10.4s\n" - "sqadd v7.4s, v7.4s, v25.4s\n" - "and v31.16b, v17.16b, v12.16b\n" - "smin v21.4s, v21.4s, v13.4s\n" - "and v24.16b, v8.16b, v19.16b\n" - "srshl v7.4s, v7.4s, v19.4s\n" - "smax v21.4s, v21.4s, v11.4s\n" - "sshr v31.4s, v31.4s, #0x1f\n" - "sshr v24.4s, v24.4s, #0x1f\n" - "uzp1 v16.16b, v16.16b, v21.16b\n" - "add v7.4s, v7.4s, v10.4s\n" - "uzp1 v16.16b, v16.16b, v16.16b\n" - "sqadd v17.4s, v17.4s, v31.4s\n" - "smin v7.4s, v7.4s, v13.4s\n" - "sqadd v8.4s, v8.4s, v24.4s\n" - "sqrdmulh v5.4s, v5.4s, v20.4s\n" - "smax v7.4s, v7.4s, v11.4s\n" - "srshl v17.4s, v17.4s, v12.4s\n" - "srshl v8.4s, v8.4s, v19.4s\n" - "and v1.16b, v5.16b, v12.16b\n" - "add v17.4s, v17.4s, v10.4s\n" - "add v8.4s, v8.4s, v10.4s\n" - "sshr v1.4s, v1.4s, #0x1f\n" - "smin v17.4s, v17.4s, v13.4s\n" - "smin v8.4s, v8.4s, v13.4s\n" - "sqadd v5.4s, v5.4s, v1.4s\n" - "smax v17.4s, v17.4s, v11.4s\n" - "smax v8.4s, v8.4s, v11.4s\n" - "srshl v5.4s, v5.4s, v12.4s\n" - "uzp1 v7.16b, v7.16b, v17.16b\n" - "uzp1 v7.16b, v7.16b, v7.16b\n" - "add v5.4s, v5.4s, v10.4s\n" - "smin v5.4s, v5.4s, v13.4s\n" - "smax v5.4s, v5.4s, v11.4s\n" - "uzp1 v8.16b, v8.16b, v5.16b\n" - "uzp1 v8.16b, v8.16b, v8.16b\n" - "tbz x4, #2, 121f\n" - "st1 { v15.s }[0], [x17], #0x4\n" - "st1 { v16.s }[0], [x16], #0x4\n" - "st1 { v7.s }[0], [x6], #0x4\n" - "st1 { v8.s }[0], [x8], #0x4\n" - "tbz x4, #1, 120f\n" - "st1 { v15.h }[2], [x17], #0x2\n" - "st1 { v16.h }[2], [x16], #0x2\n" - "st1 { v7.h }[2], [x6], #0x2\n" - "st1 { v8.h }[2], [x8], #0x2\n" - "tbz x4, #0, 123f\n" - "st1 { v15.b }[6], [x17], #0x1\n" - "st1 { v16.b }[6], [x16], #0x1\n" - "st1 { v7.b }[6], [x6], #0x1\n" - "st1 { v8.b }[6], [x8], #0x1\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "sqadd v16.4s, v16.4s, v4.4s\n" + "srshl v17.4s, v17.4s, v19.4s\n" + "sqadd v8.4s, v8.4s, v2.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "sqadd v7.4s, v7.4s, v3.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "sqadd v5.4s, v5.4s, v25.4s\n" + "srshl v16.4s, v16.4s, v29.4s\n" + "sqxtn v15.4h, v15.4s\n" + "srshl v8.4s, v8.4s, v29.4s\n" + "sqxtn v17.4h, v17.4s\n" + "srshl v7.4s, v7.4s, v29.4s\n" + "sqxtn v10.4h, v10.4s\n" + "srshl v5.4s, v5.4s, v29.4s\n" + "sqxtn v6.4h, v6.4s\n" + "sqxtn2 v15.8h, v16.4s\n" + "sqxtn2 v17.8h, v8.4s\n" + "sqxtn2 v10.8h, v7.4s\n" + "sqxtn2 v6.8h, v5.4s\n" + "sqadd v15.8h, v15.8h, v18.8h\n" + "sqadd v17.8h, v17.8h, v18.8h\n" + "sqadd v10.8h, v10.8h, v18.8h\n" + "sqadd v6.8h, v6.8h, v18.8h\n" + "smax v15.8h, v15.8h, v11.8h\n" + "smax v17.8h, v17.8h, v11.8h\n" + "smax v10.8h, v10.8h, v11.8h\n" + "smax v6.8h, v6.8h, v11.8h\n" + "smin v15.8h, v15.8h, v13.8h\n" + "smin v17.8h, v17.8h, v13.8h\n" + "smin v10.8h, v10.8h, v13.8h\n" + "smin v6.8h, v6.8h, v13.8h\n" + "uzp1 v15.16b, v15.16b, v15.16b\n" + "uzp1 v17.16b, v17.16b, v17.16b\n" + "uzp1 v10.16b, v10.16b, v10.16b\n" + "uzp1 v6.16b, v6.16b, v6.16b\n" + "tbz x0, #2, 121f\n" + "st1 { v15.s }[0], [x16], #0x4\n" + "st1 { v17.s }[0], [x8], #0x4\n" + "st1 { v10.s }[0], [x4], #0x4\n" + "st1 { v6.s }[0], [x7], #0x4\n" + "tbz x0, #1, 120f\n" + "st1 { v15.h }[2], [x16], 
#0x2\n" + "st1 { v17.h }[2], [x8], #0x2\n" + "st1 { v10.h }[2], [x4], #0x2\n" + "st1 { v6.h }[2], [x7], #0x2\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[6], [x16], #0x1\n" + "st1 { v17.b }[6], [x8], #0x1\n" + "st1 { v10.b }[6], [x4], #0x1\n" + "st1 { v6.b }[6], [x7], #0x1\n" "b 123f\n" "120:" // Oddments: Bit 2: Bit 1: Unset - "tbz x4, #0, 123f\n" - "st1 { v15.b }[4], [x17], #0x1\n" - "st1 { v16.b }[4], [x16], #0x1\n" - "st1 { v7.b }[4], [x6], #0x1\n" - "st1 { v8.b }[4], [x8], #0x1\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[4], [x16], #0x1\n" + "st1 { v17.b }[4], [x8], #0x1\n" + "st1 { v10.b }[4], [x4], #0x1\n" + "st1 { v6.b }[4], [x7], #0x1\n" "b 123f\n" "121:" // Oddments: Bit 2: Unset - "tbz x4, #1, 122f\n" - "st1 { v15.h }[0], [x17], #0x2\n" - "st1 { v16.h }[0], [x16], #0x2\n" - "st1 { v7.h }[0], [x6], #0x2\n" - "st1 { v8.h }[0], [x8], #0x2\n" - "tbz x4, #0, 123f\n" - "st1 { v15.b }[2], [x17], #0x1\n" - "st1 { v16.b }[2], [x16], #0x1\n" - "st1 { v7.b }[2], [x6], #0x1\n" - "st1 { v8.b }[2], [x8], #0x1\n" + "tbz x0, #1, 122f\n" + "st1 { v15.h }[0], [x16], #0x2\n" + "st1 { v17.h }[0], [x8], #0x2\n" + "st1 { v10.h }[0], [x4], #0x2\n" + "st1 { v6.h }[0], [x7], #0x2\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[2], [x16], #0x1\n" + "st1 { v17.b }[2], [x8], #0x1\n" + "st1 { v10.b }[2], [x4], #0x1\n" + "st1 { v6.b }[2], [x7], #0x1\n" "b 123f\n" "122:" // Oddments: Bit 2: Unset: Bit 1: Unset - "tbz x4, #0, 123f\n" - "st1 { v15.b }[0], [x17], #0x1\n" - "st1 { v16.b }[0], [x16], #0x1\n" - "st1 { v7.b }[0], [x6], #0x1\n" - "st1 { v8.b }[0], [x8], #0x1\n" + "tbz x0, #0, 123f\n" + "st1 { v15.b }[0], [x16], #0x1\n" + "st1 { v17.b }[0], [x8], #0x1\n" + "st1 { v10.b }[0], [x4], #0x1\n" + "st1 { v6.b }[0], [x7], #0x1\n" "123:" // Oddments: Bit 2: End - "124:" // End - : : [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (¶ms) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp index 2bfeac0556..6bdcca115c 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp +++ 
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp index 2bfeac0556..6bdcca115c 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,28 +28,23 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const void *, const arm_gemm::Requantize32&, const unsigned int, const unsigned int); -struct a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst +class a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst : public GenericDepthfirstKernelStrategy<uint8_t, int8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef int8_t weight_type; - typedef uint8_t return_type; - - typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const void *, const arm_gemm::Requantize32&, const unsigned int, const unsigned int); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int n_output_points = 9; + KernelType kernel = a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl; - kern_type kernel = a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl; + public: + a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) : GenericDepthfirstKernelStrategy<uint8_t, int8_t, uint8_t, int32_t>(9, arm_gemm::VLType::None) {} - a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) {} + KernelType get_kernel() const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv +#endif // defined(__aarch64__)
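This hunk shows the shape of the header refactor repeated through the rest of this listing: per-kernel typedefs and constants such as n_output_points and vl_type become constructor arguments to a shared parameterised base, and the kernel pointer is exposed through a virtual get_kernel() override instead of a bare struct member. A self-contained sketch of the pattern, with hypothetical names since the real GenericDepthfirstKernelStrategy declaration is not part of this diff:

```cpp
#include <cstdio>

// Hypothetical stand-in for the parameterised base: it owns the metadata
// that every kernel previously re-declared as typedefs and constants.
class KernelStrategy
{
    unsigned int m_n_output_points;

public:
    using KernelType = void (*)(unsigned int);

    explicit KernelStrategy(unsigned int n_output_points)
    : m_n_output_points(n_output_points) {}
    virtual ~KernelStrategy() = default;

    unsigned int n_output_points() const { return m_n_output_points; }
    virtual KernelType get_kernel() const = 0;  // was: a bare struct member
};

void my_kernel_impl(unsigned int n) { std::printf("%u output points\n", n); }

// Each concrete kernel shrinks to one constructor call plus one override.
class MyKernel : public KernelStrategy
{
public:
    MyKernel() : KernelStrategy(9) {}  // was: constexpr static ... = 9
    KernelType get_kernel() const override { return my_kernel_impl; }
};
```

The payoff is visible in the deletions above: the duplicated declarations collapse into a single constructor call, and the surrounding depthwise framework can drive any kernel through the base-class interface.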
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp index 802030573e..394df363da 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited.
* * SPDX-License-Identifier: MIT * @@ -28,31 +28,25 @@ #pragma once +#if defined(__aarch64__) + namespace arm_conv { namespace depthwise { void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const int8_t *, const int32_t *, const unsigned int, const unsigned int, const int32_t *, const int32_t *, const int32_t *, const arm_gemm::Requantize32&); -struct a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst +struct a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst : GenericDepthfirstMultiplierKernelStrategy<uint8_t, int8_t, uint8_t, int32_t> { - typedef int32_t bias_type; - typedef uint8_t input_type; - typedef int8_t weight_type; - typedef uint8_t return_type; - - typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const int8_t *, const int32_t *, const unsigned int, const unsigned int, const int32_t *, const int32_t *, const int32_t *, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::None; - - constexpr static unsigned int output_rows(void) { return 2; }; - constexpr static unsigned int output_cols(void) { return 8; }; - - constexpr static unsigned int output_col_regs(void) { return 2; }; - - kern_type kernel = a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; - - a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) {} + using Parent = GenericDepthfirstMultiplierKernelStrategy<uint8_t, int8_t, uint8_t, int32_t>; + a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) + : Parent(2, 8, arm_gemm::VLType::None) + { + } + Parent::KernelType kernel = a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv + +#endif // defined(__aarch64__) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index 1cfea9dcbd..1c1fb25e1f 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited.
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(2, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - 
reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp index af8af1840a..d49b14eeaf 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp index af8af1840a..d49b14eeaf 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited.
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 3; constexpr static unsigned int output_cols = 3; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) {} + sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(3, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - 
reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp index 60234c8a86..ac6ae284fd 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 4; constexpr static unsigned int output_cols = 4; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) {} + sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(4, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - 
reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index 5968309927..82173ee71f 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(2, 3, 2) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - 
reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index 4a9bd33a1e..f5d4189a47 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); void sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); -class sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16> { private: - typedef void (*indirect_kern_type)(const __fp16 *const *const, __fp16 *const *const, const void *, unsigned int, const __fp16, const __fp16); - indirect_kern_type m_indirect_kernel = sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const __fp16 *, int64_t, int64_t, __fp16 *, int64_t, int64_t, const void *, unsigned int, const __fp16, const __fp16); - direct_kern_type m_direct_kernel = sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef __fp16 return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = __fp16; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; @@ -59,63 +56,16 @@ class sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<__fp16, __fp16, __fp16, __fp16>(2, 5, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const __fp16 *const *>(input_ptrs), - 
reinterpret_cast<__fp16 *const *>(outptrs), - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const __fp16 *>(inptr), ld_input_row, ld_input_col, - static_cast<__fp16 *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const __fp16 *>(activation_min), - *static_cast<const __fp16 *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(__ARM_FP16_ARGS) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index e07e6314f8..d7b1de2062 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(2, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float 
*>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp index eb9de9fa01..41ad193364 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int 
kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 3; constexpr static unsigned int output_cols = 3; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) {} + sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(3, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp index d7be9b122e..6073b2ba7d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 4; constexpr static unsigned int output_cols = 4; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) {} + sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(4, 3, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float 
*>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index 28d44d0b5f..17ac74e223 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int 
kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; @@ -59,63 +56,16 @@ class sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; - - sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(2, 3, 2) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index 751874ffbf..2449c96637 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
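A second part of the same refactor is visible in the deleted `indirect_kernel`/`direct_kernel` wrappers: each strategy used to cast `void` pointers to the concrete types before every call, while the new code simply returns a typed function pointer through `get_indirect_kernel()`/`get_direct_kernel()`. A hedged mock of that shape (not the library's actual base class) follows; the shared call site replaces one casting wrapper per kernel.

```cpp
#include <cstdio>

// Mock base: the real DepthwiseDepthfirstStrategy differs in detail.
template <typename TInput, typename TOutput, typename TAccum>
class StrategyBase
{
public:
  using IndirectKernelType = void (*)(const TInput *const *, TOutput *const *,
                                      const void *, unsigned int, TAccum, TAccum);
  virtual ~StrategyBase() = default;
  virtual IndirectKernelType get_indirect_kernel() const = 0;

  // One shared, type-safe call site instead of a casting wrapper per kernel.
  void run_indirect(const TInput *const *inptrs, TOutput *const *outptrs,
                    const void *params, unsigned int n_channels,
                    TAccum act_min, TAccum act_max) const
  {
    get_indirect_kernel()(inptrs, outptrs, params, n_channels, act_min, act_max);
  }
};

// Stand-in for an assembly implementation; it ignores its pointer arguments.
void fp32_impl(const float *const *, float *const *, const void *,
               unsigned int n_channels, float, float)
{
  std::printf("indirect kernel over %u channels\n", n_channels);
}

class Fp32Strategy final : public StrategyBase<float, float, float>
{
  IndirectKernelType m_indirect_kernel = fp32_impl;

public:
  IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; }
};

int main()
{
  Fp32Strategy s;
  s.run_indirect(nullptr, nullptr, nullptr, 8, 0.0f, 6.0f);
  return 0;
}
```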
* * SPDX-License-Identifier: MIT * @@ -28,7 +28,7 @@ #pragma once -#if __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -36,19 +36,16 @@ namespace depthwise { void sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); void sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); -class sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirstStrategy +class sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<float, float, float, float> { private: - typedef void (*indirect_kern_type)(const float *const *const, float *const *const, const void *, unsigned int, const float, const float); - indirect_kern_type m_indirect_kernel = sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; - - typedef void (*direct_kern_type)(const unsigned int, const unsigned int, const float *, int64_t, int64_t, float *, int64_t, int64_t, const void *, unsigned int, const float, const float); - direct_kern_type m_direct_kernel = sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; + using Parent = DepthwiseDepthfirstStrategy<float, float, float, float>; + Parent::IndirectKernelType m_indirect_kernel = sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl; + Parent::DirectKernelType m_direct_kernel = sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl; public: - typedef float return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; + using return_type = float; + constexpr static auto vl_type = arm_gemm::VLType::SVE; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; @@ -59,63 +56,16 @@ class sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst : public IDepthwiseDepthfirs constexpr static unsigned int output_rows = 2; constexpr static unsigned int output_cols = 2; - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; - - sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) + : DepthwiseDepthfirstStrategy<float, float, float, float>(2, 5, 1) {} arm_gemm::VLType get_vl_type(void) const override { return vl_type; } - unsigned int get_kernel_rows(void) const override { return kernel_rows; } - unsigned int get_kernel_cols(void) const override { return kernel_cols; } - - unsigned int get_stride_rows(void) const override { return stride_rows; } - unsigned int get_stride_cols(void) const override { return stride_cols; } - - unsigned int get_output_rows(void) const override { return output_rows; } - unsigned int get_output_cols(void) const override { return output_cols; } - - unsigned int get_input_rows(void) const override { return input_rows; } - unsigned int get_input_cols(void) const override { return input_cols; } - - void indirect_kernel( - const void *const *const input_ptrs, - void *const *const outptrs, - const void *params, - unsigned int n_channels, - const void *activation_min, - const void *activation_max - ) const override - { - m_indirect_kernel( - reinterpret_cast<const float *const *>(input_ptrs), - reinterpret_cast<float *const *>(outptrs), - params, n_channels, - *static_cast<const float 
*>(activation_min), - *static_cast<const float *>(activation_max) - ); - } - - void direct_kernel( - const unsigned int n_tile_rows, const unsigned int n_tile_cols, - const void *inptr, int64_t ld_input_row, int64_t ld_input_col, - void *outptr, int64_t ld_output_row, int64_t ld_output_col, - const void *params, unsigned int n_channels, - const void *activation_min, const void *activation_max - ) const override - { - m_direct_kernel( - n_tile_rows, n_tile_cols, - static_cast<const float *>(inptr), ld_input_row, ld_input_col, - static_cast<float *>(outptr), ld_output_row, ld_output_col, - params, n_channels, - *static_cast<const float *>(activation_min), - *static_cast<const float *>(activation_max) - ); - } + Parent::IndirectKernelType get_indirect_kernel() const override { return m_indirect_kernel; } + Parent::DirectKernelType get_direct_kernel() const override { return m_direct_kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // __aarch64__ && defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst.hpp index bd071d370c..62faca97a9 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,22 +35,14 @@ namespace depthwise { void sve_fp32_nhwc_generic_output9_mla_depthfirst_impl(const float *const *const, float *const *const, const void *, const void *, const unsigned int, const unsigned int, const float, const float); -struct sve_fp32_nhwc_generic_output9_mla_depthfirst +class sve_fp32_nhwc_generic_output9_mla_depthfirst : public GenericDepthfirstKernelStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; + KernelType kernel = sve_fp32_nhwc_generic_output9_mla_depthfirst_impl; - typedef void (*kern_type)(const float *const *const, float *const *const, const void *, const void *, const unsigned int, const unsigned int, const float, const float); + public: + sve_fp32_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) : GenericDepthfirstKernelStrategy<float, float, float, float>(9, arm_gemm::VLType::SVE) {} - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - constexpr static unsigned int n_output_points = 9; - - kern_type kernel = sve_fp32_nhwc_generic_output9_mla_depthfirst_impl; - - sve_fp32_nhwc_generic_output9_mla_depthfirst(const CPUInfo *) {} + KernelType get_kernel() const override { return kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp index 563f0fc59f..8640343747 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright 
(c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,33 +35,24 @@ namespace depthwise { void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_impl(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); -struct sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst +struct sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst : DepthfirstMultiplierStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - + using Parent = DepthfirstMultiplierStrategy<float, float, float, float>; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 3; - constexpr static unsigned int output_cols = 3; - - constexpr static unsigned int input_rows = 7; - constexpr static unsigned int input_cols = 7; - constexpr static unsigned int input_col_quads = 2; + sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst(const CPUInfo *) + : Parent(3, 3, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::SVE; } - sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp index e9378c2a12..a4ee87cce2 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -35,33 +35,24 @@ namespace depthwise { void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_impl(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); -struct sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst +struct sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst : DepthfirstMultiplierStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const void *, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - + using Parent = DepthfirstMultiplierStrategy<float, float, float, float>; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 4; - - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 8; - constexpr static unsigned int input_col_quads = 2; + sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst(const CPUInfo *) + : Parent(2, 4, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::SVE; } - sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp index 6849e562bc..e1f0b50d89 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -28,35 +28,25 @@ #pragma once -#if defined(ARM_COMPUTE_ENABLE_SVE) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl(const float *const *const, float *const *const, const float *, const float *, const unsigned int, const unsigned int, const float, const float); -struct sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst +struct sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst : GenericDepthfirstMultiplierKernelStrategy<float, float, float, float> { - typedef float bias_type; - typedef float input_type; - typedef float weight_type; - typedef float return_type; - - typedef void (*kern_type)(const float *const *const, float *const *const, const float *, const float *, const unsigned int, const unsigned int, const float, const float); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - constexpr static unsigned int output_rows(void) { return 2; }; - constexpr static unsigned int output_cols(void) { return 8; }; - - constexpr static unsigned int output_col_regs(void) { return 2; }; - - kern_type kernel = sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; - - sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) {} + using Parent = GenericDepthfirstMultiplierKernelStrategy<float, float, float, float>; + sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst(const CPUInfo *) + : Parent(2, 8, arm_gemm::VLType::SVE) + { + } + Parent::KernelType kernel = sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // defined(ARM_COMPUTE_ENABLE_SVE) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp index 39974fde88..4e2ee43374 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
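The generic strategies above (`...generic_output9...` and `...generic_with_multiplier_output2x8...`) get the same treatment: constants such as `n_output_points`, `output_rows`/`output_cols` and `vl_type` become constructor arguments of `GenericDepthfirstKernelStrategy` / `GenericDepthfirstMultiplierKernelStrategy`, and the kernel pointer is exposed via a virtual `get_kernel()`. A compact mock of that pattern, with stand-in names:

```cpp
#include <cstdio>

enum class VLType { None, SVE };

// Mock of GenericDepthfirstMultiplierKernelStrategy; names are illustrative.
class GenericMultiplierKernelStrategyBase
{
  unsigned int m_out_rows, m_out_cols;
  VLType m_vl;

public:
  using KernelType = void (*)(const float *const *, float *const *,
                              const float *, const float *,
                              unsigned int, unsigned int, float, float);

  GenericMultiplierKernelStrategyBase(unsigned int out_rows, unsigned int out_cols, VLType vl)
  : m_out_rows(out_rows), m_out_cols(out_cols), m_vl(vl) {}
  virtual ~GenericMultiplierKernelStrategyBase() = default;

  unsigned int get_output_rows() const { return m_out_rows; }
  unsigned int get_output_cols() const { return m_out_cols; }
  virtual KernelType get_kernel() const = 0;
};

void mock_impl(const float *const *, float *const *, const float *, const float *,
               unsigned int, unsigned int, float, float) {}

// Mirrors the output2x8 strategy's Parent(2, 8, arm_gemm::VLType::SVE) call.
struct MockGenericOutput2x8 : GenericMultiplierKernelStrategyBase
{
  KernelType kernel = mock_impl;
  MockGenericOutput2x8() : GenericMultiplierKernelStrategyBase(2, 8, VLType::SVE) {}
  KernelType get_kernel() const override { return kernel; }
};

int main()
{
  MockGenericOutput2x8 s;
  std::printf("%ux%u outputs per pass\n", s.get_output_rows(), s.get_output_cols());
  return 0;
}
```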
* * SPDX-License-Identifier: MIT * @@ -34,39 +34,40 @@ namespace arm_conv { namespace depthwise { -void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); +void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32&, const int32_t *, const int32_t *, int8_t *const *); -struct sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst +class sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - typedef void (*kern_type)(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_3x3_dot::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_3x3_dot::get_packed_size; - - kern_type kernel = sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; - - sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) {} + sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; } + + Parent::KernelType kernel = sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + size_t get_storage_size(const DepthwiseArgs &args) const override + { + return interleave_sve_s8q_3x3_dot::get_packed_size(args); + } + + void pack_parameters( + const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, + const void *weights, size_t ld_weight_col, size_t ld_weight_row + ) const override + { + interleave_sve_s8q_3x3_dot::pack_parameters( + args.input_channels, buffer, reinterpret_cast<const int32_t *>(biases), + reinterpret_cast<const int8_t *>(weights), qp, ld_weight_col, ld_weight_row + ); + } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp index 8e9e5f4aeb..800803770a 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
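For the quantized dot kernels, the `constexpr` `pack_parameters`/`get_packed_size` function pointers become virtual `pack_parameters`/`get_storage_size` overrides that forward to the interleave helpers, as the `sve_s8q_..._dot_depthfirst` class above shows. A self-contained sketch with mock stand-ins for `DepthwiseArgs`, `Requantize32` and the interleave helper (the toy sizing formula is an assumption, not the library's packing layout):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace sketch {

struct DepthwiseArgs { unsigned int input_channels; };
struct Requantize32 { int32_t b_offset, c_offset, minval, maxval; };

// Stand-in for interleave_sve_s8q_3x3_dot; the real helper packs differently.
namespace interleave_3x3_dot {
std::size_t get_packed_size(const DepthwiseArgs &args)
{
  // Toy sizing: 3x3 int8 weights plus an int32 bias per channel.
  return args.input_channels * (9 * sizeof(int8_t) + sizeof(int32_t));
}
void pack_parameters(unsigned int channels, void *, const int32_t *, const int8_t *,
                     const Requantize32 &, std::size_t, std::size_t)
{
  std::printf("packed %u channels\n", channels);
}
}  // namespace interleave_3x3_dot

class Strategy
{
public:
  virtual ~Strategy() = default;
  virtual std::size_t get_storage_size(const DepthwiseArgs &) const = 0;
  virtual void pack_parameters(const DepthwiseArgs &, void *buffer, const void *biases,
                               const Requantize32 &qp, const void *weights,
                               std::size_t ld_weight_col, std::size_t ld_weight_row) const = 0;
};

class S8qDotStrategy final : public Strategy
{
public:
  std::size_t get_storage_size(const DepthwiseArgs &args) const override
  {
    return interleave_3x3_dot::get_packed_size(args);
  }
  void pack_parameters(const DepthwiseArgs &args, void *buffer, const void *biases,
                       const Requantize32 &qp, const void *weights,
                       std::size_t ld_weight_col, std::size_t ld_weight_row) const override
  {
    // Forward to the helper, restoring the concrete pointer types.
    interleave_3x3_dot::pack_parameters(
        args.input_channels, buffer, static_cast<const int32_t *>(biases),
        static_cast<const int8_t *>(weights), qp, ld_weight_col, ld_weight_row);
  }
};

}  // namespace sketch

int main()
{
  const sketch::DepthwiseArgs args{16};
  const sketch::S8qDotStrategy s;
  std::vector<uint8_t> buffer(s.get_storage_size(args));
  std::vector<int32_t> biases(args.input_channels, 0);
  std::vector<int8_t> weights(args.input_channels * 9, 0);
  const sketch::Requantize32 qp{};
  s.pack_parameters(args, buffer.data(), biases.data(), qp, weights.data(), 1, 3);
  return 0;
}
```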
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,15 @@ namespace arm_conv { namespace depthwise { -void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *const inptrs, int8_t *const *const outptrs, const void *params, const uint64_t n_channels, const arm_gemm::Requantize32& qp) +void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl( + const unsigned int n_channels, + const int8_t *const *const inptrs, + const int8_t *params, + const int32_t *, // Bias, should be wrapped into the parameters + const arm_gemm::Requantize32& qp, + const int32_t *, const int32_t *, // Requant parameters, also wrapped + int8_t *const *const outptrs +) { __asm__ __volatile__( "ldp x11, x10, [%x[inptrs], #0x0]\n" @@ -446,7 +454,7 @@ void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *cons "b.any 1b\n" "addvl SP, SP, #8\n" : [params] "+&r" (params) - : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) + : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index f788829572..3e9765165c 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
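The assembly entry points are also re-ordered to a single shared signature, shown in the `..._impl` declaration above: channel count first, then input pointers, the packed parameter blob, a bias pointer and two requant-parameter pointers that the dot kernels deliberately ignore (those values are already folded into the packed parameters), and finally the output pointers. A plain-C++ stand-in for that entry point, with no assembly:

```cpp
#include <cstdint>
#include <cstdio>

namespace sketch {

struct Requantize32 { int32_t b_offset, c_offset, minval, maxval; };

// Shape copied from the declarations in this diff; the body is a stand-in.
void s8q_dot_impl(
    unsigned int n_channels,
    const int8_t *const * /*inptrs*/,
    const int8_t * /*params*/,
    const int32_t *,                    // bias: wrapped into the parameters
    const Requantize32 &qp,
    const int32_t *, const int32_t *,   // requant parameters: also wrapped
    int8_t *const * /*outptrs*/)
{
  std::printf("n_channels=%u clamp=[%d,%d]\n", n_channels, qp.minval, qp.maxval);
}

}  // namespace sketch

int main()
{
  const sketch::Requantize32 qp{0, 0, -128, 127};
  sketch::s8q_dot_impl(16, nullptr, nullptr, nullptr, qp, nullptr, nullptr, nullptr);
  return 0;
}
```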
* * SPDX-License-Identifier: MIT * @@ -29,47 +29,35 @@ #pragma once -#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { void sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); -struct sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst +class sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - typedef void (*kern_type)(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } - constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_3x3_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_3x3_mla::get_packed_size; + sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} - kern_type kernel = sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType kernel = sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl; - sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp index 87387960f1..3583308357 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
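The mla strategies additionally override `get_accumulator_depth_vl()` to report two vector lengths of accumulator depth, as above. A small mock of that hook; the base-class default of 1 is an assumption for illustration, not the library's exact definition:

```cpp
#include <cstdio>

class StrategyBase
{
public:
  virtual ~StrategyBase() = default;
  // Assumed default: one vector length of accumulator depth.
  virtual unsigned int get_accumulator_depth_vl() const { return 1; }
};

struct SveMlaStrategy : StrategyBase
{
  unsigned int get_accumulator_depth_vl() const override { return 2; }
};

int main()
{
  SveMlaStrategy s;
  std::printf("accumulator depth: %u VLs\n", s.get_accumulator_depth_vl());
  return 0;
}
```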
* * SPDX-License-Identifier: MIT * @@ -27,7 +27,7 @@ #include <cstddef> #include <cstdint> -#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -415,4 +415,4 @@ void sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl( } // namespace depthwise } // namespace arm_conv -#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp index 5c2b4f6f53..78bcd1407f 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -29,47 +29,35 @@ #pragma once -#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { void sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); -struct sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst +class sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - typedef void (*kern_type)(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 5; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } - constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_3x3_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_3x3_mla::get_packed_size; + sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {} - kern_type kernel = sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; + Parent::KernelType kernel = sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl; - sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType get_kernel(void) const override { return kernel; } }; } 
// namespace depthwise } // namespace arm_conv -#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp index b4a1026aaa..ba8c1fdb8d 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,7 +27,7 @@ #include <cstddef> #include <cstdint> -#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -456,4 +456,4 @@ void sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl( } // namespace depthwise } // namespace arm_conv -#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp index 948c5ad2e7..41ecd520ae 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -29,47 +29,35 @@ #pragma once -#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { void sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); -struct sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst +class sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - typedef void (*kern_type)(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, int8_t *const *); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 6; - constexpr static unsigned int input_cols = 6; + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; } + unsigned int get_accumulator_depth_vl(void) const override { return 2; } - constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_5x5_mla::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_5x5_mla::get_packed_size; + sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {} - kern_type kernel = sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; + Parent::KernelType kernel = sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl; - sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {} + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise } // namespace arm_conv -#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp index 565c145f92..4733c89199 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
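Note also that every `s8q` mla file above relaxes its compile guard from requiring both `ARM_COMPUTE_ENABLE_SVE` and `ARM_COMPUTE_ENABLE_SVE2` to requiring SVE only. The idiom, in a hypothetical stand-alone translation unit (only the guard pattern is taken from the diff):

```cpp
#include <cstdio>

#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
static const char *build_variant() { return "sve"; }
#else
static const char *build_variant() { return "generic"; }  // hypothetical fallback
#endif

int main()
{
  std::printf("compiled variant: %s\n", build_variant());
  return 0;
}
```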
* * SPDX-License-Identifier: MIT * @@ -27,7 +27,7 @@ #include <cstddef> #include <cstdint> -#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) namespace arm_conv { namespace depthwise { @@ -657,4 +657,4 @@ void sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl( } // namespace depthwise } // namespace arm_conv -#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2) +#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp index 176c4f878e..2e8c2019db 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,33 +35,24 @@ namespace depthwise { void sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); -struct sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst +struct sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst : DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - typedef void (*kern_type)(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - + using Parent = DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t>; constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 2; constexpr static unsigned int stride_cols = 2; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 4; - - constexpr static unsigned int input_rows = 5; - constexpr static unsigned int input_cols = 9; - constexpr static unsigned int input_col_quads = 1; + sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) + : Parent(2, 4, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::SVE; } - sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp index 10eee34d62..4874fb9a77 100644 --- 
a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -35,33 +35,24 @@ namespace depthwise { void sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); -struct sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst +struct sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst : DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - typedef void (*kern_type)(const int8_t *const *const, int8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&); - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - + using Parent = DepthfirstMultiplierStrategy<int8_t, int8_t, int8_t, int32_t>; constexpr static unsigned int kernel_rows = 5; constexpr static unsigned int kernel_cols = 5; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 4; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 8; - constexpr static unsigned int input_cols = 6; - constexpr static unsigned int input_col_quads = 1; + sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) + : Parent(4, 2, kernel_rows, kernel_cols, stride_rows, stride_cols) + { + } - kern_type kernel = sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl; + arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::SVE; } - sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) {} + Parent::KernelType kernel = sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp index b5c6e983ae..0d185fcafc 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,39 +34,40 @@ namespace arm_conv { namespace depthwise { -void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); +void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(unsigned int, const int8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32&, const int32_t *, const int32_t *, int8_t *const *); -struct sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst +class sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst : public DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t> { - typedef int32_t bias_type; - typedef int8_t input_type; - typedef int8_t weight_type; - typedef int8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - typedef void (*kern_type)(const int8_t *const *, int8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int32_t *, const int8_t *, const arm_gemm::Requantize32 &, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<int8_t, int8_t, int8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_3x3_dot::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_3x3_dot::get_packed_size; - - kern_type kernel = sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; - - sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) {} + sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; } + + Parent::KernelType kernel = sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + size_t get_storage_size(const DepthwiseArgs &args) const override + { + return interleave_sve_s8q_3x3_dot::get_packed_size(args); + } + + void pack_parameters( + const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, + const void *weights, size_t ld_weight_col, size_t ld_weight_row + ) const override + { + interleave_sve_s8q_3x3_dot::pack_parameters( + args.input_channels, buffer, reinterpret_cast<const int32_t *>(biases), + reinterpret_cast<const int8_t *>(weights), qp, ld_weight_col, ld_weight_row + ); + } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp index 095c1de8f2..391e98b561 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,15 @@ namespace arm_conv { namespace depthwise { -void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *const inptrs, int8_t *const *const outptrs, const void *params, const uint64_t n_channels, const arm_gemm::Requantize32& qp) +void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl( + const unsigned int n_channels, + const int8_t *const *const inptrs, + const int8_t *params, + const int32_t *, // Bias, should be wrapped into the parameters + const arm_gemm::Requantize32& qp, + const int32_t *, const int32_t *, // Requant parameters, also wrapped + int8_t *const *const outptrs +) { __asm__ __volatile__( "ldp x11, x10, [%x[inptrs], #0x0]\n" @@ -377,7 +385,7 @@ void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const int8_t *const *con "b.any 1b\n" "addvl SP, SP, #8\n" : [params] "+&r" (params) - : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) + : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp index a087e801dc..648b2da163 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,39 +34,40 @@ namespace arm_conv { namespace depthwise { -void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const uint8_t *const *, uint8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); +void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32&, const int32_t *, const int32_t *, uint8_t *const *); -struct sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst +class sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t> { - typedef uint32_t bias_type; - typedef uint8_t input_type; - typedef uint8_t weight_type; - typedef uint8_t return_type; - - constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE; - - typedef void (*kern_type)(const uint8_t *const *, uint8_t *const *, const void *, uint64_t, const arm_gemm::Requantize32&); - typedef void (*parameter_packing_fn)(unsigned int, void *, const int32_t *, const uint8_t *, const arm_gemm::Requantize32 &, size_t, size_t); - typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &); + using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>; + public: constexpr static unsigned int kernel_rows = 3; constexpr static unsigned int kernel_cols = 3; constexpr static unsigned int stride_rows = 1; constexpr static unsigned int stride_cols = 1; - constexpr static unsigned int output_rows = 2; - constexpr static unsigned int output_cols = 2; - - constexpr static unsigned int input_rows = 4; - constexpr static unsigned int input_cols = 4; - - constexpr static parameter_packing_fn pack_parameters = interleave_sve_u8q_3x3_dot::pack_parameters; - constexpr static parameter_sizing_fn get_packed_size = interleave_sve_u8q_3x3_dot::get_packed_size; - - kern_type kernel = sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; - - sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) {} + sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {} + + arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; } + + Parent::KernelType kernel = sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl; + Parent::KernelType get_kernel(void) const override { return kernel; } + size_t get_storage_size(const DepthwiseArgs &args) const override + { + return interleave_sve_u8q_3x3_dot::get_packed_size(args); + } + + void pack_parameters( + const DepthwiseArgs &args, void *buffer, const void *biases, const arm_gemm::Requantize32 &qp, + const void *weights, size_t ld_weight_col, size_t ld_weight_row + ) const override + { + interleave_sve_u8q_3x3_dot::pack_parameters( + args.input_channels, buffer, reinterpret_cast<const int32_t *>(biases), + reinterpret_cast<const uint8_t *>(weights), qp, ld_weight_col, ld_weight_row + ); + } }; } // namespace depthwise diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp index 0d4b9e6687..440f57ed00 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -30,7 +30,15 @@ namespace arm_conv { namespace depthwise { -void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const uint8_t *const *const inptrs, uint8_t *const *const outptrs, const void *params, const uint64_t n_channels, const arm_gemm::Requantize32& qp) +void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl( + const unsigned int n_channels, + const uint8_t *const *const inptrs, + const uint8_t *params, + const int32_t *, // Bias, should be wrapped into the parameters + const arm_gemm::Requantize32& qp, + const int32_t *, const int32_t *, // Requant parameters, also wrapped + uint8_t *const *const outptrs +) { __asm__ __volatile__( "ldp x11, x10, [%x[inptrs], #0x0]\n" @@ -446,7 +454,7 @@ void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const uint8_t *const *con "b.any 1b\n" "addvl SP, SP, #8\n" : [params] "+&r" (params) - : [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) + : [inptrs] "r" (inptrs), [n_channels] "r" ((long unsigned int) n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp) : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); } diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp index b524fd7c93..1cf20ef721 100644 --- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp +++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021-2022 Arm Limited. 
 *
 * SPDX-License-Identifier: MIT
 *
@@ -36,37 +36,25 @@ namespace depthwise {
 
 void sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
 
-struct sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst
+class sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef uint8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const uint8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>;
 
+  public:
   constexpr static unsigned int kernel_rows = 3;
   constexpr static unsigned int kernel_cols = 3;
 
   constexpr static unsigned int stride_rows = 1;
   constexpr static unsigned int stride_cols = 1;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 4;
-  constexpr static unsigned int input_cols = 4;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
 
-  constexpr static parameter_packing_fn pack_parameters = interleave_sve_u8q_3x3_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_sve_u8q_3x3_mla::get_packed_size;
+  sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {}
 
-  kern_type kernel = sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl;
+  Parent::KernelType kernel = sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl;
 
-  sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 52dc4683ad..7bfa5fc4c7 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,7 +27,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
@@ -415,4 +415,4 @@ void sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
index 981864270d..a794095c6f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -29,47 +29,35 @@
 
 #pragma once
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
 
 void sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
 
-struct sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst
+class sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef uint8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const uint8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>;
 
+  public:
   constexpr static unsigned int kernel_rows = 3;
   constexpr static unsigned int kernel_cols = 3;
 
   constexpr static unsigned int stride_rows = 2;
   constexpr static unsigned int stride_cols = 2;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 5;
-  constexpr static unsigned int input_cols = 5;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
 
-  constexpr static parameter_packing_fn pack_parameters = interleave_sve_u8q_3x3_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_sve_u8q_3x3_mla::get_packed_size;
+  sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {}
 
-  kern_type kernel = sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl;
+  Parent::KernelType kernel = sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl;
 
-  sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 34ba8ece12..e1b2d257b0 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,7 +27,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
@@ -456,4 +456,4 @@ void sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
index b1b16c55d3..ac0a00b245 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -29,47 +29,35 @@
 
 #pragma once
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
 
 void sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
 
-struct sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst
+class sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef uint8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const uint8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const uint8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, uint8_t, uint8_t, int32_t>;
 
+  public:
   constexpr static unsigned int kernel_rows = 5;
   constexpr static unsigned int kernel_cols = 5;
 
   constexpr static unsigned int stride_rows = 1;
   constexpr static unsigned int stride_cols = 1;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 6;
-  constexpr static unsigned int input_cols = 6;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
 
-  constexpr static parameter_packing_fn pack_parameters = interleave_sve_u8q_5x5_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_sve_u8q_5x5_mla::get_packed_size;
+  sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {}
 
-  kern_type kernel = sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl;
+  Parent::KernelType kernel = sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl;
 
-  sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 441da6da7a..0b2182f995 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,7 +27,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
@@ -657,4 +657,4 @@ void sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp
index dbf70c3f8e..81c954a11b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -35,33 +35,24 @@ namespace depthwise {
 
 void sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&);
 
-struct sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst
+struct sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst : DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t>
 {
-  typedef uint32_t bias_type;
-  typedef uint8_t input_type;
-  typedef uint8_t weight_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&);
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
+  using Parent = DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t>;
   constexpr static unsigned int kernel_rows = 3;
   constexpr static unsigned int kernel_cols = 3;
 
   constexpr static unsigned int stride_rows = 2;
   constexpr static unsigned int stride_cols = 2;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 4;
-
-  constexpr static unsigned int input_rows = 5;
-  constexpr static unsigned int input_cols = 9;
-  constexpr static unsigned int input_col_quads = 1;
+  sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *)
+  : Parent(2, 4, kernel_rows, kernel_cols, stride_rows, stride_cols)
+  {
+  }
 
-  kern_type kernel = sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl;
+  arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::SVE; }
 
-  sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst(const CPUInfo *) {}
+  Parent::KernelType kernel = sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl;
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp
index 90fefdcda3..e7173de65a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -35,33 +35,24 @@ namespace depthwise {
 
 void sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&);
 
-struct sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst
+struct sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst : DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t>
 {
-  typedef uint32_t bias_type;
-  typedef uint8_t input_type;
-  typedef uint8_t weight_type;
-  typedef uint8_t return_type;
-
-  typedef void (*kern_type)(const uint8_t *const *const, uint8_t *const *const, const void *, unsigned int, const arm_gemm::Requantize32&);
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
+  using Parent = DepthfirstMultiplierStrategy<uint8_t, uint8_t, uint8_t, int32_t>;
   constexpr static unsigned int kernel_rows = 5;
   constexpr static unsigned int kernel_cols = 5;
 
   constexpr static unsigned int stride_rows = 1;
   constexpr static unsigned int stride_cols = 1;
 
-  constexpr static unsigned int output_rows = 4;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 8;
-  constexpr static unsigned int input_cols = 6;
-  constexpr static unsigned int input_col_quads = 1;
+  sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *)
+  : Parent(4, 2, kernel_rows, kernel_cols, stride_rows, stride_cols)
+  {
+  }
 
-  kern_type kernel = sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl;
+  arm_gemm::VLType get_vl_type() const override { return arm_gemm::VLType::SVE; }
 
-  sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst(const CPUInfo *) {}
+  Parent::KernelType kernel = sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl;
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
index 8ab2e5ba2a..3d475daf72 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -29,47 +29,35 @@
 
 #pragma once
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
 
 void sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
 
-struct sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst
+class sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef int8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>;
 
+  public:
   constexpr static unsigned int kernel_rows = 3;
   constexpr static unsigned int kernel_cols = 3;
 
   constexpr static unsigned int stride_rows = 1;
   constexpr static unsigned int stride_cols = 1;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 4;
-  constexpr static unsigned int input_cols = 4;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
 
-  constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_3x3_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_3x3_mla::get_packed_size;
+  sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 1, 1) {}
 
-  kern_type kernel = sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl;
+  Parent::KernelType kernel = sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl;
 
-  sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 4b9be8f3e3..dc8fad95fa 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,7 +27,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
@@ -415,4 +415,4 @@ void sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
index f652e48e42..9a3db20f73 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -29,47 +29,35 @@
 
 #pragma once
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
 
 void sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
 
-struct sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst
+class sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef int8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>;
 
+  public:
   constexpr static unsigned int kernel_rows = 3;
   constexpr static unsigned int kernel_cols = 3;
 
   constexpr static unsigned int stride_rows = 2;
   constexpr static unsigned int stride_cols = 2;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 5;
-  constexpr static unsigned int input_cols = 5;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
 
-  constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_3x3_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_3x3_mla::get_packed_size;
+  sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 3, 3, 2, 2) {}
 
-  kern_type kernel = sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl;
+  Parent::KernelType kernel = sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl;
 
-  sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 400e62d248..9adf100a0f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,7 +27,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
@@ -456,4 +456,4 @@ void sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
index f07ea13a03..06ca42eed9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -29,47 +29,35 @@
 
 #pragma once
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
 
 void sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
 
-struct sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst
+class sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>
 {
-  typedef int32_t bias_type;
-  typedef uint8_t input_type;
-  typedef int8_t weight_type;
-  typedef uint8_t return_type;
-
-  constexpr static arm_gemm::VLType vl_type = arm_gemm::VLType::SVE;
-
-  typedef void (*kern_type)(unsigned int, const uint8_t *const *, const int8_t *, const int32_t *, const arm_gemm::Requantize32 &, const int32_t *, const int32_t *, uint8_t *const *);
-  typedef void (*parameter_packing_fn)(unsigned int, void *, const int8_t *, size_t, size_t);
-  typedef size_t (*parameter_sizing_fn)(const DepthwiseArgs &);
+  using Parent = DepthwiseDepthfirstStrategy<uint8_t, int8_t, uint8_t, int32_t>;
 
+  public:
   constexpr static unsigned int kernel_rows = 5;
   constexpr static unsigned int kernel_cols = 5;
 
   constexpr static unsigned int stride_rows = 1;
   constexpr static unsigned int stride_cols = 1;
 
-  constexpr static unsigned int output_rows = 2;
-  constexpr static unsigned int output_cols = 2;
-
-  constexpr static unsigned int input_rows = 6;
-  constexpr static unsigned int input_cols = 6;
+  arm_gemm::VLType get_vl_type(void) const override { return arm_gemm::VLType::SVE; }
+  unsigned int get_accumulator_depth_vl(void) const override { return 2; }
 
-  constexpr static parameter_packing_fn pack_parameters = interleave_sve_s8q_5x5_mla::pack_parameters;
-  constexpr static parameter_sizing_fn get_packed_size = interleave_sve_s8q_5x5_mla::get_packed_size;
+  sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) : Parent(2, 2, 5, 5, 1, 1) {}
 
-  kern_type kernel = sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl;
+  Parent::KernelType kernel = sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl;
 
-  sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst(const CPUInfo *) {}
+  Parent::KernelType get_kernel(void) const override { return kernel; }
 };
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 29582da0f6..9cf95e9588 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,7 +27,7 @@
 #include <cstddef>
 #include <cstdint>
 
-#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#if defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
 
 namespace arm_conv {
 namespace depthwise {
@@ -657,4 +657,4 @@ void sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
 
 } // namespace depthwise
 } // namespace arm_conv
 
-#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE) && defined(ARM_COMPUTE_ENABLE_SVE2)
+#endif // defined(__aarch64__) && defined(ARM_COMPUTE_ENABLE_SVE)
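Taken together, the hunks above apply one mechanical refactor across every kernel header: each plain struct carrying typedefs (bias_type, kern_type, parameter_packing_fn), constexpr shape tables (output_rows, input_rows, and so on), and static packing-function pointers becomes a class deriving from DepthwiseDepthfirstStrategy<TInput, TWeight, TOutput, TAccum> (or DepthfirstMultiplierStrategy for the channel-multiplier variants). The tile shape moves into the base-class constructor arguments, and the kernel function pointer is exposed through a get_kernel() override. The sketch below is a minimal, self-contained illustration of that pattern, not library code: StrategyBase, example_impl, and example_3x3_s1_output2x2 are hypothetical stand-ins, and the simplified KernelType signature omits the bias, requantization, and packing machinery the real base classes handle.

#include <cstdint>

// Hypothetical stand-in for the library's DepthwiseDepthfirstStrategy /
// DepthfirstMultiplierStrategy base classes, whose definitions are not part
// of this diff. Illustrative only.
template <typename TInput, typename TWeight, typename TOutput, typename TAccum>
class StrategyBase
{
  unsigned int m_out_rows, m_out_cols, m_kern_rows, m_kern_cols, m_stride_rows, m_stride_cols;

public:
  // Deliberately simplified kernel signature for illustration.
  using KernelType = void (*)(unsigned int, const TInput *const *, const TWeight *, TOutput *const *);

  StrategyBase(unsigned int out_rows, unsigned int out_cols,
               unsigned int kern_rows, unsigned int kern_cols,
               unsigned int stride_rows, unsigned int stride_cols)
  : m_out_rows(out_rows), m_out_cols(out_cols),
    m_kern_rows(kern_rows), m_kern_cols(kern_cols),
    m_stride_rows(stride_rows), m_stride_cols(stride_cols)
  {
  }

  virtual ~StrategyBase() = default;

  // The deleted per-kernel input_rows/input_cols constants are recoverable
  // from shape data: input extent = kernel extent + (output extent - 1) * stride.
  unsigned int get_input_rows() const { return m_kern_rows + (m_out_rows - 1) * m_stride_rows; }
  unsigned int get_input_cols() const { return m_kern_cols + (m_out_cols - 1) * m_stride_cols; }

  virtual KernelType get_kernel() const = 0;
};

// Stand-in for a generated assembly kernel; the real ones live in generic.cpp.
static void example_impl(unsigned int, const uint8_t *const *, const uint8_t *, uint8_t *const *) {}

// One strategy in the new style: the tile shape goes to the base constructor,
// and the kernel function pointer is exposed through a virtual accessor.
class example_3x3_s1_output2x2 : public StrategyBase<uint8_t, uint8_t, uint8_t, int32_t>
{
  using Parent = StrategyBase<uint8_t, uint8_t, uint8_t, int32_t>;

public:
  example_3x3_s1_output2x2() : Parent(2, 2, 3, 3, 1, 1) {}

  Parent::KernelType kernel = example_impl;
  Parent::KernelType get_kernel() const override { return kernel; }
};

int main()
{
  example_3x3_s1_output2x2 strategy;
  // 3x3 kernel, stride 1, 2x2 output tile -> 4x4 input tile, matching the
  // input_rows/input_cols constants the diff deletes from each header.
  return (strategy.get_input_rows() == 4 && strategy.get_input_cols() == 4) ? 0 : 1;
}

One payoff visible in the diff is that the per-kernel input tile constants become redundant: 4 = 3 + (2 - 1) * 1 for the 3x3/stride-1 kernels, 5 = 3 + (2 - 1) * 2 for the stride-2 ones, and 6 = 5 + (2 - 1) * 1 for the 5x5 kernels, so the base class can derive them from the constructor arguments instead of each header restating them.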