From c827e99fc46521f43719b0c2d1b6f05d66abf68c Mon Sep 17 00:00:00 2001
From: ramelg01
Date: Fri, 8 Apr 2022 03:52:28 +0100
Subject: =?UTF-8?q?Update=20Neon=E2=84=A2=20pooling=20kernel?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Reduce duplication and simplify overall structure.
- Improve multi-threaded performance by sharing more data in lower-level caches.

Partially Resolves: COMPMID-5054

Signed-off-by: Ramy Elgammal
Change-Id: I5f4dc50913401d5c1cbfc10b866fae9490cbc4d7
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7404
Tested-by: Arm Jenkins
Reviewed-by: Andrew Mundy
Reviewed-by: Sheri Zhang
Comments-Addressed: Arm Jenkins
---
 .../generic.cpp | 343 ++++++++++-----------
 1 file changed, 170 insertions(+), 173 deletions(-)

diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index 11376e0fe2..d48c4ec640 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,15 +22,15 @@
  * SOFTWARE.
  */

+#if defined(__aarch64__)
+
 #include "pooling.hpp"

-#include 
 #include 
+#include 
 #include 
 #include 

-#if defined(__aarch64__)
-
 namespace arm_conv {
 namespace pooling {

@@ -87,13 +87,13 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
 f_rescale_value *= 2.0f;
 }

- int64_t large_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
- if (large_rescale_value == (1ll << 31))
+ int64_t long_rescale_value = round(f_rescale_value * static_cast<float>(1ll << 31));
+ if (long_rescale_value == (1ll << 31))
 {
 shift_value++;
- large_rescale_value >>= 1;
+ long_rescale_value >>= 1;
 }
- rescale_value = static_cast<int32_t>(large_rescale_value);
+ rescale_value = static_cast<int32_t>(long_rescale_value);
 }

@@ -119,20 +119,20 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
 );

 __asm__ __volatile__(
- "mov x26, #0x0\n"
- "mov x25, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
 "cmp %x[n_channels], #0x40\n"
+ "mov x26, #0x0\n"
+ "mov x25, #0x10\n" // cntb _, ALL, #1
+ "mov x24, #0x20\n" // cntb _, ALL, #2
+ "mov x23, #0x30\n" // cntb _, ALL, #3
 "blt 7f\n"
 "1:" // 4-vectors of channels
 "ld1r { v15.4s }, [%x[accumulator_init]]\n"
+ "lsr x22, %x[n_valid_cells], #0x1\n"
 "mov v14.16b, v15.16b\n"
- "mov x19, %x[inptrs]\n"
 "mov v13.16b, v15.16b\n"
- "lsr x22, %x[n_valid_cells], #0x1\n"
 "mov v12.16b, v15.16b\n"
 "mov v11.16b, v15.16b\n"
+ "mov x19, %x[inptrs]\n"
 "mov v10.16b, v15.16b\n"
 "mov v9.16b, v15.16b\n"
 "mov v8.16b, v15.16b\n"
@@ -146,10 +146,10 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
 "mov v0.16b, v15.16b\n"
 "cbz x22, 4f\n"
 "ldp x21, x20, [x19, #0x0]\n"
- "ldr q31, [x21, x26]\n"
+ "subs x22, x22, #0x1\n"
 "add x19, x19, #0x10\n"
+ "ldr q31, [x21, x26]\n"
 "ldr q30, [x20, x26]\n"
- "subs x22, x22, #0x1\n"
 "ldr q29, [x21, x25]\n"
 "ldr q28, [x20, x25]\n"
 "ldr q27, [x21, x24]\n"
 "ldr q26, [x20, x24]\n"
 "ldr q25, [x21, x23]\n"
 "ldr q24, [x20, x23]\n"
 "beq 3f\n"
 "2:" // 4-vectors of channels: 2 inputs loop
 "uaddl v23.8h, v31.8b, v30.8b\n"
- "ldp x21, x20, [x19, #0x0]\n"
-
"add x19, x19, #0x10\n" "uaddl2 v22.8h, v31.16b, v30.16b\n" - "ldr q31, [x21, x26]\n" - "uaddl v21.8h, v29.8b, v28.8b\n" + "ldp x21, x20, [x19, #0x0]\n" "subs x22, x22, #0x1\n" + "uaddl v21.8h, v29.8b, v28.8b\n" "uaddl2 v20.8h, v29.16b, v28.16b\n" - "ldr q30, [x20, x26]\n" + "add x19, x19, #0x10\n" + "ldr q31, [x21, x26]\n" "uaddl v19.8h, v27.8b, v26.8b\n" - "ldr q29, [x21, x25]\n" "uaddl2 v18.8h, v27.16b, v26.16b\n" - "ldr q28, [x20, x25]\n" + "ldr q30, [x20, x26]\n" + "ldr q29, [x21, x25]\n" "uaddl v17.8h, v25.8b, v24.8b\n" - "ldr q27, [x21, x24]\n" "uaddl2 v16.8h, v25.16b, v24.16b\n" - "ldr q26, [x20, x24]\n" + "ldr q28, [x20, x25]\n" + "ldr q27, [x21, x24]\n" "uaddw v15.4s, v15.4s, v23.4h\n" - "ldr q25, [x21, x23]\n" "uaddw2 v14.4s, v14.4s, v23.8h\n" - "ldr q24, [x20, x23]\n" + "ldr q26, [x20, x24]\n" + "ldr q25, [x21, x23]\n" "uaddw v13.4s, v13.4s, v22.4h\n" "uaddw2 v12.4s, v12.4s, v22.8h\n" + "ldr q24, [x20, x23]\n" "uaddw v11.4s, v11.4s, v21.4h\n" "uaddw2 v10.4s, v10.4s, v21.8h\n" "uaddw v9.4s, v9.4s, v20.4h\n" @@ -224,17 +224,17 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "beq 6f\n" "5:" // 4-vectors of channels: Single input loop "ldr x21, [x19], #0x8\n" - "subs x20, x20, #0x1\n" "ldr q31, [x21, x26]\n" "uxtl v23.8h, v31.8b\n" - "ldr q29, [x21, x25]\n" "uxtl2 v22.8h, v31.16b\n" + "ldr q29, [x21, x25]\n" "ldr q27, [x21, x24]\n" - "ldr q25, [x21, x23]\n" "uxtl v21.8h, v29.8b\n" "uxtl2 v20.8h, v29.16b\n" + "ldr q25, [x21, x23]\n" "uxtl v19.8h, v27.8b\n" "uxtl2 v18.8h, v27.16b\n" + "subs x20, x20, #0x1\n" "uxtl v17.8h, v25.8b\n" "uxtl2 v16.8h, v25.16b\n" "uaddw v15.4s, v15.4s, v23.4h\n" @@ -255,64 +255,62 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "uaddw2 v0.4s, v0.4s, v16.8h\n" "bgt 5b\n" "6:" // 4-vectors of channels: Single input loop: End - "movi v21.4s, #0x0\n" - "ld1r { v20.4s }, [%x[combined_rescale_value]]\n" + "ld1r { v19.4s }, [%x[left_shift]]\n" + "ld1r { v18.4s }, [%x[combined_rescale_value]]\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v19.4s\n" + "srshl v13.4s, v13.4s, v19.4s\n" + "srshl v12.4s, v12.4s, v19.4s\n" + "ld1r { v17.4s }, [%x[right_shift]]\n" "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n" - "movi v19.4s, #0xff\n" - "ld1r { v18.4s }, [%x[left_shift]]\n" + "srshl v11.4s, v11.4s, v19.4s\n" + "srshl v10.4s, v10.4s, v19.4s\n" + "ld1r { v16.4s }, [x19]\n" "sub %x[n_channels], %x[n_channels], #0x40\n" - "srshl v15.4s, v15.4s, v18.4s\n" - "ld1r { v17.4s }, [%x[right_shift]]\n" + "srshl v9.4s, v9.4s, v19.4s\n" + "srshl v8.4s, v8.4s, v19.4s\n" "cmp %x[n_channels], #0x40\n" - "srshl v14.4s, v14.4s, v18.4s\n" - "ld1r { v16.4s }, [x19]\n" - "srshl v13.4s, v13.4s, v18.4s\n" - "srshl v12.4s, v12.4s, v18.4s\n" - "srshl v11.4s, v11.4s, v18.4s\n" - "sqrdmulh v15.4s, v15.4s, v20.4s\n" - "sqrdmulh v14.4s, v14.4s, v20.4s\n" - "sqrdmulh v13.4s, v13.4s, v20.4s\n" - "sqrdmulh v12.4s, v12.4s, v20.4s\n" + "srshl v7.4s, v7.4s, v19.4s\n" + "srshl v6.4s, v6.4s, v19.4s\n" + "srshl v5.4s, v5.4s, v19.4s\n" + "srshl v4.4s, v4.4s, v19.4s\n" + "srshl v3.4s, v3.4s, v19.4s\n" + "srshl v2.4s, v2.4s, v19.4s\n" + "srshl v1.4s, v1.4s, v19.4s\n" + "srshl v0.4s, v0.4s, v19.4s\n" + "sqrdmulh v15.4s, v15.4s, v18.4s\n" + "sqrdmulh v14.4s, v14.4s, v18.4s\n" + "sqrdmulh v13.4s, v13.4s, v18.4s\n" + "sqrdmulh v12.4s, v12.4s, v18.4s\n" + "sqrdmulh v11.4s, v11.4s, v18.4s\n" + "sqrdmulh v10.4s, v10.4s, v18.4s\n" + "sqrdmulh v9.4s, v9.4s, v18.4s\n" + "sqrdmulh v8.4s, v8.4s, v18.4s\n" + "sqrdmulh v7.4s, v7.4s, v18.4s\n" + "sqrdmulh v6.4s, v6.4s, v18.4s\n" + "sqrdmulh 
v5.4s, v5.4s, v18.4s\n" + "sqrdmulh v4.4s, v4.4s, v18.4s\n" + "sqrdmulh v3.4s, v3.4s, v18.4s\n" + "sqrdmulh v2.4s, v2.4s, v18.4s\n" + "sqrdmulh v1.4s, v1.4s, v18.4s\n" + "sqrdmulh v0.4s, v0.4s, v18.4s\n" "srshl v15.4s, v15.4s, v17.4s\n" "srshl v14.4s, v14.4s, v17.4s\n" "srshl v13.4s, v13.4s, v17.4s\n" "srshl v12.4s, v12.4s, v17.4s\n" - "sqrdmulh v11.4s, v11.4s, v20.4s\n" - "srshl v10.4s, v10.4s, v18.4s\n" - "srshl v9.4s, v9.4s, v18.4s\n" - "srshl v8.4s, v8.4s, v18.4s\n" "srshl v11.4s, v11.4s, v17.4s\n" - "sqrdmulh v10.4s, v10.4s, v20.4s\n" - "sqrdmulh v9.4s, v9.4s, v20.4s\n" - "sqrdmulh v8.4s, v8.4s, v20.4s\n" - "srshl v7.4s, v7.4s, v18.4s\n" "srshl v10.4s, v10.4s, v17.4s\n" "srshl v9.4s, v9.4s, v17.4s\n" "srshl v8.4s, v8.4s, v17.4s\n" - "sqrdmulh v7.4s, v7.4s, v20.4s\n" - "srshl v6.4s, v6.4s, v18.4s\n" - "srshl v5.4s, v5.4s, v18.4s\n" - "srshl v4.4s, v4.4s, v18.4s\n" "srshl v7.4s, v7.4s, v17.4s\n" - "sqrdmulh v6.4s, v6.4s, v20.4s\n" - "sqrdmulh v5.4s, v5.4s, v20.4s\n" - "sqrdmulh v4.4s, v4.4s, v20.4s\n" - "srshl v3.4s, v3.4s, v18.4s\n" "srshl v6.4s, v6.4s, v17.4s\n" "srshl v5.4s, v5.4s, v17.4s\n" "srshl v4.4s, v4.4s, v17.4s\n" - "sqrdmulh v3.4s, v3.4s, v20.4s\n" - "srshl v2.4s, v2.4s, v18.4s\n" - "srshl v1.4s, v1.4s, v18.4s\n" - "srshl v0.4s, v0.4s, v18.4s\n" "srshl v3.4s, v3.4s, v17.4s\n" - "sqrdmulh v2.4s, v2.4s, v20.4s\n" - "sqrdmulh v1.4s, v1.4s, v20.4s\n" - "sqrdmulh v0.4s, v0.4s, v20.4s\n" - "add v15.4s, v15.4s, v16.4s\n" "srshl v2.4s, v2.4s, v17.4s\n" "srshl v1.4s, v1.4s, v17.4s\n" "srshl v0.4s, v0.4s, v17.4s\n" + "add v15.4s, v15.4s, v16.4s\n" "add v14.4s, v14.4s, v16.4s\n" "add v13.4s, v13.4s, v16.4s\n" "add v12.4s, v12.4s, v16.4s\n" @@ -328,53 +326,55 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "add v2.4s, v2.4s, v16.4s\n" "add v1.4s, v1.4s, v16.4s\n" "add v0.4s, v0.4s, v16.4s\n" - "smax v15.4s, v15.4s, v21.4s\n" - "smax v14.4s, v14.4s, v21.4s\n" - "smax v13.4s, v13.4s, v21.4s\n" - "smin v15.4s, v15.4s, v19.4s\n" - "smin v14.4s, v14.4s, v19.4s\n" - "smin v13.4s, v13.4s, v19.4s\n" - "smax v12.4s, v12.4s, v21.4s\n" - "smax v11.4s, v11.4s, v21.4s\n" - "smax v10.4s, v10.4s, v21.4s\n" - "smin v12.4s, v12.4s, v19.4s\n" - "smin v11.4s, v11.4s, v19.4s\n" - "smin v10.4s, v10.4s, v19.4s\n" - "smax v9.4s, v9.4s, v21.4s\n" - "smax v8.4s, v8.4s, v21.4s\n" - "smax v7.4s, v7.4s, v21.4s\n" - "smin v9.4s, v9.4s, v19.4s\n" - "smin v8.4s, v8.4s, v19.4s\n" - "smin v7.4s, v7.4s, v19.4s\n" - "smax v6.4s, v6.4s, v21.4s\n" - "smax v5.4s, v5.4s, v21.4s\n" - "smax v4.4s, v4.4s, v21.4s\n" - "smin v6.4s, v6.4s, v19.4s\n" - "smin v5.4s, v5.4s, v19.4s\n" - "smin v4.4s, v4.4s, v19.4s\n" - "smax v3.4s, v3.4s, v21.4s\n" - "smax v2.4s, v2.4s, v21.4s\n" - "smax v1.4s, v1.4s, v21.4s\n" - "smin v3.4s, v3.4s, v19.4s\n" - "smin v2.4s, v2.4s, v19.4s\n" - "smin v1.4s, v1.4s, v19.4s\n" - "smax v0.4s, v0.4s, v21.4s\n" + "movi v16.4s, #0x0\n" + "smax v15.4s, v15.4s, v16.4s\n" + "smax v14.4s, v14.4s, v16.4s\n" + "smax v13.4s, v13.4s, v16.4s\n" + "smax v12.4s, v12.4s, v16.4s\n" + "smax v11.4s, v11.4s, v16.4s\n" + "smax v10.4s, v10.4s, v16.4s\n" + "smax v9.4s, v9.4s, v16.4s\n" + "smax v8.4s, v8.4s, v16.4s\n" + "smax v7.4s, v7.4s, v16.4s\n" + "smax v6.4s, v6.4s, v16.4s\n" + "smax v5.4s, v5.4s, v16.4s\n" + "smax v4.4s, v4.4s, v16.4s\n" + "smax v3.4s, v3.4s, v16.4s\n" + "smax v2.4s, v2.4s, v16.4s\n" + "smax v1.4s, v1.4s, v16.4s\n" + "smax v0.4s, v0.4s, v16.4s\n" + "movi v16.4s, #0xff\n" + "smin v15.4s, v15.4s, v16.4s\n" + "smin v14.4s, v14.4s, v16.4s\n" + "smin v13.4s, v13.4s, v16.4s\n" + "smin v12.4s, v12.4s, v16.4s\n" + 
"smin v11.4s, v11.4s, v16.4s\n" + "smin v10.4s, v10.4s, v16.4s\n" + "smin v9.4s, v9.4s, v16.4s\n" + "smin v8.4s, v8.4s, v16.4s\n" + "smin v7.4s, v7.4s, v16.4s\n" + "smin v6.4s, v6.4s, v16.4s\n" + "smin v5.4s, v5.4s, v16.4s\n" + "smin v4.4s, v4.4s, v16.4s\n" + "smin v3.4s, v3.4s, v16.4s\n" + "smin v2.4s, v2.4s, v16.4s\n" + "smin v1.4s, v1.4s, v16.4s\n" + "smin v0.4s, v0.4s, v16.4s\n" "uzp1 v23.16b, v15.16b, v14.16b\n" "uzp1 v16.16b, v13.16b, v12.16b\n" - "smin v0.4s, v0.4s, v19.4s\n" "uzp1 v22.16b, v11.16b, v10.16b\n" - "uzp1 v21.16b, v9.16b, v8.16b\n" - "uzp1 v20.16b, v7.16b, v6.16b\n" + "uzp1 v18.16b, v9.16b, v8.16b\n" + "uzp1 v21.16b, v7.16b, v6.16b\n" "uzp1 v17.16b, v5.16b, v4.16b\n" - "uzp1 v19.16b, v3.16b, v2.16b\n" - "uzp1 v18.16b, v1.16b, v0.16b\n" + "uzp1 v20.16b, v3.16b, v2.16b\n" + "uzp1 v19.16b, v1.16b, v0.16b\n" "uzp1 v16.16b, v23.16b, v16.16b\n" + "uzp1 v18.16b, v22.16b, v18.16b\n" "str q16, [%x[outptr], x26]\n" - "uzp1 v16.16b, v22.16b, v21.16b\n" "add x26, x26, #0x40\n" - "uzp1 v17.16b, v20.16b, v17.16b\n" - "str q16, [%x[outptr], x25]\n" - "uzp1 v16.16b, v19.16b, v18.16b\n" + "uzp1 v17.16b, v21.16b, v17.16b\n" + "uzp1 v16.16b, v20.16b, v19.16b\n" + "str q18, [%x[outptr], x25]\n" "add x25, x25, #0x40\n" "str q17, [%x[outptr], x24]\n" "add x24, x24, #0x40\n" @@ -387,30 +387,30 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "blt 14f\n" "8:" // Single vector of channels: Loop "ld1r { v15.4s }, [%x[accumulator_init]]\n" + "lsr x22, %x[n_valid_cells], #0x1\n" "mov v14.16b, v15.16b\n" - "mov x19, %x[inptrs]\n" "mov v13.16b, v15.16b\n" - "lsr x22, %x[n_valid_cells], #0x1\n" "mov v12.16b, v15.16b\n" + "mov x19, %x[inptrs]\n" "cbz x22, 11f\n" "ldp x21, x20, [x19, #0x0]\n" - "ldr q31, [x21, x26]\n" + "subs x22, x22, #0x1\n" "add x19, x19, #0x10\n" + "ldr q31, [x21, x26]\n" "ldr q30, [x20, x26]\n" - "subs x22, x22, #0x1\n" "beq 10f\n" "9:" // Single vector of channels: Loop: 2 inputs loop "uaddl v23.8h, v31.8b, v30.8b\n" - "ldp x21, x20, [x19, #0x0]\n" - "add x19, x19, #0x10\n" "uaddl2 v22.8h, v31.16b, v30.16b\n" - "ldr q31, [x21, x26]\n" - "uaddw v15.4s, v15.4s, v23.4h\n" + "ldp x21, x20, [x19, #0x0]\n" "subs x22, x22, #0x1\n" + "uaddw v15.4s, v15.4s, v23.4h\n" "uaddw2 v14.4s, v14.4s, v23.8h\n" - "ldr q30, [x20, x26]\n" + "add x19, x19, #0x10\n" + "ldr q31, [x21, x26]\n" "uaddw v13.4s, v13.4s, v22.4h\n" "uaddw2 v12.4s, v12.4s, v22.8h\n" + "ldr q30, [x20, x26]\n" "bgt 9b\n" "10:" // Single vector of channels: Loop: 2 inputs tail "uaddl v23.8h, v31.8b, v30.8b\n" @@ -424,33 +424,31 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "beq 13f\n" "12:" // Single vector of channels: Loop: Single input loop "ldr x21, [x19], #0x8\n" - "subs x20, x20, #0x1\n" "ldr q31, [x21, x26]\n" "uxtl v23.8h, v31.8b\n" "uxtl2 v22.8h, v31.16b\n" + "subs x20, x20, #0x1\n" "uaddw v15.4s, v15.4s, v23.4h\n" "uaddw2 v14.4s, v14.4s, v23.8h\n" "uaddw v13.4s, v13.4s, v22.4h\n" "uaddw2 v12.4s, v12.4s, v22.8h\n" "bgt 12b\n" "13:" // Single vector of channels: Loop: Single input loop: End - "movi v21.4s, #0x0\n" - "ld1r { v20.4s }, [%x[combined_rescale_value]]\n" + "ld1r { v19.4s }, [%x[left_shift]]\n" + "ld1r { v18.4s }, [%x[combined_rescale_value]]\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v19.4s\n" + "srshl v13.4s, v13.4s, v19.4s\n" + "srshl v12.4s, v12.4s, v19.4s\n" + "ld1r { v17.4s }, [%x[right_shift]]\n" "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n" - "movi v19.4s, #0xff\n" - "ld1r { v18.4s }, [%x[left_shift]]\n" + "sqrdmulh v15.4s, v15.4s, v18.4s\n" + "sqrdmulh v14.4s, 
v14.4s, v18.4s\n" + "ld1r { v16.4s }, [x19]\n" "sub %x[n_channels], %x[n_channels], #0x10\n" - "srshl v15.4s, v15.4s, v18.4s\n" - "ld1r { v17.4s }, [%x[right_shift]]\n" + "sqrdmulh v13.4s, v13.4s, v18.4s\n" + "sqrdmulh v12.4s, v12.4s, v18.4s\n" "cmp %x[n_channels], #0x10\n" - "srshl v14.4s, v14.4s, v18.4s\n" - "ld1r { v16.4s }, [x19]\n" - "srshl v13.4s, v13.4s, v18.4s\n" - "srshl v12.4s, v12.4s, v18.4s\n" - "sqrdmulh v15.4s, v15.4s, v20.4s\n" - "sqrdmulh v14.4s, v14.4s, v20.4s\n" - "sqrdmulh v13.4s, v13.4s, v20.4s\n" - "sqrdmulh v12.4s, v12.4s, v20.4s\n" "srshl v15.4s, v15.4s, v17.4s\n" "srshl v14.4s, v14.4s, v17.4s\n" "srshl v13.4s, v13.4s, v17.4s\n" @@ -459,15 +457,17 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "add v14.4s, v14.4s, v16.4s\n" "add v13.4s, v13.4s, v16.4s\n" "add v12.4s, v12.4s, v16.4s\n" - "smax v15.4s, v15.4s, v21.4s\n" - "smax v14.4s, v14.4s, v21.4s\n" - "smax v13.4s, v13.4s, v21.4s\n" - "smin v15.4s, v15.4s, v19.4s\n" - "smin v14.4s, v14.4s, v19.4s\n" - "smin v13.4s, v13.4s, v19.4s\n" - "smax v12.4s, v12.4s, v21.4s\n" + "movi v16.4s, #0x0\n" + "smax v15.4s, v15.4s, v16.4s\n" + "smax v14.4s, v14.4s, v16.4s\n" + "smax v13.4s, v13.4s, v16.4s\n" + "smax v12.4s, v12.4s, v16.4s\n" + "movi v16.4s, #0xff\n" + "smin v15.4s, v15.4s, v16.4s\n" + "smin v14.4s, v14.4s, v16.4s\n" + "smin v13.4s, v13.4s, v16.4s\n" + "smin v12.4s, v12.4s, v16.4s\n" "uzp1 v23.16b, v15.16b, v14.16b\n" - "smin v12.4s, v12.4s, v19.4s\n" "uzp1 v16.16b, v13.16b, v12.16b\n" "uzp1 v16.16b, v23.16b, v16.16b\n" "str q16, [%x[outptr], x26]\n" @@ -476,20 +476,20 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "cbz %x[n_channels], 43f\n" "14:" // Oddments "ld1r { v15.4s }, [%x[accumulator_init]]\n" - "mov v14.16b, v15.16b\n" + "lsr x22, %x[n_valid_cells], #0x1\n" "add %x[outptr], %x[outptr], x26\n" + "mov v14.16b, v15.16b\n" "mov v13.16b, v15.16b\n" - "mov x19, %x[inptrs]\n" "mov v12.16b, v15.16b\n" - "lsr x22, %x[n_valid_cells], #0x1\n" + "mov x19, %x[inptrs]\n" "cbz x22, 24f\n" "15:" // Oddments: 2 inputs loop - "movi v31.16b, #0x0\n" "ldp x21, x20, [x19, #0x0]\n" "add x19, x19, #0x10\n" - "movi v30.16b, #0x0\n" "add x21, x21, x26\n" + "movi v31.16b, #0x0\n" "add x20, x20, x26\n" + "movi v30.16b, #0x0\n" "tbz %x[n_channels], #3, 19f\n" "ldr d31, [x21], #0x8\n" "ldr d30, [x20], #0x8\n" @@ -551,8 +551,8 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "ldr b30, [x20], #0x1\n" "23:" // Oddments: 2 inputs loop: Load: Bit 3: End "uaddl v23.8h, v31.8b, v30.8b\n" - "subs x22, x22, #0x1\n" "uaddl2 v22.8h, v31.16b, v30.16b\n" + "subs x22, x22, #0x1\n" "uaddw v15.4s, v15.4s, v23.4h\n" "uaddw2 v14.4s, v14.4s, v23.8h\n" "uaddw v13.4s, v13.4s, v22.4h\n" @@ -562,9 +562,9 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "ands x20, %x[n_valid_cells], #0x1\n" "beq 34f\n" "25:" // Oddments: Single input loop - "movi v31.16b, #0x0\n" "ldr x21, [x19], #0x8\n" "add x21, x21, x26\n" + "movi v31.16b, #0x0\n" "tbz %x[n_channels], #3, 29f\n" "ldr d31, [x21], #0x8\n" "tbz %x[n_channels], #2, 27f\n" @@ -611,29 +611,27 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "ldr b31, [x21], #0x1\n" "33:" // Oddments: Single input loop: Load: Bit 3: End "uxtl v23.8h, v31.8b\n" - "subs x20, x20, #0x1\n" "uxtl2 v22.8h, v31.16b\n" + "subs x20, x20, #0x1\n" "uaddw v15.4s, v15.4s, v23.4h\n" "uaddw2 v14.4s, v14.4s, v23.8h\n" "uaddw v13.4s, v13.4s, v22.4h\n" "uaddw2 v12.4s, v12.4s, v22.8h\n" "bgt 25b\n" "34:" // Oddments: Single input loop: End - "movi v21.4s, #0x0\n" - "ld1r { v20.4s }, [%x[combined_rescale_value]]\n" - "add x19, 
%x[quant_params], %[offsetof_qp_output_offset]\n" - "movi v19.4s, #0xff\n" - "ld1r { v18.4s }, [%x[left_shift]]\n" + "ld1r { v19.4s }, [%x[left_shift]]\n" + "ld1r { v18.4s }, [%x[combined_rescale_value]]\n" + "srshl v15.4s, v15.4s, v19.4s\n" + "srshl v14.4s, v14.4s, v19.4s\n" + "srshl v13.4s, v13.4s, v19.4s\n" + "srshl v12.4s, v12.4s, v19.4s\n" "ld1r { v17.4s }, [%x[right_shift]]\n" - "srshl v15.4s, v15.4s, v18.4s\n" + "add x19, %x[quant_params], %[offsetof_qp_output_offset]\n" + "sqrdmulh v15.4s, v15.4s, v18.4s\n" + "sqrdmulh v14.4s, v14.4s, v18.4s\n" "ld1r { v16.4s }, [x19]\n" - "srshl v14.4s, v14.4s, v18.4s\n" - "srshl v13.4s, v13.4s, v18.4s\n" - "srshl v12.4s, v12.4s, v18.4s\n" - "sqrdmulh v15.4s, v15.4s, v20.4s\n" - "sqrdmulh v14.4s, v14.4s, v20.4s\n" - "sqrdmulh v13.4s, v13.4s, v20.4s\n" - "sqrdmulh v12.4s, v12.4s, v20.4s\n" + "sqrdmulh v13.4s, v13.4s, v18.4s\n" + "sqrdmulh v12.4s, v12.4s, v18.4s\n" "srshl v15.4s, v15.4s, v17.4s\n" "srshl v14.4s, v14.4s, v17.4s\n" "srshl v13.4s, v13.4s, v17.4s\n" @@ -642,15 +640,17 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "add v14.4s, v14.4s, v16.4s\n" "add v13.4s, v13.4s, v16.4s\n" "add v12.4s, v12.4s, v16.4s\n" - "smax v15.4s, v15.4s, v21.4s\n" - "smax v14.4s, v14.4s, v21.4s\n" - "smax v13.4s, v13.4s, v21.4s\n" - "smin v15.4s, v15.4s, v19.4s\n" - "smin v14.4s, v14.4s, v19.4s\n" - "smin v13.4s, v13.4s, v19.4s\n" - "smax v12.4s, v12.4s, v21.4s\n" + "movi v16.4s, #0x0\n" + "smax v15.4s, v15.4s, v16.4s\n" + "smax v14.4s, v14.4s, v16.4s\n" + "smax v13.4s, v13.4s, v16.4s\n" + "smax v12.4s, v12.4s, v16.4s\n" + "movi v16.4s, #0xff\n" + "smin v15.4s, v15.4s, v16.4s\n" + "smin v14.4s, v14.4s, v16.4s\n" + "smin v13.4s, v13.4s, v16.4s\n" + "smin v12.4s, v12.4s, v16.4s\n" "uzp1 v23.16b, v15.16b, v14.16b\n" - "smin v12.4s, v12.4s, v19.4s\n" "uzp1 v16.16b, v13.16b, v12.16b\n" "uzp1 v16.16b, v23.16b, v16.16b\n" "tbz %x[n_channels], #3, 38f\n" @@ -698,9 +698,7 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( "tbz %x[n_channels], #0, 42f\n" "st1 { v16.b }[0], [%x[outptr]], #0x1\n" "42:" // Oddments: Store: Bit 3: End - "43:" // End - : [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr) : [accumulator_init] "r" (&accumulator_init), [combined_rescale_value] "r" (&combined_rescale_value), [inptrs] "r" (inptrs), [left_shift] "r" (&left_shift), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [quant_params] "r" (&qp), [right_shift] "r" (&right_shift) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26" @@ -709,5 +707,4 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl( } // namespace pooling } // namespace arm_conv - #endif // defined(__aarch64__) -- cgit v1.2.1
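
For reference, a minimal scalar sketch of the requantisation arithmetic the hunks above rearrange: the C++ prologue's shift_value / long_rescale_value computation and the SQRDMULH / SRSHL / add-offset / clamp epilogue. The helper names (Rescale, make_rescale, requantize_one) are illustrative only and are not part of the Compute Library API; the accumulator-init / input-offset handling and the optional left shift are deliberately omitted.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical scalar model; not the library's implementation.
struct Rescale
{
    int32_t multiplier; // Q31 fixed-point value in [0.5, 1.0)
    int32_t shift;      // <= 0: extra rounding right shift applied afterwards
};

// Mirrors the shift_value / long_rescale_value logic in the patch: normalise
// f (= 1 / n_valid_cells) into [0.5, 1.0), convert it to a Q31 multiplier and
// correct for the case where rounding lands exactly on 2^31.
static Rescale make_rescale(float f)
{
    int32_t shift = 0;
    while (f < 0.5f)
    {
        shift--;
        f *= 2.0f;
    }
    int64_t q = std::llround(f * static_cast<double>(1ll << 31));
    if (q == (1ll << 31))
    {
        shift++;
        q >>= 1;
    }
    return Rescale{static_cast<int32_t>(q), shift};
}

// Scalar equivalent of the SQRDMULH / SRSHL / ADD(output_offset) / SMAX / SMIN
// sequence in the assembly epilogue: scale the accumulator by the Q31
// multiplier, apply the rounding right shift, add the offset, clamp to u8.
static uint8_t requantize_one(int32_t acc, Rescale r, int32_t output_offset)
{
    int64_t prod  = static_cast<int64_t>(acc) * r.multiplier;         // Q31 product
    int32_t high  = static_cast<int32_t>((prod + (1ll << 30)) >> 31); // rounding high half
    int32_t right = -r.shift;                                         // shift is <= 0
    if (right > 0)
    {
        high = static_cast<int32_t>((static_cast<int64_t>(high) + (1ll << (right - 1))) >> right);
    }
    int32_t out = high + output_offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, out)));
}

As a quick check of the sketch: with nine valid cells and nine inputs of value 100 (zero offsets), make_rescale(1.0f / 9) yields a Q31 multiplier of about 0.889 with shift = -3, and requantize_one(900, ...) returns 100, the true average.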