diff options
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp | 55 |
1 file changed, 26 insertions, 29 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp index 87c73740e7..9157d29eba 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp @@ -92,9 +92,6 @@ void a64_hybrid_fp16_mla_6x32_a55 ( break; } __asm__ __volatile__( -#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - ".arch armv8.2-a+fp16\n" -#endif "1:" // Row loop "cmp %x[M], #0x6\n" "bge 246f\n" @@ -1305,14 +1302,14 @@ void a64_hybrid_fp16_mla_6x32_a55 ( "ldr q9, [x16, #0x10]\n" "ldr q10, [x16, #0x20]\n" "mov v12.16b, v8.16b\n" - "mov v16.16b, v8.16b\n" - "mov v13.16b, v9.16b\n" - "mov v17.16b, v9.16b\n" - "mov v14.16b, v10.16b\n" - "mov v18.16b, v10.16b\n" "ldr q11, [x16, #0x30]\n" + "mov v13.16b, v9.16b\n" "add x16, x16, #0x40\n" + "mov v14.16b, v10.16b\n" "mov v15.16b, v11.16b\n" + "mov v16.16b, v8.16b\n" + "mov v17.16b, v9.16b\n" + "mov v18.16b, v10.16b\n" "mov v19.16b, v11.16b\n" "b 120f\n" "101:" // Height 3: no bias @@ -2158,18 +2155,18 @@ void a64_hybrid_fp16_mla_6x32_a55 ( "ldr q9, [x16, #0x10]\n" "ldr q10, [x16, #0x20]\n" "mov v12.16b, v8.16b\n" - "mov v16.16b, v8.16b\n" + "ldr q11, [x16, #0x30]\n" "mov v13.16b, v9.16b\n" - "mov v17.16b, v9.16b\n" + "add x16, x16, #0x40\n" "mov v14.16b, v10.16b\n" + "mov v15.16b, v11.16b\n" + "mov v16.16b, v8.16b\n" + "mov v17.16b, v9.16b\n" "mov v18.16b, v10.16b\n" + "mov v19.16b, v11.16b\n" "mov v20.16b, v8.16b\n" "mov v21.16b, v9.16b\n" "mov v22.16b, v10.16b\n" - "ldr q11, [x16, #0x30]\n" - "add x16, x16, #0x40\n" - "mov v15.16b, v11.16b\n" - "mov v19.16b, v11.16b\n" "mov v23.16b, v11.16b\n" "b 169f\n" "150:" // Height 4: no bias @@ -3182,22 +3179,22 @@ void a64_hybrid_fp16_mla_6x32_a55 ( "ldr q9, [x16, #0x10]\n" "ldr q10, [x16, #0x20]\n" "mov v12.16b, v8.16b\n" - "mov v16.16b, v8.16b\n" + "ldr q11, [x16, #0x30]\n" "mov v13.16b, v9.16b\n" - "mov v17.16b, v9.16b\n" + "add x16, x16, #0x40\n" "mov v14.16b, v10.16b\n" + "mov v15.16b, v11.16b\n" + "mov v16.16b, v8.16b\n" + "mov v17.16b, v9.16b\n" "mov v18.16b, v10.16b\n" + "mov v19.16b, v11.16b\n" "mov v20.16b, v8.16b\n" "mov v21.16b, v9.16b\n" "mov v22.16b, v10.16b\n" + "mov v23.16b, v11.16b\n" "mov v24.16b, v8.16b\n" "mov v25.16b, v9.16b\n" "mov v26.16b, v10.16b\n" - "ldr q11, [x16, #0x30]\n" - "add x16, x16, #0x40\n" - "mov v15.16b, v11.16b\n" - "mov v19.16b, v11.16b\n" - "mov v23.16b, v11.16b\n" "mov v27.16b, v11.16b\n" "b 218f\n" "199:" // Height 5: no bias @@ -4380,26 +4377,26 @@ void a64_hybrid_fp16_mla_6x32_a55 ( "ldr q9, [x16, #0x10]\n" "ldr q10, [x16, #0x20]\n" "mov v12.16b, v8.16b\n" - "mov v16.16b, v8.16b\n" + "ldr q11, [x16, #0x30]\n" "mov v13.16b, v9.16b\n" - "mov v17.16b, v9.16b\n" + "add x16, x16, #0x40\n" "mov v14.16b, v10.16b\n" + "mov v15.16b, v11.16b\n" + "mov v16.16b, v8.16b\n" + "mov v17.16b, v9.16b\n" "mov v18.16b, v10.16b\n" + "mov v19.16b, v11.16b\n" "mov v20.16b, v8.16b\n" "mov v21.16b, v9.16b\n" "mov v22.16b, v10.16b\n" + "mov v23.16b, v11.16b\n" "mov v24.16b, v8.16b\n" "mov v25.16b, v9.16b\n" "mov v26.16b, v10.16b\n" + "mov v27.16b, v11.16b\n" "mov v28.16b, v8.16b\n" "mov v29.16b, v9.16b\n" "mov v30.16b, v10.16b\n" - "ldr q11, [x16, #0x30]\n" - "add x16, x16, #0x40\n" - "mov v15.16b, v11.16b\n" - "mov v19.16b, v11.16b\n" - "mov v23.16b, v11.16b\n" - "mov v27.16b, v11.16b\n" "mov v31.16b, v11.16b\n" "b 267f\n" "248:" // Height 6: no bias |