diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-07-06 19:10:38 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-07-07 15:07:37 +0000 |
commit | 0cc50ed757f06f4f076e261cb7253dd67264dec6 (patch) | |
tree | 866306f780f7529ea172d78210355aed761ad7a4 /src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp | |
parent | ab23dd0fbc632063235a6ad408241dc79a35d3e4 (diff) | |
download | ComputeLibrary-0cc50ed757f06f4f076e261cb7253dd67264dec6.tar.gz |
COMPMID-3324: Remove pretransposed support from NEON backend
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I394c6c539969940e0119cbc14174909d47e65de6
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3519
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp index f4fba227d6..3f1df76a6a 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp @@ -32,7 +32,7 @@ namespace arm_gemm { -void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) { +void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) { const int K_stride = K; const long loops_count = ((K + 4) / 8) - 1; K -= loops_count * 8; @@ -40,7 +40,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float K -= (regs_count + 1) * 4; const long blocks_count = K / 1; float nullbias[16]; - if (!append && !bias) { + if (!accumulate && !bias) { memset(nullbias, 0, (16 * sizeof(float))); } float minval = - static_cast<float>(std::numeric_limits<float>::infinity()); @@ -89,7 +89,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float float result_buffer[64]; const unsigned long ldcb = (use_result_buffer ? 
16 : ldc) * sizeof(float); float *c_ptr_real = c_ptr0; - if (use_result_buffer && append) { + if (use_result_buffer && accumulate) { for(int cy=0; cy<std::min(M-y, 4); cy++) { for(unsigned int cx=0; cx<width; cx++) { result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx]; @@ -104,7 +104,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float switch(rows_to_compute) { case 1: __asm __volatile ( - "cbnz %[append], 1f\n" + "cbnz %[accumulate], 1f\n" "ldr q16, [%[biasptr]]\n" "ldr q17, [%[biasptr], #0x10]\n" "ldr q18, [%[biasptr], #0x20]\n" @@ -336,7 +336,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float "str q19, [%[c_ptr0], #0x30]\n" "add %[c_ptr0], %[c_ptr0], #0x40\n" : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks) - : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) + : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory" ); break; @@ -346,7 +346,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float "c_ptr1 .req X1\n" "add a_ptr1, %[a_ptr0], %[lda]\n" "add c_ptr1, %[c_ptr0], %[ldc]\n" - "cbnz %[append], 1f\n" + "cbnz %[accumulate], 1f\n" "ldr q16, [%[biasptr]]\n" "ldr q17, [%[biasptr], #0x10]\n" "ldr q18, [%[biasptr], #0x20]\n" @@ -697,7 +697,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float ".unreq a_ptr1\n" ".unreq c_ptr1\n" : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" 
(b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks) - : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) + : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory" ); break; @@ -711,7 +711,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float "add c_ptr1, %[c_ptr0], %[ldc]\n" "add a_ptr2, a_ptr1, %[lda]\n" "add c_ptr2, c_ptr1, %[ldc]\n" - "cbnz %[append], 1f\n" + "cbnz %[accumulate], 1f\n" "ldr q16, [%[biasptr]]\n" "ldr q17, [%[biasptr], #0x10]\n" "ldr q18, [%[biasptr], #0x20]\n" @@ -1181,7 +1181,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float ".unreq c_ptr1\n" ".unreq c_ptr2\n" : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks) - : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) + : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory" ); break; @@ -1200,7 +1200,7 @@ void 
a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float "add c_ptr2, c_ptr1, %[ldc]\n" "add a_ptr3, a_ptr2, %[lda]\n" "add c_ptr3, c_ptr2, %[ldc]\n" - "cbnz %[append], 1f\n" + "cbnz %[accumulate], 1f\n" "ldr q16, [%[biasptr]]\n" "ldr q17, [%[biasptr], #0x10]\n" "ldr q18, [%[biasptr], #0x20]\n" @@ -1789,7 +1789,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float ".unreq c_ptr2\n" ".unreq c_ptr3\n" : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks) - : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) + : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr) : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory" ); break; |