author     Michalis Spyrou <michalis.spyrou@arm.com>    2021-04-20 12:15:52 +0100
committer  Michalis Spyrou <michalis.spyrou@arm.com>    2021-04-20 17:10:48 +0000
commit     778b95cb755880ab4adc972fbf3b7022a99b63f9
tree       13a4f6060e20a0989c558d7750386b8f32827e4b /src/core/NEON/kernels
parent     dcf4c87cf78a5f1667699c1a3511d09356938660
download   ComputeLibrary-778b95cb755880ab4adc972fbf3b7022a99b63f9.tar.gz
Update assembly code
This patch brings a performance uplift on Cortex-A35.
Resolves: COMPMID-4316
Change-Id: I2b9c02a599373f780dd1b981b821e33bd59a3422
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5461
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
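For context on the asmlib.hpp change below: the new ASM_PREFETCHU macro wraps PRFUM, the unscaled-offset form of the AArch64 prefetch instruction. PRFM encodes its immediate as an unsigned multiple of 8, while PRFUM accepts any signed offset in -256..255, which is what lets the new kernel prefetch the A panel at byte offsets such as #52 and #116. A minimal sketch of the distinction follows; the helper name is invented for illustration and is not part of the patch.

// Illustrative only (not library code): why both prefetch forms are useful.
static inline void prefetch_panels(const float *a_panel, const float *b_panel) {
#ifdef __aarch64__
    __asm__ volatile(
        "prfm  pldl1keep, [%[b], #64]\n"   // PRFM: scaled immediate, must be a multiple of 8
        "prfum pldl1keep, [%[a], #52]\n"   // PRFUM: unscaled signed immediate in -256..255
        :
        : [a] "r"(a_panel), [b] "r"(b_panel));
#else
    (void)a_panel;
    (void)b_panel;
#endif
}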
Diffstat (limited to 'src/core/NEON/kernels')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/asmlib.hpp                        |   1
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp                     |  12
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp         |  75
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp | 381
-rw-r--r--  src/core/NEON/kernels/arm_gemm/mergeresults.cpp                  |   9
5 files changed, 474 insertions, 4 deletions
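Before the diff itself, a note on how the new kernel is wired in: gemm_fp32.cpp keeps an ordered table of candidate GEMM implementations, and each entry can carry a "recommended" predicate; the a64_sgemm_8x6 entry's predicate only accepts Cortex-A35, so the kernel is used for any problem on that core and never elsewhere. Below is a simplified sketch of that selection pattern, with invented types standing in for the real GemmImplementation machinery; it is illustrative, not the library's API.

#include <functional>
#include <string>
#include <vector>

// Invented, simplified stand-ins for the selection pattern in gemm_fp32.cpp.
enum class CpuModel { GENERIC, A35 };

struct Candidate {
    std::string name;
    std::function<bool(CpuModel)> recommended;  // empty => no CPU restriction
};

// Return the first entry whose predicate (if any) accepts the detected CPU.
const Candidate *pick(const std::vector<Candidate> &table, CpuModel cpu) {
    for (const auto &c : table) {
        if (!c.recommended || c.recommended(cpu)) {
            return &c;
        }
    }
    return nullptr;
}

// Usage sketch: the A35-only entry sits ahead of the generic fallback, so it
// wins on A35 and is skipped on every other CPU model.
// std::vector<Candidate> table = {
//     { "a64_sgemm_8x6",  [](CpuModel m) { return m == CpuModel::A35; } },
//     { "a64_sgemm_8x12", {} },
// };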
diff --git a/src/core/NEON/kernels/arm_gemm/asmlib.hpp b/src/core/NEON/kernels/arm_gemm/asmlib.hpp
index 00674334a9..7766656adb 100644
--- a/src/core/NEON/kernels/arm_gemm/asmlib.hpp
+++ b/src/core/NEON/kernels/arm_gemm/asmlib.hpp
@@ -32,6 +32,7 @@
 // "Correct" version
 #define ASM_PREFETCH(address)    "PRFM PLDL1KEEP, " address "\n"
+#define ASM_PREFETCHU(address)   "PRFUM PLDL1KEEP, " address "\n"
 #define ASM_PREFETCHL2(address)  "PRFM PLDL2KEEP, " address "\n"
 #define ASM_PREFETCHW(address)   "PRFM PSTL1KEEP, " address "\n"
 #define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 6c3743dce7..d94814fb4c 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,6 +35,7 @@
 #include "kernels/a64_hybrid_fp32_mla_6x16.hpp"
 #include "kernels/a64_hybrid_fp32_mla_8x4.hpp"
 #include "kernels/a64_sgemm_8x12.hpp"
+#include "kernels/a64_sgemm_8x6.hpp"
 #include "kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp"
@@ -110,7 +111,14 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
     [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>(args); }
 },
 #endif // __ARM_FEATURE_SVE
-
+// Cortex-A35 specific kernel - use for any problem on A35, and never in any other cases.
+{
+    GemmMethod::GEMM_INTERLEAVED,
+    "a64_sgemm_8x6",
+    nullptr,
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A35; },
+    [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x6, float, float>(args); }
+},
 // Arm® Neon™ hybrid methods
 {
     GemmMethod::GEMM_HYBRID,
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp
new file mode 100644
index 0000000000..c1318a2a06
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+
+namespace arm_gemm {
+
+// Actual kernel implementations
+void a64_sgemm_asimd_8x6(const float *, const float *, float *, int, int, int);
+
+// 8x6 SGEMM "strategy" class.
+//
+// This describes the characteristics of a family of kernels, in terms of
+// the required interleave properties and the output block size.
+//
+// All kernels in the family must share these characteristics.  The actual
+// kernel to be used can be chosen at runtime, based on the CPU_type
+// structure.
+class cls_a64_sgemm_8x6 {
+public:
+    typedef float operand_type;
+    typedef float result_type;
+
+    typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
+
+    /* Kernel blocking parameters */
+    static unsigned int out_width() {
+        return 6;
+    }
+
+    static unsigned int out_height() {
+        return 8;
+    }
+
+    static unsigned int k_unroll() {
+        return 1;
+    }
+
+    // Use the standard fixed size transforms.
+    StdTransformsFixed<operand_type, result_type, 8, 6, 1> transforms = {};
+
+    kern_type kernel=a64_sgemm_asimd_8x6;
+
+    cls_a64_sgemm_8x6(const CPUInfo *) {
+
+    }
+};
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp
new file mode 100644
index 0000000000..9b81374d2d
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+
+#include "../../asmlib.hpp"
+
+// Kernel implementation.
+//
+// Assume that "Apanel" points to a chunk of A blocks (each size 8xK) in read-order.
+// Assume that "Bpanel" points to a chunk of B blocks (each size 6xK) in read-order.
+// Assume that "Cpanel" points to a chunk of C output blocks (each size
+// 6x8), the chunks being arranged in a row major fashion.
+//
+// Note that the intent of this is that either ablocks or bblocks will be 1
+// - this construction allows the output loop to proceed in either order.
+
+namespace arm_gemm {
+
+void a64_sgemm_asimd_8x6(const float *Apanel, const float *Bpanel, float *Cpanel, int ablocks, int bblocks, int K) {
+    const float *a_ptr = Apanel;
+    float *c_ptr = Cpanel;
+
+    for (int yb=0; yb<ablocks; yb++) {
+        const float *a_ptr0 = a_ptr;
+        const float *b_ptr = Bpanel;
+
+        for (int xb=0; xb<bblocks; xb++) {
+            a_ptr = a_ptr0;
+            // Fix up for odd lengths - set a flag if K is odd, but make
+            // sure we round up the iteration count.
+            int oddk = (K & 1);
+            int k = ((K+1)/2) - 1;
+
+            register float32x4_t a0 asm("v0");
+            register float32x4_t a1 asm("v1");
+            register float32x4_t a2 asm("v2");
+            register float32x4_t a3 asm("v3");
+            register float32x4_t b0 asm("v4");
+            register float32x4_t b1 asm("v5");
+            register float32x4_t b2 asm("v6");
+
+            __asm __volatile (
+                // Initialize result registers, load initial operands, prime prefetches.
+                "movi v8.2s, #0x0\n"
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "movi v9.2s, #0x0\n"
+                "movi v10.2s, #0x0\n"
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "movi v11.2s, #0x0\n"
+                "movi v12.2s, #0x0\n"
+                "movi v13.2s, #0x0\n"
+                "movi v14.2s, #0x0\n"
+                ASM_PREFETCH("[%[b_ptr], #64]")
+                ASM_PREFETCHU("[%[a_ptr], #52]")
+                ASM_PREFETCHU("[%[a_ptr], #116]")
+                ASM_PREFETCH("[%[b_ptr], #128]")
+                "movi v15.2s, #0x0\n"
+                "movi v16.2s, #0x0\n"
+                "movi v17.2s, #0x0\n"
+                "movi v18.2s, #0x0\n"
+                "movi v19.2s, #0x0\n"
+                "movi v20.2s, #0x0\n"
+                "movi v21.2s, #0x0\n"
+                "movi v22.2s, #0x0\n"
+                "movi v23.2s, #0x0\n"
+                "movi v24.2s, #0x0\n"
+                "movi v25.2s, #0x0\n"
+                "movi v26.2s, #0x0\n"
+                "movi v27.2s, #0x0\n"
+                "movi v28.2s, #0x0\n"
+                "movi v29.2s, #0x0\n"
+                "movi v30.2s, #0x0\n"
+                "movi v31.2s, #0x0\n"
+
+                // Skip loop if we are doing zero iterations of it.
+                "cbz %w[k], 4f\n"
+
+                // Loop proper
+                "1:\n"
+                "ldr %d[b0], [%[b_ptr], #0]\n"
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+                "ldr %d[b1], [%[b_ptr], #8]\n"
+                "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+                "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+                "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+                "ldr %d[b2], [%[b_ptr], #16]\n"
+                "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+                "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+                "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+                "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+                "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+                "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+                "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+                "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                ASM_PREFETCH("[%[b_ptr], #128]")
+                "subs %w[k], %w[k], #1\n"
+                ASM_PREFETCHU("[%[a_ptr], #156]")
+                "ldr %d[b0], [%[b_ptr], #24]\n"
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+
+                "ldr %d[b1], [%[b_ptr], #32]\n"
+                "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+                "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+                "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+                "ldr %d[b2], [%[b_ptr], #40]\n"
+                "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+                "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+                "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+                "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+                "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+                "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+                "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+                "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "add %[b_ptr], %[b_ptr], #48\n"
+                ASM_PREFETCHU("[%[a_ptr], #188]")
+                "bne 1b\n"
+
+                // Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
+                "4:\n"
+                ASM_PREFETCH("[%[c_ptr]]")
+                ASM_PREFETCH("[%[c_ptr], #64]")
+
+                "ldr %d[b0], [%[b_ptr]]\n"
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+
+                // Branch to alternative tail for odd K
+                "cbnz %w[oddk], 2f\n"
+
+                // Detached final iteration (even K)
+                "ldr %d[b1], [%[b_ptr], #8]\n"
+                "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+                "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+                "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+                "ldr %d[b2], [%[b_ptr], #16]\n"
+                "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+                "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+                "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+                "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+                "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+                "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+                "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+                "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+                "ldr %d[b0], [%[b_ptr], #24]\n"
+                "add %[b_ptr], %[b_ptr], #48\n"
+                ASM_PREFETCH("[%[b_ptr], #128]")
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+
+                "ldr %d[b1], [%[b_ptr], #-16]\n"
+                "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+                "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+                "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+                "ldr %d[b2], [%[b_ptr], #-8]\n"
+                "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+                "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+                "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+                "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+                "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+                "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+                "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+                "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+                "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+                "b 3f\n"
+
+                // Detached final iteration (odd K)
+                "2:\n"
+                "ldr %d[b1], [%[b_ptr], #8]\n"
+                "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+                "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+                "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+                "ldr %d[b2], [%[b_ptr], #16]\n"
+                "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+                "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+                "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+                "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+                "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+                "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+                "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+                "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+                "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+                "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+                "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+                "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+                "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+                "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+                "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+                "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+                "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+                "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+                "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+                "add %[b_ptr], %[b_ptr], #24\n"
+
+                // Common tail
+                "3:\n"
+                "str d8, [%[c_ptr], #0]\n"
+                "str d16, [%[c_ptr], #8]\n"
+                "str d24, [%[c_ptr], #16]\n"
+                "str d9, [%[c_ptr], #24]\n"
+                "str d17, [%[c_ptr], #32]\n"
+                "str d25, [%[c_ptr], #40]\n"
+                "str d10, [%[c_ptr], #48]\n"
+                "str d18, [%[c_ptr], #56]\n"
+                "str d26, [%[c_ptr], #64]\n"
+                "str d11, [%[c_ptr], #72]\n"
+                "str d19, [%[c_ptr], #80]\n"
+                "str d27, [%[c_ptr], #88]\n"
+                "str d12, [%[c_ptr], #96]\n"
+                "str d20, [%[c_ptr], #104]\n"
+                "str d28, [%[c_ptr], #112]\n"
+                "str d13, [%[c_ptr], #120]\n"
+                "str d21, [%[c_ptr], #128]\n"
+                "str d29, [%[c_ptr], #136]\n"
+                "str d14, [%[c_ptr], #144]\n"
+                "str d22, [%[c_ptr], #152]\n"
+                "str d30, [%[c_ptr], #160]\n"
+                "str d15, [%[c_ptr], #168]\n"
+                "str d23, [%[c_ptr], #176]\n"
+                "str d31, [%[c_ptr], #184]\n"
+                "add %[c_ptr], %[c_ptr], #192\n"
+
+                :
+                  [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+                  [a0] "+w" (a0), [a1] "+w" (a1), [a2] "+w" (a2), [a3] "+w" (a3),
+                  [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+                : [oddk] "r" (oddk)
+                : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
+                  "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
+            );
+        }
+    }
+}
+
+} // namespace arm_gemm
+
+#endif
diff --git a/src/core/NEON/kernels/arm_gemm/mergeresults.cpp b/src/core/NEON/kernels/arm_gemm/mergeresults.cpp
index 8ca947a6dc..adcdc88fd9 100644
--- a/src/core/NEON/kernels/arm_gemm/mergeresults.cpp
+++ b/src/core/NEON/kernels/arm_gemm/mergeresults.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,7 +25,6 @@
 /* As some of the merges need these headers, but are all included in the
  * arm_gemm namespace, put these headers here. */
 #include <algorithm>
-#include <limits>
 
 #include <arm_neon.h>
 
@@ -97,6 +96,12 @@ void MergeResults(Tout * out, const Tin * in, int ldc, int y0, int ymax, int x0,
 
 #include "merges/list.hpp"
 
+/* Cortex-A53 8x6 SGEMM kernel uses a templated merge as the optimized merge
+ * generator cannot cope with the width (6) not being a multiple of VL (4). */
+#ifdef __aarch64__
+template void MergeResults<6u, 8u, false, float, float>(float *, float const*, int, int, int, int, int, float const *, Activation, bool);
+#endif
+
 #if defined(__aarch64__) && defined(__ARM_FP16_ARGS)
 template void MergeResults<12u, 8u, false, float, __fp16>(__fp16*, float const*, int, int, int, int, int, __fp16 const*, Activation, bool);
 #endif
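Finally, a readable cross-check on what the assembly above computes: for each A-block/B-block pair the kernel consumes 8 interleaved A values and 6 interleaved B values per K step and overwrites one 8x6 row-major C block (48 floats, the 192 bytes stored by the "Common tail"); accumulation into the user's output matrix is handled afterwards by MergeResults. Below is a plain C++ reference of that arithmetic with invented names; it is a sketch for understanding the kernel, not library code.

// Reference for one 8x6 block of a64_sgemm_asimd_8x6 (illustrative only).
// a_panel supplies 8 floats per k step and b_panel 6 floats per k step, both
// in the interleaved "read order" produced by the fixed transforms.
static void sgemm_8x6_block_ref(const float *a_panel, const float *b_panel,
                                float *c_block, int K) {
    float acc[8][6] = {};

    for (int k = 0; k < K; k++) {
        for (int r = 0; r < 8; r++) {
            for (int c = 0; c < 6; c++) {
                acc[r][c] += a_panel[k * 8 + r] * b_panel[k * 6 + c];
            }
        }
    }

    // Write the block row-major: 48 floats, matching the 24 64-bit stores
    // (192 bytes) performed by the assembly's common tail.
    for (int r = 0; r < 8; r++) {
        for (int c = 0; c < 6; c++) {
            c_block[r * 6 + c] = acc[r][c];
        }
    }
}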