From ae0fc8612dba6faebf58c3ebbfae8d6e639d432d Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Mon, 30 Sep 2019 12:39:40 +0100
Subject: COMPMID-2452: Dot product optimizations on merge/transforms

-Adds optimized gemm transforms for AArch64.
-Optimized gemm merger to only support alpha==1 and (beta==0 || beta==1) cases

Change-Id: I55793b12a0381f4fd53f521d0e57416809904d96
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/2003
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
---
 .../transforms/a64_interleave_8way_block4_8bit.hpp | 223 +++++++++++++++++++++
 1 file changed, 223 insertions(+)
 create mode 100644 src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp

diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp
new file mode 100644
index 0000000000..3c4f6d0b19
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2017-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
+
+#include <arm_neon.h>
+
+#include "../asmlib.hpp"
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 4, false, 1, 1, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
+    uint8_t *outptr = reinterpret_cast<uint8_t *>(out);
+    const uint8_t *inptr = reinterpret_cast<const uint8_t *>(in);
+    bool first = true;
+
+    /* Helper functions to copy blocks about used for odd case. */
+    class t {
+    public:
+        static inline void copy_4_inc(uint8_t *&out, const uint8_t *&in) {
+            uint32_t *out_word = reinterpret_cast<uint32_t *>(out);
+            const uint32_t *in_word = reinterpret_cast<const uint32_t *>(in);
+
+            *out_word++ = *in_word++;
+
+            out = reinterpret_cast<uint8_t *>(out_word);
+            in = reinterpret_cast<const uint8_t *>(in_word);
+        }
+
+        static inline void copy_pad(uint8_t *&out, const uint8_t *&in, size_t count) {
+            for (unsigned int i=0; i<4; i++) {
+                if (i < count) {
+                    *out++ = *in++;
+                } else {
+                    *out++ = 0;
+                }
+            }
+        }
+    };
+
+    uint8_t zerobuff[64]; // 32 for asm loop plus up to 31 for overflow loop
+
+    for (int y=y0; y<ymax; y+=8) {
+        const uint8_t *inptr0 = inptr + y * ldin + k0;
+        const uint8_t *inptr1 = inptr0 + ldin;
+        const uint8_t *inptr2 = inptr1 + ldin;
+        const uint8_t *inptr3 = inptr2 + ldin;
+        const uint8_t *inptr4 = inptr3 + ldin;
+        const uint8_t *inptr5 = inptr4 + ldin;
+        const uint8_t *inptr6 = inptr5 + ldin;
+        const uint8_t *inptr7 = inptr6 + ldin;
+
+        prefetch_2x(inptr0);
+        prefetch_2x(inptr1);
+        prefetch_2x(inptr2);
+        prefetch_2x(inptr3);
+        prefetch_2x(inptr4);
+        prefetch_2x(inptr5);
+        prefetch_2x(inptr6);
+        prefetch_2x(inptr7);
+
+        int x=(kmax-k0);
+        for (;(x>31) || first;x-=32) {
+            /* Cope with ragged cases by copying from a buffer of zeroes instead */
+            /* 'first' forces this to always run at least once, needed if the total size is <=32. */
+            if ((y + 7) >= ymax) {
+                switch ((y + 7) - ymax) {
+                    /* Everything falls through in here */
+                    case 6:
+                        inptr1 = zerobuff;
+                    case 5:
+                        inptr2 = zerobuff;
+                    case 4:
+                        inptr3 = zerobuff;
+                    case 3:
+                        inptr4 = zerobuff;
+                    case 2:
+                        inptr5 = zerobuff;
+                    case 1:
+                        inptr6 = zerobuff;
+                    case 0:
+                        inptr7 = zerobuff;
+                        break;
+
+                    default:
+                        UNREACHABLE("Impossible.");
+                }
+            }
+
+            if (first) {
+                if (x<=31) {
+                    break;
+                }
+
+                first = false;
+            }
+
+            __asm __volatile (
+                // Load up 8 elements (2 vectors) from each of 8 sources.
+                "LDP q0, q1, [%[inptr0]], #32\n"   // q0=A0A1A2A3
+                "LDP q2, q3, [%[inptr1]], #32\n"   // q2=B0B1B2B3
+                "LDP q4, q5, [%[inptr2]], #32\n"   // q4=C0C1C2C3
+                "ZIP1 v16.4s, v0.4s, v4.4s\n"      // q16=A0C0A1C1
+                ASM_PREFETCH("[%[inptr0], #128]")
+                "LDP q6, q7, [%[inptr3]], #32\n"   // q6=D0D1D2D3
+                "ZIP1 v17.4s, v2.4s, v6.4s\n"      // q17=B0D0B1D1
+                "LDP q8, q9, [%[inptr4]], #32\n"
+                "LDP q10, q11, [%[inptr5]], #32\n"
+                "LDP q12, q13, [%[inptr6]], #32\n"
+                "ZIP1 v18.4s, v8.4s, v12.4s\n"
+                ASM_PREFETCH("[%[inptr1], #128]")
+                "LDP q14, q15, [%[inptr7]], #32\n"
+                "ZIP1 v19.4s, v10.4s, v14.4s\n"
+
+                "ZIP1 v20.4s, v16.4s, v17.4s\n"    // q20=A0B0C0D0
+                ASM_PREFETCH("[%[inptr2], #128]")
+                "ZIP1 v21.4s, v18.4s, v19.4s\n"
+                "ZIP2 v22.4s, v16.4s, v17.4s\n"
+                "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+                "ZIP2 v16.4s, v0.4s, v4.4s\n"
+                ASM_PREFETCH("[%[inptr3], #128]")
+                "ZIP2 v17.4s, v2.4s, v6.4s\n"
+                "STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
+
+                "ZIP2 v18.4s, v8.4s, v12.4s\n"
+                "ZIP2 v19.4s, v10.4s, v14.4s\n"
+                "STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
+
+                "ZIP1 v20.4s, v16.4s, v17.4s\n"
+                ASM_PREFETCH("[%[inptr4], #128]")
+                "ZIP1 v21.4s, v18.4s, v19.4s\n"
+                "ZIP2 v22.4s, v16.4s, v17.4s\n"
+                "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+                "ZIP1 v16.4s, v1.4s, v5.4s\n"
+                ASM_PREFETCH("[%[inptr5], #128]")
+                "ZIP1 v17.4s, v3.4s, v7.4s\n"
+                "STP q20, q21, [%[outptr]], #32\n" // Third element
+
+                "ZIP1 v18.4s, v9.4s, v13.4s\n"
+                "ZIP1 v19.4s, v11.4s, v15.4s\n"
+                "STP q22, q23, [%[outptr]], #32\n" // Fourth element
+
+                "ZIP1 v20.4s, v16.4s, v17.4s\n"
+                "ZIP1 v21.4s, v18.4s, v19.4s\n"
+                "ZIP2 v22.4s, v16.4s, v17.4s\n"
+                ASM_PREFETCH("[%[inptr6], #128]")
+                "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+                "ZIP2 v16.4s, v1.4s, v5.4s\n"
+                "ZIP2 v17.4s, v3.4s, v7.4s\n"
+                "STP q20, q21, [%[outptr]], #32\n" // Fifth element
+
+                "ZIP2 v18.4s, v9.4s, v13.4s\n"
+                ASM_PREFETCH("[%[inptr7], #128]")
+                "ZIP2 v19.4s, v11.4s, v15.4s\n"
+                "STP q22, q23, [%[outptr]], #32\n" // Sixth element
+
+                "ZIP1 v20.4s, v16.4s, v17.4s\n"
+                "ZIP1 v21.4s, v18.4s, v19.4s\n"
+                "STP q20, q21, [%[outptr]], #32\n" // Seventh element
+
+                "ZIP2 v22.4s, v16.4s, v17.4s\n"
+                "ZIP2 v23.4s, v18.4s, v19.4s\n"
+                "STP q22, q23, [%[outptr]], #32\n" // Eighth element
+                : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+                  [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7), [outptr] "+r" (outptr)
+                :
+                : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12",
+                  "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "memory"
+            );
+        }
+
+        // Copy any leftover blocks of 4 a complete block at a time.
+        for (;x>4;x-=4) {
+            t::copy_4_inc(outptr, inptr0);
+            t::copy_4_inc(outptr, inptr1);
+            t::copy_4_inc(outptr, inptr2);
+            t::copy_4_inc(outptr, inptr3);
+            t::copy_4_inc(outptr, inptr4);
+            t::copy_4_inc(outptr, inptr5);
+            t::copy_4_inc(outptr, inptr6);
+            t::copy_4_inc(outptr, inptr7);
+        }
+
+        // Final block with padding, if any.
+        if (x > 0) {
+            t::copy_pad(outptr, inptr0, x);
+            t::copy_pad(outptr, inptr1, x);
+            t::copy_pad(outptr, inptr2, x);
+            t::copy_pad(outptr, inptr3, x);
+            t::copy_pad(outptr, inptr4, x);
+            t::copy_pad(outptr, inptr5, x);
+            t::copy_pad(outptr, inptr6, x);
+            t::copy_pad(outptr, inptr7, x);
+        }
+    }
+}
+
+#endif // __aarch64__ && !__ARM_FEATURE_SVE
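
Note on the packed layout: the transform above walks rows 8 at a time and, for every block of 4 consecutive K values, writes the 4 bytes of rows 0..7 back to back (the NEON ZIP/STP sequence does this 8 column-blocks at a time; the tail loops handle the remainder, zero-padding out-of-range rows and columns). The scalar sketch below is illustrative only and is not part of the patch; the function name interleave_8way_block4_ref is made up, and it assumes the same (in, ldin, y0, ymax, k0, kmax) parameters as Transform() in the file above.

    #include <cstdint>
    #include <vector>

    // Hypothetical scalar model of the layout the kernel above produces.
    std::vector<uint8_t> interleave_8way_block4_ref(const uint8_t *in, int ldin,
                                                    int y0, int ymax, int k0, int kmax) {
        const int kblocks = (kmax - k0 + 3) / 4;   // K padded up to a multiple of 4
        const int ygroups = (ymax - y0 + 7) / 8;   // rows padded up to a multiple of 8
        std::vector<uint8_t> out(static_cast<size_t>(ygroups) * kblocks * 32, 0);

        uint8_t *outptr = out.data();
        for (int y = y0; y < ymax; y += 8) {           // one group of 8 rows per pass
            for (int kb = 0; kb < kblocks; kb++) {     // one 4-byte block of K per step
                for (int row = 0; row < 8; row++) {    // rows 0..7 written back to back
                    for (int i = 0; i < 4; i++) {
                        const int yy = y + row;
                        const int kk = k0 + kb * 4 + i;
                        // Out-of-range rows/columns become zero padding, mirroring
                        // the zerobuff and copy_pad paths in the kernel.
                        *outptr++ = (yy < ymax && kk < kmax) ? in[yy * ldin + kk] : 0;
                    }
                }
            }
        }
        return out;
    }

The 4-byte grouping is what lets a dot-product kernel consume four K values per 32-bit lane directly from the packed buffer.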
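The commit message also says the gemm merger (not shown in this diff) was reduced to the alpha==1 and (beta==0 || beta==1) cases. Those two cases make the merge of an accumulator tile into C either a plain store or an in-place add. The snippet below is a minimal, hypothetical illustration of that distinction only; it is not the merger code touched by this change, and all names in it are made up.

    #include <cstdint>

    // Hypothetical sketch of the two merge cases left supported by the commit.
    void merge_tile_ref(int32_t *C, int ldc, const int32_t *acc,
                        int height, int width, bool beta_is_zero) {
        for (int r = 0; r < height; r++) {
            for (int c = 0; c < width; c++) {
                const int32_t v = acc[r * width + c];    // alpha == 1: accumulator used as-is
                if (beta_is_zero) {
                    C[r * ldc + c] = v;                  // beta == 0: existing C ignored
                } else {
                    C[r * ldc + c] += v;                 // beta == 1: accumulate into C
                }
            }
        }
    }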