author | Moritz Pflanzer <moritz.pflanzer@arm.com> | 2017-09-15 10:42:58 +0100
---|---|---
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000
commit | 80373f607cb12693824411510c39e367a4dfbdb5 (patch) |
tree | ddc4d038783ed91ff227fb259a85fefc09e46319 /arm_compute/core/NEON/kernels/assembly/transforms |
parent | c09314a288dc2aa7ef75a09a8ff5dede3f80974a (diff) |
download | ComputeLibrary-80373f607cb12693824411510c39e367a4dfbdb5.tar.gz |
COMPMID-481: Add AArch32 GEMM
Change-Id: Idba0b30bfb27866a46a22388014ab81432ea28dc
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86196
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels/assembly/transforms')
3 files changed, 282 insertions, 2 deletions
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp
new file mode 100644
index 0000000000..1c1f85c11c
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __arm__
+
+#include "../asmlib.hpp"
+
+#include <arm_neon.h>
+
+template<>
+template<typename T>
+void TransformImpl<6, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
+    uint32_t *outptr = reinterpret_cast<uint32_t *>(out);
+    const uint32_t *inptr = reinterpret_cast<const uint32_t *>(in);
+
+    uint32_t zerobuff[8];
+
+    for (int y=y0; y<ymax; y+=6) {
+        const uint32_t *inptr0 = inptr + y * ldin + k0;
+        const uint32_t *inptr1 = inptr0 + ldin;
+        const uint32_t *inptr2 = inptr1 + ldin;
+        const uint32_t *inptr3 = inptr2 + ldin;
+        const uint32_t *inptr4 = inptr3 + ldin;
+        const uint32_t *inptr5 = inptr4 + ldin;
+
+        //prefetch_2x(inptr0);
+        //prefetch_2x(inptr1);
+        //prefetch_2x(inptr2);
+        //prefetch_2x(inptr3);
+        //prefetch_2x(inptr4);
+        //prefetch_2x(inptr5);
+
+        int x=(kmax-k0);
+        for (;x>7;x-=8) {
+            /* Cope with ragged cases by copying from a buffer of zeroes instead */
+            if ((y + 5) >= ymax) {
+                switch ((y + 5) - ymax) {
+                    /* Everything falls through in here */
+                    case 4:
+                        inptr1 = zerobuff;
+                    case 3:
+                        inptr2 = zerobuff;
+                    case 2:
+                        inptr3 = zerobuff;
+                    case 1:
+                        inptr4 = zerobuff;
+                    case 0:
+                        inptr5 = zerobuff;
+                    default:
+                        break;
+                }
+            }
+
+
+            __asm __volatile (
+                // Load up 8 elements (2 vectors) from each of 6 sources.
+                "VLD1.32 {d0-d3}, [%[inptr0]]!\n"   // q0=A0A1A2A3
+                "VLD1.32 {d4-d7}, [%[inptr1]]!\n"   // q2=B0B1B2B3
+                "VLD1.32 {d8-d11}, [%[inptr2]]!\n"  // q4=C0C1C2C3
+                "VZIP.32 q0, q4\n"                  // q0=A0C0A1C1, q4 = A2C2A3C3
+                "VLD1.32 {d12-d15}, [%[inptr3]]!\n" // q6=D0D1D2D3
+                "VZIP.32 q2, q6\n"                  // q2=B0D0B1D1, q6 = B2D2B3D3
+                "VLD1.32 {d16-d19}, [%[inptr4]]!\n"
+                "VLD1.32 {d20-d23}, [%[inptr5]]!\n"
+                "VZIP.32 q8, q10\n"                 // q8=E0F0E1F1, q10 = E2F2E3F3
+                ASM_PREFETCH("[%[inptr0], #128]")
+                "VZIP.32 q0, q2\n"                  // q0 = A0B0C0D0, q2 = A1B1C1D1
+
+                // Store first elements
+                "VST1.32 {d0-d1}, [%[outptr]]!\n"
+                "VST1.32 {d16}, [%[outptr]]!\n"
+
+                "VZIP.32 q4, q6\n"                  // q4 = A2B2C2D2, q6 = A3B3C3D3
+
+                // Store second elements
+                "VST1.32 {d4-d5}, [%[outptr]]!\n"
+                "VZIP.32 q1, q5\n"
+                ASM_PREFETCH("[%[inptr1], #128]")
+                "VST1.32 {d17}, [%[outptr]]!\n"
+                "VZIP.32 q3, q7\n"
+
+                // Store third elements
+                "VZIP.32 q9, q11\n"
+                "VST1.32 {d8-d9}, [%[outptr]]!\n"
+                "VZIP.32 q1, q3\n"
+                ASM_PREFETCH("[%[inptr2], #128]")
+                "VST1.32 {d20}, [%[outptr]]!\n"
+
+                // Store fourth elements
+                "VZIP.32 q5, q7\n"
+                "VST1.32 {d12-d13}, [%[outptr]]!\n"
+                ASM_PREFETCH("[%[inptr3], #128]")
+                "VST1.32 {d21}, [%[outptr]]!\n"
+
+                // Fifth
+                "VST1.32 {d2-d3}, [%[outptr]]!\n"
+                ASM_PREFETCH("[%[inptr4], #128]")
+                "VST1.32 {d18}, [%[outptr]]!\n"
+
+                // Sixth
+                "VST1.32 {d6-d7}, [%[outptr]]!\n"
+                ASM_PREFETCH("[%[inptr5], #128]")
+                "VST1.32 {d19}, [%[outptr]]!\n"
+
+                // Seventh
+                "VST1.32 {d10-d11}, [%[outptr]]!\n"
+                "VST1.32 {d22}, [%[outptr]]!\n"
+
+                // Eighth
+                "VST1.32 {d14-d15}, [%[outptr]]!\n"
+                "VST1.32 {d23}, [%[outptr]]!\n"
+
+                : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+                  [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [outptr] "+r" (outptr)
+                :
+                : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
+            );
+        }
+
+        for (;x>0;x--) {
+            *outptr++ = *inptr0++;
+            *outptr++ = *inptr1++;
+            *outptr++ = *inptr2++;
+            *outptr++ = *inptr3++;
+            *outptr++ = *inptr4++;
+            *outptr++ = *inptr5++;
+        }
+    }
+}
+
+#endif // __arm__
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp
new file mode 100644
index 0000000000..a5a5a1058f
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __arm__
+
+#include "transpose_interleave_common.hpp"
+
+// Generic unblocked transposed 8x32-bit sized specialisation
+template <>
+template <typename T>
+inline void TransformImpl<8, 1, true, 4, 4>::Transform(
+    T* out, const T* const in, const int stride,
+    const int x0, const int xmax, const int k0, const int kmax
+) {
+    // Redirect to a 16x uint16_t specialisation
+    TransformImpl<16, 1, true, 2, 2>::Transform(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t * const>(in),
+        stride*2, x0*2, xmax*2, k0, kmax
+    );
+}
+
+// Generic 16x16-bit sized specialisation
+template <>
+template <typename T>
+inline void TransformImpl<16, 1, true, 2, 2>::Transform(
+    T* out, const T* const in, const int stride,
+    const int x0, const int xmax, const int k0, const int kmax
+) {
+    // Redirect to a uint16_t specialisation
+    Transform(
+        reinterpret_cast<uint16_t *>(out),
+        reinterpret_cast<const uint16_t * const>(in),
+        stride, x0, xmax, k0, kmax
+    );
+}
+
+// Specialised 16 x uint16_t version
+template <>
+inline void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x1(const uint16_t *&in0, uint16_t *out) {
+    __asm volatile (
+        "VLD1.32 {d0-d3}, [%[in0]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]\n"
+        ASM_PREFETCH("[%[in0], #192]")
+        : [in0] "+r" (in0),
+          [out] "+r" (out)
+        :
+        : "q0", "q1", "memory"
+    );
+}
+
+template <>
+inline void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x2(const uint16_t *&in0, const uint16_t *&in1, uint16_t *out) {
+    __asm volatile (
+        "VLD1.32 {d0-d3}, [%[in0]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]!\n"
+        ASM_PREFETCH("[%[in0], #192]")
+        "VLD1.32 {d0-d3}, [%[in1]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]\n"
+        ASM_PREFETCH("[%[in1], #192]")
+        "SUB %[out], %[out], #32\n"
+        : [in0] "+r" (in0),
+          [in1] "+r" (in1),
+          [out] "+r" (out)
+        :
+        : "q0", "q1", "memory"
+    );
+}
+
+template <>
+inline void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x4(const uint16_t *&in0, const uint16_t *&in1, const uint16_t *&in2, const uint16_t *&in3, uint16_t *out) {
+    __asm __volatile (
+        "VLD1.32 {d0-d3}, [%[in0]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]!\n"
+        ASM_PREFETCH("[%[in0], #192]")
+        "VLD1.32 {d0-d3}, [%[in1]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]!\n"
+        ASM_PREFETCH("[%[in1], #192]")
+        "VLD1.32 {d0-d3}, [%[in2]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]!\n"
+        ASM_PREFETCH("[%[in2], #192]")
+        "VLD1.32 {d0-d3}, [%[in3]]!\n"
+        "VST1.32 {d0-d3}, [%[out]]\n"
+        ASM_PREFETCH("[%[in3], #192]")
+        "SUB %[out], %[out], #96\n"
+        : [in0] "+r" (in0),
+          [in1] "+r" (in1),
+          [in2] "+r" (in2),
+          [in3] "+r" (in3),
+          [out] "+r" (out)
+        :
+        : "q0", "q1", "memory"
+    );
+}
+
+template <>
+template <>
+inline void TransformImpl<16, 1, true, 2, 2>::Transform(
+    uint16_t* out, const uint16_t* const in, const int stride,
+    const int x0, const int xmax, const int k0, const int kmax
+) {
+    TransposeInterleaveCommon<16, uint16_t, uint16_t>::Transform(out, in, stride, x0, xmax, k0, kmax);
+}
+
+#endif // __arm__
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp
index 3cf6b41ffa..13e1b5468b 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-//#include "a32_interleave_6way_32bit.hpp"
-//#include "a32_transpose_interleave_8way_32bit.hpp"
+#include "a32_interleave_6way_32bit.hpp"
+#include "a32_transpose_interleave_8way_32bit.hpp"
 //#include "a64_interleave_8way_16bit.hpp"
 #include "a64_interleave_8way_32bit.hpp"
 //#include "a64_interleave_8way_half_to_float.hpp"
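As a reading aid for the NEON block in `a32_interleave_6way_32bit.hpp`, the sketch below is a minimal scalar reference of the layout that `TransformImpl<6, 1, false, 4, 4>::Transform` appears to produce: rows are walked in groups of six and, for each column `k`, the six values of that column are written out contiguously, matching the order of the `VST1.32` stores and the scalar tail loop above. It is not part of the commit; the function name `interleave_6way_32bit_ref` and the zero padding for rows past `ymax` are illustrative assumptions (the NEON version pads ragged groups from its `zerobuff` buffer instead).

```cpp
// Illustrative reference only -- not part of the patch.
// For each panel of six rows, columns k0..kmax-1 are emitted one at a time,
// six values per column, with rows beyond ymax padded (here with zero).
#include <cstdint>

inline void interleave_6way_32bit_ref(uint32_t *out, const uint32_t *in, int ldin,
                                      int y0, int ymax, int k0, int kmax)
{
    for (int y = y0; y < ymax; y += 6)      // one 6-row panel per iteration
    {
        for (int k = k0; k < kmax; k++)     // walk the columns of the panel
        {
            for (int r = 0; r < 6; r++)     // emit column k of rows y..y+5
            {
                const int row = y + r;
                *out++ = (row < ymax) ? in[row * ldin + k] : 0u;
            }
        }
    }
}
```

Laying the operand out this way lets a GEMM inner loop that works on six rows at a time stream its input with a single sequential pointer, which is presumably why the interleave is done up front rather than inside the kernel.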