Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp')
-rw-r--r-- src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp | 180 ++++++++++----------
1 file changed, 90 insertions(+), 90 deletions(-)
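
Before the diff itself, a note on what this kernel does: it repacks an 8-row band of a __fp16 matrix into the interleaved fp32 layout the GEMM kernels consume, converting on the fly (FCVTL/FCVTL2) and padding ragged bands, where fewer than 8 rows remain, with zeroes. Below is a minimal scalar sketch of the same output layout, assuming an AArch64 toolchain for the __fp16 type; the free function and its name are illustrative only, as arm_gemm exposes this as TransformImpl<8, 1, false, 4, 2>::Transform.

// Scalar reference for the interleave performed by the kernel in this diff.
// Hypothetical standalone form for illustration; not part of arm_gemm.
static void reference_interleave8_fp16_to_fp32(float *out, const __fp16 *in,
                                               int ldin, int y0, int ymax,
                                               int k0, int kmax) {
    for (int y = y0; y < ymax; y += 8) {      // walk bands of 8 rows
        for (int k = k0; k < kmax; k++) {     // each column within the band
            for (int r = 0; r < 8; r++) {     // one element from each row
                // Rows past ymax are "ragged"; the kernel reads from a
                // zero buffer (zerobuff) instead, which this mirrors.
                *out++ = (y + r < ymax)
                             ? static_cast<float>(in[(y + r) * ldin + k])
                             : 0.0f;
            }
        }
    }
}

The assembly reaches the same layout eight columns at a time: each ZIP1/ZIP2 pair interleaves two converted rows (e.g. v16 = A0 C0 A1 C1 in the comments below), and a second ZIP pass over those results yields whole columns (v20 = A0 B0 C0 D0) ready for the paired STP stores.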
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp
index 1d2d4969f6..fd812165fd 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_half_to_float.hpp
@@ -29,17 +29,15 @@
#include "../asmlib.hpp"
-template <>
-template <>
-inline void TransformImpl<8, 1, false, 4, 2>::Transform(float *out, const __fp16 *in, int ldin, int y0, int ymax, int k0, int kmax)
-{
- float *outptr = out;
- const __fp16 *inptr = in;
+template<>
+template<>
+inline void TransformImpl<8, 1, false, 4, 2>::Transform(float *out, const __fp16 *in, int ldin, int y0, int ymax, int k0, int kmax) {
+ float *outptr = out;
+ const __fp16 *inptr = in;
__fp16 zerobuff[8];
- for(int y = y0; y < ymax; y += 8)
- {
+ for (int y=y0; y<ymax; y+=8) {
const __fp16 *inptr0 = inptr + y * ldin + k0;
const __fp16 *inptr1 = inptr0 + ldin;
const __fp16 *inptr2 = inptr1 + ldin;
@@ -58,14 +56,11 @@ inline void TransformImpl<8, 1, false, 4, 2>::Transform(float *out, const __fp16
prefetch_2x(inptr6);
prefetch_2x(inptr7);
- int x = (kmax - k0);
- for(; x > 7; x -= 8)
- {
+ int x=(kmax-k0);
+ for (;x>7;x-=8) {
/* Cope with ragged cases by copying from a buffer of zeroes instead */
- if((y + 7) >= ymax)
- {
- switch((y + 7) - ymax)
- {
+ if ((y + 7) >= ymax) {
+ switch ((y + 7) - ymax) {
/* Everything falls through in here */
case 6:
inptr1 = zerobuff;
@@ -88,95 +83,100 @@ inline void TransformImpl<8, 1, false, 4, 2>::Transform(float *out, const __fp16
}
}
- __asm __volatile(
+ __asm __volatile (
// Load up 8 elements (2 vectors) from each of 8 sources.
- "LDR q0, [%[inptr0]], #16\n"
- "LDR q2, [%[inptr1]], #16\n"
- "FCVTL2 v1.4s, v0.8h\n"
- "FCVTL v0.4s, v0.4h\n"
- "LDR q4, [%[inptr2]], #16\n" // q4=C0C1C2C3
- "FCVTL2 v3.4s, v2.8h\n"
- "FCVTL v2.4s, v2.4h\n"
- "FCVTL2 v5.4s, v4.8h\n"
- "FCVTL v4.4s, v4.4h\n"
- "ZIP1 v16.4s, v0.4s, v4.4s\n" // q16=A0C0A1C1
+ "LDR q0, [%[inptr0]], #16\n"
+ "LDR q2, [%[inptr1]], #16\n"
+ "FCVTL2 v1.4s, v0.8h\n"
+ "FCVTL v0.4s, v0.4h\n"
+ "LDR q4, [%[inptr2]], #16\n" // q4=C0C1C2C3
+ "FCVTL2 v3.4s, v2.8h\n"
+ "FCVTL v2.4s, v2.4h\n"
+ "FCVTL2 v5.4s, v4.8h\n"
+ "FCVTL v4.4s, v4.4h\n"
+ "ZIP1 v16.4s, v0.4s, v4.4s\n" // q16=A0C0A1C1
ASM_PREFETCH("[%[inptr0], #128]")
- "LDR q6, [%[inptr3]], #16\n" // q6=D0D1D2D3
- "FCVTL2 v7.4s, v6.8h\n"
- "FCVTL v6.4s, v6.4h\n"
- "ZIP1 v17.4s, v2.4s, v6.4s\n" // q17=B0D0B1D1
- "LDR q8, [%[inptr4]], #16\n"
- "LDR q10, [%[inptr5]], #16\n"
- "FCVTL2 v9.4s, v8.8h\n"
- "FCVTL v8.4s, v8.4h\n" ASM_PREFETCH("[%[inptr1], #128]")
- "LDR q12, [%[inptr6]], #16\n"
- "FCVTL2 v11.4s, v10.8h\n"
- "FCVTL v10.4s, v10.4h\n"
- "FCVTL2 v13.4s, v12.8h\n"
- "FCVTL v12.4s, v12.4h\n"
- "ZIP1 v18.4s, v8.4s, v12.4s\n"
- "LDR q14, [%[inptr7]], #16\n"
- "FCVTL2 v15.4s, v14.8h\n"
- "FCVTL v14.4s, v14.4h\n"
- "ZIP1 v19.4s, v10.4s, v14.4s\n"
+ "LDR q6, [%[inptr3]], #16\n" // q6=D0D1D2D3
+ "FCVTL2 v7.4s, v6.8h\n"
+ "FCVTL v6.4s, v6.4h\n"
+ "ZIP1 v17.4s, v2.4s, v6.4s\n" // q17=B0D0B1D1
+ "LDR q8, [%[inptr4]], #16\n"
+ "LDR q10, [%[inptr5]], #16\n"
+ "FCVTL2 v9.4s, v8.8h\n"
+ "FCVTL v8.4s, v8.4h\n"
+ ASM_PREFETCH("[%[inptr1], #128]")
+ "LDR q12, [%[inptr6]], #16\n"
+ "FCVTL2 v11.4s, v10.8h\n"
+ "FCVTL v10.4s, v10.4h\n"
+ "FCVTL2 v13.4s, v12.8h\n"
+ "FCVTL v12.4s, v12.4h\n"
+ "ZIP1 v18.4s, v8.4s, v12.4s\n"
+ "LDR q14, [%[inptr7]], #16\n"
+ "FCVTL2 v15.4s, v14.8h\n"
+ "FCVTL v14.4s, v14.4h\n"
+ "ZIP1 v19.4s, v10.4s, v14.4s\n"
ASM_PREFETCH("[%[inptr2], #128]")
- "ZIP1 v20.4s, v16.4s, v17.4s\n" // q20=A0B0C0D0
- "ZIP1 v21.4s, v18.4s, v19.4s\n"
- "ZIP2 v22.4s, v16.4s, v17.4s\n"
- "ZIP2 v23.4s, v18.4s, v19.4s\n" ASM_PREFETCH("[%[inptr3], #128]")
-
- "ZIP2 v16.4s, v0.4s, v4.4s\n"
- "ZIP2 v17.4s, v2.4s, v6.4s\n"
- "STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
-
- "ZIP2 v18.4s, v8.4s, v12.4s\n" ASM_PREFETCH("[%[inptr4], #128]")
- "ZIP2 v19.4s, v10.4s, v14.4s\n"
- "STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
-
- "ZIP1 v20.4s, v16.4s, v17.4s\n"
- "ZIP1 v21.4s, v18.4s, v19.4s\n" ASM_PREFETCH("[%[inptr5], #128]")
- "ZIP2 v22.4s, v16.4s, v17.4s\n"
- "ZIP2 v23.4s, v18.4s, v19.4s\n"
-
- "ZIP1 v16.4s, v1.4s, v5.4s\n"
- "ZIP1 v17.4s, v3.4s, v7.4s\n" ASM_PREFETCH("[%[inptr6], #128]")
- "STP q20, q21, [%[outptr]], #32\n" // Third element
-
- "ZIP1 v18.4s, v9.4s, v13.4s\n"
- "ZIP1 v19.4s, v11.4s, v15.4s\n"
- "STP q22, q23, [%[outptr]], #32\n" // Fourth element
+ "ZIP1 v20.4s, v16.4s, v17.4s\n" // q20=A0B0C0D0
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+ ASM_PREFETCH("[%[inptr3], #128]")
+
+ "ZIP2 v16.4s, v0.4s, v4.4s\n"
+ "ZIP2 v17.4s, v2.4s, v6.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
+
+ "ZIP2 v18.4s, v8.4s, v12.4s\n"
+ ASM_PREFETCH("[%[inptr4], #128]")
+ "ZIP2 v19.4s, v10.4s, v14.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ ASM_PREFETCH("[%[inptr5], #128]")
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+ "ZIP1 v16.4s, v1.4s, v5.4s\n"
+ "ZIP1 v17.4s, v3.4s, v7.4s\n"
+ ASM_PREFETCH("[%[inptr6], #128]")
+ "STP q20, q21, [%[outptr]], #32\n" // Third element
+
+ "ZIP1 v18.4s, v9.4s, v13.4s\n"
+ "ZIP1 v19.4s, v11.4s, v15.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Fourth element
ASM_PREFETCH("[%[inptr7], #128]")
- "ZIP1 v20.4s, v16.4s, v17.4s\n"
- "ZIP1 v21.4s, v18.4s, v19.4s\n"
- "ZIP2 v22.4s, v16.4s, v17.4s\n"
- "ZIP2 v23.4s, v18.4s, v19.4s\n"
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
- "ZIP2 v16.4s, v1.4s, v5.4s\n"
- "ZIP2 v17.4s, v3.4s, v7.4s\n"
- "STP q20, q21, [%[outptr]], #32\n" // Fifth element
+ "ZIP2 v16.4s, v1.4s, v5.4s\n"
+ "ZIP2 v17.4s, v3.4s, v7.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Fifth element
- "ZIP2 v18.4s, v9.4s, v13.4s\n"
- "ZIP2 v19.4s, v11.4s, v15.4s\n"
- "STP q22, q23, [%[outptr]], #32\n" // Sixth element
+ "ZIP2 v18.4s, v9.4s, v13.4s\n"
+ "ZIP2 v19.4s, v11.4s, v15.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Sixth element
- "ZIP1 v20.4s, v16.4s, v17.4s\n"
- "ZIP1 v21.4s, v18.4s, v19.4s\n"
- "STP q20, q21, [%[outptr]], #32\n" // Seventh element
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Seventh element
- "ZIP2 v22.4s, v16.4s, v17.4s\n"
- "ZIP2 v23.4s, v18.4s, v19.4s\n"
- "STP q22, q23, [%[outptr]], #32\n" // Eighth element
- : [inptr0] "+r"(inptr0), [inptr1] "+r"(inptr1), [inptr2] "+r"(inptr2), [inptr3] "+r"(inptr3),
- [inptr4] "+r"(inptr4), [inptr5] "+r"(inptr5), [inptr6] "+r"(inptr6), [inptr7] "+r"(inptr7), [outptr] "+r"(outptr)
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Eighth element
+ : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+ [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7), [outptr] "+r" (outptr)
:
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12",
- "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
+ "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
+ );
}
- for(; x > 0; x--)
- {
+ for (;x>0;x--) {
*outptr++ = *inptr0++;
*outptr++ = *inptr1++;
*outptr++ = *inptr2++;