author    Anthony Barbier <anthony.barbier@arm.com>  2018-07-03 16:22:02 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:10 +0000
commit    5f707736413aeac77818c42838296966f8dc6761 (patch)
tree      b829ed3243ea5f3085f288836132416c78bc2e72 /src/core/NEON/kernels/arm_gemm/merges
parent    7485d5a62685cb745ab50e970adb722cb71557ac (diff)
COMPMID-1369: Revert accidental formatting of RSH's repo
Pulled latest fixes from David's repo:

  commit f43ebe932c84083332b0b1a0348241b69dda63a7
  Author: David Mansell <David.Mansell@arm.com>
  Date:   Tue Jul 3 18:09:01 2018 +0100

      Whitespace tidying, fixed comment in gemv_batched imported from ACL.

Change-Id: Ie37a623f44e90d88072236cb853ac55ac82d5f51
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138530
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: David Mansell <david.mansell@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/merges')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp           253
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp          403
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp  512
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp           420
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp          340
5 files changed, 1188 insertions, 740 deletions
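
For context (this note and the sketch below are editorial, not part of the patch): each of these merge kernels writes a block of GEMM accumulator results back into the output matrix as out = alpha * in + beta * out. The restored sources split each kernel into a beta == 0 fast path, which never reads the destination, and a general path that rescales the existing output. A minimal scalar sketch of that per-block contract, with illustrative names only:

    // Editorial sketch of the per-block contract; 'width' x 'height' matches the
    // kernel name (e.g. 8x6, 12x8, 24x8).  Not part of the ComputeLibrary sources.
    template <unsigned int width, unsigned int height, typename Tout, typename Tin>
    void merge_reference(Tout *out, const Tin *in, int ldout, Tout alpha, Tout beta) {
        for (unsigned int r = 0; r < height; r++) {
            for (unsigned int c = 0; c < width; c++) {
                Tout v = alpha * static_cast<Tout>(in[r * width + c]);
                if (beta != static_cast<Tout>(0)) {
                    v += beta * out[r * ldout + c];   // only touch the output when beta != 0
                }
                out[r * ldout + c] = v;
            }
        }
    }
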
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp b/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp
index b44e56499f..2b833937a8 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a32_merge_float_8x6.hpp
@@ -27,9 +27,8 @@
#include <arm_neon.h>
-template <>
-inline void MergeResults<8, 6>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
-{
+template<>
+inline void MergeResults<8, 6>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta) {
const float *inptr = in;
prefetch_6x(inptr);
prefetch_6x(inptr + 96);
@@ -37,8 +36,7 @@ inline void MergeResults<8, 6>(float *out, const float *in, const int ldout, con
float32x4_t av = vdupq_n_f32(alpha);
float32x4_t bv = vdupq_n_f32(beta);
- for(int y = y0; y < ymax; y += 8)
- {
+ for (int y=y0; y<ymax; y+=8) {
float *outptr0 = out + (y * ldout) + x0;
float *outptr1 = outptr0 + ldout;
float *outptr2 = outptr1 + ldout;
@@ -53,17 +51,14 @@ inline void MergeResults<8, 6>(float *out, const float *in, const int ldout, con
prefetch_2x(outptr4);
prefetch_2x(outptr5);
- for(int i = x0; i < xmax; i += 8)
- {
+ for (int i=x0; i<xmax; i+=8) {
float dummyres[8];
/* Make sure we throw away results if Y isn't a multiple of 8.
* We do this by pointing the result pointer at a dummy buffer
* we later discard. */
- if((y + 5) >= ymax)
- {
- switch((y + 5) - ymax)
- {
+ if ((y+5) >= ymax) {
+ switch ((y + 5) - ymax) {
case 4:
outptr1 = dummyres;
case 3:
@@ -81,84 +76,168 @@ inline void MergeResults<8, 6>(float *out, const float *in, const int ldout, con
}
}
- /* For ragged X, manually copy over the valid results. */
- if((i + 7) >= xmax)
- {
- for(int xi = 0; xi < 8; xi++)
- {
- if((i + xi) < xmax)
- {
- *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
- outptr0++;
- *outptr1 = (alpha * inptr[xi + 8]) + (*outptr1 * beta);
- outptr1++;
- *outptr2 = (alpha * inptr[xi + 16]) + (*outptr2 * beta);
- outptr2++;
- *outptr3 = (alpha * inptr[xi + 24]) + (*outptr3 * beta);
- outptr3++;
- *outptr4 = (alpha * inptr[xi + 32]) + (*outptr4 * beta);
- outptr4++;
- *outptr5 = (alpha * inptr[xi + 40]) + (*outptr5 * beta);
- outptr5++;
+ if (beta == 0.0f) {
+ /* If beta=0, don't read the original input at all. */
+
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+7) >= xmax) {
+ for (int xi=0; xi<8; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 8]);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 16]);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 24]);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 32]);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 40]);
+ outptr5++;
+ }
}
+ inptr += 48;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Rows 0-1
+ "VLD1.32 {d0-d3}, [%[inptr]]!\n"
+ "VLD1.32 {d4-d7}, [%[inptr]]!\n"
+
+ "VMUL.f32 q4, q0, %q[av]\n"
+ ASM_PREFETCH("[%[inptr], #352]")
+ "VMUL.f32 q5, q1, %q[av]\n"
+ "VST1.32 {d8-d11}, [%[outptr0]]!\n"
+ ASM_PREFETCH("[%[inptr], #416]")
+ "VMUL.f32 q6, q2, %q[av]\n"
+ ASM_PREFETCH("[%[inptr], #480]")
+ "VMUL.f32 q7, q3, %q[av]\n"
+ "VST1.32 {d12-d15}, [%[outptr1]]!\n"
+
+ // Rows 2-3
+ "VLD1.32 {d0-d3}, [%[inptr]]!\n"
+ "VLD1.32 {d4-d7}, [%[inptr]]!\n"
+
+ "VMUL.f32 q4, q0, %q[av]\n"
+ ASM_PREFETCH("[%[outptr0], #96]")
+ "VMUL.f32 q5, q1, %q[av]\n"
+ "VST1.32 {d8-d11}, [%[outptr2]]!\n"
+ ASM_PREFETCH("[%[outptr1], #96]")
+ "VMUL.f32 q6, q2, %q[av]\n"
+ ASM_PREFETCH("[%[outptr2], #96]")
+ "VMUL.f32 q7, q3, %q[av]\n"
+ "VST1.32 {d12-d15}, [%[outptr3]]!\n"
+
+ // Rows 4-5
+ "VLD1.32 {d0-d3}, [%[inptr]]!\n"
+ "VLD1.32 {d4-d7}, [%[inptr]]!\n"
+
+ "VMUL.f32 q4, q0, %q[av]\n"
+ ASM_PREFETCH("[%[outptr3], #96]")
+ "VMUL.f32 q5, q1, %q[av]\n"
+ "VST1.32 {d8-d11}, [%[outptr4]]!\n"
+ ASM_PREFETCH("[%[outptr4], #96]")
+ "VMUL.f32 q6, q2, %q[av]\n"
+ ASM_PREFETCH("[%[outptr5], #128]")
+ "VMUL.f32 q7, q3, %q[av]\n"
+ "VST1.32 {d12-d15}, [%[outptr5]]!\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [inptr] "+r" (inptr)
+ : [av] "w" (av), [bv] "w" (bv)
+ : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
+ );
+ }
+ } else {
+ /* Non-zero beta: Read output and apply beta. */
+
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+7) >= xmax) {
+ for (int xi=0; xi<8; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 8]) + (*outptr1 * beta);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 16]) + (*outptr2 * beta);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 24]) + (*outptr3 * beta);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 32]) + (*outptr4 * beta);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 40]) + (*outptr5 * beta);
+ outptr5++;
+ }
+ }
+ inptr += 48;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Rows 0-1
+ "VLD1.32 {d8-d11}, [%[outptr0]]\n"
+ "VMUL.f32 q4, q4, %q[bv]\n"
+ "VLD1.32 {d12-d15}, [%[outptr1]]\n"
+ "VMUL.f32 q5, q5, %q[bv]\n"
+ "VLD1.32 {d0-d3}, [%[inptr]]!\n"
+ "VMUL.f32 q6, q6, %q[bv]\n"
+ "VLD1.32 {d4-d7}, [%[inptr]]!\n"
+ "VMUL.f32 q7, q7, %q[bv]\n"
+
+ "VMLA.f32 q4, q0, %q[av]\n"
+ ASM_PREFETCH("[%[inptr], #352]")
+ "VMLA.f32 q5, q1, %q[av]\n"
+ "VST1.32 {d8-d11}, [%[outptr0]]!\n"
+ ASM_PREFETCH("[%[inptr], #416]")
+ "VMLA.f32 q6, q2, %q[av]\n"
+ ASM_PREFETCH("[%[inptr], #480]")
+ "VMLA.f32 q7, q3, %q[av]\n"
+ "VST1.32 {d12-d15}, [%[outptr1]]!\n"
+
+ // Rows 2-3
+ "VLD1.32 {d8-d11}, [%[outptr2]]\n"
+ "VMUL.f32 q4, q4, %q[bv]\n"
+ "VLD1.32 {d12-d15}, [%[outptr3]]\n"
+ "VMUL.f32 q5, q5, %q[bv]\n"
+ "VLD1.32 {d0-d3}, [%[inptr]]!\n"
+ "VMUL.f32 q6, q6, %q[bv]\n"
+ "VLD1.32 {d4-d7}, [%[inptr]]!\n"
+ "VMUL.f32 q7, q7, %q[bv]\n"
+
+ "VMLA.f32 q4, q0, %q[av]\n"
+ ASM_PREFETCH("[%[outptr0], #96]")
+ "VMLA.f32 q5, q1, %q[av]\n"
+ "VST1.32 {d8-d11}, [%[outptr2]]!\n"
+ ASM_PREFETCH("[%[outptr1], #96]")
+ "VMLA.f32 q6, q2, %q[av]\n"
+ ASM_PREFETCH("[%[outptr2], #96]")
+ "VMLA.f32 q7, q3, %q[av]\n"
+ "VST1.32 {d12-d15}, [%[outptr3]]!\n"
+
+ // Rows 4-5
+ "VLD1.32 {d8-d11}, [%[outptr4]]\n"
+ "VMUL.f32 q4, q4, %q[bv]\n"
+ "VLD1.32 {d12-d15}, [%[outptr5]]\n"
+ "VMUL.f32 q5, q5, %q[bv]\n"
+ "VLD1.32 {d0-d3}, [%[inptr]]!\n"
+ "VMUL.f32 q6, q6, %q[bv]\n"
+ "VLD1.32 {d4-d7}, [%[inptr]]!\n"
+ "VMUL.f32 q7, q7, %q[bv]\n"
+
+ "VMLA.f32 q4, q0, %q[av]\n"
+ ASM_PREFETCH("[%[outptr3], #96]")
+ "VMLA.f32 q5, q1, %q[av]\n"
+ "VST1.32 {d8-d11}, [%[outptr4]]!\n"
+ ASM_PREFETCH("[%[outptr4], #96]")
+ "VMLA.f32 q6, q2, %q[av]\n"
+ ASM_PREFETCH("[%[outptr5], #128]")
+ "VMLA.f32 q7, q3, %q[av]\n"
+ "VST1.32 {d12-d15}, [%[outptr5]]!\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [inptr] "+r" (inptr)
+ : [av] "w" (av), [bv] "w" (bv)
+ : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
+ );
}
- inptr += 48;
- }
- else
- {
- /* Optimized routine to copy an entire block */
- __asm __volatile(
- // Rows 0-1
- "VLD1.32 {d8-d11}, [%[outptr0]]\n"
- "VMUL.f32 q4, q4, %q[bv]\n"
- "VLD1.32 {d12-d15}, [%[outptr1]]\n"
- "VMUL.f32 q5, q5, %q[bv]\n"
- "VLD1.32 {d0-d3}, [%[inptr]]!\n"
- "VMUL.f32 q6, q6, %q[bv]\n"
- "VLD1.32 {d4-d7}, [%[inptr]]!\n"
- "VMUL.f32 q7, q7, %q[bv]\n"
-
- "VMLA.f32 q4, q0, %q[av]\n" ASM_PREFETCH("[%[inptr], #352]")
- "VMLA.f32 q5, q1, %q[av]\n"
- "VST1.32 {d8-d11}, [%[outptr0]]!\n" ASM_PREFETCH("[%[inptr], #416]") "VMLA.f32 q6, q2, %q[av]\n" ASM_PREFETCH("[%[inptr], #480]")
- "VMLA.f32 q7, q3, %q[av]\n"
- "VST1.32 {d12-d15}, [%[outptr1]]!\n"
-
- // Rows 2-3
- "VLD1.32 {d8-d11}, [%[outptr2]]\n"
- "VMUL.f32 q4, q4, %q[bv]\n"
- "VLD1.32 {d12-d15}, [%[outptr3]]\n"
- "VMUL.f32 q5, q5, %q[bv]\n"
- "VLD1.32 {d0-d3}, [%[inptr]]!\n"
- "VMUL.f32 q6, q6, %q[bv]\n"
- "VLD1.32 {d4-d7}, [%[inptr]]!\n"
- "VMUL.f32 q7, q7, %q[bv]\n"
-
- "VMLA.f32 q4, q0, %q[av]\n" ASM_PREFETCH("[%[outptr0], #96]")
- "VMLA.f32 q5, q1, %q[av]\n"
- "VST1.32 {d8-d11}, [%[outptr2]]!\n" ASM_PREFETCH("[%[outptr1], #96]") "VMLA.f32 q6, q2, %q[av]\n" ASM_PREFETCH("[%[outptr2], #96]")
- "VMLA.f32 q7, q3, %q[av]\n"
- "VST1.32 {d12-d15}, [%[outptr3]]!\n"
-
- // Rows 4-5
- "VLD1.32 {d8-d11}, [%[outptr4]]\n"
- "VMUL.f32 q4, q4, %q[bv]\n"
- "VLD1.32 {d12-d15}, [%[outptr5]]\n"
- "VMUL.f32 q5, q5, %q[bv]\n"
- "VLD1.32 {d0-d3}, [%[inptr]]!\n"
- "VMUL.f32 q6, q6, %q[bv]\n"
- "VLD1.32 {d4-d7}, [%[inptr]]!\n"
- "VMUL.f32 q7, q7, %q[bv]\n"
-
- "VMLA.f32 q4, q0, %q[av]\n" ASM_PREFETCH("[%[outptr3], #96]")
- "VMLA.f32 q5, q1, %q[av]\n"
- "VST1.32 {d8-d11}, [%[outptr4]]!\n" ASM_PREFETCH("[%[outptr4], #96]") "VMLA.f32 q6, q2, %q[av]\n" ASM_PREFETCH("[%[outptr5], #128]")
- "VMLA.f32 q7, q3, %q[av]\n"
- "VST1.32 {d12-d15}, [%[outptr5]]!\n"
- : [outptr0] "+r"(outptr0), [outptr1] "+r"(outptr1), [outptr2] "+r"(outptr2), [outptr3] "+r"(outptr3),
- [outptr4] "+r"(outptr4), [outptr5] "+r"(outptr5), [inptr] "+r"(inptr)
- : [av] "w"(av), [bv] "w"(bv)
- : "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
}
}
}
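
The two inline-assembly paths above differ only in whether the existing output is read: the beta != 0 path loads the destination, scales it by beta (VMUL) and accumulates alpha * in on top (VMLA), while the beta == 0 path is a plain VMUL of the input. A rough NEON-intrinsics equivalent of one 4-float chunk, for illustration only (not part of the patch):

    #include <arm_neon.h>

    // Editorial illustration of the two assembly paths in the a32 kernel above.
    static inline void merge4(float *outptr, const float *inptr,
                              float32x4_t av, float32x4_t bv, bool beta_is_zero) {
        float32x4_t in = vld1q_f32(inptr);
        float32x4_t res;
        if (beta_is_zero) {
            res = vmulq_f32(in, av);                          // alpha * in            (VMUL path)
        } else {
            float32x4_t out = vld1q_f32(outptr);
            res = vmlaq_f32(vmulq_f32(out, bv), in, av);      // beta*out + alpha*in   (VMUL + VMLA path)
        }
        vst1q_f32(outptr, res);
    }
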
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp
index 3b59a43c52..f6befa2d14 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_12x8.hpp
@@ -25,9 +25,8 @@
#ifdef __aarch64__
-template <>
-inline void MergeResults<12, 8>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta)
-{
+template<>
+inline void MergeResults<12, 8>(float *out, const float *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const float alpha, const float beta) {
const float *inptr = in;
prefetch_6x(inptr);
prefetch_6x(inptr + 96);
@@ -35,8 +34,7 @@ inline void MergeResults<12, 8>(float *out, const float *in, const int ldout, co
float32x4_t av = vdupq_n_f32(alpha);
float32x4_t bv = vdupq_n_f32(beta);
- for(int y = y0; y < ymax; y += 8)
- {
+ for (int y=y0; y<ymax; y+=8) {
float *outptr0 = out + (y * ldout) + x0;
float *outptr1 = outptr0 + ldout;
float *outptr2 = outptr1 + ldout;
@@ -55,17 +53,14 @@ inline void MergeResults<12, 8>(float *out, const float *in, const int ldout, co
prefetch_2x(outptr6);
prefetch_2x(outptr7);
- for(int i = x0; i < xmax; i += 12)
- {
+ for (int i=x0; i<xmax; i+=12) {
float dummyres[12];
/* Make sure we throw away results if Y isn't a multiple of 8.
* We do this by pointing the result pointer at a dummy buffer
* we later discard. */
- if((y + 7) >= ymax)
- {
- switch((y + 7) - ymax)
- {
+ if ((y+7) >= ymax) {
+ switch ((y + 7) - ymax) {
case 6:
outptr1 = dummyres;
case 5:
@@ -87,147 +82,259 @@ inline void MergeResults<12, 8>(float *out, const float *in, const int ldout, co
}
}
- /* For ragged X, manually copy over the valid results. */
- if((i + 11) >= xmax)
- {
- for(int xi = 0; xi < 12; xi++)
- {
- if((i + xi) < xmax)
- {
- *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
- outptr0++;
- *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
- outptr1++;
- *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
- outptr2++;
- *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
- outptr3++;
- *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
- outptr4++;
- *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
- outptr5++;
- *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
- outptr6++;
- *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
- outptr7++;
+ if (beta==0.0f) {
+ /* If beta==0, don't read the original input at all. */
+
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+11) >= xmax) {
+ for (int xi=0; xi<12; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 12]);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 24]);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 36]);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 48]);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 60]);
+ outptr5++;
+ *outptr6 = (alpha * inptr[xi + 72]);
+ outptr6++;
+ *outptr7 = (alpha * inptr[xi + 84]);
+ outptr7++;
+ }
}
+ inptr += 96;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Rows 0-1
+ "LDP q0, q1, [%[inptr]]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ "LDP q2, q3, [%[inptr], #32]\n"
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ "LDP q4, q5, [%[inptr], #64]\n"
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr0]], #32\n"
+ ASM_PREFETCH("[%[inptr], #768]")
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "STR q18, [%[outptr0]], #16\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr1]], #32\n"
+ ASM_PREFETCH("[%[inptr], #832]")
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr1]], #16\n"
+
+ // Rows 2-3
+ "LDP q0, q1, [%[inptr], #96]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ "LDP q2, q3, [%[inptr], #128]\n"
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ "LDP q4, q5, [%[inptr], #160]\n"
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr2]], #32\n"
+ ASM_PREFETCH("[%[inptr], #896]")
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "STR q18, [%[outptr2]], #16\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr3]], #32\n"
+ ASM_PREFETCH("[%[inptr], #1024]")
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr3]], #16\n"
+
+ // Rows 4-5
+ "LDP q0, q1, [%[inptr], #192]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ "LDP q2, q3, [%[inptr], #224]\n"
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ "LDP q4, q5, [%[inptr], #256]\n"
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr4]], #32\n"
+ ASM_PREFETCH("[%[inptr], #960]")
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "STR q18, [%[outptr4]], #16\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr5]], #32\n"
+ ASM_PREFETCH("[%[inptr], #1088]")
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr5]], #16\n"
+
+ // Rows 6-7
+ "LDP q0, q1, [%[inptr], #288]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ "LDP q2, q3, [%[inptr], #320]\n"
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ "LDP q4, q5, [%[inptr], #352]\n"
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr6]], #32\n"
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "STR q18, [%[outptr6]], #16\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr7]], #32\n"
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr7]], #16\n"
+ "ADD %[inptr], %[inptr], #384\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [av] "w" (av), [bv] "w" (bv)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
+ );
+ }
+ } else {
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+11) >= xmax) {
+ for (int xi=0; xi<12; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
+ outptr5++;
+ *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
+ outptr6++;
+ *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
+ outptr7++;
+ }
+ }
+ inptr += 96;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Rows 0-1
+ "LDP q16, q17, [%[outptr0]]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDR q18, [%[outptr0], #32]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDP q19, q20, [%[outptr1]]\n"
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ "LDR q21, [%[outptr1], #32]\n"
+ ASM_PREFETCH("[%[inptr], #768]")
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr]]\n"
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ "LDP q2, q3, [%[inptr], #32]\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "LDP q4, q5, [%[inptr], #64]\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #832]")
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr0]], #32\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q18, [%[outptr0]], #16\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #896]")
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr1]], #32\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr1]], #16\n"
+
+ // Rows 2-3
+ "LDP q16, q17, [%[outptr2]]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDR q18, [%[outptr2], #32]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDP q19, q20, [%[outptr3]]\n"
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ "LDR q21, [%[outptr3], #32]\n"
+ ASM_PREFETCH("[%[inptr], #960]")
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr], #96]\n"
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ "LDP q2, q3, [%[inptr], #128]\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "LDP q4, q5, [%[inptr], #160]\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #1024]")
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr2]], #32\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q18, [%[outptr2]], #16\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #1088]")
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr3]], #32\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr3]], #16\n"
+
+ // Rows 4-5
+ ASM_PREFETCH("[%[outptr0], #80]")
+ "LDP q16, q17, [%[outptr4]]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDR q18, [%[outptr4], #32]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDP q19, q20, [%[outptr5]]\n"
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ "LDR q21, [%[outptr5], #32]\n"
+ ASM_PREFETCH("[%[outptr1], #80]")
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr], #192]\n"
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ "LDP q2, q3, [%[inptr], #224]\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "LDP q4, q5, [%[inptr], #256]\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[outptr2], #80]")
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr4]], #32\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q18, [%[outptr4]], #16\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[outptr3], #80]")
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr5]], #32\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr5]], #16\n"
+
+ // Rows 6-7
+ ASM_PREFETCH("[%[outptr4], #80]")
+ "LDP q16, q17, [%[outptr6]]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDR q18, [%[outptr6], #32]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDP q19, q20, [%[outptr7]]\n"
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ "LDR q21, [%[outptr7], #32]\n"
+ ASM_PREFETCH("[%[outptr5], #80]")
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr], #288]\n"
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ "LDP q2, q3, [%[inptr], #320]\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "LDP q4, q5, [%[inptr], #352]\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[outptr6], #128]")
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "STP q16, q17, [%[outptr6]], #32\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q18, [%[outptr6]], #16\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[outptr7], #128]")
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "STP q19, q20, [%[outptr7]], #32\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "STR q21, [%[outptr7]], #16\n"
+ "ADD %[inptr], %[inptr], #384\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [av] "w" (av), [bv] "w" (bv)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
+ );
}
- inptr += 96;
- }
- else
- {
- /* Optimized routine to copy an entire block */
- __asm __volatile(
- // Rows 0-1
- "LDP q16, q17, [%[outptr0]]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDR q18, [%[outptr0], #32]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDP q19, q20, [%[outptr1]]\n"
- "FMUL v18.4s, v18.4s, %[bv].4s\n"
- "LDR q21, [%[outptr1], #32]\n" ASM_PREFETCH("[%[inptr], #768]")
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr]]\n"
- "FMUL v20.4s, v20.4s, %[bv].4s\n"
- "LDP q2, q3, [%[inptr], #32]\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "LDP q4, q5, [%[inptr], #64]\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n" ASM_PREFETCH("[%[inptr], #832]")
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "STP q16, q17, [%[outptr0]], #32\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q18, [%[outptr0]], #16\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n" ASM_PREFETCH("[%[inptr], #896]")
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "STP q19, q20, [%[outptr1]], #32\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "STR q21, [%[outptr1]], #16\n"
-
- // Rows 2-3
- "LDP q16, q17, [%[outptr2]]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDR q18, [%[outptr2], #32]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDP q19, q20, [%[outptr3]]\n"
- "FMUL v18.4s, v18.4s, %[bv].4s\n"
- "LDR q21, [%[outptr3], #32]\n" ASM_PREFETCH("[%[inptr], #960]")
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr], #96]\n"
- "FMUL v20.4s, v20.4s, %[bv].4s\n"
- "LDP q2, q3, [%[inptr], #128]\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "LDP q4, q5, [%[inptr], #160]\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n" ASM_PREFETCH("[%[inptr], #1024]")
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "STP q16, q17, [%[outptr2]], #32\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q18, [%[outptr2]], #16\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n" ASM_PREFETCH("[%[inptr], #1088]")
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "STP q19, q20, [%[outptr3]], #32\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "STR q21, [%[outptr3]], #16\n"
-
- // Rows 4-5
- ASM_PREFETCH("[%[outptr0], #80]")
- "LDP q16, q17, [%[outptr4]]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDR q18, [%[outptr4], #32]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDP q19, q20, [%[outptr5]]\n"
- "FMUL v18.4s, v18.4s, %[bv].4s\n"
- "LDR q21, [%[outptr5], #32]\n" ASM_PREFETCH("[%[outptr1], #80]")
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr], #192]\n"
- "FMUL v20.4s, v20.4s, %[bv].4s\n"
- "LDP q2, q3, [%[inptr], #224]\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "LDP q4, q5, [%[inptr], #256]\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n" ASM_PREFETCH("[%[outptr2], #80]")
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "STP q16, q17, [%[outptr4]], #32\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q18, [%[outptr4]], #16\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n" ASM_PREFETCH("[%[outptr3], #80]")
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "STP q19, q20, [%[outptr5]], #32\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "STR q21, [%[outptr5]], #16\n"
-
- // Rows 6-7
- ASM_PREFETCH("[%[outptr4], #80]")
- "LDP q16, q17, [%[outptr6]]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDR q18, [%[outptr6], #32]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDP q19, q20, [%[outptr7]]\n"
- "FMUL v18.4s, v18.4s, %[bv].4s\n"
- "LDR q21, [%[outptr7], #32]\n" ASM_PREFETCH("[%[outptr5], #80]")
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr], #288]\n"
- "FMUL v20.4s, v20.4s, %[bv].4s\n"
- "LDP q2, q3, [%[inptr], #320]\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "LDP q4, q5, [%[inptr], #352]\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n" ASM_PREFETCH("[%[outptr6], #128]")
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "STP q16, q17, [%[outptr6]], #32\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q18, [%[outptr6]], #16\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n" ASM_PREFETCH("[%[outptr7], #128]")
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "STP q19, q20, [%[outptr7]], #32\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "STR q21, [%[outptr7]], #16\n"
- "ADD %[inptr], %[inptr], #384\n"
- : [outptr0] "+r"(outptr0), [outptr1] "+r"(outptr1), [outptr2] "+r"(outptr2), [outptr3] "+r"(outptr3),
- [outptr4] "+r"(outptr4), [outptr5] "+r"(outptr5), [outptr6] "+r"(outptr6), [outptr7] "+r"(outptr7),
- [inptr] "+r"(inptr)
- : [av] "w"(av), [bv] "w"(bv)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21");
}
}
}
}
-#endif // __aarch64__
\ No newline at end of file
+#endif // __aarch64__
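
Both AArch64 float kernels guard their edges the same way: rows past ymax are redirected to a small dummy buffer whose contents are discarded, and a partial final block of columns ("ragged X") is merged one element at a time instead of through the assembly path. An editorial C++ sketch of that handling, assuming a width-12, height-8 block (names are illustrative, not from the patch):

    #include <algorithm>

    // Editorial sketch only: edge handling used by the kernels above.
    static void merge_block_edges(float *out, const float *in, int ldout,
                                  int y, int ymax, int x0, int xmax,
                                  float alpha, float beta) {
        constexpr int height = 8, width = 12;

        for (int i = x0; i < xmax; i += width) {
            float dummyres[width] = {};                  // writes landing here are thrown away
            const int valid = std::min(width, xmax - i); // ragged X tail

            for (int r = 0; r < height; r++) {
                float *outptr = ((y + r) < ymax) ? (out + (y + r) * ldout + i) : dummyres;
                for (int c = 0; c < valid; c++) {
                    outptr[c] = alpha * in[r * width + c] + beta * outptr[c];
                }
            }
            in += width * height;
        }
    }
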
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp
index 9708fe189d..e7a7521823 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_float_to_half_12x8.hpp
@@ -28,9 +28,8 @@
#include <arm_neon.h>
-template <>
-inline void MergeResults<12, 8>(__fp16 *out, const float *in, int ldout, int y0, int ymax, int x0, int xmax, const __fp16 alpha, const __fp16 beta)
-{
+template<>
+inline void MergeResults<12,8>(__fp16 *out, const float *in, int ldout, int y0, int ymax, int x0, int xmax, const __fp16 alpha, const __fp16 beta) {
const float *inptr = in;
prefetch_6x(inptr);
prefetch_6x(inptr + 24);
@@ -38,8 +37,7 @@ inline void MergeResults<12, 8>(__fp16 *out, const float *in, int ldout, int y0,
float32x4_t av = vdupq_n_f32(alpha);
float32x4_t bv = vdupq_n_f32(beta);
- for(int y = y0; y < ymax; y += 8)
- {
+ for (int y=y0; y<ymax; y+=8) {
__fp16 *outptr0 = out + (y * ldout) + x0;
__fp16 *outptr1 = outptr0 + ldout;
__fp16 *outptr2 = outptr1 + ldout;
@@ -58,17 +56,14 @@ inline void MergeResults<12, 8>(__fp16 *out, const float *in, int ldout, int y0,
prefetch_2x(outptr6);
prefetch_2x(outptr7);
- for(int i = x0; i < xmax; i += 12)
- {
+ for (int i=x0; i<xmax; i+=12) {
__fp16 dummyres[12];
/* Make sure we throw away results if Y isn't a multiple of 8.
* We do this by pointing the result pointer at a dummy buffer
* we later discard. */
- if((y + 7) >= ymax)
- {
- switch((y + 7) - ymax)
- {
+ if ((y+7) >= ymax) {
+ switch ((y + 7) - ymax) {
case 6:
outptr1 = dummyres;
case 5:
@@ -90,182 +85,335 @@ inline void MergeResults<12, 8>(__fp16 *out, const float *in, int ldout, int y0,
}
}
- /* For ragged X, manually copy over the valid results. */
- if((i + 11) >= xmax)
- {
- for(int xi = 0; xi < 12; xi++)
- {
- if((i + xi) < xmax)
- {
- *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
- outptr0++;
- *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
- outptr1++;
- *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
- outptr2++;
- *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
- outptr3++;
- *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
- outptr4++;
- *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
- outptr5++;
- *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
- outptr6++;
- *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
- outptr7++;
+ if (beta == ((__fp16)0.0f)) {
+ /* If beta==0, don't read the output. */
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+11) >= xmax) {
+ for (int xi=0; xi<12; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 12]);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 24]);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 36]);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 48]);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 60]);
+ outptr5++;
+ *outptr6 = (alpha * inptr[xi + 72]);
+ outptr6++;
+ *outptr7 = (alpha * inptr[xi + 84]);
+ outptr7++;
+ }
}
+ inptr += 96;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Rows 0-1
+ "LDP q0, q1, [%[inptr]]\n"
+ "LDP q2, q3, [%[inptr], #32]\n"
+ "LDP q4, q5, [%[inptr], #64]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #768]")
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #832]")
+ "FCVTN v16.4h, v16.4s\n"
+ ASM_PREFETCH("[%[inptr], #896]")
+ "FCVTN2 v16.8h, v17.4s\n"
+ ASM_PREFETCH("[%[inptr], #960]")
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr0]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr0]], #8\n"
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr1]], #16\n"
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr1]], #8\n"
+
+ // Rows 2-3
+ "LDP q0, q1, [%[inptr], #96]\n"
+ "LDP q2, q3, [%[inptr], #128]\n"
+ "LDP q4, q5, [%[inptr], #160]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #1024]")
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[inptr], #1088]")
+ "FCVTN v16.4h, v16.4s\n"
+ ASM_PREFETCH("[%[outptr0], #64]")
+ "FCVTN2 v16.8h, v17.4s\n"
+ ASM_PREFETCH("[%[outptr1], #64]")
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr2]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr2]], #8\n"
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr3]], #16\n"
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr3]], #8\n"
+
+ // Rows 4-5
+ "LDP q0, q1, [%[inptr], #192]\n"
+ "LDP q2, q3, [%[inptr], #224]\n"
+ "LDP q4, q5, [%[inptr], #256]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[outptr2], #64]")
+ "FCVTN v16.4h, v16.4s\n"
+ ASM_PREFETCH("[%[outptr3], #64]")
+ "FCVTN2 v16.8h, v17.4s\n"
+ ASM_PREFETCH("[%[outptr4], #88]")
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr4]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr4]], #8\n"
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr5]], #16\n"
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr5]], #8\n"
+
+ // Rows 6-7
+ "LDP q0, q1, [%[inptr], #288]\n"
+ "LDP q2, q3, [%[inptr], #320]\n"
+ "LDP q4, q5, [%[inptr], #352]\n"
+ "FMUL v16.4s, v0.4s, %[av].4s\n"
+ "FMUL v17.4s, v1.4s, %[av].4s\n"
+ ASM_PREFETCH("[%[outptr5], #64]")
+ "FCVTN v16.4h, v16.4s\n"
+ ASM_PREFETCH("[%[outptr6], #88]")
+ "FCVTN2 v16.8h, v17.4s\n"
+ ASM_PREFETCH("[%[outptr7], #88]")
+ "FMUL v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr6]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr6]], #8\n"
+ "FMUL v19.4s, v3.4s, %[av].4s\n"
+ "FMUL v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr7]], #16\n"
+ "FMUL v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr7]], #8\n"
+ "ADD %[inptr], %[inptr], #384\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [av] "w" (av), [bv] "w" (bv)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
+ );
}
- inptr += 96;
- }
- else
- {
- /* Optimized routine to copy an entire block */
- __asm __volatile(
- // Rows 0-1
- "LDR q16, [%[outptr0]]\n"
- "FCVTL2 v17.4s, v16.8h\n"
- "LDR d18, [%[outptr0], #16]\n"
- "FCVTL v16.4s, v16.4h\n"
- "LDR q19, [%[outptr1]]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDR d21, [%[outptr1], #16]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr]]\n"
- "FCVTL v18.4s, v18.4h\n"
- "LDP q2, q3, [%[inptr], #32]\n"
- "FCVTL2 v20.4s, v19.8h\n"
- "LDP q4, q5, [%[inptr], #64]\n"
- "FCVTL v19.4s, v19.4h\n" ASM_PREFETCH("[%[inptr], #768]") "FCVTL v21.4s, v21.4h\n" ASM_PREFETCH("[%[inptr], #832]") "FMUL v18.4s, v18.4s, %[bv].4s\n" ASM_PREFETCH("[%[inptr], #896]")
- "FMUL v20.4s, v20.4s, %[bv].4s\n" ASM_PREFETCH("[%[inptr], #960]")
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n"
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "FCVTN v16.4h, v16.4s\n"
- "FCVTN2 v16.8h, v17.4s\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q16, [%[outptr0]], #16\n"
- "FCVTN v18.4h, v18.4s\n"
- "STR d18, [%[outptr0]], #8\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n"
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "FCVTN v19.4h, v19.4s\n"
- "FCVTN2 v19.8h, v20.4s\n"
- "STR q19, [%[outptr1]], #16\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "FCVTN v21.4h, v21.4s\n"
- "STR d21, [%[outptr1]], #8\n"
+ } else {
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+11) >= xmax) {
+ for (int xi=0; xi<12; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
+ outptr5++;
+ *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
+ outptr6++;
+ *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
+ outptr7++;
+ }
+ }
+ inptr += 96;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Rows 0-1
+ "LDR q16, [%[outptr0]]\n"
+ "FCVTL2 v17.4s, v16.8h\n"
+ "LDR d18, [%[outptr0], #16]\n"
+ "FCVTL v16.4s, v16.4h\n"
+ "LDR q19, [%[outptr1]]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDR d21, [%[outptr1], #16]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr]]\n"
+ "FCVTL v18.4s, v18.4h\n"
+ "LDP q2, q3, [%[inptr], #32]\n"
+ "FCVTL2 v20.4s, v19.8h\n"
+ "LDP q4, q5, [%[inptr], #64]\n"
+ "FCVTL v19.4s, v19.4h\n"
+ ASM_PREFETCH("[%[inptr], #768]")
+ "FCVTL v21.4s, v21.4h\n"
+ ASM_PREFETCH("[%[inptr], #832]")
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ ASM_PREFETCH("[%[inptr], #896]")
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ ASM_PREFETCH("[%[inptr], #960]")
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "FCVTN v16.4h, v16.4s\n"
+ "FCVTN2 v16.8h, v17.4s\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr0]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr0]], #8\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr1]], #16\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr1]], #8\n"
- // Rows 2-3
- "LDR q16, [%[outptr2]]\n"
- "FCVTL2 v17.4s, v16.8h\n"
- "LDR d18, [%[outptr2], #16]\n"
- "FCVTL v16.4s, v16.4h\n"
- "LDR q19, [%[outptr3]]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDR d21, [%[outptr3], #16]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr], #96]\n"
- "FCVTL v18.4s, v18.4h\n"
- "LDP q2, q3, [%[inptr], #128]\n"
- "FCVTL2 v20.4s, v19.8h\n"
- "LDP q4, q5, [%[inptr], #160]\n"
- "FCVTL v19.4s, v19.4h\n" ASM_PREFETCH("[%[inptr], #1024]") "FCVTL v21.4s, v21.4h\n" ASM_PREFETCH("[%[inptr], #1088]") "FMUL v18.4s, v18.4s, %[bv].4s\n" ASM_PREFETCH("[%[outptr0], #64]")
- "FMUL v20.4s, v20.4s, %[bv].4s\n" ASM_PREFETCH("[%[outptr1], #64]")
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n"
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "FCVTN v16.4h, v16.4s\n"
- "FCVTN2 v16.8h, v17.4s\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q16, [%[outptr2]], #16\n"
- "FCVTN v18.4h, v18.4s\n"
- "STR d18, [%[outptr2]], #8\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n"
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "FCVTN v19.4h, v19.4s\n"
- "FCVTN2 v19.8h, v20.4s\n"
- "STR q19, [%[outptr3]], #16\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "FCVTN v21.4h, v21.4s\n"
- "STR d21, [%[outptr3]], #8\n"
+ // Rows 2-3
+ "LDR q16, [%[outptr2]]\n"
+ "FCVTL2 v17.4s, v16.8h\n"
+ "LDR d18, [%[outptr2], #16]\n"
+ "FCVTL v16.4s, v16.4h\n"
+ "LDR q19, [%[outptr3]]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDR d21, [%[outptr3], #16]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr], #96]\n"
+ "FCVTL v18.4s, v18.4h\n"
+ "LDP q2, q3, [%[inptr], #128]\n"
+ "FCVTL2 v20.4s, v19.8h\n"
+ "LDP q4, q5, [%[inptr], #160]\n"
+ "FCVTL v19.4s, v19.4h\n"
+ ASM_PREFETCH("[%[inptr], #1024]")
+ "FCVTL v21.4s, v21.4h\n"
+ ASM_PREFETCH("[%[inptr], #1088]")
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ ASM_PREFETCH("[%[outptr0], #64]")
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ ASM_PREFETCH("[%[outptr1], #64]")
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "FCVTN v16.4h, v16.4s\n"
+ "FCVTN2 v16.8h, v17.4s\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr2]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr2]], #8\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr3]], #16\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr3]], #8\n"
- // Rows 4-5
- "LDR q16, [%[outptr4]]\n"
- "FCVTL2 v17.4s, v16.8h\n"
- "LDR d18, [%[outptr4], #16]\n"
- "FCVTL v16.4s, v16.4h\n"
- "LDR q19, [%[outptr5]]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDR d21, [%[outptr5], #16]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr], #192]\n"
- "FCVTL v18.4s, v18.4h\n"
- "LDP q2, q3, [%[inptr], #224]\n"
- "FCVTL2 v20.4s, v19.8h\n"
- "LDP q4, q5, [%[inptr], #256]\n"
- "FCVTL v19.4s, v19.4h\n" ASM_PREFETCH("[%[outptr2], #64]") "FCVTL v21.4s, v21.4h\n" ASM_PREFETCH("[%[outptr3], #64]") "FMUL v18.4s, v18.4s, %[bv].4s\n" ASM_PREFETCH("[%[outptr4], #88]")
- "FMUL v20.4s, v20.4s, %[bv].4s\n"
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n"
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "FCVTN v16.4h, v16.4s\n"
- "FCVTN2 v16.8h, v17.4s\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q16, [%[outptr4]], #16\n"
- "FCVTN v18.4h, v18.4s\n"
- "STR d18, [%[outptr4]], #8\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n"
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "FCVTN v19.4h, v19.4s\n"
- "FCVTN2 v19.8h, v20.4s\n"
- "STR q19, [%[outptr5]], #16\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "FCVTN v21.4h, v21.4s\n"
- "STR d21, [%[outptr5]], #8\n"
+ // Rows 4-5
+ "LDR q16, [%[outptr4]]\n"
+ "FCVTL2 v17.4s, v16.8h\n"
+ "LDR d18, [%[outptr4], #16]\n"
+ "FCVTL v16.4s, v16.4h\n"
+ "LDR q19, [%[outptr5]]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDR d21, [%[outptr5], #16]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr], #192]\n"
+ "FCVTL v18.4s, v18.4h\n"
+ "LDP q2, q3, [%[inptr], #224]\n"
+ "FCVTL2 v20.4s, v19.8h\n"
+ "LDP q4, q5, [%[inptr], #256]\n"
+ "FCVTL v19.4s, v19.4h\n"
+ ASM_PREFETCH("[%[outptr2], #64]")
+ "FCVTL v21.4s, v21.4h\n"
+ ASM_PREFETCH("[%[outptr3], #64]")
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ ASM_PREFETCH("[%[outptr4], #88]")
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "FCVTN v16.4h, v16.4s\n"
+ "FCVTN2 v16.8h, v17.4s\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr4]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr4]], #8\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr5]], #16\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr5]], #8\n"
- // Rows 6-7
- "LDR q16, [%[outptr6]]\n"
- "FCVTL2 v17.4s, v16.8h\n"
- "LDR d18, [%[outptr6], #16]\n"
- "FCVTL v16.4s, v16.4h\n"
- "LDR q19, [%[outptr7]]\n"
- "FMUL v17.4s, v17.4s, %[bv].4s\n"
- "LDR d21, [%[outptr7], #16]\n"
- "FMUL v16.4s, v16.4s, %[bv].4s\n"
- "LDP q0, q1, [%[inptr], #288]\n"
- "FCVTL v18.4s, v18.4h\n"
- "LDP q2, q3, [%[inptr], #320]\n"
- "FCVTL2 v20.4s, v19.8h\n"
- "LDP q4, q5, [%[inptr], #352]\n"
- "FCVTL v19.4s, v19.4h\n" ASM_PREFETCH("[%[outptr5], #64]") "FCVTL v21.4s, v21.4h\n" ASM_PREFETCH("[%[outptr6], #88]") "FMUL v18.4s, v18.4s, %[bv].4s\n" ASM_PREFETCH("[%[outptr7], #88]")
- "FMUL v20.4s, v20.4s, %[bv].4s\n"
- "FMUL v19.4s, v19.4s, %[bv].4s\n"
- "FMUL v21.4s, v21.4s, %[bv].4s\n"
- "FMLA v16.4s, v0.4s, %[av].4s\n"
- "FMLA v17.4s, v1.4s, %[av].4s\n"
- "FCVTN v16.4h, v16.4s\n"
- "FCVTN2 v16.8h, v17.4s\n"
- "FMLA v18.4s, v2.4s, %[av].4s\n"
- "STR q16, [%[outptr6]], #16\n"
- "FCVTN v18.4h, v18.4s\n"
- "STR d18, [%[outptr6]], #8\n"
- "FMLA v19.4s, v3.4s, %[av].4s\n"
- "FMLA v20.4s, v4.4s, %[av].4s\n"
- "FCVTN v19.4h, v19.4s\n"
- "FCVTN2 v19.8h, v20.4s\n"
- "STR q19, [%[outptr7]], #16\n"
- "FMLA v21.4s, v5.4s, %[av].4s\n"
- "FCVTN v21.4h, v21.4s\n"
- "STR d21, [%[outptr7]], #8\n"
- "ADD %[inptr], %[inptr], #384\n"
- : [outptr0] "+r"(outptr0), [outptr1] "+r"(outptr1), [outptr2] "+r"(outptr2), [outptr3] "+r"(outptr3),
- [outptr4] "+r"(outptr4), [outptr5] "+r"(outptr5), [outptr6] "+r"(outptr6), [outptr7] "+r"(outptr7),
- [inptr] "+r"(inptr)
- : [av] "w"(av), [bv] "w"(bv)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21");
+ // Rows 6-7
+ "LDR q16, [%[outptr6]]\n"
+ "FCVTL2 v17.4s, v16.8h\n"
+ "LDR d18, [%[outptr6], #16]\n"
+ "FCVTL v16.4s, v16.4h\n"
+ "LDR q19, [%[outptr7]]\n"
+ "FMUL v17.4s, v17.4s, %[bv].4s\n"
+ "LDR d21, [%[outptr7], #16]\n"
+ "FMUL v16.4s, v16.4s, %[bv].4s\n"
+ "LDP q0, q1, [%[inptr], #288]\n"
+ "FCVTL v18.4s, v18.4h\n"
+ "LDP q2, q3, [%[inptr], #320]\n"
+ "FCVTL2 v20.4s, v19.8h\n"
+ "LDP q4, q5, [%[inptr], #352]\n"
+ "FCVTL v19.4s, v19.4h\n"
+ ASM_PREFETCH("[%[outptr5], #64]")
+ "FCVTL v21.4s, v21.4h\n"
+ ASM_PREFETCH("[%[outptr6], #88]")
+ "FMUL v18.4s, v18.4s, %[bv].4s\n"
+ ASM_PREFETCH("[%[outptr7], #88]")
+ "FMUL v20.4s, v20.4s, %[bv].4s\n"
+ "FMUL v19.4s, v19.4s, %[bv].4s\n"
+ "FMUL v21.4s, v21.4s, %[bv].4s\n"
+ "FMLA v16.4s, v0.4s, %[av].4s\n"
+ "FMLA v17.4s, v1.4s, %[av].4s\n"
+ "FCVTN v16.4h, v16.4s\n"
+ "FCVTN2 v16.8h, v17.4s\n"
+ "FMLA v18.4s, v2.4s, %[av].4s\n"
+ "STR q16, [%[outptr6]], #16\n"
+ "FCVTN v18.4h, v18.4s\n"
+ "STR d18, [%[outptr6]], #8\n"
+ "FMLA v19.4s, v3.4s, %[av].4s\n"
+ "FMLA v20.4s, v4.4s, %[av].4s\n"
+ "FCVTN v19.4h, v19.4s\n"
+ "FCVTN2 v19.8h, v20.4s\n"
+ "STR q19, [%[outptr7]], #16\n"
+ "FMLA v21.4s, v5.4s, %[av].4s\n"
+ "FCVTN v21.4h, v21.4s\n"
+ "STR d21, [%[outptr7]], #8\n"
+ "ADD %[inptr], %[inptr], #384\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [av] "w" (av), [bv] "w" (bv)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
+ );
+ }
}
}
}
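
In the float-to-half kernel above, the beta == 0 path scales each pair of 4-float vectors by alpha and narrows them into a single 8-half vector with FCVTN/FCVTN2; the general path first widens the stored halves with FCVTL/FCVTL2 so the beta scaling happens in float. A small intrinsics sketch of the narrowing step, for illustration only (not part of the patch):

    #include <arm_neon.h>

    // Editorial sketch: scale two 4-float vectors by alpha and pack them into
    // one vector of 8 halves (the FCVTN / FCVTN2 pair in the assembly above).
    static inline float16x8_t scale_and_narrow(float32x4_t lo, float32x4_t hi, float32x4_t av) {
        float16x4_t lo_h = vcvt_f16_f32(vmulq_f32(lo, av));   // low 4 lanes  (FCVTN)
        float16x4_t hi_h = vcvt_f16_f32(vmulq_f32(hi, av));   // high 4 lanes (FCVTN2)
        return vcombine_f16(lo_h, hi_h);
    }
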
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp
index 08cfc00523..3ed43b10bd 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_half_24x8.hpp
@@ -23,12 +23,12 @@
*/
#pragma once
-#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC)
+// AArch64 only, and either the FP16_KERNELS option set or the target explicitly supports FP16 vectors.
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
-template <>
+template<>
inline void MergeResults<24, 8>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax,
- const int x0, const int xmax, const __fp16 alpha, const __fp16 beta)
-{
+ const int x0, const int xmax, const __fp16 alpha, const __fp16 beta) {
const __fp16 *inptr = in;
prefetch_6x(inptr);
prefetch_6x(inptr + 48);
@@ -36,8 +36,7 @@ inline void MergeResults<24, 8>(__fp16 *out, const __fp16 *in, const int ldout,
float16x8_t va = vdupq_n_f16(alpha);
float16x8_t vb = vdupq_n_f16(beta);
- for(int y = y0; y < ymax; y += 8)
- {
+ for (int y=y0; y<ymax; y+=8) {
__fp16 *outptr0 = out + (y * ldout) + x0;
__fp16 *outptr1 = outptr0 + ldout;
__fp16 *outptr2 = outptr1 + ldout;
@@ -56,17 +55,14 @@ inline void MergeResults<24, 8>(__fp16 *out, const __fp16 *in, const int ldout,
prefetch_2x(outptr6);
prefetch_2x(outptr7);
- for(int i = x0; i < xmax; i += 24)
- {
+ for (int i=x0; i<xmax; i+=24) {
__fp16 dummyres[24];
/* Make sure we throw away results if Y isn't a multiple of 8.
* We do this by pointing the result pointer at a dummy buffer
* we later discard. */
- if((y + 7) >= ymax)
- {
- switch((y + 7) - ymax)
- {
+ if ((y+7) >= ymax) {
+ switch ((y + 7) - ymax) {
case 6:
outptr1 = dummyres;
case 5:
@@ -85,149 +81,277 @@ inline void MergeResults<24, 8>(__fp16 *out, const __fp16 *in, const int ldout,
default:
UNREACHABLE("Impossible.");
+
}
}
- /* For ragged X, manually copy over the valid results. */
- if((i + 23) >= xmax)
- {
- for(int xi = 0; xi < 24; xi++)
- {
- if((i + xi) < xmax)
- {
- *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
- outptr0++;
- *outptr1 = (alpha * inptr[xi + 24]) + (*outptr1 * beta);
- outptr1++;
- *outptr2 = (alpha * inptr[xi + 48]) + (*outptr2 * beta);
- outptr2++;
- *outptr3 = (alpha * inptr[xi + 72]) + (*outptr3 * beta);
- outptr3++;
- *outptr4 = (alpha * inptr[xi + 96]) + (*outptr4 * beta);
- outptr4++;
- *outptr5 = (alpha * inptr[xi + 120]) + (*outptr5 * beta);
- outptr5++;
- *outptr6 = (alpha * inptr[xi + 144]) + (*outptr6 * beta);
- outptr6++;
- *outptr7 = (alpha * inptr[xi + 168]) + (*outptr7 * beta);
- outptr7++;
+ if (beta == (__fp16)0.0f) {
+ /* If beta==0, don't read the output. */
+
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+23) >= xmax) {
+ for (int xi=0; xi<24; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 24]);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 48]);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 72]);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 96]);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 120]);
+ outptr5++;
+ *outptr6 = (alpha * inptr[xi + 144]);
+ outptr6++;
+ *outptr7 = (alpha * inptr[xi + 168]);
+ outptr7++;
+ }
}
+ inptr += 192;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ ".arch armv8.2-a+fp16\n"
+#endif
+ // Rows 0-1
+ ASM_PREFETCH("[%[inptr], #768]")
+ "LDP q0, q1, [%[inptr]]\n"
+ "LDP q2, q3, [%[inptr], #32]\n"
+ "LDP q4, q5, [%[inptr], #64]\n"
+ "FMUL v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[inptr], #832]")
+ "FMUL v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr0]], #32\n"
+ "FMUL v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr0]], #16\n"
+ "FMUL v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[inptr], #896]")
+ "FMUL v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr1]], #32\n"
+ "FMUL v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr1]], #16\n"
+ ASM_PREFETCH("[%[inptr], #960]")
+
+ // Rows 2-3
+ ASM_PREFETCH("[%[inptr], #1024]")
+ "LDP q0, q1, [%[inptr], #96]\n"
+ "LDP q2, q3, [%[inptr], #128]\n"
+ "LDP q4, q5, [%[inptr], #160]\n"
+ "FMUL v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[inptr], #1088]")
+ "FMUL v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr2]], #32\n"
+ "FMUL v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr2]], #16\n"
+ "FMUL v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr0], #80]")
+ "FMUL v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr3]], #32\n"
+ "FMUL v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr3]], #16\n"
+ ASM_PREFETCH("[%[outptr1], #80]")
+
+ // Rows 4-5
+ ASM_PREFETCH("[%[outptr2], #80]")
+ "LDP q0, q1, [%[inptr], #192]\n"
+ "LDP q2, q3, [%[inptr], #224]\n"
+ "LDP q4, q5, [%[inptr], #256]\n"
+ "FMUL v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr3], #80]")
+ "FMUL v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr4]], #32\n"
+ "FMUL v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr4]], #16\n"
+ "FMUL v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr4], #80]")
+ "FMUL v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr5]], #32\n"
+ "FMUL v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr5]], #16\n"
+
+ // Rows 6-7
+ ASM_PREFETCH("[%[outptr5], #80]")
+ "LDP q0, q1, [%[inptr], #288]\n"
+ "LDP q2, q3, [%[inptr], #320]\n"
+ "LDP q4, q5, [%[inptr], #352]\n"
+ "FMUL v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr6], #128]")
+ "FMUL v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr6]], #32\n"
+ "FMUL v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr6]], #16\n"
+ "FMUL v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr7], #128]")
+ "FMUL v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr7]], #32\n"
+ "FMUL v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr7]], #16\n"
+ "ADD %[inptr], %[inptr], #384\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [va] "w" (va), [vb] "w" (vb)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
+ );
+ }
+ } else {
+ /* For ragged X, manually copy over the valid results. */
+ if ((i+23) >= xmax) {
+ for (int xi=0; xi<24; xi++) {
+ if ((i+xi) < xmax) {
+ *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+ outptr0++;
+ *outptr1 = (alpha * inptr[xi + 24]) + (*outptr1 * beta);
+ outptr1++;
+ *outptr2 = (alpha * inptr[xi + 48]) + (*outptr2 * beta);
+ outptr2++;
+ *outptr3 = (alpha * inptr[xi + 72]) + (*outptr3 * beta);
+ outptr3++;
+ *outptr4 = (alpha * inptr[xi + 96]) + (*outptr4 * beta);
+ outptr4++;
+ *outptr5 = (alpha * inptr[xi + 120]) + (*outptr5 * beta);
+ outptr5++;
+ *outptr6 = (alpha * inptr[xi + 144]) + (*outptr6 * beta);
+ outptr6++;
+ *outptr7 = (alpha * inptr[xi + 168]) + (*outptr7 * beta);
+ outptr7++;
+ }
+ }
+ inptr += 192;
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ ".arch armv8.2-a+fp16\n"
+#endif
+ // Rows 0-1
+ "LDP q16, q17, [%[outptr0]]\n"
+ "FMUL v16.8h, v16.8h, %[vb].8h\n"
+ "LDR q18, [%[outptr0], #32]\n"
+ "FMUL v17.8h, v17.8h, %[vb].8h\n"
+ "LDP q19, q20, [%[outptr1]]\n"
+ "FMUL v18.8h, v18.8h, %[vb].8h\n"
+ ASM_PREFETCH("[%[inptr], #768]")
+ "LDR q21, [%[outptr1], #32]\n"
+ "FMUL v19.8h, v19.8h, %[vb].8h\n"
+ "LDP q0, q1, [%[inptr]]\n"
+ "FMUL v20.8h, v20.8h, %[vb].8h\n"
+ "LDP q2, q3, [%[inptr], #32]\n"
+ "FMUL v21.8h, v21.8h, %[vb].8h\n"
+ "LDP q4, q5, [%[inptr], #64]\n"
+ "FMLA v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[inptr], #832]")
+ "FMLA v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr0]], #32\n"
+ "FMLA v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr0]], #16\n"
+ "FMLA v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[inptr], #896]")
+ "FMLA v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr1]], #32\n"
+ "FMLA v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr1]], #16\n"
+ ASM_PREFETCH("[%[inptr], #960]")
+
+ // Rows 2-3
+ "LDP q16, q17, [%[outptr2]]\n"
+ "FMUL v16.8h, v16.8h, %[vb].8h\n"
+ "LDR q18, [%[outptr2], #32]\n"
+ "FMUL v17.8h, v17.8h, %[vb].8h\n"
+ "LDP q19, q20, [%[outptr3]]\n"
+ "FMUL v18.8h, v18.8h, %[vb].8h\n"
+ ASM_PREFETCH("[%[inptr], #1024]")
+ "LDR q21, [%[outptr3], #32]\n"
+ "FMUL v19.8h, v19.8h, %[vb].8h\n"
+ "LDP q0, q1, [%[inptr], #96]\n"
+ "FMUL v20.8h, v20.8h, %[vb].8h\n"
+ "LDP q2, q3, [%[inptr], #128]\n"
+ "FMUL v21.8h, v21.8h, %[vb].8h\n"
+ "LDP q4, q5, [%[inptr], #160]\n"
+ "FMLA v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[inptr], #1088]")
+ "FMLA v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr2]], #32\n"
+ "FMLA v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr2]], #16\n"
+ "FMLA v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr0], #80]")
+ "FMLA v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr3]], #32\n"
+ "FMLA v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr3]], #16\n"
+ ASM_PREFETCH("[%[outptr1], #80]")
+
+ // Rows 4-5
+ "LDP q16, q17, [%[outptr4]]\n"
+ "FMUL v16.8h, v16.8h, %[vb].8h\n"
+ "LDR q18, [%[outptr4], #32]\n"
+ "FMUL v17.8h, v17.8h, %[vb].8h\n"
+ "LDP q19, q20, [%[outptr5]]\n"
+ "FMUL v18.8h, v18.8h, %[vb].8h\n"
+ ASM_PREFETCH("[%[outptr2], #80]")
+ "LDR q21, [%[outptr5], #32]\n"
+ "FMUL v19.8h, v19.8h, %[vb].8h\n"
+ "LDP q0, q1, [%[inptr], #192]\n"
+ "FMUL v20.8h, v20.8h, %[vb].8h\n"
+ "LDP q2, q3, [%[inptr], #224]\n"
+ "FMUL v21.8h, v21.8h, %[vb].8h\n"
+ "LDP q4, q5, [%[inptr], #256]\n"
+ "FMLA v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr3], #80]")
+ "FMLA v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr4]], #32\n"
+ "FMLA v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr4]], #16\n"
+ "FMLA v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr4], #80]")
+ "FMLA v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr5]], #32\n"
+ "FMLA v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr5]], #16\n"
+
+ // Rows 6-7
+ "LDP q16, q17, [%[outptr6]]\n"
+ "FMUL v16.8h, v16.8h, %[vb].8h\n"
+ "LDR q18, [%[outptr6], #32]\n"
+ "FMUL v17.8h, v17.8h, %[vb].8h\n"
+ "LDP q19, q20, [%[outptr7]]\n"
+ ASM_PREFETCH("[%[outptr5], #80]")
+ "FMUL v18.8h, v18.8h, %[vb].8h\n"
+ "LDR q21, [%[outptr7], #32]\n"
+ "FMUL v19.8h, v19.8h, %[vb].8h\n"
+ "LDP q0, q1, [%[inptr], #288]\n"
+ "FMUL v20.8h, v20.8h, %[vb].8h\n"
+ "LDP q2, q3, [%[inptr], #320]\n"
+ "FMUL v21.8h, v21.8h, %[vb].8h\n"
+ "LDP q4, q5, [%[inptr], #352]\n"
+ "FMLA v16.8h, v0.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr6], #128]")
+ "FMLA v17.8h, v1.8h, %[va].8h\n"
+ "STP q16, q17, [%[outptr6]], #32\n"
+ "FMLA v18.8h, v2.8h, %[va].8h\n"
+ "STR q18, [%[outptr6]], #16\n"
+ "FMLA v19.8h, v3.8h, %[va].8h\n"
+ ASM_PREFETCH("[%[outptr7], #128]")
+ "FMLA v20.8h, v4.8h, %[va].8h\n"
+ "STP q19, q20, [%[outptr7]], #32\n"
+ "FMLA v21.8h, v5.8h, %[va].8h\n"
+ "STR q21, [%[outptr7]], #16\n"
+ "ADD %[inptr], %[inptr], #384\n"
+ : [outptr0] "+r" (outptr0), [outptr1] "+r" (outptr1), [outptr2] "+r" (outptr2), [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4), [outptr5] "+r" (outptr5), [outptr6] "+r" (outptr6), [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [va] "w" (va), [vb] "w" (vb)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21"
+ );
}
- inptr += 192;
- }
- else
- {
- /* Optimized routine to copy an entire block */
- __asm __volatile(
- ".arch armv8.2-a+fp16\n"
- // Rows 0-1
- "LDP q16, q17, [%[outptr0]]\n"
- "FMUL v16.8h, v16.8h, %[vb].8h\n"
- "LDR q18, [%[outptr0], #32]\n"
- "FMUL v17.8h, v17.8h, %[vb].8h\n"
- "LDP q19, q20, [%[outptr1]]\n"
- "FMUL v18.8h, v18.8h, %[vb].8h\n" ASM_PREFETCH("[%[inptr], #768]")
- "LDR q21, [%[outptr1], #32]\n"
- "FMUL v19.8h, v19.8h, %[vb].8h\n"
- "LDP q0, q1, [%[inptr]]\n"
- "FMUL v20.8h, v20.8h, %[vb].8h\n"
- "LDP q2, q3, [%[inptr], #32]\n"
- "FMUL v21.8h, v21.8h, %[vb].8h\n"
- "LDP q4, q5, [%[inptr], #64]\n"
- "FMLA v16.8h, v0.8h, %[va].8h\n" ASM_PREFETCH("[%[inptr], #832]")
- "FMLA v17.8h, v1.8h, %[va].8h\n"
- "STP q16, q17, [%[outptr0]], #32\n"
- "FMLA v18.8h, v2.8h, %[va].8h\n"
- "STR q18, [%[outptr0]], #16\n"
- "FMLA v19.8h, v3.8h, %[va].8h\n" ASM_PREFETCH("[%[inptr], #896]")
- "FMLA v20.8h, v4.8h, %[va].8h\n"
- "STP q19, q20, [%[outptr1]], #32\n"
- "FMLA v21.8h, v5.8h, %[va].8h\n"
- "STR q21, [%[outptr1]], #16\n" ASM_PREFETCH("[%[inptr], #960]")
-
- // Rows 2-3
- "LDP q16, q17, [%[outptr2]]\n"
- "FMUL v16.8h, v16.8h, %[vb].8h\n"
- "LDR q18, [%[outptr2], #32]\n"
- "FMUL v17.8h, v17.8h, %[vb].8h\n"
- "LDP q19, q20, [%[outptr3]]\n"
- "FMUL v18.8h, v18.8h, %[vb].8h\n" ASM_PREFETCH("[%[inptr], #1024]")
- "LDR q21, [%[outptr3], #32]\n"
- "FMUL v19.8h, v19.8h, %[vb].8h\n"
- "LDP q0, q1, [%[inptr], #96]\n"
- "FMUL v20.8h, v20.8h, %[vb].8h\n"
- "LDP q2, q3, [%[inptr], #128]\n"
- "FMUL v21.8h, v21.8h, %[vb].8h\n"
- "LDP q4, q5, [%[inptr], #160]\n"
- "FMLA v16.8h, v0.8h, %[va].8h\n" ASM_PREFETCH("[%[inptr], #1088]")
- "FMLA v17.8h, v1.8h, %[va].8h\n"
- "STP q16, q17, [%[outptr2]], #32\n"
- "FMLA v18.8h, v2.8h, %[va].8h\n"
- "STR q18, [%[outptr2]], #16\n"
- "FMLA v19.8h, v3.8h, %[va].8h\n" ASM_PREFETCH("[%[outptr0], #80]")
- "FMLA v20.8h, v4.8h, %[va].8h\n"
- "STP q19, q20, [%[outptr3]], #32\n"
- "FMLA v21.8h, v5.8h, %[va].8h\n"
- "STR q21, [%[outptr3]], #16\n" ASM_PREFETCH("[%[outptr1], #80]")
-
- // Rows 4-5
- "LDP q16, q17, [%[outptr4]]\n"
- "FMUL v16.8h, v16.8h, %[vb].8h\n"
- "LDR q18, [%[outptr4], #32]\n"
- "FMUL v17.8h, v17.8h, %[vb].8h\n"
- "LDP q19, q20, [%[outptr5]]\n"
- "FMUL v18.8h, v18.8h, %[vb].8h\n" ASM_PREFETCH("[%[outptr2], #80]")
- "LDR q21, [%[outptr5], #32]\n"
- "FMUL v19.8h, v19.8h, %[vb].8h\n"
- "LDP q0, q1, [%[inptr], #192]\n"
- "FMUL v20.8h, v20.8h, %[vb].8h\n"
- "LDP q2, q3, [%[inptr], #224]\n"
- "FMUL v21.8h, v21.8h, %[vb].8h\n"
- "LDP q4, q5, [%[inptr], #256]\n"
- "FMLA v16.8h, v0.8h, %[va].8h\n" ASM_PREFETCH("[%[outptr3], #80]")
- "FMLA v17.8h, v1.8h, %[va].8h\n"
- "STP q16, q17, [%[outptr4]], #32\n"
- "FMLA v18.8h, v2.8h, %[va].8h\n"
- "STR q18, [%[outptr4]], #16\n"
- "FMLA v19.8h, v3.8h, %[va].8h\n" ASM_PREFETCH("[%[outptr4], #80]")
- "FMLA v20.8h, v4.8h, %[va].8h\n"
- "STP q19, q20, [%[outptr5]], #32\n"
- "FMLA v21.8h, v5.8h, %[va].8h\n"
- "STR q21, [%[outptr5]], #16\n"
-
- // Rows 6-7
- "LDP q16, q17, [%[outptr6]]\n"
- "FMUL v16.8h, v16.8h, %[vb].8h\n"
- "LDR q18, [%[outptr6], #32]\n"
- "FMUL v17.8h, v17.8h, %[vb].8h\n"
- "LDP q19, q20, [%[outptr7]]\n" ASM_PREFETCH("[%[outptr5], #80]")
- "FMUL v18.8h, v18.8h, %[vb].8h\n"
- "LDR q21, [%[outptr7], #32]\n"
- "FMUL v19.8h, v19.8h, %[vb].8h\n"
- "LDP q0, q1, [%[inptr], #288]\n"
- "FMUL v20.8h, v20.8h, %[vb].8h\n"
- "LDP q2, q3, [%[inptr], #320]\n"
- "FMUL v21.8h, v21.8h, %[vb].8h\n"
- "LDP q4, q5, [%[inptr], #352]\n"
- "FMLA v16.8h, v0.8h, %[va].8h\n" ASM_PREFETCH("[%[outptr6], #128]")
- "FMLA v17.8h, v1.8h, %[va].8h\n"
- "STP q16, q17, [%[outptr6]], #32\n"
- "FMLA v18.8h, v2.8h, %[va].8h\n"
- "STR q18, [%[outptr6]], #16\n"
- "FMLA v19.8h, v3.8h, %[va].8h\n" ASM_PREFETCH("[%[outptr7], #128]")
- "FMLA v20.8h, v4.8h, %[va].8h\n"
- "STP q19, q20, [%[outptr7]], #32\n"
- "FMLA v21.8h, v5.8h, %[va].8h\n"
- "STR q21, [%[outptr7]], #16\n"
- "ADD %[inptr], %[inptr], #384\n"
- : [outptr0] "+r"(outptr0), [outptr1] "+r"(outptr1), [outptr2] "+r"(outptr2), [outptr3] "+r"(outptr3),
- [outptr4] "+r"(outptr4), [outptr5] "+r"(outptr5), [outptr6] "+r"(outptr6), [outptr7] "+r"(outptr7),
- [inptr] "+r"(inptr)
- : [va] "w"(va), [vb] "w"(vb)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21");
}
}
}
}
-#endif // __aarch64__ && __ARM_FEATURE_FP16_SCALAR_ARITHMETIC
+#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
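For readers stepping through the FP16 block above: each iteration merges an 8-row by 24-column tile, replacing every output element with beta*out + alpha*in while the packed input advances 384 bytes (192 halves) per tile. A minimal scalar sketch of that semantics, assuming the __fp16 element type implied by the surrounding feature guard (illustrative only, not part of the kernel):

    // Scalar sketch of what the optimized FP16 block computes; the real
    // kernel uses the FMUL/FMLA assembly above and handles ragged edges
    // separately.
    static inline void merge_tile_fp16_sketch(__fp16 *outrow[8], const __fp16 *in,
                                              __fp16 alpha, __fp16 beta) {
        // The packed input is row-major with 24 halves per row, matching the
        // LDP/LDR offsets (#0, #32, #64, ... #352) used by the assembly.
        for (int row = 0; row < 8; row++) {
            for (int col = 0; col < 24; col++) {
                // FMUL (beta * out) followed by FMLA (+ alpha * in).
                outrow[row][col] = (beta * outrow[row][col]) + (alpha * in[row * 24 + col]);
            }
        }
    }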
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
index 79dd1f07e3..1a51505a25 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
@@ -25,18 +25,16 @@
#ifdef __aarch64__
-template <>
-inline void MergeResults<12, 8>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t alpha, const int32_t beta)
-{
+template<>
+inline void MergeResults<12, 8>(int32_t *out, const int32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const int32_t alpha, const int32_t beta) {
const int32_t *inptr = in;
prefetch_6x(inptr);
prefetch_6x(inptr + 96);
int32x4_t alpha_value = vdupq_n_s32(alpha);
- int32x4_t beta_value = vdupq_n_s32(beta);
+ int32x4_t beta_value = vdupq_n_s32(beta);
- for(int y = y0; y < ymax; y += 8)
- {
+ for (int y=y0; y<ymax; y+=8) {
int32_t *outptr0 = out + (y * ldout) + x0;
int32_t *outptr1 = outptr0 + ldout;
int32_t *outptr2 = outptr1 + ldout;
@@ -55,17 +53,14 @@ inline void MergeResults<12, 8>(int32_t *out, const int32_t *in, const int ldout
prefetch_2x(outptr6);
prefetch_2x(outptr7);
- for(int i = x0; i < xmax; i += 12)
- {
+ for (int i=x0; i<xmax; i+=12) {
int32_t dummyres[12];
/* Make sure we throw away results if Y isn't a multiple of 8.
* We do this by pointing the result pointer at a dummy buffer
* we later discard. */
- if((y + 7) >= ymax)
- {
- switch((y + 7) - ymax)
- {
+ if ((y+7) >= ymax) {
+ switch ((y + 7) - ymax) {
case 6:
outptr1 = dummyres;
case 5:
@@ -88,12 +83,9 @@ inline void MergeResults<12, 8>(int32_t *out, const int32_t *in, const int ldout
}
/* For ragged X, manually copy over the valid results. */
- if((i + 11) >= xmax)
- {
- for(int xi = 0; xi < 12; xi++)
- {
- if((i + xi) < xmax)
- {
+ if ((i+11) >= xmax) {
+ for (int xi=0; xi<12; xi++) {
+ if ((i+xi) < xmax) {
*outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
outptr0++;
*outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
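The two edge cases in the hunks above follow the pattern shared by all of these merge routines: rows beyond ymax are redirected to the on-stack dummyres buffer so the full-height stores stay harmless, and a ragged final column block falls back to the scalar alpha*in + beta*out update instead of the assembly path. A condensed sketch of that pattern, with hypothetical names (W, H, rowptr, dummy) chosen purely for illustration:

    // Condensed sketch of the ragged-edge handling used by these merge
    // kernels; not library code.
    template <int W, int H, typename T>
    void handle_ragged_edges_sketch(T *rowptr[H], const T *inptr,
                                    int i, int xmax, int y, int ymax,
                                    T alpha, T beta, T *dummy) {
        // Ragged Y: out-of-range row pointers are aimed at a dummy buffer of
        // width W, so later stores write somewhere legal and are discarded.
        // Row 0 is never redirected because the loop guarantees y < ymax.
        for (int r = 1; r < H; r++) {
            if ((y + r) >= ymax) {
                rowptr[r] = dummy;
            }
        }
        // Ragged X: only columns still inside xmax are written, one element
        // at a time, using the same alpha*in + beta*out update as the assembly.
        for (int xi = 0; xi < W; xi++) {
            if ((i + xi) < xmax) {
                for (int r = 0; r < H; r++) {
                    *rowptr[r] = (alpha * inptr[xi + r * W]) + (*rowptr[r] * beta);
                    rowptr[r]++;
                }
            }
        }
    }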
@@ -113,177 +105,175 @@ inline void MergeResults<12, 8>(int32_t *out, const int32_t *in, const int ldout
}
}
inptr += 96;
- }
- else
- {
+ } else {
/* Optimized routine to copy an entire block */
- __asm __volatile(
- // Row 0
- ASM_PREFETCH("[%x[outptr1], #192]")
- "ldr q3, [%x[outptr0]]\n"
- "ldr q4, [%x[outptr0], #0x10]\n"
- "ldr q5, [%x[outptr0], #0x20]\n"
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr]]\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x10]\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x20]\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr1]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr1], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr1], #0x20]\n"
+ __asm __volatile (
+ // Row 0
+ ASM_PREFETCH("[%x[outptr1], #192]")
+ "ldr q3, [%x[outptr0]]\n"
+ "ldr q4, [%x[outptr0], #0x10]\n"
+ "ldr q5, [%x[outptr0], #0x20]\n"
+ "mul v3.4s, v3.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr]]\n"
+ "mul v4.4s, v4.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0x10]\n"
+ "mul v5.4s, v5.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0x20]\n"
+ "mla v3.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q0, [%x[outptr1]]\n"
+ "mla v4.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q1, [%x[outptr1], #0x10]\n"
+ "mla v5.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q2, [%x[outptr1], #0x20]\n"
- // Row 1
- ASM_PREFETCH("[%x[outptr2], #192]")
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x30]\n"
- "str q3, [%x[outptr0]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x40]\n"
- "str q4, [%x[outptr0]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x50]\n"
- "str q5, [%x[outptr0]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q3, [%x[outptr2]]\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q4, [%x[outptr2], #0x10]\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q5, [%x[outptr2], #0x20]\n"
+ // Row 1
+ ASM_PREFETCH("[%x[outptr2], #192]")
+ "mul v0.4s, v0.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0x30]\n"
+ "str q3, [%x[outptr0]], #0x10\n"
+ "mul v1.4s, v1.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0x40]\n"
+ "str q4, [%x[outptr0]], #0x10\n"
+ "mul v2.4s, v2.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0x50]\n"
+ "str q5, [%x[outptr0]], #0x10\n"
+ "mla v0.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q3, [%x[outptr2]]\n"
+ "mla v1.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q4, [%x[outptr2], #0x10]\n"
+ "mla v2.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q5, [%x[outptr2], #0x20]\n"
- // Row 2
- ASM_PREFETCH("[%x[outptr3], #192]")
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x60]\n"
- "str q0, [%x[outptr1]], #0x10\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x70]\n"
- "str q1, [%x[outptr1]], #0x10\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x80]\n"
- "str q2, [%x[outptr1]], #0x10\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr3]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr3], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr3], #0x20]\n"
+ // Row 2
+ ASM_PREFETCH("[%x[outptr3], #192]")
+ "mul v3.4s, v3.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0x60]\n"
+ "str q0, [%x[outptr1]], #0x10\n"
+ "mul v4.4s, v4.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0x70]\n"
+ "str q1, [%x[outptr1]], #0x10\n"
+ "mul v5.4s, v5.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0x80]\n"
+ "str q2, [%x[outptr1]], #0x10\n"
+ "mla v3.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q0, [%x[outptr3]]\n"
+ "mla v4.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q1, [%x[outptr3], #0x10]\n"
+ "mla v5.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q2, [%x[outptr3], #0x20]\n"
- // Row 3
- ASM_PREFETCH("[%x[outptr4], #192]")
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x90]\n"
- "str q3, [%x[outptr2]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0xa0]\n"
- "str q4, [%x[outptr2]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0xb0]\n"
- "str q5, [%x[outptr2]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q3, [%x[outptr4]]\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q4, [%x[outptr4], #0x10]\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q5, [%x[outptr4], #0x20]\n"
+ // Row 3
+ ASM_PREFETCH("[%x[outptr4], #192]")
+ "mul v0.4s, v0.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0x90]\n"
+ "str q3, [%x[outptr2]], #0x10\n"
+ "mul v1.4s, v1.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0xa0]\n"
+ "str q4, [%x[outptr2]], #0x10\n"
+ "mul v2.4s, v2.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0xb0]\n"
+ "str q5, [%x[outptr2]], #0x10\n"
+ "mla v0.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q3, [%x[outptr4]]\n"
+ "mla v1.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q4, [%x[outptr4], #0x10]\n"
+ "mla v2.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q5, [%x[outptr4], #0x20]\n"
- // Row 4
- ASM_PREFETCH("[%x[outptr5], #192]")
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0xc0]\n"
- "str q0, [%x[outptr3]], #0x10\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0xd0]\n"
- "str q1, [%x[outptr3]], #0x10\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0xe0]\n"
- "str q2, [%x[outptr3]], #0x10\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr5]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr5], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr5], #0x20]\n"
+ // Row 4
+ ASM_PREFETCH("[%x[outptr5], #192]")
+ "mul v3.4s, v3.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0xc0]\n"
+ "str q0, [%x[outptr3]], #0x10\n"
+ "mul v4.4s, v4.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0xd0]\n"
+ "str q1, [%x[outptr3]], #0x10\n"
+ "mul v5.4s, v5.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0xe0]\n"
+ "str q2, [%x[outptr3]], #0x10\n"
+ "mla v3.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q0, [%x[outptr5]]\n"
+ "mla v4.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q1, [%x[outptr5], #0x10]\n"
+ "mla v5.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q2, [%x[outptr5], #0x20]\n"
- // Row 5
- ASM_PREFETCH("[%x[outptr6], #192]")
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0xf0]\n"
- "str q3, [%x[outptr4]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x100]\n"
- "str q4, [%x[outptr4]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x110]\n"
- "str q5, [%x[outptr4]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q3, [%x[outptr6]]\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q4, [%x[outptr6], #0x10]\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q5, [%x[outptr6], #0x20]\n"
+ // Row 5
+ ASM_PREFETCH("[%x[outptr6], #192]")
+ "mul v0.4s, v0.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0xf0]\n"
+ "str q3, [%x[outptr4]], #0x10\n"
+ "mul v1.4s, v1.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0x100]\n"
+ "str q4, [%x[outptr4]], #0x10\n"
+ "mul v2.4s, v2.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0x110]\n"
+ "str q5, [%x[outptr4]], #0x10\n"
+ "mla v0.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q3, [%x[outptr6]]\n"
+ "mla v1.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q4, [%x[outptr6], #0x10]\n"
+ "mla v2.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q5, [%x[outptr6], #0x20]\n"
- // Row 6
- ASM_PREFETCH("[%x[outptr7], #192]")
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x120]\n"
- "str q0, [%x[outptr5]], #0x10\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x130]\n"
- "str q1, [%x[outptr5]], #0x10\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x140]\n"
- "str q2, [%x[outptr5]], #0x10\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr7]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr7], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr7], #0x20]\n"
+ // Row 6
+ ASM_PREFETCH("[%x[outptr7], #192]")
+ "mul v3.4s, v3.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0x120]\n"
+ "str q0, [%x[outptr5]], #0x10\n"
+ "mul v4.4s, v4.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0x130]\n"
+ "str q1, [%x[outptr5]], #0x10\n"
+ "mul v5.4s, v5.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0x140]\n"
+ "str q2, [%x[outptr5]], #0x10\n"
+ "mla v3.4s, v6.4s, %[alpha_value].4s\n"
+ "ldr q0, [%x[outptr7]]\n"
+ "mla v4.4s, v7.4s, %[alpha_value].4s\n"
+ "ldr q1, [%x[outptr7], #0x10]\n"
+ "mla v5.4s, v8.4s, %[alpha_value].4s\n"
+ "ldr q2, [%x[outptr7], #0x20]\n"
- // Row 7
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x150]\n"
- "str q3, [%x[outptr6]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x160]\n"
- "str q4, [%x[outptr6]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x170]\n"
- "str q5, [%x[outptr6]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "str q0, [%x[outptr7]], #0x10\n"
- "str q1, [%x[outptr7]], #0x10\n"
- "str q2, [%x[outptr7]], #0x10\n"
+ // Row 7
+ "mul v0.4s, v0.4s, %[beta_value].4s\n"
+ "ldr q6, [%x[inptr], #0x150]\n"
+ "str q3, [%x[outptr6]], #0x10\n"
+ "mul v1.4s, v1.4s, %[beta_value].4s\n"
+ "ldr q7, [%x[inptr], #0x160]\n"
+ "str q4, [%x[outptr6]], #0x10\n"
+ "mul v2.4s, v2.4s, %[beta_value].4s\n"
+ "ldr q8, [%x[inptr], #0x170]\n"
+ "str q5, [%x[outptr6]], #0x10\n"
+ "mla v0.4s, v6.4s, %[alpha_value].4s\n"
+ "mla v1.4s, v7.4s, %[alpha_value].4s\n"
+ "mla v2.4s, v8.4s, %[alpha_value].4s\n"
+ "str q0, [%x[outptr7]], #0x10\n"
+ "str q1, [%x[outptr7]], #0x10\n"
+ "str q2, [%x[outptr7]], #0x10\n"
- "add %x[inptr], %x[inptr], #0x180\n"
- : [outptr0] "+r"(outptr0),
- [outptr1] "+r"(outptr1),
- [outptr2] "+r"(outptr2),
- [outptr3] "+r"(outptr3),
- [outptr4] "+r"(outptr4),
- [outptr5] "+r"(outptr5),
- [outptr6] "+r"(outptr6),
- [outptr7] "+r"(outptr7),
- [inptr] "+r"(inptr)
- : [alpha_value] "w"(alpha_value),
- [beta_value] "w"(beta_value)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8");
+ "add %x[inptr], %x[inptr], #0x180\n"
+ : [outptr0] "+r" (outptr0),
+ [outptr1] "+r" (outptr1),
+ [outptr2] "+r" (outptr2),
+ [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4),
+ [outptr5] "+r" (outptr5),
+ [outptr6] "+r" (outptr6),
+ [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ : [alpha_value] "w" (alpha_value),
+ [beta_value] "w" (beta_value)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"
+ );
}
}
}
}
-template <>
-inline void MergeResults<12, 8>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t alpha, const uint32_t beta)
-{
- // Since the above code uses only MUL and MLA instructions discard the "unsignedness" and proceed safely.
- MergeResults<12, 8>(reinterpret_cast<int32_t *>(out), reinterpret_cast<const int32_t *>(in), ldout, y0, ymax, x0, xmax, static_cast<const int32_t>(alpha), static_cast<const int32_t>(beta));
+template<>
+inline void MergeResults<12, 8>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t alpha, const uint32_t beta) {
+ // Since the above code uses only MUL and MLA instructions discard the "unsignedness" and proceed safely.
+ MergeResults<12, 8>(reinterpret_cast<int32_t*>(out), reinterpret_cast<const int32_t*>(in), ldout, y0, ymax, x0, xmax, static_cast<const int32_t>(alpha), static_cast<const int32_t>(beta));
}
#endif // __aarch64__
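The unsigned overload at the end works because 32-bit MUL and MLA are modulo-2^32 operations: the resulting bit pattern does not depend on whether the lanes are read as int32_t or uint32_t, so reusing the signed kernel cannot change the stored values. A small scalar illustration of that argument (hypothetical helper names, not library code):

    #include <cstdint>

    // alpha*in + beta*out wraps modulo 2^32 on 32-bit lanes; the unsigned
    // arithmetic below models exactly what the MUL/MLA instructions compute,
    // regardless of signed or unsigned interpretation.
    static inline uint32_t merge_one_bits(uint32_t out_bits, uint32_t in_bits,
                                          uint32_t alpha_bits, uint32_t beta_bits) {
        return (alpha_bits * in_bits) + (beta_bits * out_bits);
    }

    // The unsigned case can therefore forward through the signed view by
    // reinterpreting values, mirroring how the uint32_t MergeResults<12, 8>
    // overload above forwards to the int32_t kernel.
    static inline int32_t merge_one_s32(int32_t out, int32_t in,
                                        int32_t alpha, int32_t beta) {
        return static_cast<int32_t>(merge_one_bits(
            static_cast<uint32_t>(out), static_cast<uint32_t>(in),
            static_cast<uint32_t>(alpha), static_cast<uint32_t>(beta)));
    }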