author     Georgios Pinitas <georgios.pinitas@arm.com>  2019-09-30 12:39:40 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-09-30 18:31:55 +0000
commit     ae0fc8612dba6faebf58c3ebbfae8d6e639d432d (patch)
tree       b31ee1085376b86fa33f2c6782b8a54f899116f9
parent     9637b2e4fc33b2264aa5586dd6b2ed1045db5075 (diff)
download   ComputeLibrary-ae0fc8612dba6faebf58c3ebbfae8d6e639d432d.tar.gz
COMPMID-2452: Dot product optimizations on merge/transforms
- Adds optimized gemm transforms for AArch64.
- Optimizes the gemm merger to only support the alpha==1 and (beta==0 || beta==1) cases.

Change-Id: I55793b12a0381f4fd53f521d0e57416809904d96
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2003
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp                       |   2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int8.cpp                                   |   2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp                                  |   2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp                 | 390
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp  | 223
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/list.hpp                             |   1
6 files changed, 452 insertions(+), 168 deletions(-)
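
For reference, the restriction described in the commit message (alpha==1 and beta of 0 or 1) reduces the merge step to either a plain copy of the accumulator block or an accumulate into the destination. A minimal scalar sketch of that behaviour for one row follows; it is illustrative only, not the NEON/assembly kernel in the patch, and the function name and simplified signature are assumptions.

#include <cstdint>

// Illustrative scalar merge for one row of 'width' int32 accumulators,
// covering only the alpha==1, beta==0/1 cases the optimized kernel handles.
static void merge_row_sketch(int32_t *out, const int32_t *in, int width, int32_t beta) {
    if (beta == 0) {
        for (int x = 0; x < width; x++) {
            out[x] = in[x];      // beta==0: straight copy, destination is never read
        }
    } else {
        for (int x = 0; x < width; x++) {
            out[x] += in[x];     // beta==1: accumulate into the existing destination
        }
    }
}

This is the same split the rewritten assembly below makes: a load/store-only path when beta==0 and an add-then-store path otherwise, with no multiplications left in either path.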
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
index 1bd9126da9..b4edece8d5 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
@@ -221,7 +221,6 @@ public:
compute_row_sums(_qp, _Ksize, (m_end - m_start),
this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda), this->_lda,
local_row_sums);
-// row_bias + (multi * _nbatches * _Msize) + (batch * _Msize) + m_start);
}
{
@@ -231,7 +230,6 @@ public:
requantize_block_32(_qp, (nmax - n0), (m_end - m_start), result_buffer, (nmax - n0),
this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc,
-// row_bias + (multi * _nbatches * _Msize) + (batch * _Msize) + m_start, col_bias);
local_row_sums, col_bias + (multi * _Nsize) + n0);
}
} while (p.next_dim0());
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index 53749fd4f6..49241a7129 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -98,7 +98,7 @@ static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
{
GemmMethod::GEMM_INTERLEAVED,
"gemm_s8_12x8",
- [](const GemmArgs<int32_t> &args) { return args._ci->has_dotprod(); },
+ [](const GemmArgs<int32_t> &args) { return args._ci->has_dotprod() && args._alpha==1 && (args._beta==0 || args._beta==1); },
nullptr,
[](const GemmArgs<int32_t> &args) { return new GemmInterleaved<gemm_s8_12x8, int8_t, int32_t>(args); }
},
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index 84a45f700e..f5a30720d2 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -98,7 +98,7 @@ static const GemmImplementation<uint8_t, uint32_t> gemm_u8_methods[] = {
{
GemmMethod::GEMM_INTERLEAVED,
"gemm_u8_12x8",
- [](const GemmArgs<uint32_t> &args) { return args._ci->has_dotprod(); },
+ [](const GemmArgs<uint32_t> &args) { return args._ci->has_dotprod() && args._alpha==1 && (args._beta==0 || args._beta==1); },
nullptr,
[](const GemmArgs<uint32_t> &args) { return new GemmInterleaved<gemm_u8_12x8, uint8_t, uint32_t>(args); }
},
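
Both the gemm_int8.cpp and gemm_uint8.cpp hunks above tighten the "is supported" predicate of the 12x8 interleaved dot-product entry, so that entry is only selected when the restricted merger applies; otherwise selection falls through to a later, more general entry in the method table. A simplified, hypothetical mirror of that selection pattern is sketched below (the struct and function names are assumptions, not the library's actual API):

#include <cstdint>
#include <functional>
#include <vector>

// Hypothetical, simplified mirror of a method table: each candidate carries a
// predicate, and the first candidate whose predicate accepts the arguments wins.
struct ArgsSketch {
    bool    has_dotprod;
    int32_t alpha;
    int32_t beta;
};

struct MethodSketch {
    const char *name;
    std::function<bool(const ArgsSketch &)> is_supported;
};

static const char *pick_method(const std::vector<MethodSketch> &methods, const ArgsSketch &args) {
    for (const auto &m : methods) {
        if (!m.is_supported || m.is_supported(args)) {
            return m.name;              // first acceptable entry wins
        }
    }
    return "none";
}

// With the patch, an entry like "gemm_u8_12x8" would only be returned when
// dot-product is available *and* alpha==1 and beta is 0 or 1; for any other
// alpha/beta a later, more general entry is chosen instead.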
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
index 0212dfdbb6..ce7310d13d 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_int32_12x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,9 +31,6 @@ inline void MergeResults<12, 8, false>(int32_t *out, const int32_t *in, const in
prefetch_6x(inptr);
prefetch_6x(inptr + 96);
- int32x4_t alpha_value = vdupq_n_s32(alpha);
- int32x4_t beta_value = vdupq_n_s32(beta);
-
for (int y=y0; y<ymax; y+=8) {
int32_t *outptr0 = out + (y * ldout) + x0;
int32_t *outptr1 = outptr0 + ldout;
@@ -92,185 +89,250 @@ inline void MergeResults<12, 8, false>(int32_t *out, const int32_t *in, const in
if ((i+11) >= xmax) {
for (int xi=0; xi<12; xi++) {
if ((i+xi) < xmax) {
- *outptr0 = (alpha * inptr[xi]) + (*outptr0 * beta);
+ *outptr0 = (inptr[xi]) + (*outptr0 * beta);
outptr0++;
- *outptr1 = (alpha * inptr[xi + 12]) + (*outptr1 * beta);
+ *outptr1 = (inptr[xi + 12]) + (*outptr1 * beta);
outptr1++;
- *outptr2 = (alpha * inptr[xi + 24]) + (*outptr2 * beta);
+ *outptr2 = (inptr[xi + 24]) + (*outptr2 * beta);
outptr2++;
- *outptr3 = (alpha * inptr[xi + 36]) + (*outptr3 * beta);
+ *outptr3 = (inptr[xi + 36]) + (*outptr3 * beta);
outptr3++;
- *outptr4 = (alpha * inptr[xi + 48]) + (*outptr4 * beta);
+ *outptr4 = (inptr[xi + 48]) + (*outptr4 * beta);
outptr4++;
- *outptr5 = (alpha * inptr[xi + 60]) + (*outptr5 * beta);
+ *outptr5 = (inptr[xi + 60]) + (*outptr5 * beta);
outptr5++;
- *outptr6 = (alpha * inptr[xi + 72]) + (*outptr6 * beta);
+ *outptr6 = (inptr[xi + 72]) + (*outptr6 * beta);
outptr6++;
- *outptr7 = (alpha * inptr[xi + 84]) + (*outptr7 * beta);
+ *outptr7 = (inptr[xi + 84]) + (*outptr7 * beta);
outptr7++;
}
}
inptr += 96;
} else {
- /* Optimized routine to copy an entire block */
- __asm __volatile (
- // Row 0
- ASM_PREFETCH("[%x[outptr1], #192]")
- "ldr q3, [%x[outptr0]]\n"
- "ldr q4, [%x[outptr0], #0x10]\n"
- "ldr q5, [%x[outptr0], #0x20]\n"
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr]]\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x10]\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x20]\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr1]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr1], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr1], #0x20]\n"
+ if (beta == 0u) {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Row 0
+ ASM_PREFETCH("[%x[outptr1], #192]")
+ "ldr q0, [%x[inptr]]\n"
+ "ldr q1, [%x[inptr], #0x10]\n"
+ "ldr q2, [%x[inptr], #0x20]\n"
+
+ // Row 1
+ ASM_PREFETCH("[%x[outptr2], #192]")
+ "ldr q3, [%x[inptr], #0x30]\n"
+ "str q0, [%x[outptr0]], #0x10\n"
+ "ldr q4, [%x[inptr], #0x40]\n"
+ "str q1, [%x[outptr0]], #0x10\n"
+ "ldr q5, [%x[inptr], #0x50]\n"
+ "str q2, [%x[outptr0]], #0x10\n"
+
+ // Row 2
+ ASM_PREFETCH("[%x[outptr3], #192]")
+ "ldr q0, [%x[inptr], #0x60]\n"
+ "str q3, [%x[outptr1]], #0x10\n"
+ "ldr q1, [%x[inptr], #0x70]\n"
+ "str q4, [%x[outptr1]], #0x10\n"
+ "ldr q2, [%x[inptr], #0x80]\n"
+ "str q5, [%x[outptr1]], #0x10\n"
+
+ // Row 3
+ ASM_PREFETCH("[%x[outptr4], #192]")
+ "ldr q3, [%x[inptr], #0x90]\n"
+ "str q0, [%x[outptr2]], #0x10\n"
+ "ldr q4, [%x[inptr], #0xa0]\n"
+ "str q1, [%x[outptr2]], #0x10\n"
+ "ldr q5, [%x[inptr], #0xb0]\n"
+ "str q2, [%x[outptr2]], #0x10\n"
+
+ // Row 4
+ ASM_PREFETCH("[%x[outptr5], #192]")
+ "ldr q0, [%x[inptr], #0xc0]\n"
+ "str q3, [%x[outptr3]], #0x10\n"
+ "ldr q1, [%x[inptr], #0xd0]\n"
+ "str q4, [%x[outptr3]], #0x10\n"
+ "ldr q2, [%x[inptr], #0xe0]\n"
+ "str q5, [%x[outptr3]], #0x10\n"
+
+ // Row 5
+ ASM_PREFETCH("[%x[outptr6], #192]")
+ "ldr q3, [%x[inptr], #0xf0]\n"
+ "str q0, [%x[outptr4]], #0x10\n"
+ "ldr q4, [%x[inptr], #0x100]\n"
+ "str q1, [%x[outptr4]], #0x10\n"
+ "ldr q5, [%x[inptr], #0x110]\n"
+ "str q2, [%x[outptr4]], #0x10\n"
- // Row 1
- ASM_PREFETCH("[%x[outptr2], #192]")
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x30]\n"
- "str q3, [%x[outptr0]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x40]\n"
- "str q4, [%x[outptr0]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x50]\n"
- "str q5, [%x[outptr0]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q3, [%x[outptr2]]\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q4, [%x[outptr2], #0x10]\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q5, [%x[outptr2], #0x20]\n"
+ // Row 6
+ ASM_PREFETCH("[%x[outptr7], #192]")
+ "ldr q0, [%x[inptr], #0x120]\n"
+ "str q3, [%x[outptr5]], #0x10\n"
+ "ldr q1, [%x[inptr], #0x130]\n"
+ "str q4, [%x[outptr5]], #0x10\n"
+ "ldr q2, [%x[inptr], #0x140]\n"
+ "str q5, [%x[outptr5]], #0x10\n"
- // Row 2
- ASM_PREFETCH("[%x[outptr3], #192]")
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x60]\n"
- "str q0, [%x[outptr1]], #0x10\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x70]\n"
- "str q1, [%x[outptr1]], #0x10\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x80]\n"
- "str q2, [%x[outptr1]], #0x10\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr3]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr3], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr3], #0x20]\n"
+ // Row 7
+ "ldr q3, [%x[inptr], #0x150]\n"
+ "str q0, [%x[outptr6]], #0x10\n"
+ "ldr q4, [%x[inptr], #0x160]\n"
+ "str q1, [%x[outptr6]], #0x10\n"
+ "ldr q5, [%x[inptr], #0x170]\n"
+ "str q2, [%x[outptr6]], #0x10\n"
+ "str q3, [%x[outptr7]], #0x10\n"
+ "str q4, [%x[outptr7]], #0x10\n"
+ "str q5, [%x[outptr7]], #0x10\n"
- // Row 3
- ASM_PREFETCH("[%x[outptr4], #192]")
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x90]\n"
- "str q3, [%x[outptr2]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0xa0]\n"
- "str q4, [%x[outptr2]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0xb0]\n"
- "str q5, [%x[outptr2]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q3, [%x[outptr4]]\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q4, [%x[outptr4], #0x10]\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q5, [%x[outptr4], #0x20]\n"
+ "add %x[inptr], %x[inptr], #0x180\n"
+ : [outptr0] "+r" (outptr0),
+ [outptr1] "+r" (outptr1),
+ [outptr2] "+r" (outptr2),
+ [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4),
+ [outptr5] "+r" (outptr5),
+ [outptr6] "+r" (outptr6),
+ [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6"
+ );
+ } else {
+ /* Optimized routine to copy an entire block */
+ __asm __volatile (
+ // Row 0
+ ASM_PREFETCH("[%x[outptr1], #192]")
+ "ldr q3, [%x[outptr0]]\n"
+ "ldr q4, [%x[outptr0], #0x10]\n"
+ "ldr q5, [%x[outptr0], #0x20]\n"
+ "ldr q6, [%x[inptr]]\n"
+ "ldr q7, [%x[inptr], #0x10]\n"
+ "ldr q8, [%x[inptr], #0x20]\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "ldr q0, [%x[outptr1]]\n"
+ "add v4.4s, v4.4s, v7.4s\n"
+ "ldr q1, [%x[outptr1], #0x10]\n"
+ "add v5.4s, v5.4s, v8.4s\n"
+ "ldr q2, [%x[outptr1], #0x20]\n"
- // Row 4
- ASM_PREFETCH("[%x[outptr5], #192]")
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0xc0]\n"
- "str q0, [%x[outptr3]], #0x10\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0xd0]\n"
- "str q1, [%x[outptr3]], #0x10\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0xe0]\n"
- "str q2, [%x[outptr3]], #0x10\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr5]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr5], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr5], #0x20]\n"
+ // Row 1
+ ASM_PREFETCH("[%x[outptr2], #192]")
+ "ldr q6, [%x[inptr], #0x30]\n"
+ "str q3, [%x[outptr0]], #0x10\n"
+ "ldr q7, [%x[inptr], #0x40]\n"
+ "str q4, [%x[outptr0]], #0x10\n"
+ "ldr q8, [%x[inptr], #0x50]\n"
+ "str q5, [%x[outptr0]], #0x10\n"
+ "add v0.4s, v0.4s, v6.4s\n"
+ "ldr q3, [%x[outptr2]]\n"
+ "add v1.4s, v1.4s, v7.4s\n"
+ "ldr q4, [%x[outptr2], #0x10]\n"
+ "add v2.4s, v2.4s, v8.4s\n"
+ "ldr q5, [%x[outptr2], #0x20]\n"
- // Row 5
- ASM_PREFETCH("[%x[outptr6], #192]")
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0xf0]\n"
- "str q3, [%x[outptr4]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x100]\n"
- "str q4, [%x[outptr4]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x110]\n"
- "str q5, [%x[outptr4]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q3, [%x[outptr6]]\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q4, [%x[outptr6], #0x10]\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q5, [%x[outptr6], #0x20]\n"
+ // Row 2
+ ASM_PREFETCH("[%x[outptr3], #192]")
+ "ldr q6, [%x[inptr], #0x60]\n"
+ "str q0, [%x[outptr1]], #0x10\n"
+ "ldr q7, [%x[inptr], #0x70]\n"
+ "str q1, [%x[outptr1]], #0x10\n"
+ "ldr q8, [%x[inptr], #0x80]\n"
+ "str q2, [%x[outptr1]], #0x10\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "ldr q0, [%x[outptr3]]\n"
+ "add v4.4s, v4.4s, v7.4s\n"
+ "ldr q1, [%x[outptr3], #0x10]\n"
+ "add v5.4s, v5.4s, v8.4s\n"
+ "ldr q2, [%x[outptr3], #0x20]\n"
- // Row 6
- ASM_PREFETCH("[%x[outptr7], #192]")
- "mul v3.4s, v3.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x120]\n"
- "str q0, [%x[outptr5]], #0x10\n"
- "mul v4.4s, v4.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x130]\n"
- "str q1, [%x[outptr5]], #0x10\n"
- "mul v5.4s, v5.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x140]\n"
- "str q2, [%x[outptr5]], #0x10\n"
- "mla v3.4s, v6.4s, %[alpha_value].4s\n"
- "ldr q0, [%x[outptr7]]\n"
- "mla v4.4s, v7.4s, %[alpha_value].4s\n"
- "ldr q1, [%x[outptr7], #0x10]\n"
- "mla v5.4s, v8.4s, %[alpha_value].4s\n"
- "ldr q2, [%x[outptr7], #0x20]\n"
+ // Row 3
+ ASM_PREFETCH("[%x[outptr4], #192]")
+ "ldr q6, [%x[inptr], #0x90]\n"
+ "str q3, [%x[outptr2]], #0x10\n"
+ "ldr q7, [%x[inptr], #0xa0]\n"
+ "str q4, [%x[outptr2]], #0x10\n"
+ "ldr q8, [%x[inptr], #0xb0]\n"
+ "str q5, [%x[outptr2]], #0x10\n"
+ "add v0.4s, v0.4s, v6.4s\n"
+ "ldr q3, [%x[outptr4]]\n"
+ "add v1.4s, v1.4s, v7.4s\n"
+ "ldr q4, [%x[outptr4], #0x10]\n"
+ "add v2.4s, v2.4s, v8.4s\n"
+ "ldr q5, [%x[outptr4], #0x20]\n"
- // Row 7
- "mul v0.4s, v0.4s, %[beta_value].4s\n"
- "ldr q6, [%x[inptr], #0x150]\n"
- "str q3, [%x[outptr6]], #0x10\n"
- "mul v1.4s, v1.4s, %[beta_value].4s\n"
- "ldr q7, [%x[inptr], #0x160]\n"
- "str q4, [%x[outptr6]], #0x10\n"
- "mul v2.4s, v2.4s, %[beta_value].4s\n"
- "ldr q8, [%x[inptr], #0x170]\n"
- "str q5, [%x[outptr6]], #0x10\n"
- "mla v0.4s, v6.4s, %[alpha_value].4s\n"
- "mla v1.4s, v7.4s, %[alpha_value].4s\n"
- "mla v2.4s, v8.4s, %[alpha_value].4s\n"
- "str q0, [%x[outptr7]], #0x10\n"
- "str q1, [%x[outptr7]], #0x10\n"
- "str q2, [%x[outptr7]], #0x10\n"
+ // Row 4
+ ASM_PREFETCH("[%x[outptr5], #192]")
+ "ldr q6, [%x[inptr], #0xc0]\n"
+ "str q0, [%x[outptr3]], #0x10\n"
+ "ldr q7, [%x[inptr], #0xd0]\n"
+ "str q1, [%x[outptr3]], #0x10\n"
+ "ldr q8, [%x[inptr], #0xe0]\n"
+ "str q2, [%x[outptr3]], #0x10\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "ldr q0, [%x[outptr5]]\n"
+ "add v4.4s, v4.4s, v7.4s\n"
+ "ldr q1, [%x[outptr5], #0x10]\n"
+ "add v5.4s, v5.4s, v8.4s\n"
+ "ldr q2, [%x[outptr5], #0x20]\n"
- "add %x[inptr], %x[inptr], #0x180\n"
- : [outptr0] "+r" (outptr0),
- [outptr1] "+r" (outptr1),
- [outptr2] "+r" (outptr2),
- [outptr3] "+r" (outptr3),
- [outptr4] "+r" (outptr4),
- [outptr5] "+r" (outptr5),
- [outptr6] "+r" (outptr6),
- [outptr7] "+r" (outptr7),
- [inptr] "+r" (inptr)
- : [alpha_value] "w" (alpha_value),
- [beta_value] "w" (beta_value)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"
- );
+ // Row 5
+ ASM_PREFETCH("[%x[outptr6], #192]")
+ "ldr q6, [%x[inptr], #0xf0]\n"
+ "str q3, [%x[outptr4]], #0x10\n"
+ "ldr q7, [%x[inptr], #0x100]\n"
+ "str q4, [%x[outptr4]], #0x10\n"
+ "ldr q8, [%x[inptr], #0x110]\n"
+ "str q5, [%x[outptr4]], #0x10\n"
+ "add v0.4s, v0.4s, v6.4s\n"
+ "ldr q3, [%x[outptr6]]\n"
+ "add v1.4s, v1.4s, v7.4s\n"
+ "ldr q4, [%x[outptr6], #0x10]\n"
+ "add v2.4s, v2.4s, v8.4s\n"
+ "ldr q5, [%x[outptr6], #0x20]\n"
+
+ // Row 6
+ ASM_PREFETCH("[%x[outptr7], #192]")
+ "ldr q6, [%x[inptr], #0x120]\n"
+ "str q0, [%x[outptr5]], #0x10\n"
+ "ldr q7, [%x[inptr], #0x130]\n"
+ "str q1, [%x[outptr5]], #0x10\n"
+ "ldr q8, [%x[inptr], #0x140]\n"
+ "str q2, [%x[outptr5]], #0x10\n"
+ "add v3.4s, v3.4s, v6.4s\n"
+ "ldr q0, [%x[outptr7]]\n"
+ "add v4.4s, v4.4s, v7.4s\n"
+ "ldr q1, [%x[outptr7], #0x10]\n"
+ "add v5.4s, v5.4s, v8.4s\n"
+ "ldr q2, [%x[outptr7], #0x20]\n"
+
+ // Row 7
+ "ldr q6, [%x[inptr], #0x150]\n"
+ "str q3, [%x[outptr6]], #0x10\n"
+ "ldr q7, [%x[inptr], #0x160]\n"
+ "str q4, [%x[outptr6]], #0x10\n"
+ "ldr q8, [%x[inptr], #0x170]\n"
+ "str q5, [%x[outptr6]], #0x10\n"
+ "add v0.4s, v0.4s, v6.4s\n"
+ "add v1.4s, v1.4s, v7.4s\n"
+ "add v2.4s, v2.4s, v8.4s\n"
+ "str q0, [%x[outptr7]], #0x10\n"
+ "str q1, [%x[outptr7]], #0x10\n"
+ "str q2, [%x[outptr7]], #0x10\n"
+
+ "add %x[inptr], %x[inptr], #0x180\n"
+ : [outptr0] "+r" (outptr0),
+ [outptr1] "+r" (outptr1),
+ [outptr2] "+r" (outptr2),
+ [outptr3] "+r" (outptr3),
+ [outptr4] "+r" (outptr4),
+ [outptr5] "+r" (outptr5),
+ [outptr6] "+r" (outptr6),
+ [outptr7] "+r" (outptr7),
+ [inptr] "+r" (inptr)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"
+ );
+
+ }
}
}
}
@@ -279,7 +341,7 @@ inline void MergeResults<12, 8, false>(int32_t *out, const int32_t *in, const in
template<>
inline void MergeResults<12, 8>(uint32_t *out, const uint32_t *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const uint32_t alpha, const uint32_t beta) {
// Since the above code uses only MUL and MLA instructions discard the "unsignedness" and proceed safely.
- MergeResults<12, 8>(reinterpret_cast<int32_t*>(out), reinterpret_cast<const int32_t*>(in), ldout, y0, ymax, x0, xmax, static_cast<int32_t>(alpha), static_cast<int32_t>(beta));
+ MergeResults<12, 8>(reinterpret_cast<int32_t*>(out), reinterpret_cast<const int32_t*>(in), ldout, y0, ymax, x0, xmax, static_cast<const int32_t>(alpha), static_cast<const int32_t>(beta));
}
#endif // __aarch64__
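
The unsigned overload above forwards to the signed kernel via reinterpret_cast; this is safe for the copies and additions the merge performs because 32-bit signed and unsigned additions produce identical bit patterns. A minimal standalone check of that property (values chosen so the signed addition does not overflow, since signed overflow is undefined in C++):

#include <cassert>
#include <cstdint>

int main() {
    uint32_t ua = 0xFFFFFFF0u, ub = 0x25u;                  // unsigned operands
    int32_t  sa = static_cast<int32_t>(ua);                 // same bits, signed view
    int32_t  sb = static_cast<int32_t>(ub);
    uint32_t via_signed = static_cast<uint32_t>(sa + sb);   // add as signed, view as unsigned
    assert(via_signed == ua + ub);                          // identical result bits
    return 0;
}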
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp
new file mode 100644
index 0000000000..3c4f6d0b19
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_block4_8bit.hpp
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2017-2019 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#if defined(__aarch64__) && !defined(__ARM_FEATURE_SVE)
+
+#include <arm_neon.h>
+
+#include "../asmlib.hpp"
+
+template<>
+template<typename T>
+inline void TransformImpl<8, 4, false, 1, 1, false>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
+ uint8_t *outptr = reinterpret_cast<uint8_t *>(out);
+ const uint8_t *inptr = reinterpret_cast<const uint8_t *>(in);
+ bool first = true;
+
+ /* Helper functions to copy blocks about used for odd case. */
+ class t {
+ public:
+ static inline void copy_4_inc(uint8_t *&out, const uint8_t *&in) {
+ uint32_t *out_word = reinterpret_cast<uint32_t *>(out);
+ const uint32_t *in_word = reinterpret_cast<const uint32_t *>(in);
+
+ *out_word++ = *in_word++;
+
+ out = reinterpret_cast<uint8_t *>(out_word);
+ in = reinterpret_cast<const uint8_t *>(in_word);
+ }
+
+ static inline void copy_pad(uint8_t *&out, const uint8_t *&in, size_t count) {
+ for (unsigned int i=0; i<4; i++) {
+ if (i < count) {
+ *out++ = *in++;
+ } else {
+ *out++ = 0;
+ }
+ }
+ }
+ };
+
+ uint8_t zerobuff[64]; // 32 for asm loop plus up to 31 for overflow loop
+
+ for (int y=y0; y<ymax; y+=8) {
+ const uint8_t *inptr0 = inptr + y * ldin + k0;
+ const uint8_t *inptr1 = inptr0 + ldin;
+ const uint8_t *inptr2 = inptr1 + ldin;
+ const uint8_t *inptr3 = inptr2 + ldin;
+ const uint8_t *inptr4 = inptr3 + ldin;
+ const uint8_t *inptr5 = inptr4 + ldin;
+ const uint8_t *inptr6 = inptr5 + ldin;
+ const uint8_t *inptr7 = inptr6 + ldin;
+
+ prefetch_2x(inptr0);
+ prefetch_2x(inptr1);
+ prefetch_2x(inptr2);
+ prefetch_2x(inptr3);
+ prefetch_2x(inptr4);
+ prefetch_2x(inptr5);
+ prefetch_2x(inptr6);
+ prefetch_2x(inptr7);
+
+ int x=(kmax-k0);
+ for (;(x>31) || first;x-=32) {
+ /* Cope with ragged cases by copying from a buffer of zeroes instead */
+ /* 'first' forces this to always run at least once, needed if the total size is <=32. */
+ if ((y + 7) >= ymax) {
+ switch ((y + 7) - ymax) {
+ /* Everything falls through in here */
+ case 6:
+ inptr1 = zerobuff;
+ case 5:
+ inptr2 = zerobuff;
+ case 4:
+ inptr3 = zerobuff;
+ case 3:
+ inptr4 = zerobuff;
+ case 2:
+ inptr5 = zerobuff;
+ case 1:
+ inptr6 = zerobuff;
+ case 0:
+ inptr7 = zerobuff;
+ break;
+
+ default:
+ UNREACHABLE("Impossible.");
+ }
+ }
+
+ if (first) {
+ if (x<=31) {
+ break;
+ }
+
+ first = false;
+ }
+
+ __asm __volatile (
+ // Load up 8 elements (2 vectors) from each of 8 sources.
+ "LDP q0, q1, [%[inptr0]], #32\n" // q0=A0A1A2A3
+ "LDP q2, q3, [%[inptr1]], #32\n" // q2=B0B1B2B3
+ "LDP q4, q5, [%[inptr2]], #32\n" // q4=C0C1C2C3
+ "ZIP1 v16.4s, v0.4s, v4.4s\n" // q16=A0C0A1C1
+ ASM_PREFETCH("[%[inptr0], #128]")
+ "LDP q6, q7, [%[inptr3]], #32\n" // q6=D0D1D2D3
+ "ZIP1 v17.4s, v2.4s, v6.4s\n" // q17=B0D0B1D1
+ "LDP q8, q9, [%[inptr4]], #32\n"
+ "LDP q10, q11, [%[inptr5]], #32\n"
+ "LDP q12, q13, [%[inptr6]], #32\n"
+ "ZIP1 v18.4s, v8.4s, v12.4s\n"
+ ASM_PREFETCH("[%[inptr1], #128]")
+ "LDP q14, q15, [%[inptr7]], #32\n"
+ "ZIP1 v19.4s, v10.4s, v14.4s\n"
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n" // q20=A0B0C0D0
+ ASM_PREFETCH("[%[inptr2], #128]")
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+ "ZIP2 v16.4s, v0.4s, v4.4s\n"
+ ASM_PREFETCH("[%[inptr3], #128]")
+ "ZIP2 v17.4s, v2.4s, v6.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
+
+ "ZIP2 v18.4s, v8.4s, v12.4s\n"
+ "ZIP2 v19.4s, v10.4s, v14.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ ASM_PREFETCH("[%[inptr4], #128]")
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+ "ZIP1 v16.4s, v1.4s, v5.4s\n"
+ ASM_PREFETCH("[%[inptr5], #128]")
+ "ZIP1 v17.4s, v3.4s, v7.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Third element
+
+ "ZIP1 v18.4s, v9.4s, v13.4s\n"
+ "ZIP1 v19.4s, v11.4s, v15.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Fourth element
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ ASM_PREFETCH("[%[inptr6], #128]")
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+ "ZIP2 v16.4s, v1.4s, v5.4s\n"
+ "ZIP2 v17.4s, v3.4s, v7.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Fifth element
+
+ "ZIP2 v18.4s, v9.4s, v13.4s\n"
+ ASM_PREFETCH("[%[inptr7], #128]")
+ "ZIP2 v19.4s, v11.4s, v15.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Sixth element
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Seventh element
+
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Eighth element
+ : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+ [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7), [outptr] "+r" (outptr)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12",
+ "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "memory"
+ );
+ }
+
+ // Copy any leftover blocks of 4 a complete block at a time.
+ for (;x>4;x-=4) {
+ t::copy_4_inc(outptr, inptr0);
+ t::copy_4_inc(outptr, inptr1);
+ t::copy_4_inc(outptr, inptr2);
+ t::copy_4_inc(outptr, inptr3);
+ t::copy_4_inc(outptr, inptr4);
+ t::copy_4_inc(outptr, inptr5);
+ t::copy_4_inc(outptr, inptr6);
+ t::copy_4_inc(outptr, inptr7);
+ }
+
+ // Final block with padding, if any.
+ if (x > 0) {
+ t::copy_pad(outptr, inptr0, x);
+ t::copy_pad(outptr, inptr1, x);
+ t::copy_pad(outptr, inptr2, x);
+ t::copy_pad(outptr, inptr3, x);
+ t::copy_pad(outptr, inptr4, x);
+ t::copy_pad(outptr, inptr5, x);
+ t::copy_pad(outptr, inptr6, x);
+ t::copy_pad(outptr, inptr7, x);
+ }
+ }
+}
+
+#endif // __aarch64__ && !__ARM_FEATURE_SVE
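
The new transform reads eight rows at a time and emits them in 4-byte blocks: for each group of four input columns it writes four bytes from row 0, then four from row 1, and so on through row 7, zero-padding rows past ymax and any ragged tail of columns. A scalar reference sketch of that output layout is given below (an illustration under those assumptions, not the optimized NEON path; the helper name and the use of std::vector are mine):

#include <cstdint>
#include <vector>

// Scalar reference for an 8-way, block-of-4, 8-bit interleave: groups of 8 rows,
// emitted one 4-byte block per row per 4-column step, zero-padded at the edges.
static std::vector<uint8_t> interleave_8way_block4_sketch(const uint8_t *in, int ldin,
                                                          int height, int width) {
    std::vector<uint8_t> out;
    for (int y = 0; y < height; y += 8) {                   // groups of 8 rows
        for (int x = 0; x < width; x += 4) {                // blocks of 4 columns
            for (int r = 0; r < 8; r++) {                   // one 4-byte block per row
                for (int c = 0; c < 4; c++) {
                    int row = y + r, col = x + c;
                    uint8_t v = (row < height && col < width) ? in[row * ldin + col] : 0;
                    out.push_back(v);
                }
            }
        }
    }
    return out;
}

Laying the data out this way lets a dot-product kernel consume aligned 4-byte column blocks from all eight rows with consecutive loads, which is the point of the block-4 interleave.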
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
index 9cd5983ce0..c0c2ca19d7 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/list.hpp
@@ -26,6 +26,7 @@
#include "a64_block16_interleave4_8bit.hpp"
#include "a64_interleave_8way_16bit.hpp"
#include "a64_interleave_8way_32bit.hpp"
+#include "a64_interleave_8way_block4_8bit.hpp"
#include "a64_interleave_8way_half_to_float.hpp"
#include "a64_transpose_interleave_12way_16bit.hpp"
#include "a64_transpose_interleave_12way_half_to_float.hpp"