author     Pablo Tello <pablo.tello@arm.com>    2017-11-02 16:09:35 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit     6ff12a0f7765f62b8d0fa8554021e1cac2789f19 (patch)
tree       9338db697789106b49ea391634be8b3c08ef9f97
parent     97988a4b3ef0f840432daf95b6e4b2ad7e5feefd (diff)
download   ComputeLibrary-6ff12a0f7765f62b8d0fa8554021e1cac2789f19.tar.gz
COMPMID-662: Integrated the new a64_s8_gemm_12x8 + dot product kernel into ACL.
Change-Id: Id8f919e486a132fc58346c9f84fccbeeb83d19b3
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94233
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
-rw-r--r--  arm_compute/core/NEON/NEKernels.h | 1
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h | 2
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h | 78
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h | 2
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h | 8
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h | 2
-rw-r--r--  arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h | 6
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp | 13
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp | 61
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/a55r1.hpp | 367
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/dot_toolchain_support.h | 66
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/generic.hpp | 363
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/profiler.hpp | 18
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowp.h | 8
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h | 1
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp | 38
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp | 130
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp | 174
-rw-r--r--  src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp | 483
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp | 2
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowp.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 86
-rw-r--r--  tests/validation/CPP/GEMMLowp.cpp | 8
-rw-r--r--  tests/validation/CPP/GEMMLowp.h | 2
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp | 26
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h | 46
26 files changed, 1189 insertions(+), 806 deletions(-)
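
For readers unfamiliar with the instruction this change is built around: the new s8 kernels use the Armv8.2-A SDOT instruction, which accumulates a 4-way int8 dot product into each int32 lane of the destination. A scalar sketch of the by-element form used in the kernels below (an illustration, not part of the patch):

    #include <cstdint>

    // Scalar model of "sdot vd.4s, vn.16b, vm.4b[idx]": every int32 lane of vd
    // accumulates the dot product of four int8 values from vn with the four
    // int8 values selected from vm by idx.
    static void sdot_lane_model(int32_t vd[4], const int8_t vn[16], const int8_t vm[16], int idx)
    {
        for(int lane = 0; lane < 4; lane++)
        {
            int32_t acc = 0;
            for(int i = 0; i < 4; i++)
            {
                acc += static_cast<int32_t>(vn[lane * 4 + i]) * static_cast<int32_t>(vm[idx * 4 + i]);
            }
            vd[lane] += acc;
        }
    }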
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 918dfc6914..8dedf38b3e 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -61,7 +61,6 @@
#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h b/arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h
index e298bfdebd..9e0fe8059b 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h
@@ -30,7 +30,7 @@ namespace arm_compute
{
class ITensor;
-/** AssemblyBase/armv7a NEON kernel to multiply two input matrices "A" and "B". */
+/** Base class for GEMM NEON kernels implemented in Assembly. */
class NEGEMMAssemblyBaseKernel : public INEKernel
{
public:
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h
deleted file mode 100644
index 32105ad6d4..0000000000
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__
-#define __ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__
-
-#include "arm_compute/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** GEMMLOWP AssemblyBase NEON kernel to multiply two input matrices "A" and "B". */
-class NEGEMMLowpAssemblyBaseKernel : public INEKernel
-{
-public:
- /** Constructor */
- NEGEMMLowpAssemblyBaseKernel()
- : _input0(nullptr), _input1(nullptr), _output(nullptr), _workspace(nullptr), _transform_0(true), _transform_1(true)
- {
- }
-
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEGEMMLowpAssemblyBaseKernel(const NEGEMMLowpAssemblyBaseKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
- NEGEMMLowpAssemblyBaseKernel &operator=(const NEGEMMLowpAssemblyBaseKernel &) = delete;
- /** Allow instances of this class to be moved */
- NEGEMMLowpAssemblyBaseKernel(NEGEMMLowpAssemblyBaseKernel &&) = default;
- /** Allow instances of this class to be moved */
- NEGEMMLowpAssemblyBaseKernel &operator=(NEGEMMLowpAssemblyBaseKernel &&) = default;
-
- virtual ~NEGEMMLowpAssemblyBaseKernel() = default;
-
- /** Initialise the kernel's input and output.
- *
- * The computed function is C = a * AxB + b * C.
- *
- * @param[in] input0 Input tensor containing the Matrix A. Data types supported: F32
- * @param[in] input1 Input tensor containing the Matrix B. Data types supported: same as @p input0
- * @param[in,out] output Output tensor to store the result of matrix multiplication. If @p beta is not zero the values are multiplied by @p beta before the result is accumulated. Otherwise the values are overwritten by the result. Data types supported: same as @p input0.
- */
- void configure(const ITensor *input0, const ITensor *input1, ITensor *output)
- {
- internal_configure(input0, input1, output);
- }
-
-protected:
- virtual void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) = 0;
-
- const ITensor *_input0;
- const ITensor *_input1;
- ITensor *_output;
- ITensor *_workspace;
- bool _transform_0;
- bool _transform_1;
-};
-} // namespace arm_compute
-#endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYBASE_H__*/
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
index 77b2bdc177..8908fabc1e 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
@@ -62,7 +62,7 @@ public:
* @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of input0.
* Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p vector_sum_col
* @param[in] mm_result Input tensor containing the result of @ref NEGEMMLowpMatrixMultiplyKernel. Data type supported: same as @p vector_sum_col
- * @param[out] output Output tensor containing the result of GEMMLowP. Data type supported: U8
+ * @param[out] output Output tensor containing the result of GEMMLowP. Data type supported: S8
* @param[in] num_mtx_a_cols Number of matrix A columns
* @param[in] a_offset Offset to be added to each element of the matrix A.
* @param[in] b_offset Offset to be added to each element of the matrix B.
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index 670274b8f3..f145eb6ca3 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -35,8 +35,8 @@ class ITensor;
* @note @ref NEGEMMLowpMatrixMultiplyKernel low precision matrix product kernel
* This kernel performs the following computation:
*
- * -# Convert a values from uint8 to int32
- * -# Convert b values from uint8 to int32
+ * -# Convert a values from int8 to int32
+ * -# Convert b values from int8 to int32
* -# Compute the int32 matrix product of the resulting a * b and store the result as int32
*
*/
@@ -58,7 +58,7 @@ public:
* The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two
* kernels change the layout of the original matrices to be more cache-friendly.
*
- * @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: U8
+ * @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: S8
* @param[in] input1 Input tensor containing the transposed Matrix B. Data type supported: same as @p input0
* @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
*/
@@ -74,4 +74,4 @@ private:
bool _slide_matrix_b;
};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYKERNEL_H__*/ \ No newline at end of file
+#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYKERNEL_H__*/
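
A scalar reference of the computation documented above (int8 operands widened and accumulated into int32) may help when reading the interleaved/transposed code paths. This is a sketch with illustrative names, not library API:

    #include <cstdint>

    // C (M x N, int32) = A (M x K, int8) * B (K x N, int8), with plain row-major
    // operands, i.e. before any interleaving/transposing of the inputs.
    static void gemm_s8_reference(const int8_t *a, const int8_t *b, int32_t *c, int M, int N, int K)
    {
        for(int m = 0; m < M; m++)
        {
            for(int n = 0; n < N; n++)
            {
                int32_t acc = 0;
                for(int k = 0; k < K; k++)
                {
                    acc += static_cast<int32_t>(a[m * K + k]) * static_cast<int32_t>(b[k * N + n]);
                }
                c[m * N + n] = acc;
            }
        }
    }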
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
index 143e8b917b..a069969681 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -48,7 +48,7 @@ public:
public:
/** Initialise the kernel's input and output.
*
- * @param[in] input Input tensor containing the interleaved or transposed matrix. Data type supported: U8
+ * @param[in] input Input tensor containing the interleaved or transposed matrix. Data type supported: S8
* @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
* @param[in] k Number of matrix A columns (or matrix B rows)
* @param[in] is_reshaped True if the input tensor has been reshaped
diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
index 4eab7f91fc..32779891db 100644
--- a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h
@@ -24,7 +24,7 @@
#ifndef __ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__
#define __ARM_COMPUTE_NEGEMMLOWPAARCH64V8P4KERNEL_H__
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
// Enable only if compiled for AArch64-V8.2-A targets
#ifdef ARM_COMPUTE_AARCH64_V8_2
@@ -34,7 +34,7 @@ namespace arm_compute
class ITensor;
/** AArch64 NEON kernel to multiply two input matrices "A" and "B". */
-class NEGEMMLowpAArch64V8P4Kernel : public NEGEMMLowpAssemblyBaseKernel
+class NEGEMMLowpAArch64V8P4Kernel : public NEGEMMAssemblyBaseKernel
{
public:
// Inherited methods overridden:
@@ -42,7 +42,7 @@ public:
bool is_parallelisable() const override;
protected:
- void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output) override;
+ void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) override;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_AARCH64_V8_2 */
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp
index a186d88355..659ef837f5 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp
@@ -24,6 +24,7 @@
#pragma once
#include <stdio.h>
+#include <cassert>
#include "gemm_common.hpp"
#include "profiler.hpp"
@@ -114,12 +115,13 @@ public:
// Work out the rounded size of M - needed for some buffers.
Mround = (M + (strat.out_height - 1)) / strat.out_height;
Mround *= strat.out_height;
+
}
// Actually execute the GEMM.
void execute(const To *A, const int lda, const To *B, const int ldb, Tr *C, const int ldc, const Tr alpha, const Tr beta, void *working_space) const override {
+ assert(working_space);
profiler prof;
-
int8_t *working_space_bytes = reinterpret_cast<int8_t *>(working_space);
intptr_t working_space_int = reinterpret_cast<intptr_t>(working_space_bytes);
size_t diff = 0;
@@ -128,7 +130,6 @@ public:
diff = 0x10 - (working_space_int & 0xF);
}
- // TODO: Multithreaded implementations could share the burden of transforming these blocks.
Toi * const a_panel = reinterpret_cast<Toi *>(working_space_bytes + diff);
Toi * const b_panel = reinterpret_cast<Toi *>(working_space_bytes + get_a_working_size() + diff);
Tri * const c_panel = reinterpret_cast<Tri *>(working_space_bytes + get_a_working_size() + get_b_working_size() + diff);
@@ -141,7 +142,7 @@ public:
int kern_k = ((kmax - k0) + (strat.k_unroll - 1)) / strat.k_unroll;
kern_k *= strat.k_unroll;
- prof(PROFILE_PREPA, [&](void) {
+ prof(PROFILE_PREPA, (M * (kmax-k0) * sizeof(Toi)), [&](void) {
if (trA ^ strategy::A_transpose) {
Transform<strategy::A_interleave, strategy::A_block, true>(a_panel, A, lda, 0, M, k0, kmax);
} else {
@@ -155,7 +156,7 @@ public:
int bblocks = (xmax - x0 + strat.out_width - 1) / strat.out_width;
- prof(PROFILE_PREPB, [&](void) {
+ prof(PROFILE_PREPB, (xmax-x0) * (kmax-k0) * sizeof(Toi), [&](void) {
if (trB ^ strategy::B_transpose) {
Transform<strategy::B_interleave, strategy::B_block, true>(b_panel, B, ldb, x0, xmax, k0, kmax);
} else {
@@ -167,8 +168,8 @@ public:
unsigned int ymax = y + strat.out_height;
if (ymax > M) ymax = M;
- prof(PROFILE_KERNEL, [&](void) { strat.kernel(a_panel + (y * kern_k), b_panel, c_panel, 1, bblocks, kern_k); });
- prof(PROFILE_MERGE, [&](void) { MergeResults<strategy::out_width, strategy::out_height>(C, c_panel, ldc, y, ymax, x0, xmax, alpha, (k0==0 ? beta : static_cast<Tr>(1))); });
+ prof(PROFILE_KERNEL, (strat.out_height * bblocks * strat.out_width * kern_k), [&](void) { strat.kernel(a_panel + (y * kern_k), b_panel, c_panel, 1, bblocks, kern_k); });
+ prof(PROFILE_MERGE, (strat.out_height * bblocks * strat.out_width * sizeof(Tr)), [&](void) { MergeResults<strategy::out_width, strategy::out_height>(C, c_panel, ldc, y, ymax, x0, xmax, alpha, (k0==0 ? beta : static_cast<Tr>(1))); });
}
}
}
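
The execute() hunk above now asserts that a working space was provided and then carves it into the A, B and C panels after rounding the base address up to a 16-byte boundary. A small sketch of that address arithmetic, with hypothetical byte-size parameters standing in for get_a_working_size() and get_b_working_size():

    #include <cstddef>
    #include <cstdint>

    struct PanelPointers
    {
        int8_t  *a_panel;
        int8_t  *b_panel;
        int32_t *c_panel;
    };

    // Mirrors the pointer set-up in GemmInterleaved::execute(): align to 16 bytes,
    // then place the packed A panel, packed B panel and the int32 result panel
    // back to back inside the caller-provided working space.
    static PanelPointers carve_workspace(void *working_space, size_t a_bytes, size_t b_bytes)
    {
        int8_t        *bytes = reinterpret_cast<int8_t *>(working_space);
        const intptr_t addr  = reinterpret_cast<intptr_t>(bytes);
        const size_t   diff  = (addr & 0xF) ? (0x10 - (addr & 0xF)) : 0;

        PanelPointers p;
        p.a_panel = bytes + diff;
        p.b_panel = bytes + diff + a_bytes;
        p.c_panel = reinterpret_cast<int32_t *>(bytes + diff + a_bytes + b_bytes);
        return p;
    }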
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp
new file mode 100644
index 0000000000..88cbb361b3
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+// Load the actual kernel
+#include "a64_gemm_s8_12x8/generic.hpp"
+
+class gemm_s8_12x8 {
+public:
+ typedef int8_t operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int);
+
+ /* Describes the data layout for A input */
+ static const int A_interleave = 8;
+ static const int A_block = 4;
+ static const bool A_transpose = false;
+
+ /* Same for B input */
+ static const int B_interleave = 12;
+ static const int B_block = 4;
+ static const bool B_transpose = true;
+
+ /* Kernel blocking parameters */
+ static const int out_width = 12;
+ static const int out_height = 8;
+ static const int k_unroll = 4;
+
+ kern_type kernel = nullptr;
+
+ gemm_s8_12x8(const CPUInfo *ci) {
+ kernel = a64_gemm_s8_12x8;
+ }
+};
+
+#endif // __aarch64__
+
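
The class above is a pure description of the kernel's blocking: a 12x8 output tile, operands packed in groups of four int8 values, and a kernel function pointer chosen at construction time. A hedged sketch of how such a strategy is consumed by the interleaved GEMM wrapper follows; the constructor arguments and get_working_size() call are assumptions based on gemm_interleaved.hpp, not a documented API:

    #include "gemm_interleaved.hpp"
    #include "kernels/a64_gemm_s8_12x8.hpp"

    // GemmInterleaved reads out_width/out_height/k_unroll and the A/B interleave
    // and block factors from the strategy, packs the operands accordingly, and
    // calls strat.kernel(...) on each 12x8 tile.
    void run_s8_gemm(const CPUInfo *ci, const int8_t *A, int lda, const int8_t *B, int ldb,
                     int32_t *C, int ldc, unsigned int M, unsigned int N, unsigned int K, void *workspace)
    {
        GemmInterleaved<gemm_s8_12x8, int8_t, int32_t> gemm(ci, M, N, K, false, false);
        // workspace is assumed to hold at least gemm.get_working_size() bytes.
        gemm.execute(A, lda, B, ldb, C, ldc, 1, 0, workspace);
    }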
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/a55r1.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/a55r1.hpp
new file mode 100644
index 0000000000..5ed930c0b0
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/a55r1.hpp
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+#include "dot_toolchain_support.h"
+#include <cassert>
+
+void a64_gemm_s8_12x8_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K) {
+ assert(Apanel);
+ assert(Bpanel);
+ assert(Cpanel);
+ K/=4;
+ const long int row_jump=0;
+ const long int block_jump=0;
+ const int32_t *a_ptr = reinterpret_cast<const int32_t*>(Apanel);
+ int32_t *c_ptr = reinterpret_cast<int32_t*>(Cpanel);
+ for (int yb=0; yb<ablocks; yb++) {
+ const int32_t *a_ptr0 = a_ptr;
+ const int32_t *b_ptr = reinterpret_cast<const int32_t*>(Bpanel);
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ // Fix up for odd lengths - set a flag if K is odd, but make
+ // sure we round up the iteration count.
+ int oddk = (K & 1);
+ int k = ((K+1)/2) - 1;
+ register int32x4_t a0 asm("v0");
+ register int32x4_t a1 asm("v1");
+ register int32x4_t b0 asm("v2");
+ register int32x4_t b1 asm("v3");
+ register int32x4_t b2 asm("v4");
+ register int32x4_t a0a asm("v5");
+ register int32x4_t a1a asm("v6");
+
+ __asm __volatile (
+
+ // Initialize result registers, load initial operands, prime prefetches.
+ "movi v8.4s, #0x0\n"
+ "ldp %q[a0], %q[a1], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldp %q[b0], %q[b1], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #64]")
+ "movi v13.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #64]")
+ "movi v14.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #128]")
+ "movi v15.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #128]")
+ "movi v16.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #192]")
+ "movi v17.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #256]")
+ "movi v18.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #192]")
+ "movi v19.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #320]")
+ "movi v20.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #256]")
+ "movi v21.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #384]")
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+
+ // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n"
+
+ _DECLARE_SDOT
+
+ // Loop proper
+ "1:\n"
+ "sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
+
+ "sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+
+ "sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ ASM_PREFETCH("[%[a_ptr], #320]")
+ "sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ ASM_PREFETCH("[%[b_ptr], #448]")
+ "sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
+ "sdot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
+ "ldr %q[a0], [%[a_ptr], #64]\n"
+ "sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
+ "sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
+ "ldr %q[a1], [%[a_ptr], #80]\n"
+ "sdot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
+ "sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
+ "sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
+
+ "sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
+ ASM_PREFETCH("[%[b_ptr], #512]")
+ "sdot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+ "sdot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
+ "sdot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
+ "sdot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
+ "sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
+ "sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
+
+ "sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+ "sdot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+ "sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+ "sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+ "sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+ "bne 1b\n"
+
+ // Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
+ "4:\n"
+
+ // Branch to alternative tail for odd K
+ "cbnz %w[oddk], 2f\n"
+
+ // Detached final iteration (even K)
+ "sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ "sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
+
+ "add %[b_ptr], %[b_ptr], %[block_jump]\n"
+ "sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "sdot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+ "str q24, [%[c_ptr], #32]\n"
+
+ "sdot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "sdot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "sdot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "sdot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
+
+ "sdot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "sdot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
+
+ "b 3f\n"
+
+ // Detached final iteration (odd K)
+ "2:\n"
+ "sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+
+ "sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
+
+ "sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
+
+
+ // Common tail
+ "3:\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
+
+
+
+ ".purgem sdot\n"
+ :
+ [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+ [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+ : [oddk] "r" (oddk), [row_jump] "r" (row_jump), [block_jump] "r" (block_jump)
+ : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
+ "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
+ );
+
+
+ }
+ }
+
+}
+
+#endif
+
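
For readers who would rather not trace the inline assembly, the following is a scalar sketch of the arithmetic both the A55r1-tuned and the generic 12x8 kernels perform, assuming the packed panel layout implied by the strategy (per group of 4 k-values: 8 rows x 4 int8 in the A panel, 12 columns x 4 int8 in the B panel) and a row-major 8x12 int32 output tile:

    #include <cstdint>

    // Scalar model of one 12x8 tile. K_groups corresponds to K/4 in the assembly
    // kernels; c_panel is overwritten, matching the movi-initialised accumulators.
    static void tile_12x8_model(const int8_t *a_panel, const int8_t *b_panel,
                                int32_t *c_panel, int K_groups)
    {
        int32_t acc[8][12] = {};
        for(int g = 0; g < K_groups; g++)
        {
            const int8_t *a = a_panel + g * 8 * 4;  // 8 rows, 4 int8 values each
            const int8_t *b = b_panel + g * 12 * 4; // 12 cols, 4 int8 values each
            for(int r = 0; r < 8; r++)
            {
                for(int c = 0; c < 12; c++)
                {
                    for(int i = 0; i < 4; i++)
                    {
                        acc[r][c] += static_cast<int32_t>(a[r * 4 + i]) * static_cast<int32_t>(b[c * 4 + i]);
                    }
                }
            }
        }
        for(int r = 0; r < 8; r++)
        {
            for(int c = 0; c < 12; c++)
            {
                c_panel[r * 12 + c] = acc[r][c];
            }
        }
    }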
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/dot_toolchain_support.h b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/dot_toolchain_support.h
new file mode 100644
index 0000000000..1d6fd1623e
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/dot_toolchain_support.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+// Define a macro to assemble the SDOT instruction (in the absence of toolchain support)
+#define _DECLARE_SDOT ".altmacro\n"\
+ ".macro sdot opd:req, opn:req, opm:req\n"\
+ "local vd, vn, vm, h, l\n"\
+ ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n"\
+ ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n"\
+ ".set vd,\\reg\n"\
+ ".endif\n"\
+ ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n"\
+ ".set vn,\\reg\n"\
+ ".endif\n"\
+ ".irp idx,0,1,2,3\n"\
+ ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n"\
+ ".set vm,\\reg\n"\
+ ".set h,\\idx / 2\n"\
+ ".set l,\\idx %% 2\n"\
+ ".endif\n"\
+ ".endr\n"\
+ ".endr\n"\
+ ".ifndef vd\n"\
+ ".error \"Bad operand \\opd\"\n"\
+ ".exitm\n"\
+ ".endif\n"\
+ ".ifndef vn\n"\
+ ".error \"Bad operand \\opn\"\n"\
+ ".exitm\n"\
+ ".endif\n"\
+ ".ifndef vm\n"\
+ ".error \"Bad operand \\opm\"\n"\
+ ".exitm\n"\
+ ".endif\n"\
+ ".ifndef h\n"\
+ ".error \"Bad operand \\opm\"\n"\
+ ".exitm\n"\
+ ".endif\n"\
+ ".ifndef l\n"\
+ ".error \"Bad operand \\opm\"\n"\
+ ".exitm\n"\
+ ".endif\n"\
+ ".int 0x4f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n"\
+ ".endm\n"\
+
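
The macro above hand-assembles the by-element SDOT encoding because older assemblers do not accept the mnemonic. A small C++ sketch of the same bit packing, useful for sanity-checking the .int expression (illustration only):

    #include <cstdint>

    // Mirrors ".int 0x4f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)"
    // for "sdot vd.4s, vn.16b, vm.4b[idx]", where idx is split into h = idx / 2
    // and l = idx % 2 exactly as in the assembler macro.
    static uint32_t encode_sdot_lane(unsigned vd, unsigned vn, unsigned vm, unsigned idx)
    {
        const unsigned h = idx / 2;
        const unsigned l = idx % 2;
        return 0x4f80e000u | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11);
    }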
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/generic.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/generic.hpp
new file mode 100644
index 0000000000..bfad0373b2
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8/generic.hpp
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+#include "dot_toolchain_support.h"
+#include <cassert>
+
+
+inline void a64_gemm_s8_12x8(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel, int ablocks, int bblocks, int K) {
+ assert(Apanel);
+ assert(Bpanel);
+ assert(Cpanel);
+ K/=4;
+ const long int row_jump=0;
+ const long int block_jump=0;
+ const int32_t *a_ptr = reinterpret_cast<const int32_t*>(Apanel);
+ int32_t *c_ptr = reinterpret_cast<int32_t*>(Cpanel);
+ for (int yb=0; yb<ablocks; yb++) {
+ const int32_t *a_ptr0 = a_ptr;
+ const int32_t *b_ptr = reinterpret_cast<const int32_t*>(Bpanel);
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+ // Fix up for odd lengths - set a flag if K is odd, but make
+ // sure we round up the iteration count.
+ int oddk = (K & 1);
+ int k = ((K+1)/2) - 1;
+ register int32x4_t a0 asm("v0");
+ register int32x4_t a1 asm("v1");
+ register int32x4_t b0 asm("v2");
+ register int32x4_t b1 asm("v3");
+ register int32x4_t b2 asm("v4");
+ register int32x4_t a0a asm("v5");
+ register int32x4_t a1a asm("v6");
+ __asm __volatile (
+ // Initialize result registers, load initial operands, prime prefetches.
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #64]")
+ "movi v13.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #64]")
+ "movi v14.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #128]")
+ "movi v15.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #128]")
+ "movi v16.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #192]")
+ "movi v17.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #256]")
+ "movi v18.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #192]")
+ "movi v19.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #320]")
+ "movi v20.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #256]")
+ "movi v21.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #384]")
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+
+ // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n"
+
+ _DECLARE_SDOT
+
+ // Loop proper
+ "1:\n"
+ "sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ ASM_PREFETCH("[%[a_ptr], #320]")
+ "sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ ASM_PREFETCH("[%[b_ptr], #448]")
+ "sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
+ "sdot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
+ "ldr %q[a0], [%[a_ptr], #64]\n"
+ "sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
+ "sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
+ "ldr %q[a1], [%[a_ptr], #80]\n"
+ "sdot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
+ "sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
+ "sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
+
+ "sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
+ ASM_PREFETCH("[%[b_ptr], #512]")
+ "sdot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+ "sdot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
+ "sdot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
+ "sdot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
+ "sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
+ "sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
+
+ "sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+ "sdot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+ "sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+ "sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+ "sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+ "bne 1b\n"
+
+ // Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
+ "4:\n"
+
+ // Branch to alternative tail for odd K
+ "cbnz %w[oddk], 2f\n"
+
+ // Detached final iteration (even K)
+ "sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ "sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
+
+ "add %[b_ptr], %[b_ptr], %[block_jump]\n"
+ "sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "sdot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+ "str q24, [%[c_ptr], #32]\n"
+
+ "sdot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "sdot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "sdot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "sdot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
+
+ "sdot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "sdot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
+
+ "b 3f\n"
+
+ // Detached final iteration (odd K)
+ "2:\n"
+ "sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "add %[b_ptr], %[b_ptr], %[row_jump]\n"
+ "sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+
+ "sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
+
+ "sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
+
+
+ // Common tail
+ "3:\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
+
+ ".purgem sdot\n"
+ :
+ [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
+ [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [k] "+r" (k)
+ : [oddk] "r" (oddk), [row_jump] "r" (row_jump), [block_jump] "r" (block_jump)
+ : "x20", "x21", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18",
+ "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc"
+ );
+ }
+ }
+
+
+}
+
+
+#endif
diff --git a/arm_compute/core/NEON/kernels/assembly/profiler.hpp b/arm_compute/core/NEON/kernels/assembly/profiler.hpp
index d2f8ba923a..f7a1d1c70c 100644
--- a/arm_compute/core/NEON/kernels/assembly/profiler.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/profiler.hpp
@@ -31,6 +31,7 @@ class profiler {
private:
static const int maxevents = 10000;
unsigned long times[maxevents];
+ unsigned long units[maxevents];
int events[maxevents];
int currentevent;
int countfd;
@@ -45,35 +46,38 @@ public:
close(countfd);
int tots[5];
unsigned long counts[5];
+ unsigned long tunits[5];
const char * descs[] = { "Prepare A", "Prepare B", "Kernel", "Merge" };
for (int i=1; i<5; i++) {
tots[i] = 0;
counts[i] = 0;
+ tunits[i] = 0;
}
printf("Profiled events:\n");
for (int i=0; i<currentevent; i++) {
- printf("%10s: %ld\n", descs[events[i]-1], times[i]);
tots[events[i]]++;
counts[events[i]] += times[i];
+ tunits[events[i]] += units[i];
}
- printf("%20s %9s %9s %9s\n", "", "Events", "Total", "Average");
+ printf("%20s %9s %9s %9s %12s %9s\n", "", "Events", "Total", "Average", "Bytes/MACs", "Per cycle");
for (int i=1; i<5; i++) {
- printf("%20s: %9d %9ld %9ld\n",descs[i-1],tots[i],counts[i],counts[i]/tots[i]);
+ printf("%20s: %9d %9ld %9ld %12lu %9.2f\n",descs[i-1],tots[i],counts[i],counts[i]/tots[i],tunits[i],(float)tunits[i]/counts[i]);
}
}
template <typename T>
- void operator() (int i, T func) {
+ void operator() (int i, unsigned long u, T func) {
if (currentevent==maxevents) {
func();
} else {
+ events[currentevent] = i;
+ units[currentevent] = u;
start_counter(countfd);
func();
long long cycs = stop_counter(countfd);
- events[currentevent] = i;
times[currentevent++] = cycs;
}
}
@@ -84,7 +88,7 @@ public:
class profiler {
public:
template <typename T>
- void operator() (int i, T func) {
+ void operator() (int i, unsigned long u, T func) {
func();
}
};
@@ -95,3 +99,5 @@ public:
#define PROFILE_PREPB 2
#define PROFILE_KERNEL 3
#define PROFILE_MERGE 4
+
+
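
The call operator now takes a work estimate (bytes transformed or MACs executed) in addition to the event id and the callable, which is what feeds the new "Bytes/MACs" and "Per cycle" columns. A usage sketch matching the updated call sites in gemm_interleaved.hpp; the numbers are placeholders:

    #include "profiler.hpp"

    void profile_one_tile()
    {
        profiler prof;
        // Event id, estimated work for this region, then the code to be timed.
        const unsigned long macs = 8UL * 12UL * 256UL; // e.g. out_height * out_width * kern_k
        prof(PROFILE_KERNEL, macs, [&](void)
        {
            // ... run the micro-kernel on one tile ...
        });
    }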
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
index 865c729d39..59c919e161 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
@@ -57,14 +57,14 @@ public:
* @note GEMM_LOWP: low precision GEMM kernel
* This kernel performs the following computations:
*
- * -# Convert a values from uint8 to int32 and add a_offset to each of them.
- * -# Convert b values from uint8 to int32 and add b_offset to each of them.
+ * -# Convert a values from int8 to int32 and add a_offset to each of them.
+ * -# Convert b values from int8 to int32 and add b_offset to each of them.
* -# Compute the int32 matrix product of the resulting a * b.
* -# Add output_offset to each entry of the result.
* -# Multiply each entry of the result and round to the nearest integer
- * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ * -# Clamp the resulting int32 values to the [0..255] range and cast to int8.
*
- * @param[in] a First input tensor (Matrix A). Data type supported: U8.
+ * @param[in] a First input tensor (Matrix A). Data type supported: S8.
* @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
* @param[out] output Output tensor. Data type supported: same as @p a.
* @param[in] a_offset Offset to be added to each element of the matrix A.
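
As a concrete reading of the steps listed above: after the int32 accumulation (with a_offset and b_offset folded into the operands), each result is offset, scaled, shifted and saturated to the output type. Note that the comment still quotes the [0..255] range from the unsigned variant, while the S8 finalize kernel in this patch saturates to the signed 8-bit range, which is what this sketch assumes:

    #include <algorithm>
    #include <cstdint>

    // Scalar model of the final stage applied to one int32 accumulator:
    //   out = saturate_s8(((acc + c_offset) * c_mult_int) >> shift)
    static int8_t finalize_model(int32_t acc, int32_t c_offset, int32_t c_mult_int, int32_t shift)
    {
        int32_t v = ((acc + c_offset) * c_mult_int) >> shift;
        v = std::max<int32_t>(-128, std::min<int32_t>(127, v));
        return static_cast<int8_t>(v);
    }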
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 4866e781f5..c81a432295 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -78,6 +78,7 @@ private:
std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
Tensor _tmp_a;
Tensor _tmp_b;
+ Tensor _workspace;
};
}
#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__ */
diff --git a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
index 400c6d9d8c..255e486365 100644
--- a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
@@ -145,7 +145,7 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
// Convert S32 to U16
- const int16x8x2_t in_u16 =
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -153,10 +153,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
vector_sum_col, vector_sum_row, mm_result, out);
}
@@ -209,7 +209,7 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
// Convert S32 to U16
- const int16x8x2_t in_u16 =
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -217,10 +217,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
vector_sum_row, mm_result, out);
}
@@ -295,8 +295,8 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
- // Convert S32 to U16
- const int16x8x2_t in_u16 =
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -304,10 +304,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
vector_sum_col, mm_result, out);
}
@@ -346,8 +346,8 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
- // Convert S32 to U16
- const int16x8x2_t in_u16 =
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -355,10 +355,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
mm_result, out);
}
@@ -375,7 +375,7 @@ void NEGEMMLowpFinalizeKernel::configure(const ITensor *vector_sum_col, const IT
int32_t c_offset, int32_t c_mult_int, int32_t shift)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S8);
TensorShape mm_result_shape = mm_result->info()->tensor_shape();
TensorShape output_shape = output->info()->tensor_shape();
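
The replaced intrinsics above swap the unsigned narrowing chain (vqmovn_s32, vqmovun_s16, vst1q_u8) for its signed counterpart. In scalar terms the new store path is a double saturating narrow, roughly:

    #include <algorithm>
    #include <cstdint>

    // Scalar equivalent of vqmovn_s32 followed by vqmovn_s16: saturate to the
    // int16 range first, then to the int8 range, before the vst1q_s8 store.
    static int8_t narrow_s32_to_s8(int32_t v)
    {
        const int32_t v16 = std::max<int32_t>(-32768, std::min<int32_t>(32767, v));
        return static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, v16)));
    }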
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index 3e614a8bfc..4b9c9f3e64 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -52,7 +52,7 @@ NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel()
void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
@@ -127,115 +127,115 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
execute_window_loop(window, [&](const Coordinates & id)
{
- const uint8_t *mtx_a0 = ina.ptr();
- const uint8_t *mtx_b0 = inb.ptr();
+ auto *mtx_a0 = reinterpret_cast<const int8_t *>(ina.ptr());
+ auto *mtx_b0 = reinterpret_cast<const int8_t *>(inb.ptr());
// Note: Since the input are all positives, we can use uint32_t
// Accumulators for the block 0
- uint32x4x4_t c0 =
+ int32x4x4_t c0 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
// Accumulators for the block 1
- uint32x4x4_t c1 =
+ int32x4x4_t c1 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
// Accumulators for the block 2
- uint32x4x4_t c2 =
+ int32x4x4_t c2 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
// Accumulators for the block 3
- uint32x4x4_t c3 =
+ int32x4x4_t c3 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
{
- const uint8x8_t a00_u8 = vld1_u8(mtx_a0);
- const uint8x16_t b00_u8 = vld1q_u8(mtx_b0);
+ const int8x8_t a00_s8 = vld1_s8(mtx_a0);
+ const int8x16_t b00_s8 = vld1q_s8(mtx_b0);
- // Convert a00_u8 to uint16_t and get the lower part
- const uint16x4_t a00_u16 = vget_low_u16(vmovl_u8(a00_u8));
+ // Convert a00_s8 to int16_t and get the lower part
+ const int16x4_t a00_s16 = vget_low_s16(vmovl_s8(a00_s8));
- // Convert b00_u8 to int16_t
- const uint16x4x4_t b00_u16 =
+ // Convert b00_s8 to int16_t
+ const int16x4x4_t b00_s16 =
{
{
- vget_low_u16(vmovl_u8(vget_low_u8(b00_u8))),
- vget_high_u16(vmovl_u8(vget_low_u8(b00_u8))),
- vget_low_u16(vmovl_u8(vget_high_u8(b00_u8))),
- vget_high_u16(vmovl_u8(vget_high_u8(b00_u8)))
+ vget_low_s16(vmovl_s8(vget_low_s8(b00_s8))),
+ vget_high_s16(vmovl_s8(vget_low_s8(b00_s8))),
+ vget_low_s16(vmovl_s8(vget_high_s8(b00_s8))),
+ vget_high_s16(vmovl_s8(vget_high_s8(b00_s8)))
}
};
// 4x4 block 0
- c0.val[0] = vmlal_lane_u16(c0.val[0], b00_u16.val[0], a00_u16, 0);
- c0.val[1] = vmlal_lane_u16(c0.val[1], b00_u16.val[1], a00_u16, 0);
- c0.val[2] = vmlal_lane_u16(c0.val[2], b00_u16.val[2], a00_u16, 0);
- c0.val[3] = vmlal_lane_u16(c0.val[3], b00_u16.val[3], a00_u16, 0);
+ c0.val[0] = vmlal_lane_s16(c0.val[0], b00_s16.val[0], a00_s16, 0);
+ c0.val[1] = vmlal_lane_s16(c0.val[1], b00_s16.val[1], a00_s16, 0);
+ c0.val[2] = vmlal_lane_s16(c0.val[2], b00_s16.val[2], a00_s16, 0);
+ c0.val[3] = vmlal_lane_s16(c0.val[3], b00_s16.val[3], a00_s16, 0);
// 4x4 block 1
- c1.val[0] = vmlal_lane_u16(c1.val[0], b00_u16.val[0], a00_u16, 1);
- c1.val[1] = vmlal_lane_u16(c1.val[1], b00_u16.val[1], a00_u16, 1);
- c1.val[2] = vmlal_lane_u16(c1.val[2], b00_u16.val[2], a00_u16, 1);
- c1.val[3] = vmlal_lane_u16(c1.val[3], b00_u16.val[3], a00_u16, 1);
+ c1.val[0] = vmlal_lane_s16(c1.val[0], b00_s16.val[0], a00_s16, 1);
+ c1.val[1] = vmlal_lane_s16(c1.val[1], b00_s16.val[1], a00_s16, 1);
+ c1.val[2] = vmlal_lane_s16(c1.val[2], b00_s16.val[2], a00_s16, 1);
+ c1.val[3] = vmlal_lane_s16(c1.val[3], b00_s16.val[3], a00_s16, 1);
// 4x4 block 2
- c2.val[0] = vmlal_lane_u16(c2.val[0], b00_u16.val[0], a00_u16, 2);
- c2.val[1] = vmlal_lane_u16(c2.val[1], b00_u16.val[1], a00_u16, 2);
- c2.val[2] = vmlal_lane_u16(c2.val[2], b00_u16.val[2], a00_u16, 2);
- c2.val[3] = vmlal_lane_u16(c2.val[3], b00_u16.val[3], a00_u16, 2);
+ c2.val[0] = vmlal_lane_s16(c2.val[0], b00_s16.val[0], a00_s16, 2);
+ c2.val[1] = vmlal_lane_s16(c2.val[1], b00_s16.val[1], a00_s16, 2);
+ c2.val[2] = vmlal_lane_s16(c2.val[2], b00_s16.val[2], a00_s16, 2);
+ c2.val[3] = vmlal_lane_s16(c2.val[3], b00_s16.val[3], a00_s16, 2);
// 4x4 block 3
- c3.val[0] = vmlal_lane_u16(c3.val[0], b00_u16.val[0], a00_u16, 3);
- c3.val[1] = vmlal_lane_u16(c3.val[1], b00_u16.val[1], a00_u16, 3);
- c3.val[2] = vmlal_lane_u16(c3.val[2], b00_u16.val[2], a00_u16, 3);
- c3.val[3] = vmlal_lane_u16(c3.val[3], b00_u16.val[3], a00_u16, 3);
+ c3.val[0] = vmlal_lane_s16(c3.val[0], b00_s16.val[0], a00_s16, 3);
+ c3.val[1] = vmlal_lane_s16(c3.val[1], b00_s16.val[1], a00_s16, 3);
+ c3.val[2] = vmlal_lane_s16(c3.val[2], b00_s16.val[2], a00_s16, 3);
+ c3.val[3] = vmlal_lane_s16(c3.val[3], b00_s16.val[3], a00_s16, 3);
}
auto mtx_out = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(mtx_out + 0 * out_stride + 0, vreinterpretq_s32_u32(c0.val[0]));
- vst1q_s32(mtx_out + 0 * out_stride + 4, vreinterpretq_s32_u32(c0.val[1]));
- vst1q_s32(mtx_out + 0 * out_stride + 8, vreinterpretq_s32_u32(c0.val[2]));
- vst1q_s32(mtx_out + 0 * out_stride + 12, vreinterpretq_s32_u32(c0.val[3]));
- vst1q_s32(mtx_out + 1 * out_stride + 0, vreinterpretq_s32_u32(c1.val[0]));
- vst1q_s32(mtx_out + 1 * out_stride + 4, vreinterpretq_s32_u32(c1.val[1]));
- vst1q_s32(mtx_out + 1 * out_stride + 8, vreinterpretq_s32_u32(c1.val[2]));
- vst1q_s32(mtx_out + 1 * out_stride + 12, vreinterpretq_s32_u32(c1.val[3]));
- vst1q_s32(mtx_out + 2 * out_stride + 0, vreinterpretq_s32_u32(c2.val[0]));
- vst1q_s32(mtx_out + 2 * out_stride + 4, vreinterpretq_s32_u32(c2.val[1]));
- vst1q_s32(mtx_out + 2 * out_stride + 8, vreinterpretq_s32_u32(c2.val[2]));
- vst1q_s32(mtx_out + 2 * out_stride + 12, vreinterpretq_s32_u32(c2.val[3]));
- vst1q_s32(mtx_out + 3 * out_stride + 0, vreinterpretq_s32_u32(c3.val[0]));
- vst1q_s32(mtx_out + 3 * out_stride + 4, vreinterpretq_s32_u32(c3.val[1]));
- vst1q_s32(mtx_out + 3 * out_stride + 8, vreinterpretq_s32_u32(c3.val[2]));
- vst1q_s32(mtx_out + 3 * out_stride + 12, vreinterpretq_s32_u32(c3.val[3]));
+ vst1q_s32(mtx_out + 0 * out_stride + 0, c0.val[0]);
+ vst1q_s32(mtx_out + 0 * out_stride + 4, c0.val[1]);
+ vst1q_s32(mtx_out + 0 * out_stride + 8, c0.val[2]);
+ vst1q_s32(mtx_out + 0 * out_stride + 12, c0.val[3]);
+ vst1q_s32(mtx_out + 1 * out_stride + 0, c1.val[0]);
+ vst1q_s32(mtx_out + 1 * out_stride + 4, c1.val[1]);
+ vst1q_s32(mtx_out + 1 * out_stride + 8, c1.val[2]);
+ vst1q_s32(mtx_out + 1 * out_stride + 12, c1.val[3]);
+ vst1q_s32(mtx_out + 2 * out_stride + 0, c2.val[0]);
+ vst1q_s32(mtx_out + 2 * out_stride + 4, c2.val[1]);
+ vst1q_s32(mtx_out + 2 * out_stride + 8, c2.val[2]);
+ vst1q_s32(mtx_out + 2 * out_stride + 12, c2.val[3]);
+ vst1q_s32(mtx_out + 3 * out_stride + 0, c3.val[0]);
+ vst1q_s32(mtx_out + 3 * out_stride + 4, c3.val[1]);
+ vst1q_s32(mtx_out + 3 * out_stride + 8, c3.val[2]);
+ vst1q_s32(mtx_out + 3 * out_stride + 12, c3.val[3]);
},
ina, inb, out);
}
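The block above swaps every unsigned NEON intrinsic for its signed counterpart while keeping the same widening strategy: int8 values are widened to int16 with vmovl_s8 and accumulated into int32 lanes with vmlal_lane_s16. A minimal standalone sketch of that pattern (illustration only, not part of the patch; the helper name is hypothetical):

#include <arm_neon.h>
#include <cstdint>

// acc[j] += a[0] * b[j] for j = 0..3: int8 inputs, int16 widening, int32 accumulation.
// Note: vld1_s8 reads 8 bytes, so both pointers must be valid for at least 8 elements.
static inline int32x4_t mla_s8_lane0(int32x4_t acc, const int8_t *a, const int8_t *b)
{
    const int16x4_t a_s16 = vget_low_s16(vmovl_s8(vld1_s8(a))); // widen the first 4 A values
    const int16x4_t b_s16 = vget_low_s16(vmovl_s8(vld1_s8(b))); // widen the first 4 B values
    return vmlal_lane_s16(acc, b_s16, a_s16, 0);                // multiply by lane 0 of A, accumulate
}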
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
index 3f841bbf59..9df13ce0e3 100644
--- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
@@ -51,7 +51,7 @@ INEGEMMLowpReductionKernel::INEGEMMLowpReductionKernel()
void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
_input = mtx_a_interleaved4x4;
@@ -97,9 +97,9 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- uint32x4_t sum_row = vdupq_n_u32(0);
+ int32x4_t sum_row = vdupq_n_s32(0);
- const uint8_t *matrix_a = in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_a = reinterpret_cast<const int8_t *>(in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
@@ -109,43 +109,43 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 4 accumulations
for(; i <= (_k - 4); i += 4)
{
- const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i * 4);
+ const int8x16_t a0_s8 = vld1q_s8(matrix_a + i * 4);
// Convert U8 to U16
- uint16x4x4_t a0_u16 =
+ int16x4x4_t a0_s16 =
{
{
- vget_low_u16(vmovl_u8(vget_low_u8(a0_u8))),
- vget_high_u16(vmovl_u8(vget_low_u8(a0_u8))),
- vget_low_u16(vmovl_u8(vget_high_u8(a0_u8))),
- vget_high_u16(vmovl_u8(vget_high_u8(a0_u8)))
+ vget_low_s16(vmovl_s8(vget_low_s8(a0_s8))),
+ vget_high_s16(vmovl_s8(vget_low_s8(a0_s8))),
+ vget_low_s16(vmovl_s8(vget_high_s8(a0_s8))),
+ vget_high_s16(vmovl_s8(vget_high_s8(a0_s8)))
}
};
// Accumulate to U16
- a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[1]);
- a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[2]);
- a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[3]);
+ a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[1]);
+ a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[2]);
+ a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[3]);
// Accumulate to U32
- sum_row = vaddw_u16(sum_row, a0_u16.val[0]);
+ sum_row = vaddw_s16(sum_row, a0_s16.val[0]);
}
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- const uint8x8_t a0_u8 = vld1_u8(matrix_a + i * 4);
+ const int8x8_t a0_s8 = vld1_s8(matrix_a + i * 4);
// Convert U8 to U16
- const uint16x4_t a0_u16 = vget_low_u16(vmovl_u8(a0_u8));
+ const int16x4_t a0_s16 = vget_low_s16(vmovl_s8(a0_s8));
// Accumulate to U32
- sum_row = vaddw_u16(sum_row, a0_u16);
+ sum_row = vaddw_s16(sum_row, a0_s16);
}
auto vector_sum_row = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_row, vreinterpretq_s32_u32(sum_row));
+ vst1q_s32(vector_sum_row, sum_row);
},
in, out);
}
@@ -154,10 +154,10 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- uint32x4_t sum_row_s32 = vdupq_n_u32(0);
- unsigned int sum_row = 0;
+ int32x4_t sum_row_s32 = vdupq_n_s32(0);
+ int32_t sum_row = 0;
- const uint8_t *matrix_a = in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_a = reinterpret_cast<const int8_t *>(in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
@@ -167,29 +167,29 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 16 accumulations
for(; i <= (_k - 16); i += 16)
{
- const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i);
+ const int8x16_t a0_s8 = vld1q_s8(matrix_a + i);
// Partial accumulations in U16
- const uint16x8_t tmp_sum0 = vaddl_u8(vget_low_u8(a0_u8), vget_high_u8(a0_u8));
+ const int16x8_t tmp_sum0 = vaddl_s8(vget_low_s8(a0_s8), vget_high_s8(a0_s8));
// Accumulate to U32
- sum_row_s32 = vaddq_u32(sum_row_s32, vpaddlq_u16(tmp_sum0));
+ sum_row_s32 = vaddq_s32(sum_row_s32, vpaddlq_s16(tmp_sum0));
}
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- sum_row += static_cast<unsigned int>(matrix_a[i]);
+ sum_row += static_cast<int32_t>(matrix_a[i]);
}
#if defined(__aarch64__)
// Reduction operation available on 64 bit architectures only
- sum_row += vaddvq_u32(sum_row_s32);
+ sum_row += vaddvq_s32(sum_row_s32);
#else // __aarch64__
- uint32x2_t tmp = vpadd_u32(vget_high_u32(sum_row_s32), vget_low_u32(sum_row_s32));
- tmp = vpadd_u32(tmp, tmp);
+ int32x2_t tmp = vpadd_s32(vget_high_s32(sum_row_s32), vget_low_s32(sum_row_s32));
+ tmp = vpadd_s32(tmp, tmp);
- sum_row += vget_lane_u32(tmp, 0);
+ sum_row += vget_lane_s32(tmp, 0);
#endif // __aarch64__
*(reinterpret_cast<int *>(out.ptr())) = static_cast<int>(sum_row);
@@ -200,7 +200,7 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
_input = mtx_b_transposed1xW;
@@ -246,17 +246,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- uint32x4x4_t sum_col =
+ int32x4x4_t sum_col =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
- const uint8_t *matrix_b = in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_b = reinterpret_cast<const int8_t *>(in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
@@ -265,14 +265,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
int i = 0;
for(; i < _k; ++i)
{
- const uint8x16_t b0_u8 = vld1q_u8(matrix_b + i * 16);
+ const int8x16_t b0_s8 = vld1q_s8(matrix_b + i * 16);
- // Convert U8 to U16
- const uint16x8x2_t b0_u16 =
+ // Convert S8 to S16
+ const int16x8x2_t b0_s16 =
{
{
- vmovl_u8(vget_low_u8(b0_u8)),
- vmovl_u8(vget_high_u8(b0_u8))
+ vmovl_s8(vget_low_s8(b0_s8)),
+ vmovl_s8(vget_high_s8(b0_s8))
}
};
@@ -280,20 +280,20 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
sum_col =
{
{
- vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
- vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
+ vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])),
+ vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1]))
}
};
}
auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
- vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
- vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
- vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
+ vst1q_s32(vector_sum_col + 0, sum_col.val[0]);
+ vst1q_s32(vector_sum_col + 4, sum_col.val[1]);
+ vst1q_s32(vector_sum_col + 8, sum_col.val[2]);
+ vst1q_s32(vector_sum_col + 12, sum_col.val[3]);
},
in, out);
}
@@ -326,17 +326,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
}
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- uint32x4x4_t sum_col =
+ int32x4x4_t sum_col =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
- const uint8_t *matrix_b = inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_b = reinterpret_cast<const int8_t *>(inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
@@ -347,10 +347,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 4 accumulations
for(; i <= (_k - 4); i += 4)
{
- const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
- const uint8x16_t b1_u8 = vld1q_u8(matrix_b + 1 * in_b_stride);
- const uint8x16_t b2_u8 = vld1q_u8(matrix_b + 2 * in_b_stride);
- const uint8x16_t b3_u8 = vld1q_u8(matrix_b + 3 * in_b_stride);
+ const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride);
+ const int8x16_t b1_s8 = vld1q_s8(matrix_b + 1 * in_b_stride);
+ const int8x16_t b2_s8 = vld1q_s8(matrix_b + 2 * in_b_stride);
+ const int8x16_t b3_s8 = vld1q_s8(matrix_b + 3 * in_b_stride);
#if __arm__
asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 1 * in_b_stride));
@@ -360,31 +360,31 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
#endif /* __arm__ */
// Partial accumulation in u16
- uint16x8x2_t tmp_sum =
+ int16x8x2_t tmp_sum =
{
{
- vdupq_n_u16(0),
- vdupq_n_u16(0)
+ vdupq_n_s16(0),
+ vdupq_n_s16(0)
}
};
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b0_u8));
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b1_u8));
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b2_u8));
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b3_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b0_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b1_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b2_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b3_u8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b0_s8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b1_s8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b2_s8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b3_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b0_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b1_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b2_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b3_s8));
// Accumulate to U32
sum_col =
{
{
- vaddw_u16(sum_col.val[0], vget_low_u16(tmp_sum.val[0])),
- vaddw_u16(sum_col.val[1], vget_high_u16(tmp_sum.val[0])),
- vaddw_u16(sum_col.val[2], vget_low_u16(tmp_sum.val[1])),
- vaddw_u16(sum_col.val[3], vget_high_u16(tmp_sum.val[1]))
+ vaddw_s16(sum_col.val[0], vget_low_s16(tmp_sum.val[0])),
+ vaddw_s16(sum_col.val[1], vget_high_s16(tmp_sum.val[0])),
+ vaddw_s16(sum_col.val[2], vget_low_s16(tmp_sum.val[1])),
+ vaddw_s16(sum_col.val[3], vget_high_s16(tmp_sum.val[1]))
}
};
@@ -394,14 +394,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
// This for loop perfoms the leftover accumulations
for(; i < _k; ++i)
{
- const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
+ const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride);
- // Convert U8 to U16
- const uint16x8x2_t b0_u16 =
+ // Convert S8 to S16
+ const int16x8x2_t b0_s16 =
{
{
- vmovl_u8(vget_low_u8(b0_u8)),
- vmovl_u8(vget_high_u8(b0_u8))
+ vmovl_s8(vget_low_s8(b0_s8)),
+ vmovl_s8(vget_high_s8(b0_s8))
}
};
@@ -409,10 +409,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
sum_col =
{
{
- vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
- vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
+ vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])),
+ vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1]))
}
};
@@ -421,11 +421,11 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
- vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
- vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
- vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
+ vst1q_s32(vector_sum_col + 0, sum_col.val[0]);
+ vst1q_s32(vector_sum_col + 4, sum_col.val[1]);
+ vst1q_s32(vector_sum_col + 8, sum_col.val[2]);
+ vst1q_s32(vector_sum_col + 12, sum_col.val[3]);
},
inb, out);
}
-}
\ No newline at end of file
+}
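Both reduction kernels now follow the same signed widening chain: vaddl_s8 produces int16 partial sums and vpaddlq_s16 widens them pairwise into the int32 accumulator. A simplified sketch of the row-sum step (standalone illustration assuming an AArch64 target; n is assumed to be a multiple of 16 and the function name is hypothetical):

#include <arm_neon.h>
#include <cstdint>

static inline int32_t sum_row_s8(const int8_t *row, int n)
{
    int32x4_t acc = vdupq_n_s32(0);
    for(int i = 0; i < n; i += 16)
    {
        const int8x16_t v   = vld1q_s8(row + i);
        const int16x8_t s16 = vaddl_s8(vget_low_s8(v), vget_high_s8(v)); // 16 x int8 -> 8 x int16
        acc                 = vaddq_s32(acc, vpaddlq_s16(s16));          // pairwise widen and accumulate
    }
    return vaddvq_s32(acc); // horizontal reduction, AArch64 only
}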
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
index 8728e77d9e..5fe198f455 100644
--- a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
@@ -35,6 +35,12 @@
#include "arm_compute/core/Window.h"
#include "support/ToolchainSupport.h"
+namespace arm_compute
+{
+#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
+} // namespace arm_compute
+
#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
@@ -42,91 +48,22 @@
// Enable only if compiled for AArch64-V8.2-A targets
#ifdef ARM_COMPUTE_AARCH64_V8_2
-#define ASM_PREFETCH(address) "PRFM PLDL1KEEP, " address "\n"
-#define ASM_PREFETCHL2(address) "PRFM PLDL2KEEP, " address "\n"
-#define ASM_PREFETCHW(address) "PRFM PSTL1KEEP, " address "\n"
-#define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"
-
-static inline void stincpld(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
- uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
- uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
- uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
-{
- __asm __volatile(
- "LDR q0, [%[ptr0]]\n"
- "LDR q1, [%[ptr1]]\n"
- "LDR q2, [%[ptr2]]\n"
- "LDR q3, [%[ptr3]]\n"
- "LDR q4, [%[ptr4]]\n"
- "LDR q5, [%[ptr5]]\n"
- "LDR q6, [%[ptr6]]\n"
- "LDR q7, [%[ptr7]]\n"
- "ADD v0.4s, v0.4s, %[v0].4s\n" ASM_PREFETCH("[%[ptr0], #80]") "ADD v1.4s, v1.4s, %[v1].4s\n" ASM_PREFETCH("[%[ptr1], #80]") "ADD v2.4s, v2.4s, %[v2].4s\n" ASM_PREFETCH("[%[ptr2], #80]")
- "ADD v3.4s, v3.4s, %[v3].4s\n" ASM_PREFETCH("[%[ptr3], #80]") "ADD v4.4s, v4.4s, %[v4].4s\n" ASM_PREFETCH("[%[ptr4], #80]") "ADD v5.4s, v5.4s, %[v5].4s\n" ASM_PREFETCH("[%[ptr5], #80]")
- "ADD v6.4s, v6.4s, %[v6].4s\n" ASM_PREFETCH("[%[ptr6], #80]") "ADD v7.4s, v7.4s, %[v7].4s\n" ASM_PREFETCH("[%[ptr7], #80]")
- "STR q0, [%[ptr0]], #16\n"
- "STR q1, [%[ptr1]], #16\n"
- "STR q2, [%[ptr2]], #16\n"
- "STR q3, [%[ptr3]], #16\n"
- "STR q4, [%[ptr4]], #16\n"
- "STR q5, [%[ptr5]], #16\n"
- "STR q6, [%[ptr6]], #16\n"
- "STR q7, [%[ptr7]], #16\n"
- : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
- [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
- : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
- [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
- : "x20", "x21", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
-}
-
-static inline void stinc(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
- uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
- uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
- uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
-{
- __asm __volatile(
- "LDR q0, [%[ptr0]]\n"
- "LDR q1, [%[ptr1]]\n"
- "LDR q2, [%[ptr2]]\n"
- "LDR q3, [%[ptr3]]\n"
- "LDR q4, [%[ptr4]]\n"
- "LDR q5, [%[ptr5]]\n"
- "LDR q6, [%[ptr6]]\n"
- "LDR q7, [%[ptr7]]\n"
- "ADD v0.4s, v0.4s, %[v0].4s\n"
- "ADD v1.4s, v1.4s, %[v1].4s\n"
- "ADD v2.4s, v2.4s, %[v2].4s\n"
- "ADD v3.4s, v3.4s, %[v3].4s\n"
- "ADD v4.4s, v4.4s, %[v4].4s\n"
- "ADD v5.4s, v5.4s, %[v5].4s\n"
- "ADD v6.4s, v6.4s, %[v6].4s\n"
- "ADD v7.4s, v7.4s, %[v7].4s\n"
- "STR q0, [%[ptr0]], #16\n"
- "STR q1, [%[ptr1]], #16\n"
- "STR q2, [%[ptr2]], #16\n"
- "STR q3, [%[ptr3]], #16\n"
- "STR q4, [%[ptr4]], #16\n"
- "STR q5, [%[ptr5]], #16\n"
- "STR q6, [%[ptr6]], #16\n"
- "STR q7, [%[ptr7]], #16\n"
- : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
- [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
- : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
- [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
-}
-
namespace arm_compute
{
-void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output)
+void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- _input0 = input0;
- _input1 = input1;
- _output = output;
+ _input0 = input0;
+ _input1 = input1;
+ _output = output;
+ _workspace = workspace;
+ _alpha = alpha;
+ _beta = beta;
+ _transform_0 = transform_0;
+ _transform_1 = transform_1;
// Configure kernel window
Window win = calculate_max_window(*output->info());
@@ -149,375 +86,49 @@ bool NEGEMMLowpAArch64V8P4Kernel::is_parallelisable() const
return false;
}
-#define _UDOT_MACRO \
- ".altmacro\n" \
- ".macro udot opd:req, opn:req, opm:req\n" \
- "local vd, vn, vm, h, l\n" \
- ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
- ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n" \
- ".set vd,\\reg\n" \
- ".endif\n" \
- ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n" \
- ".set vn,\\reg\n" \
- ".endif\n" \
- ".irp idx,0,1,2,3\n" \
- ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n" \
- ".set vm,\\reg\n" \
- ".set h,\\idx / 2\n" \
- ".set l,\\idx %% 2\n" \
- ".endif\n" \
- ".endr\n" \
- ".endr\n" \
- ".ifndef vd\n" \
- ".error \"Bad operand \\opd\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef vn\n" \
- ".error \"Bad operand \\opn\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef vm\n" \
- ".error \"Bad operand \\opm\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef h\n" \
- ".error \"Bad operand \\opm\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef l\n" \
- ".error \"Bad operand \\opm\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".int 0x6f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n" \
- ".endm\n"
-
-#define _PREFETCH_ \
- __asm __volatile( \
- "" ASM_PREFETCH("[%[a_ptr], #64]") \
- ASM_PREFETCH("[%[a_ptr], #128]") \
- ASM_PREFETCH("[%[a_ptr], #192]") \
- : \
- : \
- [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr) \
- : "x20", "x21", "memory"); \
- __asm __volatile( \
- "" ASM_PREFETCH("[%[b_ptr]]") \
- ASM_PREFETCH("[%[b_ptr], #64]") \
- ASM_PREFETCH("[%[b_ptr], #128]") \
- ASM_PREFETCH("[%[b_ptr], #192]") \
- : \
- : \
- [b_ptr] "r"(b_ptr) \
- : "x20", "x21"); \
- __asm __volatile( \
- "" \
- : [r00] "+w"(r00), [r01] "+w"(r01), \
- [r10] "+w"(r10), [r11] "+w"(r11), \
- [r20] "+w"(r20), [r21] "+w"(r21), \
- [r30] "+w"(r30), [r31] "+w"(r31), \
- [a0] "+w"(a0), [a1] "+w"(a1), \
- [b0] "+w"(b0), [b1] "+w"(b1), [b2] "=w"(b2), \
- [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr) \
- : \
- :); \
- __asm __volatile( \
- "" \
- : [r02] "+w"(r02), \
- [r12] "+w"(r12), \
- [r22] "+w"(r22), \
- [r32] "+w"(r32), \
- [r40] "+w"(r40), \
- [r50] "+w"(r50), \
- [r60] "+w"(r60), \
- [r70] "+w"(r70), \
- [a0a] "=w"(a0a), [a1a] "=w"(a1a), \
- [b0] "+w"(b0), [b2] "+w"(b2), [b5] "=&w"(b5) \
- : \
- :); \
- __asm __volatile( \
- "" \
- : \
- [r41] "+w"(r41), [r42] "+w"(r42), \
- [r51] "+w"(r51), [r52] "+w"(r52), \
- [r61] "+w"(r61), [r62] "+w"(r62), \
- [r71] "+w"(r71), [r72] "+w"(r72), \
- [a1] "+w"(a1), \
- [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), \
- [b_ptr] "+r"(b_ptr), [k] "+r"(k) \
- : \
- :);
-
void NEGEMMLowpAArch64V8P4Kernel::run(const Window &window, const ThreadInfo &info)
{
- ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
- const int x_block = 348;
- const int k_block = 1664;
- const int nthreads = 1;
- const int M = _output->info()->tensor_shape().y();
- const int N = _output->info()->tensor_shape().x();
- const int K = _input0->info()->tensor_shape().x() >> 3;
-
- int yblocksperthread = ((M / nthreads) + 7) / 8;
-
- if(yblocksperthread < 1)
- {
- yblocksperthread = 1;
- }
-
- const int lda = _input0->info()->strides_in_bytes().y();
- const int ldb = _input1->info()->strides_in_bytes().y();
- const int ldc = _output->info()->strides_in_bytes().y();
- const int ldc2 = _output->info()->strides_in_bytes().x();
- const int ldc3 = ldc / sizeof(uint32_t);
+ const int lda = _input0->info()->strides_in_bytes().y();
+ const int ldb = _input1->info()->strides_in_bytes().y();
+ const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t);
- const int threadid = 0;
- int y0 = threadid * yblocksperthread * 8;
- int ymax = (threadid + 1) * yblocksperthread * 8;
- if(y0 >= M)
- {
- return;
- }
- if(ymax > M)
- {
- ymax = M;
- }
- for(int k0 = 0; k0 < K; k0 += k_block)
- {
- int kmax = k0 + k_block;
- if(kmax > K)
- {
- kmax = K;
- }
+ const auto in1_ptr = reinterpret_cast<const int8_t *>(_input1->buffer());
- for(int x0 = 0; x0 < N; x0 += x_block)
- {
- int xmax = x0 + x_block;
- if(xmax > N)
- {
- xmax = N;
- }
+ const int M = std::min(_output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
+ const int N = _output->info()->tensor_shape().x();
+ const int K = _input0->info()->tensor_shape().x();
- for(int y = y0; y < ymax; y += 8)
- {
- auto c_ptr0 = reinterpret_cast<uint32_t *>(_output->buffer() + (y * ldc) + x0 * ldc2);
- uint32_t *c_ptr1 = c_ptr0 + ldc3;
- uint32_t *c_ptr2 = c_ptr1 + ldc3;
- uint32_t *c_ptr3 = c_ptr2 + ldc3;
- uint32_t *c_ptr4 = c_ptr3 + ldc3;
- uint32_t *c_ptr5 = c_ptr4 + ldc3;
- uint32_t *c_ptr6 = c_ptr5 + ldc3;
- uint32_t *c_ptr7 = c_ptr6 + ldc3;
+ // Only iterate over batches
+ Window win(window);
+ win.set(0, Window::Dimension(0, 1, 1));
+ win.set(1, Window::Dimension(0, 1, 1));
- __asm __volatile(
- "" ASM_PREFETCH("[%[c_ptr0]]")
- ASM_PREFETCH("[%[c_ptr1]]")
- ASM_PREFETCH("[%[c_ptr2]]")
- ASM_PREFETCH("[%[c_ptr3]]")
- ASM_PREFETCH("[%[c_ptr4]]")
- ASM_PREFETCH("[%[c_ptr5]]")
- ASM_PREFETCH("[%[c_ptr6]]")
- ASM_PREFETCH("[%[c_ptr7]]")
- :
- : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
- [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
- : "x20", "x21");
+ Iterator in0(_input0, window);
+ Iterator out(_output, window);
- for(int x = x0; x < xmax; x += 12)
- {
- register uint32x4_t r00 asm("v8");
- register uint32x4_t r10 asm("v9");
- register uint32x4_t r20 asm("v10");
- register uint32x4_t r30 asm("v11");
- register uint32x4_t r40 asm("v12");
- register uint32x4_t r50 asm("v13");
- register uint32x4_t r60 asm("v14");
- register uint32x4_t r70 asm("v15");
- register uint32x4_t r01 asm("v16");
- register uint32x4_t r11 asm("v17");
- register uint32x4_t r21 asm("v18");
- register uint32x4_t r31 asm("v19");
- register uint32x4_t r41 asm("v20");
- register uint32x4_t r51 asm("v21");
- register uint32x4_t r61 asm("v22");
- register uint32x4_t r71 asm("v23");
- register uint32x4_t r02 asm("v24");
- register uint32x4_t r12 asm("v25");
- register uint32x4_t r22 asm("v26");
- register uint32x4_t r32 asm("v27");
- register uint32x4_t r42 asm("v28");
- register uint32x4_t r52 asm("v29");
- register uint32x4_t r62 asm("v30");
- register uint32x4_t r72 asm("v31");
+ GemmInterleaved<gemm_s8_12x8, int8_t, int32_t> gemm(&info.cpu_info, M, N, K, !_transform_1, !_transform_1);
- register uint8x16_t a0 asm("v0");
- register uint8x16_t a1 asm("v1");
- register uint8x16_t b0 asm("v2");
- register uint8x16_t b1 asm("v3");
- register uint8x16_t b2 asm("v4");
- register uint8x16_t a0a asm("v5");
- register uint8x16_t a1a asm("v6");
- register uint8x16_t b5 asm("v7");
- const uint8_t *a_ptr = _input0->buffer() + ((y / 8) * lda) + (k0 * 8);
- const uint8_t *b_ptr = _input1->buffer() + ((x / 12) * ldb) + (k0 * 12);
+ constexpr size_t alignment = 4096;
+ const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id;
+ void *workspace = _workspace->buffer() + offset;
+ size_t workspace_size = _workspace->info()->total_size();
- r00 = r01 = r02 = r10 = r11 = r12 = r20 = r21 = r22 = r30 = r31 = r32 = vdupq_n_u32(0);
- r40 = r41 = r42 = r50 = r51 = r52 = r60 = r61 = r62 = r70 = r71 = r72 = vdupq_n_u32(0);
-
- int k = ((kmax - k0) / 8) - 1;
-
- a0 = vld1q_u8(a_ptr);
- b0 = vld1q_u8(b_ptr);
- a1 = vld1q_u8(a_ptr + 16);
- b1 = vld1q_u8(b_ptr + 16);
-
- _PREFETCH_
-
- __asm __volatile(
- _UDOT_MACRO
- "1:\n"
- "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
- "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
- "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
- "udot v17.4s, %[b1].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[a_ptr], #256]")
- "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #256]")
- "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
-
- "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
- "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
- "ldr %q[a0], [%[a_ptr], #64]\n"
- "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
- "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
- "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
- "ldr %q[a1], [%[a_ptr], #80]\n"
- "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
- "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
- "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
-
- "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
- "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #320]")
- "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
- "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
- "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
- "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
- "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
- "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
-
- "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "subs %w[k], %w[k], #1\n"
- "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
-
- "bne 1b\n"
-
- "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
- "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
- "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
- "udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
-
- "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0]]") "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1]]") "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
- ASM_PREFETCH("[%[c_ptr2]]") "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3]]") "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4]]")
- "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5]]") "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6]]") "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- ASM_PREFETCH("[%[c_ptr7]]")
-
- "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0], #48]") "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1], #48]") "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
- ASM_PREFETCH("[%[c_ptr2], #48]") "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3], #48]") "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4], #48]")
- "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5], #48]") "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6], #48]") "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- ASM_PREFETCH("[%[c_ptr7], #48]")
-
- "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
-
- // Clean up macro namespace
- ".purgem udot\n"
-
- :
- [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr),
- [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
- [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
- : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
- [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
- : "x20", "x21");
-
- stincpld(r00, r10, r20, r30, r40, r50, r60, r70, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
- stinc(r01, r11, r21, r31, r41, r51, r61, r71, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
- stinc(r02, r12, r22, r32, r42, r52, r62, r72, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
- }
- }
- }
+ if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr)
+ {
+ ARM_COMPUTE_ERROR("Not enough space to align buffer!");
}
+
+ execute_window_loop(win, [&](const Coordinates & id)
+ {
+ gemm.execute(reinterpret_cast<const int8_t *>(in0.ptr()), lda,
+ reinterpret_cast<const int8_t *>(in1_ptr), ldb,
+ reinterpret_cast<int32_t *>(out.ptr()), ldc,
+ _alpha, _beta, workspace);
+ },
+ in0, out);
}
} // namespace arm_compute
#endif /* ARM_COMPUTE_AARCH64_V8_2 */
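run() no longer carries its own dot-product assembly; it hands each thread an aligned slice of the shared workspace and delegates the work to GemmInterleaved. A sketch of the per-thread carve-out (illustration only, using std::align in place of support::cpp11::align; the function name is hypothetical):

#include <cstddef>
#include <memory>

static void *thread_workspace(unsigned char *base, size_t total_size,
                              size_t working_size, unsigned int thread_id)
{
    constexpr size_t alignment = 4096;
    const size_t     offset    = (working_size + alignment - 1) * thread_id;
    if(offset >= total_size)
    {
        return nullptr;
    }
    void  *ptr   = base + offset;
    size_t space = total_size - offset;
    // std::align returns nullptr when the aligned block does not fit in the remaining space.
    return std::align(alignment, working_size, ptr, space);
}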
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index ff92ef8351..2dea9317a5 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -114,7 +114,7 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
#endif /* defined(__arm__) || defined(__aarch64__) */
constexpr size_t alignment = 4096;
- _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::S8));
_memory_group.manage(&_workspace);
// Configure matrix multiplication kernel
diff --git a/src/runtime/NEON/functions/NEGEMMLowp.cpp b/src/runtime/NEON/functions/NEGEMMLowp.cpp
index ab7fa079b1..90bc6a205b 100644
--- a/src/runtime/NEON/functions/NEGEMMLowp.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowp.cpp
@@ -45,7 +45,7 @@ NEGEMMLowp::NEGEMMLowp(std::shared_ptr<IMemoryManager> memory_manager)
void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t output_mult_int, int32_t shift)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
@@ -131,4 +131,4 @@ void NEGEMMLowp::run()
NEScheduler::get().schedule(&_finalize_kernel, Window::DimY);
_memory_group.release();
-}
\ No newline at end of file
+}
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 11ae054e11..29104cc378 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -26,9 +26,8 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
@@ -39,16 +38,22 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "support/ToolchainSupport.h"
+namespace arm_compute
+{
+#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
+} // namespace arm_compute
+
using namespace arm_compute;
NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b()
+ : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b(), _workspace()
{
}
void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
@@ -62,42 +67,22 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
if(cpu_has_dotprod != 0)
{
- TensorShape shape_a_int = a->info()->tensor_shape();
- shape_a_int.set(0, a->info()->dimension(0) * 8.f);
- shape_a_int.set(1, std::ceil(a->info()->dimension(1) / 8.f));
-
- TensorShape shape_b_int = b->info()->tensor_shape();
- shape_b_int.set(0, b->info()->dimension(0) * 12.f);
- shape_b_int.set(1, std::ceil(b->info()->dimension(1) / 12.f));
-
- TensorInfo info_a_int(shape_a_int, 1, a->info()->data_type());
- TensorInfo info_b_int(shape_b_int, 1, b->info()->data_type());
- _tmp_a.allocator()->init(info_a_int);
- _tmp_b.allocator()->init(info_b_int);
- _memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
-
- // Configure interleave blocked kernel for matrix A
- {
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleaveBlockedKernel>();
- k->configure(a, &_tmp_a, 8, 4, false);
- _mtx_a_reshape_kernel = std::move(k);
- }
-
- // Configure interleave blocked kernel for matrix B
- {
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleaveBlockedKernel>();
- k->configure(b, &_tmp_b, 12, 4, true);
- _mtx_b_reshape_kernel = std::move(k);
- }
-
// Configure matrix multiply kernel
- {
- // NEGEMMLowpAArch64V8P4Kernel only compiled in AArch64 targets
- auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
- k->configure(&_tmp_a, &_tmp_b, output);
- _mm_kernel = std::move(k);
- }
+ struct CPUInfo ci = NEScheduler::get().cpu_info();
+ const int M = output->info()->tensor_shape().y();
+ const int N = output->info()->tensor_shape().x();
+ const int K = a->info()->tensor_shape().x();
+
+ GemmInterleaved<gemm_s8_12x8, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
+ constexpr size_t alignment = 4096;
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ _memory_group.manage(&_workspace);
+ // Configure matrix multiplication kernel
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
+ k->configure(a, b, output, &_workspace, 1.f, 1.f);
+ _mm_kernel = std::move(k);
+
+ _workspace.allocator()->allocate();
}
else
#endif /* ARM_COMPUTE_AARCH64_V8_2 */
@@ -139,25 +124,28 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
k->configure(&_tmp_a, &_tmp_b, output);
_mm_kernel = std::move(k);
}
- }
- // Allocate tensors
- _tmp_a.allocator()->allocate();
- _tmp_b.allocator()->allocate();
+ // Allocate tensors
+ _tmp_a.allocator()->allocate();
+ _tmp_b.allocator()->allocate();
+ }
}
void NEGEMMLowpMatrixMultiplyCore::run()
{
_memory_group.acquire();
- // Run reshape matrix A
- NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY);
+ if(_mtx_a_reshape_kernel)
+ {
+ NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY);
+ }
- // Run reshape matrix B
- NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY);
+ if(_mtx_b_reshape_kernel)
+ {
+ NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY);
+ }
- // Run matrix multiply kernel
NEScheduler::get().schedule(_mm_kernel.get(), Window::DimY);
_memory_group.release();
-}
\ No newline at end of file
+}
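On the dot-product path the interleave and transpose kernels are skipped, so the only extra allocation is the shared workspace, sized so that every scheduler thread can align its own slice. A sketch of the sizing rule used in configure() above (values taken from the hunk; the helper is hypothetical):

#include <cstddef>

// One (working_size + alignment - 1) slot per thread leaves enough slack for each
// thread to round its slice up to a 4096-byte boundary without touching its neighbour.
static size_t workspace_bytes(size_t working_size, unsigned int num_threads)
{
    constexpr size_t alignment = 4096;
    return (working_size + alignment - 1) * num_threads;
}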
diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp
index 06926e631e..e1d76503cd 100644
--- a/tests/validation/CPP/GEMMLowp.cpp
+++ b/tests/validation/CPP/GEMMLowp.cpp
@@ -34,7 +34,7 @@ namespace validation
{
namespace reference
{
-SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c)
+SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, SimpleTensor<int32_t> &c)
{
ARM_COMPUTE_UNUSED(a);
ARM_COMPUTE_UNUSED(b);
@@ -99,15 +99,15 @@ SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, Sim
for(int j = 0; j < cols; ++j)
{
const int32_t result = ((c_offset + acc[j]) * c_mult_int) >> out_shift;
- c[j + i * cols] = static_cast<uint8_t>(std::min(255, std::max(0, result)));
+ c[j + i * cols] = static_cast<int8_t>(std::min(127, std::max(-128, result)));
}
}
return c;
}
-template SimpleTensor<uint8_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint8_t> &c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
+template SimpleTensor<int8_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, SimpleTensor<int8_t> &c,
+ int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
} // namespace reference
} // namespace validation
} // namespace test
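The reference now clamps to the signed 8-bit range instead of [0, 255]. The requantisation step, pulled out as a standalone scalar sketch (illustration only; the helper name is hypothetical):

#include <algorithm>
#include <cstdint>

static inline int8_t requantize_s8(int32_t acc, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
{
    const int32_t result = ((c_offset + acc) * c_mult_int) >> out_shift; // scale and shift the accumulator
    return static_cast<int8_t>(std::min(127, std::max(-128, result)));  // saturate to the int8 range
}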
diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h
index 0428e9e34f..2f903f2fe2 100644
--- a/tests/validation/CPP/GEMMLowp.h
+++ b/tests/validation/CPP/GEMMLowp.h
@@ -35,7 +35,7 @@ namespace validation
{
namespace reference
{
-SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c);
+SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, SimpleTensor<int32_t> &c);
template <typename T>
SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index de7495bb6a..4924f98ea6 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -45,14 +45,15 @@ namespace validation
{
namespace
{
-const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
-const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
+const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
+const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 16);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(GEMMLowp)
-TEST_SUITE(U8)
+TEST_SUITE(S8)
TEST_SUITE(INTERLEAVE_BLOCKED)
@@ -79,7 +80,7 @@ TEST_SUITE_END()
using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture<Tensor, Accessor, NEGEMMLowp>;
DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()), framework::dataset::make("DataType",
- DataType::U8)),
+ DataType::S8)),
shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type)
{
// Create tensors
@@ -96,31 +97,28 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
gemmlowp.configure(&a, &b, &c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpDataset(), framework::dataset::make("DataType", DataType::U8)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpDataset(), framework::dataset::make("DataType", DataType::S8)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpOffsetFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpDataset(), framework::dataset::make("DataType", DataType::U8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpOffsetFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpDataset(), framework::dataset::make("DataType", DataType::S8)))
{
// Validate output
validate(Accessor(_target), _reference);
}
+TEST_SUITE_END() // S8
-//FIXME: This is in the process of being updated, for more info please refer to COMPMID-624.
-#if 0 // defined(__aarch64__)
-TEST_SUITE(U32)
-using NEGEMMLowpFixture = GEMMLowpValidationFixture<Tensor, Accessor, NEGEMMLowp>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::PRECOMMIT, framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K",
- 16))
+TEST_SUITE(S32)
+using NEGEMMLowpMatrixMultiplyFixture = GEMMLowpMatrixMultiplyValidationFixture<Tensor, Accessor, NEGEMMLowpMatrixMultiplyCore>;
+FIXTURE_DATA_TEST_CASE(MatrixMultiply, NEGEMMLowpMatrixMultiplyFixture, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
{
// Validate output
validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
-#endif // defined(__aarch64__)
-TEST_SUITE_END()
+
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 1a4084fdc2..fba44008ba 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -57,7 +57,7 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::U8);
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::S8);
std::uniform_int_distribution<> distribution(0, 3);
library->fill(tensor, distribution, i);
}
@@ -97,28 +97,28 @@ protected:
return c;
}
- SimpleTensor<uint8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
+ SimpleTensor<int8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
+ int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
// Create reference
- SimpleTensor<uint8_t> a{ shape_a, data_type, 1 };
- SimpleTensor<uint8_t> b{ shape_b, data_type, 1 };
- SimpleTensor<uint8_t> c{ shape_c, data_type, 1 };
+ SimpleTensor<int8_t> a{ shape_a, data_type, 1 };
+ SimpleTensor<int8_t> b{ shape_b, data_type, 1 };
+ SimpleTensor<int8_t> c{ shape_c, data_type, 1 };
// Fill reference
fill(a, 0);
fill(b, 1);
fill(c, 2);
- return reference::gemmlowp<uint8_t>(a, b, c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+ return reference::gemmlowp<int8_t>(a, b, c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
}
- TensorType _target{};
- SimpleTensor<uint8_t> _reference{};
+ TensorType _target{};
+ SimpleTensor<int8_t> _reference{};
};
template <typename TensorType, typename AccessorType, typename FunctionType>
-class GEMMLowpValidationFixture : public framework::Fixture
+class GEMMLowpMatrixMultiplyValidationFixture : public framework::Fixture
{
public:
template <typename...>
@@ -142,9 +142,9 @@ protected:
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
- TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
- TensorType c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+ TensorType a = create_tensor<TensorType>(shape_a, DataType::S8, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, DataType::S8, 1);
+ TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
// Create and configure function
FunctionType gemmlowp;
@@ -164,8 +164,8 @@ protected:
ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
- fill(AccessorType(a), 0, 0, 3);
- fill(AccessorType(b), 1, 0, 3);
+ fill(AccessorType(a), 0, -128, 127);
+ fill(AccessorType(b), 1, -128, 127);
fill(AccessorType(c), 2, 0, 0);
// Compute GEMM function
@@ -173,23 +173,23 @@ protected:
return c;
}
- SimpleTensor<uint32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+ SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
{
// Create reference
- SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
- SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
- SimpleTensor<uint32_t> c{ shape_c, DataType::U32, 1 };
+ SimpleTensor<int8_t> a{ shape_a, DataType::S8, 1 };
+ SimpleTensor<int8_t> b{ shape_b, DataType::S8, 1 };
+ SimpleTensor<int32_t> c{ shape_c, DataType::S32, 1 };
// Fill reference
- fill(a, 0, 0, 3);
- fill(b, 1, 0, 3);
+ fill(a, 0, -128, 127);
+ fill(b, 1, -128, 127);
fill(c, 2, 0, 0);
return reference::gemmlowp(a, b, c);
}
- TensorType _target{};
- SimpleTensor<uint32_t> _reference{};
+ TensorType _target{};
+ SimpleTensor<int32_t> _reference{};
};
} // namespace validation
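The fixtures now draw int8 values across the full [-128, 127] range rather than [0, 3]. A standalone sketch of an equivalent fill (illustration only; std::uniform_int_distribution does not accept int8_t directly, so the draw is done in int and narrowed):

#include <cstdint>
#include <random>
#include <vector>

static std::vector<int8_t> fill_s8(size_t n, int lo = -128, int hi = 127, unsigned seed = 0)
{
    std::mt19937                       gen(seed);
    std::uniform_int_distribution<int> dist(lo, hi); // draw as int, then narrow
    std::vector<int8_t> v(n);
    for(auto &x : v)
    {
        x = static_cast<int8_t>(dist(gen));
    }
    return v;
}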