author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2017-10-09 15:05:40 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:35:24 +0000
commit     ab18212dd287cc0ec9b7c1a2c72455fe75ebd13d (patch)
tree       f802205d85785da671ddd1949ba61b9dc36a3035
parent     ed194b1fbec6627896c5c12f74460b9142b98f7d (diff)
download   ComputeLibrary-ab18212dd287cc0ec9b7c1a2c72455fe75ebd13d.tar.gz
COMPMID-616 - Optimizing GEMMLowp on NEON intrinsics
Change-Id: Ibbeff5d37249b6e8fc34ad496035a1511c9da5a3
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94072
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
-rw-r--r--  arm_compute/core/Dimensions.h                                      |   2
-rw-r--r--  arm_compute/core/NEON/NEKernels.h                                  |   2
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h      |   2
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h           | 103
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h     |  29
-rw-r--r--  arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h          | 112
-rw-r--r--  arm_compute/core/TensorShape.h                                     |   2
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                             |   1
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowp.h                    |  60
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h  |  83
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp                 | 509
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp           | 395
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp                | 431
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowp.cpp                          | 160
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp        | 163
-rw-r--r--  tests/datasets/LargeGEMMLowpDataset.h                              |   2
-rw-r--r--  tests/datasets/SmallGEMMLowpDataset.h                              |   2
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp                                 |  16
18 files changed, 1626 insertions(+), 448 deletions(-)
diff --git a/arm_compute/core/Dimensions.h b/arm_compute/core/Dimensions.h
index 96dd3711cb..70b6e1a301 100644
--- a/arm_compute/core/Dimensions.h
+++ b/arm_compute/core/Dimensions.h
@@ -119,8 +119,8 @@ public:
/** Collapse dimensions.
*
- * @param[in] first Dimensions into which the following @p n are collapsed.
* @param[in] n Number of dimensions to collapse into @p first.
+ * @param[in] first Dimensions into which the following @p n are collapsed.
*/
void collapse(size_t n, size_t first = 0)
{
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index 8d8ecda6de..918dfc6914 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -62,7 +62,9 @@
#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
index aa942c40fb..cdeb11d606 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h
@@ -46,7 +46,7 @@ public:
* @param[in] input Input tensor. Data types supported: U8
* @param[out] output Output tensor which stores the interleaved matrix. Data type supported: same as @p input.
* @param[in] block_height The height of the blocks to be interleaved.
- * @param[in] block_width The width of the blocks to be interleved.
+ * @param[in] block_width The width of the blocks to be interleaved.
* @param[in] transpose True if transpose operation must be performed, false otherwise.
*/
void configure(const ITensor *input, ITensor *output, unsigned int block_height, unsigned int block_width, bool transpose);
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
new file mode 100644
index 0000000000..77b2bdc177
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPFINALIZEKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWPFINALIZEKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** NEON kernel used to finalize the GEMMLowp result
+ *
+ * This kernel performs the following computations:
+ *
+ * -# Add offset terms to final result
+ * -# Multiply each entry of result and round to nearest integer
+ * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ */
+class NEGEMMLowpFinalizeKernel : public INEKernel
+{
+public:
+ /** Constructor */
+ NEGEMMLowpFinalizeKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ NEGEMMLowpFinalizeKernel(const NEGEMMLowpFinalizeKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ NEGEMMLowpFinalizeKernel &operator=(const NEGEMMLowpFinalizeKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NEGEMMLowpFinalizeKernel(NEGEMMLowpFinalizeKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NEGEMMLowpFinalizeKernel &operator=(NEGEMMLowpFinalizeKernel &&) = default;
+ /** Initialise the kernel's input and output.
+ *
+ * @note The input row-vectors @p vector_sum_col and @p vector_sum_row must be the output of @ref NEGEMMLowpMatrixBReductionKernel and @ref NEGEMMLowpMatrixAReductionKernel kernels.
+ * These 2 vectors are needed to handle the offset of the matrix product
+ * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
+ *
+ * @param[in] vector_sum_col Input row-vector of sums of all the entries in each column of input1.
+ * Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: S32
+ * @param[in] vector_sum_row Input row-vector of sums of all the entries in each row of input0.
+ * Note: vector_sum_row can be a nullptr in case b_offset = 0. Data type supported: same as @p vector_sum_col
+ * @param[in] mm_result Input tensor containing the result of @ref NEGEMMLowpMatrixMultiplyKernel. Data type supported: same as @p vector_sum_col
+ * @param[out] output Output tensor containing the result of GEMMLowp. Data type supported: U8
+ * @param[in] num_mtx_a_cols Number of matrix A columns
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ * @param[in] c_offset Offset to be added to each element of the output matrix
+ * @param[in] c_mult_int Value by which each entry of the result is multiplied.
+ * @param[in] shift Number of bits by which the result is shifted right.
+ */
+ void configure(const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *mm_result, ITensor *output, int32_t num_mtx_a_cols, int32_t a_offset, int32_t b_offset, int32_t c_offset,
+ int32_t c_mult_int, int32_t shift);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ /** Template function to run the finalize kernel
+ *
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+ */
+ template <bool add_a_offset, bool add_b_offset>
+ void finalize(const Window &window);
+ using FinalizeFunctionPtr = void (NEGEMMLowpFinalizeKernel::*)(const Window &window);
+
+ FinalizeFunctionPtr _func;
+ const ITensor *_vector_sum_col;
+ const ITensor *_vector_sum_row;
+ const ITensor *_mm_result;
+ ITensor *_output;
+ int32_t _a_offset;
+ int32_t _b_offset;
+ int32_t _c_offset;
+ int32_t _k_offset;
+ int32_t _c_mult_int;
+ int32_t _shift;
+ bool _slide_vector_sum_col;
+};
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_NEGEMMLOWPFINALIZEKERNEL_H__ */
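The header above describes the finalize arithmetic only in prose; the NEON implementation appears further down in src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp. As a reading aid, here is a minimal scalar sketch of the per-element math. The helper name finalize_scalar and its signature are illustrative only (not part of the patch); the real kernel performs the same steps 16 elements at a time and saturates via vqmovn_s32/vqmovun_s16 rather than an explicit clamp.

#include <algorithm>
#include <cstdint>

// Hypothetical scalar reference for one output element (x, y); it mirrors the per-lane
// arithmetic of NEGEMMLowpFinalizeKernel::finalize.
inline uint8_t finalize_scalar(int32_t mm_result,  // S32 matrix-multiply result at (x, y)
                               int32_t sum_col_x,  // vector_sum_col[x]: sum of column x of matrix B
                               int32_t sum_row_y,  // vector_sum_row[y]: sum of row y of matrix A
                               int32_t k,          // num_mtx_a_cols
                               int32_t a_offset, int32_t b_offset, int32_t c_offset,
                               int32_t c_mult_int, int32_t shift)
{
    int32_t acc = mm_result;
    acc += a_offset * sum_col_x;     // leftover term due to a_offset
    acc += b_offset * sum_row_y;     // leftover term due to b_offset
    acc += a_offset * b_offset * k;  // _k_offset
    acc += c_offset;                 // output offset
    acc *= c_mult_int;               // requantization multiplier
    acc >>= shift;                   // arithmetic shift right (the kernel uses vshlq_s32 with -shift)
    return static_cast<uint8_t>(std::min(255, std::max(0, acc)));  // clamp to [0..255] and cast to U8
}
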
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
index f526d213cc..670274b8f3 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h
@@ -35,12 +35,9 @@ class ITensor;
* @note @ref NEGEMMLowpMatrixMultiplyKernel low precision matrix product kernel
* This kernel performs the following computation:
*
- * -# Convert a values from uint8 to int32 and add a_offset to each of them.
- * -# Convert b values from uint8 to int32 and add b_offset to each of them.
- * -# Compute the int32 matrix product of the resulting a * b.
- * -# Add output_offset to each entry of the result.
- * -# Multiply each entry of the result and round to the nearest integer
- * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ * -# Convert a values from uint8 to int32
+ * -# Convert b values from uint8 to int32
+ * -# Compute the int32 matrix product of the resulting a * b and store the result as int32
*
*/
class NEGEMMLowpMatrixMultiplyKernel : public INEKernel
@@ -61,16 +58,12 @@ public:
* The input matrices @p input0 and @p input1 must be the output of the kernels: @ref NEGEMMInterleave4x4Kernel and @ref NEGEMMTranspose1xWKernel. These two
* kernels change the layout of the original matrices to be more cache-friendly.
*
- * @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: U8
- * @param[in] input1 Input tensor containing the transposed Matrix B. Data type supported: same as @p input0
- * @param[out] output Output tensor to store the result of matrix multiplication, Data type supported: same as @p input0
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_offset Offset to be added to each element of the output matrix
- * @param[in] output_mult_int Value to be multipied to each entry of the result.
- * @param[in] shift Number of bits to shift right the result.
+ * @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: U8
+ * @param[in] input1 Input tensor containing the transposed Matrix B. Data type supported: same as @p input0
+ * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
*/
- void configure(const ITensor *input0, const ITensor *input1, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
+ void configure(const ITensor *input0, const ITensor *input1, ITensor *output);
+
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -78,11 +71,7 @@ private:
const ITensor *_input0;
const ITensor *_input1;
ITensor *_output;
- int32_t _a_offset;
- int32_t _b_offset;
- int32_t _output_offset;
- int32_t _output_mult_int;
- int32_t _shift;
+ bool _slide_matrix_b;
};
} // namespace arm_compute
#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYKERNEL_H__*/
\ No newline at end of file
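With this change the matrix-multiply kernel no longer applies any offsets: it only accumulates U8 x U8 products into S32. For reference, its contract on plain (non-reshaped, row-major) matrices is equivalent to the sketch below; the function name and layout are assumptions for illustration, not part of the library.

#include <cstdint>
#include <vector>

// Hypothetical reference for the new kernel contract: U8 x U8 -> raw S32 matrix product,
// with no offsets applied at this stage.
std::vector<int32_t> gemmlowp_mm_reference(const std::vector<uint8_t> &a, // M x K, row-major
                                           const std::vector<uint8_t> &b, // K x N, row-major
                                           int M, int N, int K)
{
    std::vector<int32_t> out(static_cast<size_t>(M) * N, 0);
    for(int m = 0; m < M; ++m)
    {
        for(int n = 0; n < N; ++n)
        {
            int32_t acc = 0;
            for(int k = 0; k < K; ++k)
            {
                acc += static_cast<int32_t>(a[m * K + k]) * static_cast<int32_t>(b[k * N + n]);
            }
            // Stored as raw S32; offsets and requantization are applied later (e.g. by NEGEMMLowpFinalizeKernel)
            out[static_cast<size_t>(m) * N + n] = acc;
        }
    }
    return out;
}
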
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
new file mode 100644
index 0000000000..143e8b917b
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H__
+#define __ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Common interface for all NEON reduction kernels */
+class INEGEMMLowpReductionKernel : public INEKernel
+{
+public:
+ /** Constructor */
+ INEGEMMLowpReductionKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ INEGEMMLowpReductionKernel(const INEGEMMLowpReductionKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers)*/
+ INEGEMMLowpReductionKernel &operator=(const INEGEMMLowpReductionKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ INEGEMMLowpReductionKernel(INEGEMMLowpReductionKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ INEGEMMLowpReductionKernel &operator=(INEGEMMLowpReductionKernel &&) = default;
+
+public:
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] input Input tensor containing the interleaved or transposed matrix. Data type supported: U8
+ * @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
+ * @param[in] k Number of matrix A columns (or matrix B rows)
+ * @param[in] is_reshaped True if the input tensor has been reshaped
+ */
+ virtual void configure(const ITensor *input, ITensor *output, int32_t k, bool is_reshaped) = 0;
+
+protected:
+ const ITensor *_input;
+ ITensor *_output;
+ int32_t _k;
+ bool _is_reshaped;
+};
+
+/** NEON kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
+ *
+ * @note This stage is needed to handle the offset of the matrix product
+ * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
+ */
+class NEGEMMLowpMatrixAReductionKernel : public INEGEMMLowpReductionKernel
+{
+public:
+ /** Initialise the kernel's input and output.
+ *
+ * @note The input matrix @p mtx_a_interleaved4x4 must be the output of @ref NEGEMMInterleave4x4Kernel.
+ *
+ * @param[in] mtx_a_interleaved4x4 Input tensor containing the interleaved Matrix A. Data type supported: U8
+ * @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a_interleaved4x4. Data type supported: S32
+ * @param[in] num_mtx_a_cols Number of matrix A columns
+ * @param[in] is_interleaved4x4 True if the input tensor is interleaved4x4
+ */
+ void configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4) override;
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+};
+
+/** NEON kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
+ *
+ * @note This stage is needed to handle the offset of the matrix product
+ * https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
+ */
+class NEGEMMLowpMatrixBReductionKernel : public INEGEMMLowpReductionKernel
+{
+public:
+ /** Initialise the kernel's input and output.
+ *
+ * @note The input matrix @p mtx_b_transposed1xW must be the output of @ref NEGEMMTranspose1xWKernel kernel.
+ *
+ * @param[in] mtx_b_transposed1xW Input tensor containing the transposed Matrix B. Data type supported: U8
+ * @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b_transposed1xW. Data type supported: S32
+ * @param[in] num_mtx_b_rows Number of matrix B rows
+ * @param[in] is_transposed1xW True if the input tensor is transposed 1xW
+ */
+ void configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW) override;
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+};
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H__ */
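The two reduction kernels read the interleaved/transposed layouts, but the quantities they produce are simply per-row sums of matrix A and per-column sums of matrix B. A non-reshaped reference, purely illustrative (the function name and row-major layout are assumptions), could look like this:

#include <cstdint>
#include <vector>

// Hypothetical reference for the reductions on plain row-major matrices:
// vector_sum_row[i] = sum of row i of matrix A, vector_sum_col[j] = sum of column j of matrix B.
void gemmlowp_reductions_reference(const std::vector<uint8_t> &a, // M x K
                                   const std::vector<uint8_t> &b, // K x N
                                   int M, int N, int K,
                                   std::vector<int32_t> &vector_sum_row, // size M
                                   std::vector<int32_t> &vector_sum_col) // size N
{
    vector_sum_row.assign(M, 0);
    vector_sum_col.assign(N, 0);
    for(int m = 0; m < M; ++m)
    {
        for(int k = 0; k < K; ++k)
        {
            vector_sum_row[m] += a[m * K + k];
        }
    }
    for(int k = 0; k < K; ++k)
    {
        for(int n = 0; n < N; ++n)
        {
            vector_sum_col[n] += b[k * N + n];
        }
    }
}
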
diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h
index 3b395e74ce..ad102607e8 100644
--- a/arm_compute/core/TensorShape.h
+++ b/arm_compute/core/TensorShape.h
@@ -117,8 +117,8 @@ public:
/** Collapse the first n dimensions.
*
+ * @param[in] n Number of dimensions to collapse into @p first
* @param[in] first Dimensions into which the following @p n are collapsed.
- * @param[in] n Number of dimensions to collapse into @p first.
*/
void collapse(size_t n, size_t first = 0)
{
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index c614e50ff2..563ade288a 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -61,6 +61,7 @@
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h"
#include "arm_compute/runtime/NEON/functions/NEGaussian3x3.h"
#include "arm_compute/runtime/NEON/functions/NEGaussian5x5.h"
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
index 84850dbe9d..865c729d39 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowp.h
@@ -25,17 +25,13 @@
#define __ARM_COMPUTE_NEGEMMLOWP_H__
#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "arm_compute/runtime/IFunction.h"
-#include "arm_compute/runtime/Tensor.h"
-
-#include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/Tensor.h"
#include <memory>
@@ -43,12 +39,13 @@ namespace arm_compute
{
class ITensor;
-/** Basic function to execute GEMMLowp on NEON. This function calls the following NEON kernels:
-*
-* -# @ref NEGEMMInterleave4x4Kernel
-* -# @ref NEGEMMTranspose1xWKernel
-* -# @ref NEGEMMLowpMatrixMultiplyKernel
-*
+/** Basic function to execute GEMMLowp on NEON. This function calls the following NEON kernels/function:
+ *
+ * -# @ref NEGEMMLowpMatrixAReductionKernel
+ * -# @ref NEGEMMLowpMatrixBReductionKernel
+ * -# @ref NEGEMMLowpMatrixMultiplyCore
+ * -# @ref NEGEMMLowpFinalizeKernel
+ *
*/
class NEGEMMLowp : public IFunction
{
@@ -58,7 +55,7 @@ public:
/** Initialise the kernel's inputs, output
*
* @note GEMM_LOWP: low precision GEMM kernel
- * This kernel performs the following computation:
+ * This kernel performs the following computations:
*
* -# Convert a values from uint8 to int32 and add a_offset to each of them.
* -# Convert b values from uint8 to int32 and add b_offset to each of them.
@@ -72,35 +69,26 @@ public:
* @param[out] output Output tensor. Data type supported: same as @p a.
* @param[in] a_offset Offset to be added to each element of the matrix A.
* @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_offset Offset to be added to each element of the output matrix
+ * @param[in] c_offset Offset to be added to each element of the output matrix
* @param[in] output_mult_int Value to be multiplied to each element of the output matrix
* @param[in] shift Number of bits to shift right the result.
*/
- void configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
- /** Initialise the kernel's inputs, output
- *
- * @note GEMM_LOWP: low precision GEMM kernel
- * This kernel performs the following computation:
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: U8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: U32.
- */
- void configure(const ITensor *a, const ITensor *b, ITensor *output);
+ void configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t output_mult_int, int32_t shift);
// Inherited methods overridden:
void run() override;
private:
- MemoryGroup _memory_group;
- NEGEMMInterleave4x4Kernel _interleave_kernel;
- NEGEMMTranspose1xWKernel _transpose_kernel;
- NEGEMMLowpMatrixMultiplyKernel _mm_kernel;
- std::unique_ptr<NEGEMMLowpAssemblyBaseKernel> _mm_optimised_kernel;
- NEGEMMInterleaveBlockedKernel _interleave_blocked;
- NEGEMMInterleaveBlockedKernel _interleave_blocked_transposed;
- Tensor _tmp_a;
- Tensor _tmp_b;
+ MemoryGroup _memory_group;
+ NEGEMMLowpMatrixMultiplyCore _mm_func;
+ NEGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel;
+ NEGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
+ NEGEMMLowpFinalizeKernel _finalize_kernel;
+ Tensor _vector_sum_col;
+ Tensor _vector_sum_row;
+ Tensor _mm_output;
+ int32_t _a_offset;
+ int32_t _b_offset;
};
}
#endif /*__ARM_COMPUTE_NEGEMMLOWP_H__ */
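Using the updated NEGEMMLowp interface might look as follows; this is a sketch only, and the tensor shapes and quantization parameters (a_offset, b_offset, c_offset, output_mult_int, shift) are made-up example values, not taken from this patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_gemmlowp_example()
{
    // Example shapes: A is 32x64 (MxK), B is 64x128 (KxN), output is 32x128 (MxN).
    Tensor a, b, output;
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::U8));
    b.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::U8));
    output.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::U8));

    // a_offset, b_offset, c_offset, output_mult_int, shift: example quantization parameters
    NEGEMMLowp gemmlowp;
    gemmlowp.configure(&a, &b, &output, -10, -5, 10, 3, 8);

    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill a and b with quantized U8 data here ...

    gemmlowp.run();
}
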
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
new file mode 100644
index 0000000000..4866e781f5
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__
+#define __ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to execute GEMMLowpMatrixMultiplyCore on NEON. This function calls the following NEON kernels if the DOT product instruction is not available:
+ *
+ * -# @ref NEGEMMInterleave4x4Kernel
+ * -# @ref NEGEMMTranspose1xWKernel
+ * -# @ref NEGEMMLowpMatrixMultiplyKernel
+ *
+ * otherwise if the DOT product instruction is available:
+ *
+ * -# @ref NEGEMMInterleaveBlockedKernel
+ * -# @ref NEGEMMLowpAArch64V8P4Kernel
+ *
+*/
+class NEGEMMLowpMatrixMultiplyCore : public IFunction
+{
+public:
+ /** Constructor */
+ NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Initialise the kernel's inputs, output
+ *
+ * @note GEMM_LOWP: low precision GEMM kernel
+ * This kernel performs the following computations:
+ *
+ * -# Convert a values from uint8 to int32
+ * -# Convert b values from uint8 to int32
+ * -# Compute the int32 matrix product of the resulting a * b.
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: U8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
+ void configure(const ITensor *a, const ITensor *b, ITensor *output);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ MemoryGroup _memory_group;
+ std::unique_ptr<INEKernel> _mm_kernel;
+ std::unique_ptr<INEKernel> _mtx_a_reshape_kernel;
+ std::unique_ptr<INEKernel> _mtx_b_reshape_kernel;
+ Tensor _tmp_a;
+ Tensor _tmp_b;
+};
+}
+#endif /*__ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H__ */
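NEGEMMLowpMatrixMultiplyCore can also be used on its own when only the raw S32 accumulators are needed; a usage sketch under assumed shapes (illustrative only) follows.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_gemmlowp_core_example()
{
    // Example shapes: A is 32x64 (MxK), B is 64x128 (KxN); the output holds raw S32 accumulators.
    Tensor a, b, output;
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::U8));
    b.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::U8));
    output.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::S32));

    NEGEMMLowpMatrixMultiplyCore mm_core;
    mm_core.configure(&a, &b, &output);

    a.allocator()->allocate();
    b.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill a and b with quantized U8 data here ...

    mm_core.run();  // offsets/requantization can then be applied, e.g. by NEGEMMLowpFinalizeKernel
}
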
diff --git a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
new file mode 100644
index 0000000000..400c6d9d8c
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+using namespace arm_compute;
+
+namespace arm_compute
+{
+class Coordinates;
+} // namespace arm_compute
+
+template <bool add_a_offset, bool add_b_offset>
+void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
+{
+ const int32x4_t c_offset_s32 = vdupq_n_s32(_c_offset);
+ const int32x4_t shift_s32 = vdupq_n_s32(-_shift);
+
+ Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimZ);
+
+ if(add_a_offset && add_b_offset) // true, true
+ {
+ // Set window for vector_sum_col
+ Window win_vector_sum_col(collapsed_window);
+ win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+ if(!_slide_vector_sum_col)
+ {
+ win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ }
+
+ // Set window for vector_sum_row
+ Window win_vector_sum_row(collapsed_window);
+ win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+ win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+ Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col);
+ Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row);
+ Iterator mm_result(_mm_result, window);
+ Iterator out(_output, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ // Compute the leftover term due to a_offset.
+ int32x4x4_t a_offset_term_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 12)
+ }
+ };
+
+ a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset);
+ a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset);
+ a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset);
+ a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset);
+
+ // Compute the leftover term due to b_offset.
+ int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row.ptr()) + id.y());
+ b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset);
+
+ // Add a_offset_term_s32 and b_offset_term_s32
+ int32x4x4_t offset_term_s32 =
+ {
+ {
+ vdupq_n_s32(_k_offset),
+ vdupq_n_s32(_k_offset),
+ vdupq_n_s32(_k_offset),
+ vdupq_n_s32(_k_offset)
+ }
+ };
+
+ offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32));
+ offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32));
+ offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32));
+ offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32));
+
+ // Add c_offset
+ offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], c_offset_s32);
+ offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], c_offset_s32);
+ offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], c_offset_s32);
+ offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], c_offset_s32);
+
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
+ }
+ };
+
+ // Add the offset terms to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);
+
+ // Multiply by c_mult_int
+ in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
+ in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
+ in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
+ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
+
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
+
+ // Convert S32 to U16
+ const int16x8x2_t in_u16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])),
+ }
+ };
+
+ // Convert U16 to U8
+ const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+
+ vst1q_u8(out.ptr(), out_u8);
+ },
+ vector_sum_col, vector_sum_row, mm_result, out);
+ }
+ else if(!add_a_offset && add_b_offset) // false, true
+ {
+ // Set window for vector_sum_row
+ Window win_vector_sum_row(collapsed_window);
+ win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+ win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+ Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row);
+ Iterator mm_result(_mm_result, window);
+ Iterator out(_output, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ // Compute the leftover term due to b_offset.
+ int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row.ptr()) + id.y());
+ b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset);
+
+ // Add b_offset_term_s32 and c_offset_term_s32
+ int32x4_t offset_term_s32 = vaddq_s32(b_offset_term_s32, c_offset_s32);
+
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
+ }
+ };
+
+ // Add the offset terms to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32);
+
+ // Multiply by c_mult_int
+ in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
+ in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
+ in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
+ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
+
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
+
+ // Convert S32 to U16
+ const int16x8x2_t in_u16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])),
+ }
+ };
+
+ // Convert U16 to U8
+ const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+
+ vst1q_u8(out.ptr(), out_u8);
+ },
+ vector_sum_row, mm_result, out);
+ }
+ else if(add_a_offset && !add_b_offset) // true, false
+ {
+ // Set window for vector_sum_col
+ Window win_vector_sum_col(collapsed_window);
+ win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+ if(!_slide_vector_sum_col)
+ {
+ win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ }
+
+ Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col);
+ Iterator mm_result(_mm_result, window);
+ Iterator out(_output, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ // Compute the leftover term due to a_offset.
+ int32x4x4_t a_offset_term_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(vector_sum_col.ptr()) + 12)
+ }
+ };
+
+ a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], _a_offset);
+ a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], _a_offset);
+ a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset);
+ a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset);
+
+ // Add a_offset_term_s32 and b_offset_term_s32
+ int32x4x4_t offset_term_s32 =
+ {
+ {
+ vaddq_s32(c_offset_s32, a_offset_term_s32.val[0]),
+ vaddq_s32(c_offset_s32, a_offset_term_s32.val[1]),
+ vaddq_s32(c_offset_s32, a_offset_term_s32.val[2]),
+ vaddq_s32(c_offset_s32, a_offset_term_s32.val[3])
+ }
+ };
+
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
+ }
+ };
+
+ // Add the offset terms to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);
+
+ // Multiply by c_mult_int
+ in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
+ in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
+ in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
+ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
+
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
+
+ // Convert S32 to U16
+ const int16x8x2_t in_u16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
+ }
+ };
+
+ // Convert U16 to U8
+ const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+
+ vst1q_u8(out.ptr(), out_u8);
+ },
+ vector_sum_col, mm_result, out);
+ }
+ else // false, false
+ {
+ Iterator mm_result(_mm_result, window);
+ Iterator out(_output, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
+ }
+ };
+
+ // Add the offset terms to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], c_offset_s32);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], c_offset_s32);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], c_offset_s32);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], c_offset_s32);
+
+ // Multiply by c_mult_int
+ in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
+ in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
+ in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
+ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
+
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
+
+ // Convert S32 to U16
+ const int16x8x2_t in_u16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
+ }
+ };
+
+ // Convert U16 to U8
+ const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+
+ vst1q_u8(out.ptr(), out_u8);
+ },
+ mm_result, out);
+ }
+}
+
+NEGEMMLowpFinalizeKernel::NEGEMMLowpFinalizeKernel()
+ : _func(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _c_offset(0), _k_offset(0), _c_mult_int(0), _shift(0),
+ _slide_vector_sum_col(true)
+{
+}
+
+void NEGEMMLowpFinalizeKernel::configure(const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *mm_result, ITensor *output, int32_t num_mtx_a_cols, int32_t a_offset,
+ int32_t b_offset,
+ int32_t c_offset, int32_t c_mult_int, int32_t shift)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+
+ TensorShape mm_result_shape = mm_result->info()->tensor_shape();
+ TensorShape output_shape = output->info()->tensor_shape();
+
+ mm_result_shape.collapse(2);
+ output_shape.collapse(2);
+
+ ARM_COMPUTE_ERROR_ON_MSG(mm_result_shape[2] != output_shape[2], "mm_result tensor must have the same number of batches of output tensor");
+
+ // If a_offset == 0, vector_sum_col can be a nullptr
+ if(a_offset != 0)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON(vector_sum_col->info()->dimension(0) != mm_result->info()->dimension(0));
+
+ TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape();
+ vector_sum_col_shape.collapse(1);
+
+ // Check if vector_sum_col_shape should be slid or not
+ // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
+ // This scenario can happen when the matrix multiplication is used to perform a convolution operation
+ _slide_vector_sum_col = vector_sum_col_shape[1] != 1;
+ }
+
+ // If b_offset == 0, vector_sum_row can be a nullptr
+ if(b_offset != 0)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON(vector_sum_row->info()->dimension(0) != mm_result->info()->dimension(1));
+
+ TensorShape vector_sum_row_shape = vector_sum_row->info()->tensor_shape();
+ vector_sum_row_shape.collapse(1);
+
+ ARM_COMPUTE_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[2], "mm_result tensor must have the same number of batches of output tensor");
+
+ if(a_offset != 0)
+ {
+ TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape();
+ vector_sum_col_shape.collapse(1);
+
+ ARM_COMPUTE_ERROR_ON_MSG(vector_sum_col_shape[1] != 1
+ && vector_sum_col_shape[1] != vector_sum_row_shape[1],
+ "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
+ }
+ }
+
+ _vector_sum_col = vector_sum_col;
+ _vector_sum_row = vector_sum_row;
+ _mm_result = mm_result;
+ _output = output;
+ _a_offset = a_offset;
+ _b_offset = b_offset;
+ _k_offset = a_offset * b_offset * num_mtx_a_cols;
+ _c_offset = c_offset;
+ _c_mult_int = c_mult_int;
+ _shift = shift;
+
+ constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal mm_result_access(mm_result->info(), 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_result_access(output->info(), 0, num_elems_processed_per_iteration);
+
+ // Depending on a_offset and b_offset, we can have 4 cases:
+ // a_offset != 0 && b_offset != 0
+ // a_offset = 0 && b_offset != 0
+ // a_offset != 0 && b_offset = 0
+ // a_offset = 0 && b_offset = 0
+ if(a_offset != 0 && b_offset != 0)
+ {
+ // Set the function to use
+ _func = &NEGEMMLowpFinalizeKernel::finalize<true, true>;
+
+ AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0);
+ AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ vector_sum_col_access,
+ vector_sum_row_access,
+ mm_result_access,
+ output_result_access);
+ }
+ else if(a_offset == 0 && b_offset != 0)
+ {
+ // Set the function to use
+ _func = &NEGEMMLowpFinalizeKernel::finalize<false, true>;
+
+ AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0);
+
+ update_window_and_padding(win,
+ vector_sum_row_access,
+ mm_result_access,
+ output_result_access);
+ }
+ else if(a_offset != 0 && b_offset == 0)
+ {
+ // Set the function to use
+ _func = &NEGEMMLowpFinalizeKernel::finalize<true, false>;
+
+ AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ vector_sum_col_access,
+ mm_result_access,
+ output_result_access);
+ }
+ else
+ {
+ // Set the function to use
+ _func = &NEGEMMLowpFinalizeKernel::finalize<false, false>;
+
+ update_window_and_padding(win,
+ mm_result_access,
+ output_result_access);
+ }
+
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+
+ INEKernel::configure(win);
+}
+
+void NEGEMMLowpFinalizeKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ (this->*_func)(window);
+}
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index cbba4461a2..3e614a8bfc 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2017 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,6 +23,7 @@
*/
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
+#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
@@ -45,35 +46,43 @@ class Coordinates;
} // namespace arm_compute
NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel()
- : _input0(nullptr), _input1(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _output_offset(0), _output_mult_int(0), _shift(0)
+ : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true)
{
}
-void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output,
- int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift)
+void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
-
- _input0 = input0;
- _input1 = input1;
- _output = output;
- _a_offset = a_offset;
- _b_offset = b_offset;
- _output_offset = output_offset;
- _output_mult_int = output_mult_int;
- _shift = shift;
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+
+ // Check if matrix B should be slid or not
+ // Don't slide matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
+ // This scenario can happen when the matrix multiplication is used to perform a convolution operation
+ TensorShape in0_shape = input0->info()->tensor_shape();
+ TensorShape in1_shape = input1->info()->tensor_shape();
+ TensorShape out_shape = output->info()->tensor_shape();
+
+ in0_shape.collapse(2);
+ in1_shape.collapse(2);
+ out_shape.collapse(2);
+
+ ARM_COMPUTE_ERROR_ON_MSG(in0_shape[2] != out_shape[2], "Output tensor must have the same number of batches of input0 tensor");
+ ARM_COMPUTE_ERROR_ON_MSG(in1_shape[2] != 1 && in0_shape[2] != in1_shape[2], "Input1 tensor must have the same number of batches of input0 or the number of batches must be set to 1");
+
+ _input0 = input0;
+ _input1 = input1;
+ _output = output;
+ _slide_matrix_b = in1_shape[2] != 1;
constexpr unsigned int num_elems_processed_per_iteration_x = 16;
constexpr unsigned int num_elems_processed_per_iteration_y = 4;
Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
- AccessWindowHorizontal in0_access(input0->info(), 0, num_elems_processed_per_iteration_x);
+ AccessWindowStatic in0_access(input0->info(), 0, 0, ceil_to_multiple(input0->info()->dimension(0), 8), input0->info()->dimension(1));
AccessWindowHorizontal in1_access(input1->info(), 0, num_elems_processed_per_iteration_x);
+ AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
update_window_and_padding(win, in0_access, in1_access, output_access);
@@ -88,337 +97,145 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
const size_t in_b_stride = _input1->info()->strides_in_bytes()[1];
- const size_t out_stride = _output->info()->strides_in_bytes()[1];
+ const size_t out_stride = _output->info()->strides_in_bytes()[1] / _output->info()->element_size();
- /* Set step_x and step_y for matrix A. Scale by a factor of 4 the Y range as the input interleaved matrix A has 4 times less the rows of the output matrix */
+ // Set step_x and step_y for matrix A. Scale the Y range by a factor of 4 as the interleaved input matrix A has 4 times fewer rows than the output matrix
Window win_a(window);
win_a.set(Window::DimX, Window::Dimension(0, 0, 0));
- win_a.set(Window::DimY, Window::Dimension(window.y().start() >> 2, window.y().end() >> 2, 1));
+ win_a.set(Window::DimY, Window::Dimension(window.y().start() / 4, window.y().end() / 4, 1));
- /* Set step_x and step_y for matrix B. Scale by a factor of 16 the X range as the input transposed matrix A has 16 times less the cols of the output matrix */
- Window win_b(window);
- win_b.set(Window::DimX, Window::Dimension(window.x().start() >> 4, window.x().end() >> 4, in_b_stride));
+ // Set step_x and step_y for matrix B. Scale the X range by a factor of 16 as the transposed input matrix B has 16 times fewer columns than the output matrix
+ Window win_b;
+ // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
+ // This scenario can happen when the matrix multiplication is used to perform a convolution operation
+ if(_slide_matrix_b)
+ {
+ win_b = window;
+ }
+ win_b.set(Window::DimX, Window::Dimension(window.x().start() / 16, window.x().end() / 16, in_b_stride));
win_b.set(Window::DimY, Window::Dimension(0, 0, 0));
- /* The step x and step y for the output matrix has been already set using in configure() */
+ // The step x and step y for the output matrix have already been set in configure()
Iterator ina(_input0, win_a);
Iterator inb(_input1, win_b);
Iterator out(_output, window);
- const int32x4_t voffset_a = vdupq_n_s32(_a_offset);
- const int32x4_t voffset_b = vdupq_n_s32(_b_offset);
- const int32x4_t vshiftr = vdupq_n_s32(-_shift);
-
const int width_b = _input1->info()->dimension(0);
// The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
// The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
- execute_window_loop(window, [&](const Coordinates &)
+ execute_window_loop(window, [&](const Coordinates & id)
{
const uint8_t *mtx_a0 = ina.ptr();
const uint8_t *mtx_b0 = inb.ptr();
+ // Note: Since the inputs are all positive, we can use uint32_t
// Accumulators for the block 0
- int32x4x4_t c0 =
+ uint32x4x4_t c0 =
{
{
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
// Accumulators for the block 1
- int32x4x4_t c1 =
+ uint32x4x4_t c1 =
{
{
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
// Accumulators for the block 2
- int32x4x4_t c2 =
+ uint32x4x4_t c2 =
{
{
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
// Accumulators for the block 3
- int32x4x4_t c3 =
+ uint32x4x4_t c3 =
{
{
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset),
- vdupq_n_s32(_output_offset)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
- int k = 0;
- // This for loop performs 4 accumulations per iteration
- for(; k <= (width_b - 64); k += 64, mtx_a0 += 16, mtx_b0 += 64)
+ for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
{
- const uint8x8_t p00 = vld1_u8(mtx_a0 + 0);
- const uint8x8_t p01 = vld1_u8(mtx_a0 + 8);
- const uint8x8_t q00l = vld1_u8(mtx_b0 + 0);
- const uint8x8_t q00h = vld1_u8(mtx_b0 + 8);
- const uint8x8_t q01l = vld1_u8(mtx_b0 + 16);
- const uint8x8_t q01h = vld1_u8(mtx_b0 + 24);
- const uint8x8_t q02l = vld1_u8(mtx_b0 + 32);
- const uint8x8_t q02h = vld1_u8(mtx_b0 + 40);
- const uint8x8_t q03l = vld1_u8(mtx_b0 + 48);
- const uint8x8_t q03h = vld1_u8(mtx_b0 + 56);
-
- const int32x4_t ia0l = vaddw_s16(voffset_a, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p00))));
- const int32x4_t ia0h = vaddw_s16(voffset_a, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(p00))));
- const int32x4_t ia1l = vaddw_s16(voffset_a, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p01))));
- const int32x4_t ia1h = vaddw_s16(voffset_a, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(p01))));
-
- const int32x2x4_t ia0 =
- {
- {
- vget_low_s32(ia0l),
- vget_high_s32(ia0l),
- vget_low_s32(ia0h),
- vget_high_s32(ia0h)
- }
- };
-
- const int32x2x4_t ia1 =
- {
- {
- vget_low_s32(ia1l),
- vget_high_s32(ia1l),
- vget_low_s32(ia1h),
- vget_high_s32(ia1h)
- }
- };
-
- const int32x4x4_t ib0 =
- {
- {
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q00l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q00l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q00h)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q00h))))
- }
- };
+ const uint8x8_t a00_u8 = vld1_u8(mtx_a0);
+ const uint8x16_t b00_u8 = vld1q_u8(mtx_b0);
- const int32x4x4_t ib1 =
- {
- {
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q01l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q01l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q01h)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q01h))))
- }
- };
+ // Convert a00_u8 to uint16_t and get the lower part
+ const uint16x4_t a00_u16 = vget_low_u16(vmovl_u8(a00_u8));
- const int32x4x4_t ib2 =
+ // Convert b00_u8 to uint16_t
+ const uint16x4x4_t b00_u16 =
{
{
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q02l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q02l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q02h)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q02h))))
- }
- };
-
- const int32x4x4_t ib3 =
- {
- {
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q03l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q03l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q03h)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q03h))))
- }
- };
-
- // 4x4 block 0 - Accumulation 0
- c0.val[0] = vmlaq_lane_s32(c0.val[0], ib0.val[0], ia0.val[0], 0);
- c0.val[1] = vmlaq_lane_s32(c0.val[1], ib0.val[0], ia0.val[0], 1);
- c0.val[2] = vmlaq_lane_s32(c0.val[2], ib0.val[0], ia0.val[1], 0);
- c0.val[3] = vmlaq_lane_s32(c0.val[3], ib0.val[0], ia0.val[1], 1);
- // 4x4 block 0 - Accumulation 1
- c0.val[0] = vmlaq_lane_s32(c0.val[0], ib1.val[0], ia0.val[2], 0);
- c0.val[1] = vmlaq_lane_s32(c0.val[1], ib1.val[0], ia0.val[2], 1);
- c0.val[2] = vmlaq_lane_s32(c0.val[2], ib1.val[0], ia0.val[3], 0);
- c0.val[3] = vmlaq_lane_s32(c0.val[3], ib1.val[0], ia0.val[3], 1);
- // 4x4 block 0 - Accumulation 2
- c0.val[0] = vmlaq_lane_s32(c0.val[0], ib2.val[0], ia1.val[0], 0);
- c0.val[1] = vmlaq_lane_s32(c0.val[1], ib2.val[0], ia1.val[0], 1);
- c0.val[2] = vmlaq_lane_s32(c0.val[2], ib2.val[0], ia1.val[1], 0);
- c0.val[3] = vmlaq_lane_s32(c0.val[3], ib2.val[0], ia1.val[1], 1);
- // 4x4 block 0 - Accumulation 3
- c0.val[0] = vmlaq_lane_s32(c0.val[0], ib3.val[0], ia1.val[2], 0);
- c0.val[1] = vmlaq_lane_s32(c0.val[1], ib3.val[0], ia1.val[2], 1);
- c0.val[2] = vmlaq_lane_s32(c0.val[2], ib3.val[0], ia1.val[3], 0);
- c0.val[3] = vmlaq_lane_s32(c0.val[3], ib3.val[0], ia1.val[3], 1);
-
- // 4x4 block 1 - Accumulation 0
- c1.val[0] = vmlaq_lane_s32(c1.val[0], ib0.val[1], ia0.val[0], 0);
- c1.val[1] = vmlaq_lane_s32(c1.val[1], ib0.val[1], ia0.val[0], 1);
- c1.val[2] = vmlaq_lane_s32(c1.val[2], ib0.val[1], ia0.val[1], 0);
- c1.val[3] = vmlaq_lane_s32(c1.val[3], ib0.val[1], ia0.val[1], 1);
- // 4x4 block 1 - Accumulation 1
- c1.val[0] = vmlaq_lane_s32(c1.val[0], ib1.val[1], ia0.val[2], 0);
- c1.val[1] = vmlaq_lane_s32(c1.val[1], ib1.val[1], ia0.val[2], 1);
- c1.val[2] = vmlaq_lane_s32(c1.val[2], ib1.val[1], ia0.val[3], 0);
- c1.val[3] = vmlaq_lane_s32(c1.val[3], ib1.val[1], ia0.val[3], 1);
- // 4x4 block 1 - Accumulation 2
- c1.val[0] = vmlaq_lane_s32(c1.val[0], ib2.val[1], ia1.val[0], 0);
- c1.val[1] = vmlaq_lane_s32(c1.val[1], ib2.val[1], ia1.val[0], 1);
- c1.val[2] = vmlaq_lane_s32(c1.val[2], ib2.val[1], ia1.val[1], 0);
- c1.val[3] = vmlaq_lane_s32(c1.val[3], ib2.val[1], ia1.val[1], 1);
- // 4x4 block 1 - Accumulation 3
- c1.val[0] = vmlaq_lane_s32(c1.val[0], ib3.val[1], ia1.val[2], 0);
- c1.val[1] = vmlaq_lane_s32(c1.val[1], ib3.val[1], ia1.val[2], 1);
- c1.val[2] = vmlaq_lane_s32(c1.val[2], ib3.val[1], ia1.val[3], 0);
- c1.val[3] = vmlaq_lane_s32(c1.val[3], ib3.val[1], ia1.val[3], 1);
-
- // 4x4 block 2 - Accumulation 0
- c2.val[0] = vmlaq_lane_s32(c2.val[0], ib0.val[2], ia0.val[0], 0);
- c2.val[1] = vmlaq_lane_s32(c2.val[1], ib0.val[2], ia0.val[0], 1);
- c2.val[2] = vmlaq_lane_s32(c2.val[2], ib0.val[2], ia0.val[1], 0);
- c2.val[3] = vmlaq_lane_s32(c2.val[3], ib0.val[2], ia0.val[1], 1);
- // 4x4 block 2 - Accumulation 1
- c2.val[0] = vmlaq_lane_s32(c2.val[0], ib1.val[2], ia0.val[2], 0);
- c2.val[1] = vmlaq_lane_s32(c2.val[1], ib1.val[2], ia0.val[2], 1);
- c2.val[2] = vmlaq_lane_s32(c2.val[2], ib1.val[2], ia0.val[3], 0);
- c2.val[3] = vmlaq_lane_s32(c2.val[3], ib1.val[2], ia0.val[3], 1);
- // 4x4 block 2 - Accumulation 2
- c2.val[0] = vmlaq_lane_s32(c2.val[0], ib2.val[2], ia1.val[0], 0);
- c2.val[1] = vmlaq_lane_s32(c2.val[1], ib2.val[2], ia1.val[0], 1);
- c2.val[2] = vmlaq_lane_s32(c2.val[2], ib2.val[2], ia1.val[1], 0);
- c2.val[3] = vmlaq_lane_s32(c2.val[3], ib2.val[2], ia1.val[1], 1);
- // 4x4 block 2 - Accumulation 3
- c2.val[0] = vmlaq_lane_s32(c2.val[0], ib3.val[2], ia1.val[2], 0);
- c2.val[1] = vmlaq_lane_s32(c2.val[1], ib3.val[2], ia1.val[2], 1);
- c2.val[2] = vmlaq_lane_s32(c2.val[2], ib3.val[2], ia1.val[3], 0);
- c2.val[3] = vmlaq_lane_s32(c2.val[3], ib3.val[2], ia1.val[3], 1);
-
- // 4x4 block 3 - Accumulation 0
- c3.val[0] = vmlaq_lane_s32(c3.val[0], ib0.val[3], ia0.val[0], 0);
- c3.val[1] = vmlaq_lane_s32(c3.val[1], ib0.val[3], ia0.val[0], 1);
- c3.val[2] = vmlaq_lane_s32(c3.val[2], ib0.val[3], ia0.val[1], 0);
- c3.val[3] = vmlaq_lane_s32(c3.val[3], ib0.val[3], ia0.val[1], 1);
- // 4x4 block 3 - Accumulation 1
- c3.val[0] = vmlaq_lane_s32(c3.val[0], ib1.val[3], ia0.val[2], 0);
- c3.val[1] = vmlaq_lane_s32(c3.val[1], ib1.val[3], ia0.val[2], 1);
- c3.val[2] = vmlaq_lane_s32(c3.val[2], ib1.val[3], ia0.val[3], 0);
- c3.val[3] = vmlaq_lane_s32(c3.val[3], ib1.val[3], ia0.val[3], 1);
- // 4x4 block 3 - Accumulation 2
- c3.val[0] = vmlaq_lane_s32(c3.val[0], ib2.val[3], ia1.val[0], 0);
- c3.val[1] = vmlaq_lane_s32(c3.val[1], ib2.val[3], ia1.val[0], 1);
- c3.val[2] = vmlaq_lane_s32(c3.val[2], ib2.val[3], ia1.val[1], 0);
- c3.val[3] = vmlaq_lane_s32(c3.val[3], ib2.val[3], ia1.val[1], 1);
- // 4x4 block 3 - Accumulation 3
- c3.val[0] = vmlaq_lane_s32(c3.val[0], ib3.val[3], ia1.val[2], 0);
- c3.val[1] = vmlaq_lane_s32(c3.val[1], ib3.val[3], ia1.val[2], 1);
- c3.val[2] = vmlaq_lane_s32(c3.val[2], ib3.val[3], ia1.val[3], 0);
- c3.val[3] = vmlaq_lane_s32(c3.val[3], ib3.val[3], ia1.val[3], 1);
- }
-
- // This for loop handles the left-over accumulations
- for(; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
- {
- const uint8x8_t p00 = vld1_u8(mtx_a0);
- const uint8x8_t q00l = vld1_u8(mtx_b0);
- const uint8x8_t q00h = vld1_u8(mtx_b0 + 8);
-
- const int32x4_t ia0 = vaddw_s16(voffset_a, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(p00))));
-
- const int32x2x2_t ia =
- {
- {
- vget_low_s32(ia0),
- vget_high_s32(ia0)
- }
- };
-
- const int32x4x4_t ib0 =
- {
- {
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q00l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q00l)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(q00h)))),
- vaddw_s16(voffset_b, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(q00h))))
+ vget_low_u16(vmovl_u8(vget_low_u8(b00_u8))),
+ vget_high_u16(vmovl_u8(vget_low_u8(b00_u8))),
+ vget_low_u16(vmovl_u8(vget_high_u8(b00_u8))),
+ vget_high_u16(vmovl_u8(vget_high_u8(b00_u8)))
}
};
// 4x4 block 0
- c0.val[0] = vmlaq_lane_s32(c0.val[0], ib0.val[0], ia.val[0], 0);
- c0.val[1] = vmlaq_lane_s32(c0.val[1], ib0.val[0], ia.val[0], 1);
- c0.val[2] = vmlaq_lane_s32(c0.val[2], ib0.val[0], ia.val[1], 0);
- c0.val[3] = vmlaq_lane_s32(c0.val[3], ib0.val[0], ia.val[1], 1);
+ c0.val[0] = vmlal_lane_u16(c0.val[0], b00_u16.val[0], a00_u16, 0);
+ c0.val[1] = vmlal_lane_u16(c0.val[1], b00_u16.val[1], a00_u16, 0);
+ c0.val[2] = vmlal_lane_u16(c0.val[2], b00_u16.val[2], a00_u16, 0);
+ c0.val[3] = vmlal_lane_u16(c0.val[3], b00_u16.val[3], a00_u16, 0);
// 4x4 block 1
- c1.val[0] = vmlaq_lane_s32(c1.val[0], ib0.val[1], ia.val[0], 0);
- c1.val[1] = vmlaq_lane_s32(c1.val[1], ib0.val[1], ia.val[0], 1);
- c1.val[2] = vmlaq_lane_s32(c1.val[2], ib0.val[1], ia.val[1], 0);
- c1.val[3] = vmlaq_lane_s32(c1.val[3], ib0.val[1], ia.val[1], 1);
+ c1.val[0] = vmlal_lane_u16(c1.val[0], b00_u16.val[0], a00_u16, 1);
+ c1.val[1] = vmlal_lane_u16(c1.val[1], b00_u16.val[1], a00_u16, 1);
+ c1.val[2] = vmlal_lane_u16(c1.val[2], b00_u16.val[2], a00_u16, 1);
+ c1.val[3] = vmlal_lane_u16(c1.val[3], b00_u16.val[3], a00_u16, 1);
// 4x4 block 2
- c2.val[0] = vmlaq_lane_s32(c2.val[0], ib0.val[2], ia.val[0], 0);
- c2.val[1] = vmlaq_lane_s32(c2.val[1], ib0.val[2], ia.val[0], 1);
- c2.val[2] = vmlaq_lane_s32(c2.val[2], ib0.val[2], ia.val[1], 0);
- c2.val[3] = vmlaq_lane_s32(c2.val[3], ib0.val[2], ia.val[1], 1);
+ c2.val[0] = vmlal_lane_u16(c2.val[0], b00_u16.val[0], a00_u16, 2);
+ c2.val[1] = vmlal_lane_u16(c2.val[1], b00_u16.val[1], a00_u16, 2);
+ c2.val[2] = vmlal_lane_u16(c2.val[2], b00_u16.val[2], a00_u16, 2);
+ c2.val[3] = vmlal_lane_u16(c2.val[3], b00_u16.val[3], a00_u16, 2);
// 4x4 block 3
- c3.val[0] = vmlaq_lane_s32(c3.val[0], ib0.val[3], ia.val[0], 0);
- c3.val[1] = vmlaq_lane_s32(c3.val[1], ib0.val[3], ia.val[0], 1);
- c3.val[2] = vmlaq_lane_s32(c3.val[2], ib0.val[3], ia.val[1], 0);
- c3.val[3] = vmlaq_lane_s32(c3.val[3], ib0.val[3], ia.val[1], 1);
+ c3.val[0] = vmlal_lane_u16(c3.val[0], b00_u16.val[0], a00_u16, 3);
+ c3.val[1] = vmlal_lane_u16(c3.val[1], b00_u16.val[1], a00_u16, 3);
+ c3.val[2] = vmlal_lane_u16(c3.val[2], b00_u16.val[2], a00_u16, 3);
+ c3.val[3] = vmlal_lane_u16(c3.val[3], b00_u16.val[3], a00_u16, 3);
}
- c0.val[0] = vshlq_s32(vmulq_n_s32(c0.val[0], _output_mult_int), vshiftr);
- c0.val[1] = vshlq_s32(vmulq_n_s32(c0.val[1], _output_mult_int), vshiftr);
- c0.val[2] = vshlq_s32(vmulq_n_s32(c0.val[2], _output_mult_int), vshiftr);
- c0.val[3] = vshlq_s32(vmulq_n_s32(c0.val[3], _output_mult_int), vshiftr);
-
- c1.val[0] = vshlq_s32(vmulq_n_s32(c1.val[0], _output_mult_int), vshiftr);
- c1.val[1] = vshlq_s32(vmulq_n_s32(c1.val[1], _output_mult_int), vshiftr);
- c1.val[2] = vshlq_s32(vmulq_n_s32(c1.val[2], _output_mult_int), vshiftr);
- c1.val[3] = vshlq_s32(vmulq_n_s32(c1.val[3], _output_mult_int), vshiftr);
-
- c2.val[0] = vshlq_s32(vmulq_n_s32(c2.val[0], _output_mult_int), vshiftr);
- c2.val[1] = vshlq_s32(vmulq_n_s32(c2.val[1], _output_mult_int), vshiftr);
- c2.val[2] = vshlq_s32(vmulq_n_s32(c2.val[2], _output_mult_int), vshiftr);
- c2.val[3] = vshlq_s32(vmulq_n_s32(c2.val[3], _output_mult_int), vshiftr);
-
- c3.val[0] = vshlq_s32(vmulq_n_s32(c3.val[0], _output_mult_int), vshiftr);
- c3.val[1] = vshlq_s32(vmulq_n_s32(c3.val[1], _output_mult_int), vshiftr);
- c3.val[2] = vshlq_s32(vmulq_n_s32(c3.val[2], _output_mult_int), vshiftr);
- c3.val[3] = vshlq_s32(vmulq_n_s32(c3.val[3], _output_mult_int), vshiftr);
-
- const uint8x16x4_t r =
- {
- {
- vcombine_u8(vqmovun_s16(vcombine_s16(vqmovn_s32(c0.val[0]), vqmovn_s32(c1.val[0]))),
- vqmovun_s16(vcombine_s16(vqmovn_s32(c2.val[0]), vqmovn_s32(c3.val[0])))),
- vcombine_u8(vqmovun_s16(vcombine_s16(vqmovn_s32(c0.val[1]), vqmovn_s32(c1.val[1]))),
- vqmovun_s16(vcombine_s16(vqmovn_s32(c2.val[1]), vqmovn_s32(c3.val[1])))),
- vcombine_u8(vqmovun_s16(vcombine_s16(vqmovn_s32(c0.val[2]), vqmovn_s32(c1.val[2]))),
- vqmovun_s16(vcombine_s16(vqmovn_s32(c2.val[2]), vqmovn_s32(c3.val[2])))),
- vcombine_u8(vqmovun_s16(vcombine_s16(vqmovn_s32(c0.val[3]), vqmovn_s32(c1.val[3]))),
- vqmovun_s16(vcombine_s16(vqmovn_s32(c2.val[3]), vqmovn_s32(c3.val[3]))))
- }
- };
-
- uint8_t *const mtx_out = out.ptr();
- vst1q_u8(mtx_out + 0 * out_stride, r.val[0]);
- vst1q_u8(mtx_out + 1 * out_stride, r.val[1]);
- vst1q_u8(mtx_out + 2 * out_stride, r.val[2]);
- vst1q_u8(mtx_out + 3 * out_stride, r.val[3]);
+ auto mtx_out = reinterpret_cast<int32_t *>(out.ptr());
+ vst1q_s32(mtx_out + 0 * out_stride + 0, vreinterpretq_s32_u32(c0.val[0]));
+ vst1q_s32(mtx_out + 0 * out_stride + 4, vreinterpretq_s32_u32(c0.val[1]));
+ vst1q_s32(mtx_out + 0 * out_stride + 8, vreinterpretq_s32_u32(c0.val[2]));
+ vst1q_s32(mtx_out + 0 * out_stride + 12, vreinterpretq_s32_u32(c0.val[3]));
+ vst1q_s32(mtx_out + 1 * out_stride + 0, vreinterpretq_s32_u32(c1.val[0]));
+ vst1q_s32(mtx_out + 1 * out_stride + 4, vreinterpretq_s32_u32(c1.val[1]));
+ vst1q_s32(mtx_out + 1 * out_stride + 8, vreinterpretq_s32_u32(c1.val[2]));
+ vst1q_s32(mtx_out + 1 * out_stride + 12, vreinterpretq_s32_u32(c1.val[3]));
+ vst1q_s32(mtx_out + 2 * out_stride + 0, vreinterpretq_s32_u32(c2.val[0]));
+ vst1q_s32(mtx_out + 2 * out_stride + 4, vreinterpretq_s32_u32(c2.val[1]));
+ vst1q_s32(mtx_out + 2 * out_stride + 8, vreinterpretq_s32_u32(c2.val[2]));
+ vst1q_s32(mtx_out + 2 * out_stride + 12, vreinterpretq_s32_u32(c2.val[3]));
+ vst1q_s32(mtx_out + 3 * out_stride + 0, vreinterpretq_s32_u32(c3.val[0]));
+ vst1q_s32(mtx_out + 3 * out_stride + 4, vreinterpretq_s32_u32(c3.val[1]));
+ vst1q_s32(mtx_out + 3 * out_stride + 8, vreinterpretq_s32_u32(c3.val[2]));
+ vst1q_s32(mtx_out + 3 * out_stride + 12, vreinterpretq_s32_u32(c3.val[3]));
},
ina, inb, out);
}
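For reference, with the offset and requantization handling removed, NEGEMMLowpMatrixMultiplyKernel now only accumulates raw uint8 products into unsigned 32-bit accumulators and stores them to the output as S32. A minimal scalar sketch of that contract (plain C++, hypothetical row-major buffers, ignoring the interleaved/transposed layouts the real kernel consumes):

#include <cstdint>

// Raw lowp matrix multiply: C_s32 = A_u8 * B_u8, no offsets applied at this stage.
// A is M x K, B is K x N, C is M x N (hypothetical row-major layout).
void gemmlowp_mm_reference(const uint8_t *a, const uint8_t *b, int32_t *c, int m, int n, int k)
{
    for(int i = 0; i < m; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            // u8 x u8 products accumulated in unsigned 32-bit, as in the NEON accumulators above
            uint32_t acc = 0;
            for(int p = 0; p < k; ++p)
            {
                acc += static_cast<uint32_t>(a[i * k + p]) * static_cast<uint32_t>(b[p * n + j]);
            }
            c[i * n + j] = static_cast<int32_t>(acc);
        }
    }
}

The NEON loop above produces a 4x16 block of C, reading 4 values of A and 16 values of B per iteration over k; the arithmetic matches this sketch.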
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
new file mode 100644
index 0000000000..3f841bbf59
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+using namespace arm_compute;
+
+namespace arm_compute
+{
+class Coordinates;
+} // namespace arm_compute
+
+INEGEMMLowpReductionKernel::INEGEMMLowpReductionKernel()
+ : _input(), _output(), _k(0), _is_reshaped(false)
+{
+}
+
+void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
+
+ _input = mtx_a_interleaved4x4;
+ _output = vector_sum_row;
+ _k = num_mtx_a_cols;
+ _is_reshaped = is_interleaved4x4;
+
+ const unsigned int num_elems_processed_per_iteration = _is_reshaped ? 4 : 1;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*_output->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowStatic input_access(_input->info(), 0, 0, ceil_to_multiple(_input->info()->dimension(0), 16), _input->info()->dimension(1));
+ AccessWindowHorizontal output_access(_output->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ input_access,
+ output_access);
+
+ output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), _output->info()->tensor_shape()));
+
+ INEKernel::configure(win);
+}
+
+void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimY);
+
+ Window win_input(collapsed_window);
+ win_input.set(Window::DimX, Window::Dimension(0, 0, 0));
+ win_input.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_input.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ Iterator in(_input, win_input);
+ Iterator out(_output, collapsed_window);
+
+ if(_is_reshaped)
+ {
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ uint32x4_t sum_row = vdupq_n_u32(0);
+
+ const uint8_t *matrix_a = in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+
+#if __arm__
+ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
+#endif /* __arm__ */
+
+ int i = 0;
+ // This for loop performs 4 accumulations
+ for(; i <= (_k - 4); i += 4)
+ {
+ const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i * 4);
+
+ // Convert U8 to U16
+ uint16x4x4_t a0_u16 =
+ {
+ {
+ vget_low_u16(vmovl_u8(vget_low_u8(a0_u8))),
+ vget_high_u16(vmovl_u8(vget_low_u8(a0_u8))),
+ vget_low_u16(vmovl_u8(vget_high_u8(a0_u8))),
+ vget_high_u16(vmovl_u8(vget_high_u8(a0_u8)))
+ }
+ };
+
+ // Accumulate to U16
+ a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[1]);
+ a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[2]);
+ a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[3]);
+
+ // Accumulate to U32
+ sum_row = vaddw_u16(sum_row, a0_u16.val[0]);
+ }
+
+ // This for loop performs the leftover accumulations
+ for(; i < _k; ++i)
+ {
+ const uint8x8_t a0_u8 = vld1_u8(matrix_a + i * 4);
+
+ // Convert U8 to U16
+ const uint16x4_t a0_u16 = vget_low_u16(vmovl_u8(a0_u8));
+
+ // Accumulate to U32
+ sum_row = vaddw_u16(sum_row, a0_u16);
+ }
+
+ auto vector_sum_row = reinterpret_cast<int32_t *>(out.ptr());
+
+ vst1q_s32(vector_sum_row, vreinterpretq_s32_u32(sum_row));
+ },
+ in, out);
+ }
+ else // it is not reshaped
+ {
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ uint32x4_t sum_row_s32 = vdupq_n_u32(0);
+ unsigned int sum_row = 0;
+
+ const uint8_t *matrix_a = in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+
+#if __arm__
+ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
+#endif /* __arm__ */
+
+ int i = 0;
+ // This for loop performs 16 accumulations
+ for(; i <= (_k - 16); i += 16)
+ {
+ const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i);
+
+ // Partial accumulations in U16
+ const uint16x8_t tmp_sum0 = vaddl_u8(vget_low_u8(a0_u8), vget_high_u8(a0_u8));
+
+ // Accumulate to U32
+ sum_row_s32 = vaddq_u32(sum_row_s32, vpaddlq_u16(tmp_sum0));
+ }
+
+ // This for loop performs the leftover accumulations
+ for(; i < _k; ++i)
+ {
+ sum_row += static_cast<unsigned int>(matrix_a[i]);
+ }
+
+#if defined(__aarch64__)
+ // Reduction operation available on 64 bit architectures only
+ sum_row += vaddvq_u32(sum_row_s32);
+#else // __aarch64__
+ uint32x2_t tmp = vpadd_u32(vget_high_u32(sum_row_s32), vget_low_u32(sum_row_s32));
+ tmp = vpadd_u32(tmp, tmp);
+
+ sum_row += vget_lane_u32(tmp, 0);
+#endif // __aarch64__
+
+ *(reinterpret_cast<int *>(out.ptr())) = static_cast<int>(sum_row);
+ },
+ in, out);
+ }
+}
+
+void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
+
+ _input = mtx_b_transposed1xW;
+ _output = vector_sum_col;
+ _k = num_mtx_b_rows;
+ _is_reshaped = is_transposed1xW;
+
+ constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*vector_sum_col->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowStatic input_access(_input->info(), 0, 0, ceil_to_multiple(_input->info()->dimension(0), 16), _input->info()->dimension(1));
+ AccessWindowHorizontal output_access(_output->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ input_access,
+ output_access);
+
+ output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), _output->info()->tensor_shape()));
+
+ INEKernel::configure(win);
+}
+
+void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimY);
+
+ if(_is_reshaped)
+ {
+ Window win_input(collapsed_window);
+ win_input.set(Window::DimX, Window::Dimension(0, 0, 0));
+ win_input.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_input.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ Iterator in(_input, win_input);
+ Iterator out(_output, collapsed_window);
+
+ execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ {
+ // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ uint32x4x4_t sum_col =
+ {
+ {
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
+ }
+ };
+
+ const uint8_t *matrix_b = in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+
+#if __arm__
+ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
+#endif /* __arm__ */
+
+ int i = 0;
+ for(; i < _k; ++i)
+ {
+ const uint8x16_t b0_u8 = vld1q_u8(matrix_b + i * 16);
+
+ // Convert U8 to U16
+ const uint16x8x2_t b0_u16 =
+ {
+ {
+ vmovl_u8(vget_low_u8(b0_u8)),
+ vmovl_u8(vget_high_u8(b0_u8))
+ }
+ };
+
+ // Accumulate to U32
+ sum_col =
+ {
+ {
+ vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
+ vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
+ }
+ };
+ }
+
+ auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
+
+ vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
+ vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
+ vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
+ vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
+ },
+ in, out);
+ }
+ else // it is not reshaped
+ {
+ const auto width_matrix_b = static_cast<int>(_input->info()->dimension(0));
+ const auto in_b_stride = static_cast<int>(_input->info()->strides_in_bytes()[1]);
+
+ // The implementation computes 16 elements per iteration
+ const int window_start_x = 16 * info.thread_id;
+ const int window_step_x = 16 * info.num_threads;
+ // Make sure (window_end_x - window_start_x) is a multiple of window_step_x
+ const int window_end_x = ceil_to_multiple(width_matrix_b - window_start_x, window_step_x) + window_start_x;
+
+ Window win_out(collapsed_window);
+ win_out.set(Window::DimX, Window::Dimension(window_start_x, window_end_x, window_step_x));
+
+ Window win_in(win_out);
+ win_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ Iterator inb(_input, win_in);
+ Iterator out(_output, win_out);
+
+ execute_window_loop(win_out, [&](const Coordinates & id)
+ {
+ if(id.x() > width_matrix_b)
+ {
+ return;
+ }
+
+ // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ uint32x4x4_t sum_col =
+ {
+ {
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
+ }
+ };
+
+ const uint8_t *matrix_b = inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2];
+
+#if __arm__
+ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
+ asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b + in_b_stride));
+#endif /* __arm__ */
+
+ int i = 0;
+ // This for loop performs 4 accumulations
+ for(; i <= (_k - 4); i += 4)
+ {
+ const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
+ const uint8x16_t b1_u8 = vld1q_u8(matrix_b + 1 * in_b_stride);
+ const uint8x16_t b2_u8 = vld1q_u8(matrix_b + 2 * in_b_stride);
+ const uint8x16_t b3_u8 = vld1q_u8(matrix_b + 3 * in_b_stride);
+
+#if __arm__
+ asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 1 * in_b_stride));
+ asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 2 * in_b_stride));
+ asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 3 * in_b_stride));
+ asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 4 * in_b_stride));
+#endif /* __arm__ */
+
+ // Partial accumulation in u16
+ uint16x8x2_t tmp_sum =
+ {
+ {
+ vdupq_n_u16(0),
+ vdupq_n_u16(0)
+ }
+ };
+
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b0_u8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b1_u8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b2_u8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b3_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b0_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b1_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b2_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b3_u8));
+
+ // Accumulate to U32
+ sum_col =
+ {
+ {
+ vaddw_u16(sum_col.val[0], vget_low_u16(tmp_sum.val[0])),
+ vaddw_u16(sum_col.val[1], vget_high_u16(tmp_sum.val[0])),
+ vaddw_u16(sum_col.val[2], vget_low_u16(tmp_sum.val[1])),
+ vaddw_u16(sum_col.val[3], vget_high_u16(tmp_sum.val[1]))
+ }
+ };
+
+ matrix_b += 4 * in_b_stride;
+ }
+
+ // This for loop performs the leftover accumulations
+ for(; i < _k; ++i)
+ {
+ const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
+
+ // Convert U8 to U16
+ const uint16x8x2_t b0_u16 =
+ {
+ {
+ vmovl_u8(vget_low_u8(b0_u8)),
+ vmovl_u8(vget_high_u8(b0_u8))
+ }
+ };
+
+ // Accumulate to U32
+ sum_col =
+ {
+ {
+ vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
+ vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
+ }
+ };
+
+ matrix_b += in_b_stride;
+ }
+
+ auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
+
+ vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
+ vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
+ vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
+ vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
+ },
+ inb, out);
+ }
+}
\ No newline at end of file
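The two new reduction kernels only compute the per-row sums of A and per-column sums of B that the finalize step consumes. A scalar sketch of the same reductions (hypothetical row-major layouts, ignoring the interleaved4x4/transposed1xW variants):

#include <cstdint>

// vector_sum_row[i] = sum over k of A[i][k]; A is M x K, uint8 (hypothetical row-major layout).
void sum_rows_a(const uint8_t *a, int32_t *vector_sum_row, int m, int k)
{
    for(int i = 0; i < m; ++i)
    {
        uint32_t sum = 0;
        for(int p = 0; p < k; ++p)
        {
            sum += a[i * k + p];
        }
        vector_sum_row[i] = static_cast<int32_t>(sum);
    }
}

// vector_sum_col[j] = sum over k of B[k][j]; B is K x N, uint8 (hypothetical row-major layout).
void sum_cols_b(const uint8_t *b, int32_t *vector_sum_col, int k, int n)
{
    for(int j = 0; j < n; ++j)
    {
        uint32_t sum = 0;
        for(int p = 0; p < k; ++p)
        {
            sum += b[p * n + j];
        }
        vector_sum_col[j] = static_cast<int32_t>(sum);
    }
}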
diff --git a/src/runtime/NEON/functions/NEGEMMLowp.cpp b/src/runtime/NEON/functions/NEGEMMLowp.cpp
index 12136cbcb5..ab7fa079b1 100644
--- a/src/runtime/NEON/functions/NEGEMMLowp.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowp.cpp
@@ -31,120 +31,104 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "support/ToolchainSupport.h"
using namespace arm_compute;
-#define NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output) \
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::U8); \
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((b), 1, DataType::U8); \
- ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); \
- ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A"); \
- ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The C matrix must have the same number of columns as the matrix C");
-
NEGEMMLowp::NEGEMMLowp(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _mm_optimised_kernel(nullptr), _interleave_blocked(), _interleave_blocked_transposed(), _tmp_a(),
- _tmp_b()
+ : _memory_group(std::move(memory_manager)), _mm_func(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _finalize_kernel(), _vector_sum_col(), _vector_sum_row(), _mm_output(), _a_offset(0),
+ _b_offset(0)
{
}
-void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output)
+void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t output_mult_int, int32_t shift)
{
- NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
+ ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
+ ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
+ ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B");
+
+ _a_offset = a_offset;
+ _b_offset = b_offset;
+
+ // Initialize matrix multiply output tensor
+ const TensorShape &shape_mm_output = output->info()->tensor_shape();
+ TensorInfo info_mm_output(shape_mm_output, 1, DataType::S32);
+ _mm_output.allocator()->init(info_mm_output);
+ _memory_group.manage(&_mm_output);
- const struct CPUInfo ci = NEScheduler::get().cpu_info();
- const int cpu_has_dotprod = static_cast<int>(ci.CPU) & static_cast<int>(CPUTarget::DOT);
- if(cpu_has_dotprod != 0)
+ // Initialize Matrix B reduction kernel only if _a_offset is not equal to 0
+ if(_a_offset != 0)
{
-#ifdef ARM_COMPUTE_AARCH64_V8_2
- // NEGEMMLowpAArch64V8P4Kernel only compiled in AArch64 targets
- _mm_optimised_kernel = support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
- TensorShape shape_a_int = a->info()->tensor_shape();
- shape_a_int.set(0, a->info()->dimension(0) * 8.f);
- shape_a_int.set(1, std::ceil(a->info()->dimension(1) / 8.f));
-
- TensorShape shape_b_int = b->info()->tensor_shape();
- shape_b_int.set(0, b->info()->dimension(0) * 12.f);
- shape_b_int.set(1, std::ceil(b->info()->dimension(1) / 12.f));
-
- TensorInfo info_a_int(shape_a_int, 1, a->info()->data_type());
- TensorInfo info_b_int(shape_b_int, 1, b->info()->data_type());
- _tmp_a.allocator()->init(info_a_int);
- _tmp_b.allocator()->init(info_b_int);
-
- _memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
-
- _interleave_blocked.configure(a, &_tmp_a, 8, 4, false);
- _interleave_blocked_transposed.configure(b, &_tmp_b, 12, 4, true);
- _mm_optimised_kernel->configure(&_tmp_a, &_tmp_b, output);
-
- _tmp_a.allocator()->allocate();
- _tmp_b.allocator()->allocate();
-#endif /* ARM_COMPUTE_AARCH64_V8_2 */
+ TensorShape shape_vector_sum_col = b->info()->tensor_shape();
+ shape_vector_sum_col.remove_dimension(1);
+ TensorInfo info_vector_sum_col(shape_vector_sum_col, 1, DataType::S32);
+ _vector_sum_col.allocator()->init(info_vector_sum_col);
+ _memory_group.manage(&_vector_sum_col);
+
+ // Configure Matrix B reduction kernel
+ _mtx_b_reduction_kernel.configure(b, &_vector_sum_col, a->info()->dimension(0), false);
}
- else
+
+ // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
+ if(_b_offset != 0)
{
- ARM_COMPUTE_ERROR("Not implemented");
- //FIXME: This is in the process of being updated, for more info please refer to COMPMID-624.
+ TensorShape shape_vector_sum_row = a->info()->tensor_shape();
+ shape_vector_sum_row.set(Window::DimX, a->info()->dimension(1));
+ shape_vector_sum_row.remove_dimension(1);
+ TensorInfo info_vector_sum_row(shape_vector_sum_row, 1, DataType::S32);
+ _vector_sum_row.allocator()->init(info_vector_sum_row);
+ _memory_group.manage(&_vector_sum_row);
+
+ // Configure Matrix A reduction kernel
+ _mtx_a_reduction_kernel.configure(a, &_vector_sum_row, a->info()->dimension(0), false);
}
-}
-void NEGEMMLowp::run()
-{
- _memory_group.acquire();
+ // Configure matrix multiply function
+ _mm_func.configure(a, b, &_mm_output);
+
+ // Configure finalize kernel
+ _finalize_kernel.configure(_a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, &_mm_output, output, a->info()->dimension(0), a_offset, b_offset, c_offset,
+ output_mult_int, shift);
- if(_mm_optimised_kernel != nullptr)
+ // Allocate tensors
+ _mm_output.allocator()->allocate();
+
+ if(_a_offset != 0)
{
- NEScheduler::get().schedule(&_interleave_blocked, Window::DimY);
- NEScheduler::get().schedule(&_interleave_blocked_transposed, Window::DimY);
- NEScheduler::get().schedule(_mm_optimised_kernel.get(), Window::DimY);
+ _vector_sum_col.allocator()->allocate();
}
- else
+
+ if(_b_offset != 0)
{
- /* Run interleave kernel */
- NEScheduler::get().schedule(&_interleave_kernel, Window::DimY);
- /* Run transpose kernel */
- NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
- /* Run matrix multiply kernel */
- NEScheduler::get().schedule(&_mm_kernel, Window::DimY);
+ _vector_sum_row.allocator()->allocate();
}
-
- _memory_group.release();
}
-void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift)
+void NEGEMMLowp::run()
{
- NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
-
- /* The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ] */
- TensorShape shape_tmp_a = a->info()->tensor_shape();
- shape_tmp_a.set(0, a->info()->dimension(0) * 4);
- shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.f));
-
- TensorShape shape_tmp_b = b->info()->tensor_shape();
- shape_tmp_b.set(0, b->info()->dimension(1) * 16);
- shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 16.f));
+ _memory_group.acquire();
- TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type());
- TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type());
- _tmp_a.allocator()->init(info_a);
- _tmp_b.allocator()->init(info_b);
+ // Run matrix A reduction kernel only if _b_offset is not equal to 0
+ if(_b_offset != 0)
+ {
+ NEScheduler::get().schedule(&_mtx_a_reduction_kernel, Window::DimX);
+ }
- // Manage intermediate buffers
- _memory_group.manage(&_tmp_a);
- _memory_group.manage(&_tmp_b);
+ // Run matrix B reduction kernel only if _a_offset is not equal to 0
+ if(_a_offset != 0)
+ {
+ NEScheduler::get().schedule(&_mtx_b_reduction_kernel, Window::DimX);
+ }
- _interleave_kernel.configure(a, &_tmp_a);
- _transpose_kernel.configure(b, &_tmp_b);
- _mm_kernel.configure(&_tmp_a, &_tmp_b, output, a_offset, b_offset, output_offset, output_mult_int, shift);
+ // Run matrix multiply core function
+ _mm_func.run();
- _tmp_a.allocator()->allocate();
- _tmp_b.allocator()->allocate();
-}
+ // Run finalize kernel
+ NEScheduler::get().schedule(&_finalize_kernel, Window::DimY);
-#undef NEGEMMLOWP_VALIDATE_DIMENSIONS
+ _memory_group.release();
+}
\ No newline at end of file
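The finalize kernel folds the offset contributions back into the raw S32 matrix multiply result before requantizing to U8. A scalar sketch for one output element (i, j), assuming the usual GEMMLowp expansion of (A + a_offset) * (B + b_offset) and the parameters forwarded by configure() above:

#include <algorithm>
#include <cstdint>

// acc = mm_result[i][j] + a_offset * vector_sum_col[j] + b_offset * vector_sum_row[i]
//       + k * a_offset * b_offset + c_offset
// followed by requantization (the multiply-then-right-shift form is an assumption of this sketch).
uint8_t finalize_element(int32_t mm_result, int32_t sum_col_j, int32_t sum_row_i,
                         int32_t k, int32_t a_offset, int32_t b_offset,
                         int32_t c_offset, int32_t output_mult_int, int32_t shift)
{
    int32_t acc = mm_result;
    acc += a_offset * sum_col_j;
    acc += b_offset * sum_row_i;
    acc += k * a_offset * b_offset;
    acc += c_offset;
    acc = (acc * output_mult_int) >> shift;
    return static_cast<uint8_t>(std::max(0, std::min(255, acc)));
}

Here k is the accumulation depth (the number of columns of A), which is why configure() passes a->info()->dimension(0) to the finalize kernel.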
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
new file mode 100644
index 0000000000..11ae054e11
--- /dev/null
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpAssemblyBaseKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+NEGEMMLowpMatrixMultiplyCore::NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _mm_kernel(nullptr), _mtx_a_reshape_kernel(nullptr), _mtx_b_reshape_kernel(nullptr), _tmp_a(), _tmp_b()
+{
+}
+
+void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
+ ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
+ ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
+ ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B");
+
+#ifdef ARM_COMPUTE_AARCH64_V8_2
+ // Check for DOT product instruction
+ const struct CPUInfo ci = NEScheduler::get().cpu_info();
+ const int cpu_has_dotprod = static_cast<int>(ci.CPU) & static_cast<int>(CPUTarget::DOT);
+
+ if(cpu_has_dotprod != 0)
+ {
+ TensorShape shape_a_int = a->info()->tensor_shape();
+ shape_a_int.set(0, a->info()->dimension(0) * 8.f);
+ shape_a_int.set(1, std::ceil(a->info()->dimension(1) / 8.f));
+
+ TensorShape shape_b_int = b->info()->tensor_shape();
+ shape_b_int.set(0, b->info()->dimension(0) * 12.f);
+ shape_b_int.set(1, std::ceil(b->info()->dimension(1) / 12.f));
+
+ TensorInfo info_a_int(shape_a_int, 1, a->info()->data_type());
+ TensorInfo info_b_int(shape_b_int, 1, b->info()->data_type());
+ _tmp_a.allocator()->init(info_a_int);
+ _tmp_b.allocator()->init(info_b_int);
+ _memory_group.manage(&_tmp_a);
+ _memory_group.manage(&_tmp_b);
+
+ // Configure interleave blocked kernel for matrix A
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleaveBlockedKernel>();
+ k->configure(a, &_tmp_a, 8, 4, false);
+ _mtx_a_reshape_kernel = std::move(k);
+ }
+
+ // Configure interleave blocked kernel for matrix B
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleaveBlockedKernel>();
+ k->configure(b, &_tmp_b, 12, 4, true);
+ _mtx_b_reshape_kernel = std::move(k);
+ }
+
+ // Configure matrix multiply kernel
+ {
+ // NEGEMMLowpAArch64V8P4Kernel only compiled in AArch64 targets
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
+ k->configure(&_tmp_a, &_tmp_b, output);
+ _mm_kernel = std::move(k);
+ }
+ }
+ else
+#endif /* ARM_COMPUTE_AARCH64_V8_2 */
+ {
+ // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
+ TensorShape shape_tmp_a = a->info()->tensor_shape();
+ shape_tmp_a.set(0, a->info()->dimension(0) * 4);
+ shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.f));
+
+ // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
+ TensorShape shape_tmp_b = b->info()->tensor_shape();
+ shape_tmp_b.set(0, b->info()->dimension(1) * 16);
+ shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 16.f));
+
+ TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type());
+ TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type());
+ _tmp_a.allocator()->init(info_a);
+ _tmp_b.allocator()->init(info_b);
+ _memory_group.manage(&_tmp_a);
+ _memory_group.manage(&_tmp_b);
+
+ // Configure interleave kernel
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMInterleave4x4Kernel>();
+ k->configure(a, &_tmp_a);
+ _mtx_a_reshape_kernel = std::move(k);
+ }
+
+ // Configure transpose kernel
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMTranspose1xWKernel>();
+ k->configure(b, &_tmp_b);
+ _mtx_b_reshape_kernel = std::move(k);
+ }
+
+ // Configure matrix multiply kernel
+ {
+ auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpMatrixMultiplyKernel>();
+ k->configure(&_tmp_a, &_tmp_b, output);
+ _mm_kernel = std::move(k);
+ }
+ }
+
+ // Allocate tensors
+ _tmp_a.allocator()->allocate();
+ _tmp_b.allocator()->allocate();
+}
+
+void NEGEMMLowpMatrixMultiplyCore::run()
+{
+ _memory_group.acquire();
+
+ // Run reshape matrix A
+ NEScheduler::get().schedule(_mtx_a_reshape_kernel.get(), Window::DimY);
+
+ // Run reshape matrix B
+ NEScheduler::get().schedule(_mtx_b_reshape_kernel.get(), Window::DimY);
+
+ // Run matrix multiply kernel
+ NEScheduler::get().schedule(_mm_kernel.get(), Window::DimY);
+
+ _memory_group.release();
+}
\ No newline at end of file
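A minimal usage sketch of the new NEGEMMLowpMatrixMultiplyCore function (shapes taken from the small test dataset below; filling the input buffers and error handling are omitted):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_lowp_mm_core()
{
    Tensor a, b, c;

    // A: 13x21, B: 21x33, C: 13x33 -- note TensorShape takes (width, height)
    a.allocator()->init(TensorInfo(TensorShape(21U, 13U), 1, DataType::U8));
    b.allocator()->init(TensorInfo(TensorShape(33U, 21U), 1, DataType::U8));
    c.allocator()->init(TensorInfo(TensorShape(33U, 13U), 1, DataType::S32));

    NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
    gemmlowp_mm.configure(&a, &b, &c);

    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();

    // ... fill a and b with uint8_t data ...

    gemmlowp_mm.run();
}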
diff --git a/tests/datasets/LargeGEMMLowpDataset.h b/tests/datasets/LargeGEMMLowpDataset.h
index 610a999dd8..10f79e423d 100644
--- a/tests/datasets/LargeGEMMLowpDataset.h
+++ b/tests/datasets/LargeGEMMLowpDataset.h
@@ -43,6 +43,8 @@ public:
LargeGEMMLowpDataset()
{
add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0, 0, 1, 0);
+ add_config(TensorShape(873U, 513U), TensorShape(784U, 873U), TensorShape(784U, 513U), 0, 4, 3, 2, 0);
+ add_config(TensorShape(697U, 872U), TensorShape(563U, 697U), TensorShape(563U, 872U), -2, 0, 1, 1, 0);
add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13, -6, 2, 2);
add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2, 8, 4, 3);
add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1, -3, 3, 1);
diff --git a/tests/datasets/SmallGEMMLowpDataset.h b/tests/datasets/SmallGEMMLowpDataset.h
index 2ed7a759ea..b7fe3907ad 100644
--- a/tests/datasets/SmallGEMMLowpDataset.h
+++ b/tests/datasets/SmallGEMMLowpDataset.h
@@ -43,6 +43,8 @@ public:
SmallGEMMLowpDataset()
{
add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, 0, 1, 0);
+ add_config(TensorShape(52U, 13U), TensorShape(33U, 52U), TensorShape(33U, 13U), 0, 4, 3, 2, 0);
+ add_config(TensorShape(52U, 26U), TensorShape(33U, 52U), TensorShape(33U, 26U), -2, 0, 1, 1, 0);
add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13, -6, 2, 2);
add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 12U), -3, -2, 8, 4, 3);
add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, -3, 3, 1);
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index dede3aef29..de7495bb6a 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -21,11 +21,13 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
+#include "tests/NEON/Helper.h"
#include "tests/datasets/LargeGEMMLowpDataset.h"
#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
@@ -35,9 +37,6 @@
#include "tests/validation/fixtures/GEMMInterleaveBlockedFixture.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"
-#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
-#include "tests/NEON/Helper.h"
-
namespace arm_compute
{
namespace test
@@ -46,15 +45,8 @@ namespace validation
{
namespace
{
-const auto data_mnk = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 12, 15);
-const auto data_offsets = framework::dataset::make("a", -3, 3) * framework::dataset::make("b", -1, 2) * framework::dataset::make("c", 1, 3) * framework::dataset::make("cm", 0,
- 3)
- * framework::dataset::make("shift", 0, 4);
-
-const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
-
+const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
-
} // namespace
TEST_SUITE(NEON)
@@ -115,7 +107,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpOffsetFixture, framework::DatasetMode
// Validate output
validate(Accessor(_target), _reference);
}
-TEST_SUITE_END()
//FIXME: This is in the process of being updated, for more info please refer to COMPMID-624.
#if 0 // defined(__aarch64__)
@@ -131,6 +122,7 @@ TEST_SUITE_END()
#endif // defined(__aarch64__)
TEST_SUITE_END()
TEST_SUITE_END()
+TEST_SUITE_END()
} // namespace validation
} // namespace test
} // namespace arm_compute