author    Gian Marco Iodice <gianmarco.iodice@arm.com>  2017-08-15 11:45:22 +0100
committer Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:35:24 +0000
commit    edfa9f463bed084f8b0953557202b2a1e56da817 (patch)
tree      5d1e92926d112fde05dcbc61324d96f73f692390
parent    dc460f13ee65e27b2a428e44c2d80afb1f516a99 (diff)
COMPMID-477 - Optimized batched case in CLConvolutionLayer
Change-Id: I4ef18f49f1da0cb816aaa0762466b940792c15ed
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/84162
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
 arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h   |  18
 arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h   |  42
 arm_compute/runtime/CL/functions/CLGEMM.h                  |   2
 src/core/CL/CLKernelLibrary.cpp                            |  15
 src/core/CL/cl_kernels/gemm.cl                             | 568
 src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp     |   4
 src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp         |  96
 src/runtime/CL/functions/CLConvolutionLayer.cpp            |   5
 src/runtime/CL/functions/CLFullyConnectedLayer.cpp         | 239
 src/runtime/CL/functions/CLGEMM.cpp                        |  30
 tests/model_objects/AlexNet.h                              |   4
 tests/networks_new/AlexNetNetwork.h                        |   4
 tests/validation_new/CL/FullyConnectedLayer.cpp            |  14
 tests/validation_new/NEON/FullyConnectedLayer.cpp          |   4
 tests/validation_new/fixtures/FullyConnectedLayerFixture.h |  15
 15 files changed, 549 insertions, 511 deletions
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index dec63e0679..a768a19914 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -30,10 +30,10 @@ namespace arm_compute
{
class ICLTensor;
-/** OpenCL kernel to multiply two input matrices "A" and "B" or to multiply a vector "A" by a matrix "B". All elements of the output matrix/vector will be multiplied by alpha
+/** OpenCL kernel to multiply two input matrices "A" and "B". All elements of the output matrix will be multiplied by alpha
*
- * @note If the output tensor is a matrix, the implementation assumes that the input tensors @p input0 and @p input1 are both matrices and reshaped respectively with @ref CLGEMMInterleave4x4Kernel" and @ref CLGEMMTranspose1xWKernel
- * @note If the output tensor is a vector and the data type is F32, the implementation assumes that the first input tensor @p input0 is a vector and the second input tensor @p input1 a matrix. The implementation also assumes that both tensors have not been reshaped
+ * @note If the input tensors @p input0 and @p input1 have been reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel,
+ * the flag @p is_interleaved_transposed must be set to true
*
* @attention The second input tensor must have at least 2 dimensions (matrix)
*
@@ -53,13 +53,13 @@ public:
CLGEMMMatrixMultiplyKernel &operator=(CLGEMMMatrixMultiplyKernel &&) = default;
/** Initialise the kernel's input, output and alpha
*
- * @param[in] input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: QS8/QS16/F16/F32
- * @param[in] input1 Input tensor containing the transposed Matrix B if the first input tensor A is not a vector.
- * If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
- * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
- * @param[in] alpha Weight of the matrix product
+ * @param[in] input0 Input tensor containing the Matrix A. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
+ * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
+ * @param[in] alpha Weight of the matrix product
+ * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel
*/
- void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha);
+ void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, bool is_interleaved_transposed = true);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
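Note: the new flag makes the reshaped (interleaved/transposed) path opt-in per call rather than inferred from the output shape. A minimal host-side sketch of driving the kernel on plain, unreshaped matrices follows; it assumes the CL scheduler is already initialised and the tensors are allocated (the setup is illustrative, not part of this patch):

#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

// Hedged sketch: multiply two plain row-major matrices A (M x K) and B (K x N)
// without reshaping either input, via the new optional flag.
void multiply_unreshaped(CLTensor &a, CLTensor &b, CLTensor &out, float alpha)
{
    CLGEMMMatrixMultiplyKernel mm_kernel;
    // false = the inputs were NOT pre-processed by CLGEMMInterleave4x4Kernel /
    // CLGEMMTranspose1xWKernel, so the kernel takes the direct (non-reshaped) path.
    mm_kernel.configure(&a, &b, &out, alpha, false /* is_interleaved_transposed */);
    CLScheduler::get().enqueue(mm_kernel);
}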
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index a29f68fcf1..e076f51b26 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -24,12 +24,10 @@
#ifndef __ARM_COMPUTE_CLFULLYCONNECTEDLAYER_H__
#define __ARM_COMPUTE_CLFULLYCONNECTEDLAYER_H__
-#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-#include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/CL/kernels/CLTransposeKernel.h"
#include "arm_compute/runtime/CL/CLTensor.h"
@@ -38,41 +36,25 @@ namespace arm_compute
{
/** Basic function to reshape the weights of Fully Connected layer with OpenCL. This function calls the following kernels:
*
- * -# @ref CLTransposeKernel (if @p transpose_weights is set to true)
- * -# @ref CLGEMMTranspose1xWKernel (if @p is_batched_fc_layer is set to true)
+ * -# @ref CLTransposeKernel
*
* @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
*/
-class CLFullyConnectedLayerReshapeWeights : public IFunction
+class CLFullyConnectedLayerReshapeWeights : public ICLSimpleFunction
{
public:
- /** Constructor */
- CLFullyConnectedLayerReshapeWeights();
/** Set the input and output tensors.
*
- * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/QS16/F16/F32.
- * @param[out] output Destination tensor. Data type supported: Same as @p input.
- * @param[in] transpose_weights True if the weights must be transposed. Data types supported: Same as @p weights.
- * @param[in] is_batched_fc_layer True if it is a batched fully connected layer
+ * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/QS16/F16/F32.
+ * @param[out] output Destination tensor which stores the transposed input tensor. Data type supported: Same as @p input.
*/
- void configure(const ICLTensor *input, ICLTensor *output, bool transpose_weights, bool is_batched_fc_layer);
-
- // Inherited methods overridden:
- void run() override;
-
-private:
- CLTransposeKernel _transpose_kernel;
- CLGEMMTranspose1xWKernel _transpose1xW_kernel;
- CLTensor _transpose_output;
- bool _transpose_weights;
- bool _is_batched_fc_layer;
+ void configure(const ICLTensor *input, ICLTensor *output);
};
/** Basic function to compute a Fully Connected layer on OpenCL. This function calls the following OpenCL kernels:
*
* -# @ref CLIm2ColKernel (called when the input comes from a convolutional layer)
- * -# @ref CLFullyConnectedLayerReshapeWeights (if @p are_weights_reshaped is set to false) (called once)
- * -# @ref CLGEMMInterleave4x4Kernel (called if we have a multi-batch input)
+ * -# @ref CLFullyConnectedLayerReshapeWeights (if @p are_weights_reshaped is set to false and @p transpose_weights is set to true) (called once)
* -# @ref CLGEMMMatrixMultiplyKernel
* -# @ref CLGEMMMatrixAccumulateBiasesKernel (if @p biases is not equal to nullptr)
*
@@ -85,7 +67,7 @@ public:
CLFullyConnectedLayer();
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QS8/F16/F32.
+ * @param[in] input Source tensor. Data type supported: QS8/QS16/F16/F32.
* @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
* @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
* @param[out] output Destination tensor. Data type supported: Same as @p input.
@@ -98,17 +80,17 @@ public:
void run() override;
private:
+ void configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output);
+ void configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output);
+
CLIm2ColKernel _im2col_kernel;
CLFullyConnectedLayerReshapeWeights _reshape_weights_kernel;
- CLGEMMInterleave4x4Kernel _interleave4x4_kernel;
CLGEMMMatrixMultiplyKernel _mm_kernel;
CLGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel;
CLTensor _im2col_output;
- CLTensor _interleave4x4_output;
CLTensor _reshape_weights_output;
bool _are_weights_reshaped;
- bool _is_batched_fc_layer;
- bool _linearize_input;
+ bool _is_fc_after_conv;
bool _accumulate_biases;
};
}
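Note: with the interleave step gone, the layer's remaining run-time choice is whether the input still carries convolutional shape. The two private helpers above suggest a dispatch along these lines; the fragment below is a hedged sketch of what might appear inside CLFullyConnectedLayer::configure() (the real body lives in CLFullyConnectedLayer.cpp, and weights_to_use is a hypothetical name for the possibly-reshaped weights tensor):

// Hedged, illustrative sketch only. An input with more than two dimensions
// comes from a convolutional layer and is linearized by CLIm2ColKernel first;
// a 2D input feeds the GEMM directly.
const bool is_fc_after_conv = input->info()->num_dimensions() > 2;
if(is_fc_after_conv)
{
    configure_conv_fc(input, weights_to_use, output); // CLIm2ColKernel + GEMM
}
else
{
    configure_fc_fc(input, weights_to_use, output); // GEMM only
}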
diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 9207efd68f..9b887305cb 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -76,7 +76,7 @@ private:
CLGEMMMatrixAdditionKernel _ma_kernel;
CLTensor _tmp_a;
CLTensor _tmp_b;
- bool _run_vector_matrix_multiplication;
+ bool _is_interleaved_transposed;
bool _run_addition;
};
}
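Note: the rename signals that CLGEMM now records, at configure time, whether it took the reshaped path at all, instead of special-casing vector inputs. The selection logic itself is in CLGEMM.cpp, which this hunk does not show; the following is a hypothetical sketch of such a decision, assuming reshaping only pays off once matrix A has enough rows (threshold and member names are illustrative):

// Hypothetical configure-time decision (the real heuristic is in CLGEMM.cpp).
const bool is_interleaved_transposed = (a->info()->dimension(1) > 4); // illustrative threshold
if(is_interleaved_transposed)
{
    // Reshape A via CLGEMMInterleave4x4Kernel and B via CLGEMMTranspose1xWKernel
    // into _tmp_a / _tmp_b, then feed the reshaped tensors to the MM kernel.
    _mm_kernel.configure(&_tmp_a, &_tmp_b, output, alpha, true);
}
else
{
    _mm_kernel.configure(a, b, output, alpha, false);
}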
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 019f3ea132..2589bd12b5 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -168,16 +168,15 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "gemm_ma_f32", "gemm.cl" },
{ "gemm_ma_qs8", "gemm.cl" },
{ "gemm_ma_qs16", "gemm.cl" },
- { "gemm_mm_u8", "gemm.cl" },
- { "gemm_mm_f16", "gemm.cl" },
- { "gemm_mm_f32_midgard", "gemm.cl" },
- { "gemm_mm_f32_bifrost", "gemm.cl" },
+ { "gemm_mm_interleaved_transposed_u8", "gemm.cl" },
+ { "gemm_mm_interleaved_transposed_f16", "gemm.cl" },
+ { "gemm_mm_interleaved_transposed_f32_midgard", "gemm.cl" },
+ { "gemm_mm_interleaved_transposed_f32_bifrost", "gemm.cl" },
+ { "gemm_mm_interleaved_transposed_qs8", "gemm.cl" },
+ { "gemm_mm_interleaved_transposed_qs16", "gemm.cl" },
+ { "gemm_mm_floating_point", "gemm.cl" },
{ "gemm_mm_qs8", "gemm.cl" },
{ "gemm_mm_qs16", "gemm.cl" },
- { "gemm_vm_f16", "gemm.cl" },
- { "gemm_vm_f32", "gemm.cl" },
- { "gemm_vm_qs8", "gemm.cl" },
- { "gemm_vm_qs16", "gemm.cl" },
{ "gemm_lc_vm_f32", "gemm.cl" },
{ "gemm_transpose1x16", "gemm.cl" },
{ "gemm_transpose1x8", "gemm.cl" },
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index 00c73e7be0..35a2e4704f 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -48,10 +48,10 @@ __kernel void gemm_transpose1x4(IMAGE_DECLARATION(src),
uint x = get_global_id(0);
uint y = get_global_id(1);
- /* Compute address for Matrix B - source */
+ // Compute address for Matrix B - source
Image src = CONVERT_TO_IMAGE_STRUCT(src);
- /* Compute address for Matrix B transposed - destination. X and Y are swapped */
+ // Compute address for Matrix B transposed - destination. X and Y are swapped
uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
uint4 b0 = vload4(0, (__global uint *)src.ptr);
@@ -288,11 +288,11 @@ __kernel void gemm_accumulate_biases(
}
#endif /* DATA_TYPE */
-#ifdef WIDTH_MATRIX_B
+#ifdef COLS_B
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B
+ * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B
*
* @param[in] src0_ptr Pointer to the source matrix. Supported formats: U8
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -318,14 +318,14 @@ __kernel void gemm_accumulate_biases(
* @param[in] c_mult_int Multiplied with each element of the matrix C.
* @param[in] shift Number of bits to shift right the result.
*/
-__kernel void gemm_mm_u8(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst),
- int a_offset,
- int b_offset,
- int c_offset,
- int c_mult_int,
- int shift)
+__kernel void gemm_mm_interleaved_transposed_u8(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst),
+ int a_offset,
+ int b_offset,
+ int c_offset,
+ int c_mult_int,
+ int shift)
{
/* src_addr.s0 = address of matrix A */
/* src_addr.s1 = address of matrix B */
@@ -338,7 +338,7 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0),
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
/* Compute end row address for matrix B */
- int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+ int end_row_mtx_b = src_addr.s1 + COLS_B;
/* Reset accumulators */
int16 c00 = 0.0f;
@@ -392,13 +392,13 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0),
vstore16(convert_uchar16_sat(c20), 0, (__global uchar *)(offset(&dst, 0, 2)));
vstore16(convert_uchar16_sat(c30), 0, (__global uchar *)(offset(&dst, 0, 3)));
}
-#endif /* WIDTH_MATRIX_B */
+#endif /* COLS_B */
-#if defined(WIDTH_MATRIX_B) && defined(ALPHA)
+#if defined(COLS_B) && defined(ALPHA)
/** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA
+ * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -419,9 +419,9 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
{
/* src_addr.s0 = address of matrix A */
/* src_addr.s1 = address of matrix B */
@@ -437,7 +437,7 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0),
src_addr = src_addr >> 2;
/* Compute end row address for matrix B */
- int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+ int end_row_mtx_b = src_addr.s1 + COLS_B;
/* Reset accumulators */
float4 c00 = 0.0f;
@@ -497,7 +497,7 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0),
/** This OpenCL kernel is optimised for Bifrost. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA
+ * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -518,9 +518,9 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
{
// src_addr_a = address of matrix A
// src_addr_b = address of matrix B
@@ -528,7 +528,7 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0),
__global float *src_addr_b = (__global float *)(src1_ptr + get_global_id(0) * src1_stride_y + src1_offset_first_element_in_bytes);
// Compute end row address for matrix B
- __global float *src_end_addr_b = src_addr_b + WIDTH_MATRIX_B;
+ __global float *src_end_addr_b = src_addr_b + COLS_B;
// Reset accumulators
float c00 = 0.0f;
@@ -707,7 +707,7 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0),
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA
+ * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -728,9 +728,9 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_mm_f16(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
{
/* src_addr.s0 = address of matrix A */
/* src_addr.s1 = address of matrix B */
@@ -746,7 +746,7 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0),
src_addr = src_addr >> 1;
/* Compute end row address for matrix B */
- int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+ int end_row_mtx_b = src_addr.s1 + COLS_B;
/* Reset accumulators */
half8 c00 = 0.0f;
@@ -807,7 +807,7 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0),
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 8 bit fixed point precision
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
*
- * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DWIDTH_MATRIX_B -DALPHA and -DFIXED_POINT_POSITION
+ * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
*
* @note: ALPHA must be passed in 8 bit fixed point format
*
@@ -830,9 +830,9 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_mm_interleaved_transposed_qs8(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
{
/* src_addr.s0 = address of matrix A */
/* src_addr.s1 = address of matrix B */
@@ -845,7 +845,7 @@ __kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
/* Compute end row address for matrix B */
- int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+ int end_row_mtx_b = src_addr.s1 + COLS_B;
/* Reset accumulators */
short8 c00 = 0.0f;
@@ -899,7 +899,7 @@ __kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 16 bit fixed point precision
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
*
- * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DWIDTH_MATRIX_B -DALPHA and -DFIXED_POINT_POSITION
+ * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
*
* @note: ALPHA must be passed in 16 bit fixed point format
*
@@ -922,9 +922,9 @@ __kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_mm_interleaved_transposed_qs16(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
{
/* src_addr.s0 = address of matrix A */
/* src_addr.s1 = address of matrix B */
@@ -940,7 +940,7 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
src_addr = src_addr >> 1;
/* Compute end row address for matrix B */
- int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+ int end_row_mtx_b = src_addr.s1 + COLS_B;
/* Reset accumulators */
int8 c00 = 0.0f;
@@ -983,14 +983,17 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
}
#endif // defined(FIXED_POINT_POSITION)
-#ifdef WIDTH_VECTOR_A
-/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1)
- *
- * @attention The width of vector A, the width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_VECTOR_A -DWIDTH_MATRIX_B and -DALPHA
+#if defined(COLS_A) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_Y)
+#if defined(DATA_TYPE)
+#define VECTOR_TYPE VEC_DATA_TYPE(DATA_TYPE, NUM_ELEMS_PROCESSED_PER_THREAD_X)
+/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
*
- * @attention The input vector A and matrix B must not be reshaped
+ * @note This OpenCL kernel works with floating point data types (F16/F32)
+ * @note The floating point data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y
+ * @note The width of matrix A and the alpha's value need to be passed at compile time using -DCOLS_A and -DALPHA
*
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16/F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
@@ -1009,127 +1012,136 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_vm_f32(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_mm_floating_point(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
{
- int idx = get_global_id(0) * 4;
+ int idx = get_global_id(0) * NUM_ELEMS_PROCESSED_PER_THREAD_X;
- /* Compute the address for the vector A and matrix B */
+ // Compute starting address for matrix A and Matrix B
int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- src_addr.s1 += idx * sizeof(float);
-
- int end_row_vec_a = src_addr.s0 + (WIDTH_VECTOR_A * sizeof(float));
-
- float4 acc = 0.0f;
- for(; src_addr.s0 <= (end_row_vec_a - 2 * sizeof(float)); src_addr += (int2)(2 * sizeof(float), 2 * src1_stride_y))
- {
- float2 a0 = vload2(0, (__global float *)(src0_ptr + src_addr.s0));
- float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
- float4 b1 = vload4(0, (__global float *)(src1_ptr + src_addr.s1 + src1_stride_y));
-
- acc += b0 * (float4)a0.s0;
- acc += b1 * (float4)a0.s1;
- }
-
- for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(float), src1_stride_y))
- {
- float a0 = *((__global float *)(src0_ptr + src_addr.s0));
- float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
-
- acc += b0 * (float4)a0;
- }
-
- /* Compute destination address */
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
- /* Multiply by the weight of vector-matrix product */
- acc = acc * (float4)ALPHA;
-
- vstore4(acc, 0, (__global float *)(offset(&dst, 0, 0)));
-}
-
-/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1)
- *
- * @attention The width of vector A, the width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_VECTOR_A -DWIDTH_MATRIX_B and -DALPHA
- *
- * @attention The input vector A and matrix B must not be reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- */
-__kernel void gemm_vm_f16(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst))
-{
- int idx = get_global_id(0) * 8;
+ // Update address for the matrix A
+ src_addr.s0 += get_global_id(1) * src0_stride_y * NUM_ELEMS_PROCESSED_PER_THREAD_Y;
- /* Compute the address for the vector A and matrix B */
- int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- src_addr.s1 += idx * sizeof(half);
+ // Update address for the matrix B
+ src_addr.s1 += idx * sizeof(DATA_TYPE);
- int end_row_vec_a = src_addr.s0 + (WIDTH_VECTOR_A * sizeof(half));
+ int end_row_vec_a = src_addr.s0 + (COLS_A * sizeof(DATA_TYPE));
- half8 acc = 0.0f;
+ VECTOR_TYPE acc0 = 0.0f;
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ VECTOR_TYPE acc1 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ VECTOR_TYPE acc2 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ VECTOR_TYPE acc3 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- for(; src_addr.s0 <= (end_row_vec_a - 4 * sizeof(half)); src_addr += (int2)(4 * sizeof(half), 4 * src1_stride_y))
+ for(; src_addr.s0 <= (end_row_vec_a - 2 * sizeof(DATA_TYPE)); src_addr += (int2)(2 * sizeof(DATA_TYPE), 2 * src1_stride_y))
{
- half4 a0 = vload4(0, (__global half *)(src0_ptr + src_addr.s0));
- half8 b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
- half8 b1 = vload8(0, (__global half *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
- half8 b2 = vload8(0, (__global half *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y));
- half8 b3 = vload8(0, (__global half *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y));
-
- acc += b0 * (half8)a0.s0;
- acc += b1 * (half8)a0.s1;
- acc += b2 * (half8)a0.s2;
- acc += b3 * (half8)a0.s3;
+ // Load values from matrix A
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a0 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a1 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a2 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ a3 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ // Load values from matrix B
+ VECTOR_TYPE b0 = VLOAD(NUM_ELEMS_PROCESSED_PER_THREAD_X)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1));
+ VECTOR_TYPE b1 = VLOAD(NUM_ELEMS_PROCESSED_PER_THREAD_X)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1 + src1_stride_y));
+
+ // Accumulate
+ acc0 += b0 * (VECTOR_TYPE)a0.s0;
+ acc0 += b1 * (VECTOR_TYPE)a0.s1;
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc1 += b0 * (VECTOR_TYPE)a1.s0;
+ acc1 += b1 * (VECTOR_TYPE)a1.s1;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc2 += b0 * (VECTOR_TYPE)a2.s0;
+ acc2 += b1 * (VECTOR_TYPE)a2.s1;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc3 += b0 * (VECTOR_TYPE)a3.s0;
+ acc3 += b1 * (VECTOR_TYPE)a3.s1;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
- for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(half), src1_stride_y))
+ for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(DATA_TYPE), src1_stride_y))
{
- half a0 = *((__global half *)(src0_ptr + src_addr.s0));
- half8 b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1));
-
- acc += b0 * (half8)a0;
+ // Load values from matrix A
+ DATA_TYPE a0 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ DATA_TYPE a1 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ DATA_TYPE a2 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ DATA_TYPE a3 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ // Load values from matrix B
+ VECTOR_TYPE b0 = VLOAD(NUM_ELEMS_PROCESSED_PER_THREAD_X)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1));
+
+ // Accumulate
+ acc0 += b0 * (VECTOR_TYPE)a0;
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc1 += b0 * (VECTOR_TYPE)a1;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc2 += b0 * (VECTOR_TYPE)a2;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc3 += b0 * (VECTOR_TYPE)a3;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of vector-matrix product */
- acc = acc * (half8)ALPHA;
-
- vstore8(acc, 0, (__global half *)(offset(&dst, 0, 0)));
+ // Multiply by the weight of matrix-matrix product and store the result
+ acc0 = acc0 * (VECTOR_TYPE)ALPHA;
+ VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
+ (acc0, 0, (__global DATA_TYPE *)(offset(&dst, 0, 0)));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc1 = acc1 * (VECTOR_TYPE)ALPHA;
+ VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
+ (acc1, 0, (__global DATA_TYPE *)(offset(&dst, 0, 1)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc2 = acc2 * (VECTOR_TYPE)ALPHA;
+ VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
+ (acc2, 0, (__global DATA_TYPE *)(offset(&dst, 0, 2)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc3 = acc3 * (VECTOR_TYPE)ALPHA;
+ VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
+ (acc3, 0, (__global DATA_TYPE *)(offset(&dst, 0, 3)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
+#endif // defined(DATA_TYPE)
#ifdef FIXED_POINT_POSITION
-/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1) in 8 bit fixed point
- *
- * @attention The width of vector A, the width of matrix B, the alpha's value and the fixed point position need to be passed at compile time using -DWIDTH_VECTOR_A -DWIDTH_MATRIX_B, -DALPHA and -DFIXED_POINT_POSITION
+/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
*
- * @attention The input vector A and matrix B must not be reshaped
+ * @note This OpenCL kernel works with fixed point data types QS8
+ * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y
+ * @note The width of matrix A and the alpha's value need to be passed at compile time using -DCOLS_A and -DALPHA
+ * @note The fixed point position needs to be passed at compile time using -DFIXED_POINT_POSITION
+ * @note The alpha value must be passed in 8 bit fixed point format using -DALPHA
*
- * @note: ALPHA must be passed in 8 bit fixed point format
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
@@ -1148,72 +1160,143 @@ __kernel void gemm_vm_f16(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_vm_qs8(IMAGE_DECLARATION(src0),
+__kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
IMAGE_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
- int idx = get_global_id(0) * 16;
+ int idx = get_global_id(0) * NUM_ELEMS_PROCESSED_PER_THREAD_X;
- /* Compute the address for the vector A and matrix B */
+ // Compute starting address for matrix A and Matrix B
int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- src_addr.s1 += idx;
-
- int end_row_vec_a = src_addr.s0 + WIDTH_VECTOR_A;
-
- short8 acc0 = 0;
- short8 acc1 = 0;
- /* This for loop performs 4 accumulations per iteration */
- for(; src_addr.s0 <= (end_row_vec_a - 4); src_addr += (int2)(4, 4 * src1_stride_y))
+ // Update address for the matrix A
+ src_addr.s0 += get_global_id(1) * src0_stride_y * NUM_ELEMS_PROCESSED_PER_THREAD_Y;
+
+ // Update address for the matrix B
+ src_addr.s1 += idx * sizeof(char);
+
+ int end_row_vec_a = src_addr.s0 + (COLS_A * sizeof(char));
+
+ short8 acc00 = 0;
+ short8 acc01 = 0;
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ short8 acc10 = 0;
+ short8 acc11 = 0;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ short8 acc20 = 0;
+ short8 acc21 = 0;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ short8 acc30 = 0;
+ short8 acc31 = 0;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+
+ // This for loop performs 2 accumulations per iteration
+ for(; src_addr.s0 <= (end_row_vec_a - 2); src_addr += (int2)(2, 2 * src1_stride_y))
{
- char4 a0 = vload4(0, (__global char *)(src0_ptr + src_addr.s0));
+ char2 a0 = vload2(0, (__global char *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ char2 a1 = vload2(0, (__global char *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ char2 a2 = vload2(0, (__global char *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ char2 a3 = vload2(0, (__global char *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
char16 b0 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
char16 b1 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
- char16 b2 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y));
- char16 b3 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y));
-
- acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s0, b0.s01234567, FIXED_POINT_POSITION);
- acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s1, b1.s01234567, FIXED_POINT_POSITION);
- acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s2, b2.s01234567, FIXED_POINT_POSITION);
- acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s3, b3.s01234567, FIXED_POINT_POSITION);
-
- acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
- acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s1, b1.s89ABCDEF, FIXED_POINT_POSITION);
- acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s2, b2.s89ABCDEF, FIXED_POINT_POSITION);
- acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s3, b3.s89ABCDEF, FIXED_POINT_POSITION);
+
+ acc00 = mlal_sat_qs8x8(acc00, (char8)a0.s0, b0.s01234567, FIXED_POINT_POSITION);
+ acc00 = mlal_sat_qs8x8(acc00, (char8)a0.s1, b1.s01234567, FIXED_POINT_POSITION);
+ acc01 = mlal_sat_qs8x8(acc01, (char8)a0.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ acc01 = mlal_sat_qs8x8(acc01, (char8)a0.s1, b1.s89ABCDEF, FIXED_POINT_POSITION);
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc10 = mlal_sat_qs8x8(acc10, (char8)a1.s0, b0.s01234567, FIXED_POINT_POSITION);
+ acc10 = mlal_sat_qs8x8(acc10, (char8)a1.s1, b1.s01234567, FIXED_POINT_POSITION);
+ acc11 = mlal_sat_qs8x8(acc11, (char8)a1.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ acc11 = mlal_sat_qs8x8(acc11, (char8)a1.s1, b1.s89ABCDEF, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc20 = mlal_sat_qs8x8(acc20, (char8)a2.s0, b0.s01234567, FIXED_POINT_POSITION);
+ acc20 = mlal_sat_qs8x8(acc20, (char8)a2.s1, b1.s01234567, FIXED_POINT_POSITION);
+ acc21 = mlal_sat_qs8x8(acc21, (char8)a2.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ acc21 = mlal_sat_qs8x8(acc21, (char8)a2.s1, b1.s89ABCDEF, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc30 = mlal_sat_qs8x8(acc30, (char8)a3.s0, b0.s01234567, FIXED_POINT_POSITION);
+ acc30 = mlal_sat_qs8x8(acc30, (char8)a3.s1, b1.s01234567, FIXED_POINT_POSITION);
+ acc31 = mlal_sat_qs8x8(acc31, (char8)a3.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ acc31 = mlal_sat_qs8x8(acc31, (char8)a3.s1, b1.s89ABCDEF, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
- /* Left-over accumulations */
+ // Left-over accumulations
for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(1, src1_stride_y))
{
- char a0 = *((__global char *)(src0_ptr + src_addr.s0));
+ char a0 = *((__global char *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ char a1 = *((__global char *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ char a2 = *((__global char *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ char a3 = *((__global char *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
char16 b0 = vload16(0, (__global char *)(src1_ptr + src_addr.s1));
- acc0 = mlal_sat_qs8x8(acc0, (char8)a0, b0.s01234567, FIXED_POINT_POSITION);
- acc1 = mlal_sat_qs8x8(acc1, (char8)a0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ acc00 = mlal_sat_qs8x8(acc00, (char8)a0, b0.s01234567, FIXED_POINT_POSITION);
+ acc01 = mlal_sat_qs8x8(acc01, (char8)a0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc10 = mlal_sat_qs8x8(acc10, (char8)a1, b0.s01234567, FIXED_POINT_POSITION);
+ acc11 = mlal_sat_qs8x8(acc11, (char8)a1, b0.s89ABCDEF, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc20 = mlal_sat_qs8x8(acc20, (char8)a2, b0.s01234567, FIXED_POINT_POSITION);
+ acc21 = mlal_sat_qs8x8(acc21, (char8)a2, b0.s89ABCDEF, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc30 = mlal_sat_qs8x8(acc30, (char8)a3, b0.s01234567, FIXED_POINT_POSITION);
+ acc31 = mlal_sat_qs8x8(acc31, (char8)a3, b0.s89ABCDEF, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
- char16 acc_qs8 = convert_char16_sat((short16)(acc0, acc1));
-
+ // Multiply by the weight of matrix product and store the result
+ char16 acc_qs8;
+ acc_qs8 = convert_char16_sat((short16)(acc00, acc01));
acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
-
- /* Store 16 values */
vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 0)));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc_qs8 = convert_char16_sat((short16)(acc10, acc11));
+ acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+ vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 1)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc_qs8 = convert_char16_sat((short16)(acc20, acc21));
+ acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+ vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 2)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc_qs8 = convert_char16_sat((short16)(acc30, acc31));
+ acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+ vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 3)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
-/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1) in 16 bit fixed point
+/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
*
- * @attention The width of vector A, the width of matrix B, the alpha's value and the fixed point position need to be passed at compile time using -DWIDTH_VECTOR_A -DWIDTH_MATRIX_B, -DALPHA and -DFIXED_POINT_POSITION
+ * @note This OpenCL kernel works with fixed point data types QS16
+ * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y
+ * @note The width of matrix A and the alpha's value need to be passed at compile time using -DCOLS_A and -DALPHA
+ * @note The fixed point position needs to be passed at compile time using -DFIXED_POINT_POSITION
+ * @note The alpha value must be passed in 16 bit fixed point format using -DALPHA
*
- * @attention The input vector A and matrix B must not be reshaped
- *
- * @note: ALPHA must be passed in 16 bit fixed point format
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS16
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS16
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
@@ -1232,59 +1315,120 @@ __kernel void gemm_vm_qs8(IMAGE_DECLARATION(src0),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_vm_qs16(IMAGE_DECLARATION(src0),
+__kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
IMAGE_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
- int idx = get_global_id(0) * 8;
+ int idx = get_global_id(0) * NUM_ELEMS_PROCESSED_PER_THREAD_X;
- /* Compute the address for the vector A and matrix B */
+ // Compute starting address for matrix A and Matrix B
int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+
+ // Update address for the matrix A
+ src_addr.s0 += get_global_id(1) * src0_stride_y * NUM_ELEMS_PROCESSED_PER_THREAD_Y;
+
+ // Update address for the matrix B
src_addr.s1 += idx * sizeof(short);
- int end_row_vec_a = src_addr.s0 + (WIDTH_VECTOR_A * sizeof(short));
+ int end_row_vec_a = src_addr.s0 + (COLS_A * sizeof(short));
- /* Reset accumulator */
int8 acc0 = 0;
-
- /* This for loop performs 4 accumulations per iteration */
- for(; src_addr.s0 <= (end_row_vec_a - 4 * sizeof(short)); src_addr += (int2)(4 * sizeof(short), 4 * src1_stride_y))
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ int8 acc1 = 0;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ int8 acc2 = 0;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ int8 acc3 = 0;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+
+ // This for loop performs 2 accumulations per iteration
+ for(; src_addr.s0 <= (end_row_vec_a - 2 * sizeof(short)); src_addr += (int2)(2 * sizeof(short), 2 * src1_stride_y))
{
- short4 a0 = vload4(0, (__global short *)(src0_ptr + src_addr.s0));
+ short2 a0 = vload2(0, (__global short *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ short2 a1 = vload2(0, (__global short *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ short2 a2 = vload2(0, (__global short *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ short2 a3 = vload2(0, (__global short *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
short8 b0 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
short8 b1 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
- short8 b2 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y));
- short8 b3 = vload8(0, (__global short *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y));
acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s0, b0, FIXED_POINT_POSITION);
acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s1, b1, FIXED_POINT_POSITION);
- acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s2, b2, FIXED_POINT_POSITION);
- acc0 = mlal_sat_qs16x8(acc0, (short8)a0.s3, b3, FIXED_POINT_POSITION);
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc1 = mlal_sat_qs16x8(acc1, (short8)a1.s0, b0, FIXED_POINT_POSITION);
+ acc1 = mlal_sat_qs16x8(acc1, (short8)a1.s1, b1, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc2 = mlal_sat_qs16x8(acc2, (short8)a2.s0, b0, FIXED_POINT_POSITION);
+ acc2 = mlal_sat_qs16x8(acc2, (short8)a2.s1, b1, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc3 = mlal_sat_qs16x8(acc3, (short8)a3.s0, b0, FIXED_POINT_POSITION);
+ acc3 = mlal_sat_qs16x8(acc3, (short8)a3.s1, b1, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
- /* Left-over accumulations */
+ // Left-over accumulations
for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(short), src1_stride_y))
{
- short a0 = *((__global short *)(src0_ptr + src_addr.s0));
+ short a0 = *((__global short *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ short a1 = *((__global short *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ short a2 = *((__global short *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ short a3 = *((__global short *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
short8 b0 = vload8(0, (__global short *)(src1_ptr + src_addr.s1));
acc0 = mlal_sat_qs16x8(acc0, (short8)a0, b0, FIXED_POINT_POSITION);
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc1 = mlal_sat_qs16x8(acc1, (short8)a1, b0, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc2 = mlal_sat_qs16x8(acc2, (short8)a2, b0, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc3 = mlal_sat_qs16x8(acc3, (short8)a3, b0, FIXED_POINT_POSITION);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
- short8 acc_qs16 = convert_short8_sat(acc0);
-
+ // Multiply by the weight of matrix product and store the result
+ short8 acc_qs16;
+ acc_qs16 = convert_short8_sat(acc0);
acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
-
- /* Store 8 values */
vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 0)));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc_qs16 = convert_short8_sat(acc1);
+ acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+ vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 1)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc_qs16 = convert_short8_sat(acc2);
+ acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+ vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 2)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc_qs16 = convert_short8_sat(acc3);
+ acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+ vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 3)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
-#endif /* defined(FIXED_POINT_POSITION) */
-#endif /* defined(WIDTH_VECTOR_A) */
-#endif /* defined(WIDTH_MATRIX_B) && defined(ALPHA) */
+#endif // defined(FIXED_POINT_POSITION)
+#endif // defined(COLS_A) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_Y)
+#endif // defined(COLS_B) && defined(ALPHA)
#ifdef BETA
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
@@ -1508,4 +1652,4 @@ __kernel void gemm_lc_vm_f32(IMAGE_DECLARATION(src0),
vstore4(acc, 0, (__global float *)(offset(&dst, 0, 0)));
}
-#endif /* WIDTH_VECTOR_A */
+#endif /* WIDTH_VECTOR_A */
\ No newline at end of file
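Note: the new generic gemm_mm_floating_point kernel introduced above is specialised entirely through compile-time defines, as its @note lines describe. The following is a hedged host-side sketch of assembling those defines (the values are illustrative; the real option strings are built in CLGEMMMatrixMultiplyKernel.cpp further down in this patch, using support::cpp11::to_string rather than std::to_string):

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include <set>
#include <string>

// Hedged sketch: the compile-time defines gemm_mm_floating_point expects,
// mirroring the @note lines in its documentation. Values are illustrative.
cl::Kernel build_gemm_mm_floating_point(unsigned int cols_a)
{
    std::set<std::string> build_opts;
    build_opts.emplace("-DDATA_TYPE=float");                    // F16/F32 only
    build_opts.emplace("-DCOLS_A=" + std::to_string(cols_a));   // width of matrix A
    build_opts.emplace("-DALPHA=1.0f");                         // weight of the matrix product
    build_opts.emplace("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=4"); // columns of B per work-item
    build_opts.emplace("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=4"); // rows of A per work-item (1..4)
    return static_cast<cl::Kernel>(arm_compute::CLKernelLibrary::get().create_kernel("gemm_mm_floating_point", build_opts));
}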
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
index ce68c1f9cd..ef572cfc7e 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
@@ -64,8 +64,8 @@ void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const IC
_output = output;
// Create kernel and set static arguments
- std::set<std::string> build_opts = { ("-DWIDTH_MATRIX_B=" + support::cpp11::to_string(input1->info()->dimension(0))) };
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_u8", build_opts));
+ std::set<std::string> build_opts = { ("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0))) };
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_interleaved_transposed_u8", build_opts));
unsigned int idx = 3 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
_kernel.setArg<int32_t>(idx++, a_offset);
_kernel.setArg<int32_t>(idx++, b_offset);
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 39526a23e1..684e3232d5 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -48,13 +48,13 @@ CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
{
}
-void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha)
+void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, bool is_interleaved_transposed)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
- if(output->info()->dimension(1) == 1)
+ if(!is_interleaved_transposed)
{
ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
}
@@ -72,79 +72,89 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
_lws_hint = cl::NDRange(8, 8);
}
- std::ostringstream mm_arguments;
- mm_arguments << "-DWIDTH_MATRIX_B=" << input1->info()->dimension(0) << " ";
+ std::set<std::string> build_opts;
+ build_opts.emplace(("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0))));
+ build_opts.emplace(("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0))));
+
if(is_data_type_fixed_point(input0->info()->data_type()))
{
- mm_arguments << "-DALPHA=" << (input0->info()->data_type() == DataType::QS8 ?
- sqcvt_qs8_f32(alpha, input0->info()->fixed_point_position()) :
- sqcvt_qs16_f32(alpha, input0->info()->fixed_point_position()))
- << " ";
- mm_arguments << "-DFIXED_POINT_POSITION=" << input0->info()->fixed_point_position() << " ";
+ build_opts.emplace(("-DALPHA=" + support::cpp11::to_string((input0->info()->data_type() == DataType::QS8 ?
+ sqcvt_qs8_f32(alpha, input0->info()->fixed_point_position()) :
+ sqcvt_qs16_f32(alpha, input0->info()->fixed_point_position())))));
+
+ build_opts.emplace(("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input0->info()->fixed_point_position())));
}
else
{
- mm_arguments << "-DALPHA=" << alpha << " ";
+ build_opts.emplace(("-DALPHA=" + float_to_string_with_full_precision(alpha)));
}
- std::set<std::string> build_opts;
- // Check if the output tensor is a vector. If so,the kernel runs the vector-matrix multiplication
- if(output->info()->dimension(1) == 1)
+ if(is_interleaved_transposed)
{
- mm_arguments << "-DWIDTH_VECTOR_A=" << input0->info()->dimension(0) << " ";
- build_opts.emplace(mm_arguments.str());
-
// Create kernel
std::string data_type_name = lower_string(string_from_data_type(input0->info()->data_type()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(("gemm_vm_" + data_type_name), build_opts));
+
+ if(data_type_name == "f32")
+ {
+ GPUTarget arch_target = get_arch_from_target(get_target());
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target), build_opts));
+ }
+ else
+ {
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_interleaved_transposed_" + data_type_name, build_opts));
+ }
// Configure kernel window
- const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(input0->info()->data_type());
+ const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(input0->info()->data_type());
+ constexpr unsigned int num_elems_processed_per_iteration_y = 4;
- Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x));
+ Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- AccessWindowStatic input0_access(input0->info(), 0, 0, input0->info()->tensor_shape().x(), 1);
- AccessWindowHorizontal input1_access(input1->info(), 0, num_elems_processed_per_iteration_x);
- AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration_x);
+ AccessWindowRectangle input0_access(input0->info(), 0, 0, num_elems_processed_per_iteration_y, 1, 1.f, 0.25f);
+ AccessWindowTranspose input1_access(input1->info(), 0, 0, num_elems_processed_per_iteration_x, 1, 0.f, 0.25f);
+ AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
update_window_and_padding(win, input0_access, input1_access, output_access);
- Coordinates coord;
- coord.set_num_dimensions(output->info()->num_dimensions());
- output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
ICLKernel::configure(win);
}
- else
+ else // The input tensors have not been reshaped
{
- build_opts.emplace(mm_arguments.str());
+ ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
- // Create kernel
- std::string data_type_name = lower_string(string_from_data_type(input0->info()->data_type()));
+ // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor
+ const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(input0->info()->data_type());
+ const unsigned int num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->info()->dimension(1)), 4);
- if(data_type_name == "f32")
+ build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type())));
+ build_opts.emplace(("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=" + support::cpp11::to_string(num_elems_processed_per_iteration_x)));
+ build_opts.emplace(("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + support::cpp11::to_string(num_elems_processed_per_iteration_y)));
+
+ // Create kernel
+ if(is_data_type_fixed_point(input0->info()->data_type()))
{
- GPUTarget arch_target = get_arch_from_target(get_target());
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_f32_" + string_from_target(arch_target), build_opts));
+ std::string kernel_name = "gemm_mm_" + lower_string(string_from_data_type(input0->info()->data_type()));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel((kernel_name), build_opts));
}
else
{
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_" + data_type_name, build_opts));
+ std::string kernel_name = "gemm_mm_floating_point";
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel((kernel_name), build_opts));
}
- // Configure window kernel
- const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(input0->info()->data_type());
- constexpr unsigned int num_elems_processed_per_iteration_y = 4;
-
Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- AccessWindowRectangle input0_access(input0->info(), 0, 0, num_elems_processed_per_iteration_y, 1, 1.f, 0.25f);
- AccessWindowTranspose input1_access(input1->info(), 0, 0, num_elems_processed_per_iteration_x, 1, 0.f, 0.25f);
+ AccessWindowStatic input0_access(input0->info(), 0, 0, input0->info()->dimension(0), ceil_to_multiple(input0->info()->dimension(1), num_elems_processed_per_iteration_y));
+ AccessWindowStatic input1_access(input1->info(), 0, 0, ceil_to_multiple(input1->info()->dimension(0), num_elems_processed_per_iteration_x), input1->info()->dimension(1));
AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
update_window_and_padding(win, input0_access, input1_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ Coordinates coord;
+ coord.set_num_dimensions(output->info()->num_dimensions());
+ output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape()));
ICLKernel::configure(win);
}
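The two branches above select among five kernel families. A condensed sketch of the dispatch (standalone C++ with simplified enums, not the library's types; the architecture suffixes are illustrative):

    #include <string>

    enum class DataType { QS8, QS16, F16, F32 };
    enum class GPUTarget { MIDGARD, BIFROST };

    bool is_fixed_point(DataType dt)
    {
        return dt == DataType::QS8 || dt == DataType::QS16;
    }

    std::string name_of(DataType dt)
    {
        switch(dt)
        {
            case DataType::QS8:  return "qs8";
            case DataType::QS16: return "qs16";
            case DataType::F16:  return "f16";
            default:             return "f32";
        }
    }

    // Reshaped F32 gets an architecture-specific kernel; other reshaped types a
    // generic one; the un-reshaped path splits fixed point vs floating point.
    std::string select_gemm_kernel(DataType dt, GPUTarget arch, bool is_interleaved_transposed)
    {
        if(is_interleaved_transposed)
        {
            if(dt == DataType::F32)
            {
                return std::string("gemm_mm_interleaved_transposed_f32_") + (arch == GPUTarget::BIFROST ? "bifrost" : "midgard");
            }
            return "gemm_mm_interleaved_transposed_" + name_of(dt);
        }
        return is_fixed_point(dt) ? "gemm_mm_" + name_of(dt) : std::string("gemm_mm_floating_point");
    }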
@@ -157,9 +167,9 @@ void CLGEMMMatrixMultiplyKernel::run(const Window &window, cl::CommandQueue &que
Window slice = window.first_slice_window_2D();
Window slice_matrix_b = slice;
- slice_matrix_b.set(Window::DimX, Window::Dimension(0, _input1->info()->dimension(0), 1));
- slice_matrix_b.set(Window::DimY, Window::Dimension(0, _input1->info()->dimension(1), 1));
- slice_matrix_b.set(Window::DimZ, Window::Dimension(0, 1, 1));
+
+ slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1));
+ slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1));
do
{
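The run() change above pins matrix B to a single 2D slice, since the same B is reused while the output window advances batch by batch. A toy model of that windowing (standalone C++; the Window type here is a stand-in, not the library's):

    // Stand-in for the library's Window: one dimension per axis.
    struct Dimension
    {
        int start, end, step;
    };
    struct Window
    {
        Dimension x, y, z;
    };

    // Matrix B has no batch dimension, so its slice window is collapsed: the
    // kernel always reads B from the origin while input0/output slices advance.
    Window pin_matrix_b(Window slice)
    {
        slice.x = { 0, 1, 1 };
        slice.y = { 0, 1, 1 };
        return slice;
    }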
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index b1b83985d0..0bbec94e78 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -197,9 +197,12 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
// Configure kernels
_input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
+
+ // Configure matrix multiply
if(_is_fully_connected_convolution)
{
- _mm_kernel.configure(&_input_im2col_reshaped, weights, &_gemm_output, 1.0f);
+ // The matrices A and B have not been reshaped
+ _mm_kernel.configure(&_input_im2col_reshaped, weights, &_gemm_output, 1.0f, false);
}
else
{
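With is_interleaved_transposed set to false, the shapes must satisfy the direct-path contract asserted in CLGEMMMatrixMultiplyKernel::configure. A minimal sketch of that contract (standalone C++; dimension 0 is the width, as in the library):

    #include <cassert>

    struct Shape2D
    {
        unsigned int x, y; // x = width (dim 0), y = height (dim 1)
    };

    // Direct (non-reshaped) GEMM: A's width must equal B's height, and the
    // output takes B's width and A's height.
    void check_direct_gemm(Shape2D a, Shape2D b, Shape2D out)
    {
        assert(a.x == b.y);
        assert(out.x == b.x);
        assert(out.y == a.y);
        (void)a; (void)b; (void)out; // keep release builds warning-free
    }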
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 66a858d3ed..f7cea551f6 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -26,217 +26,127 @@
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "support/ToolchainSupport.h"
#include <algorithm>
-#include <cmath>
-namespace arm_compute
+using namespace arm_compute;
+
+void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output)
{
-CLFullyConnectedLayerReshapeWeights::CLFullyConnectedLayerReshapeWeights()
- : _transpose_kernel(), _transpose1xW_kernel(), _transpose_output(), _transpose_weights(false), _is_batched_fc_layer(false)
+ auto k = arm_compute::support::cpp14::make_unique<CLTransposeKernel>();
+ k->configure(input, output);
+ _kernel = std::move(k);
+}
+
+CLFullyConnectedLayer::CLFullyConnectedLayer()
+ : _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true),
+ _accumulate_biases(false)
{
}
-void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLTensor *output, bool transpose_weights, bool is_batched_fc_layer)
+void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON(input->info()->num_dimensions() > 2);
- ARM_COMPUTE_ERROR_ON(output == nullptr);
- ARM_COMPUTE_ERROR_ON(!transpose_weights && !is_batched_fc_layer);
+ ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));
- const DataType data_type = input->info()->data_type();
+ const DataType dt = input->info()->data_type();
const int fixed_point_position = input->info()->fixed_point_position();
- _transpose_weights = transpose_weights;
- _is_batched_fc_layer = is_batched_fc_layer;
+ // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
- // Check if we need to transpose the weights
- if(_transpose_weights)
- {
- if(_is_batched_fc_layer)
- {
- // Initialize the output tensor for transpose
- TensorShape shape_transposed(input->info()->dimension(1), input->info()->dimension(0));
- _transpose_output.allocator()->init(TensorInfo(shape_transposed, 1, data_type, fixed_point_position));
- _transpose_kernel.configure(input, &_transpose_output);
+ // Initialize output tensor for im2col
+ TensorShape shape_im2col;
+ shape_im2col.set(0, input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2));
+ shape_im2col.set(1, input->info()->dimension(3));
+ shape_im2col.set(2, input->info()->dimension(4));
+ shape_im2col.set(3, input->info()->dimension(5));
+ _im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
- // Configure transpose 1xW kernel
- _transpose1xW_kernel.configure(&_transpose_output, output);
+ // Configure im2col kernel
+ _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
- // Allocate temporary tensor used for transposing the weights
- _transpose_output.allocator()->allocate();
- }
- else
- {
- _transpose_kernel.configure(input, output);
- }
- }
- else
- {
- if(_is_batched_fc_layer)
- {
- // Configure transpose 1xW kernel
- _transpose1xW_kernel.configure(input, output);
- }
- else
- {
- ARM_COMPUTE_ERROR("Configuration transpose_weights=false & is_batched_fc_layer=false not supported");
- }
- }
-}
-
-void CLFullyConnectedLayerReshapeWeights::run()
-{
- if(_transpose_weights)
- {
- CLScheduler::get().enqueue(_transpose_kernel, _is_batched_fc_layer);
- }
+ // Configure matrix multiply kernel
+ _mm_kernel.configure(&_im2col_output, weights, output, 1.0f, false);
- if(_is_batched_fc_layer)
- {
- CLScheduler::get().enqueue(_transpose1xW_kernel);
- }
+ // Allocate the output tensor for im2col once all the configure methods have been called
+ _im2col_output.allocator()->allocate();
}
-CLFullyConnectedLayer::CLFullyConnectedLayer()
- : _im2col_kernel(), _reshape_weights_kernel(), _interleave4x4_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _interleave4x4_output(), _reshape_weights_output(),
- _are_weights_reshaped(false), _is_batched_fc_layer(false), _linearize_input(false), _accumulate_biases(false)
+void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
{
+ ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));
+
+ // Configure matrix multiply kernel
+ _mm_kernel.configure(input, weights, output, 1.0f, false);
}
void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, bool transpose_weights, bool are_weights_reshaped)
{
- // With the Fully Connected layer we can have 4 different cases:
- // 1) Convolution layer -> Fully Connected layer without batches
- // 2) Fully Connected layer -> Fully Connected layer without batches
- // 3) Convolution layer -> Fully Connected layer with batches
- // 4) Fully Connected layer -> Fully Connected layer with batches
-
- // Expected shape before transpose and reshaping
- // Input: In x B (In and B can be multi-dimensional)
- // Weights: flat(In) x Out
- // Biases: Out
- // Output: Out x B (B can be multi-dimensional)
-
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, weights, output);
+ ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() != 2);
- const DataType data_type = input->info()->data_type();
- const int fixed_point_position = input->info()->fixed_point_position();
- const int num_batch_dimensions = std::max(0, static_cast<int>(output->info()->tensor_shape().num_dimensions()) - 1);
- const int num_input_dimensions = input->info()->tensor_shape().num_dimensions() - num_batch_dimensions;
- const size_t linear_input_size = input->info()->tensor_shape().total_size_lower(num_input_dimensions);
-
- _linearize_input = input->info()->tensor_shape().x() != linear_input_size;
- _are_weights_reshaped = are_weights_reshaped;
- _accumulate_biases = biases != nullptr;
- _is_batched_fc_layer = num_batch_dimensions > 0;
-
- // Check if number of batches match
- ARM_COMPUTE_ERROR_ON(input->info()->tensor_shape().total_size_upper(num_input_dimensions) != output->info()->tensor_shape().total_size_upper(1));
- ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 2);
+ _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
+ _is_fc_after_conv = true;
+ _accumulate_biases = false;
- const size_t interleave_width = 16 / input->info()->element_size();
- const ICLTensor *weights_to_use = weights;
-
- if(!are_weights_reshaped && (transpose_weights || _is_batched_fc_layer))
+ if(biases != nullptr)
{
- weights_to_use = &_reshape_weights_output;
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
+
+ _accumulate_biases = true;
- TensorShape reshaped_weights_shape(weights->info()->tensor_shape());
+ // Configure accumulate biases kernel
+ _accumulate_biases_kernel.configure(output, biases);
+ }
- // Transpose weights if the user hasn't done it
- if(transpose_weights)
- {
- const size_t shape_x = reshaped_weights_shape.x();
- reshaped_weights_shape.set(0, reshaped_weights_shape.y());
- reshaped_weights_shape.set(1, shape_x);
- }
+ // With the Fully Connected layer we can have 4 different cases:
+ // 1) Convolution layer -> Fully Connected layer without batches
+ // 2) Fully Connected layer -> Fully Connected layer without batches
+ // 3) Convolution layer -> Fully Connected layer with batches
+ // 4) Fully Connected layer -> Fully Connected layer with batches
- // If we run multiple batches we need 1xW transpose, too.
- if(_is_batched_fc_layer)
- {
- const float shape_x = reshaped_weights_shape.x();
- reshaped_weights_shape.set(0, reshaped_weights_shape.y() * interleave_width);
- reshaped_weights_shape.set(1, static_cast<unsigned int>(std::ceil(shape_x / interleave_width)));
- }
+ const ICLTensor *weights_to_use = weights;
- _reshape_weights_output.allocator()->init(TensorInfo(reshaped_weights_shape, 1, data_type, fixed_point_position));
+ if(!_are_weights_reshaped)
+ {
+ weights_to_use = &_reshape_weights_output;
// Reshape the weights
- _reshape_weights_kernel.configure(weights, &_reshape_weights_output, transpose_weights, _is_batched_fc_layer);
+ _reshape_weights_kernel.configure(weights, &_reshape_weights_output);
}
- // Check correct shape of weights
- if(_is_batched_fc_layer)
+ // Check if we have a fully connected layer with batches
+ const bool is_batched_fc_layer = output->info()->dimension(1) > 1;
+
+ if(is_batched_fc_layer)
{
- // Transpose + Transpose1xW
- ARM_COMPUTE_ERROR_ON(weights_to_use->info()->tensor_shape().x() != linear_input_size * interleave_width);
- ARM_COMPUTE_ERROR_ON(weights_to_use->info()->tensor_shape().y() != static_cast<unsigned int>(std::ceil(static_cast<float>(output->info()->tensor_shape().x()) / interleave_width)));
+ _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(input->info()->tensor_shape().cbegin() + 3,
+ input->info()->tensor_shape().cend(),
+ output->info()->tensor_shape().cbegin() + 1));
}
else
{
- // Transpose
- ARM_COMPUTE_ERROR_ON(weights_to_use->info()->tensor_shape().x() != output->info()->tensor_shape().x());
- ARM_COMPUTE_ERROR_ON(weights_to_use->info()->tensor_shape().y() != linear_input_size);
+ _is_fc_after_conv = input->info()->num_dimensions() > 1;
}
- const ICLTensor *multiply_input = input;
-
- if(_linearize_input)
+ if(_is_fc_after_conv)
{
- TensorShape shape_im2col(input->info()->tensor_shape());
- shape_im2col.collapse(num_input_dimensions);
- _im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, data_type, fixed_point_position));
-
- // Configure im2col kernel
- _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
-
- multiply_input = &_im2col_output;
+ // Fully Connected layer after a Convolution layer (cases 1 and 3)
+ configure_conv_fc(input, weights_to_use, output);
}
-
- if(_is_batched_fc_layer)
- {
- TensorShape shape_interleaved(multiply_input->info()->tensor_shape());
- shape_interleaved.set(0, shape_interleaved.x() * 4);
- shape_interleaved.set(1, std::ceil(shape_interleaved.y() / 4.f));
- _interleave4x4_output.allocator()->init(TensorInfo(shape_interleaved, 1, data_type, fixed_point_position));
-
- // Configure interleave4x4 kernel
- _interleave4x4_kernel.configure(multiply_input, &_interleave4x4_output);
-
- multiply_input = &_interleave4x4_output;
- }
-
- // Configure matrix multiply kernel
- _mm_kernel.configure(multiply_input, weights_to_use, output, 1.0f);
-
- if(_accumulate_biases)
+ else
{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
- ARM_COMPUTE_ERROR_ON(biases->info()->tensor_shape().x() != output->info()->tensor_shape().x());
-
- // Configure accumulate biases kernel
- _accumulate_biases_kernel.configure(output, biases);
+ // Fully Connected layer after a Fully Connected layer (cases 2 and 4)
+ configure_fc_fc(input, weights_to_use, output);
}
// Allocate the transpose tensor if the are_weights_reshaped flag is false and once all the configure methods have been called
- if(!are_weights_reshaped && (transpose_weights || _is_batched_fc_layer))
+ if(!_are_weights_reshaped)
{
// Allocate the tensor for the weights reshaped
_reshape_weights_output.allocator()->allocate();
}
-
- if(_linearize_input)
- {
- _im2col_output.allocator()->allocate();
- }
-
- if(_is_batched_fc_layer)
- {
- _interleave4x4_output.allocator()->allocate();
- }
}
void CLFullyConnectedLayer::run()
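The batched test above decides between the two configure helpers by comparing shape tails. A standalone sketch of the same decision on std::vector shapes (not the library's TensorShape; bounds checks added for the free-standing version):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Shapes listed lowest dimension first, mirroring TensorShape iteration.
    using Shape = std::vector<std::size_t>;

    // Batched case: the input of an FC layer that follows a convolution still
    // carries its 3D activation block (w, h, c), so its dimensions from index 3
    // upwards must line up with the output's batch dimensions from index 1 upwards.
    bool is_fc_after_conv(const Shape &in, const Shape &out)
    {
        const bool is_batched = out.size() > 1 && out[1] > 1;
        if(is_batched)
        {
            return in.size() >= 4 && (in.size() - 3) <= (out.size() - 1)
                   && std::equal(in.begin() + 3, in.end(), out.begin() + 1);
        }
        return in.size() > 1; // without batches any multi-dimensional input comes from a conv layer
    }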
@@ -249,17 +159,11 @@ void CLFullyConnectedLayer::run()
}
// Linearize input if it comes from a convolutional layer
- if(_linearize_input)
+ if(_is_fc_after_conv)
{
CLScheduler::get().enqueue(_im2col_kernel, false);
}
- // Interleave input
- if(_is_batched_fc_layer)
- {
- CLScheduler::get().enqueue(_interleave4x4_kernel, false);
- }
-
// Run matrix multiply
CLScheduler::get().enqueue(_mm_kernel, !_accumulate_biases);
@@ -269,4 +173,3 @@ void CLFullyConnectedLayer::run()
CLScheduler::get().enqueue(_accumulate_biases_kernel);
}
}
-} // namespace arm_compute
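End to end, the reworked function is driven as before; a hedged usage sketch (assumes a CL context initialised via CLScheduler::get().default_init(); shapes and data type are illustrative):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Case 4 above: FC after FC, batched (illustrative sizes, batch of 8).
        CLTensor src, weights, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(4096U, 8U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(4096U, 1000U), 1, DataType::F32)); // not yet transposed
        bias.allocator()->init(TensorInfo(TensorShape(1000U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(1000U, 8U), 1, DataType::F32));

        CLFullyConnectedLayer fc;
        fc.configure(&src, &weights, &bias, &dst, /*transpose_weights=*/true, /*are_weights_reshaped=*/false);

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src/weights/bias ...
        fc.run();
        CLScheduler::get().sync();
        return 0;
    }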
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index e81d8a6b97..9867229a7c 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -39,7 +39,7 @@
using namespace arm_compute;
CLGEMM::CLGEMM()
- : _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _run_vector_matrix_multiplication(false), _run_addition(false)
+ : _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false)
{
}
@@ -59,12 +59,16 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
- _mm_kernel.set_target(CLScheduler::get().target());
+ // If the input tensor A has 16 rows or fewer, we run a special version of GEMM without reshaping the input tensors
+ _is_interleaved_transposed = a->info()->dimension(1) > 16;
- // Check if the first input tensor is a vector. If so, all the kernels for reshaping the tensors can be skipped
- if(a->info()->dimension(1) != 1)
+ const ICLTensor *matrix_a = a;
+ const ICLTensor *matrix_b = b;
+
+ if(_is_interleaved_transposed)
{
- _run_vector_matrix_multiplication = false;
+ matrix_a = &_tmp_a;
+ matrix_b = &_tmp_b;
TensorShape shape_tmp_a = a->info()->tensor_shape();
TensorShape shape_tmp_b = b->info()->tensor_shape();
@@ -89,19 +93,17 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
_transpose_kernel.configure(b, &_tmp_b);
// Configure matrix multiply kernel
- _mm_kernel.configure(&_tmp_a, &_tmp_b, output, alpha);
+ _mm_kernel.set_target(CLScheduler::get().target());
+ }
+ _mm_kernel.configure(matrix_a, matrix_b, output, alpha, _is_interleaved_transposed);
+
+ if(_is_interleaved_transposed)
+ {
// Allocate intermediate tensors
_tmp_a.allocator()->allocate();
_tmp_b.allocator()->allocate();
}
- else // The first input tensor is a vector
- {
- _run_vector_matrix_multiplication = true;
-
- // Configure the matrix multiply kernel
- _mm_kernel.configure(a, b, output, alpha);
- }
// Configure matrix addition kernel
if(beta != 0 && c != nullptr)
@@ -113,7 +115,7 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
void CLGEMM::run()
{
- if(!_run_vector_matrix_multiplication)
+ if(_is_interleaved_transposed)
{
// Run interleave kernel
CLScheduler::get().enqueue(_interleave_kernel, false);
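The reshape decision above reduces to one threshold on the number of rows of A. A tiny sketch of the configure-time plan (standalone C++; names are illustrative):

    // With 16 rows or fewer the interleave/transpose overhead outweighs the
    // benefit of the tiled kernel, so GEMM runs directly on A and B.
    struct GemmPlan
    {
        bool        reshape;   // run interleave4x4 on A and transpose1xW on B first
        const char *mm_kernel; // which matrix-multiply flavour gets configured
    };

    GemmPlan plan_gemm(unsigned int m /* rows of A, i.e. a->info()->dimension(1) */)
    {
        const bool reshape = m > 16;
        return { reshape, reshape ? "interleaved_transposed" : "direct" };
    }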
diff --git a/tests/model_objects/AlexNet.h b/tests/model_objects/AlexNet.h
index c9fd448d5d..45622e2118 100644
--- a/tests/model_objects/AlexNet.h
+++ b/tests/model_objects/AlexNet.h
@@ -24,6 +24,8 @@
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
+#include "arm_compute/runtime/Tensor.h"
+
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/Utils.h"
@@ -149,7 +151,7 @@ public:
b[6]->allocator()->init(TensorInfo(TensorShape(4096U), 1, dt, fixed_point_position));
b[7]->allocator()->init(TensorInfo(TensorShape(1000U), 1, dt, fixed_point_position));
- if(_batches > 1)
+ if(_batches > 1 && std::is_same<TensorType, Tensor>::value)
{
w[5]->allocator()->init(TensorInfo(TensorShape(9216U * dt_size, 4096U / dt_size), 1, dt, fixed_point_position));
w[6]->allocator()->init(TensorInfo(TensorShape(4096U * dt_size, 4096U / dt_size), 1, dt, fixed_point_position));
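The type check above keeps the pre-interleaved weight shapes for the NEON Tensor only, since the CL batched path no longer consumes reshaped weights. A compile-time sketch of the gate (standalone C++ with stand-in tensor types):

    #include <type_traits>

    struct Tensor   {}; // stand-in for the NEON tensor type
    struct CLTensor {}; // stand-in for the OpenCL tensor type

    // Only the CPU path still wants 1xW-interleaved weights for batched runs.
    template <typename TensorType>
    bool needs_interleaved_weights(int batches)
    {
        return batches > 1 && std::is_same<TensorType, Tensor>::value;
    }

    // needs_interleaved_weights<Tensor>(4)   -> true
    // needs_interleaved_weights<CLTensor>(4) -> false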
diff --git a/tests/networks_new/AlexNetNetwork.h b/tests/networks_new/AlexNetNetwork.h
index 39c69daf60..b3a719671d 100644
--- a/tests/networks_new/AlexNetNetwork.h
+++ b/tests/networks_new/AlexNetNetwork.h
@@ -24,6 +24,8 @@
#ifndef __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
#define __ARM_COMPUTE_TEST_MODEL_OBJECTS_ALEXNET_H__
+#include "arm_compute/runtime/Tensor.h"
+
#include "AssetsLibrary.h"
#include "Globals.h"
#include "Utils.h"
@@ -153,7 +155,7 @@ public:
b[6].allocator()->init(TensorInfo(TensorShape(4096U), 1, _data_type, _fixed_point_position));
b[7].allocator()->init(TensorInfo(TensorShape(1000U), 1, _data_type, _fixed_point_position));
- if(_batches > 1)
+ if(_batches > 1 && std::is_same<TensorType, Tensor>::value)
{
w[5].allocator()->init(TensorInfo(TensorShape(9216U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
w[6].allocator()->init(TensorInfo(TensorShape(4096U * data_type_size, 4096U / data_type_size), 1, _data_type, _fixed_point_position));
diff --git a/tests/validation_new/CL/FullyConnectedLayer.cpp b/tests/validation_new/CL/FullyConnectedLayer.cpp
index 9bf3a75d88..e43997c47b 100644
--- a/tests/validation_new/CL/FullyConnectedLayer.cpp
+++ b/tests/validation_new/CL/FullyConnectedLayer.cpp
@@ -80,16 +80,6 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
const size_t shape_x = ws.x();
ws.set(0, ws.y());
ws.set(1, shape_x);
-
- // Weights have to be passed reshaped
- // Transpose 1xW for batched version
- if(!reshape_weights && dst_shape.y() > 1)
- {
- const float transpose_width = 16.0f / data_size_from_type(data_type);
- const size_t shape_x = ws.x();
- ws.set(0, ws.y() * static_cast<unsigned int>(transpose_width));
- ws.set(1, static_cast<unsigned int>(std::ceil(shape_x / transpose_width)));
- }
}
// Create tensors
@@ -113,7 +103,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
template <typename T>
-using CLFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T>;
+using CLFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -150,7 +140,7 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T>;
+using CLFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, false>;
TEST_SUITE(Quantized)
TEST_SUITE(QS8)
diff --git a/tests/validation_new/NEON/FullyConnectedLayer.cpp b/tests/validation_new/NEON/FullyConnectedLayer.cpp
index 6eb18ebc6a..e859fb3872 100644
--- a/tests/validation_new/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation_new/NEON/FullyConnectedLayer.cpp
@@ -117,7 +117,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
}
template <typename T>
-using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
+using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
TEST_SUITE(Float)
#ifdef ARM_COMPUTE_ENABLE_FP16
@@ -156,7 +156,7 @@ TEST_SUITE_END()
TEST_SUITE_END()
template <typename T>
-using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
+using NEFullyConnectedLayerFixedPointFixture = FullyConnectedLayerValidationFixedPointFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
TEST_SUITE(Quantized)
TEST_SUITE(QS8)
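Both backends share one fixture; the new trailing bool selects at compile time whether the reference path interleaves the reshaped weights. A minimal sketch of the pattern (standalone C++; names are stand-ins for the fixture types):

    struct CLTensorTag   {};
    struct NEONTensorTag {};

    // run_interleave mirrors the fixture's new template parameter: CL
    // instantiates with false (plain transpose), NEON with true (1xW interleave).
    template <typename TensorType, bool run_interleave>
    struct FixtureSketch
    {
        bool interleave_reference_weights(int output_rows, bool reshape_weights) const
        {
            return !reshape_weights && output_rows > 1 && run_interleave;
        }
    };

    using CLSketch   = FixtureSketch<CLTensorTag, false>;
    using NEONSketch = FixtureSketch<NEONTensorTag, true>;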
diff --git a/tests/validation_new/fixtures/FullyConnectedLayerFixture.h b/tests/validation_new/fixtures/FullyConnectedLayerFixture.h
index eb4aad8952..0953b0b67e 100644
--- a/tests/validation_new/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation_new/fixtures/FullyConnectedLayerFixture.h
@@ -76,7 +76,7 @@ RawTensor transpose(const RawTensor &src, int interleave = 1)
}
} // namespace
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool run_interleave>
class FullyConnectedLayerValidationFixedPointFixture : public framework::Fixture
{
public:
@@ -131,7 +131,7 @@ protected:
// Weights have to be passed reshaped
// Transpose 1xW for batched version
- if(!reshape_weights && output_shape.y() > 1)
+ if(!reshape_weights && output_shape.y() > 1 && run_interleave)
{
const int transpose_width = 16 / data_size_from_type(data_type);
const float shape_x = reshaped_weights_shape.x();
@@ -182,7 +182,7 @@ protected:
tmp = transpose(tmp);
// Reshape weights for batched runs
- if(!reshape_weights && output_shape.y() > 1)
+ if(!reshape_weights && output_shape.y() > 1 && run_interleave)
{
// Transpose with interleave
const int interleave_size = 16 / tmp.element_size();
@@ -232,15 +232,16 @@ protected:
DataType _data_type{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool run_interleave>
+class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T, run_interleave>
{
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
{
- FullyConnectedLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type,
- 0);
+ FullyConnectedLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
+ reshape_weights, data_type,
+ 0);
}
};
} // namespace validation
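For reference, a small standalone version of the transpose-with-interleave that the fixture's transpose(..., interleave) helper models (plain C++ on a row-major float matrix; W corresponds to 16 / element_size, so 4 for F32):

    #include <cstddef>
    #include <vector>

    // Transpose a rows x cols row-major matrix, keeping W consecutive elements
    // of each source row contiguous ("transpose 1xW"): the output has
    // ceil(cols / W) rows of rows * W elements each.
    std::vector<float> transpose_1xW(const std::vector<float> &src,
                                     std::size_t rows, std::size_t cols, std::size_t W)
    {
        const std::size_t out_rows = (cols + W - 1) / W;
        std::vector<float> dst(out_rows * rows * W, 0.f);
        for(std::size_t r = 0; r < rows; ++r)
        {
            for(std::size_t c = 0; c < cols; ++c)
            {
                const std::size_t block   = c / W;             // destination row
                const std::size_t dst_col = r * W + (c % W);   // offset inside it
                dst[block * rows * W + dst_col] = src[r * cols + c];
            }
        }
        return dst;
    }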