author     Anton Lokhmotov <psyhtest@users.noreply.github.com>   2017-11-20 11:02:10 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>             2018-11-02 16:35:24 +0000
commit     3e80c7fa601d5996e8ada3b2f6c69327f066ec17 (patch)
tree       e1d4f1c8c1dafe46005feb4e716ed80b6bbe9489
parent     d7295b7079f6b9126596cea998146ca9c6e87706 (diff)
download   ComputeLibrary-3e80c7fa601d5996e8ada3b2f6c69327f066ec17.tar.gz
COMPMID-661: Optimize FC layer with 2 new Bifrost kernels and LWS tuning (#33)
Change-Id: Ie56ac88dff5ff339572cec562e8cd62dc7f0aa8b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/109805
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h      4
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                                2
-rw-r--r--  src/core/CL/ICLKernel.cpp                                      2
-rw-r--r--  src/core/CL/cl_kernels/gemm.cl                               756
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp   23
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp           127
-rw-r--r--  src/runtime/CL/functions/CLFullyConnectedLayer.cpp            14
7 files changed, 703 insertions, 225 deletions
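
For reference, the dispatch implied by the new kernels' documentation can be sketched host-side as follows. This is a hypothetical helper, not code from this patch; the GpuArch enum is only a stand-in for the library's GPUTarget, and the actual selection is made in the changed C++ files listed above.

    #include <string>

    // Hypothetical stand-in for the library's GPUTarget enum.
    enum class GpuArch { MIDGARD, BIFROST };

    // Mirrors what the kernel documentation states: the F32 Bifrost kernels are used on
    // Bifrost GPUs, with the *_1000 variant when matrix B has at most 1000 columns;
    // every other floating-point case keeps using gemm_mm_floating_point.
    std::string select_gemm_mm_kernel(GpuArch arch, bool is_f32, unsigned int cols_b)
    {
        if(arch != GpuArch::BIFROST || !is_f32)
        {
            return "gemm_mm_floating_point";
        }
        return (cols_b <= 1000) ? "gemm_mm_floating_point_f32_bifrost_1000"
                                : "gemm_mm_floating_point_f32_bifrost";
    }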
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index f71e2a33f9..0fa22143cf 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -81,8 +81,8 @@ public:
void run() override;
private:
- void configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output);
- void configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output);
+ void configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, const GPUTarget gpu_target);
+ void configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, const GPUTarget gpu_target);
CLMemoryGroup _memory_group;
CLIm2ColKernel _im2col_kernel;
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 9a2bb81708..6cc5a9a6b5 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -225,6 +225,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "gemm_mm_interleaved_transposed_qs8", "gemm.cl" },
{ "gemm_mm_interleaved_transposed_qs16", "gemm.cl" },
{ "gemm_mm_floating_point", "gemm.cl" },
+ { "gemm_mm_floating_point_f32_bifrost", "gemm.cl" },
+ { "gemm_mm_floating_point_f32_bifrost_1000", "gemm.cl" },
{ "gemm_mm_qs8", "gemm.cl" },
{ "gemm_mm_qs16", "gemm.cl" },
{ "gemm_lc_vm_f32", "gemm.cl" },
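
Both new entries resolve to gemm.cl, so they are built like any other kernel in the library. A minimal sketch, assuming the create_kernel(name, build_options) interface CLKernelLibrary exposes elsewhere in the library; the define values below are placeholders, not values taken from this patch.

    #include <set>
    #include <string>

    #include "arm_compute/core/CL/CLKernelLibrary.h" // assumed header path

    using namespace arm_compute;

    // Sketch: build one of the newly registered Bifrost GEMM kernels.
    cl::Kernel create_bifrost_gemm_kernel()
    {
        std::set<std::string> build_opts;
        build_opts.emplace("-DCOLS_A=96");                          // placeholder width of matrix A
        build_opts.emplace("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=4"); // documented optimum for this kernel
        build_opts.emplace("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=4"); // placeholder
        return static_cast<cl::Kernel>(
            CLKernelLibrary::get().create_kernel("gemm_mm_floating_point_f32_bifrost", build_opts));
    }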
diff --git a/src/core/CL/ICLKernel.cpp b/src/core/CL/ICLKernel.cpp
index 13037a771d..3eb94b7ddc 100644
--- a/src/core/CL/ICLKernel.cpp
+++ b/src/core/CL/ICLKernel.cpp
@@ -194,4 +194,4 @@ cl::NDRange ICLKernel::gws_from_window(const Window &window)
(window.z().end() - window.z().start()) / window.z().step());
return gws;
-} \ No newline at end of file
+}
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index d08e821431..15111ed352 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -80,10 +80,10 @@ __kernel void gemm_transpose1x8(IMAGE_DECLARATION(src),
uint x = get_global_id(0);
uint y = get_global_id(1);
- /* Compute address for Matrix B - source */
+ // Compute address for Matrix B - source
Image src = CONVERT_TO_IMAGE_STRUCT(src);
- /* Compute address for Matrix B transposed - destination. X and Y are swapped */
+ // Compute address for Matrix B transposed - destination. X and Y are swapped
uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
ushort8 b0 = vload8(0, (__global ushort *)src.ptr);
@@ -112,10 +112,10 @@ __kernel void gemm_transpose1x16(IMAGE_DECLARATION(src),
uint x = get_global_id(0);
uint y = get_global_id(1);
- /* Compute address for Matrix B - source */
+ // Compute address for Matrix B - source
Image src = CONVERT_TO_IMAGE_STRUCT(src);
- /* Compute address for Matrix B transposed - destination. X and Y are swapped */
+ // Compute address for Matrix B transposed - destination. X and Y are swapped
uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
uchar16 b0 = vload16(0, (__global uchar *)src.ptr);
@@ -141,11 +141,11 @@ __kernel void gemm_transpose1x16(IMAGE_DECLARATION(src),
__kernel void gemm_interleave4x4_32bit(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from Matrix A */
+ // Load values from Matrix A
uint4 a0 = vload4(0, (__global uint *)(offset(&src, 0, 0)));
uint4 a1 = vload4(0, (__global uint *)(offset(&src, 0, 1)));
uint4 a2 = vload4(0, (__global uint *)(offset(&src, 0, 2)));
@@ -182,11 +182,11 @@ __kernel void gemm_interleave4x4_32bit(IMAGE_DECLARATION(src),
__kernel void gemm_interleave4x4_16bit(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from Matrix A */
+ // Load values from Matrix A
ushort8 a0 = vload8(0, (__global ushort *)(offset(&src, 0, 0)));
ushort8 a1 = vload8(0, (__global ushort *)(offset(&src, 0, 1)));
ushort8 a2 = vload8(0, (__global ushort *)(offset(&src, 0, 2)));
@@ -223,11 +223,11 @@ __kernel void gemm_interleave4x4_16bit(IMAGE_DECLARATION(src),
__kernel void gemm_interleave4x4_8bit(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from Matrix A */
+ // Load values from Matrix A
uchar16 a0 = vload16(0, (__global uchar *)(offset(&src, 0, 0)));
uchar16 a1 = vload16(0, (__global uchar *)(offset(&src, 0, 1)));
uchar16 a2 = vload16(0, (__global uchar *)(offset(&src, 0, 2)));
@@ -250,49 +250,11 @@ __kernel void gemm_interleave4x4_8bit(IMAGE_DECLARATION(src),
vstore16(val0, 0, ((__global uchar *)dst.ptr) + 48);
}
-/** This kernel accumulates each row with the biases vector
- *
- * @note The data type must be passed at compile time -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- *
- * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: U8/S8/QS8/U16/S16/F16/U32/S32/F32
- * @param[in] accum_stride_x Stride of the accmulate tensor in X dimension (in bytes)
- * @param[in] accum_step_x accum_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] accum_stride_y Stride of the accumlulate tensor in Y dimension (in bytes)
- * @param[in] accum_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] accum_offset_first_element_in_bytes The offset of the first element in the accumulate tensor
- * @param[in] biases_ptr Pointer to the biases vector. Same as @p accum_ptr
- * @param[in] biases_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] biases_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-#ifdef DATA_TYPE
-__kernel void gemm_accumulate_biases(
- IMAGE_DECLARATION(accum),
- VECTOR_DECLARATION(biases))
-{
- Image accum = CONVERT_TO_IMAGE_STRUCT(accum);
- Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
-
- VEC_DATA_TYPE(DATA_TYPE, 16)
- accum_value = vload16(0, (__global DATA_TYPE *)accum.ptr);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- biases_value = vload16(0, (__global DATA_TYPE *)biases.ptr);
-#ifdef FIXED_POINT_POSITION
- accum_value = ADD_SAT_OP_EXPAND(biases_value, accum_value, DATA_TYPE, 16);
-#else // FIXED_POINT_POSITION
- accum_value = biases_value + accum_value;
-#endif // FIXED_POINT_POSITION
-
- // Store result in the accummulate buffer
- vstore16(accum_value, 0, (__global DATA_TYPE *)accum.ptr);
-}
-#endif /* DATA_TYPE */
-
-#ifdef COLS_B
+#if defined(COLS_B)
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B
+ * @attention The number of matrix B columns needs to be passed at compile time using -DCOLS_B
*
* @param[in] src0_ptr Pointer to the source matrix. Supported formats: U8
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -327,20 +289,20 @@ __kernel void gemm_mm_interleaved_transposed_u8(IMAGE_DECLARATION(src0),
int c_mult_int,
int shift)
{
- /* src_addr.s0 = address of matrix A */
- /* src_addr.s1 = address of matrix B */
+ // src_addr.s0 = address of matrix A
+ // src_addr.s1 = address of matrix B
- /* Compute address for matrix A and B */
+ // Compute address for matrix A and B
int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
(src1_stride_y));
- /* Add offset_first_element_in_bytes */
+ // Add offset_first_element_in_bytes
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- /* Compute end row address for matrix B */
+ // Compute end row address for matrix B
int end_row_mtx_b = src_addr.s1 + COLS_B;
- /* Reset accumulators */
+ // Reset accumulators
int16 c00 = 0.0f;
int16 c10 = 0.0f;
int16 c20 = 0.0f;
@@ -348,7 +310,7 @@ __kernel void gemm_mm_interleaved_transposed_u8(IMAGE_DECLARATION(src0),
for(; src_addr.s1 <= (end_row_mtx_b - 8); src_addr += (int2)(8, 32))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
int8 a0 = (int8)a_offset + convert_int8(vload8(0, ((__global uchar *)src0_ptr) + src_addr.s0));
int16 b0 = (int16)b_offset + convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1));
@@ -367,7 +329,7 @@ __kernel void gemm_mm_interleaved_transposed_u8(IMAGE_DECLARATION(src0),
for(; src_addr.s1 < end_row_mtx_b; src_addr += (int2)(4, 16))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
int4 a0 = (int4)a_offset + convert_int4(vload4(0, ((__global uchar *)src0_ptr) + src_addr.s0));
int16 b0 = (int16)b_offset + convert_int16(vload16(0, ((__global uchar *)src1_ptr) + src_addr.s1));
@@ -377,28 +339,26 @@ __kernel void gemm_mm_interleaved_transposed_u8(IMAGE_DECLARATION(src0),
c30 += (int16)a0.s3 * b0;
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
+ // Multiply by the weight of matrix product
c00 = (((int16)c_offset + c00) * (int16)c_mult_int) >> shift;
c10 = (((int16)c_offset + c10) * (int16)c_mult_int) >> shift;
c20 = (((int16)c_offset + c20) * (int16)c_mult_int) >> shift;
c30 = (((int16)c_offset + c30) * (int16)c_mult_int) >> shift;
- /* Store 4x16 block */
+ // Store 4x16 block
vstore16(convert_uchar16_sat(c00), 0, (__global uchar *)(offset(&dst, 0, 0)));
vstore16(convert_uchar16_sat(c10), 0, (__global uchar *)(offset(&dst, 0, 1)));
vstore16(convert_uchar16_sat(c20), 0, (__global uchar *)(offset(&dst, 0, 2)));
vstore16(convert_uchar16_sat(c30), 0, (__global uchar *)(offset(&dst, 0, 3)));
}
-#endif /* COLS_B */
-#if defined(COLS_B) && defined(ALPHA)
/** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @attention The number of columns of matrix B and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -423,23 +383,23 @@ __kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0)
IMAGE_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
- /* src_addr.s0 = address of matrix A */
- /* src_addr.s1 = address of matrix B */
+ // src_addr.s0 = address of matrix A
+ // src_addr.s1 = address of matrix B
- /* Compute address for matrix A and B */
+ // Compute address for matrix A and B
int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
(src1_stride_y));
- /* Add offset_first_element_in_bytes */
+ // Add offset_first_element_in_bytes
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- /* Divide by 4 in order to get the src_addr in unit of float */
+ // Divide by 4 in order to get the src_addr in unit of float
src_addr = src_addr >> 2;
- /* Compute end row address for matrix B */
+ // Compute end row address for matrix B
int end_row_mtx_b = src_addr.s1 + COLS_B;
- /* Reset accumulators */
+ // Reset accumulators
float4 c00 = 0.0f;
float4 c10 = 0.0f;
float4 c20 = 0.0f;
@@ -447,7 +407,7 @@ __kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0)
for(; src_addr.s1 <= (end_row_mtx_b - 8); src_addr += (int2)(8, 8))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
float4 a0 = vload4(0, ((__global float *)src0_ptr) + src_addr.s0);
float4 b0 = vload4(0, ((__global float *)src1_ptr) + src_addr.s1);
@@ -456,7 +416,7 @@ __kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0)
c20 += (float4)a0.s2 * b0;
c30 += (float4)a0.s3 * b0;
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
a0 = vload4(0, ((__global float *)src0_ptr) + src_addr.s0 + 4);
b0 = vload4(0, ((__global float *)src1_ptr) + src_addr.s1 + 4);
@@ -468,7 +428,7 @@ __kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0)
for(; src_addr.s1 < end_row_mtx_b; src_addr += (int2)(4, 4))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
float4 a0 = vload4(0, ((__global float *)src0_ptr) + src_addr.s0);
float4 b0 = vload4(0, ((__global float *)src1_ptr) + src_addr.s1);
@@ -478,26 +438,28 @@ __kernel void gemm_mm_interleaved_transposed_f32_midgard(IMAGE_DECLARATION(src0)
c30 += (float4)a0.s3 * b0;
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
+#if defined(ALPHA)
+ // Multiply by the weight of matrix product
c00 = c00 * (float4)ALPHA;
c10 = c10 * (float4)ALPHA;
c20 = c20 * (float4)ALPHA;
c30 = c30 * (float4)ALPHA;
+#endif // defined(ALPHA)
- /* Store 4x4 block */
+ // Store 4x4 block
vstore4(c00, 0, (__global float *)(offset(&dst, 0, 0)));
vstore4(c10, 0, (__global float *)(offset(&dst, 0, 1)));
vstore4(c20, 0, (__global float *)(offset(&dst, 0, 2)));
vstore4(c30, 0, (__global float *)(offset(&dst, 0, 3)));
}
-/** This OpenCL kernel is optimised for Bifrost. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
+/** This OpenCL kernel is optimized for Bifrost. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @attention The number of matrix B columns and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -677,6 +639,7 @@ __kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0)
// Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+#if defined(ALPHA)
// Multiply by the weight of matrix product
c00 = c00 * ALPHA;
c01 = c01 * ALPHA;
@@ -694,6 +657,7 @@ __kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0)
c31 = c31 * ALPHA;
c32 = c32 * ALPHA;
c33 = c33 * ALPHA;
+#endif // defined(ALPHA)
barrier(CLK_GLOBAL_MEM_FENCE);
@@ -708,7 +672,7 @@ __kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0)
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
*
- * @attention The width of matrix B and the alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
+ * @attention The number of matrix B columns and the optional alpha's value need to be passed at compile time using -DCOLS_B and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -733,23 +697,23 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
IMAGE_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
- /* src_addr.s0 = address of matrix A */
- /* src_addr.s1 = address of matrix B */
+ // src_addr.s0 = address of matrix A
+ // src_addr.s1 = address of matrix B
- /* Compute address for matrix A and B */
+ // Compute address for matrix A and B
int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
(src1_stride_y));
- /* Add offset_first_element_in_bytes */
+ // Add offset_first_element_in_bytes
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- /* Divide by 2 in order to get the src_addr in unit of half */
+ // Divide by 2 in order to get the src_addr in unit of half
src_addr = src_addr >> 1;
- /* Compute end row address for matrix B */
+ // Compute end row address for matrix B
int end_row_mtx_b = src_addr.s1 + COLS_B;
- /* Reset accumulators */
+ // Reset accumulators
half8 c00 = 0.0f;
half8 c10 = 0.0f;
half8 c20 = 0.0f;
@@ -757,7 +721,7 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
for(; src_addr.s1 <= (end_row_mtx_b - 16); src_addr += (int2)(8, 16))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
half4 a0 = vload4(0, ((__global half *)src0_ptr) + src_addr.s0);
half8 b0 = vload8(0, ((__global half *)src1_ptr) + src_addr.s1);
@@ -766,7 +730,7 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
c20 += (half8)a0.s2 * b0;
c30 += (half8)a0.s3 * b0;
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
a0 = vload4(0, ((__global half *)src0_ptr) + src_addr.s0 + 4);
b0 = vload8(0, ((__global half *)src1_ptr) + src_addr.s1 + 8);
@@ -778,7 +742,7 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
for(; src_addr.s1 < end_row_mtx_b; src_addr += (int2)(4, 8))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
half4 a0 = vload4(0, ((__global half *)src0_ptr) + src_addr.s0);
half8 b0 = vload8(0, ((__global half *)src1_ptr) + src_addr.s1);
@@ -788,16 +752,18 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
c30 += (half8)a0.s3 * b0;
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
+#if defined(ALPHA)
+ // Multiply by the weight of matrix product
c00 = c00 * (half8)ALPHA;
c10 = c10 * (half8)ALPHA;
c20 = c20 * (half8)ALPHA;
c30 = c30 * (half8)ALPHA;
+#endif // defined(ALPHA)
- /* Store 4x8 block */
+ // Store 4x8 block
vstore8(c00, 0, (__global half *)(offset(&dst, 0, 0)));
vstore8(c10, 0, (__global half *)(offset(&dst, 0, 1)));
vstore8(c20, 0, (__global half *)(offset(&dst, 0, 2)));
@@ -805,11 +771,11 @@ __kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
}
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-#ifdef FIXED_POINT_POSITION
+#if defined(FIXED_POINT_POSITION)
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 8 bit fixed point precision
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
*
- * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
+ * @attention The number of matrix B columns, the optional alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
*
* @note: ALPHA must be passed in 8 bit fixed point format
*
@@ -836,20 +802,20 @@ __kernel void gemm_mm_interleaved_transposed_qs8(IMAGE_DECLARATION(src0),
IMAGE_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
- /* src_addr.s0 = address of matrix A */
- /* src_addr.s1 = address of matrix B */
+ // src_addr.s0 = address of matrix A
+ // src_addr.s1 = address of matrix B
- /* Compute address for matrix A and B */
+ // Compute address for matrix A and B
int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
(src1_stride_y));
- /* Add offset_first_element_in_bytes */
+ // Add offset_first_element_in_bytes
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- /* Compute end row address for matrix B */
+ // Compute end row address for matrix B
int end_row_mtx_b = src_addr.s1 + COLS_B;
- /* Reset accumulators */
+ // Reset accumulators
short8 c00 = 0.0f;
short8 c10 = 0.0f;
short8 c20 = 0.0f;
@@ -859,10 +825,10 @@ __kernel void gemm_mm_interleaved_transposed_qs8(IMAGE_DECLARATION(src0),
short8 c21 = 0.0f;
short8 c31 = 0.0f;
- /* This for loop performs 1 accumulation for each iteration */
+ // This for loop performs 1 accumulation for each iteration
for(; src_addr.s1 <= (end_row_mtx_b - 16); src_addr += (int2)(4, 16))
{
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ // Load values from matrix A (interleaved) and matrix B (transposed)
char4 a0 = vload4(0, ((__global char *)src0_ptr) + src_addr.s0);
char16 b0 = vload16(0, ((__global char *)src1_ptr) + src_addr.s1);
@@ -877,21 +843,23 @@ __kernel void gemm_mm_interleaved_transposed_qs8(IMAGE_DECLARATION(src0),
c31 = mlal_sat_qs8x8(c31, (char8)a0.s3, b0.s89ABCDEF, FIXED_POINT_POSITION);
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
+ // Multiply by the weight of matrix product
char16 c00_qs8 = convert_char16_sat((short16)(c00, c01));
char16 c10_qs8 = convert_char16_sat((short16)(c10, c11));
char16 c20_qs8 = convert_char16_sat((short16)(c20, c21));
char16 c30_qs8 = convert_char16_sat((short16)(c30, c31));
+#if defined(ALPHA)
c00_qs8 = mul_sat_qs8x16(c00_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
c10_qs8 = mul_sat_qs8x16(c10_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
c20_qs8 = mul_sat_qs8x16(c20_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
c30_qs8 = mul_sat_qs8x16(c30_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
- /* Store 16x4 block */
+ // Store 16x4 block
vstore16(c00_qs8, 0, (__global char *)(offset(&dst, 0, 0)));
vstore16(c10_qs8, 0, (__global char *)(offset(&dst, 0, 1)));
vstore16(c20_qs8, 0, (__global char *)(offset(&dst, 0, 2)));
@@ -901,7 +869,7 @@ __kernel void gemm_mm_interleaved_transposed_qs8(IMAGE_DECLARATION(src0),
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 16 bit fixed point precision
* Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
*
- * @attention The width of matrix B, the alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
+ * @attention The number of matrix B columns, the optional alpha's value and fixed point position need to be passed at compile time using -DCOLS_B -DALPHA and -DFIXED_POINT_POSITION
*
* @note: ALPHA must be passed in 16 bit fixed point format
*
@@ -928,29 +896,29 @@ __kernel void gemm_mm_interleaved_transposed_qs16(IMAGE_DECLARATION(src0),
IMAGE_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
- /* src_addr.s0 = address of matrix A */
- /* src_addr.s1 = address of matrix B */
+ // src_addr.s0 = address of matrix A
+ // src_addr.s1 = address of matrix B
- /* Compute address for matrix A and B */
+ // Compute address for matrix A and B
int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
(src1_stride_y));
- /* Add offset_first_element_in_bytes */
+ // Add offset_first_element_in_bytes
src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
- /* Divide by 2 in order to get the src_addr in unit of short */
+ // Divide by 2 in order to get the src_addr in unit of short
src_addr = src_addr >> 1;
- /* Compute end row address for matrix B */
+ // Compute end row address for matrix B
int end_row_mtx_b = src_addr.s1 + COLS_B;
- /* Reset accumulators */
+ // Reset accumulators
int8 c00 = 0.0f;
int8 c10 = 0.0f;
int8 c20 = 0.0f;
int8 c30 = 0.0f;
- /* This for loop performs 1 accumulation for each iteration */
+ // This for loop performs 1 accumulation for each iteration
for(; src_addr.s1 <= (end_row_mtx_b - 8); src_addr += (int2)(4, 8))
{
/* Load values from matrix A (interleaved) and matrix B (transposed) */
@@ -963,27 +931,30 @@ __kernel void gemm_mm_interleaved_transposed_qs16(IMAGE_DECLARATION(src0),
c30 = mlal_sat_qs16x8(c30, (short8)a0.s3, b0, FIXED_POINT_POSITION);
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Multiply by the weight of matrix product */
+ // Multiply by the weight of matrix product
short8 c00_qs16 = convert_short8_sat(c00);
short8 c10_qs16 = convert_short8_sat(c10);
short8 c20_qs16 = convert_short8_sat(c20);
short8 c30_qs16 = convert_short8_sat(c30);
+#if defined(ALPHA)
c00_qs16 = mul_sat_qs16x8(c00_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
c10_qs16 = mul_sat_qs16x8(c10_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
c20_qs16 = mul_sat_qs16x8(c20_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
c30_qs16 = mul_sat_qs16x8(c30_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
- /* Store 8x4 block */
+ // Store 8x4 block
vstore8(c00_qs16, 0, (__global short *)(offset(&dst, 0, 0)));
vstore8(c10_qs16, 0, (__global short *)(offset(&dst, 0, 1)));
vstore8(c20_qs16, 0, (__global short *)(offset(&dst, 0, 2)));
vstore8(c30_qs16, 0, (__global short *)(offset(&dst, 0, 3)));
}
#endif // defined(FIXED_POINT_POSITION)
+#endif // defined(COLS_B)
#if defined(COLS_A) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && (NUM_ELEMS_PROCESSED_PER_THREAD_Y)
#if defined(DATA_TYPE)
@@ -993,7 +964,7 @@ __kernel void gemm_mm_interleaved_transposed_qs16(IMAGE_DECLARATION(src0),
* @note This OpenCL kernel works with floating point data types (F16/F32)
* @note The floating point data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y
- * @note The width of matrix A and the alpha's value need to be passed at compile time using -DCOLS_A and -DALPHA
+ * @note The number of matrix A columns and the optional alpha's value need to be passed at compile time using -DCOLS_A and -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16/F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -1113,35 +1084,459 @@ __kernel void gemm_mm_floating_point(IMAGE_DECLARATION(src0),
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
// Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
acc0 = acc0 * (VECTOR_TYPE)ALPHA;
+#endif // defined(ALPHA)
VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
(acc0, 0, (__global DATA_TYPE *)(offset(&dst, 0, 0)));
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if defined(ALPHA)
acc1 = acc1 * (VECTOR_TYPE)ALPHA;
+#endif // defined(ALPHA)
VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
(acc1, 0, (__global DATA_TYPE *)(offset(&dst, 0, 1)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if defined(ALPHA)
acc2 = acc2 * (VECTOR_TYPE)ALPHA;
+#endif // defined(ALPHA)
VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
(acc2, 0, (__global DATA_TYPE *)(offset(&dst, 0, 2)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+#if defined(ALPHA)
acc3 = acc3 * (VECTOR_TYPE)ALPHA;
+#endif // defined(ALPHA)
VSTORE(NUM_ELEMS_PROCESSED_PER_THREAD_X)
(acc3, 0, (__global DATA_TYPE *)(offset(&dst, 0, 3)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
#endif // defined(DATA_TYPE)
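
With ALPHA now optional, the host side only needs to define it when alpha is not 1, and the kernels skip the scaling otherwise. A minimal sketch of that build-option handling (standalone C++, not code from this patch):

    #include <set>
    #include <string>

    // Sketch: append -DALPHA only when the multiplication by alpha cannot be skipped.
    void add_alpha_option(std::set<std::string> &build_opts, float alpha)
    {
        if(alpha != 1.0f)
        {
            build_opts.emplace("-DALPHA=" + std::to_string(alpha)); // e.g. "-DALPHA=0.500000"
        }
        // With alpha == 1.0f no define is added and the ALPHA-guarded code is compiled out.
    }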
-#ifdef FIXED_POINT_POSITION
+/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
+ *
+ * @note This OpenCL kernel works with the 32-bit floating point data type (float) and uses the fma units.
+ * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y.
+ * This kernel optimally uses -DNUM_ELEMS_PROCESSED_PER_THREAD_X=4.
+ * @note The number of matrix A columns must be passed at compile time using -DCOLS_A.
+ * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha
+ *
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16/F32
+ * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
+ * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
+ * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
+ * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_mm_floating_point_f32_bifrost(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
+{
+ int idx = get_global_id(0) * NUM_ELEMS_PROCESSED_PER_THREAD_X;
+
+ // Compute starting address for matrix A and matrix B
+ int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+
+ // Update address for matrix A
+ src_addr.s0 += get_global_id(1) * src0_stride_y * NUM_ELEMS_PROCESSED_PER_THREAD_Y;
+
+ // Update address for matrix B
+ src_addr.s1 += idx * sizeof(float);
+
+ // Address boundary for matrix A
+ int end_row_vec_a = src_addr.s0 + (COLS_A * sizeof(float));
+
+ // Initialize accumulators
+ float acc00 = 0.0f;
+ float acc01 = 0.0f;
+ float acc02 = 0.0f;
+ float acc03 = 0.0f;
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ float acc10 = 0.0f;
+ float acc11 = 0.0f;
+ float acc12 = 0.0f;
+ float acc13 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ float acc20 = 0.0f;
+ float acc21 = 0.0f;
+ float acc22 = 0.0f;
+ float acc23 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ float acc30 = 0.0f;
+ float acc31 = 0.0f;
+ float acc32 = 0.0f;
+ float acc33 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+
+ // A and B src indices get incremented at the same time.
+ for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(float)); src_addr += (int2)(2 * sizeof(float), 2 * src1_stride_y))
+ {
+ // Load values from matrix A
+ float2 a0 = vload2(0, (__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ float2 a1 = vload2(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ float2 a2 = vload2(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ float2 a3 = vload2(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ // Load values from matrix B
+ float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
+ float4 b1 = vload4(0, (__global float *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
+
+ // Multiply and accumulate
+ acc00 = fma(a0.s0, b0.s0, acc00);
+ acc00 = fma(a0.s1, b1.s0, acc00);
+ acc01 = fma(a0.s0, b0.s1, acc01);
+ acc01 = fma(a0.s1, b1.s1, acc01);
+ acc02 = fma(a0.s0, b0.s2, acc02);
+ acc02 = fma(a0.s1, b1.s2, acc02);
+ acc03 = fma(a0.s1, b1.s3, acc03);
+ acc03 = fma(a0.s0, b0.s3, acc03);
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc10 = fma(a1.s0, b0.s0, acc10);
+ acc11 = fma(a1.s0, b0.s1, acc11);
+ acc12 = fma(a1.s0, b0.s2, acc12);
+ acc13 = fma(a1.s0, b0.s3, acc13);
+
+ acc10 = fma(a1.s1, b1.s0, acc10);
+ acc11 = fma(a1.s1, b1.s1, acc11);
+ acc12 = fma(a1.s1, b1.s2, acc12);
+ acc13 = fma(a1.s1, b1.s3, acc13);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc20 = fma(a2.s0, b0.s0, acc20);
+ acc21 = fma(a2.s0, b0.s1, acc21);
+ acc22 = fma(a2.s0, b0.s2, acc22);
+ acc23 = fma(a2.s0, b0.s3, acc23);
+
+ acc20 = fma(a2.s1, b1.s0, acc20);
+ acc21 = fma(a2.s1, b1.s1, acc21);
+ acc22 = fma(a2.s1, b1.s2, acc22);
+ acc23 = fma(a2.s1, b1.s3, acc23);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc30 = fma(a3.s0, b0.s0, acc30);
+ acc31 = fma(a3.s0, b0.s1, acc31);
+ acc32 = fma(a3.s0, b0.s2, acc32);
+ acc33 = fma(a3.s0, b0.s3, acc33);
+
+ acc30 = fma(a3.s1, b1.s0, acc30);
+ acc31 = fma(a3.s1, b1.s1, acc31);
+ acc32 = fma(a3.s1, b1.s2, acc32);
+ acc33 = fma(a3.s1, b1.s3, acc33);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ }
+
+ for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(float), src1_stride_y))
+ {
+ // Load values from matrix A
+ float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ // Load values from matrix B
+ float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
+
+ // Multiply and accumulate
+ acc00 = fma(a0, b0.s0, acc00);
+ acc01 = fma(a0, b0.s1, acc01);
+ acc02 = fma(a0, b0.s2, acc02);
+ acc03 = fma(a0, b0.s3, acc03);
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc10 = fma(a1, b0.s0, acc10);
+ acc11 = fma(a1, b0.s1, acc11);
+ acc12 = fma(a1, b0.s2, acc12);
+ acc13 = fma(a1, b0.s3, acc13);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc20 = fma(a2, b0.s0, acc20);
+ acc21 = fma(a2, b0.s1, acc21);
+ acc22 = fma(a2, b0.s2, acc22);
+ acc23 = fma(a2, b0.s3, acc23);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc30 = fma(a3, b0.s0, acc30);
+ acc31 = fma(a3, b0.s1, acc31);
+ acc32 = fma(a3, b0.s2, acc32);
+ acc33 = fma(a3, b0.s3, acc33);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ }
+
+ // Compute destination address
+ Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+ // Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
+ acc00 = acc00 * ALPHA;
+ acc01 = acc01 * ALPHA;
+ acc02 = acc02 * ALPHA;
+ acc03 = acc03 * ALPHA;
+#endif // defined(ALPHA)
+
+ float4 acc0 = ((float4)(acc00, acc01, acc02, acc03));
+ vstore4(acc0, 0, (__global float *)(offset(&dst, 0, 0)));
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if defined(ALPHA)
+ acc10 = acc10 * ALPHA;
+ acc11 = acc11 * ALPHA;
+ acc12 = acc12 * ALPHA;
+ acc13 = acc13 * ALPHA;
+#endif // defined(ALPHA)
+ float4 acc1 = ((float4)(acc10, acc11, acc12, acc13));
+ vstore4(acc1, 0, (__global float *)(offset(&dst, 0, 1)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if defined(ALPHA)
+ acc20 = acc20 * ALPHA;
+ acc21 = acc21 * ALPHA;
+ acc22 = acc22 * ALPHA;
+ acc23 = acc23 * ALPHA;
+#endif // defined(ALPHA)
+ float4 acc2 = ((float4)(acc20, acc21, acc22, acc23));
+ vstore4(acc2, 0, (__global float *)(offset(&dst, 0, 2)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+#if defined(ALPHA)
+ acc30 = acc30 * ALPHA;
+ acc31 = acc31 * ALPHA;
+ acc32 = acc32 * ALPHA;
+ acc33 = acc33 * ALPHA;
+#endif // defined(ALPHA)
+ float4 acc3 = ((float4)(acc30, acc31, acc32, acc33));
+ vstore4(acc3, 0, (__global float *)(offset(&dst, 0, 3)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+}
+
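As a reading aid for the accNM accumulators above, here is a hedged scalar restatement of what a single work-item computes (plain C++, not library code; the leading dimensions lda/ldb/ldd are assumptions of this reference, the real kernel works on byte strides):

    #include <cstddef>

    // Reference semantics of one work-item of gemm_mm_floating_point_f32_bifrost with
    // NUM_ELEMS_PROCESSED_PER_THREAD_X = 4 and NUM_ELEMS_PROCESSED_PER_THREAD_Y = 4:
    // a 4x4 block of dst = alpha * A * B, where accNM maps to block row N, block column M.
    void gemm_block_4x4_reference(const float *A, const float *B, float *dst,
                                  std::size_t cols_a, std::size_t lda, std::size_t ldb, std::size_t ldd,
                                  std::size_t gx, std::size_t gy, float alpha)
    {
        for(std::size_t n = 0; n < 4; ++n)     // block row    -> accN*
        {
            for(std::size_t m = 0; m < 4; ++m) // block column -> acc*M
            {
                float acc = 0.0f;
                for(std::size_t k = 0; k < cols_a; ++k)
                {
                    acc += A[(4 * gy + n) * lda + k] * B[k * ldb + 4 * gx + m];
                }
                dst[(4 * gy + n) * ldd + 4 * gx + m] = alpha * acc;
            }
        }
    }

+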
+/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
+ *
+ * @note This OpenCL kernel works with the 32-bit floating point data type (float) and uses the fma units.
+ * This OpenCL kernel is optimized for Bifrost when the number of matrix B columns is less than or equal to 1000.
+ * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y.
+ * This kernel optimally uses -DNUM_ELEMS_PROCESSED_PER_THREAD_X=2.
+ * @note The number of matrix A columns must be passed at compile time using -DCOLS_A.
+ * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha if alpha!=1.0f.
+ *
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16/F32
+ * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
+ * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
+ * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
+ * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_mm_floating_point_f32_bifrost_1000(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
+{
+    // Requires NUM_ELEMS_PROCESSED_PER_THREAD_X=2: C is a vect2, A is a vect4, B is loaded with 2 vload2 // to fix for NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ int idx = get_global_id(0) * NUM_ELEMS_PROCESSED_PER_THREAD_X;
+
+ // Compute starting address for matrix A and Matrix B
+ int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+
+ // Update address for the matrix A
+ src_addr.s0 += get_global_id(1) * src0_stride_y * NUM_ELEMS_PROCESSED_PER_THREAD_Y;
+
+ // Update address for the matrix B
+ src_addr.s1 += idx * sizeof(float);
+
+ // Address boundary for the matrix A
+ int end_row_vec_a = src_addr.s0 + (COLS_A * sizeof(float));
+
+ // Initialize accumulators
+ float acc00 = 0.0f;
+ float acc01 = 0.0f;
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ float acc10 = 0.0f;
+ float acc11 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ float acc20 = 0.0f;
+ float acc21 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ float acc30 = 0.0f;
+ float acc31 = 0.0f;
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+
+ // A and B src indices get incremented at the same time.
+ for(; src_addr.s0 <= (end_row_vec_a - 4 * (int)sizeof(float)); src_addr += (int2)(4 * sizeof(float), 4 * src1_stride_y))
+ {
+ // Load values from matrix A
+ float4 a0 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+
+ // Load values from matrix B
+ float2 b0 = vload2(0, (__global float *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
+ float2 b1 = vload2(0, (__global float *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
+ float2 b2 = vload2(0, (__global float *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y));
+ float2 b3 = vload2(0, (__global float *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y));
+
+ // Multiply and accumulate
+ acc00 = fma(a0.s0, b0.s0, acc00);
+ acc00 = fma(a0.s1, b1.s0, acc00);
+ acc00 = fma(a0.s2, b2.s0, acc00);
+ acc00 = fma(a0.s3, b3.s0, acc00);
+
+ acc01 = fma(a0.s0, b0.s1, acc01);
+ acc01 = fma(a0.s1, b1.s1, acc01);
+ acc01 = fma(a0.s2, b2.s1, acc01);
+ acc01 = fma(a0.s3, b3.s1, acc01);
+
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ a0 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+ acc10 = fma(a0.s0, b0.s0, acc10);
+ acc10 = fma(a0.s1, b1.s0, acc10);
+ acc10 = fma(a0.s2, b2.s0, acc10);
+ acc10 = fma(a0.s3, b3.s0, acc10);
+
+ acc11 = fma(a0.s0, b0.s1, acc11);
+ acc11 = fma(a0.s1, b1.s1, acc11);
+ acc11 = fma(a0.s2, b2.s1, acc11);
+ acc11 = fma(a0.s3, b3.s1, acc11);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ a0 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+ acc20 = fma(a0.s0, b0.s0, acc20);
+ acc20 = fma(a0.s1, b1.s0, acc20);
+ acc20 = fma(a0.s2, b2.s0, acc20);
+ acc20 = fma(a0.s3, b3.s0, acc20);
+
+ acc21 = fma(a0.s0, b0.s1, acc21);
+ acc21 = fma(a0.s1, b1.s1, acc21);
+ acc21 = fma(a0.s2, b2.s1, acc21);
+ acc21 = fma(a0.s3, b3.s1, acc21);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ a0 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+ acc30 = fma(a0.s0, b0.s0, acc30);
+ acc30 = fma(a0.s1, b1.s0, acc30);
+ acc30 = fma(a0.s2, b2.s0, acc30);
+ acc30 = fma(a0.s3, b3.s0, acc30);
+
+ acc31 = fma(a0.s0, b0.s1, acc31);
+ acc31 = fma(a0.s1, b1.s1, acc31);
+ acc31 = fma(a0.s2, b2.s1, acc31);
+ acc31 = fma(a0.s3, b3.s1, acc31);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ }
+    // Leftover columns: advance A by one float (4 bytes) per iteration
+ for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(4, src1_stride_y))
+ {
+ // Load values from matrix A
+ float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ // Load values from matrix B
+ float2 b0 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
+
+ // Multiply and accumulate
+ acc00 = fma(a0, b0.s0, acc00);
+ acc01 = fma(a0, b0.s1, acc01);
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+ acc10 = fma(a1, b0.s0, acc10);
+ acc11 = fma(a1, b0.s1, acc11);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+ acc20 = fma(a2, b0.s0, acc20);
+ acc21 = fma(a2, b0.s1, acc21);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ acc30 = fma(a3, b0.s0, acc30);
+ acc31 = fma(a3, b0.s1, acc31);
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+ }
+
+ // Compute destination address
+ Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+ // Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
+ acc00 = acc00 * ALPHA;
+ acc01 = acc01 * ALPHA;
+#endif // defined(ALPHA)
+ float2 acc0 = ((float2)(acc00, acc01));
+ vstore2(acc0, 0, (__global float *)(offset(&dst, 0, 0)));
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if defined(ALPHA)
+ acc10 = acc10 * ALPHA;
+ acc11 = acc11 * ALPHA;
+#endif // defined(ALPHA)
+ float2 acc1 = ((float2)(acc10, acc11));
+ vstore2(acc1, 0, (__global float *)(offset(&dst, 0, 1)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if defined(ALPHA)
+ acc20 = acc20 * ALPHA;
+ acc21 = acc21 * ALPHA;
+#endif // defined(ALPHA)
+ float2 acc2 = ((float2)(acc20, acc21));
+ vstore2(acc2, 0, (__global float *)(offset(&dst, 0, 2)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
+#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+#if defined(ALPHA)
+ acc30 = acc30 * ALPHA;
+ acc31 = acc31 * ALPHA;
+#endif // defined(ALPHA)
+ float2 acc3 = (float2)(acc30, acc31);
+ vstore2(acc3, 0, (__global float *)(offset(&dst, 0, 3)));
+#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
+}
+
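Each work-item covers NUM_ELEMS_PROCESSED_PER_THREAD_X output columns and NUM_ELEMS_PROCESSED_PER_THREAD_Y output rows, so the launch geometry follows directly from the output shape; the local work size (the "LWS tuning" of the commit message) is then chosen per target. A hedged sketch of that arithmetic, with illustrative values only:

    #include <cstddef>

    // Sketch: derive the global work size from the output shape and the per-thread tile,
    // rounding up so the whole output is covered.
    struct NDRange2D
    {
        std::size_t x;
        std::size_t y;
    };

    NDRange2D gemm_global_work_size(std::size_t out_cols, std::size_t out_rows,
                                    std::size_t elems_x, std::size_t elems_y)
    {
        return { (out_cols + elems_x - 1) / elems_x,   // range of get_global_id(0)
                 (out_rows + elems_y - 1) / elems_y }; // range of get_global_id(1)
    }

    // Example: the *_1000 variant documents NUM_ELEMS_PROCESSED_PER_THREAD_X=2, so
    // gemm_global_work_size(1000, 64, 2, 4) yields { 500, 16 }; the kernel is then
    // enqueued with a target-specific local work size hint.

+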
+#if defined(FIXED_POINT_POSITION)
/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
*
* @note This OpenCL kernel works with fixed point data types QS8
* @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y
- * @note The width of matrix A, the number of elements processed per thread along the Y direction and the alpha's value need to be passed at compile time using -DCOLS_A, -DNUM_ELEMS_PROCESSED_PER_THREAD_Y and -DALPHA
+ * @note The number of matrix A columns, the number of elements processed per thread along the Y direction and the alpha's value need to be passed at compile time using -DCOLS_A, -DNUM_ELEMS_PROCESSED_PER_THREAD_Y and -DALPHA
* @note The fixed point position need to be passed at compile time using -DFIXED_POINT_POSITION
- * @note The alpha value must be passed in 8 bit fixed point format using -DALPHA
+ * @note The optional alpha value must be passed in 8 bit fixed point format using -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8/QS16
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -1271,21 +1666,29 @@ __kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
// Multiply by the weight of matrix product and store the result
char16 acc_qs8;
acc_qs8 = convert_char16_sat((short16)(acc00, acc01));
+#if defined(ALPHA)
acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 0)));
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
acc_qs8 = convert_char16_sat((short16)(acc10, acc11));
+#if defined(ALPHA)
acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 1)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
acc_qs8 = convert_char16_sat((short16)(acc20, acc21));
+#if defined(ALPHA)
acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 2)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
acc_qs8 = convert_char16_sat((short16)(acc30, acc31));
+#if defined(ALPHA)
acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 3)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
@@ -1294,9 +1697,9 @@ __kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
*
* @note This OpenCL kernel works with fixed point data types QS16
* @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y
- * @note The width of matrix A, the number of elements processed per thread along the Y direction and the alpha's value need to be passed at compile time using -DCOLS_A, -DNUM_ELEMS_PROCESSED_PER_THREAD_Y and -DALPHA
+ * @note The number of matrix A columns, the number of elements processed per thread along the Y direction and the alpha's value need to be passed at compile time using -DCOLS_A, -DNUM_ELEMS_PROCESSED_PER_THREAD_Y and -DALPHA
* @note The fixed point position need to be passed at compile time using -DFIXED_POINT_POSITION
- * @note The alpha value must be passed in 16 bit fixed point format using -DALPHA
+ * @note The optional alpha value must be passed in 16 bit fixed point format using -DALPHA
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8/QS16
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
@@ -1410,29 +1813,36 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
// Multiply by the weight of matrix product and store the result
short8 acc_qs16;
acc_qs16 = convert_short8_sat(acc0);
+#if defined(ALPHA)
acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 0)));
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
acc_qs16 = convert_short8_sat(acc1);
+#if defined(ALPHA)
acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 1)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
acc_qs16 = convert_short8_sat(acc2);
+#if defined(ALPHA)
acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 2)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
acc_qs16 = convert_short8_sat(acc3);
+#if defined(ALPHA)
acc_qs16 = mul_sat_qs16x8(acc_qs16, (short8)ALPHA, FIXED_POINT_POSITION);
+#endif // defined(ALPHA)
vstore8(acc_qs16, 0, (__global short *)(offset(&dst, 0, 3)));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
#endif // defined(FIXED_POINT_POSITION)
#endif // defined(COLS_A) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && (NUM_ELEMS_PROCESSED_PER_THREAD_Y)
-#endif // defined(COLS_B) && defined(ALPHA)
-#ifdef BETA
+#if defined(BETA)
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
*
* @attention The beta's value need to be passed at compile time using -DBETA
@@ -1453,20 +1863,20 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
__kernel void gemm_ma_f32(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from A x B */
+ // Load values from A x B
float4 alpha_ab = vload4(0, (__global float *)dst.ptr);
- /* Load values from Matrix C */
+ // Load values from Matrix C
float4 c = vload4(0, (__global float *)src.ptr);
- /* Computes alpha * axb + beta * c */
+ // Computes alpha * axb + beta * c
float4 out = alpha_ab + (float4)BETA * c;
- /* Store final result in axb matrix */
+ // Store final result in axb matrix
vstore4(out, 0, (__global float *)dst.ptr);
}
@@ -1490,24 +1900,24 @@ __kernel void gemm_ma_f32(IMAGE_DECLARATION(src),
__kernel void gemm_ma_f16(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from A x B */
+ // Load values from A x B
half8 alpha_ab = vload8(0, (__global half *)dst.ptr);
- /* Load values from Matrix C */
+ // Load values from Matrix C
half8 c = vload8(0, (__global half *)src.ptr);
- /* Computes alpha * axb + beta * c */
+ // Computes alpha * axb + beta * c
half8 out = alpha_ab + (half8)BETA * c;
- /* Store final result in axb matrix */
+ // Store final result in axb matrix
vstore8(out, 0, (__global half *)dst.ptr);
}
-#ifdef FIXED_POINT_POSITION
+#if defined(FIXED_POINT_POSITION)
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 8 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta:
*
* @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
@@ -1530,20 +1940,20 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src),
__kernel void gemm_ma_qs8(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from A x B */
+ // Load values from A x B
char16 alpha_ab = vload16(0, (__global char *)dst.ptr);
- /* Load values from Matrix C */
+ // Load values from Matrix C
char16 c = vload16(0, (__global char *)src.ptr);
- /* Computes alpha * axb + beta * c */
+ // Computes alpha * axb + beta * c
char16 out = mla_sat_qs8x16(alpha_ab, (char16)BETA, c, FIXED_POINT_POSITION);
- /* Store final result in axb matrix */
+ // Store final result in axb matrix
vstore16(out, 0, (__global char *)dst.ptr);
}
@@ -1569,26 +1979,26 @@ __kernel void gemm_ma_qs8(IMAGE_DECLARATION(src),
__kernel void gemm_ma_qs16(IMAGE_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
- /* Compute source and destination addresses */
+ // Compute source and destination addresses
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
- /* Load values from A x B */
+ // Load values from A x B
short8 alpha_ab = vload8(0, (__global short *)dst.ptr);
- /* Load values from Matrix C */
+ // Load values from Matrix C
short8 c = vload8(0, (__global short *)src.ptr);
- /* Computes alpha * axb + beta * c */
+ // Computes alpha * axb + beta * c
short8 out = mla_sat_qs16x8(alpha_ab, (short8)BETA, c, FIXED_POINT_POSITION);
- /* Store final result in axb matrix */
+ // Store final result in axb matrix
vstore8(out, 0, (__global short *)dst.ptr);
}
-#endif /* defined(FIXED_POINT_POSITION) */
-#endif /* defined(BETA) */
+#endif // defined(FIXED_POINT_POSITION)
+#endif // defined(BETA)
-#ifdef WIDTH_VECTOR_A
+#if defined(WIDTH_VECTOR_A)
/** This OpenCL kernel computes the vector by matrix multiplication between each row of A (src0) and matrix B (src1) used for locally connected layer
*
 * @attention The width of A needs to be passed at compile time using -DWIDTH_VECTOR_A
@@ -1623,7 +2033,7 @@ __kernel void gemm_lc_vm_f32(IMAGE_DECLARATION(src0),
int idx = get_global_id(0) * 4;
int idy = get_global_id(1);
- /* Compute the address for the vector A and matrix B */
+ // Compute the address for the vector A and matrix B
int2 src_addr = ((int2)(src0_offset_first_element_in_bytes + src0_stride_y * idy, src1_offset_first_element_in_bytes + src1_stride_z * idy));
src_addr.s1 += idx * sizeof(float);
@@ -1649,9 +2059,49 @@ __kernel void gemm_lc_vm_f32(IMAGE_DECLARATION(src0),
acc += b0 * (float4)a0;
}
- /* Compute destination address */
+ // Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
vstore4(acc, 0, (__global float *)(offset(&dst, 0, 0)));
}
-#endif /* WIDTH_VECTOR_A */
+#endif // defined(WIDTH_VECTOR_A)
+
+/** This kernel accumulates each row with the biases vector.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=short.
+ * @note The vector size must be passed at compile time using -DVECTOR_SIZE e.g. -DVECTOR_SIZE=16.
+ *
+ * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: U8/S8/QS8/U16/S16/F16/U32/S32/F32
+ * @param[in] accum_stride_x Stride of the accumulate tensor in X dimension (in bytes)
+ * @param[in] accum_step_x accum_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] accum_stride_y Stride of the accumulate tensor in Y dimension (in bytes)
+ * @param[in] accum_step_y accum_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] accum_offset_first_element_in_bytes The offset of the first element in the accumulate tensor
+ * @param[in] biases_ptr Pointer to the biases vector. Same as @p accum_ptr
+ * @param[in] biases_stride_x Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases vector
+ */
+#if defined(DATA_TYPE) && defined(VECTOR_SIZE)
+__kernel void gemm_accumulate_biases(
+ IMAGE_DECLARATION(accum),
+ VECTOR_DECLARATION(biases))
+{
+ Image accum = CONVERT_TO_IMAGE_STRUCT(accum);
+ Vector biases = CONVERT_TO_VECTOR_STRUCT(biases);
+
+ // Vector size, i.e. number of vector elements.
+ VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
+ accum_value = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)accum.ptr);
+ VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
+ biases_value = VLOAD(VECTOR_SIZE)(0, (__global DATA_TYPE *)biases.ptr);
+#ifdef FIXED_POINT_POSITION
+ accum_value = ADD_SAT_OP_EXPAND(biases_value, accum_value, DATA_TYPE, VECTOR_SIZE);
+#else // FIXED_POINT_POSITION
+ accum_value = biases_value + accum_value;
+#endif // FIXED_POINT_POSITION
+ // Store result in the accumulate buffer
+ VSTORE(VECTOR_SIZE)
+ (accum_value, 0, (__global DATA_TYPE *)accum.ptr);
+}
+#endif // defined(DATA_TYPE) && defined(VECTOR_SIZE)
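The new gemm_accumulate_biases kernel adds a VECTOR_SIZE-wide slice of the bias vector into the matching slice of each accumulator row. A standalone scalar C++ sketch of the same arithmetic (float path only, no fixed-point saturation; buffer names are illustrative) is:

    #include <cstddef>
    #include <vector>

    // Scalar model of one gemm_accumulate_biases work-item, generalised to a whole
    // matrix: the bias of each column is added into every row of the accumulator.
    void accumulate_biases(std::vector<float> &accum, const std::vector<float> &biases,
                           std::size_t rows, std::size_t cols)
    {
        for(std::size_t y = 0; y < rows; ++y)
        {
            for(std::size_t x = 0; x < cols; ++x)
            {
                accum[y * cols + x] += biases[x]; // accum_value = biases_value + accum_value
            }
        }
    }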
diff --git a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
index 263cfab2dc..015b4f70a4 100644
--- a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
@@ -51,18 +51,23 @@ void CLGEMMMatrixAccumulateBiasesKernel::configure(ICLTensor *accum, const ICLTe
_biases = biases;
_accum = accum;
- std::set<std::string> build_opts;
- build_opts.insert(("-DDATA_TYPE=" + get_cl_type_from_data_type(accum->info()->data_type())));
- if(is_data_type_fixed_point(accum->info()->data_type()))
- {
- build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(accum->info()->fixed_point_position()));
- }
+ // Get the target architecture
+ GPUTarget arch_target = get_arch_from_target(get_target());
+ // Select the vector size to use (8 for Bifrost; 16 for Midgard).
+ const unsigned int vector_size = (arch_target == GPUTarget::BIFROST) ? 8 : 16;
+
+ // Add build options
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(accum->info()->data_type()));
+ build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
+ build_opts.add_option_if(is_data_type_fixed_point(accum->info()->data_type()),
+ "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(accum->info()->fixed_point_position()));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_accumulate_biases", build_opts));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_accumulate_biases", build_opts.options()));
// Configure kernel window
- const unsigned int num_elems_processed_per_iteration = 16;
+ const unsigned int num_elems_processed_per_iteration = vector_size;
Window win = calculate_max_window(*_accum->info(), Steps(num_elems_processed_per_iteration));
@@ -92,7 +97,7 @@ void CLGEMMMatrixAccumulateBiasesKernel::run(const Window &window, cl::CommandQu
add_2D_tensor_argument(idx, _accum, accum_slice);
add_1D_tensor_argument(idx, _biases, biases_slice);
- enqueue(queue, *this, accum_slice);
+ enqueue(queue, *this, accum_slice, _lws_hint);
}
while(window.slide_window_slice_2D(accum_slice));
}
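The configure() change above derives a single vector size from the GPU architecture and reuses it for both the -DVECTOR_SIZE build option and the window step. A self-contained sketch of that selection, using a hypothetical Arch enum in place of GPUTarget, might read:

    #include <string>

    // Hypothetical stand-in for the GPU architecture enum used above.
    enum class Arch { Midgard, Bifrost };

    // Bifrost uses 8-wide vectors, Midgard keeps 16; the same value drives both
    // the -DVECTOR_SIZE define and the number of elements processed per iteration.
    struct BiasKernelConfig
    {
        unsigned int vector_size;
        std::string  build_option;
    };

    BiasKernelConfig make_bias_kernel_config(Arch arch)
    {
        const unsigned int vector_size = (arch == Arch::Bifrost) ? 8u : 16u;
        return { vector_size, "-DVECTOR_SIZE=" + std::to_string(vector_size) };
    }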
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index b184c507ff..d39dcdb336 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -38,7 +38,6 @@
#include "arm_compute/core/Window.h"
#include <set>
-#include <sstream>
#include <string>
using namespace arm_compute;
@@ -53,7 +52,6 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
-
if(!is_interleaved_transposed)
{
ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
@@ -63,49 +61,44 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
_input1 = input1;
_output = output;
- if(output->info()->dimension(1) == 196)
- {
- _lws_hint = cl::NDRange(1, 7);
- }
- else
- {
- _lws_hint = cl::NDRange(8, 8);
- }
+ const DataType data_type = input0->info()->data_type();
+ const int fp_pos = input0->info()->fixed_point_position();
- std::set<std::string> build_opts;
- build_opts.emplace(("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0))));
- build_opts.emplace(("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0))));
+ // Get target architecture
+ GPUTarget arch_target = get_arch_from_target(get_target());
- if(is_data_type_fixed_point(input0->info()->data_type()))
- {
- build_opts.emplace(("-DALPHA=" + support::cpp11::to_string((input0->info()->data_type() == DataType::QS8 ?
- sqcvt_qs8_f32(alpha, input0->info()->fixed_point_position()) :
- sqcvt_qs16_f32(alpha, input0->info()->fixed_point_position())))));
+ // Configure LWS hint
+ _lws_hint = (output->info()->dimension(1) == 196) ? cl::NDRange(1, 7) : cl::NDRange(8, 8);
- build_opts.emplace(("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input0->info()->fixed_point_position())));
- }
- else
+ // Create build options
+ CLBuildOptions build_opts;
+ build_opts.add_option_if(is_data_type_fixed_point(data_type), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(fp_pos));
+
+ // Treat alpha as 1.0f when it is within a small tolerance of it.
+ const bool multiply_alpha = std::abs(1.0f - alpha) > 0.00001f;
+
+ // Only define ALPHA when alpha is not 1.0f. This avoids performing unnecessary multiplications.
+ if(multiply_alpha)
{
- build_opts.emplace(("-DALPHA=" + float_to_string_with_full_precision(alpha)));
+ build_opts.add_option_if_else(is_data_type_fixed_point(data_type),
+ "-DALPHA=" + support::cpp11::to_string((data_type == DataType::QS8 ? sqcvt_qs8_f32(alpha, fp_pos) : sqcvt_qs16_f32(alpha, fp_pos))),
+ "-DALPHA=" + float_to_string_with_full_precision(alpha));
}
+ std::string kernel_name;
if(is_interleaved_transposed)
{
- // Create kernel
- std::string data_type_name = lower_string(string_from_data_type(input0->info()->data_type()));
-
- if(data_type_name == "f32")
+ build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(input1->info()->dimension(0)));
+ if(data_type == DataType::F32)
{
- GPUTarget arch_target = get_arch_from_target(get_target());
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target), build_opts));
+ kernel_name = "gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target);
}
else
{
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_interleaved_transposed_" + data_type_name, build_opts));
+ kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type));
}
- // Configure window kernel
- const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(input0->info()->data_type());
+ // Configure kernel window
+ const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
constexpr unsigned int num_elems_processed_per_iteration_y = 4;
Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
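The ALPHA handling above only emits the define when alpha differs measurably from 1.0f, so kernels compiled without it skip the multiplication entirely. A simplified standalone sketch of the gating (float path only; the real code converts fixed-point alpha with sqcvt_qs8_f32/sqcvt_qs16_f32 and prints the float with full precision):

    #include <cmath>
    #include <string>

    // Returns the -DALPHA build option, or an empty string when alpha is
    // effectively 1.0f and the multiplication can be compiled out of the kernel.
    std::string alpha_build_option(float alpha)
    {
        const bool multiply_alpha = std::abs(1.0f - alpha) > 0.00001f;
        return multiply_alpha ? "-DALPHA=" + std::to_string(alpha) : std::string{};
    }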
@@ -122,28 +115,47 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
}
else // The input tensors have not been reshaped
{
- ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
+ build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));
- // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor
- const unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(input0->info()->data_type());
+ // Special case for 1xN, 2xN, 3xN and 4xN input0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
+ unsigned int num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
const unsigned int num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->info()->dimension(1)), 4);
- build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type())));
- build_opts.emplace(("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=" + support::cpp11::to_string(num_elems_processed_per_iteration_x)));
- build_opts.emplace(("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + support::cpp11::to_string(num_elems_processed_per_iteration_y)));
-
- // Create kernel
- if(is_data_type_fixed_point(input0->info()->data_type()))
+ // Create kernels according to the architecture, data type and input size.
+ if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
{
- std::string kernel_name = "gemm_mm_" + lower_string(string_from_data_type(input0->info()->data_type()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel((kernel_name), build_opts));
+ // The first kernel is optimized for the case of 1000 or fewer output elements (e.g. FC8 of AlexNet and VGG-16, and
+ // FC1 of Inception v3). The second kernel is optimized for the case of more than 1000 output elements (e.g.
+ // FC6 and FC7 of AlexNet and VGG-16).
+ if(input1->info()->dimension(0) <= 1000)
+ {
+ // Each work-item processes 2 elements in the X dimension.
+ num_elems_processed_per_iteration_x = 2;
+ kernel_name = "gemm_mm_floating_point_f32_bifrost_1000";
+ }
+ else
+ {
+ // Each work-item processes 4 elements in the X dimension (as in the default case).
+ num_elems_processed_per_iteration_x = 4;
+ kernel_name = "gemm_mm_floating_point_f32_bifrost";
+ }
+ // A work-group size equal to the Bifrost quad size has proven to be optimal for these kernels,
+ // as determined by exhaustive autotuning over a range of representative layer configurations.
+ _lws_hint = cl::NDRange(4);
}
- else
+ else if(is_data_type_fixed_point(data_type))
{
- std::string kernel_name = "gemm_mm_floating_point";
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel((kernel_name), build_opts));
+ kernel_name = "gemm_mm_" + lower_string(string_from_data_type(data_type));
}
+ else // (MIDGARD and F32) or (F16)
+ {
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ kernel_name = "gemm_mm_floating_point";
+ }
+ build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + support::cpp11::to_string(num_elems_processed_per_iteration_y));
+ build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_X=" + support::cpp11::to_string(num_elems_processed_per_iteration_x));
+ // Configure window
Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
AccessWindowStatic input0_access(input0->info(), 0, 0, input0->info()->dimension(0), ceil_to_multiple(input0->info()->dimension(1), num_elems_processed_per_iteration_y));
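For the non-reshaped Bifrost F32 path above, the width of matrix B selects the kernel variant, the per-thread X width, and (implicitly) the quad-sized LWS hint. A standalone model of that decision, with a hypothetical result struct, could be:

    #include <string>

    // Mirrors the Bifrost F32 kernel choice: up to 1000 output elements picks the
    // "_1000" variant with 2 elements per work-item in X, otherwise 4 elements;
    // both use a local work size of 4 (cl::NDRange(4) in the code above).
    struct BifrostGemmChoice
    {
        std::string  kernel_name;
        unsigned int elems_per_thread_x;
        unsigned int lws_hint_x;
    };

    BifrostGemmChoice choose_bifrost_f32_kernel(unsigned int cols_b)
    {
        if(cols_b <= 1000)
        {
            return { "gemm_mm_floating_point_f32_bifrost_1000", 2u, 4u };
        }
        return { "gemm_mm_floating_point_f32_bifrost", 4u, 4u };
    }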
@@ -157,18 +169,21 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape()));
ICLKernel::configure(win);
-
- // Set config_id for enabling LWS tuning
- _config_id = "gemm_";
- _config_id += (is_interleaved_transposed ? "reshaped_" : "");
- _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(1));
- _config_id += "_";
- _config_id += support::cpp11::to_string(output->info()->dimension(0));
- _config_id += "_";
- _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
}
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+
+ // Set config_id for enabling LWS tuning
+ _config_id = "gemm_";
+ _config_id += (is_interleaved_transposed ? "reshaped_" : "");
+ _config_id += lower_string(string_from_data_type(input0->info()->data_type()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(1));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += "_";
+ _config_id += (is_interleaved_transposed ? support::cpp11::to_string(input1->info()->dimension(0)) : support::cpp11::to_string(input1->info()->dimension(1)));
}
void CLGEMMMatrixMultiplyKernel::run(const Window &window, cl::CommandQueue &queue)
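The relocated config_id logic above encodes the reshaping flag, data type, and problem dimensions so the tuner can cache one local work size per GEMM shape. As a rough standalone sketch (parameter names are stand-ins for the tensor-info queries; for reshaped GEMMs the last field is input1's dimension 0, otherwise its dimension 1):

    #include <string>

    // Builds an identifier such as "gemm_reshaped_f32_196_256_1024" that keys
    // the LWS tuning cache for a particular GEMM configuration.
    std::string make_gemm_config_id(bool reshaped, const std::string &data_type,
                                    unsigned int out_h, unsigned int out_w, unsigned int b_dim)
    {
        std::string id = "gemm_";
        id += reshaped ? "reshaped_" : "";
        id += data_type + "_";
        id += std::to_string(out_h) + "_";
        id += std::to_string(out_w) + "_";
        id += std::to_string(b_dim);
        return id;
    }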
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 03d5dbdfd1..72d374e9c2 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -45,7 +45,7 @@ CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> mem
{
}
-void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
+void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, const GPUTarget gpu_target)
{
ARM_COMPUTE_ERROR_ON((weights->info()->dimension(1) != (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))));
@@ -67,17 +67,19 @@ void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLT
_im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
// Configure matrix multiply kernel
+ _mm_kernel.set_target(gpu_target);
_mm_kernel.configure(&_im2col_output, weights, output, 1.0f, false);
// Allocate the output tensor for im2col once all the configure methods have been called
_im2col_output.allocator()->allocate();
}
-void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output)
+void CLFullyConnectedLayer::configure_fc_fc(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, const GPUTarget gpu_target)
{
ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != weights->info()->dimension(1));
// Configure matrix multiply kernel
+ _mm_kernel.set_target(gpu_target);
_mm_kernel.configure(input, weights, output, 1.0f, false);
}
@@ -91,6 +93,9 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
_is_fc_after_conv = true;
_accumulate_biases = false;
+ // Get GPU target
+ const GPUTarget gpu_target = CLScheduler::get().target();
+
if(biases != nullptr)
{
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
@@ -98,6 +103,7 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
_accumulate_biases = true;
// Configure accumulate biases kernel
+ _accumulate_biases_kernel.set_target(gpu_target);
_accumulate_biases_kernel.configure(output, biases);
}
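The fully connected layer now queries the GPU target once and pushes it into each kernel before configure(), which is what lets the Bifrost-specific kernel, vector-size and LWS choices above take effect. A minimal sketch of that propagation pattern, with hypothetical Scheduler and Kernel stand-ins rather than the real CLScheduler and kernel classes:

    // The target is read once from the scheduler and handed to every kernel
    // before configure(), so target-specific decisions happen at configuration time.
    struct Target { int value; };

    struct Kernel
    {
        Target target{};
        void set_target(Target t) { target = t; }
        void configure() { /* pick kernel variant / LWS based on 'target' */ }
    };

    struct Scheduler
    {
        static Target current_target() { return Target{ 1 }; } // stand-in for the scheduler query
    };

    void configure_layer(Kernel &mm_kernel, Kernel &bias_kernel)
    {
        const Target gpu_target = Scheduler::current_target();
        mm_kernel.set_target(gpu_target);
        mm_kernel.configure();
        bias_kernel.set_target(gpu_target);
        bias_kernel.configure();
    }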
@@ -134,12 +140,12 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
if(_is_fc_after_conv)
{
// Fully Connected layer after a Convolution Layer without batches
- configure_conv_fc(input, weights_to_use, output);
+ configure_conv_fc(input, weights_to_use, output, gpu_target);
}
else
{
// Fully Connected layer after a Fully Connected Layer without batches
- configure_fc_fc(input, weights_to_use, output);
+ configure_fc_fc(input, weights_to_use, output, gpu_target);
}
// Allocate the transpose tensor if the are_weights_reshaped flag is false and once all the configure methods have been called