author     Gian Marco Iodice <gianmarco.iodice@arm.com>  2019-05-30 09:53:10 +0100
committer  Gian Marco Iodice <gianmarco.iodice@arm.com>  2019-05-30 13:12:09 +0000
commit     db18a6f67c32d8d01830186529c3cc61741385cc (patch)
tree       7dfacffd4b57ad8e8d14f85e329539e85dd99645 /src/core/CL/cl_kernels
parent     5b7d537d918becb894d94d91726ce79e63d72fc1 (diff)
download   ComputeLibrary-db18a6f67c32d8d01830186529c3cc61741385cc.tar.gz
COMPMID-2373: Remove unused gemmlowp opencl kernels
Change-Id: Ie1fe6e80957007b41f6db860f073764e37d91b9f
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1252
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/CL/cl_kernels')
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl | 595
1 file changed, 1 insertion(+), 594 deletions(-)
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 18ccb65aaf..b1ba8e0377 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -193,599 +193,6 @@ __kernel void gemmlowp_mm_interleaved_transposed_midgard(IMAGE_DECLARATION(src0)
vstore4(c30, 0, (__global int *)(dst.ptr + 3 * dst_stride_y));
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
}
-
-/** This OpenCL kernel is optimized for Bifrost and computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel before running the matrix multiplication
- *
- * @attention The number of matrix B columns needs to be passed at compile time using -DCOLS_B
- * @note The transposition width step (mult_transpose1xW_width * 4) must be passed at compile time using -DTRANSPOSE1XW_WIDTH_STEP (i.e. -DTRANSPOSE1XW_WIDTH_STEP=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2)
- *
- * @note In case the output has to be reinterpreted as a 3D tensor (i.e. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data type: QASYMM8
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data type: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data type: S32
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
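As an aside on the configuration described in the doc comment above: these defines are passed as OpenCL program build options. A minimal, illustrative set follows; the values are assumptions chosen for this sketch, not values taken from the commit:

// Illustrative build options only; actual values depend on the GEMM shapes:
//   -DCOLS_B=256 -DTRANSPOSE1XW_WIDTH_STEP=2 -DMULT_INTERLEAVE4X4_HEIGHT=2
// and, when the output is reinterpreted as a 3D tensor:
//   -DREINTERPRET_OUTPUT_AS_3D -DHEIGHT_GEMM3D=28 -DDEPTH_GEMM3D=32
// with HEIGHT_GEMM3D * DEPTH_GEMM3D matching the non-reshaped matrix A
// columns, as the constraint in the comment above requires.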
-__kernel void gemmlowp_mm_interleaved_transposed_bifrost(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- const int x = get_global_id(0) / TRANSPOSE1XW_WIDTH_STEP;
- const int y = get_global_id(1) / MULT_INTERLEAVE4X4_HEIGHT;
- const int z = get_global_id(2);
-
- // Offset
- const int offset_row_a = (get_global_id(1) % MULT_INTERLEAVE4X4_HEIGHT) * 4;
- const int offset_row_b = (get_global_id(0) % TRANSPOSE1XW_WIDTH_STEP) * 4;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- __global uchar *src_addr_a = (__global uchar *)(src0_ptr + z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes);
- __global uchar *src_addr_b = (__global uchar *)(src1_ptr + x * src1_stride_y + src1_offset_first_element_in_bytes);
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr_b += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr_b += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- // Compute end row address for matrix B
- __global uchar *src_end_addr_b = src_addr_b + COLS_B;
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- uint c00 = 0;
- uint c01 = 0;
- uint c02 = 0;
- uint c03 = 0;
- uint c10 = 0;
- uint c11 = 0;
- uint c12 = 0;
- uint c13 = 0;
- uint c20 = 0;
- uint c21 = 0;
- uint c22 = 0;
- uint c23 = 0;
- uint c30 = 0;
- uint c31 = 0;
- uint c32 = 0;
- uint c33 = 0;
-
-#if MULT_INTERLEAVE4X4_HEIGHT == 1
- for(; src_addr_b <= (src_end_addr_b - (int)(32 * TRANSPOSE1XW_WIDTH_STEP)); src_addr_a += (32 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (32 * TRANSPOSE1XW_WIDTH_STEP))
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- uchar16 a0 = vload16(0, src_addr_a);
- uchar4 b0 = vload4(0, src_addr_b);
-
- c00 += (ushort)a0.s0 * b0.s0;
- c01 += (ushort)a0.s0 * b0.s1;
- c02 += (ushort)a0.s0 * b0.s2;
- c03 += (ushort)a0.s0 * b0.s3;
-
- c10 += (ushort)a0.s1 * b0.s0;
- c11 += (ushort)a0.s1 * b0.s1;
- c12 += (ushort)a0.s1 * b0.s2;
- c13 += (ushort)a0.s1 * b0.s3;
-
- c20 += (ushort)a0.s2 * b0.s0;
- c21 += (ushort)a0.s2 * b0.s1;
- c22 += (ushort)a0.s2 * b0.s2;
- c23 += (ushort)a0.s2 * b0.s3;
-
- c30 += (ushort)a0.s3 * b0.s0;
- c31 += (ushort)a0.s3 * b0.s1;
- c32 += (ushort)a0.s3 * b0.s2;
- c33 += (ushort)a0.s3 * b0.s3;
-
- // Load values from matrix B (transposed)
- b0 = vload4(0, src_addr_b + 4 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.s4 * b0.s0;
- c01 += (ushort)a0.s4 * b0.s1;
- c02 += (ushort)a0.s4 * b0.s2;
- c03 += (ushort)a0.s4 * b0.s3;
-
- c10 += (ushort)a0.s5 * b0.s0;
- c11 += (ushort)a0.s5 * b0.s1;
- c12 += (ushort)a0.s5 * b0.s2;
- c13 += (ushort)a0.s5 * b0.s3;
-
- c20 += (ushort)a0.s6 * b0.s0;
- c21 += (ushort)a0.s6 * b0.s1;
- c22 += (ushort)a0.s6 * b0.s2;
- c23 += (ushort)a0.s6 * b0.s3;
-
- c30 += (ushort)a0.s7 * b0.s0;
- c31 += (ushort)a0.s7 * b0.s1;
- c32 += (ushort)a0.s7 * b0.s2;
- c33 += (ushort)a0.s7 * b0.s3;
-
- // Load values from matrix B (transposed)
- b0 = vload4(0, src_addr_b + 8 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.s8 * b0.s0;
- c01 += (ushort)a0.s8 * b0.s1;
- c02 += (ushort)a0.s8 * b0.s2;
- c03 += (ushort)a0.s8 * b0.s3;
-
- c10 += (ushort)a0.s9 * b0.s0;
- c11 += (ushort)a0.s9 * b0.s1;
- c12 += (ushort)a0.s9 * b0.s2;
- c13 += (ushort)a0.s9 * b0.s3;
-
- c20 += (ushort)a0.sA * b0.s0;
- c21 += (ushort)a0.sA * b0.s1;
- c22 += (ushort)a0.sA * b0.s2;
- c23 += (ushort)a0.sA * b0.s3;
-
- c30 += (ushort)a0.sB * b0.s0;
- c31 += (ushort)a0.sB * b0.s1;
- c32 += (ushort)a0.sB * b0.s2;
- c33 += (ushort)a0.sB * b0.s3;
-
- // Load values from matrix B (transposed)
- b0 = vload4(0, src_addr_b + 12 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.sC * b0.s0;
- c01 += (ushort)a0.sC * b0.s1;
- c02 += (ushort)a0.sC * b0.s2;
- c03 += (ushort)a0.sC * b0.s3;
-
- c10 += (ushort)a0.sD * b0.s0;
- c11 += (ushort)a0.sD * b0.s1;
- c12 += (ushort)a0.sD * b0.s2;
- c13 += (ushort)a0.sD * b0.s3;
-
- c20 += (ushort)a0.sE * b0.s0;
- c21 += (ushort)a0.sE * b0.s1;
- c22 += (ushort)a0.sE * b0.s2;
- c23 += (ushort)a0.sE * b0.s3;
-
- c30 += (ushort)a0.sF * b0.s0;
- c31 += (ushort)a0.sF * b0.s1;
- c32 += (ushort)a0.sF * b0.s2;
- c33 += (ushort)a0.sF * b0.s3;
-
- // Load values from matrix A (interleaved) and matrix B (transposed)
- a0 = vload16(0, src_addr_a + 16);
- b0 = vload4(0, src_addr_b + 16 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.s0 * b0.s0;
- c01 += (ushort)a0.s0 * b0.s1;
- c02 += (ushort)a0.s0 * b0.s2;
- c03 += (ushort)a0.s0 * b0.s3;
-
- c10 += (ushort)a0.s1 * b0.s0;
- c11 += (ushort)a0.s1 * b0.s1;
- c12 += (ushort)a0.s1 * b0.s2;
- c13 += (ushort)a0.s1 * b0.s3;
-
- c20 += (ushort)a0.s2 * b0.s0;
- c21 += (ushort)a0.s2 * b0.s1;
- c22 += (ushort)a0.s2 * b0.s2;
- c23 += (ushort)a0.s2 * b0.s3;
-
- c30 += (ushort)a0.s3 * b0.s0;
- c31 += (ushort)a0.s3 * b0.s1;
- c32 += (ushort)a0.s3 * b0.s2;
- c33 += (ushort)a0.s3 * b0.s3;
-
- // Load values from matrix B (transposed)
- b0 = vload4(0, src_addr_b + 20 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.s4 * b0.s0;
- c01 += (ushort)a0.s4 * b0.s1;
- c02 += (ushort)a0.s4 * b0.s2;
- c03 += (ushort)a0.s4 * b0.s3;
-
- c10 += (ushort)a0.s5 * b0.s0;
- c11 += (ushort)a0.s5 * b0.s1;
- c12 += (ushort)a0.s5 * b0.s2;
- c13 += (ushort)a0.s5 * b0.s3;
-
- c20 += (ushort)a0.s6 * b0.s0;
- c21 += (ushort)a0.s6 * b0.s1;
- c22 += (ushort)a0.s6 * b0.s2;
- c23 += (ushort)a0.s6 * b0.s3;
-
- c30 += (ushort)a0.s7 * b0.s0;
- c31 += (ushort)a0.s7 * b0.s1;
- c32 += (ushort)a0.s7 * b0.s2;
- c33 += (ushort)a0.s7 * b0.s3;
-
- // Load values from matrix B (transposed)
- b0 = vload4(0, src_addr_b + 24 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.s8 * b0.s0;
- c01 += (ushort)a0.s8 * b0.s1;
- c02 += (ushort)a0.s8 * b0.s2;
- c03 += (ushort)a0.s8 * b0.s3;
-
- c10 += (ushort)a0.s9 * b0.s0;
- c11 += (ushort)a0.s9 * b0.s1;
- c12 += (ushort)a0.s9 * b0.s2;
- c13 += (ushort)a0.s9 * b0.s3;
-
- c20 += (ushort)a0.sA * b0.s0;
- c21 += (ushort)a0.sA * b0.s1;
- c22 += (ushort)a0.sA * b0.s2;
- c23 += (ushort)a0.sA * b0.s3;
-
- c30 += (ushort)a0.sB * b0.s0;
- c31 += (ushort)a0.sB * b0.s1;
- c32 += (ushort)a0.sB * b0.s2;
- c33 += (ushort)a0.sB * b0.s3;
-
- // Load values from matrix B (transposed)
- b0 = vload4(0, src_addr_b + 28 * TRANSPOSE1XW_WIDTH_STEP);
-
- c00 += (ushort)a0.sC * b0.s0;
- c01 += (ushort)a0.sC * b0.s1;
- c02 += (ushort)a0.sC * b0.s2;
- c03 += (ushort)a0.sC * b0.s3;
-
- c10 += (ushort)a0.sD * b0.s0;
- c11 += (ushort)a0.sD * b0.s1;
- c12 += (ushort)a0.sD * b0.s2;
- c13 += (ushort)a0.sD * b0.s3;
-
- c20 += (ushort)a0.sE * b0.s0;
- c21 += (ushort)a0.sE * b0.s1;
- c22 += (ushort)a0.sE * b0.s2;
- c23 += (ushort)a0.sE * b0.s3;
-
- c30 += (ushort)a0.sF * b0.s0;
- c31 += (ushort)a0.sF * b0.s1;
- c32 += (ushort)a0.sF * b0.s2;
- c33 += (ushort)a0.sF * b0.s3;
- }
-#endif // MULT_INTERLEAVE4X4_HEIGHT == 1
-
- for(; src_addr_b < src_end_addr_b; src_addr_a += (4 * MULT_INTERLEAVE4X4_HEIGHT), src_addr_b += (4 * TRANSPOSE1XW_WIDTH_STEP))
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- uchar4 a0 = vload4(0, src_addr_a);
- uchar4 b0 = vload4(0, src_addr_b);
-
- c00 += (ushort)a0.s0 * b0.s0;
- c01 += (ushort)a0.s0 * b0.s1;
- c02 += (ushort)a0.s0 * b0.s2;
- c03 += (ushort)a0.s0 * b0.s3;
-
- c10 += (ushort)a0.s1 * b0.s0;
- c11 += (ushort)a0.s1 * b0.s1;
- c12 += (ushort)a0.s1 * b0.s2;
- c13 += (ushort)a0.s1 * b0.s3;
-
- c20 += (ushort)a0.s2 * b0.s0;
- c21 += (ushort)a0.s2 * b0.s1;
- c22 += (ushort)a0.s2 * b0.s2;
- c23 += (ushort)a0.s2 * b0.s3;
-
- c30 += (ushort)a0.s3 * b0.s0;
- c31 += (ushort)a0.s3 * b0.s1;
- c32 += (ushort)a0.s3 * b0.s2;
- c33 += (ushort)a0.s3 * b0.s3;
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- uint4 zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst.ptr += z * dst_stride_z * DEPTH_GEMM3D;
-
- // Store 4x4 block
- vstore4((int4)(c00, c01, c02, c03), 0, (__global int *)(dst.ptr + 0 * dst_stride_y + zout.s0));
- vstore4((int4)(c10, c11, c12, c13), 0, (__global int *)(dst.ptr + 1 * dst_stride_y + zout.s1));
- vstore4((int4)(c20, c21, c22, c23), 0, (__global int *)(dst.ptr + 2 * dst_stride_y + zout.s2));
- vstore4((int4)(c30, c31, c32, c33), 0, (__global int *)(dst.ptr + 3 * dst_stride_y + zout.s3));
-
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst.ptr += z * dst_stride_z;
-
- // Store 4x4 block
- vstore4((int4)(c00, c01, c02, c03), 0, (__global int *)(dst.ptr + 0 * dst_stride_y));
- vstore4((int4)(c10, c11, c12, c13), 0, (__global int *)(dst.ptr + 1 * dst_stride_y));
- vstore4((int4)(c20, c21, c22, c23), 0, (__global int *)(dst.ptr + 2 * dst_stride_y));
- vstore4((int4)(c30, c31, c32, c33), 0, (__global int *)(dst.ptr + 3 * dst_stride_y));
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-}
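To make the REINTERPRET_OUTPUT_AS_3D address arithmetic in the kernel above concrete, here is a small worked example; the parameter values are assumptions for illustration only, not values from this commit:

// Worked example of the zout computation above, assuming HEIGHT_GEMM3D = 4,
// DEPTH_GEMM3D = 2, and a work-item with get_global_id(1) == 1 (so the 4x4
// tile covers output rows 4..7):
//   zout = ((0,1,2,3) + 1 * 4) / 4         ->  (1, 1, 1, 1)
//   zout = min(2 - 1, zout)                ->  (1, 1, 1, 1)
//   zout *= cross_plane_pad * dst_stride_y
// All four rows of this tile fall in plane 1, so each store is shifted down
// by one cross-plane padding region, keeping the 2D tile consistent with
// the 3D (HEIGHT_GEMM3D x DEPTH_GEMM3D) output layout sketched above.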
-
-#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-/** This OpenCL kernel is optimized for Bifrost and computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel before running the matrix multiplication
- *
- * @attention The number of matrix B columns needs to be passed at compile time using -DCOLS_B
- * @note The transposition width step (mult_transpose1xW_width * 4) must be passed at compile time using -DTRANSPOSE1XW_WIDTH_STEP (i.e. -DTRANSPOSE1XW_WIDTH_STEP=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DMULT_INTERLEAVE4X4_HEIGHT (i.e. -DMULT_INTERLEAVE4X4_HEIGHT=2)
- *
- * @note In case the output has to be reinterpreted as a 3D tensor (i.e. output of convolution layer), the following information must be passed at compile time:
- * -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- * -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data type: QASYMM8
- * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data type: same as @p src0_ptr
- * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data type: S32
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemmlowp_mm_interleaved_transposed_bifrost_dot8(IMAGE_DECLARATION(src0),
- IMAGE_DECLARATION(src1),
- IMAGE_DECLARATION(dst),
- uint src0_stride_z,
- uint src1_stride_z,
- uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
- )
-{
- // Offset
- const int offset_row_a = (get_global_id(1) % MULT_INTERLEAVE4X4_HEIGHT) * 4;
- const int offset_row_b = (get_global_id(0) % TRANSPOSE1XW_WIDTH_STEP) * 4;
-
- // src_addr_a = address of matrix A
- // src_addr_b = address of matrix B
- __global uchar *src_addr_a = (__global uchar *)(src0_ptr + (get_global_id(1) / MULT_INTERLEAVE4X4_HEIGHT) * src0_stride_y + get_global_id(2) * src0_stride_z + src0_offset_first_element_in_bytes);
- __global uchar *src_addr_b = (__global uchar *)(src1_ptr + (get_global_id(0) / TRANSPOSE1XW_WIDTH_STEP) * src1_stride_y + src1_offset_first_element_in_bytes);
-
-#if defined(MATRIX_B_DEPTH)
- // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
- src_addr_b += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
- src_addr_b += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
- src_addr_a += offset_row_a;
- src_addr_b += offset_row_b;
-
- // Reset accumulators
- uint c00 = 0;
- uint c01 = 0;
- uint c02 = 0;
- uint c03 = 0;
-
- uint c10 = 0;
- uint c11 = 0;
- uint c12 = 0;
- uint c13 = 0;
-
- uint c20 = 0;
- uint c21 = 0;
- uint c22 = 0;
- uint c23 = 0;
-
- uint c30 = 0;
- uint c31 = 0;
- uint c32 = 0;
- uint c33 = 0;
-
-#define COLS_MTX_B (COLS_B / (16 * MULT_TRANSPOSE1XW_WIDTH))
-
-#if MULT_INTERLEAVE4X4_HEIGHT == 1
- int i = 0;
- for(; i <= (int)(COLS_MTX_B - 8); i += 8)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- uchar16 a0 = vload16(0, src_addr_a);
- uchar4 b0 = vload4(0, src_addr_b);
- uchar4 b1 = vload4(0, src_addr_b + 4 * TRANSPOSE1XW_WIDTH_STEP);
- uchar4 b2 = vload4(0, src_addr_b + 8 * TRANSPOSE1XW_WIDTH_STEP);
- uchar4 b3 = vload4(0, src_addr_b + 12 * TRANSPOSE1XW_WIDTH_STEP);
- uchar4 b4 = vload4(0, src_addr_b + 16 * TRANSPOSE1XW_WIDTH_STEP);
- uchar4 b5 = vload4(0, src_addr_b + 20 * TRANSPOSE1XW_WIDTH_STEP);
- uchar4 b6 = vload4(0, src_addr_b + 24 * TRANSPOSE1XW_WIDTH_STEP);
- uchar4 b7 = vload4(0, src_addr_b + 28 * TRANSPOSE1XW_WIDTH_STEP);
-
- // Accumulate
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b0.s0, b1.s0, b2.s0, b3.s0), c00);
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b0.s1, b1.s1, b2.s1, b3.s1), c01);
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b0.s2, b1.s2, b2.s2, b3.s2), c02);
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b0.s3, b1.s3, b2.s3, b3.s3), c03);
-
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b0.s0, b1.s0, b2.s0, b3.s0), c10);
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b0.s1, b1.s1, b2.s1, b3.s1), c11);
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b0.s2, b1.s2, b2.s2, b3.s2), c12);
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b0.s3, b1.s3, b2.s3, b3.s3), c13);
-
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b0.s0, b1.s0, b2.s0, b3.s0), c20);
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b0.s1, b1.s1, b2.s1, b3.s1), c21);
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b0.s2, b1.s2, b2.s2, b3.s2), c22);
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b0.s3, b1.s3, b2.s3, b3.s3), c23);
-
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b0.s0, b1.s0, b2.s0, b3.s0), c30);
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b0.s1, b1.s1, b2.s1, b3.s1), c31);
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b0.s2, b1.s2, b2.s2, b3.s2), c32);
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b0.s3, b1.s3, b2.s3, b3.s3), c33);
-
- // Accumulate
- a0 = vload16(0, src_addr_a + 16);
-
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b4.s0, b5.s0, b6.s0, b7.s0), c00);
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b4.s1, b5.s1, b6.s1, b7.s1), c01);
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b4.s2, b5.s2, b6.s2, b7.s2), c02);
- ARM_DOT((uchar4)(a0.s0123), (uchar4)(b4.s3, b5.s3, b6.s3, b7.s3), c03);
-
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b4.s0, b5.s0, b6.s0, b7.s0), c10);
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b4.s1, b5.s1, b6.s1, b7.s1), c11);
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b4.s2, b5.s2, b6.s2, b7.s2), c12);
- ARM_DOT((uchar4)(a0.s4567), (uchar4)(b4.s3, b5.s3, b6.s3, b7.s3), c13);
-
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b4.s0, b5.s0, b6.s0, b7.s0), c20);
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b4.s1, b5.s1, b6.s1, b7.s1), c21);
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b4.s2, b5.s2, b6.s2, b7.s2), c22);
- ARM_DOT((uchar4)(a0.s89AB), (uchar4)(b4.s3, b5.s3, b6.s3, b7.s3), c23);
-
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b4.s0, b5.s0, b6.s0, b7.s0), c30);
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b4.s1, b5.s1, b6.s1, b7.s1), c31);
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b4.s2, b5.s2, b6.s2, b7.s2), c32);
- ARM_DOT((uchar4)(a0.sCDEF), (uchar4)(b4.s3, b5.s3, b6.s3, b7.s3), c33);
-
- src_addr_a += 32;
- src_addr_b += 32 * TRANSPOSE1XW_WIDTH_STEP;
- }
-#endif // MULT_INTERLEAVE4X4_HEIGHT == 1
- int i_left_over = 0;
- for(; i < (int)(COLS_MTX_B); ++i)
- {
- // Load values from matrix A (interleaved) and matrix B (transposed)
- uchar16 a0 = vload16(0, src_addr_a + (i_left_over % 4) + ((i_left_over / 4) * 16));
- uchar4 b0 = vload4(0, src_addr_b);
-
- c00 += a0.s0 * b0.s0;
- c01 += a0.s0 * b0.s1;
- c02 += a0.s0 * b0.s2;
- c03 += a0.s0 * b0.s3;
-
- c10 += a0.s4 * b0.s0;
- c11 += a0.s4 * b0.s1;
- c12 += a0.s4 * b0.s2;
- c13 += a0.s4 * b0.s3;
-
- c20 += a0.s8 * b0.s0;
- c21 += a0.s8 * b0.s1;
- c22 += a0.s8 * b0.s2;
- c23 += a0.s8 * b0.s3;
-
- c30 += a0.sC * b0.s0;
- c31 += a0.sC * b0.s1;
- c32 += a0.sC * b0.s2;
- c33 += a0.sC * b0.s3;
-
- i_left_over++;
- src_addr_b += 4 * TRANSPOSE1XW_WIDTH_STEP;
- }
-
- // Compute destination address
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
- // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
- // in order to take into account the presence of possible cross plane paddings
- //
- // | |
- // | plane0 |
- // | |
- // |__________________|
- // |******************|
- // | cross_plane_pad |
- // |******************|
- // | |
- // | plane1 |
- // | |
- // |__________________|
-
- // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
- uint4 zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
- zout = min(DEPTH_GEMM3D - 1, zout);
-
- // Add offset due to the cross plane paddings
- zout *= (cross_plane_pad * dst_stride_y);
-
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply dst_stride_z by DEPTH_GEMM3D
- dst.ptr += get_global_id(2) * dst_stride_z * DEPTH_GEMM3D;
-
- // Store 4x4 block
- vstore4((int4)(c00, c01, c02, c03), 0, (__global int *)(dst.ptr + 0 * dst_stride_y + zout.s0));
- vstore4((int4)(c10, c11, c12, c13), 0, (__global int *)(dst.ptr + 1 * dst_stride_y + zout.s1));
- vstore4((int4)(c20, c21, c22, c23), 0, (__global int *)(dst.ptr + 2 * dst_stride_y + zout.s2));
- vstore4((int4)(c30, c31, c32, c33), 0, (__global int *)(dst.ptr + 3 * dst_stride_y + zout.s3));
-
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
- // Add offset for batched GEMM
- dst.ptr += get_global_id(2) * dst_stride_z;
-
- // Store 4x4 block
- vstore4((int4)(c00, c01, c02, c03), 0, (__global int *)(dst.ptr + 0 * dst_stride_y));
- vstore4((int4)(c10, c11, c12, c13), 0, (__global int *)(dst.ptr + 1 * dst_stride_y));
- vstore4((int4)(c20, c21, c22, c23), 0, (__global int *)(dst.ptr + 2 * dst_stride_y));
- vstore4((int4)(c30, c31, c32, c33), 0, (__global int *)(dst.ptr + 3 * dst_stride_y));
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-}
-#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
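For readers without the extension spec at hand: each ARM_DOT invocation in the dot8 kernel above accumulates a 4-way u8 dot product. The macro itself is defined earlier in gemmlowp.cl and maps to the arm_dot built-in from cl_arm_integer_dot_product_int8 where available; the helper below is only a portable sketch of its semantics:

// Portable sketch of ARM_DOT's semantics (illustration only; the real
// macro uses the arm_dot hardware built-in where the device exposes it):
inline void arm_dot_sketch(uchar4 x, uchar4 y, uint *val)
{
    *val += (uint)x.s0 * y.s0 + (uint)x.s1 * y.s1
          + (uint)x.s2 * y.s2 + (uint)x.s3 * y.s3;
}
// E.g. ARM_DOT((uchar4)(a0.s0123), (uchar4)(b0.s0, b1.s0, b2.s0, b3.s0), c00)
// adds the next four partial products for output element (0,0), i.e. four
// iterations of the scalar loop used by the non-dot8 bifrost kernel above.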
-
#endif // defined(COLS_B) && defined(MULT_INTERLEAVE4X4_HEIGHT) && defined(TRANSPOSE1XW_WIDTH_STEP)
#if defined(NUM_ELEMS_PROCESSED_PER_THREAD_X) && defined(NUM_ELEMS_PROCESSED_PER_THREAD_Y) && defined(COLS_A)
@@ -4006,4 +3413,4 @@ __kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src
// Store the result
vstore4(res, 0, dst_addr);
}
-#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
\ No newline at end of file
+#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
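The second hunk only adds the missing trailing newline after gemmlowp_output_stage_quantize_down_float. For context, a rough sketch of the per-element computation that output stage performs; the kernel body is outside this diff, so this is an approximation inferred from the REAL_MULTIPLIER and OUTPUT_OFFSET defines in the guard:

// Approximate per-element down-quantization (sketch, not the verbatim kernel):
//   float4 scaled = convert_float4(acc) * (float)REAL_MULTIPLIER + (float4)OUTPUT_OFFSET;
//   uchar4 res    = convert_uchar4_sat(scaled);   // saturating cast to QASYMM8
// where acc holds four S32 GEMM accumulators such as those produced by the
// kernels removed above.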