aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAdnan AlSinan <adnan.alsinan@arm.com>2021-12-10 12:34:02 +0000
committerSiCong Li <sicong.li@arm.com>2021-12-23 17:01:14 +0000
commit3e155a52f19db046f83e30c25182460b00d108c7 (patch)
treef3ec747af21c8ee7b95d6dc88bf6ea4b4aa44659
parentf76a502a73ca628e2a2556abeaa60ed17bb68d97 (diff)
downloadComputeLibrary-3e155a52f19db046f83e30c25182460b00d108c7.tar.gz
Rework gemm_reshape_lhs_ with new macros
Resolves COMPMID-4892 Signed-off-by: Adnan AlSinan <adnan.alsinan@arm.com> Change-Id: I52f23ca293506fc693ae829daccc6e889a050752 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6833 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: SiCong Li <sicong.li@arm.com> Reviewed-by: Giorgio Arena <giorgio.arena@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--src/core/CL/cl_kernels/common/gemm_utils.cl421
-rw-r--r--src/core/CL/cl_kernels/helpers.h8
-rw-r--r--src/core/CL/cl_kernels/tile_helpers.h36
-rw-r--r--src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp64
-rw-r--r--src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h3
-rw-r--r--tests/validation/CL/GEMMReshapeLHSMatrix.cpp39
6 files changed, 240 insertions, 331 deletions
diff --git a/src/core/CL/cl_kernels/common/gemm_utils.cl b/src/core/CL/cl_kernels/common/gemm_utils.cl
index 2e49614f81..be57d94ce6 100644
--- a/src/core/CL/cl_kernels/common/gemm_utils.cl
+++ b/src/core/CL/cl_kernels/common/gemm_utils.cl
@@ -21,56 +21,12 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "helpers.h"
-#include "tile_helpers.h"
#include "gemm_helpers.h"
+#include "helpers.h"
#include "repeat.h"
+#include "tile_helpers.h"
-#if defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PARTIAL_LOAD_M0) && defined(PARTIAL_LOAD_K0)
-#define INC2 (VEC_DATA_TYPE(uint, 2))(0, 1)
-#define INC3 (VEC_DATA_TYPE(uint, 3))(0, 1, 2)
-#define INC4 (VEC_DATA_TYPE(uint, 4))(0, 1, 2, 3)
-#define INC8 (VEC_DATA_TYPE(uint, 8))(0, 1, 2, 3, 4, 5, 6, 7)
-#define INC16 (VEC_DATA_TYPE(uint, 16))(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
-#define CONCAT_INC(K0) INC##K0
-#define INC(K0) CONCAT_INC(K0)
-
-#if(SRC_WIDTH % K0)
-#define BOUNDARY_CONDITION_X(x, a) \
- ({ \
- a = select(0, a, CONVERT(((x * (VEC_DATA_TYPE(uint, K0))K0 + INC(K0)) < (VEC_DATA_TYPE(uint, K0))SRC_WIDTH), VEC_DATA_TYPE(DATA_TYPE, K0))); \
- })
-#else // (SRC_WIDTH % K0)
-#define BOUNDARY_CONDITION_X(x, a) \
- ({})
-#endif // (SRC_WIDTH % K0)
-
-#define LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
- ({ \
- if(y * M0 + M0 >= SRC_HEIGHT && PARTIAL_LOAD_M0 != 0) \
- { \
- if(x * K0 + K0 >= SRC_WIDTH && (PARTIAL_LOAD_K0 != 0)) \
- { \
- LOAD_TENSOR_M0XN0(PARTIAL_LOAD_M0, PARTIAL_LOAD_K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- else \
- { \
- LOAD_TENSOR_M0XN0(PARTIAL_LOAD_M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- } \
- else \
- { \
- if(x * K0 + K0 >= SRC_WIDTH && (PARTIAL_LOAD_K0 != 0)) \
- { \
- LOAD_TENSOR_M0XN0(M0, PARTIAL_LOAD_K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- else \
- { \
- LOAD_TENSOR_M0XN0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
- } \
- } \
- })
-
+#if defined(RESHAPE_LHS_NT)
/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (not transposed) in
* the output matrix unrolling the values.
*
@@ -78,45 +34,35 @@
* @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
* @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
* @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
- * @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_LOAD_M0 (e.g. -DPARTIAL_LOAD_M0=1)
- * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_LOAD_K0 (e.g. -DPARTIAL_LOAD_K0=1)
+ * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_M0 (e.g. -DPARTIAL_M0=1)
+ * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_K0 (e.g. -DPARTIAL_K0=1)
* @note Only the following values for M0, K0 and V0 are supported:
* M0: 2,3,4,5,6,7,8
* K0: 2,3,4,8,16
* V0: greater than 0
- * @note In case the input has to be reinterpreted as a 3D tensor (e.g. input of convolution layer 1x1), the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
* @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
*
- * @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the depth dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: All
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M The size of height dimension of the source tensor, affected by reinterpret_input_as_3d
+ * @param[in] V0 The number of blocks to place on the same row. It must be greater than 0.
*/
-__kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst)
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
- )
+__kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_T(src, BUFFER),
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int V0)
{
// Block size
#define BLOCK_SIZE ((M0) * (K0))
@@ -135,126 +81,63 @@ __kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
#define OUTPUT_STEP_X (K0)
#endif // defined(INTERLEAVE)
- // Compute source and destination addresses
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
- // ------------------ Compute input/output addresses ---------------------------
-
- // Compute the input address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
-
- // Compute the output address
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
- (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
+ const int x = GET_SPATIAL_IDX(0, 1, 0); // K
+ const int y = GET_SPATIAL_IDX(1, 1, 0); // M
+ const int z = GET_SPATIAL_IDX(2, 1, 0); // Batch size
- // Create variables: uint zin0=0, zin1=0, zin2=0...zin(M0-1)=0;
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zin, 0);
+ const int xi = x * K0;
+ const int yi = y * M0;
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src_stride_z by DEPTH_GEMM3D
+ const int xo = x * BLOCK_SIZE * V0 + (y % V0) * OUTPUT_OFFSET_X;
+ const int yo = (y / V0);
- input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
-
- // The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zin, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, cross_plane_pad, src_stride_y);
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
- input_ptr += z * (uint)src_stride_z;
+ // src_stride_z is expressed as M * src_stride_y, to handle case where reinterpret_input_as_3d=true
+ src_offset_first_element_in_bytes += yi * src_stride_y + z * M * src_stride_y;
+ dst_offset_first_element_in_bytes += yo * dst_stride_y + z * dst_stride_z;
-#endif // defined(REINTERPRET_INPUT_AS_3D)
+ TILE(DATA_TYPE, M0, K0, in);
- // Add offset for batched GEMM
- output_ptr += z * (uint)dst_stride_z;
+ // Initialize the input tile to zero
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in[_i].v = 0;
+ });
- // ---------------------------Load input values --------------------------------
- // Load values from the LHS matrix
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, K0), a, 0);
+ bool x_cond = (xi + K0 >= src_w) && (PARTIAL_K0 != 0);
+ bool y_cond = (yi + M0 >= M) && (PARTIAL_M0 != 0);
+ // Load input tile
+ TILE(uint, M0, 1, in_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in_indirect_y[_i].v = _i;
- LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin);
+ });
+#if PARTIAL_M0 != 0
+ if(y_cond)
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, PARTIAL_M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+ else
+#endif // PARTIAL_M0 != 0
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
- // ---------------------------Store output values ------------------------------
- REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
- STORE_BLOCK(M0, K0, DATA_TYPE, a, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
+ // Store output tile
+ TILE(uint, M0, 1, dst_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ dst_indirect_y[_i].v = _i;
+ });
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, K0, 0, BUFFER, dst, xo, (OUTPUT_STEP_X * sizeof(DATA_TYPE)), false, in, dst_indirect_y);
#undef BLOCK_SIZE
#undef OUTPUT_OFFSET_X
#undef OUTPUT_STEP_X
}
+#endif // defined(RESHAPE_LHS_NT)
-#if M0 == 2
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#elif M0 == 3 // M0 == 3
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#elif M0 == 4 // M0 == 4
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#elif M0 == 5 // M0 == 5
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- DATA_TYPE res1 = a4.s##i; \
- VSTORE(4) \
- (res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- *((__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4) = res1; \
- })
-#elif M0 == 6 // M0 == 6
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- VEC_DATA_TYPE(DATA_TYPE, 2) \
- res1 = (VEC_DATA_TYPE(DATA_TYPE, 2))(a4.s##i, a5.s##i); \
- VSTORE(4) \
- (res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- VSTORE(2) \
- (res1, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4); \
- })
-#elif M0 == 7 // M0 == 7
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
- VEC_DATA_TYPE(DATA_TYPE, 3) \
- res1 = (VEC_DATA_TYPE(DATA_TYPE, 3))(a4.s##i, a5.s##i, a6.s##i); \
- VSTORE(4) \
- (res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- VSTORE(3) \
- (res1, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4); \
- })
-#elif M0 == 8 // M0 == 8
-#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, M0) \
- res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i, a3.s##i, a4.s##i, a5.s##i, a6.s##i, a7.s##i); \
- VSTORE(M0) \
- (res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
- })
-#else // M0 not supported
-#error "M0 value not supported"
-#endif // N0 conditions
-
+#if defined(RESHAPE_LHS_T)
/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (transposed) in
* the output matrix unrolling the values.
*
@@ -262,45 +145,35 @@ __kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
* @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
* @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
* @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
- * @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_LOAD_M0 (e.g. -DPARTIAL_LOAD_M0=1)
- * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_LOAD_K0 (e.g. -DPARTIAL_LOAD_K0=1)
+ * @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_M0 (e.g. -DPARTIAL_M0=1)
+ * @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_K0 (e.g. -DPARTIAL_K0=1)
* @note Only the following values for M0, K0 and V0 are supported:
- * M0: 2,3,4,5,6,7,8
+ * M0: 2,3,4,8,16
* K0: 2,3,4,8,16
* V0: greater than 0
- * @note In case the input has to be reinterpreted as a 3D tensor (e.g. input of convolution layer 1x1), the following information must be passed at compile time:
- * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- * -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
- * -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
- * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
* @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
*
- * @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: All
- * @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
- * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the depth dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: All
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the depth dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] M The size of height dimension of the source tensor, affected by reinterpret_input_as_3d
+ * @param[in] V0 The number of blocks to place on the same row. It must be greater than 0
*/
-__kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst)
-#if defined(REINTERPRET_INPUT_AS_3D)
- ,
- uint cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
- )
+__kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_T(src, BUFFER),
+ TENSOR3D_T(dst, BUFFER),
+ const int M,
+ const int V0)
{
// Block size
#define BLOCK_SIZE ((M0) * (K0))
@@ -319,78 +192,72 @@ __kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_DECLARATION(src),
#define OUTPUT_STEP_X (M0)
#endif // defined(INTERLEAVE)
- // Compute source and destination addresses
- uint x = get_global_id(0);
- uint y = get_global_id(1);
- uint z = get_global_id(2);
-
- // ------------------ Compute input/output addresses ---------------------------
-
- // Compute the input address
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
+ const int x = GET_SPATIAL_IDX(0, 1, 0); // K
+ const int y = GET_SPATIAL_IDX(1, 1, 0); // M
+ const int z = GET_SPATIAL_IDX(2, 1, 0); // Batch size
- // Compute the output address
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
- (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
+ const int xi = x * K0;
+ const int yi = y * M0;
- // Create variables: uint zin0=0, zin1=0, zin2=0...zin(M0-1)=0;
- REPEAT_VAR_INIT_TO_CONST(M0, uint, zin, 0);
+ const int xo = x * BLOCK_SIZE * V0 + ((y % V0) * OUTPUT_OFFSET_X);
+ const int yo = (y / V0);
-#if defined(REINTERPRET_INPUT_AS_3D)
- // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
- // multiply src_stride_z by DEPTH_GEMM3D
-
- input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
-
- // The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
- CALCULATE_Z_OFFSET(M0, uint, zin, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, cross_plane_pad, src_stride_y);
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
+ // src_stride_z is expressed as M * src_stride_y, to handle case where reinterpret_input_as_3d=true
+ src_offset_first_element_in_bytes += yi * src_stride_y + z * M * src_stride_y;
+ dst_offset_first_element_in_bytes += yo * dst_stride_y + z * dst_stride_z;
- input_ptr += z * (uint)src_stride_z;
+ TILE(DATA_TYPE, M0, K0, in);
+ TILE(DATA_TYPE, K0, M0, in_tr);
-#endif // defined(REINTERPRET_INPUT_AS_3D)
+ // Initialize the tile to zero
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in[_i].v = 0;
+ });
- // Add offset for batched GEMM
- output_ptr += z * (uint)dst_stride_z;
+ // Load input tile
+ bool x_cond = (xi + K0 >= src_w) && (PARTIAL_K0 != 0);
+ bool y_cond = (yi + M0 >= M) && (PARTIAL_M0 != 0);
- // ---------------------------Load input values --------------------------------
- REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, K0), a, 0);
+ TILE(uint, M0, 1, in_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, M0,
+ {
+ in_indirect_y[_i].v = _i;
- LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin);
+ });
+#if PARTIAL_M0 != 0
+ if(y_cond)
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, PARTIAL_M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+ else
+#endif // PARTIAL_M0 != 0
+ {
+ T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, K0, PARTIAL_K0, BUFFER, src, xi, src_stride_y, x_cond, in, in_indirect_y);
+ }
+ // Transpose input tile
+ LOOP_UNROLLING(int, m0, 0, 1, M0,
+ {
+ LOOP_UNROLLING(int, k0, 0, 1, K0,
+ {
+ in_tr[k0].s[m0] = in[m0].s[k0];
+ })
+ });
- // ---------------------------Transpose and store block -----------------------
+ TILE(uint, K0, 1, dst_indirect_y);
+ LOOP_UNROLLING(int, _i, 0, 1, K0,
+ {
+ dst_indirect_y[_i].v = _i;
+ });
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 0);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 1);
-#if K0 > 2
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 2);
-#endif // K0 > 2
-#if K0 > 3
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 3);
-#endif // K0 > 3
-#if K0 > 4
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 4);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 5);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 6);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 7);
-#endif // K0 > 4
-#if K0 > 8
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 8);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 9);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, A);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, B);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, C);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, D);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, E);
- TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, F);
-#endif // K0 > 8
+ // Store output tile
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, K0, M0, 0, BUFFER, dst, xo, (OUTPUT_STEP_X * sizeof(DATA_TYPE)), false, in_tr, dst_indirect_y);
#undef BLOCK_SIZE
#undef OUTPUT_OFFSET_X
#undef OUTPUT_STEP_X
}
-#endif // defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PARTIAL_LOAD_M0) && defined(PARTIAL_LOAD_K0)
+#endif // defined(RESHAPE_LHS_T)
#if defined(RESHAPE_RHS_NT)
/** This OpenCL kernel reshapes the rhs input matrix. The kernel splits the input matrix in blocks of size K0xN0 and stores each one (not transposed) in
@@ -398,7 +265,6 @@ __kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_DECLARATION(src),
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
- * @note The number of K0xN0 vertical blocks to store on the same output row must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
* @note Only the following values for K0, N0 and H0 are supported:
* N0: 2,3,4,8,16
@@ -419,7 +285,7 @@ __kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_DECLARATION(src),
* @param[in] dst_h The size of the height dimension of the destination tensor
* @param[in] dst_n The size of the depth dimension of the destination tensor
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] H0 The number of blocks to place on the same row. It must be greater than 0.
+ * @param[in] H0 The number of blocks to place on the same row. It must be greater than 0
*/
__kernel void gemm_reshape_rhs_matrix_nt(TENSOR3D_T(src, BUFFER),
TENSOR3D_T(dst, BUFFER),
@@ -492,7 +358,6 @@ __kernel void gemm_reshape_rhs_matrix_nt(TENSOR3D_T(src, BUFFER),
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
- * @note The number of K0xN0 vertical blocks to store on the same output row must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must passed at compile time.
* @note The option -DTRANSPOSE must passed at compile time.
* @note Only the following values for K0, N0 and H0 are supported:
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index 88a7665eeb..bfb693e376 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -392,18 +392,18 @@
#define vload_partial_12(DATA, OFFSET, PTR) \
vload_partial_8(DATA.s01234567, OFFSET, PTR); \
vload_partial_4(DATA.s89AB, OFFSET, PTR + 8);
-
+// For vload_partial_{13,14,15}, an 8-vector size has been passed, because vectors size of size 5,6,7 are not supported
#define vload_partial_13(DATA, OFFSET, PTR) \
vload_partial_8(DATA.s01234567, OFFSET, PTR); \
- vload_partial_5(DATA.s89ABC, OFFSET, PTR + 8);
+ vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8);
#define vload_partial_14(DATA, OFFSET, PTR) \
vload_partial_8(DATA.s01234567, OFFSET, PTR); \
- vload_partial_6(DATA.s89ABCD, OFFSET, PTR + 8);
+ vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8);
#define vload_partial_15(DATA, OFFSET, PTR) \
vload_partial_8(DATA.s01234567, OFFSET, PTR); \
- vload_partial_7(DATA.s89ABCDE, OFFSET, PTR + 8);
+ vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8);
#define vload_partial_16(DATA, OFFSET, PTR) \
DATA = vload16(OFFSET, PTR);
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 30c37da1d0..5706248e98 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -496,6 +496,42 @@
}) \
})
+/** Load a tile from global memory (tensor) using an indirect Y index tile and conditionally use a different length for the load
+ *
+ * @note If WIDTH1_CONDITION is true, the load will use the WIDTH1 length for the load
+ * @note The vectors are loaded in reverse order so the invalid rows are overwritten by the valid ones
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] HEIGHT Number of dst rows
+ * @param[in] WIDTH0 Load width to use if WIDTH1_CONDITION = false
+ * @param[in] WIDTH1 Load width to use if WIDTH1_CONDITION = true
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image).
+ * In case of cl_image, only WIDTH multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] X Starting X position
+ * @param[in] STRIDE_Y Stride Y (in bytes) used to load each row.
+ * @param[in] WIDTH1_CONDITION Condition to select the WIDTH1 load
+ * @param[out] dst Output tile
+ * @param[in] indirect_y Indirect Y index tile
+ */
+#define T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, dst, indirect_y) \
+ ({ \
+ if(WIDTH1_CONDITION) \
+ { \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ VLOAD_PARTIAL(WIDTH0, WIDTH1) \
+ (dst[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
+ }) \
+ } \
+ else \
+ { \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
+ dst[HEIGHT - 1 - _i].v = V_LOAD(DATA_TYPE, WIDTH0, TENSOR_TYPE, TENSOR, X, (indirect_y[HEIGHT - 1 - _i].v), STRIDE_Y); \
+ }) \
+ } \
+ })
/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout
*
* @param[in] DATA_TYPE Data type
diff --git a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
index 4a01c77d0a..413c70ae1b 100644
--- a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
+++ b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.cpp
@@ -55,6 +55,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
ARM_COMPUTE_RETURN_ERROR_ON_MSG(((lhs_info.k0 & (lhs_info.k0 - 1)) && lhs_info.k0 != 3), "Only 2,3,4,8,16 are supported for k0");
ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
+ ARM_COMPUTE_RETURN_ERROR_ON((lhs_info.m0 > 4 && lhs_info.m0 < 8) && lhs_info.transpose);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
@@ -70,11 +71,10 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+Window configure_window(ITensorInfo *src, ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
{
const unsigned int num_elems_processed_per_iteration_x = lhs_info.k0;
const unsigned int num_elems_processed_per_iteration_y = lhs_info.m0;
- bool window_changed = false;
TensorInfo tmp_info(*src);
@@ -91,23 +91,13 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
auto_init_if_empty(*dst, src->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(*src, lhs_info, reinterpret_input_as_3d)));
// Configure window
- Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
- Window win_in = calculate_max_window(*src, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-
- AccessWindowStatic src_access(src, 0, 0,
- src->dimension(0),
- src->dimension(1));
- AccessWindowStatic dst_access(dst, 0, 0, dst->dimension(0), dst->dimension(1));
-
- window_changed = update_window_and_padding(win_in, src_access) || // window used by the execute_window_loop
- update_window_and_padding(win, dst_access); // window used to update the padding requirements of dst tensor
+ Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
// Collapse along the Z direction
// This collapse needs to be here in order to tune the Z dimension of LWS
Window collapsed = win.collapse(win, Window::DimZ);
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, collapsed);
+ return collapsed;
}
} // namespace
@@ -125,27 +115,20 @@ void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_con
auto padding_info = get_padding_info({ src });
- _reinterpret_input_as_3d = reinterpret_input_as_3d;
-
- const unsigned int src_w = src->dimension(0);
- const unsigned int src_h = _reinterpret_input_as_3d ? src->dimension(1) * src->dimension(2) : src->dimension(1);
- const unsigned int partial_load_m0 = src_h % lhs_info.m0;
- const unsigned int partial_load_k0 = src_w % lhs_info.k0;
+ const unsigned int src_w = src->dimension(0);
+ const unsigned int m = reinterpret_input_as_3d ? src->dimension(1) * src->dimension(2) : src->dimension(1);
+ const unsigned int partial_m0 = m % lhs_info.m0;
+ const unsigned int partial_k0 = src_w % lhs_info.k0;
// Create build options
CLBuildOptions build_opts;
build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
- build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
- build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src_w));
- build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src_h));
build_opts.add_option_if(lhs_info.interleave, "-DINTERLEAVE");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(src->dimension(1)));
- build_opts.add_option_if(_reinterpret_input_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(src->dimension(2)));
+ build_opts.add_option_if_else(lhs_info.transpose, "-DRESHAPE_LHS_T", "-DRESHAPE_LHS_NT");
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(src->element_size()));
- build_opts.add_option("-DPARTIAL_LOAD_M0=" + support::cpp11::to_string(partial_load_m0));
- build_opts.add_option("-DPARTIAL_LOAD_K0=" + support::cpp11::to_string(partial_load_k0));
+ build_opts.add_option("-DPARTIAL_M0=" + support::cpp11::to_string(partial_m0));
+ build_opts.add_option("-DPARTIAL_K0=" + support::cpp11::to_string(partial_k0));
std::string kernel_name("gemm_reshape_lhs_matrix_");
kernel_name += lhs_info.transpose ? "t" : "nt";
@@ -154,13 +137,16 @@ void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_con
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, lhs_info, reinterpret_input_as_3d);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ auto win_config = configure_window(src, dst, lhs_info, reinterpret_input_as_3d);
+ ICLKernel::configure_internal(win_config);
+
+ unsigned int idx = 2 * num_arguments_per_3d_tensor_nhw();
+ _kernel.setArg<cl_int>(idx++, m);
+ _kernel.setArg<cl_int>(idx++, lhs_info.v0);
// Set config_id for enabling LWS tuning
_config_id = "gemm_reshape_lhs_matrix_";
- _config_id += (_reinterpret_input_as_3d ? "3d_" : "");
+ _config_id += (reinterpret_input_as_3d ? "3d_" : "");
_config_id += lower_string(string_from_data_type(src->data_type()));
_config_id += "_";
_config_id += support::cpp11::to_string(dst->dimension(0));
@@ -185,8 +171,6 @@ void ClGemmReshapeLhsMatrixKernel::configure(const CLCompileContext &compile_con
Status ClGemmReshapeLhsMatrixKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, lhs_info, reinterpret_input_as_3d));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), lhs_info, reinterpret_input_as_3d).first);
-
return Status{};
}
@@ -202,19 +186,11 @@ void ClGemmReshapeLhsMatrixKernel::run_op(ITensorPack &tensors, const Window &wi
Window slice = window.first_slice_window_3D();
- if(_reinterpret_input_as_3d)
- {
- // Pass bottom paddings to the kernel if the src has to be reinterpreted as 3D tensor
- const unsigned int idx0 = 2 * num_arguments_per_3D_tensor();
- const unsigned int total_cross_plane_pad = src->info()->padding().top + src->info()->padding().bottom;
- _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
- }
-
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, slice);
- add_3D_tensor_argument(idx, dst, slice);
+ add_3d_tensor_nhw_argument(idx, src);
+ add_3d_tensor_nhw_argument(idx, dst);
enqueue(queue, *this, slice, lws_hint());
}
while(window.slide_window_slice_3D(slice));
diff --git a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
index 69ec8f04f0..db88e0d735 100644
--- a/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
+++ b/src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h
@@ -68,9 +68,6 @@ public:
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
-
-private:
- bool _reinterpret_input_as_3d{ false };
};
} // namespace kernels
} // namespace opencl
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index 894b83701f..0dd9b811f6 100644
--- a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -66,8 +66,10 @@ const auto b_values = framework::dataset::make("batchsize", 1, 3);
/** M0 values to test */
const auto m0_values_s32 = framework::dataset::make("M0", { 2, 3 });
-const auto m0_values_s16 = framework::dataset::make("M0", { 4, 5 });
-const auto m0_values_s8 = framework::dataset::make("M0", { 6, 7, 8 });
+const auto m0_values_s16 = framework::dataset::make("M0", { 4 });
+const auto m0_values_s16_nt = framework::dataset::make("M0", { 5 });
+const auto m0_values_s8_nt = framework::dataset::make("M0", { 6, 7 });
+const auto m0_values_s8 = framework::dataset::make("M0", { 8 });
/** K0 values to test */
const auto k0_values_s32 = framework::dataset::make("K0", { 2, 3 });
@@ -101,6 +103,7 @@ FIXTURE_DATA_TEST_CASE(S32, CLGEMMReshapeLHSMatrixFixture<int>, framework::Datas
// Validate output
validate(CLAccessor(_target), _reference);
}
+
FIXTURE_DATA_TEST_CASE(S16, CLGEMMReshapeLHSMatrixFixture<short>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
b_values),
@@ -114,6 +117,7 @@ FIXTURE_DATA_TEST_CASE(S16, CLGEMMReshapeLHSMatrixFixture<short>, framework::Dat
// Validate output
validate(CLAccessor(_target), _reference);
}
+
FIXTURE_DATA_TEST_CASE(S8, CLGEMMReshapeLHSMatrixFixture<char>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
b_values),
@@ -128,6 +132,37 @@ FIXTURE_DATA_TEST_CASE(S8, CLGEMMReshapeLHSMatrixFixture<char>, framework::Datas
validate(CLAccessor(_target), _reference);
}
+TEST_SUITE(NotTransposed)
+FIXTURE_DATA_TEST_CASE(S16, CLGEMMReshapeLHSMatrixFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S16)),
+ m0_values_s16_nt),
+ k0_values_s16),
+ v0_values),
+ i_values),
+ framework::dataset::make("transpose", { false })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(S8, CLGEMMReshapeLHSMatrixFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S8)),
+ m0_values_s8_nt),
+ k0_values_s8),
+ v0_values),
+ i_values),
+ framework::dataset::make("transpose", { false })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+TEST_SUITE_END()
+
TEST_SUITE(ReinterpretInputAs3D)
FIXTURE_DATA_TEST_CASE(S32, CLGEMMReshapeLHSMatrix3DFixture<int>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape3DShapes(),