aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGian Marco Iodice <gianmarco.iodice@arm.com>2018-12-06 17:13:09 +0000
committerGian Marco Iodice <gianmarco.iodice@arm.com>2018-12-10 15:58:54 +0000
commit5ba5e0938e68d4f90f5545a81066d56f022b376a (patch)
treed828f8b3fd52e6d5b8f732a7ec41f832f0b921d8
parent1d7cbb99d2a34abd15f3b6c2e017115736cd90cc (diff)
downloadComputeLibrary-5ba5e0938e68d4f90f5545a81066d56f022b376a.tar.gz
COMPMID-1774: Implement CLGEMMReshapeLHSMatrixKernel to reshape the LHS matrix of GEMM/GEMMLowp
Change-Id: I8c5fd4c8bcdffda1522c83158981ed92baa045f4 Reviewed-on: https://review.mlplatform.org/364 Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--arm_compute/core/CL/CLKernels.h1
-rw-r--r--arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h90
-rw-r--r--arm_compute/core/Types.h10
-rw-r--r--arm_compute/core/utils/misc/ShapeCalculator.h37
-rw-r--r--src/core/CL/CLKernelLibrary.cpp1
-rw-r--r--src/core/CL/cl_kernels/gemm.cl231
-rw-r--r--src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp221
-rw-r--r--src/core/CL/kernels/CLReverseKernel.cpp15
-rw-r--r--tests/CL/Helper.h44
-rw-r--r--tests/datasets/ShapeDatasets.h56
-rw-r--r--tests/validation/CL/GEMMReshapeLHSMatrix.cpp335
-rw-r--r--tests/validation/Helpers.cpp8
-rw-r--r--tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h137
-rw-r--r--tests/validation/reference/GEMMReshapeLHSMatrix.cpp111
-rw-r--r--tests/validation/reference/GEMMReshapeLHSMatrix.h44
15 files changed, 1338 insertions, 3 deletions
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index 9d7ec71a32..7bfd44721f 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -79,6 +79,7 @@
#include "arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
#include "arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h"
#include "arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h"
diff --git a/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h b/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h
new file mode 100644
index 0000000000..77d7494a7f
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMRESHAPELHSMATRIXKERNEL_H__
+#define __ARM_COMPUTE_CLGEMMRESHAPELHSMATRIXKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** OpenCL kernel to reshape the LHS matrix when performing the matrix multiplication.
+ * In particular, this function splits the input matrix in blocks of size M0xK0 (defined through GEMMLHSMatrixInfo) and
+ * stores each one in the output matrix unrolling the values
+ */
+class CLGEMMReshapeLHSMatrixKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLGEMMReshapeLHSMatrixKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLGEMMReshapeLHSMatrixKernel(const CLGEMMReshapeLHSMatrixKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLGEMMReshapeLHSMatrixKernel &operator=(const CLGEMMReshapeLHSMatrixKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLGEMMReshapeLHSMatrixKernel(CLGEMMReshapeLHSMatrixKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLGEMMReshapeLHSMatrixKernel &operator=(CLGEMMReshapeLHSMatrixKernel &&) = default;
+ /** Initialise the kernel's input and output.
+ *
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[out] output Output tensor. Data type supported: same as @p input
+ * @param[in] lhs_info LHS matrix information to be used for reshaping. This object contains all the necessary
+ * information to reshape the input tensor. Only the following values are supported:
+ * lhs_info.m0: 2,3,4,5,6,7,8
+ * lhs_info.k0: 2,4,8,16
+ * lhs_info.v0: greater than 0
+ * lhs_info.transpose: false
+ * lhs_info.interleave: true, false
+ * @param[in] reinterpret_input_as_3d (Optional) True if the input has to be reinterpreted as 3D tensor
+ */
+ void configure(const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMReshapeLHSMatrixKernel
+ *
+ * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
+ * @param[in] lhs_info LHS matrix information to be used for reshaping. This object contains all the necessary
+ * information to reshape the input tensor. Only the following values are supported:
+ * lhs_info.m0: 2,3,4,5,6,7,8
+ * lhs_info.k0: 2,4,8,16
+ * lhs_info.v0: greater than 0
+ * lhs_info.transpose: false
+ * lhs_info.interleave: true, false
+ * @param[in] reinterpret_input_as_3d True if the input has to be reinterpreted as 3D tensor
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d);
+
+ // Inherited methods overridden
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+ bool _reinterpret_input_as_3d;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLGEMMRESHAPELHSMATRIXKERNEL_H__ */ \ No newline at end of file
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 75b38c5cb8..55b0ccb30d 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1771,6 +1771,16 @@ struct GEMMLowpOutputStageInfo
int gemmlowp_max_bound{ 0 }; /**< GEMMLowp max value used to saturate down the output result before converting back to QASYMM8 */
};
+/** GEMM LHS (Left Hand Side) matrix information */
+struct GEMMLHSMatrixInfo
+{
+ unsigned int m0{ 1 }; /**< Number of rows processed by the matrix multiplication */
+ unsigned int k0{ 1 }; /**< Number of partial accumulations performed by the matrix multiplication */
+ unsigned int v0{ 1 }; /**< Number of vertical blocks of size (m0xk0) stored on the same output row */
+ bool transpose{ true }; /**< True if the (m0xk0) block has to be transposed before been stored */
+ bool interleave{ true }; /**< True if the v0 (m0xk0) blocks have to be interleaved in the output row */
+};
+
/** GEMM information class. This class stores the necessary information to compute GEMM functions
*
* This object also contains the information about how matrix A and matrix B have been reshaped
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index c625a07a7f..88ce8d9e7b 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -101,6 +101,43 @@ inline TensorShape compute_weights_reshaped_shape(const ITensorInfo &weights, bo
return weights_reshaped;
}
+inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d = false)
+{
+ ARM_COMPUTE_ERROR_ON(lhs_info.m0 == 0);
+ ARM_COMPUTE_ERROR_ON(lhs_info.k0 == 0);
+ ARM_COMPUTE_ERROR_ON(lhs_info.v0 == 0);
+
+ // Input width/height
+ const unsigned int input_width = a.dimension(0);
+ const unsigned int input_height = reinterpret_input_as_3d ? a.dimension(1) * a.dimension(2) : a.dimension(1);
+
+ // Number of horizontal/vertical blocks in the input tensor
+ const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(lhs_info.k0));
+ const unsigned int num_vert_blocks = std::ceil(input_height / static_cast<float>(lhs_info.m0));
+
+ // Block size
+ const unsigned int block_size = lhs_info.m0 * lhs_info.k0;
+
+ // Output width/height
+ const unsigned int output_width = block_size * num_horiz_blocks * lhs_info.v0;
+ const unsigned int output_height = std::ceil(num_vert_blocks / static_cast<float>(lhs_info.v0));
+
+ TensorShape lhs_shape{ a.tensor_shape() };
+ lhs_shape.set(0, output_width);
+ lhs_shape.set(1, output_height);
+
+ if((reinterpret_input_as_3d) && (lhs_shape.num_dimensions() > 2))
+ {
+ // When the data format is NHWC and the shapes are Nx1x1
+ // the tensor shape num_dimensions is automatically set to 1 instead of 3.
+ // To avoid failures by removing a dimension that doesn't exist
+ // check if the number of dimensions is greater than 2.
+ lhs_shape.remove_dimension(2);
+ }
+
+ return lhs_shape;
+}
+
inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
{
// The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index da85472005..7b98e5ae80 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -282,6 +282,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "gemm_mm_floating_point_f32_bifrost_1000", "gemm.cl" },
{ "gemm_lc_vm_f32", "gemm.cl" },
{ "gemm_transpose1xW", "gemm.cl" },
+ { "gemm_reshape_lhs_matrix_nt", "gemm.cl" },
{ "gemmlowp_matrix_a_reduction", "gemmlowp.cl" },
{ "gemmlowp_matrix_a_reduction_dot8", "gemmlowp.cl" },
{ "gemmlowp_matrix_b_reduction", "gemmlowp.cl" },
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index 7de15d018a..cf1e021929 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -23,6 +23,235 @@
*/
#include "helpers.h"
+#if defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE)
+
+/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix in blocks of size M0xK0 and stores each one (not transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=float)
+ * @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (i.e. -DM0=2, -DK0=2).
+ * @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (i.e. -DV0=2)
+ * @note Only the following values for M0, K0 and V0 are supported:
+ * M0: 2,3,4,5,6,7,8
+ * K0: 2,4,8,16
+ * V0: greater than 0
+ * @note In case the input has to be reinterpreted as a 3D tensor (i.e. input of convolution layer 1x1), the following information must be passed at compile time:
+ * -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
+ * -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
+ * -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
+ * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
+ * @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ *
+ * @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ * @param[in] cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
+ */
+__kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst)
+#if defined(REINTERPRET_INPUT_AS_3D)
+ ,
+ uint cross_plane_pad
+#endif // REINTERPRET_INPUT_AS_3D
+ )
+{
+// Block size
+#define BLOCK_SIZE ((M0) * (K0))
+
+// Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (K0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+// Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (K0) * (V0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (K0)
+#endif // defined(INTERLEAVE)
+
+ // Compute source and destination addresses
+ uint x = get_global_id(0);
+ uint y = get_global_id(1);
+ uint z = get_global_id(2);
+
+ // ------------------ Compute input/output addresses ---------------------------
+
+ // Compute the input address
+ __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
+
+ // Compute the output address
+ __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
+ (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
+
+ uint zin0 = 0;
+ uint zin1 = 0;
+ uint zin2 = 0;
+ uint zin3 = 0;
+ uint zin4 = 0;
+ uint zin5 = 0;
+ uint zin6 = 0;
+ uint zin7 = 0;
+
+#if defined(REINTERPRET_INPUT_AS_3D)
+ // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
+ // multiply src_stride_z by DEPTH_GEMM3D
+
+ // Note for the REINTERPRET_INPUT_AS_3D case
+ // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
+ // in order to take into account the presence of possible cross plane paddings
+ //
+ // | |
+ // | plane0 |
+ // | |
+ // |__________________|
+ // |******************|
+ // | cross_plane_pad |
+ // |******************|
+ // | |
+ // | plane1 |
+ // | |
+ // |__________________|
+
+ input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
+
+ // The plane (zin) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
+ zin0 = (0 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin0 = min((uint)(DEPTH_GEMM3D - 1), zin0);
+ zin0 *= (cross_plane_pad * src_stride_y);
+#if M0 > 1
+ zin1 = (1 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin1 = min((uint)(DEPTH_GEMM3D - 1), zin1);
+ zin1 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 1
+#if M0 > 2
+ zin2 = (2 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin2 = min((uint)(DEPTH_GEMM3D - 1), zin2);
+ zin2 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 2
+#if M0 > 3
+ zin3 = (3 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin3 = min((uint)(DEPTH_GEMM3D - 1), zin3);
+ zin3 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 3
+#if M0 > 4
+ zin4 = (4 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin4 = min((uint)(DEPTH_GEMM3D - 1), zin4);
+ zin4 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 4
+#if M0 > 5
+ zin5 = (5 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin5 = min((uint)(DEPTH_GEMM3D - 1), zin5);
+ zin5 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 5
+#if M0 > 6
+ zin6 = (6 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin6 = min((uint)(DEPTH_GEMM3D - 1), zin6);
+ zin6 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 6
+#if M0 > 7
+ zin7 = (7 + (uint)(y * M0)) / (uint)HEIGHT_GEMM3D;
+ zin7 = min((uint)(DEPTH_GEMM3D - 1), zin7);
+ zin7 *= (cross_plane_pad * src_stride_y);
+#endif // M0 > 7
+
+#else // defined(REINTERPRET_INPUT_AS_3D)
+
+ input_ptr += z * (uint)src_stride_z;
+
+#endif // defined(REINTERPRET_INPUT_AS_3D)
+
+ // Add offset for batched GEMM
+ output_ptr += z * (uint)dst_stride_z;
+
+ // ---------------------------Load input values --------------------------------
+
+ // Load values from the LHS matrix
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a0 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y + zin0));
+#if M0 > 1
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a1 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y + zin1));
+#endif // M0 > 1
+#if M0 > 2
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a2 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y + zin2));
+#endif // M0 > 2
+#if M0 > 3
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a3 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y + zin3));
+#endif // M0 > 3
+#if M0 > 4
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a4 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y + zin4));
+#endif // M0 > 4
+#if M0 > 5
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a5 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y + zin5));
+#endif // M0 > 5
+#if M0 > 6
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a6 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y + zin6));
+#endif // M0 > 6
+#if M0 > 7
+ VEC_DATA_TYPE(DATA_TYPE, K0)
+ a7 = VLOAD(K0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y + zin7));
+#endif // M0 > 7
+
+ // ---------------------------Store output values ------------------------------
+
+ VSTORE(K0)
+ (a0, 0, (__global DATA_TYPE *)(output_ptr + 0 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#if M0 > 1
+ VSTORE(K0)
+ (a1, 0, (__global DATA_TYPE *)(output_ptr + 1 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 1
+#if M0 > 2
+ VSTORE(K0)
+ (a2, 0, (__global DATA_TYPE *)(output_ptr + 2 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 2
+#if M0 > 3
+ VSTORE(K0)
+ (a3, 0, (__global DATA_TYPE *)(output_ptr + 3 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 3
+#if M0 > 4
+ VSTORE(K0)
+ (a4, 0, (__global DATA_TYPE *)(output_ptr + 4 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 4
+#if M0 > 5
+ VSTORE(K0)
+ (a5, 0, (__global DATA_TYPE *)(output_ptr + 5 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 5
+#if M0 > 6
+ VSTORE(K0)
+ (a6, 0, (__global DATA_TYPE *)(output_ptr + 6 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 6
+#if M0 > 7
+ VSTORE(K0)
+ (a7, 0, (__global DATA_TYPE *)(output_ptr + 7 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // M0 > 7
+
+#undef BLOCK_SIZE
+#undef OUTPUT_OFFSET_X
+#undef OUTPUT_STEP_X
+}
+#endif // defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE)
+
#if defined(TRANSPOSE_W) && defined(MULT_TRANSPOSE1XW_WIDTH)
#if ELEMENT_SIZE == 1
@@ -193,7 +422,7 @@ __kernel void gemm_interleave4x4(TENSOR3D_DECLARATION(src),
vstore4(a1, 0, ((__global DATA_TYPE *)(dst_ptr + dst_addr_in_bytes) + 4 * MULT_INTERLEAVE4X4_HEIGHT));
vstore4(a2, 0, ((__global DATA_TYPE *)(dst_ptr + dst_addr_in_bytes) + 8 * MULT_INTERLEAVE4X4_HEIGHT));
vstore4(a3, 0, ((__global DATA_TYPE *)(dst_ptr + dst_addr_in_bytes) + 12 * MULT_INTERLEAVE4X4_HEIGHT));
-#else // defined(UNROLL_BLOCK)
+#else // defined(UNROLL_BLOCK)
VEC_DATA_TYPE(DATA_TYPE, 4)
val0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s0, a1.s0, a2.s0, a3.s0);
vstore4(val0, 0, ((__global DATA_TYPE *)(dst_ptr + dst_addr_in_bytes) + 0 * MULT_INTERLEAVE4X4_HEIGHT));
diff --git a/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
new file mode 100644
index 0000000000..e0af5801a8
--- /dev/null
+++ b/src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.transpose);
+ ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.v0 == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((lhs_info.k0 & (lhs_info.k0 - 1)), "Only power of two values are allowed for k0");
+ ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.k0 > 16);
+ ARM_COMPUTE_RETURN_ERROR_ON(lhs_info.m0 < 2 || lhs_info.m0 > 8);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::U8, DataType::S8,
+ DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+ DataType::F16, DataType::F32);
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_lhs_reshaped_shape(*input, lhs_info, reinterpret_input_as_3d));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+{
+ const unsigned int num_elems_processed_per_iteration_x = lhs_info.k0;
+ const unsigned int num_elems_processed_per_iteration_y = lhs_info.m0;
+ bool window_changed = false;
+
+ TensorInfo tmp_info(*input);
+
+ if(reinterpret_input_as_3d)
+ {
+ // Since the input tensor has to be reinterpreted as 3D and the execute window is based on a 2D interleave,
+ // the window needs to be constructed on the 2D collapsed version of the tensor
+ TensorShape tmp_shape(input->tensor_shape());
+ tmp_shape.collapse(2U, 1U);
+ tmp_info.set_tensor_shape(tmp_shape);
+ }
+
+ // Output auto inizialitation if not yet initialized
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*input, lhs_info, reinterpret_input_as_3d)));
+
+ // Configure window
+ // Note: bottom paddings are calculated manually as the input can be reinterpreted as 3D tensor
+ // The only way to set properly the paddings, it is to set those explicitly through the AccessWindowStatic
+ const int m = reinterpret_input_as_3d ? input->tensor_shape()[1] * input->tensor_shape()[2] : input->tensor_shape()[1];
+ const int bottom_pad = ceil_to_multiple(m, num_elems_processed_per_iteration_y) - m;
+
+ Window win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+ Window win_in = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+
+ AccessWindowStatic input_access(input, 0, 0,
+ ceil_to_multiple(input->dimension(0), num_elems_processed_per_iteration_x),
+ input->dimension(1) + bottom_pad);
+ AccessWindowStatic output_access(output, 0, 0, output->dimension(0), output->dimension(1));
+
+ window_changed = update_window_and_padding(win_in, input_access) || // window used by the execute_window_loop
+ update_window_and_padding(win, output_access); // window used to update the padding requirements of output tensor
+ output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+
+ // Collapse along the Z direction
+ // This collapse needs to be here in order to tune the Z dimension of LWS
+ Window collapsed = win.collapse(win, Window::DimZ);
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, collapsed);
+}
+} // namespace
+
+CLGEMMReshapeLHSMatrixKernel::CLGEMMReshapeLHSMatrixKernel()
+ : _input(nullptr), _output(nullptr), _reinterpret_input_as_3d(false)
+{
+}
+
+void CLGEMMReshapeLHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ // Perform validate step
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), lhs_info, reinterpret_input_as_3d));
+
+ _input = input;
+ _output = output;
+ _reinterpret_input_as_3d = reinterpret_input_as_3d;
+
+ // Create build options
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DM0=" + support::cpp11::to_string(lhs_info.m0));
+ build_opts.add_option("-DK0=" + support::cpp11::to_string(lhs_info.k0));
+ build_opts.add_option("-DV0=" + support::cpp11::to_string(lhs_info.v0));
+ build_opts.add_option_if(lhs_info.interleave, "-DINTERLEAVE");
+ build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D");
+ build_opts.add_option_if(_reinterpret_input_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(input->info()->dimension(1)));
+ build_opts.add_option_if(_reinterpret_input_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(input->info()->dimension(2)));
+
+ switch(input->info()->element_size())
+ {
+ case 1:
+ build_opts.add_option("-DDATA_TYPE=uchar");
+ break;
+ case 2:
+ build_opts.add_option("-DDATA_TYPE=ushort");
+ break;
+ case 4:
+ build_opts.add_option("-DDATA_TYPE=uint");
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data type not supported");
+ }
+
+ std::string kernel_name("gemm_reshape_lhs_matrix_");
+ kernel_name += lhs_info.transpose ? "t" : "nt";
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), output->info(), lhs_info, reinterpret_input_as_3d);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
+
+ // Set config_id for enabling LWS tuning
+ _config_id = "gemm_reshape_lhs_matrix_";
+ _config_id += (_reinterpret_input_as_3d ? "3d_" : "");
+ _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(1));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(2));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(lhs_info.m0);
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(lhs_info.k0);
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(lhs_info.v0);
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(lhs_info.interleave);
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(lhs_info.transpose);
+}
+
+Status CLGEMMReshapeLHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, bool reinterpret_input_as_3d)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, lhs_info, reinterpret_input_as_3d));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), lhs_info, reinterpret_input_as_3d).first);
+
+ return Status{};
+}
+
+void CLGEMMReshapeLHSMatrixKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ Window slice = window.first_slice_window_3D();
+
+ if(_reinterpret_input_as_3d)
+ {
+ // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor
+ const unsigned int idx0 = 2 * num_arguments_per_3D_tensor();
+ const unsigned int total_cross_plane_pad = _input->info()->padding().top + _input->info()->padding().bottom;
+ _kernel.setArg<cl_uint>(idx0, static_cast<unsigned int>(total_cross_plane_pad));
+ }
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice, lws_hint());
+ }
+ while(window.slide_window_slice_3D(slice));
+} \ No newline at end of file
diff --git a/src/core/CL/kernels/CLReverseKernel.cpp b/src/core/CL/kernels/CLReverseKernel.cpp
index 2859a51ce1..adbdb11c5f 100644
--- a/src/core/CL/kernels/CLReverseKernel.cpp
+++ b/src/core/CL/kernels/CLReverseKernel.cpp
@@ -80,7 +80,20 @@ void CLReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const
// Set kernel build options
CLBuildOptions build_opts;
build_opts.add_option("-DNUM_REVERSE_DIMS=" + support::cpp11::to_string(axis->info()->dimension(0)));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ switch(input->info()->element_size())
+ {
+ case 1:
+ build_opts.add_option("-DDATA_TYPE=uchar");
+ break;
+ case 2:
+ build_opts.add_option("-DDATA_TYPE=ushort");
+ break;
+ case 4:
+ build_opts.add_option("-DDATA_TYPE=uint");
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Data type not supported");
+ }
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reverse", build_opts.options()));
diff --git a/tests/CL/Helper.h b/tests/CL/Helper.h
index 32f9ca00e3..88e716726a 100644
--- a/tests/CL/Helper.h
+++ b/tests/CL/Helper.h
@@ -24,7 +24,13 @@
#ifndef __ARM_COMPUTE_TEST_CL_HELPER_H__
#define __ARM_COMPUTE_TEST_CL_HELPER_H__
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
+#include "arm_compute/core/CL/kernels/CLMemsetKernel.h"
+
+#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+#include "arm_compute/runtime/IFunction.h"
#include "support/ToolchainSupport.h"
namespace arm_compute
@@ -77,6 +83,44 @@ public:
_border_handler.configure(first, BorderSize(bordersize), BorderMode::CONSTANT, PixelValue(0));
}
};
+
+/** As above, but this also initializes the output tensor to zero */
+template <typename K, int bordersize>
+class CLSynthetizeFunctionInitOutputWithZeroAndWithZeroConstantBorder : public IFunction
+{
+public:
+ /** Configure the kernel.
+ *
+ * @param[in] first First input argument.
+ * @param[in] second Second input argument.
+ * @param[in] args Rest of the configuration arguments.
+ */
+ template <typename T, typename... Args>
+ void configure(T first, T second, Args &&... args)
+ {
+ auto k = arm_compute::support::cpp14::make_unique<K>();
+ k->set_target(CLScheduler::get().target());
+ k->configure(first, second, std::forward<Args>(args)...);
+ _kernel = std::move(k);
+ _border_handler.configure(first, BorderSize(bordersize), BorderMode::CONSTANT, PixelValue(0));
+ _memset_kernel.configure(second, PixelValue(0));
+ }
+
+ // Inherited method overridden:
+ void run() override final
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(!_kernel, "The CL kernel or function isn't configured");
+
+ CLScheduler::get().enqueue(_memset_kernel, false);
+ CLScheduler::get().enqueue(_border_handler, false);
+ CLScheduler::get().enqueue(*_kernel);
+ }
+
+private:
+ CLMemsetKernel _memset_kernel{}; /**< Kernel to initialize the tensor */
+ CLFillBorderKernel _border_handler{}; /**< Kernel to handle borders */
+ std::unique_ptr<ICLKernel> _kernel{}; /**< Kernel to run */
+};
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_CL_HELPER_H__ */
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index f7e7ae26b9..875f7e8ea1 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -892,6 +892,62 @@ public:
{
}
};
+
+/** Data set containing small tensor shapes to be used with the GEMM reshaping kernel */
+class SmallGEMMReshape2DShapes final : public ShapeDataset
+{
+public:
+ SmallGEMMReshape2DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 63U, 72U },
+ })
+ {
+ }
+};
+
+/** Data set containing small tensor shapes to be used with the GEMM reshaping kernel when the input has to be reinterpreted as 3D */
+class SmallGEMMReshape3DShapes final : public ShapeDataset
+{
+public:
+ SmallGEMMReshape3DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 63U, 9U, 8U },
+ })
+ {
+ }
+};
+
+/** Data set containing large tensor shapes to be used with the GEMM reshaping kernel */
+class LargeGEMMReshape2DShapes final : public ShapeDataset
+{
+public:
+ LargeGEMMReshape2DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 16U, 27U },
+ TensorShape{ 533U, 171U },
+ TensorShape{ 345U, 612U }
+ })
+ {
+ }
+};
+
+/** Data set containing large tensor shapes to be used with the GEMM reshaping kernel when the input has to be reinterpreted as 3D */
+class LargeGEMMReshape3DShapes final : public ShapeDataset
+{
+public:
+ LargeGEMMReshape3DShapes()
+ : ShapeDataset("Shape",
+ {
+ TensorShape{ 16U, 3U, 9U },
+ TensorShape{ 533U, 19U, 9U },
+ TensorShape{ 345U, 34U, 18U }
+ })
+ {
+ }
+};
} // namespace datasets
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
new file mode 100644
index 0000000000..ea6589df22
--- /dev/null
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+/** Data types */
+const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 });
+
+/** Batch size values to test */
+const auto b_values = framework::dataset::make("batchsize", 1, 3);
+
+/** M0 values to test */
+const auto m0_values = framework::dataset::make("M0", 2, 9);
+
+/** K0 values to test */
+const auto k0_values = framework::dataset::make("K0", { 2, 4, 8, 16 });
+
+/** V0 values to test */
+const auto v0_values = framework::dataset::make("V0", 1, 4);
+
+/** Interleave values to test */
+const auto i_values = framework::dataset::make("interleave", { true, false });
+
+/** Transpose values to test */
+const auto t_values = framework::dataset::make("transpose", { false });
+} // namespace
+
+using namespace arm_compute::misc::shape_calculator;
+
+// Initialize the output tensor with zero and fill the border with zero
+using CLGEMMReshapeLHSMatrix = CLSynthetizeFunctionInitOutputWithZeroAndWithZeroConstantBorder<CLGEMMReshapeLHSMatrixKernel, 16>;
+
+template <typename T>
+using CLGEMMReshapeLHSMatrixFixture = GEMMReshapeLHSMatrixValidationFixture<CLTensor, CLAccessor, CLGEMMReshapeLHSMatrix, T, false>;
+
+// Fixture to use when the input has to be reinterpreted as 3D
+template <typename T>
+using CLGEMMReshapeLHSMatrix3DFixture = GEMMReshapeLHSMatrixValidationFixture<CLTensor, CLAccessor, CLGEMMReshapeLHSMatrix, T, true>;
+
+TEST_SUITE(CL)
+TEST_SUITE(GEMMReshapeLHSMatrix)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+ b_values),
+ data_types),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values),
+shape_in, b_value, data_type, m0_value, k0_value, v0_value, i_value, t_value)
+{
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0_value;
+ lhs_info.k0 = k0_value;
+ lhs_info.v0 = v0_value;
+ lhs_info.interleave = i_value;
+ lhs_info.transpose = t_value;
+
+ const TensorShape shape_src(shape_in[0], shape_in[1], b_value);
+ const TensorShape shape_dst = compute_lhs_reshaped_shape(TensorInfo(shape_src, 1, data_type), lhs_info, false);
+
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(shape_src, data_type);
+ CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLGEMMReshapeLHSMatrixKernel reshape_lhs;
+ reshape_lhs.configure(&src, &dst, lhs_info, false);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMReshapeLHSMatrixFixture<int>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S32)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMReshapeLHSMatrixFixture<int>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S32)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMReshapeLHSMatrixFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S16)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMReshapeLHSMatrixFixture<short>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S16)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMReshapeLHSMatrixFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S8)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMReshapeLHSMatrixFixture<char>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S8)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+
+TEST_SUITE(REINTERPRET_INPUT_AS_3D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape3DShapes(),
+ b_values),
+ data_types),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values),
+shape_in, b_value, data_type, m0_value, k0_value, v0_value, i_value, t_value)
+{
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0_value;
+ lhs_info.k0 = k0_value;
+ lhs_info.v0 = v0_value;
+ lhs_info.interleave = i_value;
+ lhs_info.transpose = t_value;
+
+ const TensorShape shape_src(shape_in[0], shape_in[1], shape_in[2], b_value);
+ const TensorShape shape_dst = compute_lhs_reshaped_shape(TensorInfo(shape_src, 1, data_type), lhs_info, true);
+
+ // Create tensors
+ CLTensor src = create_tensor<CLTensor>(shape_src, data_type);
+ CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLGEMMReshapeLHSMatrixKernel reshape_lhs;
+ reshape_lhs.configure(&src, &dst, lhs_info, true);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMReshapeLHSMatrix3DFixture<int>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape3DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S32)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMReshapeLHSMatrix3DFixture<int>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape3DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S32)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMReshapeLHSMatrix3DFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape3DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S16)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMReshapeLHSMatrix3DFixture<short>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape3DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S16)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMReshapeLHSMatrix3DFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape3DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S8)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMReshapeLHSMatrix3DFixture<char>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape3DShapes(),
+ b_values),
+ framework::dataset::make("DataType", DataType::S8)),
+ m0_values),
+ k0_values),
+ v0_values),
+ i_values),
+ t_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // REINTERPRET_INPUT_AS_3D
+TEST_SUITE_END() // GEMMReshapeLHSMatrix
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute \ No newline at end of file
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index eab6d5629f..11c454ea67 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -207,7 +207,7 @@ void transpose_matrix(const SimpleTensor<T> &in, SimpleTensor<T> &out)
{
for(int x = 0; x < width; ++x)
{
- const float val = in[x + y * width];
+ const T val = in[x + y * width];
out[x * height + y] = val;
}
@@ -313,10 +313,16 @@ std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, flo
template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<half> &in, SimpleTensor<half> &roi, const Coordinates &coord);
+template void get_tile(const SimpleTensor<int> &in, SimpleTensor<int> &roi, const Coordinates &coord);
+template void get_tile(const SimpleTensor<short> &in, SimpleTensor<short> &roi, const Coordinates &coord);
+template void get_tile(const SimpleTensor<char> &in, SimpleTensor<char> &roi, const Coordinates &coord);
template void zeros(SimpleTensor<float> &in, const Coordinates &anchor, const TensorShape &shape);
template void zeros(SimpleTensor<half> &in, const Coordinates &anchor, const TensorShape &shape);
template void transpose_matrix(const SimpleTensor<float> &in, SimpleTensor<float> &out);
template void transpose_matrix(const SimpleTensor<half> &in, SimpleTensor<half> &out);
+template void transpose_matrix(const SimpleTensor<int> &in, SimpleTensor<int> &out);
+template void transpose_matrix(const SimpleTensor<short> &in, SimpleTensor<short> &out);
+template void transpose_matrix(const SimpleTensor<char> &in, SimpleTensor<char> &out);
template void matrix_multiply(const SimpleTensor<float> &a, const SimpleTensor<float> &b, SimpleTensor<float> &out);
template void matrix_multiply(const SimpleTensor<half> &a, const SimpleTensor<half> &b, SimpleTensor<half> &out);
diff --git a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
new file mode 100644
index 0000000000..3a5ab7c5e1
--- /dev/null
+++ b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMRESHAPELHSMATRIX_FIXTURE
+#define ARM_COMPUTE_TEST_GEMMRESHAPELHSMATRIX_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/GEMMReshapeLHSMatrix.h"
+#include "tests/validation/reference/Utils.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool reinterpret_input_as_3d = false>
+class GEMMReshapeLHSMatrixValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_in, unsigned int batch_size, DataType data_type, unsigned int m0, unsigned int k0, unsigned int v0, bool interleave, bool transpose)
+ {
+ GEMMLHSMatrixInfo lhs_info;
+ lhs_info.m0 = m0;
+ lhs_info.k0 = k0;
+ lhs_info.v0 = v0;
+ lhs_info.interleave = interleave;
+ lhs_info.transpose = transpose;
+
+ // Set the tensor shape
+ const TensorShape shape_src(shape_in[0],
+ shape_in[1],
+ reinterpret_input_as_3d ? shape_in[2] : batch_size,
+ reinterpret_input_as_3d ? batch_size : 1);
+
+ _target = compute_target(shape_src, data_type, lhs_info);
+ _reference = compute_reference(shape_src, data_type, lhs_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+
+ TensorType compute_target(TensorShape input_shape, DataType data_type, const GEMMLHSMatrixInfo &lhs_info)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
+ TensorType dst;
+
+ // The output tensor will be auto-initialized within the function
+
+ // Create and configure function
+ FunctionType gemm_lhs_reshape;
+ gemm_lhs_reshape.configure(&src, &dst, lhs_info, reinterpret_input_as_3d);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute GEMM LHS matrix reshape function
+ gemm_lhs_reshape.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, DataType data_type, const GEMMLHSMatrixInfo &lhs_info)
+ {
+ TensorShape src_shape = input_shape;
+
+ // If the input has to be reinterpreted as 3D, collapse the second dimension with the 3rd
+ if(reinterpret_input_as_3d)
+ {
+ src_shape.collapse(2U, 1U);
+ }
+
+ // Create reference
+ SimpleTensor<T> src{ src_shape, data_type, 1 };
+
+ // Fill reference
+ fill(src);
+
+ TensorShape output_shape = compute_lhs_reshaped_shape(TensorInfo(input_shape, 1, data_type), lhs_info, reinterpret_input_as_3d);
+
+ return reference::gemm_reshape_lhs_matrix<T>(src, output_shape, lhs_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMRESHAPELHSMATRIX_FIXTURE */ \ No newline at end of file
diff --git a/tests/validation/reference/GEMMReshapeLHSMatrix.cpp b/tests/validation/reference/GEMMReshapeLHSMatrix.cpp
new file mode 100644
index 0000000000..431d65696e
--- /dev/null
+++ b/tests/validation/reference/GEMMReshapeLHSMatrix.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "GEMMReshapeLHSMatrix.h"
+
+#include "arm_compute/core/Types.h"
+
+#include "tests/validation/Helpers.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> gemm_reshape_lhs_matrix(const SimpleTensor<T> &in, const TensorShape &output_shape, const GEMMLHSMatrixInfo &lhs_info)
+{
+ ARM_COMPUTE_ERROR_ON(in.shape().num_dimensions() > 3);
+
+ SimpleTensor<T> out{ output_shape, in.data_type() };
+
+ // Initialize the output tensor with zero
+ std::memset(&out[0], 0, out.num_elements() * sizeof(T));
+
+ const unsigned int K = in.shape()[0];
+ const unsigned int M = in.shape()[1];
+ const unsigned int B = in.shape()[2];
+
+ const unsigned int num_tiles_x = std::ceil(K / static_cast<float>(lhs_info.k0));
+ const unsigned int num_tiles_y = std::ceil(M / static_cast<float>(lhs_info.m0));
+
+ const TensorShape tile_dims(lhs_info.k0, lhs_info.m0);
+ const TensorShape tile_dims_transposed(lhs_info.m0, lhs_info.k0);
+
+ // Simple tensor for the input tile
+ SimpleTensor<T> src_tile{ tile_dims, in.data_type() };
+
+    // Simple tensor for the transposed input tile
+ SimpleTensor<T> src_tile_transposed{ tile_dims_transposed, in.data_type() };
+
+ // Simple tensor to use when storing the values
+ SimpleTensor<T> *tile_to_use = lhs_info.transpose ? &src_tile_transposed : &src_tile;
+
+ const unsigned int offset_output_x = lhs_info.interleave ? tile_to_use->shape()[0] : tile_to_use->shape()[0] * tile_to_use->shape()[1];
+ const unsigned int step_output_x = lhs_info.interleave ? tile_to_use->shape()[0] * lhs_info.v0 : tile_to_use->shape()[0];
+
+ for(unsigned int z = 0; z < B; ++z)
+ {
+ for(unsigned int y = 0; y < num_tiles_y; ++y)
+ {
+ for(unsigned int x = 0; x < num_tiles_x; ++x)
+ {
+ // Get the tile from the input tensor
+ get_tile<T>(in, src_tile, Coordinates(x * lhs_info.k0, y * lhs_info.m0, z, 0));
+
+ if(lhs_info.transpose)
+ {
+ // Transpose matrix
+ transpose_matrix<T>(src_tile, src_tile_transposed);
+ }
+
+ // Store
+ const unsigned int offset_output = (x * lhs_info.k0 * lhs_info.m0 * lhs_info.v0) + ((y % lhs_info.v0) * offset_output_x) + ((y / lhs_info.v0) * out.shape()[0]) + (z * out.shape()[0] * out.shape()[1]);
+
+ for(unsigned int i = 0; i < tile_to_use->shape()[1]; ++i)
+ {
+ const unsigned int offset_tile = i * tile_to_use->shape()[0];
+
+ // Copy per row
+ std::copy(&(*tile_to_use)[offset_tile], &(*tile_to_use)[offset_tile + tile_to_use->shape()[0]], &out[offset_output + i * step_output_x]);
+ }
+ }
+ }
+ }
+
+ return out;
+}
+template SimpleTensor<int> gemm_reshape_lhs_matrix(const SimpleTensor<int> &in, const TensorShape &output_shape, const GEMMLHSMatrixInfo &lhs_info);
+template SimpleTensor<short> gemm_reshape_lhs_matrix(const SimpleTensor<short> &in, const TensorShape &output_shape, const GEMMLHSMatrixInfo &lhs_info);
+template SimpleTensor<char> gemm_reshape_lhs_matrix(const SimpleTensor<char> &in, const TensorShape &output_shape, const GEMMLHSMatrixInfo &lhs_info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute \ No newline at end of file
diff --git a/tests/validation/reference/GEMMReshapeLHSMatrix.h b/tests/validation/reference/GEMMReshapeLHSMatrix.h
new file mode 100644
index 0000000000..c0328dbd51
--- /dev/null
+++ b/tests/validation/reference/GEMMReshapeLHSMatrix.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_GEMMRESHAPELHSMATRIX_H__
+#define __ARM_COMPUTE_TEST_GEMMRESHAPELHSMATRIX_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> gemm_reshape_lhs_matrix(const SimpleTensor<T> &in, const TensorShape &output_shape, const GEMMLHSMatrixInfo &lhs_info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_GEMMRESHAPELHSMATRIX_H__ */ \ No newline at end of file