-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h |   2
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h |   2
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMM.h                |   6
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                          |   3
-rw-r--r--  src/core/CL/cl_kernels/fixed_point.h                     |  67
-rw-r--r--  src/core/CL/cl_kernels/gemm.cl                           | 290
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp       |   9
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp       |  15
-rw-r--r--  src/runtime/CL/functions/CLGEMM.cpp                      |  30
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp                    |   5
-rw-r--r--  tests/validation/CL/GEMM.cpp                             | 203
11 files changed, 544 insertions, 88 deletions
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
index c808039567..b3a85a1706 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
@@ -52,7 +52,7 @@ public:
*
* @note The input and output tensors must have the same dimensions
*
- * @param[in] input Input tensor (Matrix C). Data types supported: F16/F32
+ * @param[in] input Input tensor (Matrix C). Data types supported: QS8/F16/F32
* @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result (alpha * AB + beta * C), output must contain the result obtained by @ref CLGEMMMatrixMultiplyKernel. Data type supported: same as @p input
* @param[in] beta Weight of matrix C
*/
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index 07ea3c12ac..7625358b8b 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -53,7 +53,7 @@ public:
CLGEMMMatrixMultiplyKernel &operator=(CLGEMMMatrixMultiplyKernel &&) = default;
/** Initialise the kernel's input, output and alpha
*
- * @param[in] input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: F16/F32
+ * @param[in] input0 Input tensor containing the interleaved Matrix A or the vector A. Data types supported: QS8/F16/F32
* @param[in] input1 Input tensor containing the transposed Matrix B if the first input tensor A is not a vector.
* If the output tensor is a vector, input1 must contain the matrix B not reshaped. Data type supported: same as @p input0
* @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 043b2b8115..080f497b7b 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -36,7 +36,7 @@ namespace arm_compute
{
class ICLTensor;
-/** Basic function to execute GEMM on OpenCL. Data types supported: F32, F16. This function calls the following OpenCL kernels:
+/** Basic function to execute GEMM on OpenCL. This function calls the following OpenCL kernels:
*
* -# @ref CLGEMMInterleave4x4Kernel (if the output tensor is a matrix)
* -# @ref CLGEMMTranspose1xWKernel (if the output tensor is a matrix)
@@ -53,11 +53,11 @@ public:
*
* @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
*
- * @note All tensors must have the same data type. Data types supported: F32, F16
+ * @note All tensors must have the same data type.
*
* @note Whilst the first input tensor can be a vector, the second input tensor must be at least a matrix
*
- * @param[in] a First input tensor (Matrix or Vector A). Data types supported: F32, F16
+ * @param[in] a First input tensor (Matrix or Vector A). Data types supported: QS8/F16/F32
* @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a.
* @param[in] c Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
* @param[out] output Output tensor. Data type supported: same as @p a
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 081edac8e1..6cf5ce2564 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -157,12 +157,15 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "gemm_interleave4x4_32bit", "gemm.cl" },
{ "gemm_ma_f16", "gemm.cl" },
{ "gemm_ma_f32", "gemm.cl" },
+ { "gemm_ma_qs8", "gemm.cl" },
{ "gemm_mm_u8", "gemm.cl" },
{ "gemm_mm_f16", "gemm.cl" },
{ "gemm_mm_f32_midgard", "gemm.cl" },
{ "gemm_mm_f32_bifrost", "gemm.cl" },
+ { "gemm_mm_qs8", "gemm.cl" },
{ "gemm_vm_f16", "gemm.cl" },
{ "gemm_vm_f32", "gemm.cl" },
+ { "gemm_vm_qs8", "gemm.cl" },
{ "gemm_lc_vm_f32", "gemm.cl" },
{ "gemm_transpose1x16", "gemm.cl" },
{ "gemm_transpose1x8", "gemm.cl" },
diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h
index 2c100c2e28..c0855db056 100644
--- a/src/core/CL/cl_kernels/fixed_point.h
+++ b/src/core/CL/cl_kernels/fixed_point.h
@@ -82,9 +82,9 @@ TYPE_ALIAS(short, qs16)
/* Computes max of fixed point types.
*
- * @param[in] type is the actual data type.
+ * @param[in] type the actual data type.
*
- * @return The result of the fixed point vector maximum.
+ * @return The result of the fixed point maximum.
*/
#define MAXQ_IMPL(type) \
inline type max_##type(type VopA, type VopB) \
@@ -103,9 +103,9 @@ MAXQ_IMPL(qs8x16)
/* Computes saturated addition of fixed point types.
*
- * @param[in] type is the actual data type.
+ * @param[in] type the actual data type.
*
- * @return The result of the fixed point vector addition. The result is saturated in case of overflow
+ * @return The result of the fixed point addition. The result is saturated in case of overflow
*/
#define ADDQ_SAT_IMPL(type) \
inline type add_sat_##type(type VopA, type VopB) \
@@ -124,9 +124,9 @@ ADDQ_SAT_IMPL(qs8x16)
/* Computes saturated subtraction of fixed point types.
*
- * @param[in] type is the actual data type.
+ * @param[in] type the actual data type.
*
- * @return The result of the fixed point vector subtraction. The result is saturated in case of overflow
+ * @return The result of the fixed point subtraction. The result is saturated in case of overflow
*/
#define SUBQ_SAT_IMPL(type) \
inline type sub_sat_##type(type VopA, type VopB) \
@@ -143,12 +143,12 @@ SUBQ_SAT_IMPL(qs8x16)
#define SUB_SAT_OP_EXPAND_STR(a, b, type, size) sub_sat_##type##x##size((a), (b))
#define SUB_SAT_OP_EXPAND(a, b, type, size) SUB_SAT_OP_EXPAND_STR(a, b, type, size)
-/* Saturate multiply of two fixed point vectors
+/* Saturate multiply of two fixed point numbers
*
- * @param[in] type is the actual data type.
- * @param[in] itype is the intermediate data type.
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
*
- * @return The result of the fixed point vector subtraction. The result is saturated in case of overflow
+ * @return The result of the fixed point multiplication. The result is saturated in case of overflow
*/
#define MULQ_SAT_IMPL(type, itype) \
inline type mul_sat_##type(type VopA, type VopB, int fixed_point_position) \
@@ -163,11 +163,50 @@ MULQ_SAT_IMPL(qs8x16, qs16x16)
#define MUL_SAT_OP_EXPAND_STR(a, b, type, size, position) mul_sat_##type##x##size((a), (b), (position))
#define MUL_SAT_OP_EXPAND(a, b, type, size, position) MUL_SAT_OP_EXPAND_STR(a, b, type, size, position)
-/** Saturate division of two fixed point vectors
+/* Saturate multiply-accumulate
+ *
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
+ *
+ * @return The result of the fixed point multiply-accumulate. The result is saturated in case of overflow
+ */
+#define MLAQ_SAT_IMPL(type, itype) \
+ type mla_sat_##type(type VopA, type VopB, type VopC, int fixed_point_position) \
+ { \
+ itype res = mad_sat(CONVERT(VopB, itype), CONVERT(VopC, itype), (itype)(1 << (fixed_point_position - 1))); \
+ return add_sat(VopA, CONVERT_SAT(res >> (itype)fixed_point_position, type)); \
+ }
+
+MLAQ_SAT_IMPL(qs8x8, qs16x8)
+MLAQ_SAT_IMPL(qs8x16, qs16x16)
+
+#define MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position) mla_sat_##type##x##size((a), (b), (c), (position))
+#define MLA_SAT_OP_EXPAND(a, b, c, type, size, position) MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position)
+
+/* Saturate multiply-accumulate long
+ *
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
+ *
+ * @return The result of the fixed point multiply-accumulate long. The result is saturated in case of overflow
+ */
+#define MLALQ_SAT_IMPL(type, itype) \
+ itype mlal_sat_##type(itype VopA, type VopB, type VopC, int fixed_point_position) \
+ { \
+ itype res = mad_sat(CONVERT(VopB, itype), CONVERT(VopC, itype), (itype)(1 << (fixed_point_position - 1))); \
+ return add_sat(VopA, res >> (itype)fixed_point_position); \
+ }
+
+MLALQ_SAT_IMPL(qs8x8, qs16x8)
+
+#define MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position) mlal_sat_##type##x##size((a), (b), (c), (position))
+#define MLAL_SAT_OP_EXPAND(a, b, c, type, size, position) MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position)
+
+/** Saturate division of two fixed point numbers
*
- * @param[in] stype is the actual scalar data type.
- * @param[in] type is the actual data type.
- * @param[in] itype is the intermediate data type.
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
*
* @return The result of the fixed point division. The result is saturated in case of overflow
*/
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index 9bec8d5d92..796b343bda 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -21,17 +21,18 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include "fixed_point.h"
#include "helpers.h"
/** This OpenCL kernel computes the "vector" 1x4 transposition of input matrix
*
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: U32/S32/F32
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -57,13 +58,13 @@ __kernel void gemm_transpose1x4(IMAGE_DECLARATION(src),
/** This OpenCL kernel computes the "vector" 1x8 transposition of input matrix
*
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F16
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: U16/S16/QS16/F16
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -89,13 +90,13 @@ __kernel void gemm_transpose1x8(IMAGE_DECLARATION(src),
/** This OpenCL kernel computes the "vector" 1x16 transposition of input matrix
*
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8/S8/QS8
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U8
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -127,7 +128,7 @@ __kernel void gemm_transpose1x16(IMAGE_DECLARATION(src),
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U32/S32/F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -162,13 +163,13 @@ __kernel void gemm_interleave4x4_32bit(IMAGE_DECLARATION(src),
/** This OpenCL kernel reshapes the input matrix transposing each 4x4 block and interleaving the values
*
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: U16/S16/F16
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: U16/S16/QS16/F16
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U16/S16/F16
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -203,13 +204,13 @@ __kernel void gemm_interleave4x4_16bit(IMAGE_DECLARATION(src),
/** This OpenCL kernel reshapes the input matrix transposing each 4x4 block and interleaving the values
*
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8/S8
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: U8/S8/QS8
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: U8/S8
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
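For readers unfamiliar with the reshape, here is a small self-contained C++ sketch of the 4x4 interleave these kernels perform, under the assumption that the height is a multiple of 4; the function is an illustration, not library code. Every block of four input rows becomes one output row whose values are taken column by column.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative interleave4x4: 4 input rows -> 1 output row, column by column.
    std::vector<int8_t> interleave4x4(const std::vector<int8_t> &src, int width, int height)
    {
        std::vector<int8_t> dst(static_cast<size_t>(width) * height);
        for(int y = 0; y < height; y += 4)
        {
            int8_t *out = dst.data() + static_cast<size_t>(y) * width; // destination row for this block of 4 rows
            for(int x = 0; x < width; ++x)
            {
                for(int r = 0; r < 4; ++r)
                {
                    *out++ = src[static_cast<size_t>(y + r) * width + x];
                }
            }
        }
        return dst;
    }

    int main()
    {
        const std::vector<int8_t> a = { 0, 1, 2,  3,
                                        4, 5, 6,  7,
                                        8, 9, 10, 11,
                                        12, 13, 14, 15 };
        for(int8_t v : interleave4x4(a, 4, 4))
        {
            std::printf("%d ", v); // 0 4 8 12 1 5 9 13 2 6 10 14 3 7 11 15
        }
        std::printf("\n");
        return 0;
    }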
@@ -250,13 +251,13 @@ __kernel void gemm_interleave4x4_8bit(IMAGE_DECLARATION(src),
*
* @note The data type must be passed at compile time -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
*
- * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: F32
+ * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: U8/S8/QS8/U16/S16/F16/U32/S32/F32
* @param[in] accum_stride_x Stride of the accumulate tensor in X dimension (in bytes)
* @param[in] accum_step_x accum_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] accum_stride_y Stride of the accumulate tensor in Y dimension (in bytes)
* @param[in] accum_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] accum_offset_first_element_in_bytes The offset of the first element in the accumulate tensor
- * @param[in] biases_ptr Pointer to the biases vector. Same as input.
+ * @param[in] biases_ptr Pointer to the biases vector. Same as @p accum_ptr
* @param[in] biases_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] biases_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] biases_offset_first_element_in_bytes The offset of the first element in the destination tensor
@@ -282,7 +283,7 @@ __kernel void gemm_accumulate_biases(
#if(defined WIDTH_MATRIX_B)
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_u8 and @ref gemm_transpose1x16_u8 before running the matrix multiplication
+ * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
*
* @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B
*
@@ -292,13 +293,13 @@ __kernel void gemm_accumulate_biases(
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported formats: U8
+ * @param[in] src1_ptr Pointer to the source matrix. Supported formats: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported formats: U8
+ * @param[out] dst_ptr Pointer to the destination matrix Supported formats: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -388,7 +389,7 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0),
#if(defined WIDTH_MATRIX_B && defined ALPHA)
/** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_f32 and @ref gemm_transpose1x4_f32 before running the matrix multiplication
+ * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
*
* @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA
*
@@ -398,13 +399,13 @@ __kernel void gemm_mm_u8(IMAGE_DECLARATION(src0),
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -487,7 +488,7 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0),
}
/** This OpenCL kernel is optimised for Bifrost. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_f32 and @ref gemm_transpose1x4_f32 before running the matrix multiplication
+ * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
*
* @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA
*
@@ -497,13 +498,13 @@ __kernel void gemm_mm_f32_midgard(IMAGE_DECLARATION(src0),
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -697,7 +698,7 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0),
}
/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_f16 and @ref gemm_transpose1x8_f16 before running the matrix multiplication
+ * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
*
* @attention The width of matrix B and the alpha's value need to be passed at compile time using -DWIDTH_MATRIX_B and -DALPHA
*
@@ -707,13 +708,13 @@ __kernel void gemm_mm_f32_bifrost(IMAGE_DECLARATION(src0),
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F16
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -795,6 +796,100 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0),
vstore8(c30, 0, (__global half *)(offset(&dst, 0, 3)));
}
+#if(defined FIXED_POINT_POSITION)
+/** This OpenCL kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1) in 8 bit fixed point precision
+ * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_8bit and @ref gemm_transpose1x16 before running the matrix multiplication
+ *
+ * @attention The width of matrix B, the alpha's value and the fixed point position need to be passed at compile time using -DWIDTH_MATRIX_B, -DALPHA and -DFIXED_POINT_POSITION
+ *
+ * @note: ALPHA must be passed in 8 bit fixed point format
+ *
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8
+ * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
+ * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
+ * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
+ * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_mm_qs8(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
+{
+ /* src_addr.s0 = address of matrix A */
+ /* src_addr.s1 = address of matrix B */
+
+ /* Compute address for matrix A and B */
+ int2 src_addr = (int2)(get_global_id(1), get_global_id(0)) * (int2)((src0_stride_y),
+ (src1_stride_y));
+
+ /* Add offset_first_element_in_bytes */
+ src_addr = src_addr + ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+
+ /* Compute end row address for matrix B */
+ int end_row_mtx_b = src_addr.s1 + WIDTH_MATRIX_B;
+
+ /* Reset accumulators */
+ short8 c00 = 0;
+ short8 c10 = 0;
+ short8 c20 = 0;
+ short8 c30 = 0;
+ short8 c01 = 0;
+ short8 c11 = 0;
+ short8 c21 = 0;
+ short8 c31 = 0;
+
+ /* This for loop performs 1 accumulation for each iteration */
+ for(; src_addr.s1 <= (end_row_mtx_b - 16); src_addr += (int2)(4, 16))
+ {
+ /* Load values from matrix A (interleaved) and matrix B (transposed) */
+ char4 a0 = vload4(0, ((__global char *)src0_ptr) + src_addr.s0);
+ char16 b0 = vload16(0, ((__global char *)src1_ptr) + src_addr.s1);
+
+ c00 = mlal_sat_qs8x8(c00, (char8)a0.s0, b0.s01234567, FIXED_POINT_POSITION);
+ c10 = mlal_sat_qs8x8(c10, (char8)a0.s1, b0.s01234567, FIXED_POINT_POSITION);
+ c20 = mlal_sat_qs8x8(c20, (char8)a0.s2, b0.s01234567, FIXED_POINT_POSITION);
+ c30 = mlal_sat_qs8x8(c30, (char8)a0.s3, b0.s01234567, FIXED_POINT_POSITION);
+
+ c01 = mlal_sat_qs8x8(c01, (char8)a0.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ c11 = mlal_sat_qs8x8(c11, (char8)a0.s1, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ c21 = mlal_sat_qs8x8(c21, (char8)a0.s2, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ c31 = mlal_sat_qs8x8(c31, (char8)a0.s3, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ }
+
+ /* Compute destination address */
+ Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+ /* Multiply by the weight of matrix product */
+ char16 c00_qs8 = convert_char16_sat((short16)(c00, c01));
+ char16 c10_qs8 = convert_char16_sat((short16)(c10, c11));
+ char16 c20_qs8 = convert_char16_sat((short16)(c20, c21));
+ char16 c30_qs8 = convert_char16_sat((short16)(c30, c31));
+
+ c00_qs8 = mul_sat_qs8x16(c00_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+ c10_qs8 = mul_sat_qs8x16(c10_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+ c20_qs8 = mul_sat_qs8x16(c20_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+ c30_qs8 = mul_sat_qs8x16(c30_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+
+ /* Store 16x4 block */
+ vstore16(c00_qs8, 0, (__global char *)(offset(&dst, 0, 0)));
+ vstore16(c10_qs8, 0, (__global char *)(offset(&dst, 0, 1)));
+ vstore16(c20_qs8, 0, (__global char *)(offset(&dst, 0, 2)));
+ vstore16(c30_qs8, 0, (__global char *)(offset(&dst, 0, 3)));
+}
+#endif // (defined FIXED_POINT_POSITION)
+
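To make the arithmetic of the kernel above concrete, here is a scalar C++ sketch of one output element as gemm_mm_qs8 computes it: each product is rounded and shifted back to the QS8 scale, accumulated in 16 bit, then narrowed with saturation and scaled by ALPHA in the same fixed point format. This mirrors the mlal_sat/mul_sat path as I read it; it is an illustration, not the reference implementation used by the validation suite.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // One output element of a QS8 GEMM: accumulate in 16 bit with a rounding
    // shift per product, narrow with saturation, then scale by alpha (also QS8).
    int8_t gemm_qs8_element(const std::vector<int8_t> &a_row, const std::vector<int8_t> &b_col,
                            int8_t alpha_qs8, int fpp)
    {
        auto sat8  = [](int32_t v) { return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, v))); };
        auto sat16 = [](int32_t v) { return static_cast<int16_t>(std::min<int32_t>(32767, std::max<int32_t>(-32768, v))); };

        int16_t acc = 0;
        for(std::size_t k = 0; k < a_row.size(); ++k)
        {
            const int32_t prod = int32_t(a_row[k]) * int32_t(b_col[k]) + (1 << (fpp - 1));
            acc                = sat16(acc + (prod >> fpp));
        }
        // Narrow to QS8 with saturation, then multiply by alpha with the same rounding shift.
        const int32_t scaled = int32_t(sat8(acc)) * int32_t(alpha_qs8) + (1 << (fpp - 1));
        return sat8(scaled >> fpp);
    }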
#if(defined WIDTH_VECTOR_A)
/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1)
*
@@ -808,13 +903,13 @@ __kernel void gemm_mm_f16(IMAGE_DECLARATION(src0),
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -874,13 +969,13 @@ __kernel void gemm_vm_f32(IMAGE_DECLARATION(src0),
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F16
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -931,6 +1026,92 @@ __kernel void gemm_vm_f16(IMAGE_DECLARATION(src0),
vstore8(acc, 0, (__global half *)(offset(&dst, 0, 0)));
}
+
+#if(defined FIXED_POINT_POSITION)
+/** This OpenCL kernel computes the vector by matrix multiplication between the vector A (src0) and matrix B (src1) in 8 bit fixed point
+ *
+ * @attention The width of vector A, the width of matrix B, the alpha's value and the fixed point position need to be passed at compile time using -DWIDTH_VECTOR_A, -DWIDTH_MATRIX_B, -DALPHA and -DFIXED_POINT_POSITION
+ *
+ * @attention The input vector A and matrix B must not be reshaped
+ *
+ * @note: ALPHA must be passed in 8 bit fixed point format
+ *
+ * @param[in] src0_ptr Pointer to the source matrix. Supported data types: QS8
+ * @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src0_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
+ * @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src1_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
+ * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
+ * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_vm_qs8(IMAGE_DECLARATION(src0),
+ IMAGE_DECLARATION(src1),
+ IMAGE_DECLARATION(dst))
+{
+ int idx = get_global_id(0) * 16;
+
+ /* Compute the address for the vector A and matrix B */
+ int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
+ src_addr.s1 += idx;
+
+ int end_row_vec_a = src_addr.s0 + WIDTH_VECTOR_A;
+
+ short8 acc0 = 0;
+ short8 acc1 = 0;
+
+ /* This for loop performs 4 accumulations per iteration */
+ for(; src_addr.s0 <= (end_row_vec_a - 4); src_addr += (int2)(4, 4 * src1_stride_y))
+ {
+ char4 a0 = vload4(0, (__global char *)(src0_ptr + src_addr.s0));
+ char16 b0 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 0 * src1_stride_y));
+ char16 b1 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 1 * src1_stride_y));
+ char16 b2 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 2 * src1_stride_y));
+ char16 b3 = vload16(0, (__global char *)(src1_ptr + src_addr.s1 + 3 * src1_stride_y));
+
+ acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s0, b0.s01234567, FIXED_POINT_POSITION);
+ acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s1, b1.s01234567, FIXED_POINT_POSITION);
+ acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s2, b2.s01234567, FIXED_POINT_POSITION);
+ acc0 = mlal_sat_qs8x8(acc0, (char8)a0.s3, b3.s01234567, FIXED_POINT_POSITION);
+
+ acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s1, b1.s89ABCDEF, FIXED_POINT_POSITION);
+ acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s2, b2.s89ABCDEF, FIXED_POINT_POSITION);
+ acc1 = mlal_sat_qs8x8(acc1, (char8)a0.s3, b3.s89ABCDEF, FIXED_POINT_POSITION);
+ }
+
+ /* Left-over accumulations */
+ for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(1, src1_stride_y))
+ {
+ char a0 = *((__global char *)(src0_ptr + src_addr.s0));
+ char16 b0 = vload16(0, (__global char *)(src1_ptr + src_addr.s1));
+
+ acc0 = mlal_sat_qs8x8(acc0, (char8)a0, b0.s01234567, FIXED_POINT_POSITION);
+ acc1 = mlal_sat_qs8x8(acc1, (char8)a0, b0.s89ABCDEF, FIXED_POINT_POSITION);
+ }
+
+ /* Compute destination address */
+ Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+ /* Multiply by the weight of matrix product */
+ char16 acc_qs8 = convert_char16_sat((short16)(acc0, acc1));
+
+ acc_qs8 = mul_sat_qs8x16(acc_qs8, (char16)ALPHA, FIXED_POINT_POSITION);
+
+ /* Store 16 values */
+ vstore16(acc_qs8, 0, (__global char *)(offset(&dst, 0, 0)));
+}
+#endif /* #if(defined FIXED_POINT_POSITION) */
#endif /* (defined WIDTH_VECTOR_A) */
#endif /* (defined WIDTH_MATRIX_B && defined ALPHA) */
@@ -945,7 +1126,7 @@ __kernel void gemm_vm_f16(IMAGE_DECLARATION(src0),
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -974,13 +1155,15 @@ __kernel void gemm_ma_f32(IMAGE_DECLARATION(src),
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices taking into account that the second matrix might be weighted by a scalar value beta:
*
+ * @attention The beta's value needs to be passed at compile time using -DBETA
+ *
* @param[in] src_ptr Pointer to the source matrix. Supported data types: F16
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F16
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
@@ -1006,6 +1189,47 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src),
/* Store final result in axb matrix */
vstore8(out, 0, (__global half *)dst.ptr);
}
+
+#if(defined FIXED_POINT_POSITION)
+/** This OpenCL kernel performs the in-place matrix addition between 2 matrices in 8 bit fixed point taking into account that the second matrix might be weighted by a scalar value beta:
+ *
+ * @attention The beta's value and the fixed point position need to be passed at compile time using -DBETA and -DFIXED_POINT_POSITION
+ *
+ * @note: BETA must be passed in 8 bit fixed point format
+ *
+ * @param[in] src_ptr Pointer to the source matrix. Supported data types: QS8
+ * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source matrix
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
+ * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_ma_qs8(IMAGE_DECLARATION(src),
+ IMAGE_DECLARATION(dst))
+{
+ /* Compute source and destination addresses */
+ Image src = CONVERT_TO_IMAGE_STRUCT(src);
+ Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+
+ /* Load values from A x B */
+ char16 alpha_ab = vload16(0, (__global char *)dst.ptr);
+
+ /* Load values from Matrix C */
+ char16 c = vload16(0, (__global char *)src.ptr);
+
+ /* Computes alpha * axb + beta * c */
+ char16 out = mla_sat_qs8x16(alpha_ab, (char16)BETA, c, FIXED_POINT_POSITION);
+
+ /* Store final result in axb matrix */
+ vstore16(out, 0, (__global char *)dst.ptr);
+}
+#endif /* #if(defined FIXED_POINT_POSITION) */
#endif /* (defined BETA) */
#if(defined WIDTH_VECTOR_A)
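A quick numeric check of the beta-weighted addition performed by gemm_ma_qs8 above, with values chosen purely for illustration (BETA is the fixed point constant baked into the build options):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const int    fpp      = 3;  // Q4.3
        const int8_t alpha_ab = 40; // 5.0: the A*B result already scaled by alpha
        const int8_t beta     = 4;  // 0.5 in Q4.3, i.e. the value passed as -DBETA
        const int8_t c        = 16; // 2.0: the matrix C element

        // out = alpha_ab + ((beta * c + rounding) >> fpp), saturated to the QS8 range
        const int32_t res = int32_t(alpha_ab) + ((int32_t(beta) * int32_t(c) + (1 << (fpp - 1))) >> fpp);
        const int8_t  out = static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, res)));
        std::cout << int(out) << '\n'; // 5.0 + 0.5 * 2.0 = 6.0 -> prints 48
        return 0;
    }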
@@ -1021,7 +1245,7 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src),
* @param[in] src0_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src0_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: F32
+ * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
* @param[in] src1_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src1_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src1_stride_y Stride of the source matrix in Y dimension (in bytes)
@@ -1029,7 +1253,7 @@ __kernel void gemm_ma_f16(IMAGE_DECLARATION(src),
* @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes)
* @param[in] src1_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: F32
+ * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
diff --git a/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp
index 343838f2f9..5883dd698b 100644
--- a/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixAdditionKernel.cpp
@@ -28,6 +28,7 @@
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
@@ -40,10 +41,9 @@ CLGEMMMatrixAdditionKernel::CLGEMMMatrixAdditionKernel()
{
}
-void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *output, const float beta)
+void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *output, float beta)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));
ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));
@@ -53,7 +53,8 @@ void CLGEMMMatrixAdditionKernel::configure(const ICLTensor *input, ICLTensor *ou
const unsigned int num_elems_processed_per_iteration = max_cl_vector_width / data_size_from_type(input->info()->data_type());
std::ostringstream ma_arguments;
- ma_arguments << "-DBETA=" << beta;
+ ma_arguments << "-DBETA=" << (input->info()->data_type() == DataType::QS8 ? scvt_qs8_f32(beta, input->info()->fixed_point_position()) : beta) << " ";
+ ma_arguments << "-DFIXED_POINT_POSITION=" << input->info()->fixed_point_position();
std::set<std::string> build_opts;
build_opts.emplace(ma_arguments.str());
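The notable part of this configure() change is that BETA (and, in the multiply kernel below, ALPHA) is baked into the build options already converted to 8 bit fixed point via scvt_qs8_f32. A hedged sketch of what such a float-to-QS8 conversion amounts to; the rounding and saturation details of the real helper may differ, and the function name here is only illustrative.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Sketch of a float -> QS8 conversion: scale by 2^fpp, round, saturate.
    static int8_t float_to_qs8(float x, int fpp)
    {
        const int32_t v = static_cast<int32_t>(std::lround(x * float(1 << fpp)));
        return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, v)));
    }

    int main()
    {
        // With fixed_point_position = 3, beta = 0.5f would be emitted as -DBETA=4.
        std::cout << int(float_to_qs8(0.5f, 3)) << '\n'; // prints 4
        return 0;
    }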
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index d7388e8579..7c5b3d7866 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -25,12 +25,12 @@
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/AccessWindowTranspose.h"
-
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
@@ -50,10 +50,10 @@ CLGEMMMatrixMultiplyKernel::CLGEMMMatrixMultiplyKernel()
void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QS8, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
+
if(output->info()->dimension(1) == 1)
{
ARM_COMPUTE_ERROR_ON(input0->info()->dimension(0) != input1->info()->dimension(1));
@@ -74,7 +74,8 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
std::ostringstream mm_arguments;
mm_arguments << "-DWIDTH_MATRIX_B=" << input1->info()->dimension(0) << " ";
- mm_arguments << "-DALPHA=" << alpha << " ";
+ mm_arguments << "-DALPHA=" << (input0->info()->data_type() == DataType::QS8 ? scvt_qs8_f32(alpha, input0->info()->fixed_point_position()) : alpha) << " ";
+ mm_arguments << "-DFIXED_POINT_POSITION=" << input0->info()->fixed_point_position() << " ";
std::set<std::string> build_opts;
// Check if the output tensor is a vector. If so,the kernel runs the vector-matrix multiplication
@@ -98,7 +99,9 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
update_window_and_padding(win, input0_access, input1_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ Coordinates coord;
+ coord.set_num_dimensions(output->info()->num_dimensions());
+ output_access.set_valid_region(win, ValidRegion(coord, output->info()->tensor_shape()));
ICLKernel::configure(win);
}
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 07b19421d6..6d22825694 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -45,13 +45,11 @@ CLGEMM::CLGEMM()
void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::F32, DataType::F16);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32, DataType::F16);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QS8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
if(c != nullptr)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(c, 1, DataType::F32, DataType::F16);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, c);
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(1) != c->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A");
ARM_COMPUTE_ERROR_ON_MSG(b->info()->dimension(0) != c->info()->dimension(0), "The C matrix must have the same number of columns as the matrix B");
@@ -59,7 +57,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
ARM_COMPUTE_ERROR_ON_MSG(c->info()->dimension(1) != output->info()->dimension(1), "The C matrix must have the same number of columns as the output matrix");
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
// Check if the first input tensor is a vector. If so, all the kernels for reshaping the tensors can be skipped
@@ -73,25 +70,14 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
shape_tmp_a.set(0, a->info()->dimension(0) * 4);
shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.0f));
- if(DataType::F32 == a->info()->data_type())
- {
- shape_tmp_b.set(0, b->info()->dimension(1) * 4);
- shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 4.0f));
- }
- else if(DataType::F16 == a->info()->data_type())
- {
- shape_tmp_b.set(0, b->info()->dimension(1) * 8);
- shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 8.0f));
- }
- else
- {
- ARM_COMPUTE_ERROR("DataType not supported");
- }
-
- TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type());
+ const unsigned int transpose_w = max_cl_vector_width / data_size_from_type(b->info()->data_type());
+ shape_tmp_b.set(0, b->info()->dimension(1) * transpose_w);
+ shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / static_cast<float>(transpose_w)));
+
+ TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type(), a->info()->fixed_point_position());
_tmp_a.allocator()->init(info_a);
- TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type());
+ TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type(), b->info()->fixed_point_position());
_tmp_b.allocator()->init(info_b);
// Configure interleave kernel
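The per-data-type branches for shape_tmp_b are replaced by a single expression: the transpose width is the number of elements that fit in a 16-byte OpenCL vector, so the same two lines cover F32 (4), F16 (8) and QS8 (16). A small stand-alone check of that arithmetic; element sizes are hard-coded here for illustration, whereas the library derives them from the tensor info.

    #include <cstdio>

    int main()
    {
        const unsigned int max_cl_vector_width = 16; // bytes
        const struct { const char *name; unsigned int size; } types[] = { { "F32", 4 }, { "F16", 2 }, { "QS8", 1 } };

        const unsigned int b_width = 120, b_height = 60; // an arbitrary matrix B shape

        for(const auto &t : types)
        {
            const unsigned int transpose_w = max_cl_vector_width / t.size;
            // Reshaped matrix B: width grows by transpose_w, height shrinks accordingly (rounded up).
            const unsigned int tmp_b_x = b_height * transpose_w;
            const unsigned int tmp_b_y = (b_width + transpose_w - 1) / transpose_w;
            std::printf("%s: transpose_w=%u tmp_b=%ux%u\n", t.name, transpose_w, tmp_b_x, tmp_b_y);
        }
        return 0;
    }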
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index 15d5f4effb..730735590d 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -44,12 +44,10 @@ NEGEMM::NEGEMM()
void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::F32, DataType::F16, DataType::QS8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::F32, DataType::F16, DataType::QS8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(d, 1, DataType::F32, DataType::F16, DataType::QS8);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, d);
if(c != nullptr)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(c, 1, DataType::F32, DataType::F16, DataType::QS8);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, c);
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(1) != c->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A");
ARM_COMPUTE_ERROR_ON_MSG(b->info()->dimension(0) != c->info()->dimension(0), "The C matrix must have the same number of columns as the matrix B");
@@ -57,7 +55,6 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
ARM_COMPUTE_ERROR_ON_MSG(c->info()->dimension(1) != d->info()->dimension(1), "The C matrix must have the same number of columns as the output matrix");
}
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, d);
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
// Check if the first input tensor is a vector. If so, all the kernels for reshaping the tensors can be skipped
diff --git a/tests/validation/CL/GEMM.cpp b/tests/validation/CL/GEMM.cpp
new file mode 100644
index 0000000000..84d65bd061
--- /dev/null
+++ b/tests/validation/CL/GEMM.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "CL/CLAccessor.h"
+#include "CL/Helper.h"
+#include "Globals.h"
+#include "TensorLibrary.h"
+#include "TypePrinter.h"
+#include "Utils.h"
+#include "dataset/GEMMDataset.h"
+#include "validation/Datasets.h"
+#include "validation/Reference.h"
+#include "validation/Validation.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLGEMM.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+
+#include "boost_wrapper.h"
+
+#include <random>
+#include <string>
+
+using namespace arm_compute;
+using namespace arm_compute::test;
+using namespace arm_compute::test::cl;
+using namespace arm_compute::test::validation;
+
+namespace
+{
+const float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+const float tolerance_qs8 = 1.0f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
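+// Note: tolerance_qs8 is assumed to be applied to the raw fixed point values, so 1.0f tolerates a
+// one-LSB (2^-fixed_point_position) rounding difference between the OpenCL kernels and the reference.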
+
+CLTensor compute_gemm(const TensorShape &src_shape1, const TensorShape &src_shape2, const TensorShape &src_shape3,
+ const TensorShape &out_shape, float alpha, float beta, DataType dt, int fixed_point_position = 0)
+{
+ // Create tensors
+ CLTensor src1 = create_tensor(src_shape1, dt, 1, fixed_point_position);
+ CLTensor src2 = create_tensor(src_shape2, dt, 1, fixed_point_position);
+ CLTensor src3 = create_tensor(src_shape3, dt, 1, fixed_point_position);
+ CLTensor dst = create_tensor(out_shape, dt, 1, fixed_point_position);
+
+ // Create and configure function
+ CLGEMM gemm;
+ gemm.configure(&src1, &src2, &src3, &dst, alpha, beta);
+
+ // Allocate tensors
+ src1.allocator()->allocate();
+ src2.allocator()->allocate();
+ src3.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ BOOST_TEST(!src1.info()->is_resizable());
+ BOOST_TEST(!src2.info()->is_resizable());
+ BOOST_TEST(!src3.info()->is_resizable());
+ BOOST_TEST(!dst.info()->is_resizable());
+
+ // Fill tensors
+ if(dt == DataType::F32)
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(CLAccessor(src1), distribution, 0);
+ library->fill(CLAccessor(src2), distribution, 1);
+ library->fill(CLAccessor(src3), distribution, 2);
+ }
+ else
+ {
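+        // fill_tensor_uniform is assumed to fill fixed point tensors uniformly over the full range of
+        // their underlying integer storage, so no explicit distribution is passed for QS8.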
+ library->fill_tensor_uniform(CLAccessor(src1), 0);
+ library->fill_tensor_uniform(CLAccessor(src2), 1);
+ library->fill_tensor_uniform(CLAccessor(src3), 2);
+ }
+
+ // Compute function
+ gemm.run();
+
+ return dst;
+}
+} // namespace
+
+#ifndef DOXYGEN_SKIP_THIS
+BOOST_AUTO_TEST_SUITE(CL)
+BOOST_AUTO_TEST_SUITE(GEMM)
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(Configuration,
+ SmallGEMMDataset() * boost::unit_test::data::make({ DataType::F32, DataType::QS8 }),
+ gemm_set, dt)
+{
+    // Use a non-zero fixed point position only for fixed point data types
+ int fixed_point_position = (dt == DataType::F32) ? 0 : 3;
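+    // With QS8 and fixed_point_position = 3 (a Q4.3 format, assuming the QSm.n convention where n is the
+    // number of fractional bits), the raw int8 range [-128, 127] maps to [-16.0, 15.875] in steps of 0.125.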
+
+ // Create tensors
+ CLTensor src1 = create_tensor(gemm_set.shape_a, dt, 1, fixed_point_position);
+ CLTensor src2 = create_tensor(gemm_set.shape_b, dt, 1, fixed_point_position);
+ CLTensor src3 = create_tensor(gemm_set.shape_c, dt, 1, fixed_point_position);
+ CLTensor dst = create_tensor(gemm_set.shape_d, dt, 1, fixed_point_position);
+
+ BOOST_TEST(src1.info()->is_resizable());
+ BOOST_TEST(src2.info()->is_resizable());
+ BOOST_TEST(src3.info()->is_resizable());
+ BOOST_TEST(dst.info()->is_resizable());
+
+ // Create and configure function
+ CLGEMM gemm;
+ gemm.configure(&src1, &src2, &src3, &dst, gemm_set.alpha, gemm_set.beta);
+
+ // Validate valid region
+ const ValidRegion src1_valid_region = shape_to_valid_region(gemm_set.shape_a);
+ const ValidRegion src2_valid_region = shape_to_valid_region(gemm_set.shape_b);
+ const ValidRegion src3_valid_region = shape_to_valid_region(gemm_set.shape_c);
+ const ValidRegion dst_valid_region = shape_to_valid_region(gemm_set.shape_d);
+
+ validate(src1.info()->valid_region(), src1_valid_region);
+ validate(src2.info()->valid_region(), src2_valid_region);
+ validate(src3.info()->valid_region(), src3_valid_region);
+ validate(dst.info()->valid_region(), dst_valid_region);
+}
+
+BOOST_AUTO_TEST_SUITE(Float)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(SmallGEMM, SmallGEMMDataset() * boost::unit_test::data::make(DataType::F32),
+ gemm_set, dt)
+{
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt);
+
+ // Compute function
+ CLTensor dst = compute_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_f32);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(LargeGEMM, LargeGEMMDataset() * boost::unit_test::data::make(DataType::F32),
+ gemm_set, dt)
+{
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt);
+
+ // Compute function
+ CLTensor dst = compute_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_f32);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(SmallGEMM, SmallGEMMDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(4, 7),
+ gemm_set, dt, fixed_point_position)
+{
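+    // xrange(4, 7) is exclusive of its upper bound, so fixed_point_position takes the values 4, 5 and 6,
+    // i.e. Q3.4 to Q1.6 formats for QS8.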
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt, fixed_point_position);
+
+ // Compute function
+ CLTensor dst = compute_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt, fixed_point_position);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_qs8);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(LargeGEMM, LargeGEMMDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(4, 7),
+ gemm_set, dt, fixed_point_position)
+{
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt, fixed_point_position);
+
+ // Compute function
+ CLTensor dst = compute_gemm(gemm_set.shape_a, gemm_set.shape_b, gemm_set.shape_c, gemm_set.shape_d, gemm_set.alpha, gemm_set.beta, dt, fixed_point_position);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, tolerance_qs8);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+#endif