From e16c8906a2aedf00e910754a01fca8bc4189cfc7 Mon Sep 17 00:00:00 2001 From: Gian Marco Iodice Date: Fri, 14 Jun 2019 16:11:10 +0100 Subject: COMPMID-2053: Fuse bias addition with CLGEMMMatrixMultiplyReshapedKernel Change-Id: I5bfd38c94a6fd18a1cba2104f7e1b04e7bef6ec2 Signed-off-by: Gian Marco Iodice Reviewed-on: https://review.mlplatform.org/c/1359 Comments-Addressed: Arm Jenkins Reviewed-by: Georgios Pinitas Tested-by: Arm Jenkins --- .../kernels/CLGEMMMatrixMultiplyReshapedKernel.h | 17 +- src/core/CL/cl_kernels/gemm.cl | 119 +++++++--- .../kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp | 86 ++++++- .../CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp | 4 +- src/runtime/CL/functions/CLGEMM.cpp | 60 ++--- .../CL/functions/CLGEMMConvolutionLayer.cpp | 6 +- tests/framework/Macros.h | 9 +- tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp | 69 ++++-- .../CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp | 27 +-- tests/validation/fixtures/GEMMFixture.h | 255 ++++++++++++--------- 10 files changed, 416 insertions(+), 236 deletions(-) diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h index 4052a09162..68ab94a31d 100644 --- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h +++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h @@ -51,8 +51,10 @@ public: * * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4 * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3 + * @param[in] input2 Input tensor containing the bias matrix. Data type supported: same as @p input0. * @param[out] output Output tensor to store the result of matrix multiplication. 
Data type supported: same as @p input0 * @param[in] alpha Weight of the matrix product + * @param[in] beta Weight of the matrix bias * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. Only the following values are supported: * lhs_info.m0: 2,3,4,5,6,7,8 * lhs_info.k0: 2,3,4,8,16 @@ -65,14 +67,17 @@ public: * * @note lhs_info.k0 must be equal to rhs_info.k0 */ - void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - const GEMMReshapeInfo &gemm_info); + void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, + const GEMMRHSMatrixInfo &rhs_info, + const GEMMReshapeInfo &gemm_info); /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyReshapedKernel * * @param[in] input0 Input tensor containing the LHS reshaped matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less or equal than 4 * @param[in] input1 Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less or equal than 3 + * @param[in] input2 Input tensor info containing the bias matrix. Data type supported: same as @p input0. * @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0 * @param[in] alpha Weight of the matrix product + * @param[in] beta Weight of the matrix bias * @param[in] lhs_info LHS matrix information used for reshaping the input0 tensor. 
Only the following values are supported: * lhs_info.m0: 2,3,4,5,6,7,8 * lhs_info.k0: 2,3,4,8,16 @@ -87,8 +92,9 @@ public: * * @return a status */ - static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, float alpha, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - const GEMMReshapeInfo &gemm_info); + static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, + const GEMMRHSMatrixInfo &rhs_info, + const GEMMReshapeInfo &gemm_info); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; @@ -96,11 +102,14 @@ public: private: const ICLTensor *_input0; const ICLTensor *_input1; + const ICLTensor *_input2; ICLTensor *_output; bool _slide_matrix_b; bool _reinterpret_output_as_3d; unsigned int _k; bool _use_dummy_work_items; + bool _add_bias; + bool _broadcast_bias; }; } // namespace arm_compute #endif /*__ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H__*/ \ No newline at end of file diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl index 2ac2eb7c32..7ada14c774 100644 --- a/src/core/CL/cl_kernels/gemm.cl +++ b/src/core/CL/cl_kernels/gemm.cl @@ -1042,11 +1042,12 @@ __kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src), * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix * @param[in] bias_ptr (Optional)Pointer to the bias reshaped matrix. 
Supported data type: same as @p lhs_ptr - * @param[in] bias_stride_x (Optional)Stride of the bias reshaped matrix in X dimension (in bytes) - * @param[in] bias_step_x (Optional)bias_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] bias_stride_y (Optional)Stride of the bias reshaped matrix in Y dimension (in bytes) - * @param[in] bias_step_y (Optional)bias_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] bias_offset_first_element_in_bytes (Optional)The offset of the first element in the bias reshaped matrix + * @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes) + * @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes) + * @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix * @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as @p lhs_ptr * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) @@ -1055,7 +1056,7 @@ __kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src), * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes) * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes) - * @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride 
of the bias matrix in Z dimension (in bytes) * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D) * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) @@ -1415,10 +1416,10 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs), * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes) * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix - * @param[in] bias_ptr (Optional) Pointer to the bias reshaped matrix. Supported data type: same as @p lhs_ptr - * @param[in] bias_stride_x (Optional) Stride of the bias reshaped matrix in X dimension (in bytes) + * @param[in] bias_ptr (Optional) Pointer to the bias matrix. 
Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes) * @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] bias_stride_y (Optional) Stride of the bias reshaped matrix in Y dimension (in bytes) + * @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes) * @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes) * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix * @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as @p lhs_ptr @@ -1429,7 +1430,7 @@ __kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs), * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes) * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes) - * @param[in] bias_stride_z (Optional)Stride of the bias reshaped matrix in Z dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes) * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) * @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D) * @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) @@ -1804,36 +1805,49 @@ __kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs), * -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor * (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix NOT reshaped * - * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. 
Supported data type: F16/F32 - * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes) - * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes) - * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix - * @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr - * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes) - * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes) - * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as @p lhs_ptr - * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix - * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped. 
- * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes) - * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) + * @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: F16/F32 + * @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes) + * @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes) + * @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix + * @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr + * @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes) + * @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes) + * @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix + * @param[in] bias_ptr (Optional) Pointer to the bias matrix. 
Supported data type: same as @p lhs_ptr + * @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes) + * @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes) + * @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix + * @param[out] dst_ptr Pointer to the destination matrix Supported data type: same as @p lhs_ptr + * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) + * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) + * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix + * @param[in] k Number of columns in LHS matrix and rows in RHS matrix not reshaped. 
+ * @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes) + * @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes) + * @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes) + * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) */ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs), IMAGE_DECLARATION(rhs), +#if defined(BETA) + IMAGE_DECLARATION(bias), +#endif // defined(BETA) IMAGE_DECLARATION(dst), uint k, uint lhs_stride_z, uint rhs_stride_z, +#if defined(BETA) + uint bias_stride_z, +#endif //defined(BETA) uint dst_stride_z #if defined(REINTERPRET_OUTPUT_AS_3D) , @@ -1892,8 +1906,8 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs), // Initialize the accumulators REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0; - REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0; - REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0); + REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... 
zlhs7=0; + REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0); for(int i = 0; i < k; i += K0) { @@ -1910,7 +1924,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs), LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X * sizeof(DATA_TYPE), zlhs); // Load values from RHS matrix - LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X * sizeof(DATA_TYPE), zrhs); + LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X * sizeof(DATA_TYPE), zero); // Accumulate ARM_DOT_K0XN0(a0, b, c0); @@ -1942,7 +1956,7 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs), __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * dst_stride_y); - REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0; + REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); #if defined(REINTERPRET_OUTPUT_AS_3D) @@ -1964,8 +1978,39 @@ __kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs), SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA); #endif // defined(ALPHA) + // Add beta*bias +#if defined(BETA) +#if defined(BROADCAST_BIAS) + __global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)); + + LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero); + +#ifndef UNIT_BETA + SCALE_BLOCK(1, DATA_TYPE, bias, BETA); +#endif // UNIT_BETA + + // c = c + bias[broadcasted] + ADD_BLOCK_BROADCAST(M0, c, bias0); + +#else // defined(BROADCAST_BIAS) + __global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * bias_stride_y) + get_global_id( + 2) * bias_stride_z; + + LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero); + +#ifndef UNIT_BETA + SCALE_BLOCK(M0, DATA_TYPE, bias, BETA); +#endif // UNIT_BETA + + // c = c + bias + ADD_BLOCK(M0, c, bias); + +#endif // 
defined(BROADCAST_BIAS) +#endif // defined(BETA) + // Store output block STORE_BLOCK(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout); + #undef LHS_BLOCK_SIZE #undef LHS_OFFSET_X #undef LHS_STEP_X diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp index 59afa47f6f..4436726852 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.cpp @@ -56,8 +56,9 @@ namespace { using ElementsProcessed = Steps; -Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, float alpha, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - const GEMMReshapeInfo &gemm_info) +Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info, + const GEMMRHSMatrixInfo &rhs_info, + const GEMMReshapeInfo &gemm_info) { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output); @@ -86,6 +87,22 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, tensor_shape1.set(0, n); tensor_shape1.set(1, k); + if(input2 != nullptr && !(helpers::float_ops::is_zero(beta))) + { + const int input2_dim0 = static_cast(input2->dimension(0)); + const int input2_dim1 = static_cast(input2->dimension(1)); + + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input2, input1); + if(gemm_info.broadcast_bias()) + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim1 != 1 || input2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted"); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MSG((input2_dim0 != n || input2_dim1 != m), "Incorrect dimension of bias matrix"); + } + } + const TensorInfo tensor_info0 = input0->clone()->set_tensor_shape(tensor_shape0); const TensorInfo tensor_info1 = 
input1->clone()->set_tensor_shape(tensor_shape1); @@ -105,7 +122,8 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, return Status{}; } -std::pair validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, +std::pair validate_and_configure_window(ITensorInfo *input0, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const GEMMLHSMatrixInfo &lhs_info, + const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info, ElementsProcessed &num_elements_processed) { unsigned int &num_elems_processed_per_iteration_x = num_elements_processed[0]; @@ -152,8 +170,24 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe ceil_to_multiple(output->dimension(0), num_elems_processed_per_iteration_x), output->dimension(1) + bottom_pad); - window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop - update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor + if(input2 != nullptr) + { + const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x; + + const int bias_processed_per_iteration_y = gemm_info.broadcast_bias() ? 
1 : num_elems_processed_per_iteration_y; + + AccessWindowStatic input2_access(input2, 0, 0, + ceil_to_multiple(input2->dimension(0), bias_processed_per_iteration_x), + ceil_to_multiple(input2->dimension(1), bias_processed_per_iteration_y)); + + window_changed = update_window_and_padding(win, input0_access, input1_access, input2_access) || // window used by the execute_window_loop + update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor + } + else + { + window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop + update_window_and_padding(win_out, output_access); // window used to update the padding requirements of output tensor + } output_access.set_valid_region(win_out, ValidRegion(Coordinates(0, 0), output->tensor_shape())); @@ -169,23 +203,28 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe } // namespace CLGEMMMatrixMultiplyReshapedKernel::CLGEMMMatrixMultiplyReshapedKernel() - : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_output_as_3d(false), _k(1), _use_dummy_work_items(false) + : _input0(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr), _slide_matrix_b(true), _reinterpret_output_as_3d(false), _k(1), _use_dummy_work_items(false), _add_bias(false), + _broadcast_bias(false) { } -void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, const GEMMLHSMatrixInfo &lhs_info, +void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, + const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input0, input1, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), 
output->info(), alpha, lhs_info, rhs_info, gemm_info)); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input0->info(), input1->info(), (input2 != nullptr ? input2->info() : nullptr), output->info(), alpha, beta, lhs_info, rhs_info, gemm_info)); _input0 = input0; _input1 = input1; + _input2 = helpers::float_ops::is_zero(beta) ? nullptr : input2; _output = output; _reinterpret_output_as_3d = (gemm_info.depth_output_gemm3d() != 0); _k = gemm_info.k(); _use_dummy_work_items = preferred_dummy_work_items_support(CLKernelLibrary::get().get_device()); + _add_bias = _input2 != nullptr; + _broadcast_bias = gemm_info.broadcast_bias(); // Check if we need to slide the matrix B const unsigned int num_dimensions_input0 = _input0->info()->num_dimensions(); @@ -194,7 +233,7 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons ElementsProcessed num_elements_processed{}; // Configure kernel window - auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed); + auto win_config = validate_and_configure_window(input0->info(), input1->info(), input2 != nullptr ? 
input2->info() : nullptr, output->info(), lhs_info, rhs_info, gemm_info, num_elements_processed); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); ICLKernel::configure_internal(win_config.second); @@ -202,9 +241,12 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons CLBuildOptions build_opts; build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input0->info()->data_type())); build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha)); + build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta)); + build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA"); build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D"); build_opts.add_option_if(_reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(1))); build_opts.add_option_if(_reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(output->info()->dimension(2))); + build_opts.add_option_if(gemm_info.broadcast_bias(), "-DBROADCAST_BIAS"); build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(input1->info()->dimension(2))); build_opts.add_option_if(lhs_info.interleave, "-DLHS_INTERLEAVE"); build_opts.add_option_if(rhs_info.interleave, "-DRHS_INTERLEAVE"); @@ -227,6 +269,8 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons // Set config_id for enabling LWS tuning _config_id = kernel_name; _config_id += "_"; + _config_id += (_add_bias ? "add_bias_" : ""); + _config_id += (_broadcast_bias ? "broadcast_bias_" : ""); _config_id += (_reinterpret_output_as_3d ? 
"3do_" : ""); _config_id += lower_string(string_from_data_type(input0->info()->data_type())); _config_id += "_"; @@ -253,13 +297,15 @@ void CLGEMMMatrixMultiplyReshapedKernel::configure(const ICLTensor *input0, cons _config_id += support::cpp11::to_string(rhs_info.interleave); } -Status CLGEMMMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output, float alpha, const GEMMLHSMatrixInfo &lhs_info, +Status CLGEMMMatrixMultiplyReshapedKernel::validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, + const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, const GEMMReshapeInfo &gemm_info) { ElementsProcessed num_elements_processed{}; - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, output, alpha, lhs_info, rhs_info, gemm_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input0, input1, input2, output, alpha, beta, lhs_info, rhs_info, gemm_info)); ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input0->clone().get(), input1->clone().get(), + input2 != nullptr ? 
input2->clone().get() : nullptr, output->clone().get(), lhs_info, rhs_info, @@ -290,7 +336,15 @@ void CLGEMMMatrixMultiplyReshapedKernel::run(const Window &window, cl::CommandQu if(_reinterpret_output_as_3d) { // Pass bottom paddings to the kernel if the output has to be reinterpreted as 3D tensor - const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 4; + unsigned int idx0; + if(_add_bias) + { + idx0 = 4 * num_arguments_per_2D_tensor() + 5; + } + else + { + idx0 = 3 * num_arguments_per_2D_tensor() + 4; + } const unsigned int total_cross_plane_pad = _output->info()->padding().top + _output->info()->padding().bottom; _kernel.setArg(idx0, static_cast(total_cross_plane_pad)); } @@ -308,10 +362,18 @@ void CLGEMMMatrixMultiplyReshapedKernel::run(const Window &window, cl::CommandQu unsigned int idx = 0; add_2D_tensor_argument(idx, _input0, slice); add_2D_tensor_argument(idx, _input1, slice_b); + if(_add_bias) + { + add_2D_tensor_argument(idx, _input2, slice); + } add_2D_tensor_argument(idx, _output, slice); _kernel.setArg(idx++, static_cast(_k)); _kernel.setArg(idx++, static_cast(_input0->info()->strides_in_bytes()[2])); _kernel.setArg(idx++, static_cast(_input1->info()->strides_in_bytes()[2])); + if(_add_bias) + { + _kernel.setArg(idx++, static_cast(_input2->info()->strides_in_bytes()[2])); + } _kernel.setArg(idx++, static_cast(_output->info()->strides_in_bytes()[2])); enqueue(queue, *this, slice, lws_hint(), _use_dummy_work_items); } diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp index 99b3d20953..d952de8232 100644 --- a/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp +++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.cpp @@ -250,7 +250,7 @@ void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input CLBuildOptions build_opts; build_opts.add_option("-DDATA_TYPE=" + 
get_cl_type_from_data_type(input0->info()->data_type())); build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha)); - build_opts.add_option_if(!(helpers::float_ops::is_zero(beta)) && _input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta)); + build_opts.add_option_if(_input2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta)); build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA"); build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D"); build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D"); @@ -277,6 +277,8 @@ void CLGEMMMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *input // Set config_id for enabling LWS tuning _config_id = kernel_name; _config_id += "_"; + _config_id += (_add_bias ? "add_bias_" : ""); + _config_id += (_broadcast_bias ? "broadcast_bias_" : ""); _config_id += (_reinterpret_input_as_3d ? "3di_" : ""); _config_id += (_reinterpret_output_as_3d ? 
"3do_" : ""); _config_id += lower_string(string_from_data_type(input0->info()->data_type())); diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp index 21a9fce233..94b318c93e 100644 --- a/src/runtime/CL/functions/CLGEMM.cpp +++ b/src/runtime/CL/functions/CLGEMM.cpp @@ -34,6 +34,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" +#include "arm_compute/core/utils/helpers/float_ops.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/runtime/CL/CLScheduler.h" #include "arm_compute/runtime/ITensorAllocator.h" @@ -189,10 +190,6 @@ void CLGEMM::configure_reshaped_v1(const ICLTensor *a, const ICLTensor *b, const void CLGEMM::configure_reshaped_v2(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta, const GEMMInfo &gemm_info) { - ARM_COMPUTE_ERROR_ON(c != nullptr); - ARM_COMPUTE_UNUSED(beta); - ARM_COMPUTE_UNUSED(c); - DataType data_type = a->info()->data_type(); bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); const unsigned int m = reinterpret_input_as_3d ? (a->info()->dimension(1) * a->info()->dimension(2)) : a->info()->dimension(1); @@ -201,12 +198,13 @@ void CLGEMM::configure_reshaped_v2(const ICLTensor *a, const ICLTensor *b, const const unsigned int batch_size = reinterpret_input_as_3d ? 
a->info()->dimension(3) : a->info()->dimension(2); const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); const GPUTarget gpu_target = CLScheduler::get().target(); + bool broadcast_bias = gemm_info.broadcast_bias(); // Set the target for the kernels _reshape_lhs_kernel.set_target(gpu_target); _mm_kernel.set_target(gpu_target); - GEMMReshapeInfo reshape_info(m, n, k, 1, 1, depth_output_gemm3d, false); + GEMMReshapeInfo reshape_info(m, n, k, 1, 1, depth_output_gemm3d, false, broadcast_bias); // Manage intermediate buffers _memory_group.manage(&_tmp_a); @@ -230,7 +228,7 @@ void CLGEMM::configure_reshaped_v2(const ICLTensor *a, const ICLTensor *b, const _reshape_rhs_kernel.configure(b, &_tmp_b, rhs_info); // Configure and tune matrix multiply kernel - _mm_reshaped_kernel.configure(&_tmp_a, &_tmp_b, output, alpha, lhs_info, rhs_info, reshape_info); + _mm_reshaped_kernel.configure(&_tmp_a, &_tmp_b, c, output, alpha, beta, lhs_info, rhs_info, reshape_info); // Allocate intermediate tensors _tmp_a.allocator()->allocate(); @@ -395,9 +393,9 @@ Status CLGEMM::validate_reshaped_v2(const ITensorInfo *a, const ITensorInfo *b, const unsigned int k = a->dimension(0); const unsigned int batch_size = reinterpret_input_as_3d ? 
a->dimension(3) : a->dimension(2); const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); - const bool add_c = (beta != 0.f && c != nullptr); + const bool broadcast_bias = gemm_info.broadcast_bias(); - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, false); + const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, false, broadcast_bias); GEMMLHSMatrixInfo lhs_info; GEMMRHSMatrixInfo rhs_info; @@ -416,13 +414,8 @@ Status CLGEMM::validate_reshaped_v2(const ITensorInfo *a, const ITensorInfo *b, ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info)); // Validate matrix multiply - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixMultiplyReshapedKernel::validate(&tmp_a_info, &tmp_b_info, output, alpha, lhs_info, rhs_info, reshape_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixMultiplyReshapedKernel::validate(&tmp_a_info, &tmp_b_info, c, output, alpha, beta, lhs_info, rhs_info, reshape_info)); - if(add_c) - { - // Validate matrix addition kernel - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAdditionKernel::validate(c, output, beta)); - } return Status{}; } @@ -486,31 +479,32 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * // Select GEMMType _gemm_type = select_gemm_type(m, n, k, a->info()->data_type(), _reshape_b_only_on_first_run, gpu_target); - const bool is_gemm_reshaped_only_rhs = _gemm_type == GEMMType::RESHAPED_ONLY_RHS; - const bool add_c = (beta != 0.f && c != nullptr); - const bool is_beta_one = std::abs(1.0f - beta) < 0.00001f; - const bool fuse_add = (is_beta_one && (c != nullptr && c->info()->num_dimensions() == 1)) || is_gemm_reshaped_only_rhs; + const bool is_fuse_add_c_supported = (_gemm_type == GEMMType::RESHAPED_V2) || (_gemm_type == GEMMType::RESHAPED_ONLY_RHS); + const bool add_c = (!(helpers::float_ops::is_zero(beta)) && c != nullptr); + const bool fuse_add_c = add_c && is_fuse_add_c_supported; + + const 
ICLTensor *c_to_use = fuse_add_c ? c : nullptr; switch(_gemm_type) { case GEMMType::NATIVE: { - configure_native(a, b, (add_c && fuse_add) ? c : nullptr, output, alpha, beta, gemm_info); + configure_native(a, b, c_to_use, output, alpha, beta, gemm_info); break; } case GEMMType::RESHAPED_V1: { - configure_reshaped_v1(a, b, (add_c && fuse_add) ? c : nullptr, output, alpha, beta, gemm_info); + configure_reshaped_v1(a, b, c_to_use, output, alpha, beta, gemm_info); break; } case GEMMType::RESHAPED_V2: { - configure_reshaped_v2(a, b, (add_c && fuse_add) ? c : nullptr, output, alpha, beta, gemm_info); + configure_reshaped_v2(a, b, c_to_use, output, alpha, beta, gemm_info); break; } case GEMMType::RESHAPED_ONLY_RHS: { - configure_reshaped_only_rhs(a, b, (add_c && fuse_add) ? c : nullptr, output, alpha, beta, gemm_info); + configure_reshaped_only_rhs(a, b, c_to_use, output, alpha, beta, gemm_info); break; } default: @@ -520,7 +514,7 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * } // Configure matrix addition kernel - if(add_c && !fuse_add) + if(add_c && !fuse_add_c) { _ma_kernel.configure(c, output, beta); _run_addition = true; @@ -539,26 +533,32 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso // Select GEMMType GEMMType gemm_type = select_gemm_type(m, n, k, a->data_type(), gemm_info.reshape_b_only_on_first_run(), gpu_target); + const bool is_fuse_add_c_supported = (gemm_type == GEMMType::RESHAPED_V2) || (gemm_type == GEMMType::RESHAPED_ONLY_RHS); + const bool add_c = (!(helpers::float_ops::is_zero(beta)) && c != nullptr); + const bool fuse_add_c = add_c && is_fuse_add_c_supported; + + const ITensorInfo *c_to_use = fuse_add_c ? 
c : nullptr; + switch(gemm_type) { case GEMMType::NATIVE: { - ARM_COMPUTE_RETURN_ON_ERROR(validate_native(a, b, c, output, alpha, beta, gemm_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_native(a, b, c_to_use, output, alpha, beta, gemm_info)); break; } case GEMMType::RESHAPED_V1: { - ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_v1(a, b, c, output, alpha, beta, gemm_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_v1(a, b, c_to_use, output, alpha, beta, gemm_info)); break; } case GEMMType::RESHAPED_V2: { - ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_v2(a, b, c, output, alpha, beta, gemm_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_v2(a, b, c_to_use, output, alpha, beta, gemm_info)); break; } case GEMMType::RESHAPED_ONLY_RHS: { - ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_only_rhs(a, b, c, output, alpha, beta, gemm_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_only_rhs(a, b, c_to_use, output, alpha, beta, gemm_info)); break; } default: @@ -567,6 +567,12 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso } } + // Validate matrix addition kernel + if(add_c && !fuse_add_c) + { + ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixAdditionKernel::validate(c, output, beta)); + } + return Status{}; } diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp index 4e518fcfd5..99f045a0bf 100644 --- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp @@ -202,8 +202,7 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor * _skip_col2im = data_layout == DataLayout::NHWC; _append_bias = (biases != nullptr) && (!_is_quantized); _is_activationlayer_enabled = act_info.enabled(); - // In case of F16, fused bias will be used in GEMM - _run_addition = (_skip_im2col) && (_append_bias) && (data_type != DataType::F16); + _run_addition = (_skip_im2col) && (_append_bias); // Set the GPU 
target for im2col and col2im _im2col_kernel.set_target(CLScheduler::get().target()); @@ -388,8 +387,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1); const bool skip_col2im = data_layout == DataLayout::NHWC; bool is_activationlayer_enabled = act_info.enabled(); - // In case of F16, fused bias will be used in GEMM - const bool run_addition = (skip_im2col) && (append_bias) && (data_type != DataType::F16); + const bool run_addition = (skip_im2col) && (append_bias); const UniformQuantizationInfo iq_info = input->quantization_info().uniform(); const UniformQuantizationInfo wq_info = weights->quantization_info().uniform(); diff --git a/tests/framework/Macros.h b/tests/framework/Macros.h index 591b80e9d8..134f75e287 100644 --- a/tests/framework/Macros.h +++ b/tests/framework/Macros.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -49,8 +49,8 @@ #define CONCAT(ARG0, ARG1) ARG0##ARG1 -#define VARIADIC_SIZE_IMPL(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, size, ...) size -#define VARIADIC_SIZE(...) VARIADIC_SIZE_IMPL(__VA_ARGS__, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define VARIADIC_SIZE_IMPL(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, size, ...) size +#define VARIADIC_SIZE(...) VARIADIC_SIZE_IMPL(__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) #define JOIN_PARAM1(OP, param) OP(0, param) #define JOIN_PARAM2(OP, param, ...) \ @@ -83,6 +83,9 @@ #define JOIN_PARAM11(OP, param, ...) \ OP(10, param) \ , JOIN_PARAM10(OP, __VA_ARGS__) +#define JOIN_PARAM12(OP, param, ...) \ + OP(11, param) \ + , JOIN_PARAM11(OP, __VA_ARGS__) #define JOIN_PARAM(OP, NUM, ...) 
\ CONCAT(JOIN_PARAM, NUM) \ (OP, __VA_ARGS__) diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp index 564d3f4c2f..69e58303f3 100644 --- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp +++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp @@ -76,6 +76,9 @@ constexpr float tolerance_num_f16 = 0.02f; /** Alpha values to test - Precommit */ const auto a_values = framework::dataset::make("alpha", {1.0f, -0.75f} ); +/** Beta values to test - Precommit */ +const auto beta_values = framework::dataset::make("beta", {-0.75f, 0.0f} ); + /** M values to test */ const auto m_values = framework::dataset::make("M", 37); @@ -130,8 +133,11 @@ const auto i_values_lhs = framework::dataset::make("interleave_lhs", { true, fal /** Interleave values to test with RHS matrix */ const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, false }); +/** Broadcast bias from vector to matrix */ +const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} ); + /** Configuration test */ -void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int v0_value, unsigned int h0_value, bool i_value_lhs, bool i_value_rhs, DataType data_type) +void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned int k_value, unsigned int b_value, unsigned int m0_value, unsigned int n0_value, unsigned int k0_value, unsigned int v0_value, unsigned int h0_value, bool i_value_lhs, bool i_value_rhs, bool broadcast_bias, DataType data_type) { const unsigned int M = m_value; const unsigned int N = n_value; @@ -151,7 +157,7 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned rhs_info.interleave = i_value_rhs; rhs_info.transpose = true; - GEMMReshapeInfo gemm_info(M, N, K); + GEMMReshapeInfo gemm_info(M, N, K, 
false, false, 0, false, broadcast_bias); const TensorShape lhs_shape(K, M, b_value); const TensorShape lhs_shape_reshaped = compute_lhs_reshaped_shape(TensorInfo(lhs_shape, 1, data_type), @@ -166,18 +172,24 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned TensorInfo(rhs_shape_reshaped, 1, data_type), gemm_info); + const TensorShape bias_shape(N, + broadcast_bias? 1 : M, + broadcast_bias? 1 : b_value); + // Create tensors CLTensor lhs_reshaped = create_tensor(lhs_shape_reshaped, data_type); CLTensor rhs_reshaped = create_tensor(rhs_shape_reshaped, data_type); + CLTensor bias = create_tensor(bias_shape, data_type); CLTensor dst = create_tensor(dst_shape, data_type); ARM_COMPUTE_EXPECT(lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Create and configure function CLGEMMMatrixMultiplyReshaped gemm; - gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, 1.0f, lhs_info, rhs_info, gemm_info); + gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, gemm_info); } } // namespace @@ -185,7 +197,7 @@ TEST_SUITE(CL) TEST_SUITE(GEMMMatrixMultiplyReshaped) TEST_SUITE(Float) TEST_SUITE(FP32) -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_values, n_values), k_values), @@ -197,13 +209,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi h0_values_precommit), i_values_lhs), i_values_rhs), -m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, 
i_value_lhs, i_value_rhs) + broadcast_bias_values), +m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, broadcast_bias) { - validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, DataType::F32); + validate_configuration(m_value, n_value, k_value, b_value, m0_value, n0_value, k0_value, v0_value, h0_value, i_value_lhs, i_value_rhs, broadcast_bias, DataType::F32); } FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_values, n_values), k_values), @@ -216,14 +229,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture, fra i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F32)), - a_values)) + a_values), + beta_values), + broadcast_bias_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_values, n_values), k_values), @@ -236,14 +251,16 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture, fra i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F32)), - a_values)) + a_values), + beta_values), + broadcast_bias_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture, 
framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_w_values, m_h_values), n_values), @@ -257,14 +274,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture, i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F32)), - a_values)) + a_values), + beta_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); } FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_w_values, m_h_values), n_values), @@ -278,7 +296,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture, i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F32)), - a_values)) + a_values), + beta_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); @@ -287,7 +306,7 @@ TEST_SUITE_END() // FP32 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_values, n_values), k_values), @@ -300,14 +319,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture, fram i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F16)), - a_values)) + a_values), + beta_values), + broadcast_bias_values)) { // Validate output 
validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); } FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_values, n_values), k_values), @@ -320,14 +341,16 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMMatrixMultiplyReshapedFixture, fram i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F16)), - a_values)) + a_values), + beta_values), + broadcast_bias_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); } FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_w_values, m_h_values), n_values), @@ -341,14 +364,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture, i_values_lhs), i_values_rhs), framework::dataset::make("DataType", DataType::F16)), - a_values)) + a_values), + beta_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); } FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture, framework::DatasetMode::NIGHTLY, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( m_w_values, m_h_values), n_values), @@ -362,7 +386,8 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshaped3DFixture, i_values_lhs), i_values_rhs), framework::dataset::make("DataType", 
DataType::F16)), - a_values)) + a_values), + beta_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp index 23ae004912..133170e2d3 100644 --- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp +++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp @@ -123,7 +123,7 @@ const auto i_values_rhs = framework::dataset::make("interleave_rhs", { true, fal /** Transpose values to test with RHS matrix */ const auto t_values_rhs = framework::dataset::make("transpose_rhs", { true, false }); -/**Broadcast bias from vector to matrix */ +/** Broadcast bias from vector to matrix */ const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} ); /** Configuration test */ @@ -155,18 +155,15 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned TensorInfo(rhs_shape_reshaped, 1, data_type), gemm_info); + const TensorShape bias_shape(N, + broadcast_bias? 1 : M, + broadcast_bias? 
1 : b_value); + // Create tensors CLTensor lhs = create_tensor(lhs_shape, data_type); CLTensor rhs_reshaped = create_tensor(rhs_shape_reshaped, data_type); - CLTensor dst = create_tensor(dst_shape, data_type); - - TensorShape bias_shape = dst_shape; - if (broadcast_bias) - { - bias_shape[1] = 1; - bias_shape[2] = 1; - } CLTensor bias = create_tensor(bias_shape, data_type); + CLTensor dst = create_tensor(dst_shape, data_type); ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -257,7 +254,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture< t_values_rhs), framework::dataset::make("DataType", DataType::F32)), a_values), - b_values)) + beta_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); @@ -278,7 +275,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge3D, CLGEMMMatrixMultiplyReshapedOnlyRHS3DFixture< t_values_rhs), framework::dataset::make("DataType", DataType::F32)), a_values), - b_values)) + beta_values)) { // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); @@ -300,7 +297,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedOnlyRHSFixture void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs, - bool interleave_rhs, DataType data_type, float alpha) + bool interleave_rhs, DataType data_type, float alpha, float beta, bool broadcast_bias) { GEMMLHSMatrixInfo lhs_info; lhs_info.m0 = m0; @@ -176,9 +176,12 @@ public: // Set the tensor shapes for LHS and RHS matrices const TensorShape lhs_shape(k, m, batch_size); const TensorShape rhs_shape(n, k, batch_size); + const TensorShape bias_shape(n, + broadcast_bias ? 1 : m, + broadcast_bias ? 
1 : batch_size); - _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type, alpha); - _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha); + _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias); + _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias); } protected: @@ -193,11 +196,13 @@ protected: library->fill_borders_with_garbage(tensor, distribution_inf, i); } - TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type, float alpha) + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, + DataType data_type, float alpha, float beta, bool broadcast_bias) { // Create tensors - TensorType lhs = create_tensor(lhs_shape, data_type, 1); - TensorType rhs = create_tensor(rhs_shape, data_type, 1); + TensorType lhs = create_tensor(lhs_shape, data_type, 1); + TensorType rhs = create_tensor(rhs_shape, data_type, 1); + TensorType bias = create_tensor(bias_shape, data_type, 1); TensorType lhs_reshaped; TensorType rhs_reshaped; TensorType dst; @@ -214,20 +219,23 @@ protected: GEMMFunctionType gemm; reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, alpha, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); + gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, 0, false, broadcast_bias)); ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), 
framework::LogLevel::ERRORS); // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); lhs_reshaped.allocator()->allocate(); rhs_reshaped.allocator()->allocate(); + bias.allocator()->allocate(); dst.allocator()->allocate(); ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -235,6 +243,7 @@ protected: // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); + fill(AccessorType(bias), 2); // Compute GEMM reshape_lhs.run(); @@ -244,7 +253,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha) + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, bool broadcast_bias) { TensorShape dst_shape = lhs_shape; dst_shape[0] = rhs_shape[0]; @@ -253,13 +262,27 @@ protected: // Create reference SimpleTensor lhs{ lhs_shape, data_type, 1 }; SimpleTensor rhs{ rhs_shape, data_type, 1 }; - SimpleTensor c{ dst_shape, data_type, 1 }; + SimpleTensor bias{ dst_shape, data_type, 1 }; + + const int n = rhs_shape[0]; + const int m = lhs_shape[1]; + const int batch_size = lhs_shape[2]; // Fill reference fill(lhs, 0); fill(rhs, 1); + fill(bias, 2); - return reference::gemm(lhs, rhs, c, alpha, 0.0f); + if(broadcast_bias) + { + // In case of broadcast, we need simply copy the first into the following "M" ones + for(int i = 1; i < m * batch_size; i++) + { + memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); + 
} + } + + return reference::gemm(lhs, rhs, bias, alpha, beta); } TensorType _target{}; @@ -273,7 +296,7 @@ public: template void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0, bool interleave_lhs, - bool interleave_rhs, DataType data_type, float alpha) + bool interleave_rhs, DataType data_type, float alpha, float beta) { GEMMLHSMatrixInfo lhs_info; lhs_info.m0 = m0; @@ -295,9 +318,10 @@ public: // Set the tensor shapes for LHS and RHS matrices const TensorShape lhs_shape(k, m, batch_size); const TensorShape rhs_shape(n, k, batch_size); + const TensorShape bias_shape(n, 1, 1); - _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type, alpha, m_h); - _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha, m_h); + _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h); + _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h); } protected: @@ -308,12 +332,13 @@ protected: library->fill(tensor, distribution, i); } - TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type, float alpha, - unsigned int m_h) + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, + DataType data_type, float alpha, float beta, unsigned int m_h) { // Create tensors - TensorType lhs = create_tensor(lhs_shape, data_type, 1); - TensorType rhs = create_tensor(rhs_shape, data_type, 1); + TensorType lhs = create_tensor(lhs_shape, data_type, 1); + TensorType rhs = create_tensor(rhs_shape, data_type, 1); + TensorType bias = create_tensor(bias_shape, data_type, 1); TensorType lhs_reshaped; 
TensorType rhs_reshaped; TensorType dst; @@ -330,27 +355,31 @@ protected: GEMMFunctionType gemm; reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info); reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, alpha, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h)); + gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h, false, true)); ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); lhs_reshaped.allocator()->allocate(); rhs_reshaped.allocator()->allocate(); + bias.allocator()->allocate(); dst.allocator()->allocate(); ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!lhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); + fill(AccessorType(bias), 2); // Compute GEMM reshape_lhs.run(); @@ -360,7 +389,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha, unsigned int m_h) + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h) { TensorShape dst_shape = lhs_shape; dst_shape.set(0, rhs_shape[0]); @@ -371,13 
+400,24 @@ protected: // Create reference SimpleTensor lhs{ lhs_shape, data_type, 1 }; SimpleTensor rhs{ rhs_shape, data_type, 1 }; - SimpleTensor c{ dst_shape, data_type, 1 }; + SimpleTensor bias{ dst_shape, data_type, 1 }; + + const int n = rhs_shape[0]; + const int m = lhs_shape[1]; + const int batch_size = lhs_shape[2]; // Fill reference fill(lhs, 0); fill(rhs, 1); + fill(bias, 2); - return reference::gemm(lhs, rhs, c, alpha, 0.0f); + // In case of broadcast, we need simply copy the first into the following "M" ones + for(int i = 1; i < m * batch_size; i++) + { + memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); + } + + return reference::gemm(lhs, rhs, bias, alpha, beta); } TensorType _target{}; @@ -406,16 +446,9 @@ public: // Set the tensor shapes for LHS and RHS matrices const TensorShape lhs_shape(k, m, batch_size); const TensorShape rhs_shape(n, k, batch_size); - - TensorShape bias_shape; - if(broadcast_bias) - { - bias_shape = TensorShape(n, 1, 1); - } - else - { - bias_shape = TensorShape(n, m, batch_size); - } + const TensorShape bias_shape(n, + broadcast_bias ? 1 : m, + broadcast_bias ? 
1 : batch_size); _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, broadcast_bias); _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, broadcast_bias); @@ -457,6 +490,7 @@ protected: ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); // Allocate tensors lhs.allocator()->allocate(); @@ -468,6 +502,7 @@ protected: ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors @@ -500,20 +535,16 @@ protected: // Fill reference fill(lhs, 0); fill(rhs, 1); + fill(bias, 2); if(broadcast_bias) { - SimpleTensor tmp{ bias_shape, data_type, 1 }; - fill(tmp, 2); - for(int i = 0; i < m * batch_size; i++) + // In case of broadcast, we need simply copy the first into the following "M" ones + for(int i = 1; i < m * batch_size; i++) { - memcpy(bias.data() + i * n, tmp.data(), n * sizeof(T)); + memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); } } - else - { - fill(bias, 2); - } return (reference::gemm(lhs, rhs, bias, alpha, beta)); } @@ -522,27 +553,35 @@ protected: SimpleTensor _reference{}; }; -template -class GEMMMatrixMultiplyNativeValidationFixture : public framework::Fixture +template +class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture { public: template - void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float 
alpha) + void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, + bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta) { GEMMLHSMatrixInfo lhs_info; lhs_info.m0 = m0; lhs_info.k0 = k0; GEMMRHSMatrixInfo rhs_info; - rhs_info.n0 = n0; - rhs_info.k0 = k0; + rhs_info.n0 = n0; + rhs_info.k0 = k0; + rhs_info.h0 = h0; + rhs_info.interleave = interleave_rhs; + rhs_info.transpose = transpose_rhs; + + // In case of GEMM3D, m is the product between m_w and m_h + const unsigned int m = m_w * m_h; // Set the tensor shapes for LHS and RHS matrices const TensorShape lhs_shape(k, m, batch_size); const TensorShape rhs_shape(n, k, batch_size); + const TensorShape bias_shape(n, 1, 1); - _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type, alpha); - _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha); + _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h); + _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h); } protected: @@ -551,100 +590,116 @@ protected: { std::uniform_real_distribution<> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, i); - - // Fill border with infinity in order to check the presence of NaN values (i.e. 
inf * 0) - std::uniform_real_distribution<> distribution_inf(std::numeric_limits::infinity(), std::numeric_limits::infinity()); - library->fill_borders_with_garbage(tensor, distribution_inf, i); } - TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type, float alpha) + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, + DataType data_type, float alpha, float beta, + unsigned int m_h) { // Create tensors - TensorType lhs = create_tensor(lhs_shape, data_type, 1); - TensorType rhs = create_tensor(rhs_shape, data_type, 1); + TensorType lhs = create_tensor(lhs_shape, data_type, 1); + TensorType rhs = create_tensor(rhs_shape, data_type, 1); + TensorType bias = create_tensor(bias_shape, data_type, 1); + TensorType rhs_reshaped; TensorType dst; const unsigned int M = lhs_shape[1]; const unsigned int N = rhs_shape[0]; const unsigned int K = lhs_shape[0]; + // The output tensor will be auto-initialized within the function + // Create and configure function - GEMMFunctionType gemm; - gemm.configure(&lhs, &rhs, &dst, alpha, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); + ReshapeRHSFunctionType reshape_rhs; + GEMMFunctionType gemm; + reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); + gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h, false, true)); ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); + rhs_reshaped.allocator()->allocate(); + bias.allocator()->allocate(); dst.allocator()->allocate(); 
ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); + fill(AccessorType(bias), 2); // Compute GEMM + reshape_rhs.run(); gemm.run(); return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha) + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h) { TensorShape dst_shape = lhs_shape; - dst_shape[0] = rhs_shape[0]; - dst_shape[1] = lhs_shape[1]; + dst_shape.set(0, rhs_shape[0]); + dst_shape.set(1, lhs_shape[1] / m_h); + dst_shape.set(2, m_h); + dst_shape.set(3, lhs_shape[2]); // Create reference SimpleTensor lhs{ lhs_shape, data_type, 1 }; SimpleTensor rhs{ rhs_shape, data_type, 1 }; - SimpleTensor c{ dst_shape, data_type, 1 }; + SimpleTensor bias{ dst_shape, data_type, 1 }; + + const int n = rhs_shape[0]; + const int m = lhs_shape[1]; + const int batch_size = lhs_shape[2]; // Fill reference fill(lhs, 0); fill(rhs, 1); + fill(bias, 2); - return reference::gemm(lhs, rhs, c, alpha, 0.0f); + // In case of broadcast, we need simply copy the first into the following "M" ones + for(int i = 1; i < m * batch_size; i++) + { + memcpy(bias.data() + i * n, bias.data(), n * sizeof(T)); + } + + return reference::gemm(lhs, rhs, bias, alpha, beta); } TensorType _target{}; SimpleTensor _reference{}; }; -template -class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture +template +class GEMMMatrixMultiplyNativeValidationFixture : 
public framework::Fixture { public: template - void setup(unsigned int m_w, unsigned int m_h, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int h0, - bool interleave_rhs, bool transpose_rhs, DataType data_type, float alpha, float beta) + void setup(unsigned int m, unsigned int n, unsigned int k, unsigned int batch_size, unsigned int m0, unsigned int n0, unsigned int k0, DataType data_type, float alpha) { GEMMLHSMatrixInfo lhs_info; lhs_info.m0 = m0; lhs_info.k0 = k0; GEMMRHSMatrixInfo rhs_info; - rhs_info.n0 = n0; - rhs_info.k0 = k0; - rhs_info.h0 = h0; - rhs_info.interleave = interleave_rhs; - rhs_info.transpose = transpose_rhs; - - // In case of GEMM3D, m is the product between m_w and m_h - const unsigned int m = m_w * m_h; + rhs_info.n0 = n0; + rhs_info.k0 = k0; // Set the tensor shapes for LHS and RHS matrices const TensorShape lhs_shape(k, m, batch_size); const TensorShape rhs_shape(n, k, batch_size); - const TensorShape bias_shape(n, 1, 1); - _target = compute_target(lhs_shape, rhs_shape, bias_shape, lhs_info, rhs_info, data_type, alpha, beta, m_h); - _reference = compute_reference(lhs_shape, rhs_shape, bias_shape, data_type, alpha, beta, m_h); + _target = compute_target(lhs_shape, rhs_shape, lhs_info, rhs_info, data_type, alpha); + _reference = compute_reference(lhs_shape, rhs_shape, data_type, alpha); } protected: @@ -653,30 +708,26 @@ protected: { std::uniform_real_distribution<> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, i); + + // Fill border with infinity in order to check the presence of NaN values (i.e. 
inf * 0) + std::uniform_real_distribution<> distribution_inf(std::numeric_limits::infinity(), std::numeric_limits::infinity()); + library->fill_borders_with_garbage(tensor, distribution_inf, i); } - TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, - DataType data_type, float alpha, float beta, - unsigned int m_h) + TensorType compute_target(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const GEMMLHSMatrixInfo &lhs_info, const GEMMRHSMatrixInfo &rhs_info, DataType data_type, float alpha) { // Create tensors - TensorType lhs = create_tensor(lhs_shape, data_type, 1); - TensorType rhs = create_tensor(rhs_shape, data_type, 1); - TensorType bias = create_tensor(bias_shape, data_type, 1); - TensorType rhs_reshaped; + TensorType lhs = create_tensor(lhs_shape, data_type, 1); + TensorType rhs = create_tensor(rhs_shape, data_type, 1); TensorType dst; const unsigned int M = lhs_shape[1]; const unsigned int N = rhs_shape[0]; const unsigned int K = lhs_shape[0]; - // The output tensor will be auto-initialized within the function - // Create and configure function - ReshapeRHSFunctionType reshape_rhs; - GEMMFunctionType gemm; - reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info); - gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h, false, true)); + GEMMFunctionType gemm; + gemm.configure(&lhs, &rhs, &dst, alpha, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K)); ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -684,56 +735,38 @@ protected: // Allocate tensors lhs.allocator()->allocate(); rhs.allocator()->allocate(); - rhs_reshaped.allocator()->allocate(); - bias.allocator()->allocate(); dst.allocator()->allocate(); ARM_COMPUTE_EXPECT(!lhs.info()->is_resizable(), 
framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!rhs.info()->is_resizable(), framework::LogLevel::ERRORS); - ARM_COMPUTE_EXPECT(!rhs_reshaped.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); // Fill tensors fill(AccessorType(lhs), 0); fill(AccessorType(rhs), 1); - fill(AccessorType(bias), 2); // Compute GEMM - reshape_rhs.run(); gemm.run(); return dst; } - SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, const TensorShape &bias_shape, DataType data_type, float alpha, float beta, unsigned int m_h) + SimpleTensor compute_reference(const TensorShape &lhs_shape, const TensorShape &rhs_shape, DataType data_type, float alpha) { TensorShape dst_shape = lhs_shape; - dst_shape.set(0, rhs_shape[0]); - dst_shape.set(1, lhs_shape[1] / m_h); - dst_shape.set(2, m_h); - dst_shape.set(3, lhs_shape[2]); + dst_shape[0] = rhs_shape[0]; + dst_shape[1] = lhs_shape[1]; // Create reference SimpleTensor lhs{ lhs_shape, data_type, 1 }; SimpleTensor rhs{ rhs_shape, data_type, 1 }; - SimpleTensor bias{ dst_shape, data_type, 1 }; - - const int n = rhs_shape[0]; - const int m = lhs_shape[1]; - const int batch_size = lhs_shape[2]; + SimpleTensor c{ dst_shape, data_type, 1 }; // Fill reference fill(lhs, 0); fill(rhs, 1); - SimpleTensor tmp{ bias_shape, data_type, 1 }; - fill(tmp, 2); - for(int i = 0; i < m * batch_size; i++) - { - memcpy(bias.data() + i * n, tmp.data(), n * sizeof(T)); - } - - return reference::gemm(lhs, rhs, bias, alpha, beta); + return reference::gemm(lhs, rhs, c, alpha, 0.0f); } TensorType _target{}; -- cgit v1.2.1