From 2ec6c1eb6ee77b79e8ab6b97b8cd70bcc4c5589d Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Tue, 9 Apr 2019 12:03:05 +0100
Subject: COMPMID-2110: Enable CLGEMMLowpMatrixMultiplyReshapeOnlyRHSKernel in CLGEMMLowp

Change-Id: Ic32c803c3e2a067de10a7e46c85c962a970957b6
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/969
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../CL/functions/CLGEMMLowpMatrixMultiplyCore.h    | 12 ++---
 src/core/CL/cl_kernels/gemmlowp.cl                 |  2 +-
 ...MMReshapedOnlyRHSKernelConfigurationBifrost.cpp | 14 ++---
 ...GEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp |  2 +-
 .../CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp  | 61 ++++++----------------
 5 files changed, 26 insertions(+), 65 deletions(-)

diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index 67b22821da..a07101c020 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -25,11 +25,10 @@
 #define __ARM_COMPUTE_CLGEMMLOWPMATRIXMULTIPLYCORE_H__
 
 #include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
@@ -42,10 +41,9 @@ class ICLTensor;
 
 /** Basic function to execute GEMMLowpMatrixMultiplyCore on OpenCL. This function calls the following OpenCL kernels:
  *
- * -# @ref CLGEMMReshapeLHSMatrixKernel (if the output tensor is a matrix)
  * -# @ref CLGEMMReshapeRHSMatrixKernel (if the output tensor is a matrix)
- * -# @ref CLGEMMLowpMatrixMultiplyKernel (if the input matrix is a vector or for Midgard architectures)
- * -# @ref CLGEMMLowpMatrixMultiplyReshapedKernel (if the input matrix is not a vector and if the GPU architecture is not Midgard)
+ * -# @ref CLGEMMLowpMatrixMultiplyKernel (if the parameter "reshape_b_only_on_first_run" of GEMMInfo is FALSE)
+ * -# @ref CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel (if the parameter "reshape_b_only_on_first_run" of GEMMInfo is TRUE)
  * -# @ref CLGEMMLowpMatrixAReductionKernel (if the offset of matrix B is not 0)
  * -# @ref CLGEMMLowpMatrixBReductionKernel (if the offset of matrix A is not 0)
  * -# @ref CLGEMMLowpOffsetContributionKernel (if gemm_info.gemmlowp_output_stage == NONE)
@@ -103,8 +101,7 @@ public:
 private:
     CLMemoryGroup _memory_group;
     CLGEMMLowpMatrixMultiplyKernel _mm_kernel;
-    CLGEMMLowpMatrixMultiplyReshapedKernel _mm_reshaped_kernel;
-    CLGEMMReshapeLHSMatrixKernel _mtx_a_reshape_kernel;
+    CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel _mm_reshaped_only_rhs_kernel;
     CLGEMMReshapeRHSMatrixKernel _mtx_b_reshape_kernel;
     CLGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel;
     CLGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
@@ -112,7 +109,6 @@ private:
     CLGEMMLowpOffsetContributionOutputStageKernel _offset_contribution_output_stage_kernel;
     CLTensor _vector_sum_col;
     CLTensor _vector_sum_row;
-    CLTensor _tmp_a;
     CLTensor _tmp_b;
     CLTensor _mm_result_s32;
     const ICLTensor *_original_b;
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index cf377e1114..033b4b4942 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -4006,4 +4006,4 @@ __kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src
     // Store the result
     vstore4(res, 0, dst_addr);
 }
-#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
+#endif // defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
\ No newline at end of file
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp
index f696f0b253..483bab832f 100644
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp
+++ b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp
@@ -133,21 +133,13 @@ std::pair CLGEMMReshapedOnlyRHSKernelConfi
     {
         if(m == 1)
         {
-            if(n > 2048)
-            {
-                const unsigned int h0 = std::max(n / 4, static_cast<unsigned int>(1));
-                return configure_lhs_rhs_info(m, n, 1, 4, 16, 1, h0, false, true, false, true);
-            }
-            else
-            {
-                const unsigned int h0 = std::max(n / 2, static_cast<unsigned int>(1));
-                return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, h0, false, true, false, true);
-            }
+            const unsigned int h0 = std::max(n / 2, static_cast<unsigned int>(1));
+            return configure_lhs_rhs_info(m, n, 1, 2, 4, 1, h0, false, true, false, true);
         }
         else
         {
             const unsigned int h0 = std::max(n / 4, static_cast<unsigned int>(1));
-            return configure_lhs_rhs_info(m, n, 4, 1, 16, 1, h0, false, true, false, true);
+            return configure_lhs_rhs_info(m, n, 2, 2, 16, 1, h0, false, true, false, true);
         }
     }
 }
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index b1b0a16b5d..eca24169b9 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -152,7 +152,7 @@ std::pair validate_and_configure_window(ITensorInfo *input0, ITe
     window_changed = update_window_and_padding(win, input0_access, input1_access) || // window used by the execute_window_loop
                      update_window_and_padding(win_out, output_access);              // window used to update the padding requirements of output tensor
 
-    output_access.set_valid_region(win_out, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+    output_access.set_valid_region(win_out, ValidRegion(Coordinates(), output->tensor_shape()));
 
     // Collapse along the Z direction
     // This collapse needs to be here in order to tune the Z dimension of LWS
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index c447cb8778..cd537087d3 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -24,7 +24,7 @@
 #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
 
 #include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfiguration.h"
+#include "arm_compute/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfiguration.h"
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/TensorInfo.h"
@@ -40,17 +40,16 @@ using namespace arm_compute::cl_gemm;
 
 namespace
 {
-inline bool is_gemm_reshaped(unsigned int m, bool reshape_b_only_on_first_run, GPUTarget gpu_target)
+inline bool is_gemm_reshaped(bool reshape_b_only_on_first_run, GPUTarget gpu_target)
 {
-    return (get_arch_from_target(gpu_target) != GPUTarget::MIDGARD) && (m > 1) && (reshape_b_only_on_first_run);
+    return (get_arch_from_target(gpu_target) != GPUTarget::MIDGARD) && (reshape_b_only_on_first_run);
 }
 } // namespace
 
 CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)),
       _mm_kernel(),
-      _mm_reshaped_kernel(),
-      _mtx_a_reshape_kernel(),
+      _mm_reshaped_only_rhs_kernel(),
       _mtx_b_reshape_kernel(),
       _mtx_a_reduction_kernel(),
       _mtx_b_reduction_kernel(),
@@ -58,7 +57,6 @@ CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr
       _offset_contribution_output_stage_kernel(),
       _vector_sum_col(),
       _vector_sum_row(),
-      _tmp_a(),
       _tmp_b(),
       _mm_result_s32(),
       _original_b(nullptr),
-        std::tie(lhs_info, rhs_info) = CLGEMMReshapedKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
-
-        // Configure reshape LHS kernel
-        _mtx_a_reshape_kernel.configure(a, &_tmp_a, lhs_info, gemm_info.reinterpret_input_as_3d());
+        std::tie(lhs_info, rhs_info) = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
 
         // Configure reshape RHS kernel
         _mtx_b_reshape_kernel.configure(b, &_tmp_b, rhs_info);
@@ -166,7 +155,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     if(_is_gemm_reshaped)
     {
         // Configure and tune matrix multiply kernel
-        _mm_reshaped_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+        _mm_reshaped_only_rhs_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
     }
     else
     {
@@ -185,7 +174,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     if(_is_gemm_reshaped)
     {
         // Configure and tune matrix multiply kernel
-        _mm_reshaped_kernel.configure(matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+        _mm_reshaped_only_rhs_kernel.configure(matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
     }
     else
     {
@@ -200,7 +189,6 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     // Allocate tensors
     if(_is_gemm_reshaped)
     {
-        _tmp_a.allocator()->allocate();
         if(!_reshape_b_only_on_first_run)
         {
             _tmp_b.allocator()->allocate();
@@ -231,7 +219,6 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
     const ITensorInfo *matrix_a_info = a;
     const ITensorInfo *matrix_b_info = b;
 
-    TensorInfo tmp_a_info{};
     TensorInfo tmp_b_info{};
     GEMMRHSMatrixInfo rhs_info;
     GEMMLHSMatrixInfo lhs_info;
@@ -246,27 +233,16 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
     const unsigned int batch_size = reinterpret_input_as_3d ? a->dimension(3) : a->dimension(2);
     const int depth_output_gemm3d = gemm_info.depth_output_gemm3d();
 
-    bool reshape_matrices = is_gemm_reshaped(m, gemm_info.reshape_b_only_on_first_run(), CLScheduler::get().target());
-
-    // if reshape_matrices is set, force reinterpret_input_as_3d to be false as the output of CLGEMMInterleaveKernel will be 2D
-    if(reshape_matrices)
-    {
-        reinterpret_input_as_3d = false;
-    }
+    bool reshape_matrix_b = is_gemm_reshaped(gemm_info.reshape_b_only_on_first_run(), CLScheduler::get().target());
 
     const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);
 
-    if(reshape_matrices)
+    if(reshape_matrix_b)
     {
-        matrix_a_info = &tmp_a_info;
         matrix_b_info = &tmp_b_info;
 
         // Pick up the GEMM configuration
-        std::tie(lhs_info, rhs_info) = CLGEMMReshapedKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
-
-        // Validate reshape LHS kernel
-        auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*a, lhs_info, gemm_info.reinterpret_input_as_3d())));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeLHSMatrixKernel::validate(a, &tmp_a_info, lhs_info, gemm_info.reinterpret_input_as_3d()));
+        std::tie(lhs_info, rhs_info) = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
 
         // Validate reshape RHS kernel
         auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
@@ -297,13 +273,13 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
     {
         TensorInfo mm_result_s32_info{};
 
-        if(reshape_matrices)
+        if(reshape_matrix_b)
        {
             // Output tensor auto inizialitation if not yet initialized
             auto_init_if_empty(mm_result_s32_info, a->clone()->set_tensor_shape(compute_mm_shape(*matrix_a_info, *matrix_b_info, reshape_info)).set_data_type(DataType::S32));
 
             // Validate matrix multiply
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, &mm_result_s32_info, lhs_info, rhs_info, reshape_info));
         }
         else
         {
@@ -324,10 +300,10 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
     }
     else
     {
-        if(reshape_matrices)
+        if(reshape_matrix_b)
         {
             // Validate matrix multiply
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::validate(matrix_a_info, matrix_b_info, output, lhs_info, rhs_info, reshape_info));
         }
         else
         {
@@ -356,9 +332,6 @@ void CLGEMMLowpMatrixMultiplyCore::run()
 
     if(_is_gemm_reshaped)
     {
-        // Run reshape matrix A
-        CLScheduler::get().enqueue(_mtx_a_reshape_kernel, false);
-
         if(!_reshape_b_only_on_first_run)
         {
             // Run reshape matrix B
@@ -375,7 +348,7 @@ void CLGEMMLowpMatrixMultiplyCore::run()
     // Run matrix multiply
     if(_is_gemm_reshaped)
     {
-        CLScheduler::get().enqueue(_mm_reshaped_kernel, false);
+        CLScheduler::get().enqueue(_mm_reshaped_only_rhs_kernel, false);
     }
     else
     {
@@ -425,4 +398,4 @@ void CLGEMMLowpMatrixMultiplyCore::prepare()
         _is_prepared = true;
     }
 }
-} // namespace arm_compute
+} // namespace arm_compute
\ No newline at end of file
--
cgit v1.2.1
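
For readers who want to see the effect of this change from the caller's side, below is a minimal, hypothetical usage sketch. It is not part of the patch: the tensor shapes, quantization parameters, and the GEMMInfo(is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run) argument order are assumptions based on the library's public API around this revision. With reshape_b_only_on_first_run set to true on a non-Midgard GPU, the updated is_gemm_reshaped() helper selects the CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel path, so only matrix B is reshaped (and, when the weights do not change between runs, only once).

    // Hypothetical example, not taken from the patch: exercises the
    // reshaped-only-RHS path that this commit enables in CLGEMMLowp.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

    using namespace arm_compute;

    int main()
    {
        // Create the default CL context/queue used by all CL functions
        CLScheduler::get().default_init();

        const unsigned int M = 64, N = 128, K = 256;

        CLTensor a, b, dst;
        // A is M x K, B is K x N, both QASYMM8 (TensorShape is given as (width, height))
        a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
        b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 5)));
        // S32 destination, i.e. gemm_info.gemmlowp_output_stage == NONE
        dst.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));

        // is_a_reshaped = false, is_b_reshaped = false, reshape_b_only_on_first_run = true:
        // after this patch the reshaped-only-RHS kernel is used on non-Midgard GPUs,
        // regardless of whether M == 1.
        GEMMInfo gemm_info(false, false, true);

        CLGEMMLowpMatrixMultiplyCore gemmlowp;
        gemmlowp.configure(&a, &b, nullptr /* no bias */, &dst, gemm_info);

        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill a and b with quantized data, then:
        gemmlowp.run();
        CLScheduler::get().sync();
        return 0;
    }

With reshape_b_only_on_first_run left as false, or on a Midgard GPU, the same call falls back to the unreshaped CLGEMMLowpMatrixMultiplyKernel path, matching the kernel list documented in the updated header above.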