From 1d25ed54a948639d1894c8b021940df70005d519 Mon Sep 17 00:00:00 2001 From: Gian Marco Date: Sat, 16 Dec 2017 19:33:50 +0000 Subject: COMPMID-759 - CLGEMM optimization for McVail benchmarks This patch introduces an optimization for CLGEMM on Bifrost architectures which can bring up to 40% of FMA utilization on config 3 of McVail. The new CLGEMM does not require any reshape of matrix A and matrix B. This patch also adds the auto-config in CLConvolutionLayer and CLGEMM and extends the interface for NEGEMM and CLGEMM. Change-Id: Ibb354eda45e9ca64b14a99700fb21dff5989dda9 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/113716 Tested-by: Jenkins Reviewed-by: Michalis Spyrou Reviewed-by: Anthony Barbier --- src/runtime/CL/functions/CLGEMM.cpp | 51 ++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 24 deletions(-) (limited to 'src/runtime/CL/functions/CLGEMM.cpp') diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp index ca0228fcdb..be2527f4ba 100644 --- a/src/runtime/CL/functions/CLGEMM.cpp +++ b/src/runtime/CL/functions/CLGEMM.cpp @@ -39,14 +39,17 @@ using namespace arm_compute; CLGEMM::CLGEMM(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false) + : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false), + _is_first_run(true), _reshape_b_only_on_first_run(false) { } -void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta) +void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta, const GEMMInfo &gemm_info) { ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QS8, 
DataType::QS16, DataType::F16, DataType::F32); ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output); + ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported"); + ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported"); if(c != nullptr) { @@ -60,7 +63,11 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); // If the input tensor has less than 16 rows, we run a special version of GEMM without reshaping the input tensors - _is_interleaved_transposed = a->info()->dimension(1) > 16; + // For Bifrost architectures we do not reshape the input matrices + _is_interleaved_transposed = (a->info()->dimension(1) > 16 && CLScheduler::get().target() != GPUTarget::BIFROST); + + // Check if we need to reshape the matrix B only on the first run + _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run(); const ICLTensor *matrix_a = a; const ICLTensor *matrix_b = b; @@ -73,31 +80,17 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor * matrix_a = &_tmp_a; matrix_b = &_tmp_b; - TensorShape shape_tmp_a = a->info()->tensor_shape(); - TensorShape shape_tmp_b = b->info()->tensor_shape(); - - shape_tmp_a.set(0, a->info()->dimension(0) * 4); - shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.0f)); - - const unsigned int transpose_w = max_cl_vector_width / data_size_from_type(b->info()->data_type()); - shape_tmp_b.set(0, b->info()->dimension(1) * transpose_w); - shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / static_cast(transpose_w))); - - TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type(), a->info()->fixed_point_position()); - _tmp_a.allocator()->init(info_a); - - TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type(), 
b->info()->fixed_point_position()); - _tmp_b.allocator()->init(info_b); - - // Manage intermediate buffers - _memory_group.manage(&_tmp_a); - _memory_group.manage(&_tmp_b); + // _tmp_a and _tmp_b will be auto configured in _interleave_kernel and in _transpose_kernel // Configure interleave kernel _interleave_kernel.configure(a, &_tmp_a); // Configure transpose kernel _transpose_kernel.configure(b, &_tmp_b); + + // Manage intermediate buffers + _memory_group.manage(&_tmp_a); + _memory_group.manage(&_tmp_b); } _mm_kernel.configure(matrix_a, matrix_b, output, alpha, _is_interleaved_transposed); @@ -126,8 +119,18 @@ void CLGEMM::run() // Run interleave kernel CLScheduler::get().enqueue(_interleave_kernel, false); - // Run transpose kernel - CLScheduler::get().enqueue(_transpose_kernel, false); + if(_is_first_run) + { + // Run transpose kernel + CLScheduler::get().enqueue(_transpose_kernel, false); + + _is_first_run = false; + } + else if(!_reshape_b_only_on_first_run) + { + // Run transpose kernel + CLScheduler::get().enqueue(_transpose_kernel, false); + } } // Run matrix multiply kernel -- cgit v1.2.1