Diffstat (limited to 'src/runtime/CL/functions/CLGEMM.cpp')
-rw-r--r-- | src/runtime/CL/functions/CLGEMM.cpp | 42
1 file changed, 18 insertions, 24 deletions
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index d0db8766d9..9048b85114 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -118,7 +118,6 @@ inline void select_gemm_configuration(unsigned int m, unsigned int n, GEMMLHSMat
 CLGEMM::CLGEMM(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)),
       _interleave_kernel(),
-      _transpose_kernel(),
       _mm_kernel(),
       _ma_kernel(),
       _reshape_lhs_kernel(),
@@ -174,13 +173,18 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
         mult_transpose1xW_width   = 4;
         mult_interleave4x4_height = 2;
     }
+    GEMMRHSMatrixInfo rhs_info;
+    rhs_info.n0         = 16 / b->info()->element_size();
+    rhs_info.k0         = 1;
+    rhs_info.h0         = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose  = false;
 
     // Check if we need to reshape the matrix A and matrix B
     _is_interleaved_transposed = is_interleaved_transposed(m, n, k, a->info()->data_type(), _reshape_b_only_on_first_run, gpu_target);
 
     // Check if we can run the new reshaped GEMM
     _is_G76_path = (gpu_target == GPUTarget::G76) && _is_interleaved_transposed && (data_type == DataType::F32);
-    ;
 
     // if _is_interleaved_transposed is set, force reinterpret_input_as_3d to be false as the output of CLGEMMInterleaveKernel will be 2D
     if(_is_interleaved_transposed)
@@ -201,7 +205,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
         if(_is_G76_path)
         {
             GEMMLHSMatrixInfo lhs_info;
-            GEMMRHSMatrixInfo rhs_info;
 
             // Pick up the GEMM configuration based on M,N and K
             select_gemm_configuration(m, n, lhs_info, rhs_info);
@@ -219,7 +222,7 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
             _interleave_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d());
 
             // Configure transpose kernel
-            _transpose_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
+            _reshape_rhs_kernel.configure(b, &_tmp_b, rhs_info);
         }
     }
 
@@ -286,6 +289,13 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
         mult_interleave4x4_height = 2;
     }
 
+    GEMMRHSMatrixInfo rhs_info;
+    rhs_info.n0         = 16 / b->element_size();
+    rhs_info.k0         = 1;
+    rhs_info.h0         = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose  = false;
+
     // Check if we need to reshape the matrix A and matrix B
     const bool run_interleave_transpose = is_interleaved_transposed(m, n, k, a->data_type(), reshape_b_only_on_first_run, gpu_target);
 
@@ -308,7 +318,6 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
         if(is_G76_path)
         {
             GEMMLHSMatrixInfo lhs_info;
-            GEMMRHSMatrixInfo rhs_info;
 
             // Pick up the GEMM configuration based on M,N and K
             select_gemm_configuration(m, n, lhs_info, rhs_info);
@@ -328,10 +337,9 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
             // Validate interleave kernel
             auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
             ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &tmp_a_info, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d()));
-            // Validate transpose kernel
-            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &tmp_b_info, mult_transpose1xW_width));
+            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
         }
     }
 
@@ -371,14 +379,7 @@ void CLGEMM::run()
         if(!_reshape_b_only_on_first_run)
         {
             // Run transpose kernel
-            if(_is_G76_path)
-            {
-                CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
-            }
-            else
-            {
-                CLScheduler::get().enqueue(_transpose_kernel, false);
-            }
+            CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
         }
     }
 
@@ -409,14 +410,7 @@ void CLGEMM::prepare()
     {
         // Run transpose kernel and mark original weights tensor as unused
         _tmp_b.allocator()->allocate();
-        if(_is_G76_path)
-        {
-            CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
-        }
-        else
-        {
-            CLScheduler::get().enqueue(_transpose_kernel, false);
-        }
+        CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
        _original_b->mark_as_unused();
     }
     CLScheduler::get().queue().finish();
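For readers tracing the change: the patch retires the dedicated CLGEMMTranspose1xWKernel and instead drives the generic CLGEMMReshapeRHSMatrixKernel with a GEMMRHSMatrixInfo that reproduces the old 1xW layout, i.e. one-row blocks (k0 = 1) that are 16 bytes wide (n0 = 16 / element_size), with h0 carrying the previous mult_transpose1xW_width multiplier and both interleave and transpose disabled. The sketch below illustrates that parameter mapping in plain, self-contained C++; RhsInfo and make_transpose1xw_equivalent are simplified stand-ins invented for illustration, not the library's actual types.

#include <cstddef>
#include <iostream>

// Simplified stand-in for the library's GEMMRHSMatrixInfo, reduced to the
// fields the patch sets. Field names match the diff; the type is not ACL's.
struct RhsInfo
{
    unsigned int n0         = 1;     // block width, in elements
    unsigned int k0         = 1;     // block height, in elements
    unsigned int h0         = 1;     // horizontal blocks stored contiguously
    bool         interleave = false;
    bool         transpose  = false;
};

// Hypothetical helper mirroring the assignments in CLGEMM::configure():
// one-row blocks that are 16 bytes wide reproduce the old transpose1xW layout.
RhsInfo make_transpose1xw_equivalent(std::size_t element_size, unsigned int mult_transpose1xW_width)
{
    RhsInfo info;
    info.n0         = static_cast<unsigned int>(16 / element_size); // 4 for F32, 8 for F16
    info.k0         = 1;                                            // the "1" in 1xW
    info.h0         = mult_transpose1xW_width;                      // old width multiplier
    info.interleave = false;
    info.transpose  = false;
    return info;
}

int main()
{
    // F32 with the Bifrost tuning seen in the hunk (mult_transpose1xW_width = 4):
    // reading h0 as "blocks laid out along the row", this yields 1x16 stripes.
    const RhsInfo f32 = make_transpose1xw_equivalent(sizeof(float), 4);
    std::cout << "F32: 1x" << f32.n0 * f32.h0 << " element stripes\n";

    // F16 on the default path (multiplier 1): 1x8 stripes of 2-byte elements.
    const RhsInfo f16 = make_transpose1xw_equivalent(2, 1);
    std::cout << "F16: 1x" << f16.n0 * f16.h0 << " element stripes\n";
    return 0;
}

The simplification visible in run() and prepare() follows directly from this mapping: once both code paths share one reshape kernel, the _is_G76_path branch at enqueue time collapses to a single CLScheduler::get().enqueue(_reshape_rhs_kernel, false) call.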