From 8b6b4a959a49127d64293f8b60265f0f5ed486d4 Mon Sep 17 00:00:00 2001
From: giuros01
Date: Tue, 18 Dec 2018 19:01:33 +0000
Subject: COMPMID-1836: Remove CLGEMMTranspose1xWKernel and replace with
 CLGEMMReshapeRHSMatrixKernel

Change-Id: Ic5a4f32657a155380684dcd4b44fbb608ef40cb4
Reviewed-on: https://review.mlplatform.org/418
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 .../CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h    |  4 +--
 .../core/CL/kernels/CLGEMMMatrixMultiplyKernel.h   |  6 ++--
 arm_compute/core/Types.h                           |  4 +--
 arm_compute/runtime/CL/functions/CLGEMM.h          |  5 +--
 .../runtime/CL/functions/CLGEMMConvolutionLayer.h  |  1 -
 .../CL/functions/CLGEMMLowpMatrixMultiplyCore.h    |  6 ++--
 src/core/CL/CLHelpers.cpp                          |  3 +-
 .../CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp  | 18 ++++++----
 src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp | 18 ++++++----
 src/runtime/CL/functions/CLGEMM.cpp                | 42 ++++++++++------------
 .../CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp  | 23 +++++++++---
 11 files changed, 72 insertions(+), 58 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h
index 82dcd93ce6..616c269b0d 100644
--- a/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.h
@@ -58,7 +58,7 @@ public:
      * @param[in] input0 Input tensor containing the interleaved Matrix A. Data type supported: QASYMM8
      * @param[in] input1 Input tensor containing the transposed1xW Matrix B. Data type supported: same as @p input0
      * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: S32
-     * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel
+     * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMReshapeRHSMatrixKernel
      * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      */
     void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
@@ -67,7 +67,7 @@ public:
      * @param[in] input0 Input tensor info containing the interleaved Matrix A. Data type supported: QASYMM8
      * @param[in] input1 Input tensor info containing the transposed Matrix B. Data type supported: same as @p input0
      * @param[in] output Output tensor info to store the result of matrix multiplication. Data type supported: S32
-     * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel
+     * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMReshapeRHSMatrixKernel
      * @param[in] reshape_info GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
     *
     * @return a status

diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index f61c330de6..ce37787862 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -32,7 +32,7 @@ class ICLTensor;

 /** OpenCL kernel to multiply two input matrices "A" and "B". All elements of the output matrix will be multiplied by alpha
  *
- * @note If the input tensors @p input0 and @p input1 have been reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel,
+ * @note If the input tensors @p input0 and @p input1 have been reshaped respectively with @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMReshapeRHSMatrixKernel,
  * the flag @p is_interleaved_transposed must be set to true
  *
  * @attention The second input tensor must have at least 2 dimensions (matrix)
@@ -57,7 +57,7 @@ public:
      * @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
      * @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
      * @param[in] alpha Weight of the matrix product
-     * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel
+     * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMReshapeRHSMatrixKernel
      * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy
      *
@@ -70,7 +70,7 @@ public:
      * @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
      * @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
      * @param[in] alpha Weight of the matrix product
-     * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel
+     * @param[in] is_interleaved_transposed True if input0 and input1 have been reshaped respectively using @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMReshapeRHSMatrixKernel
      * @param[in] reshape_info GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped
      * @param[in] gpu_target GPU Target
      * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy

diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 6ef9878a95..02001a2438 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1652,8 +1652,8 @@ private:
  * The matrix A can only be reshaped through @ref CLGEMMInterleave4x4Kernel or @ref NEGEMMInterleave4x4Kernel or @ref GCGEMMInterleave4x4Kernel
  * Note: Optionally, just for @ref CLGEMMInterleave4x4Kernel, it is possible to set mult_interleave4x4_height, the multiplication factor for the height of the 4x4 interleaved block
  *
- * The matrix B can only be reshaped through @ref CLGEMMTranspose1xWKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
- * Note: Optionally, just for @ref CLGEMMTranspose1xWKernel, it is possible to set mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block
+ * The matrix B can only be reshaped through @ref CLGEMMReshapeRHSMatrixKernel or @ref NEGEMMTranspose1xWKernel or @ref GCGEMMTranspose1xWKernel
+ * Note: Optionally, just for @ref CLGEMMReshapeRHSMatrixKernel, it is possible to set mult_transpose1xW_width, the multiplication factor for the width of the 1xW transposed block
  *
  */
 class GEMMReshapeInfo final

diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 7d47194e56..c4accde23d 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -30,7 +30,6 @@
 #include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/IFunction.h"
@@ -44,8 +43,7 @@ class ICLTensor;
  *
  * -# @ref CLGEMMInterleave4x4Kernel (only if the reshaped GEMM is selected by the heuristic model and the GPU target is NOT Mali-G76)
  * -# @ref CLGEMMReshapeLHSMatrixKernel (only if the reshaped GEMM is selected by the heuristic model and the GPU target IS Mali-G76)
- * -# @ref CLGEMMTranspose1xWKernel (only if the reshaped GEMM is selected by the heuristic model and the GPU target is NOT Mali-G76)
- * -# @ref CLGEMMReshapeRHSMatrixKernel (only if the reshaped GEMM is selected by the heuristic model and the GPU target IS Mali-G76)
+ * -# @ref CLGEMMReshapeRHSMatrixKernel (only if the reshaped GEMM is selected by the heuristic model)
  * -# @ref CLGEMMMatrixMultiplyKernel (if GPU target is NOT G76 or if the reshaped GEMM is NOT selected)
  * -# @ref CLGEMMMatrixMultiplyReshapedKernel (only if the reshaped GEMM is selected by the heuristic model and the GPU target IS Mali-G76)
  * -# @ref CLGEMMMatrixAdditionKernel (if c != nullptr and beta != 0.0)
@@ -108,7 +106,6 @@ public:
 private:
     CLMemoryGroup _memory_group;
     CLGEMMInterleave4x4Kernel _interleave_kernel; // TODO - COMPMID-1835: Remove this kernel and use CLGEMMReshapeLHSMatrixKernel
-    CLGEMMTranspose1xWKernel _transpose_kernel; // TODO - COMPMID-1836: Remove this kernel and use CLGEMMReshapeRHSMatrixKernel
     CLGEMMMatrixMultiplyKernel _mm_kernel;
     CLGEMMMatrixAdditionKernel _ma_kernel;
     CLGEMMReshapeLHSMatrixKernel _reshape_lhs_kernel;

diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 1468b156eb..d7694a8328 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -30,7 +30,6 @@
 #include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
 #include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
 #include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
 #include "arm_compute/core/Types.h"

diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
index 82f307a773..141354e723 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h
@@ -29,7 +29,7 @@
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
+#include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/IFunction.h"
@@ -42,7 +42,7 @@ class ICLTensor;

 /** Basic function to execute GEMMLowpMatrixMultiplyCore on OpenCL. This function calls the following OpenCL kernels:
  *
  * -# @ref CLGEMMInterleave4x4Kernel (if the output tensor is a matrix)
- * -# @ref CLGEMMTranspose1xWKernel (if the output tensor is a matrix)
+ * -# @ref CLGEMMReshapeRHSMatrixKernel (if the output tensor is a matrix)
  * -# @ref CLGEMMLowpMatrixMultiplyKernel
  * -# @ref CLGEMMLowpMatrixAReductionKernel (if the offset of matrix B is not 0)
  * -# @ref CLGEMMLowpMatrixBReductionKernel (if the offset of matrix A is not 0)
@@ -102,7 +102,7 @@ private:
     CLMemoryGroup _memory_group;
     CLGEMMLowpMatrixMultiplyKernel _mm_kernel;
     CLGEMMInterleave4x4Kernel _mtx_a_reshape_kernel;
-    CLGEMMTranspose1xWKernel _mtx_b_reshape_kernel;
+    CLGEMMReshapeRHSMatrixKernel _mtx_b_reshape_kernel;
     CLGEMMLowpMatrixAReductionKernel _mtx_a_reduction_kernel;
     CLGEMMLowpMatrixBReductionKernel _mtx_b_reduction_kernel;
     CLGEMMLowpOffsetContributionKernel _offset_contribution_kernel;

diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 924fb1d322..18ef185ac0 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -148,7 +148,7 @@ bool dot8_supported(const cl::Device &device)
     const GPUTarget gpu_target = get_target_from_name(device_name);

     // SW_WORKAROUND: Workaround for DDK revision r14p0 to enable cl_arm_integer_dot_product_int8
-    std::set<GPUTarget> sw_workaround_issue = {GPUTarget::G76};
+    std::set<GPUTarget> sw_workaround_issue = { GPUTarget::G76 };

     return (device_supports_extension(device, "cl_arm_integer_dot_product_int8") || sw_workaround_issue.count(gpu_target) != 0);
 }
@@ -255,5 +255,4 @@ size_t preferred_vector_width(const cl::Device &device, const DataType dt)
             return 1;
     }
 }
-
 } // namespace arm_compute

diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
index b2fb3e0278..66fafe4de5 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
@@ -71,11 +71,17 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
     }
     else
     {
-        const int m = reshape_info.m();
-        const int n = reshape_info.n();
-        const int k = reshape_info.k();
-        const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
-        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+        GEMMRHSMatrixInfo rhs_info;
+        const int m = reshape_info.m();
+        const int n = reshape_info.n();
+        const int k = reshape_info.k();
+        const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
+        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+        rhs_info.n0 = 16 / input1->element_size();
+        rhs_info.k0 = 1;
+        rhs_info.h0 = mult_transpose1xW_width;
+        rhs_info.interleave = false;
+        rhs_info.transpose = false;

         TensorShape tensor_shape0{ input0->tensor_shape() };
         tensor_shape0.set(0, k);
@@ -89,7 +95,7 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
         const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);

         const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_interleaved_shape(tensor_info0, mult_interleave4x4_height));
-        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(tensor_info1, mult_transpose1xW_width));
+        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));

         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);

diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index c9ed7763da..69455cf419 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -66,11 +66,17 @@ inline Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *i
     }
     else
     {
-        const int m = reshape_info.m();
-        const int n = reshape_info.n();
-        const int k = reshape_info.k();
-        const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
-        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+        GEMMRHSMatrixInfo rhs_info;
+        const int m = reshape_info.m();
+        const int n = reshape_info.n();
+        const int k = reshape_info.k();
+        const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width();
+        const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
+        rhs_info.n0 = 16 / input1->element_size();
+        rhs_info.k0 = 1;
+        rhs_info.h0 = mult_transpose1xW_width;
+        rhs_info.interleave = false;
+        rhs_info.transpose = false;

         TensorShape tensor_shape0{ input0->tensor_shape() };
         tensor_shape0.set(0, k);
@@ -84,7 +90,7 @@ inline Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *i
         const TensorInfo tensor_info1 = input1->clone()->set_tensor_shape(tensor_shape1);

         const TensorInfo tensor_info_reshaped0 = input0->clone()->set_tensor_shape(compute_interleaved_shape(tensor_info0, mult_interleave4x4_height));
-        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(tensor_info1, mult_transpose1xW_width));
+        const TensorInfo tensor_info_reshaped1 = input1->clone()->set_tensor_shape(compute_rhs_reshaped_shape(tensor_info1, rhs_info));
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input0, &tensor_info_reshaped0);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, &tensor_info_reshaped1);

diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index d0db8766d9..9048b85114 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -118,7 +118,6 @@ inline void select_gemm_configuration(unsigned int m, unsigned int n, GEMMLHSMat
 CLGEMM::CLGEMM(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)),
       _interleave_kernel(),
-      _transpose_kernel(),
       _mm_kernel(),
       _ma_kernel(),
       _reshape_lhs_kernel(),
@@ -174,13 +173,18 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
         mult_transpose1xW_width = 4;
         mult_interleave4x4_height = 2;
     }
+    GEMMRHSMatrixInfo rhs_info;
+    rhs_info.n0 = 16 / b->info()->element_size();
+    rhs_info.k0 = 1;
+    rhs_info.h0 = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose = false;

     // Check if we need to reshape the matrix A and matrix B
     _is_interleaved_transposed = is_interleaved_transposed(m, n, k, a->info()->data_type(), _reshape_b_only_on_first_run, gpu_target);

     // Check if we can run the new reshaped GEMM
     _is_G76_path = (gpu_target == GPUTarget::G76) && _is_interleaved_transposed && (data_type == DataType::F32);
-    ;

     // if _is_interleaved_transposed is set, force reinterpret_input_as_3d to be false as the output of CLGEMMInterleaveKernel will be 2D
     if(_is_interleaved_transposed)
@@ -201,7 +205,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
         if(_is_G76_path)
         {
             GEMMLHSMatrixInfo lhs_info;
-            GEMMRHSMatrixInfo rhs_info;

             // Pick up the GEMM configuration based on M,N and K
             select_gemm_configuration(m, n, lhs_info, rhs_info);
@@ -219,7 +222,7 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
             _interleave_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d());

             // Configure transpose kernel
-            _transpose_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
+            _reshape_rhs_kernel.configure(b, &_tmp_b, rhs_info);
         }
     }
@@ -286,6 +289,13 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
         mult_interleave4x4_height = 2;
     }
+    GEMMRHSMatrixInfo rhs_info;
+    rhs_info.n0 = 16 / b->element_size();
+    rhs_info.k0 = 1;
+    rhs_info.h0 = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose = false;
+
     // Check if we need to reshape the matrix A and matrix B
     const bool run_interleave_transpose = is_interleaved_transposed(m, n, k, a->data_type(), reshape_b_only_on_first_run, gpu_target);
@@ -308,7 +318,6 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
         if(is_G76_path)
         {
             GEMMLHSMatrixInfo lhs_info;
-            GEMMRHSMatrixInfo rhs_info;

             // Pick up the GEMM configuration based on M,N and K
             select_gemm_configuration(m, n, lhs_info, rhs_info);
@@ -328,10 +337,9 @@ Status CLGEMM::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso
             // Validate interleave kernel
             auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_interleaved_shape(*a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d())));
             ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &tmp_a_info, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d()));
-
             // Validate transpose kernel
-            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &tmp_b_info, mult_transpose1xW_width));
+            auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
         }
     }
@@ -371,14 +379,7 @@ void CLGEMM::run()
             if(!_reshape_b_only_on_first_run)
             {
                 // Run transpose kernel
-                if(_is_G76_path)
-                {
-                    CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
-                }
-                else
-                {
-                    CLScheduler::get().enqueue(_transpose_kernel, false);
-                }
+                CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
             }
         }
@@ -409,14 +410,7 @@ void CLGEMM::prepare()
         {
             // Run transpose kernel and mark original weights tensor as unused
             _tmp_b.allocator()->allocate();
-            if(_is_G76_path)
-            {
-                CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
-            }
-            else
-            {
-                CLScheduler::get().enqueue(_transpose_kernel, false);
-            }
+            CLScheduler::get().enqueue(_reshape_rhs_kernel, false);
             _original_b->mark_as_unused();
         }
         CLScheduler::get().queue().finish();

diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 2d4d231f5f..cf20bc6a7a 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -108,6 +108,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     const ICLTensor *matrix_a = a;
     const ICLTensor *matrix_b = b;
+    GEMMRHSMatrixInfo rhs_info;

     // Arguments used by GEMMReshapeInfo
     // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo
@@ -120,6 +121,11 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     const int depth_output_gemm3d = gemm_info.depth_output_gemm3d();
     constexpr int mult_transpose1xW_width = 1;
     constexpr int mult_interleave4x4_height = 1;
+    rhs_info.n0 = 16 / b->info()->element_size();
+    rhs_info.k0 = 1;
+    rhs_info.h0 = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose = false;

     // Check if we need to reshape the matrix A and matrix B
     _is_interleaved_transposed = is_interleaved_transposed(m, n, k, _reshape_b_only_on_first_run, gpu_target);
@@ -142,7 +148,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
         _mtx_a_reshape_kernel.configure(a, &_tmp_a, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d(), unroll_block);

         // Configure transpose kernel
-        _mtx_b_reshape_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
+        _mtx_b_reshape_kernel.configure(b, &_tmp_b, rhs_info);
     }

     // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
@@ -233,8 +239,9 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
     const ITensorInfo *matrix_a_info = a;
     const ITensorInfo *matrix_b_info = b;

-    TensorInfo tmp_a_info{};
-    TensorInfo tmp_b_info{};
+    TensorInfo        tmp_a_info{};
+    TensorInfo        tmp_b_info{};
+    GEMMRHSMatrixInfo rhs_info;

     bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d();
     const int m = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1);
@@ -243,6 +250,11 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
     constexpr int mult_transpose1xW_width = 1;
     constexpr int mult_interleave4x4_height = 1;
     const int depth_output_gemm3d = gemm_info.depth_output_gemm3d();
+    rhs_info.n0 = 16 / b->element_size();
+    rhs_info.k0 = 1;
+    rhs_info.h0 = mult_transpose1xW_width;
+    rhs_info.interleave = false;
+    rhs_info.transpose = false;

     bool reshape_matrices = is_interleaved_transposed(m, n, k, gemm_info.reshape_b_only_on_first_run(), CLScheduler::get().target());
@@ -264,8 +276,9 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
         ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &tmp_a_info, mult_interleave4x4_height, gemm_info.reinterpret_input_as_3d()));

         // Validate transpose kernel
-        auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_transpose1xW_with_element_size_shape(*b, mult_transpose1xW_width)));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &tmp_b_info, mult_transpose1xW_width));
+
+        auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
     }

     TensorInfo info_vector_sum_col, info_vector_sum_row;
-- 
cgit v1.2.1
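
Every call site touched by this patch performs the same mapping from the old CLGEMMTranspose1xWKernel parameters onto GEMMRHSMatrixInfo: the fixed 16-byte transpose block becomes n0 = 16 / element_size, k0 = 1 expresses the 1xW block shape, the old mult_transpose1xW_width multiplier carries over as h0, and both the interleave and transpose flags stay off. A minimal sketch of that mapping as a standalone helper follows; the helper name and free-function form are illustrative only (not part of the library), while the field values are exactly the ones assigned throughout the diff above:

    #include "arm_compute/core/Types.h"

    // Builds the GEMMRHSMatrixInfo that reproduces the legacy transpose1xW
    // layout, mirroring the assignments made at each call site in this patch.
    arm_compute::GEMMRHSMatrixInfo make_transpose1xW_rhs_info(size_t element_size, unsigned int mult_transpose1xW_width)
    {
        arm_compute::GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = 16 / element_size;       // W elements per block: 4 for F32, 16 for QASYMM8
        rhs_info.k0         = 1;                       // one row per block: a pure 1xW transpose
        rhs_info.h0         = mult_transpose1xW_width; // old width multiplier maps straight to h0
        rhs_info.interleave = false;                   // no interleaving inside the block
        rhs_info.transpose  = false;                   // no extra block-level transpose
        return rhs_info;
    }

The patch relies on compute_rhs_reshaped_shape() producing, for this rhs_info, the same shape that compute_transpose1xW_with_element_size_shape() used to produce; that is why the validate() paths can swap one call for the other while leaving the reshaped-shape mismatch checks unchanged.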