From ed5fe69b6612a5cf0dd52340f6781885d77afbc9 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 9 Jul 2020 08:41:10 +0100
Subject: COMPMID-3326: Update heuristic for GEMMReshaped and GEMMReshapedOnlyRHS

- Update the heuristic for Arm Mali-G76 (F32) in order to use the OpenCL
  image2d object on GEMM
- Create utility function to validate the support for image2d

Change-Id: I0913ac5f27fd07992b0ac188af753a2abeb034ca
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3559
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 .../CLGEMMReshapedKernelConfigurationBifrost.cpp   | 44 +++++++++++++++++++++-
 1 file changed, 42 insertions(+), 2 deletions(-)

(limited to 'src/core/CL/gemm/reshaped')

diff --git a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp
index f2954be7d2..a533f14d02 100644
--- a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp
+++ b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp
@@ -27,6 +27,9 @@
 #include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/core/CL/gemm/CLGEMMHelpers.h"
 #include "arm_compute/core/GPUTarget.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 
 #include <map>
 #include <utility>
@@ -35,6 +38,8 @@ namespace arm_compute
 {
 namespace cl_gemm
 {
+using namespace arm_compute::misc::shape_calculator;
+
 CLGEMMReshapedKernelConfigurationBifrost::CLGEMMReshapedKernelConfigurationBifrost(GPUTarget gpu)
     : ICLGEMMKernelConfiguration(gpu)
 {
@@ -153,13 +158,48 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedKernelConfiguratio
     ARM_COMPUTE_UNUSED(k);
     ARM_COMPUTE_UNUSED(b);
 
+    GEMMLHSMatrixInfo lhs_info_buf;
+    GEMMRHSMatrixInfo rhs_info_buf;
+    GEMMLHSMatrixInfo lhs_info_img;
+    GEMMRHSMatrixInfo rhs_info_img;
+
+    // Get lhs_info/rhs_info in case of OpenCL buffer
     if(n <= 4)
     {
-        return configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
+        std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 2, 8, 16, 16, true, false, false, true);
+    }
+    else
+    {
+        std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 4, 4, 2, 8, 16, false, false, false, true);
+    }
+
+    // Get lhs_info/rhs_info in case of OpenCL image
+    // Condition on the GPU workload
+    if((m / 4) * (n / 4) >= 2560)
+    {
+        // Big workload
+        std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 8, true, true, true, false, true);
+    }
+    else
+    {
+        // Small workload
+        std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 1, true, true, true, false, true);
+    }
+
+    const TensorInfo  tensor_rhs_info(TensorShape(n, k, b), 1, DataType::F32);
+    const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, rhs_info_img);
+    const TensorInfo  tensor_reshaped_info(shape, 1, DataType::F32);
+
+    // In case of vector by matrix with few work-items, we use the OpenCL buffer rather than the OpenCL image2d
+    const bool use_cl_image2d = (n <= 4) ? false : true;
+
+    if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, rhs_info_img)) && use_cl_image2d)
+    {
+        return std::make_pair(lhs_info_img, rhs_info_img);
+    }
     else
     {
-        return configure_lhs_rhs_info(m, n, 4, 4, 2, 8, 16, false, false, false, true);
+        return std::make_pair(lhs_info_buf, rhs_info_buf);
     }
 }
--
cgit v1.2.1
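
Note (not part of the patch): the standalone C++ sketch below restates the decision the updated Mali-G76 (F32) heuristic makes, for readers who want the logic without the GEMM configuration details. It is a minimal sketch under stated assumptions: select_rhs_storage() and supports_image2d() are hypothetical names introduced here, standing in for the patched configure_G76_f32() and the library's validate_image2d_support_on_rhs() respectively; the n <= 4 guard and the (m / 4) * (n / 4) >= 2560 cut-off are the values used in the diff above.

// Sketch only: not Compute Library code. It mirrors the selection introduced by
// this patch: prefer the image2d GEMM configuration when the reshaped RHS can be
// exported to a cl_image and the shape is not a thin vector-by-matrix case,
// otherwise keep the OpenCL buffer configuration.

#include <iostream>

enum class RhsStorage
{
    Buffer, // reshaped RHS read through an OpenCL buffer
    Image2d // reshaped RHS read through an OpenCL image2d
};

// Hypothetical stand-in for validate_image2d_support_on_rhs(): the real check
// validates pitch alignment, maximum image width/height, and extension support.
// Assumed to pass here.
bool supports_image2d(unsigned int /*n*/, unsigned int /*k*/, unsigned int /*b*/)
{
    return true;
}

RhsStorage select_rhs_storage(unsigned int n, unsigned int k, unsigned int b)
{
    // Vector-by-matrix shapes with few work-items stay on the buffer path,
    // mirroring the use_cl_image2d guard in the patch.
    const bool use_cl_image2d = n > 4;

    return (use_cl_image2d && supports_image2d(n, k, b)) ? RhsStorage::Image2d : RhsStorage::Buffer;
}

int main()
{
    const unsigned int m = 256, n = 256, k = 64, b = 1;

    // (m / 4) * (n / 4) >= 2560 is the cut-off the patch uses to pick the
    // "big workload" block sizes for the image2d configuration.
    const bool big_workload = (m / 4) * (n / 4) >= 2560;

    std::cout << "big workload: " << (big_workload ? "yes" : "no") << '\n'
              << "rhs storage : "
              << (select_rhs_storage(n, k, b) == RhsStorage::Image2d ? "image2d" : "buffer") << '\n';
    return 0;
}

In the actual patch both the buffer and the image2d lhs_info/rhs_info pairs are computed up front, and the image2d pair is returned only when both conditions hold, so the buffer configuration remains the fallback whenever the image2d export is rejected.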