From 839e19865d4b654899d1da5cfb94304841e7f210 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 29 Oct 2020 13:36:50 +0000
Subject: COMPMID-3930: Update CLGEMM heuristic for fp16. Mali-G76

- Since the GEMM kernel can now work without padding, the heuristic
  needs to be fine-tuned to exploit this feature
- The heuristic affects Mali-G76 FP16 only

Change-Id: Ia430627f02131ad956ce2219b80c83c8e7cabaf2
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4284
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: SiCong Li
---
 src/core/CL/gemm/CLGEMMHelpers.cpp                 | 25 ++++++-
 src/core/CL/gemm/CLGEMMHelpers.h                   | 19 ++++++
 .../CLGEMMReshapedKernelConfigurationBifrost.cpp   | 79 +++-------------------
 ...MMReshapedOnlyRHSKernelConfigurationBifrost.cpp | 69 +++++++++++++++++--
 .../CL/gemm/CLGEMMKernelSelectionBifrost.cpp       | 66 +++++++++++++++++-
 src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.h |  1 +
 6 files changed, 179 insertions(+), 80 deletions(-)

diff --git a/src/core/CL/gemm/CLGEMMHelpers.cpp b/src/core/CL/gemm/CLGEMMHelpers.cpp
index 877bf1e047..d60626b158 100644
--- a/src/core/CL/gemm/CLGEMMHelpers.cpp
+++ b/src/core/CL/gemm/CLGEMMHelpers.cpp
@@ -27,6 +27,7 @@
 #include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/core/CL/OpenCL.h"
 #include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"

 #include <utility>

@@ -34,11 +35,13 @@ namespace arm_compute
 {
 namespace cl_gemm
 {
+using namespace arm_compute::misc::shape_calculator;
+
 std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned int m, unsigned int n, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
                                                                        bool lhs_interleave, bool rhs_interleave, bool lhs_transpose, bool rhs_transpose, bool export_to_cl_image)
 {
-    v0 = ((m / (m0 * v0)) == 0) ? 1 : v0;
-    h0 = ((n / (n0 * h0)) == 0) ? 1 : h0;
+    v0 = std::max(std::min(static_cast<int>(m / m0), static_cast<int>(v0)), static_cast<int>(1));
+    h0 = std::max(std::min(static_cast<int>(n / n0), static_cast<int>(h0)), static_cast<int>(1));

     const GEMMLHSMatrixInfo lhs_info(m0, k0, v0, lhs_transpose, lhs_interleave);
     const GEMMRHSMatrixInfo rhs_info(n0, k0, h0, rhs_transpose, rhs_interleave, export_to_cl_image);
@@ -46,6 +49,24 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned
     return std::make_pair(lhs_info, rhs_info);
 }

+std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> select_lhs_rhs_info(std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_img,
+                                                                    std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_buf,
+                                                                    unsigned int n, unsigned int k, unsigned int b, DataType data_type)
+{
+    const TensorInfo  tensor_rhs_info(TensorShape(n, k, b), 1, data_type);
+    const TensorShape shape = compute_rhs_reshaped_shape(tensor_rhs_info, info_img.second);
+    const TensorInfo  tensor_reshaped_info(shape, 1, data_type);
+
+    if(bool(validate_image2d_support_on_rhs(tensor_reshaped_info, info_img.second)))
+    {
+        return info_img;
+    }
+    else
+    {
+        return info_buf;
+    }
+}
+
 void update_padding_for_cl_image(ITensorInfo *tensor)
 {
     constexpr unsigned int num_floats_per_pixel = 4;
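
Note on the v0/h0 change above: the old code dropped the requested interleave factor to 1 as soon as the block did not fit, whereas the new code clamps it to the largest value that still fits, i.e. v0 is bounded to [1, m / m0] and h0 to [1, n / n0]. A minimal standalone sketch of the two behaviours (not library code; the shape below is illustrative):

    // Contrast the old and new clamping of the LHS interleave factor v0
    // (the RHS factor h0 is handled the same way with n and n0).
    #include <algorithm>
    #include <cassert>

    unsigned int clamp_v0_old(unsigned int m, unsigned int m0, unsigned int v0)
    {
        // Old rule: fall back to v0 = 1 whenever m / (m0 * v0) == 0.
        return ((m / (m0 * v0)) == 0) ? 1 : v0;
    }

    unsigned int clamp_v0_new(unsigned int m, unsigned int m0, unsigned int v0)
    {
        // New rule: clamp v0 to [1, m / m0], keeping as much interleaving as fits.
        return std::max(std::min(static_cast<int>(m / m0), static_cast<int>(v0)), 1);
    }

    int main()
    {
        // m = 12 rows, m0 = 4 rows per block, requested v0 = 8:
        assert(clamp_v0_old(12, 4, 8) == 1); // old rule loses all interleaving
        assert(clamp_v0_new(12, 4, 8) == 3); // new rule keeps the 3 blocks that fit
        return 0;
    }
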
diff --git a/src/core/CL/gemm/CLGEMMHelpers.h b/src/core/CL/gemm/CLGEMMHelpers.h
index 013c068cf7..57624673c0 100644
--- a/src/core/CL/gemm/CLGEMMHelpers.h
+++ b/src/core/CL/gemm/CLGEMMHelpers.h
@@ -54,6 +54,25 @@ namespace cl_gemm
 std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> configure_lhs_rhs_info(unsigned int m, unsigned int n, unsigned int m0, unsigned int n0, unsigned int k0, unsigned int v0, unsigned int h0,
                                                                        bool lhs_interleave, bool rhs_interleave, bool lhs_transpose, bool rhs_transpose, bool export_to_cl_image = false);

+/** Select @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
+ *
+ * This function accepts two pairs of GEMMLHSMatrixInfo/GEMMRHSMatrixInfo, where only the first pair has cl_image2d support,
+ * and selects one of them by validating the GEMMRHSMatrixInfo. If the validation passes, the function returns
+ * the first GEMMLHSMatrixInfo/GEMMRHSMatrixInfo pair (with cl_image2d support); otherwise, it falls back to the second pair.
+ *
+ * @param[in] info_img  GEMMLHSMatrixInfo/GEMMRHSMatrixInfo with cl_image2d support
+ * @param[in] info_buf  GEMMLHSMatrixInfo/GEMMRHSMatrixInfo to fall back to if cl_image2d cannot be used
+ * @param[in] n         Number of columns (N) in the RHS matrix before reshaping
+ * @param[in] k         Number of rows (K) in the RHS matrix before reshaping
+ * @param[in] b         Batch size
+ * @param[in] data_type Data type
+ *
+ * @return @ref GEMMLHSMatrixInfo and @ref GEMMRHSMatrixInfo
+ */
+std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> select_lhs_rhs_info(std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_img,
+                                                                    std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> info_buf,
+                                                                    unsigned int n, unsigned int k, unsigned int b, DataType data_type);
+
 /** Update padding required to export the OpenCL buffer to OpenCL image2d
  *
  * @param[in,out] tensor ITensorInfo of the tensor required to be exported to OpenCL image2d
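
For reference, the call pattern used by the configurators later in this patch looks as follows (condensed from the reshaped-only-RHS changes below; m, n, k and b are whatever the caller received): a cl_image2d-based candidate is prepared next to a plain OpenCL-buffer fall-back, and select_lhs_rhs_info() returns whichever one validates.

    GEMMLHSMatrixInfo lhs_info_buf;
    GEMMRHSMatrixInfo rhs_info_buf;
    GEMMLHSMatrixInfo lhs_info_img;
    GEMMRHSMatrixInfo rhs_info_img;

    // Candidate exporting the reshaped RHS to cl_image2d (last argument = true)...
    std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, true, false, false, true);
    // ...and the plain OpenCL-buffer fall-back.
    std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);

    return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
                               std::make_pair(lhs_info_buf, rhs_info_buf),
                               n, k, b, DataType::F16);
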
diff --git a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp
index 00c284facc..c1ca187a70 100644
--- a/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp
+++ b/src/core/CL/gemm/reshaped/CLGEMMReshapedKernelConfigurationBifrost.cpp
@@ -205,95 +205,38 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedKernelConfiguratio
 std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedKernelConfigurationBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
 {
-    ARM_COMPUTE_UNUSED(k);
-
-    const float r_mn     = static_cast<float>(m) / static_cast<float>(n);
     const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
+    const float r_mk     = static_cast<float>(m) / static_cast<float>(k);
+    const float r_nk     = static_cast<float>(n) / static_cast<float>(k);

-    if(workload <= 1049.59f)
+    if(workload <= 1422.40f)
     {
-        if(b <= 5)
+        if(r_mk <= 2.45f)
         {
-            if(workload <= 790.39f)
+            if(workload <= 801.60f)
             {
-                return configure_lhs_rhs_info(m, n, 2, 4, 4, 2, 2, false, false, true, false, false);
+                return configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 2, true, false, true, false, false);
             }
             else
             {
-                if(workload <= 982.39f)
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 2, 4, 4, 4, false, false, true, false, false);
-                }
-                else
-                {
-                    return configure_lhs_rhs_info(m, n, 2, 4, 4, 2, 1, false, true, true, false, false);
-                }
+                return configure_lhs_rhs_info(m, n, 4, 2, 4, 2, 2, false, false, true, false, false);
             }
         }
         else
         {
-            if(r_mn <= 0.21f)
+            if(r_nk <= 0.67f)
             {
-                if(r_mn <= 0.11f)
-                {
-                    return configure_lhs_rhs_info(m, n, 2, 4, 4, 2, 2, false, false, true, false, false);
-                }
-                else
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, false, true, true, false, false);
-                }
+                return configure_lhs_rhs_info(m, n, 4, 2, 4, 2, 2, false, false, true, false, false);
             }
             else
             {
-                return configure_lhs_rhs_info(m, n, 2, 4, 4, 2, 2, false, false, true, false, false);
+                return configure_lhs_rhs_info(m, n, 2, 4, 4, 4, 1, false, true, false, true, false);
             }
         }
     }
     else
     {
-        if(n <= 200)
-        {
-            if(workload <= 29772.79f)
-            {
-                if(m <= 64.5)
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 4, true, false, true, false, false);
-                }
-                else
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, false, true, true, false, false);
-                }
-            }
-            else
-            {
-                if(r_mn <= 1.09f)
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, false, true, true, false, false);
-                }
-                else
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, true, true, true, false, false);
-                }
-            }
-        }
-        else
-        {
-            if(m <= 43)
-            {
-                return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 4, true, false, true, false, false);
-            }
-            else
-            {
-                if(workload <= 26364.79f)
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 2, false, true, true, false, false);
-                }
-                else
-                {
-                    return configure_lhs_rhs_info(m, n, 4, 4, 4, 4, 4, false, true, true, false, false);
-                }
-            }
-        }
+        return configure_lhs_rhs_info(m, n, 4, 4, 4, 2, 4, true, true, true, false, false);
     }
 }
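
Worked example for the new configure_G76_f16() tree above (illustrative shape): with m = n = k = 64 and b = 1, workload = 64 * 64 * 1 / 20 = 204.8 and r_mk = 64 / 64 = 1.0, so the heuristic follows the workload <= 1422.40f, r_mk <= 2.45f, workload <= 801.60f path and returns the (m0, n0, k0, v0, h0) = (2, 4, 4, 1, 2) configuration with LHS interleaving. A standalone trace of the metrics:

    #include <cstdio>

    int main()
    {
        const unsigned int m = 64, n = 64, k = 64, b = 1;

        const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
        const float r_mk     = static_cast<float>(m) / static_cast<float>(k);

        // Prints: workload = 204.80, r_mk = 1.00
        // -> workload <= 1422.40f, r_mk <= 2.45f, workload <= 801.60f
        // -> configure_lhs_rhs_info(m, n, 2, 4, 4, 1, 2, true, false, true, false, false)
        std::printf("workload = %.2f, r_mk = %.2f\n", workload, r_mk);
        return 0;
    }
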
diff --git a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp
index 0a0fc5d152..3105db6693 100644
--- a/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp
+++ b/src/core/CL/gemm/reshaped_only_rhs/CLGEMMReshapedOnlyRHSKernelConfigurationBifrost.cpp
@@ -151,15 +151,13 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedOnlyRHSKernelConfi
     // Get lhs_info/rhs_info in case of OpenCL buffer
     if(m == 1)
     {
-        if((n / 4) >= 2048)
+        if(n <= 204.0)
         {
-            const unsigned int h0 = std::max(n / 4, 1U);
-            std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 4, 8, 1, h0, false, true, false, true);
+            return configure_lhs_rhs_info(m, n, 1, 2, 16, 1, 16, false, true, false, true, false);
         }
         else
         {
-            const unsigned int h0 = std::max(n / 2, 1U);
-            std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 1, 2, 8, 1, h0, false, true, false, true);
+            return configure_lhs_rhs_info(m, n, 1, 2, 8, 1, 32, false, true, false, true, false);
         }
     }
     else
@@ -247,7 +245,6 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedOnlyRHSKernelConfi
 std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedOnlyRHSKernelConfigurationBifrost::configure_G76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b)
 {
     ARM_COMPUTE_UNUSED(k);
-    ARM_COMPUTE_UNUSED(b);

     if(m == 1)
     {
@@ -255,7 +252,65 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> CLGEMMReshapedOnlyRHSKernelConfi
     }
     else
     {
-        return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 2, false, true, false, true);
+        const float r_mn     = static_cast<float>(m) / static_cast<float>(n);
+        const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
+
+        if(workload <= 7449.60f)
+        {
+            if(workload <= 691.60f)
+            {
+                return configure_lhs_rhs_info(m, n, 2, 2, 8, 1, 8, false, false, false, false, false);
+            }
+            else
+            {
+                if(workload <= 4155.20f)
+                {
+                    return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
+                }
+                else
+                {
+                    return configure_lhs_rhs_info(m, n, 5, 8, 2, 1, 32, false, false, false, false, false);
+                }
+            }
+        }
+        else
+        {
+            if(workload <= 16300.80f)
+            {
+                if(r_mn <= 44.56f)
+                {
+                    GEMMLHSMatrixInfo lhs_info_buf;
+                    GEMMRHSMatrixInfo rhs_info_buf;
+                    GEMMLHSMatrixInfo lhs_info_img;
+                    GEMMRHSMatrixInfo rhs_info_img;
+
+                    std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, true, false, false, true);
+                    std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
+
+                    return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
+                                               std::make_pair(lhs_info_buf, rhs_info_buf),
+                                               n, k, b, DataType::F16);
+                }
+                else
+                {
+                    return configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
+                }
+            }
+            else
+            {
+                GEMMLHSMatrixInfo lhs_info_buf;
+                GEMMRHSMatrixInfo rhs_info_buf;
+                GEMMLHSMatrixInfo lhs_info_img;
+                GEMMRHSMatrixInfo rhs_info_img;
+
+                std::tie(lhs_info_img, rhs_info_img) = configure_lhs_rhs_info(m, n, 5, 4, 4, 1, 2, false, true, false, false, true);
+                std::tie(lhs_info_buf, rhs_info_buf) = configure_lhs_rhs_info(m, n, 5, 2, 8, 1, 16, false, false, false, false, false);
+
+                return select_lhs_rhs_info(std::make_pair(lhs_info_img, rhs_info_img),
+                                           std::make_pair(lhs_info_buf, rhs_info_buf),
+                                           n, k, b, DataType::F16);
+            }
+        }
     }
 }
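
Worked example for the FP16 reshaped-only-RHS tree above (illustrative shape): with m = n = 512 and b = 1, workload = 512 * 512 * 1 / 20 = 13107.2 and r_mn = 1.0, which lands in the 7449.60f < workload <= 16300.80f, r_mn <= 44.56f branch, so both the cl_image2d and the buffer configurations are built and select_lhs_rhs_info() decides between them (k and b only matter for that validation step). A standalone trace of the metrics:

    #include <cstdio>

    int main()
    {
        const unsigned int m = 512, n = 512, b = 1;

        const float workload = (static_cast<float>(m) * static_cast<float>(n) * static_cast<float>(b)) / 20.0f;
        const float r_mn     = static_cast<float>(m) / static_cast<float>(n);

        // Prints: workload = 13107.20, r_mn = 1.00
        // -> 7449.60f < workload <= 16300.80f and r_mn <= 44.56f
        // -> build the cl_image2d and buffer configs and let select_lhs_rhs_info() pick one
        std::printf("workload = %.2f, r_mn = %.2f\n", workload, r_mn);
        return 0;
    }
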
diff --git a/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.cpp b/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.cpp
index 73b90568f5..7c6efe3f11 100644
--- a/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.cpp
+++ b/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.cpp
@@ -72,7 +72,7 @@ CLGEMMKernelType CLGEMMKernelSelectionBifrost::select_kernel(const CLGEMMKernelS
     static std::map<DataType, FunctionExecutorPtr> gemm_g76_configs =
     {
         { DataType::F32, &CLGEMMKernelSelectionBifrost::g76_f32 },
-        { DataType::F16, &CLGEMMKernelSelectionBifrost::default_f16 },
+        { DataType::F16, &CLGEMMKernelSelectionBifrost::g76_f16 },
         { DataType::QASYMM8, &CLGEMMKernelSelectionBifrost::default_q8 },
         { DataType::QASYMM8_SIGNED, &CLGEMMKernelSelectionBifrost::default_q8 },
         { DataType::QSYMM8, &CLGEMMKernelSelectionBifrost::default_q8 },
@@ -188,12 +188,10 @@ CLGEMMKernelType CLGEMMKernelSelectionBifrost::g76_f32(unsigned int m, unsigned
     {
         return CLGEMMKernelType::NATIVE_V1;
     }
-
     if(m == 1)
     {
         return CLGEMMKernelType::RESHAPED_ONLY_RHS;
     }
-
     if(k <= 496)
     {
         if(n <= 544)
@@ -239,6 +237,68 @@ CLGEMMKernelType CLGEMMKernelSelectionBifrost::g76_f32(unsigned int m, unsigned
     }
 }

+CLGEMMKernelType CLGEMMKernelSelectionBifrost::g76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant)
+{
+    ARM_COMPUTE_UNUSED(b);
+
+    if (!is_rhs_constant)
+    {
+        return CLGEMMKernelType::NATIVE_V1;
+    }
+
+    if (m == 1)
+    {
+        return CLGEMMKernelType::RESHAPED_ONLY_RHS;
+    }
+
+    const float r_mn = static_cast<float>(m) / static_cast<float>(n);
+    const float r_nk = static_cast<float>(n) / static_cast<float>(k);
+
+    if(k <= 212)
+    {
+        return CLGEMMKernelType::RESHAPED_ONLY_RHS;
+    }
+    else
+    {
+        if(r_nk <= 0.4990234375f)
+        {
+            if(k <= 1392)
+            {
+                return CLGEMMKernelType::RESHAPED_ONLY_RHS;
+            }
+            else
+            {
+                if(m <= 325)
+                {
+                    return CLGEMMKernelType::RESHAPED_ONLY_RHS;
+                }
+                else
+                {
+                    return CLGEMMKernelType::RESHAPED;
+                }
+            }
+        }
+        else
+        {
+            if(k <= 471)
+            {
+                return CLGEMMKernelType::RESHAPED_ONLY_RHS;
+            }
+            else
+            {
+                if(r_mn <= 0.04475911520421505f)
+                {
+                    return CLGEMMKernelType::RESHAPED;
+                }
+                else
+                {
+                    return CLGEMMKernelType::RESHAPED_ONLY_RHS;
+                }
+            }
+        }
+    }
+}
+
 CLGEMMKernelType CLGEMMKernelSelectionBifrost::g71_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant)
 {
     ARM_COMPUTE_UNUSED(b);
diff --git a/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.h b/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.h
index a495b48301..e3cc8e4a27 100644
--- a/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.h
+++ b/src/runtime/CL/gemm/CLGEMMKernelSelectionBifrost.h
@@ -45,6 +45,7 @@ public:

 private:
     CLGEMMKernelType g76_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant);
+    CLGEMMKernelType g76_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant);
     CLGEMMKernelType g71_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant);
     CLGEMMKernelType default_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant);
     CLGEMMKernelType default_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant);
--
cgit v1.2.1
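
Closing note on the new g76_f16() kernel selection (illustrative shapes): a 256x256x256 GEMM with a constant RHS has k = 256 > 212, r_nk = 1.0 > 0.4990234375 and k <= 471, so it stays on RESHAPED_ONLY_RHS; a 512x512 GEMM with k = 2048 has r_nk = 0.25, k > 1392 and m > 325, so it switches to the RESHAPED kernel. A standalone trace of the second case:

    #include <cstdio>

    int main()
    {
        const unsigned int m = 512, n = 512, k = 2048;

        const float r_nk = static_cast<float>(n) / static_cast<float>(k);

        // r_nk = 0.25 <= 0.4990234375f, k = 2048 > 1392, m = 512 > 325
        // -> CLGEMMKernelType::RESHAPED
        std::printf("m = %u, k = %u, r_nk = %.4f\n", m, k, r_nk);
        return 0;
    }
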