From fd683111bba15288dc88b7f53486f935ebeccde0 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Tue, 17 Apr 2018 09:52:44 +0100
Subject: COMPMID-922 - CLGEMM FP16 optimizations - part1

This patch improves GEMM FP16 performance by ~20%. The results have been
reported at the following Confluence page:
https://confluence.arm.com/display/MLENG/GEMM+FP32+performance%3A+ACL+18.05

I am aware that a few cases show a slight degradation. However, those cases
are memory bound anyway (fully connected layer cases).

Change-Id: I183cbb7fba55a0b5eb86532c4dca5efe096096b0
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/128044
Reviewed-by: Georgios Pinitas
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 0f0646c1ce..0a0de7ad4e 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -288,12 +288,16 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
         build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));
 
         // Create kernels according to the architecture, data type and input size.
-        if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX) && data_type == DataType::F32)
+        if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX) && is_data_type_float(data_type))
         {
+            kernel_name = "gemm_mm_floating_point_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
             // The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
             // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
             // FC6 and FC7 of AlexNet and VGG-16).
-            kernel_name = (input1->info()->dimension(0) <= 1000 && input0->info()->num_dimensions() == 1) ? "gemm_mm_floating_point_f32_bifrost_1000" : "gemm_mm_floating_point_f32_bifrost";
+            if(input1->info()->dimension(0) <= 1000 && input0->info()->num_dimensions() == 1 && data_type == DataType::F32)
+            {
+                kernel_name += "_1000";
+            }
 
             // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
             // via exhaustive autotuning over a range of representative layer configurations.
-- 
cgit v1.2.1
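
For readers following the diff, below is a minimal standalone C++ sketch of the
kernel-selection logic as it stands after this patch. The DataType enum, the
data_type_suffix() helper, and select_bifrost_kernel() are simplified stand-ins
for the library's own types and helpers (ICLTensor, lower_string,
string_from_data_type), not the actual ACL API; only the selection rules are
taken from the patch.

#include <cstddef>
#include <iostream>
#include <string>

enum class DataType { F16, F32 };

// Hypothetical helper mirroring lower_string(string_from_data_type(...)).
std::string data_type_suffix(DataType dt)
{
    return (dt == DataType::F32) ? "f32" : "f16";
}

// n_output_cols plays the role of input1->info()->dimension(0) and
// num_dims_input0 the role of input0->info()->num_dimensions().
std::string select_bifrost_kernel(DataType data_type, std::size_t n_output_cols, std::size_t num_dims_input0)
{
    // After this patch both FP32 and FP16 take the Bifrost-optimized path;
    // the data type is encoded in the kernel name instead of hard-coding "f32".
    std::string kernel_name = "gemm_mm_floating_point_" + data_type_suffix(data_type) + "_bifrost";

    // The "_1000" variant (tuned for 1000 or fewer output elements, e.g. the
    // last fully connected layers of AlexNet/VGG-16) exists only for FP32.
    if(n_output_cols <= 1000 && num_dims_input0 == 1 && data_type == DataType::F32)
    {
        kernel_name += "_1000";
    }
    return kernel_name;
}

int main()
{
    std::cout << select_bifrost_kernel(DataType::F16, 4096, 2) << "\n"; // gemm_mm_floating_point_f16_bifrost
    std::cout << select_bifrost_kernel(DataType::F32, 1000, 1) << "\n"; // gemm_mm_floating_point_f32_bifrost_1000
}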