author     Gian Marco Iodice <gianmarco.iodice@arm.com>  2018-04-17 09:52:44 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:49:37 +0000
commit     fd683111bba15288dc88b7f53486f935ebeccde0 (patch)
tree       b4e249618ff95c5b3f620fc4ee9500bead24e118 /src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
parent     dfca60b8e8805966624c7c941f289e090e3d73bb (diff)
download   ComputeLibrary-fd683111bba15288dc88b7f53486f935ebeccde0.tar.gz
COMPMID-922 - CLGEMM FP16 optimizations - part1
This patch improves FP16 GEMM performance by ~20%. The results have been reported at the following Confluence page: https://confluence.arm.com/display/MLENG/GEMM+FP32+performance%3A+ACL+18.05

I am aware that in a few cases we see a slight degradation. However, those cases are memory bound anyway (fully connected layer cases).

Change-Id: I183cbb7fba55a0b5eb86532c4dca5efe096096b0
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/128044
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp')
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp  |  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 0f0646c1ce..0a0de7ad4e 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -288,12 +288,16 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));
// Create kernels according to the architecture, data type and input size.
- if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX) && data_type == DataType::F32)
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX) && is_data_type_float(data_type))
{
+ kernel_name = "gemm_mm_floating_point_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
// The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
// FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
// FC6 and FC7 of AlexNet and VGG-16).
- kernel_name = (input1->info()->dimension(0) <= 1000 && input0->info()->num_dimensions() == 1) ? "gemm_mm_floating_point_f32_bifrost_1000" : "gemm_mm_floating_point_f32_bifrost";
+ if(input1->info()->dimension(0) <= 1000 && input0->info()->num_dimensions() == 1 && data_type == DataType::F32)
+ {
+ kernel_name += "_1000";
+ }
// The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
// via exhaustive autotuning over a range of representative layer configurations.
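
For reference, a minimal standalone sketch of the kernel-name selection logic after this patch. This is an illustration only, not ACL code: the real logic lives in CLGEMMMatrixMultiplyKernel::configure() and operates on ACL types such as GPUTarget, DataType and ITensorInfo, whereas the function below takes plain strings and integers for simplicity.

#include <iostream>
#include <string>

// Sketch of the Bifrost kernel-name selection after this patch.
// data_type   : lowercase data type name, e.g. "f32" or "f16"
// cols_b      : input1 dimension 0 (number of output elements per row)
// num_dims_a  : input0 number of dimensions
std::string select_bifrost_kernel(const std::string &data_type,
                                  unsigned int cols_b,
                                  unsigned int num_dims_a)
{
    // The base name is now built from the data type, so F16 selects
    // "gemm_mm_floating_point_f16_bifrost" instead of falling through
    // to the generic non-Bifrost path as before.
    std::string kernel_name = "gemm_mm_floating_point_" + data_type + "_bifrost";

    // The "_1000" variant, tuned for 1000 or fewer output elements (e.g. the
    // last fully connected layers of AlexNet and VGG-16), only exists for F32,
    // hence the extra data-type check added by the patch.
    if(cols_b <= 1000 && num_dims_a == 1 && data_type == "f32")
    {
        kernel_name += "_1000";
    }
    return kernel_name;
}

int main()
{
    std::cout << select_bifrost_kernel("f32", 1000, 1) << '\n'; // gemm_mm_floating_point_f32_bifrost_1000
    std::cout << select_bifrost_kernel("f16", 1000, 1) << '\n'; // gemm_mm_floating_point_f16_bifrost
}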