From e52a3000d2c13bc1b66ca66b3d12b6b836982394 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Wed, 11 Apr 2018 15:59:10 +0100
Subject: COMPMID-1026 - Add support for 4x4 output tile in CLWinogradConvolutionLayer

The performance achieved can be found at the following confluence page:
https://confluence.arm.com/display/MLENG/GEMM-based+convolution+vs+Winograd-based+convolution+on+OpenCL

Change-Id: I4b690cfdd4eb4ff0cd17b14fdd49ccaa1d1dc85c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127729
Tested-by: Jenkins
Reviewed-by: Georgios Pinitas
---
 src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

(limited to 'src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp')

diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 0a0de7ad4e..805a594af6 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -286,17 +286,23 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
     else // The input tensors have not been reshaped
     {
         build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));
+        build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
 
         // Create kernels according to the architecture, data type and input size.
         if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::TNOX) && is_data_type_float(data_type))
         {
-            kernel_name = "gemm_mm_floating_point_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
-            // The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
-            // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
-            // FC6 and FC7 of AlexNet and VGG-16).
-            if(input1->info()->dimension(0) <= 1000 && input0->info()->num_dimensions() == 1 && data_type == DataType::F32)
+            kernel_name = "gemm_mm_floating_point";
+
+            if(input0->info()->num_dimensions() != 1)
+            {
+                kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost";
+            }
+            else if(input1->info()->dimension(0) <= 1000 && data_type == DataType::F32)
             {
-                kernel_name += "_1000";
+                // The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
+                // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
+                // FC6 and FC7 of AlexNet and VGG-16).
+                kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost_1000";
             }
 
             // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels
@@ -309,7 +315,6 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
         }
         else // (MIDGARD and F32) or (F16)
         {
-            build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
             kernel_name = "gemm_mm_floating_point";
         }
         build_opts.add_option("-DNUM_ELEMS_PROCESSED_PER_THREAD_Y=" + support::cpp11::to_string(num_elements_processed.y()));
-- 
cgit v1.2.1
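
For reference, the kernel-name selection that this patch introduces for the unreshaped-input path reduces to the standalone sketch below. The helper name select_gemm_kernel_name and its flattened parameters are hypothetical, introduced only for illustration; the real code queries GPUTarget and the tensors' ITensorInfo inside CLGEMMMatrixMultiplyKernel::configure().

#include <string>

// Minimal sketch (not the library's API) of the post-patch kernel selection.
// All parameters are hypothetical simplifications of the GPUTarget and
// ITensorInfo queries made in CLGEMMMatrixMultiplyKernel::configure().
std::string select_gemm_kernel_name(bool               bifrost_target,  // G71/G72/G51/G51BIG/G51LIT/TNOX
                                    bool               is_float,        // F16 or F32
                                    bool               is_f32,          // F32 specifically
                                    unsigned int       input0_num_dims, // LHS rank: 1 => vector-by-matrix
                                    unsigned int       input1_dim0,     // number of output elements
                                    const std::string &type_name)       // "f16" or "f32"
{
    std::string kernel_name = "gemm_mm_floating_point";

    if(bifrost_target && is_float)
    {
        if(input0_num_dims != 1)
        {
            // Matrix-by-matrix case: Bifrost-tuned kernel,
            // e.g. "gemm_mm_floating_point_f32_bifrost".
            kernel_name += "_" + type_name + "_bifrost";
        }
        else if(input1_dim0 <= 1000 && is_f32)
        {
            // Vector-by-matrix with at most 1000 output elements
            // (e.g. FC8 of AlexNet/VGG-16, FC1 of Inception v3).
            kernel_name += "_" + type_name + "_bifrost_1000";
        }
        // Otherwise (vector-by-matrix with more than 1000 outputs, or F16)
        // the generic "gemm_mm_floating_point" kernel is used.
    }
    // Non-Bifrost targets (the MIDGARD/F16 branch) also use the generic kernel.
    return kernel_name;
}

Under this sketch, select_gemm_kernel_name(true, true, true, 1, 800, "f32") yields "gemm_mm_floating_point_f32_bifrost_1000", while the same shape with more than 1000 output elements falls back to the generic kernel. That fallback is now reachable on Bifrost targets, which appears to be why the patch hoists the -DDATA_TYPE build option out of the MIDGARD/F16 branch so it is set on every unreshaped path.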