about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authorMichalis Spyrou <michalis.spyrou@arm.com>2018-02-22 18:07:43 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:48:35 +0000
commita9676118fd2a0e5bc916969af83ecee049bae76b (patch)
treef67ff64d962a2f802700f6b26a9ab160c04c721d /src
parent2bc74410251dcbaf17a7c5447317aa6d0171972a (diff)
downloadComputeLibrary-a9676118fd2a0e5bc916969af83ecee049bae76b.tar.gz
COMPMID-886 Don't use LWS hints by default for GPU post Mali-G72
Change-Id: I64cb2d7f9513d69aebd9307a803b1b2c9c0e04c3
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/121929
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--src/core/CL/CLHelpers.cpp108
-rw-r--r--src/core/CL/CLKernelLibrary.cpp18
-rw-r--r--src/core/CL/kernels/CLCol2ImKernel.cpp4
-rw-r--r--src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp4
-rw-r--r--src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp5
-rw-r--r--src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp6
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp10
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp55
-rw-r--r--src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp4
-rw-r--r--src/core/CL/kernels/CLIm2ColKernel.cpp4
-rw-r--r--src/core/CL/kernels/CLPoolingLayerKernel.cpp4
-rw-r--r--src/runtime/CL/functions/CLGEMM.cpp4
-rw-r--r--src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp2
13 files changed, 160 insertions, 68 deletions
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index ef517aa589..eb1b06e7a8 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -35,9 +35,37 @@ namespace
{
arm_compute::GPUTarget get_bifrost_target(const std::string &version)
{
- if(version == "70")
+ if(version == "G71")
{
- return arm_compute::GPUTarget::G70;
+ return arm_compute::GPUTarget::G71;
+ }
+ else if(version == "G72")
+ {
+ return arm_compute::GPUTarget::G72;
+ }
+ else if(version == "G51")
+ {
+ return arm_compute::GPUTarget::G51;
+ }
+ else if(version == "G51BIG")
+ {
+ return arm_compute::GPUTarget::G51BIG;
+ }
+ else if(version == "G51LIT")
+ {
+ return arm_compute::GPUTarget::G51LIT;
+ }
+ else if(version == "TNOX")
+ {
+ return arm_compute::GPUTarget::TNOX;
+ }
+ else if(version == "TTRX")
+ {
+ return arm_compute::GPUTarget::TTRX;
+ }
+ else if(version == "TBOX")
+ {
+ return arm_compute::GPUTarget::TBOX;
}
else
{
@@ -47,16 +75,21 @@ arm_compute::GPUTarget get_bifrost_target(const std::string &version)
arm_compute::GPUTarget get_midgard_target(const std::string &version)
{
- switch(version[0])
- {
- case '6':
- return arm_compute::GPUTarget::T600;
- case '7':
- return arm_compute::GPUTarget::T700;
- case '8':
- return arm_compute::GPUTarget::T800;
- default:
- return arm_compute::GPUTarget::MIDGARD;
+ if(version == "T600")
+ {
+ return arm_compute::GPUTarget::T600;
+ }
+ else if(version == "T700")
+ {
+ return arm_compute::GPUTarget::T700;
+ }
+ else if(version == "T800")
+ {
+ return arm_compute::GPUTarget::T800;
+ }
+ else
+ {
+ return arm_compute::GPUTarget::MIDGARD;
}
}
@@ -159,39 +192,58 @@ const std::string &string_from_target(GPUTarget target)
{ GPUTarget::T600, "t600" },
{ GPUTarget::T700, "t700" },
{ GPUTarget::T800, "t800" },
- { GPUTarget::G70, "g70" }
+ { GPUTarget::G71, "g71" },
+ { GPUTarget::G72, "g72" },
+ { GPUTarget::G51, "g51" },
+ { GPUTarget::G51BIG, "g51big" },
+ { GPUTarget::G51LIT, "g51lit" },
+ { GPUTarget::TNOX, "tnox" },
+ { GPUTarget::TTRX, "ttrx" },
+ { GPUTarget::TBOX, "tbox" }
};
return gpu_target_map[target];
}
-GPUTarget get_target_from_device(cl::Device &device)
+GPUTarget get_target_from_name(const std::string &device_name)
{
- // Query device name size
- std::string device_name = device.getInfo<CL_DEVICE_NAME>();
- std::regex mali_regex(R"(Mali-([TG])(\d+))");
+ std::regex mali_regex(R"(Mali-(.*))");
std::smatch name_parts;
const bool found_mali = std::regex_search(device_name, name_parts, mali_regex);
if(!found_mali)
{
- ARM_COMPUTE_LOG_INFO_MSG_CORE("Can't find valid Mali GPU. Target is set to MIDGARD.");
- return GPUTarget::MIDGARD;
+ ARM_COMPUTE_LOG_INFO_MSG_CORE("Can't find valid Mali GPU. Target is set to UNKNOWN.");
+ return GPUTarget::UNKNOWN;
}
const char target = name_parts.str(1)[0];
- const std::string &version = name_parts.str(2);
+ const std::string &version = name_parts.str(1);
+
+ std::regex future_regex(R"(.*X)");
+ const bool is_future_bifrost = std::regex_search(version, future_regex);
- switch(target)
+ if(target == 'G' || is_future_bifrost)
{
- case 'T':
- return get_midgard_target(version);
- case 'G':
- return get_bifrost_target(version);
- default:
- ARM_COMPUTE_LOG_INFO_MSG_CORE("Mali GPU unknown. Target is set to the default one.");
- return GPUTarget::MIDGARD;
+ return get_bifrost_target(version);
+ }
+ else if(target == 'T')
+ {
+ return get_midgard_target(version);
}
+ else
+ {
+ ARM_COMPUTE_LOG_INFO_MSG_CORE("Mali GPU unknown. Target is set to the default one.");
+ return GPUTarget::UNKNOWN;
+ }
+}
+
+GPUTarget get_target_from_device(cl::Device &device)
+{
+ // Query device name size
+ std::string device_name = device.getInfo<CL_DEVICE_NAME>();
+
+ return get_target_from_name(device_name);
}
GPUTarget get_arch_from_target(GPUTarget target)
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index caf017d984..aba20448e7 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -838,7 +838,23 @@ size_t CLKernelLibrary::max_local_workgroup_size(const cl::Kernel &kernel) const
cl::NDRange CLKernelLibrary::default_ndrange() const
{
- return cl::NDRange(128u, 1);
+ cl::Device device = cl::Device::getDefault();
+ GPUTarget _target = get_target_from_device(device);
+ cl::NDRange default_range;
+
+ switch(_target)
+ {
+ case GPUTarget::MIDGARD:
+ case GPUTarget::T600:
+ case GPUTarget::T700:
+ case GPUTarget::T800:
+ default_range = cl::NDRange(128u, 1);
+ break;
+ default:
+ default_range = cl::NullRange;
+ }
+
+ return default_range;
}
std::string CLKernelLibrary::get_device_version()
diff --git a/src/core/CL/kernels/CLCol2ImKernel.cpp b/src/core/CL/kernels/CLCol2ImKernel.cpp
index 8ccec06c37..c66d64332a 100644
--- a/src/core/CL/kernels/CLCol2ImKernel.cpp
+++ b/src/core/CL/kernels/CLCol2ImKernel.cpp
@@ -111,8 +111,8 @@ void CLCol2ImKernel::configure(const ICLTensor *input, ICLTensor *output, std::p
// Configure the local work size for Bifrost with a value obtained
// via exhaustive autotuning over 30 representative tensor shapes.
- const GPUTarget gpu_target = get_arch_from_target(get_target());
- if(gpu_target == GPUTarget::BIFROST)
+ const GPUTarget gpu_target = get_target();
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
if((_convolved_dims.first == 7) || (_convolved_dims.first == 14))
{
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
index 29564b36c9..7a47bcc704 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3Kernel.cpp
@@ -118,7 +118,7 @@ void CLDepthwiseConvolutionLayer3x3Kernel::configure(const ICLTensor *input, con
// Configure the local work size for Bifrost with a value obtained
// via exhaustive autotuning for the MobileNets tensor shapes.
- const GPUTarget gpu_target = get_arch_from_target(get_target());
+ const GPUTarget gpu_target = get_target();
// Configure kernel window
unsigned int num_elems_read_per_iteration_x = 0;
@@ -151,7 +151,7 @@ void CLDepthwiseConvolutionLayer3x3Kernel::configure(const ICLTensor *input, con
break;
}
}
- else if(input->info()->data_type() == DataType::F32 && gpu_target == GPUTarget::BIFROST)
+ else if(input->info()->data_type() == DataType::F32 && gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
if(_conv_stride_x == 1 && _conv_stride_y == 1)
{
diff --git a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
index 9851475928..18d64a1a9d 100644
--- a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
@@ -77,8 +77,9 @@ void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *outpu
// Configure the local work size for Bifrost with a value obtained
// via exhaustive autotuning for the MobileNets tensor shapes.
- const GPUTarget gpu_target = get_arch_from_target(get_target());
- if(gpu_target == GPUTarget::BIFROST)
+ const GPUTarget gpu_target = get_target();
+
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
_lws_hint = cl::NDRange(1, 2, 1);
}
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index c01a6660a7..56ac0c7250 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -134,7 +134,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
unsigned int num_elems_written_per_iteration_x = 0;
unsigned int num_elems_written_per_iteration_y = 0;
- if((target == GPUTarget::BIFROST) && (kernel_size <= 5) && (conv_stride_x == 1) && (conv_stride_y == 1) && (data_type == DataType::F32))
+ if(gpu_target_is_in(target, GPUTarget::G71, GPUTarget::G72) && (kernel_size <= 5) && (conv_stride_x == 1) && (conv_stride_y == 1) && (data_type == DataType::F32))
{
// Configure kernel window
@@ -301,7 +301,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
_output = output;
_biases = biases;
- const GPUTarget gpu_target = get_arch_from_target(get_target());
+ const GPUTarget gpu_target = get_target();
std::stringstream kernel_name;
kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
@@ -309,7 +309,7 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
CLBuildOptions build_options;
build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
- if((gpu_target == GPUTarget::BIFROST) && (kernel_size <= 5) && (_conv_stride_x == 1) && (_conv_stride_y == 1) && (data_type == DataType::F32))
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && (kernel_size <= 5) && (_conv_stride_x == 1) && (_conv_stride_y == 1) && (data_type == DataType::F32))
{
build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(2))));
diff --git a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
index d5c93dd24a..3309775c36 100644
--- a/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *accum, ITen
unsigned int &num_elems_processed_per_iteration)
{
// Select the vector size to use (8 for Bifrost; 16 for Midgard).
- num_elems_processed_per_iteration = (gpu_target == GPUTarget::BIFROST) ? 8 : 16;
+ num_elems_processed_per_iteration = gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) ? 8 : 16;
// Configure kernel window
Window win = calculate_max_window(*accum, Steps(num_elems_processed_per_iteration));
@@ -81,12 +81,12 @@ void CLGEMMMatrixAccumulateBiasesKernel::configure(ICLTensor *accum, const ICLTe
_biases = biases;
_accum = accum;
- // Get the target architecture
- GPUTarget arch_target = get_arch_from_target(get_target());
+ // Get the target gpu
+ GPUTarget gpu_target = get_target();
unsigned int vector_size = 0;
// Configure kernel window
- auto win_config = validate_and_configure_window(accum->info(), biases->info(), arch_target, vector_size);
+ auto win_config = validate_and_configure_window(accum->info(), biases->info(), gpu_target, vector_size);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure(win_config.second);
diff --git a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
index 6655d12d7e..9c69800928 100644
--- a/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.cpp
@@ -139,7 +139,8 @@ inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *inpu
num_elems_processed_per_iteration_y = std::min(static_cast<int>(output->dimension(1)), 4);
// Create kernels according to the architecture, data type and input size.
- if(gpu_target == GPUTarget::BIFROST && data_type == DataType::F32)
+ GPUTarget arch_target = get_arch_from_target(gpu_target);
+ if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
{
num_elems_processed_per_iteration_x = (input1->dimension(0) <= 1000 && input0->num_dimensions() == 1) ? 2 : 4;
}
@@ -199,27 +200,48 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
const int fp_pos = input0->info()->fixed_point_position();
// Get target architecture
- GPUTarget arch_target = get_arch_from_target(get_target());
+ GPUTarget gpu_target = get_target();
// Configure LWS hint
- if(arch_target == GPUTarget::BIFROST && input1->info()->dimension(1) == 24)
- {
- // LWS optimized for the 11x11 AlexNet convolution on Bifrost.
- _lws_hint = cl::NDRange(2, 2);
- }
- else if(output->info()->dimension(1) == 196)
+ switch(gpu_target)
{
- _lws_hint = cl::NDRange(1, 7);
- }
- else
- {
- _lws_hint = cl::NDRange(8, 8);
+ case GPUTarget::MIDGARD:
+ case GPUTarget::T600:
+ case GPUTarget::T700:
+ case GPUTarget::T800:
+ if(output->info()->dimension(1) == 196)
+ {
+ _lws_hint = cl::NDRange(1, 7);
+ }
+ else
+ {
+ _lws_hint = cl::NDRange(8, 8);
+ }
+ break;
+ case GPUTarget::G71:
+ case GPUTarget::G72:
+ if(input1->info()->dimension(1) == 24)
+ {
+ // LWS optimized for the 11x11 AlexNet convolution on Bifrost.
+ _lws_hint = cl::NDRange(2, 2);
+ }
+ else if(output->info()->dimension(1) == 196)
+ {
+ _lws_hint = cl::NDRange(1, 7);
+ }
+ else
+ {
+ _lws_hint = cl::NDRange(8, 8);
+ }
+ break;
+ default:
+ _lws_hint = cl::NullRange;
}
ElementsProcessed num_elements_processed{};
// Configure kernel window
- auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), is_interleaved_transposed, arch_target, num_elements_processed);
+ auto win_config = validate_and_configure_window(input0->info(), input1->info(), output->info(), is_interleaved_transposed, gpu_target, num_elements_processed);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure(win_config.second);
@@ -247,7 +269,8 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
if(data_type == DataType::F32)
{
- kernel_name = "gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target);
+ GPUTarget arch_target = get_arch_from_target(gpu_target);
+ kernel_name = "gemm_mm_interleaved_transposed_f32_" + string_from_target(arch_target);
}
else
{
@@ -259,7 +282,7 @@ void CLGEMMMatrixMultiplyKernel::configure(const ICLTensor *input0, const ICLTen
build_opts.add_option("-DCOLS_A=" + support::cpp11::to_string(input0->info()->dimension(0)));
// Create kernels according to the architecture, data type and input size.
- if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
+ if((gpu_target == GPUTarget::G71 || gpu_target == GPUTarget::G72) && data_type == DataType::F32)
{
// The first kernel is optimized for the case of 1000 or less output elements (e.g. FC8 of AlexNet and VGG-16, and
// FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 output elements (e.g.
diff --git a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
index cc483dc44e..87e624cc74 100644
--- a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
@@ -77,8 +77,8 @@ void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const
// Configure the local work size for Bifrost with a value obtained
// via exhaustive autotuning for the MobileNets tensor shapes.
- const GPUTarget gpu_target = get_arch_from_target(get_target());
- if(gpu_target == GPUTarget::BIFROST)
+ const GPUTarget gpu_target = get_target();
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
_lws_hint = cl::NDRange(1, 1, 1);
}
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
index b75d2646c6..9bc4787384 100644
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLIm2ColKernel.cpp
@@ -75,7 +75,7 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const
_kernel_dims = kernel_dims;
const DataType data_type = input->info()->data_type();
- const GPUTarget gpu_target = get_arch_from_target(get_target());
+ const GPUTarget gpu_target = get_target();
// Create kernel
CLBuildOptions build_opts;
@@ -185,7 +185,7 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const
vector_size = kernel_dims.width;
}
// Local work size and vector size optimized for the 11x11 AlexNet convolution on Bifrost.
- if(gpu_target == GPUTarget::BIFROST && kernel_dims.width == 11)
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72) && kernel_dims.width == 11)
{
_lws_hint = cl::NDRange(1, 1, 1);
vector_size = 8;
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index b3034e10cc..d7b86e78f6 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -190,7 +190,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
_output = output;
_pool_info = pool_info;
- const GPUTarget gpu_target = get_arch_from_target(get_target());
+ const GPUTarget gpu_target = get_target();
const DataType data_type = input->info()->data_type();
// Set build options
@@ -240,7 +240,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
// On Bifrost, this works for up to 35x35xC filters, for which the pooling_layer_3_optimized
// kernel is launched with gws=(9, 33, C). In any case, the hint will be ignored if it is
// invalid (e.g. exceeds the maximum workgroup size that the kernel can be launched with).
- if(gpu_target == GPUTarget::BIFROST)
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
cl::NDRange gws = ICLKernel::gws_from_window(std::get<1>(win_config));
_lws_hint = cl::NDRange(gws[0], gws[1], 1);
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index da00d2dc64..e6f8f266d8 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -44,7 +44,7 @@ inline bool is_interleaved_transposed(int m, int n, int k, DataType data_type, b
{
bool flag = true;
- if(gpu_target == GPUTarget::BIFROST)
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
// COMPMID-852
if(k > 256 && m > 4 && data_type == DataType::F32 && reshape_b_only_on_first_run)
@@ -122,7 +122,7 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
int mult_transpose1xW_width = 1;
int mult_interleave4x4_height = 1;
- if(gpu_target == GPUTarget::BIFROST)
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
mult_transpose1xW_width = 4;
mult_interleave4x4_height = 2;
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index c688299d4f..c4f939e297 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -41,7 +41,7 @@ inline bool is_interleaved_transposed(int m, int n, int k, bool reshape_b_only_o
{
bool flag = true;
- if(gpu_target == GPUTarget::BIFROST)
+ if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72))
{
// COMPMID-852
if(k > 256 && m > 4 && reshape_b_only_on_first_run)