author     Anthony Barbier <anthony.barbier@arm.com>  2017-11-28 10:33:22 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:41:36 +0000
commit     a2ea75360b1193318dc8441bbd9120eb747041ae (patch)
tree       15d1b8f062be484bd2c5649a089d0711c7b121ca /src/core/CL/kernels
parent     d912fd8eaaa56aac90f2b0b118c76f24ba8efa02 (diff)
COMPMID-661 Add Bifrost lws heuristics for several depthwise_convolution kernels #49
Change-Id: Ibfa1c1cc9fc8501b22a18ecd519758f4aeb301eb
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110880
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/core/CL/kernels')
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp    57
-rw-r--r--  src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp             10
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp   10
3 files changed, 57 insertions(+), 20 deletions(-)
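
The heuristics introduced below reduce to a small, width-keyed choice of local work size per kernel. As a quick reference, the following sketch consolidates the cl::NDRange values from the three diffs into one helper; the helper and the KernelKind enum are hypothetical and exist only for illustration (in the library each kernel sets its _lws_hint inside its own configure()).

// Consolidated view of the Bifrost lws values added by this patch.
// Hypothetical helper for illustration only; assumes the standard
// OpenCL C++ bindings (cl::NDRange from <CL/cl2.hpp>).
#include <CL/cl2.hpp>
#include <cstddef>

enum class KernelKind
{
    DepthwiseConvolution3x3,
    DepthwiseIm2Col,
    GEMMMatrixVectorMultiply
};

cl::NDRange bifrost_lws_hint(KernelKind kind, std::size_t input_width)
{
    switch(kind)
    {
        case KernelKind::DepthwiseConvolution3x3:
            // Keyed on the MobileNets spatial sizes (112/56, 28/14, 7)
            if(input_width >= 56)
            {
                return cl::NDRange(8, 5, 2);
            }
            if(input_width >= 14)
            {
                return cl::NDRange(1, 5, 2);
            }
            return cl::NDRange(1, 1, 2);
        case KernelKind::DepthwiseIm2Col:
            return cl::NDRange(1, 2, 1);
        case KernelKind::GEMMMatrixVectorMultiply:
        default:
            return cl::NDRange(1, 1, 1);
    }
}
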
diff --git a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
index 63586b0f0f..be8fae2885 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
@@ -93,9 +93,47 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
+ // Create kernel
std::string kernel_name = is_data_type_quantized_asymmetric(_input->info()->data_type()) ? "depthwise_convolution_3x3_quantized" : "depthwise_convolution_3x3";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ // Set static arguments
+ if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
+ {
+ float multiplier = _input->info()->quantization_info().scale * _weights->info()->quantization_info().scale / _output->info()->quantization_info().scale;
+ int output_multiplier = 0;
+ int output_shift = 0;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+ unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0);
+
+ _kernel.setArg(idx++, -_input->info()->quantization_info().offset);
+ _kernel.setArg(idx++, -_weights->info()->quantization_info().offset);
+ _kernel.setArg(idx++, _output->info()->quantization_info().offset);
+ _kernel.setArg(idx++, output_multiplier);
+ _kernel.setArg(idx++, output_shift);
+ }
+
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ const GPUTarget gpu_target = get_arch_from_target(get_target());
+ if(gpu_target == GPUTarget::BIFROST)
+ {
+ const size_t width = input->info()->dimension(0);
+ if(width >= 56) // 56 or 112
+ {
+ _lws_hint = cl::NDRange(8, 5, 2);
+ }
+ else if(width >= 14) // 14 or 28
+ {
+ _lws_hint = cl::NDRange(1, 5, 2);
+ }
+ else // 7
+ {
+ _lws_hint = cl::NDRange(1, 1, 2);
+ }
+ }
+
// Configure kernel window
const unsigned int num_elems_processed_per_iteration = 2;
const unsigned int num_elems_written_per_iteration = 2;
@@ -113,23 +151,6 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure(win);
-
- // Set static arguments
- if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
- {
- float multiplier = _input->info()->quantization_info().scale * _weights->info()->quantization_info().scale / _output->info()->quantization_info().scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
- unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0);
-
- _kernel.setArg(idx++, -_input->info()->quantization_info().offset);
- _kernel.setArg(idx++, -_weights->info()->quantization_info().offset);
- _kernel.setArg(idx++, _output->info()->quantization_info().offset);
- _kernel.setArg(idx++, output_multiplier);
- _kernel.setArg(idx++, output_shift);
- }
}
void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue &queue)
@@ -166,7 +187,7 @@ void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue
add_3D_tensor_argument(idx, _output, slice_out);
add_3D_tensor_argument(idx, _weights, slice_weights);
- enqueue(queue, *this, slice_out);
+ enqueue(queue, *this, slice_out, _lws_hint);
}
while(window.slide_window_slice_3D(slice_out) && win_in.slide_window_slice_3D(slice_in));
}
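
The static quantized arguments set above rely on calculate_quantized_multiplier_less_than_one() to express the real rescale factor input_scale * weights_scale / output_scale as an integer multiplier plus a right shift. The sketch below shows the usual gemmlowp-style decomposition of a multiplier in [0, 1) into a Q31 fixed-point value; it illustrates the idea and may differ in detail from the library's implementation.

#include <cassert>
#include <cmath>
#include <cstdint>

// Decompose a real multiplier m in [0, 1) as quantized_multiplier * 2^(-right_shift),
// with quantized_multiplier stored in Q31 format. Sketch of the common gemmlowp-style
// scheme; not necessarily identical to calculate_quantized_multiplier_less_than_one().
void decompose_multiplier_less_than_one(float multiplier, int *quantized_multiplier, int *right_shift)
{
    assert(multiplier >= 0.f && multiplier < 1.f);
    if(multiplier == 0.f)
    {
        *quantized_multiplier = 0;
        *right_shift          = 0;
        return;
    }
    int exponent = 0;
    const double significand = std::frexp(multiplier, &exponent); // significand in [0.5, 1)
    *right_shift             = -exponent;                         // exponent <= 0 because multiplier < 1
    int64_t q                = static_cast<int64_t>(std::round(significand * (1ll << 31)));
    if(q == (1ll << 31)) // rounding pushed the significand up to 1.0
    {
        q /= 2;
        --*right_shift;
    }
    *quantized_multiplier = static_cast<int>(q);
}

On the device side the accumulator can then be scaled by the Q31 multiplier and right-shifted by output_shift before the output offset is added, which keeps the whole requantization in integer arithmetic.
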
diff --git a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
index c23941426e..ad9ac0ecd6 100644
--- a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
@@ -73,6 +73,14 @@ void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *outpu
}
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_im2col", build_opts));
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ const GPUTarget gpu_target = get_arch_from_target(get_target());
+ if(gpu_target == GPUTarget::BIFROST)
+ {
+ _lws_hint = cl::NDRange(1, 2, 1);
+ }
+
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
// The CLDepthwiseIm2ColKernel doesn't need padding so update_window_and_padding() can be skipped
@@ -105,7 +113,7 @@ void CLDepthwiseIm2ColKernel::run(const Window &window, cl::CommandQueue &queue)
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, slice_in);
add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice);
+ enqueue(queue, *this, slice, _lws_hint);
}
while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in));
}
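
As in the 3x3 kernel, passing _lws_hint to enqueue() here replaces the driver-chosen work-group shape with the tuned one. Conceptually it amounts to forwarding the hint as the local work size of the NDRange enqueue, as in the simplified sketch below (this is not the library's actual enqueue() helper).

#include <CL/cl2.hpp>

// Simplified idea behind enqueue(queue, kernel, window, lws_hint): the hint
// becomes the local work size of the clEnqueueNDRangeKernel call instead of
// cl::NullRange (which would leave the work-group shape to the driver).
// Note: in OpenCL 1.x the local size must evenly divide the global size in
// each dimension, so the global size has to be chosen/rounded accordingly.
void enqueue_with_hint(cl::CommandQueue &queue, const cl::Kernel &kernel,
                       const cl::NDRange &gws, const cl::NDRange &lws_hint)
{
    queue.enqueueNDRangeKernel(kernel, cl::NullRange /* offset */, gws, lws_hint);
}
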
diff --git a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
index 70af5d63cf..951bc144aa 100644
--- a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
@@ -63,6 +63,14 @@ void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mv", build_opts));
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ const GPUTarget gpu_target = get_arch_from_target(get_target());
+ if(gpu_target == GPUTarget::BIFROST)
+ {
+ _lws_hint = cl::NDRange(1, 1, 1);
+ }
+
// Configure kernel window
const unsigned int num_elems_read_per_iteration = 4;
@@ -119,7 +127,7 @@ void CLGEMMMatrixVectorMultiplyKernel::run(const Window &window, cl::CommandQueu
unsigned int idx_2 = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
add_3D_tensor_argument(idx_0, _input0, slice_in);
add_1D_tensor_argument(idx_2, _output, slice_out);
- enqueue(queue, *this, slice_in);
+ enqueue(queue, *this, slice_in, _lws_hint);
}
while(window.slide_window_slice_3D(slice_in) && window.slide_window_slice_3D(slice_out));
}
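
For reference, the gemm_mv kernel tuned here performs a matrix-vector multiplication per slice; a plain scalar equivalent of that computation (ignoring the CL kernel's vectorisation, padding and batching details) looks like this:

#include <cstddef>
#include <vector>

// out[r] = sum_c A[r][c] * x[c], with A stored row-major.
// Scalar reference only; the CL kernel vectorises the inner loop and runs
// one matrix-vector product per 3D slice of the window.
std::vector<float> matrix_vector_multiply(const std::vector<float> &A,
                                          const std::vector<float> &x,
                                          std::size_t rows, std::size_t cols)
{
    std::vector<float> out(rows, 0.f);
    for(std::size_t r = 0; r < rows; ++r)
    {
        for(std::size_t c = 0; c < cols; ++c)
        {
            out[r] += A[r * cols + c] * x[c];
        }
    }
    return out;
}
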