-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp  | 57
-rw-r--r--  src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp          | 10
-rw-r--r--  src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp | 10
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolution.cpp      |  6
4 files changed, 62 insertions(+), 21 deletions(-)
diff --git a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
index 63586b0f0f..be8fae2885 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseConvolution3x3Kernel.cpp
@@ -93,9 +93,47 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS");
+ // Create kernel
std::string kernel_name = is_data_type_quantized_asymmetric(_input->info()->data_type()) ? "depthwise_convolution_3x3_quantized" : "depthwise_convolution_3x3";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+ // Set static arguments
+ if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
+ {
+ float multiplier = _input->info()->quantization_info().scale * _weights->info()->quantization_info().scale / _output->info()->quantization_info().scale;
+ int output_multiplier = 0;
+ int output_shift = 0;
+ quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+
+ unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0);
+
+ _kernel.setArg(idx++, -_input->info()->quantization_info().offset);
+ _kernel.setArg(idx++, -_weights->info()->quantization_info().offset);
+ _kernel.setArg(idx++, _output->info()->quantization_info().offset);
+ _kernel.setArg(idx++, output_multiplier);
+ _kernel.setArg(idx++, output_shift);
+ }
+
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ const GPUTarget gpu_target = get_arch_from_target(get_target());
+ if(gpu_target == GPUTarget::BIFROST)
+ {
+ const size_t width = input->info()->dimension(0);
+ if(width >= 56) // 56 or 112
+ {
+ _lws_hint = cl::NDRange(8, 5, 2);
+ }
+ else if(width >= 14) // 14 or 28
+ {
+ _lws_hint = cl::NDRange(1, 5, 2);
+ }
+ else // 7
+ {
+ _lws_hint = cl::NDRange(1, 1, 2);
+ }
+ }
+
// Configure kernel window
const unsigned int num_elems_processed_per_iteration = 2;
const unsigned int num_elems_written_per_iteration = 2;
@@ -113,23 +151,6 @@ void CLDepthwiseConvolution3x3Kernel::configure(const ICLTensor *input, const IC
output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure(win);
-
- // Set static arguments
- if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
- {
- float multiplier = _input->info()->quantization_info().scale * _weights->info()->quantization_info().scale / _output->info()->quantization_info().scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
-
- unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0);
-
- _kernel.setArg(idx++, -_input->info()->quantization_info().offset);
- _kernel.setArg(idx++, -_weights->info()->quantization_info().offset);
- _kernel.setArg(idx++, _output->info()->quantization_info().offset);
- _kernel.setArg(idx++, output_multiplier);
- _kernel.setArg(idx++, output_shift);
- }
}
void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue &queue)
@@ -166,7 +187,7 @@ void CLDepthwiseConvolution3x3Kernel::run(const Window &window, cl::CommandQueue
add_3D_tensor_argument(idx, _output, slice_out);
add_3D_tensor_argument(idx, _weights, slice_weights);
- enqueue(queue, *this, slice_out);
+ enqueue(queue, *this, slice_out, _lws_hint);
}
while(window.slide_window_slice_3D(slice_out) && win_in.slide_window_slice_3D(slice_in));
}
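Note: the block moved above derives a fixed-point requantization pair (output_multiplier, output_shift) from the input, weights and output quantization scales via quantization::calculate_quantized_multiplier_less_than_one(). Below is a minimal sketch of that decomposition, assuming the usual gemmlowp-style scheme in which a real multiplier in (0, 1) is split into a Q0.31 integer multiplier and a right shift; the function name and rounding details here are illustrative, not the library's exact implementation.

// Sketch only: decompose real_multiplier (0 < real_multiplier < 1) into a
// normalized Q0.31 multiplier and a right shift, so the OpenCL kernel can
// requantize its accumulator with integer arithmetic.
#include <cmath>
#include <cstdint>

void decompose_multiplier(double real_multiplier, int32_t *quantized_multiplier, int *right_shift)
{
    // real_multiplier = mantissa * 2^exponent, with mantissa in [0.5, 1)
    int    exponent = 0;
    double mantissa = std::frexp(real_multiplier, &exponent);

    // Scale the mantissa to a signed Q0.31 value
    int64_t q = static_cast<int64_t>(std::round(mantissa * (1ll << 31)));
    if(q == (1ll << 31)) // Handle the case where rounding pushed the mantissa up to 1.0
    {
        q /= 2;
        ++exponent;
    }

    *quantized_multiplier = static_cast<int32_t>(q);
    *right_shift          = -exponent; // exponent <= 0 for multipliers below 1, so the shift is non-negative
}
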
diff --git a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
index c23941426e..ad9ac0ecd6 100644
--- a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
@@ -73,6 +73,14 @@ void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *outpu
}
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_im2col", build_opts));
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ const GPUTarget gpu_target = get_arch_from_target(get_target());
+ if(gpu_target == GPUTarget::BIFROST)
+ {
+ _lws_hint = cl::NDRange(1, 2, 1);
+ }
+
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps());
// The CLDepthwiseIm2ColKernel doesn't need padding so update_window_and_padding() can be skipped
@@ -105,7 +113,7 @@ void CLDepthwiseIm2ColKernel::run(const Window &window, cl::CommandQueue &queue)
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, slice_in);
add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice);
+ enqueue(queue, *this, slice, _lws_hint);
}
while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in));
}
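Note: the autotuned _lws_hint values added in this patch only take effect because the run() methods now pass them to enqueue(). Below is a minimal sketch of how such a hint reaches the OpenCL runtime through the C++ bindings; enqueue_with_hint() is an illustrative stand-in for the library's enqueue() helper, which additionally derives the global range from the execution window.

// Sketch only: forward a local work size hint to clEnqueueNDRangeKernel via the
// OpenCL C++ bindings. A zero-dimensional hint falls back to cl::NullRange,
// letting the driver choose the local size.
#include <CL/cl2.hpp> // or the library's own OpenCL wrapper header

void enqueue_with_hint(cl::CommandQueue &queue, const cl::Kernel &kernel,
                       const cl::NDRange &global, const cl::NDRange &lws_hint)
{
    const cl::NDRange local = (lws_hint.dimensions() != 0) ? lws_hint : cl::NullRange;
    queue.enqueueNDRangeKernel(kernel, cl::NullRange, global, local);
}
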
diff --git a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
index 70af5d63cf..951bc144aa 100644
--- a/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.cpp
@@ -63,6 +63,14 @@ void CLGEMMMatrixVectorMultiplyKernel::configure(const ICLTensor *input0, const
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mv", build_opts));
+ // Configure the local work size for Bifrost with a value obtained
+ // via exhaustive autotuning for the MobileNets tensor shapes.
+ const GPUTarget gpu_target = get_arch_from_target(get_target());
+ if(gpu_target == GPUTarget::BIFROST)
+ {
+ _lws_hint = cl::NDRange(1, 1, 1);
+ }
+
// Configure kernel window
const unsigned int num_elems_read_per_iteration = 4;
@@ -119,7 +127,7 @@ void CLGEMMMatrixVectorMultiplyKernel::run(const Window &window, cl::CommandQueu
unsigned int idx_2 = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
add_3D_tensor_argument(idx_0, _input0, slice_in);
add_1D_tensor_argument(idx_2, _output, slice_out);
- enqueue(queue, *this, slice_in);
+ enqueue(queue, *this, slice_in, _lws_hint);
}
while(window.slide_window_slice_3D(slice_in) && window.slide_window_slice_3D(slice_out));
}
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp b/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
index 23a20a3011..baa05b921a 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolution.cpp
@@ -41,6 +41,7 @@ void CLDepthwiseConvolution3x3::configure(ICLTensor *input, const ICLTensor *wei
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::F32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights);
+ _kernel.set_target(CLScheduler::get().target());
_kernel.configure(input, weights, biases, output, conv_info);
_border_handler.configure(input, _kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
}
@@ -67,7 +68,8 @@ void CLDepthwiseConvolution::configure(ICLTensor *input, const ICLTensor *weight
const size_t weights_h = weights->info()->dimension(1);
const size_t weights_z = weights->info()->dimension(2);
- bool has_bias = (biases != nullptr);
+ const bool has_bias = (biases != nullptr);
+ const GPUTarget gpu_target = CLScheduler::get().target();
unsigned int conv_w = 0;
unsigned int conv_h = 0;
@@ -84,6 +86,7 @@ void CLDepthwiseConvolution::configure(ICLTensor *input, const ICLTensor *weight
shape_im2col.set(2, weights_z);
const TensorInfo info_im2col(shape_im2col, 1, input->info()->data_type(), input->info()->fixed_point_position());
_input_reshaped.allocator()->init(info_im2col);
+ _im2col_kernel.set_target(gpu_target);
_im2col_kernel.configure(input, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, has_bias);
// Weights reshape configuration
@@ -99,6 +102,7 @@ void CLDepthwiseConvolution::configure(ICLTensor *input, const ICLTensor *weight
shape_v2mm_out.set(2, 1);
const TensorInfo info_v2mm_out(shape_v2mm_out, 1, input->info()->data_type(), input->info()->fixed_point_position());
_v2mm_output.allocator()->init(info_v2mm_out);
+ _v2mm_kernel.set_target(gpu_target);
_v2mm_kernel.configure(&_input_reshaped, &_weights_reshaped, &_v2mm_output);
_vector_to_tensor_kernel.configure(&_v2mm_output, output, conv_w, conv_h);
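
Note: the runtime-side changes follow one pattern: query the GPU target once from CLScheduler and push it into each member kernel with set_target() before configure(), so that the Bifrost-specific tuning above can be applied at configuration time. A minimal sketch of that pattern for a composite function follows; MyCompositeFunction, its member kernels and the intermediate tensor are hypothetical, while CLScheduler::get().target(), set_target() and configure() are the calls used in this patch.

// Sketch only: propagate the scheduler's GPU target into member kernels before
// configuring them, mirroring the pattern added in CLDepthwiseConvolution::configure().
void MyCompositeFunction::configure(ICLTensor *input, ICLTensor *output)
{
    const GPUTarget gpu_target = CLScheduler::get().target();

    // set_target() must precede configure(), since configure() is where
    // target-specific choices (e.g. the Bifrost LWS hints) are made.
    _first_kernel.set_target(gpu_target);
    _first_kernel.configure(input, &_intermediate);

    _second_kernel.set_target(gpu_target);
    _second_kernel.configure(&_intermediate, output);
}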