author    Gian Marco Iodice <gianmarco.iodice@arm.com>	2018-04-11 15:59:10 +0100
committer Anthony Barbier <anthony.barbier@arm.com>	2018-11-02 16:49:37 +0000
commit    e52a3000d2c13bc1b66ca66b3d12b6b836982394 (patch)
tree      70e8ef5ba216762604f84228805aac9bd65747b6 /src/runtime
parent    dd03870b63784abe499761da2b26b209b33f2db2 (diff)
download  ComputeLibrary-e52a3000d2c13bc1b66ca66b3d12b6b836982394.tar.gz
COMPMID-1026 - Add support for 4x4 output tile in CLWinogradConvolutionLayer
The performance achieved can be found at the following confluence page: https://confluence.arm.com/display/MLENG/GEMM-based+convolution+vs+Winograd-based+convolution+on+OpenCL

Change-Id: I4b690cfdd4eb4ff0cd17b14fdd49ccaa1d1dc85c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127729
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
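For background: a Winograd transform F(m x m, r x r) computes an m x m output tile from an (m + r - 1) x (m + r - 1) input tile, so moving from a 2x2 to a 4x4 output tile with 3x3 kernels grows the input tile from 4x4 to 6x6 while cutting the number of tiles per plane by roughly 4x, which is the main source of the speedup. A minimal sketch of that arithmetic (the helper name is hypothetical, not from the library):

    #include <cstdio>

    // Hypothetical helper: number of m x m output tiles needed to cover
    // a W x H output plane (ceiling division in each dimension).
    static unsigned num_tiles(unsigned out_w, unsigned out_h, unsigned m)
    {
        return ((out_w + m - 1) / m) * ((out_h + m - 1) / m);
    }

    int main()
    {
        const unsigned r       = 3;          // 3x3 kernel
        const unsigned tiles[] = { 2U, 4U }; // the two output tile sizes now supported
        for(unsigned m : tiles)
        {
            std::printf("F(%ux%u,%ux%u): %ux%u input tile, %u tiles for a 56x56 plane\n",
                        m, m, r, r, m + r - 1, m + r - 1, num_tiles(56, 56, m));
        }
        return 0;
    }

On a 56x56 plane this gives 784 tiles for the 2x2 case against 196 for the 4x4 case.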
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/functions/CLConvolutionLayer.cpp          37
-rw-r--r--  src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp  18
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp         8
3 files changed, 47 insertions, 16 deletions
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index bcb5424aab..643e24d638 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -48,9 +48,16 @@ void CLConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, c
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_ERROR_THROW_ON(CLConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info));
- switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info,
+ switch(CLConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info,
weights_info, act_info, CLScheduler::get().target(), dilation))
{
+ case ConvolutionMethod::WINOGRAD:
+ {
+ auto f = arm_compute::support::cpp14::make_unique<CLWinogradConvolutionLayer>();
+ f->configure(input, weights, biases, output, conv_info);
+ _function = std::move(f);
+ break;
+ }
case ConvolutionMethod::DIRECT:
{
auto f = arm_compute::support::cpp14::make_unique<CLDirectConvolutionLayer>();
@@ -79,8 +86,14 @@ Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
//Configure if the parameters match the direct convolution or the gemm-based
const GPUTarget gpu_target = CLScheduler::get().target();
- switch(CLConvolutionLayer::get_convolution_method(input, weights, biases, output, conv_info, weights_info, act_info, gpu_target, dilation))
+ switch(CLConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, act_info, gpu_target, dilation))
{
+ case ConvolutionMethod::WINOGRAD:
+ {
+ //Validate Winograd
+ CLWinogradConvolutionLayer::validate(input, weights, biases, output, conv_info);
+ break;
+ }
case ConvolutionMethod::DIRECT:
{
// Validate direct convolution layer
@@ -101,19 +114,25 @@ Status CLConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
return Status{};
}
-ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info,
const WeightsInfo &weights_info, const ActivationLayerInfo &act_info, const GPUTarget gpu_target, const Size2D &dilation)
{
- ARM_COMPUTE_UNUSED(input);
- ARM_COMPUTE_UNUSED(weights);
- ARM_COMPUTE_UNUSED(biases);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
ARM_COMPUTE_UNUSED(output);
- ARM_COMPUTE_UNUSED(conv_info);
ARM_COMPUTE_UNUSED(weights_info);
ARM_COMPUTE_UNUSED(gpu_target);
- ARM_COMPUTE_UNUSED(dilation);
- ARM_COMPUTE_UNUSED(act_info);
+ const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+ const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
+
+ if((input->data_type() == DataType::F32) && (input->data_layout() == DataLayout::NCHW) && (input->dimension(idx_c) > 3) && (weights->dimension(idx_w) == 3) && (weights->dimension(idx_h) == 3)
+ && (weights->num_dimensions() <= 4) && (conv_info.stride().first == 1) && (conv_info.stride().second == 1) && (dilation == Size2D(1U, 1U)) && (!act_info.enabled()))
+ {
+ return ConvolutionMethod::WINOGRAD;
+ }
return ConvolutionMethod::GEMM;
}
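In plain terms, the heuristic above now returns ConvolutionMethod::WINOGRAD only for FP32 NCHW convolutions with a 3x3 kernel, more than 3 input channels, unit strides, no dilation and no fused activation; everything else falls through to GEMM. A condensed restatement, using a hypothetical plain struct in place of the ITensorInfo/PadStrideInfo/ActivationLayerInfo queries (the weights->num_dimensions() <= 4 condition is omitted for brevity):

    // Hypothetical stand-in for the fields the real code reads from the
    // tensor-info and convolution-info objects.
    struct ConvParams
    {
        bool     is_f32;
        bool     is_nchw;
        bool     act_enabled;
        unsigned channels;
        unsigned kernel_w, kernel_h;
        unsigned stride_x, stride_y;
        unsigned dilation_x, dilation_y;
    };

    // Condensed form of the new CL selection rule above.
    static bool use_winograd_cl(const ConvParams &p)
    {
        return p.is_f32 && p.is_nchw && p.channels > 3
            && p.kernel_w == 3 && p.kernel_h == 3
            && p.stride_x == 1 && p.stride_y == 1
            && p.dilation_x == 1 && p.dilation_y == 1
            && !p.act_enabled;
    }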
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index 0aa7f8d1b5..86ccddac88 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -44,13 +44,18 @@ void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *we
const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape
- const TensorShape input_shape = input->info()->tensor_shape();
+ const TensorShape input_shape = input->info()->tensor_shape();
+ const unsigned int input_w = input->info()->tensor_shape()[idx_width];
+ const unsigned int input_h = input->info()->tensor_shape()[idx_height];
// Kernel size
const unsigned int kernel_w = weights->info()->tensor_shape()[idx_width];
const unsigned int kernel_h = weights->info()->tensor_shape()[idx_height];
- const WinogradInfo winograd_info = WinogradInfo(Size2D(2, 2),
+ //Winograd output tile
+ const Size2D output_tile = (Size2D(kernel_w, kernel_h) == Size2D(3U, 3U) && input_w <= 4 && input_h <= 4) ? Size2D(2U, 2U) : Size2D(4U, 4U);
+
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
Size2D(kernel_w, kernel_h),
Size2D(input_shape[idx_width], input_shape[idx_height]),
conv_info,
@@ -95,13 +100,18 @@ Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape
- const TensorShape input_shape = input->tensor_shape();
+ const TensorShape input_shape = input->tensor_shape();
+ const unsigned int input_w = input->tensor_shape()[idx_width];
+ const unsigned int input_h = input->tensor_shape()[idx_height];
// Kernel size
const unsigned int kernel_w = weights->tensor_shape()[idx_width];
const unsigned int kernel_h = weights->tensor_shape()[idx_height];
- const WinogradInfo winograd_info = WinogradInfo(Size2D(2, 2),
+ //Winograd output tile
+ const Size2D output_tile = (Size2D(kernel_w, kernel_h) == Size2D(3U, 3U) && input_w <= 4 && input_h <= 4) ? Size2D(2U, 2U) : Size2D(4U, 4U);
+
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
Size2D(kernel_w, kernel_h),
Size2D(input_shape[idx_width], input_shape[idx_height]),
conv_info,
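The tile choice added to both configure() and validate() keeps the old 2x2 output tile only when a 3x3 kernel meets an input plane no larger than 4x4, where the 6x6 input tile required by a 4x4 output tile would not fit; every other case takes the new 4x4 tile. A standalone sketch, with a hypothetical pair alias standing in for Size2D:

    #include <utility>

    using TileSize = std::pair<unsigned, unsigned>; // hypothetical stand-in for Size2D

    // Mirrors the selection above: fall back to 2x2 for 3x3 kernels on
    // planes no larger than 4x4, otherwise use the new 4x4 output tile.
    static TileSize winograd_output_tile(unsigned kernel_w, unsigned kernel_h,
                                         unsigned input_w, unsigned input_h)
    {
        const bool small_3x3 = kernel_w == 3 && kernel_h == 3
                            && input_w <= 4 && input_h <= 4;
        return small_3x3 ? TileSize{2U, 2U} : TileSize{4U, 4U};
    }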
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index afc354533d..b0603e92d2 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -109,10 +109,12 @@ ConvolutionMethod NEConvolutionLayer::get_convolution_method(const ITensorInfo *
ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
ARM_COMPUTE_UNUSED(output);
ARM_COMPUTE_UNUSED(weights_info);
- ARM_COMPUTE_UNUSED(act_info);
- if((input->data_type() == DataType::F32) && (weights->dimension(0) == 3) && (weights->dimension(1) == 3) && (weights->num_dimensions() <= 4) && (conv_info.stride().first == 1)
- && (conv_info.stride().second == 1) && (dilation == Size2D(1U, 1U)))
+ const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+
+ if((input->data_type() == DataType::F32) && (input->data_layout() == DataLayout::NCHW) && (weights->dimension(idx_w) == 3) && (weights->dimension(idx_h) == 3) && (weights->num_dimensions() <= 4)
+ && (conv_info.stride().first == 1) && (conv_info.stride().second == 1) && (dilation == Size2D(1U, 1U)) && (!act_info.enabled()))
{
//FIXME Until COMPMID-1041 is implemented Winograd is slower than GEMM on A53.
if(Scheduler::get().cpu_info().get_cpu_model() != CPUModel::A53)
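The NEON heuristic is tightened along the same lines as the CL one (it now demands NCHW and rejects fused activations) but, unlike the CL path, it does not gate on the channel count, and it keeps GEMM on Cortex-A53 per the FIXME. A sketch reusing the hypothetical ConvParams struct from the CL example above:

    enum class CpuModel { A53, OTHER }; // hypothetical stand-in for arm_compute's CPUModel

    // Condensed form of the NEON rule: same shape/stride/dilation/activation
    // conditions as the CL path, minus the channel gate, plus the A53 carve-out.
    static bool use_winograd_neon(const ConvParams &p, CpuModel model)
    {
        return p.is_f32 && p.is_nchw
            && p.kernel_w == 3 && p.kernel_h == 3
            && p.stride_x == 1 && p.stride_y == 1
            && p.dilation_x == 1 && p.dilation_y == 1
            && !p.act_enabled
            && model != CpuModel::A53; // COMPMID-1041: Winograd still slower on A53
    }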