author    Gian Marco Iodice <gianmarco.iodice@arm.com>  2018-04-11 15:59:10 +0100
committer Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:49:37 +0000
commit    e52a3000d2c13bc1b66ca66b3d12b6b836982394 (patch)
tree      70e8ef5ba216762604f84228805aac9bd65747b6 /src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
parent    dd03870b63784abe499761da2b26b209b33f2db2 (diff)
download  ComputeLibrary-e52a3000d2c13bc1b66ca66b3d12b6b836982394.tar.gz
COMPMID-1026 - Add support for 4x4 output tile in CLWinogradConvolutionLayer
The performance achieved can be found at the following confluence page:
https://confluence.arm.com/display/MLENG/GEMM-based+convolution+vs+Winograd-based+convolution+on+OpenCL

Change-Id: I4b690cfdd4eb4ff0cd17b14fdd49ccaa1d1dc85c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127729
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
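In essence, the patch replaces the hard-coded 2x2 output tile with a small heuristic: the 2x2 tile is kept only for 3x3 kernels on inputs no larger than 4x4, and the 4x4 tile is used otherwise. A minimal standalone sketch of that selection rule follows; the helper name select_winograd_output_tile is illustrative only and not part of the library, while Size2D is the Compute Library's 2D size type used in the diff below.

#include "arm_compute/core/Size2D.h"

using arm_compute::Size2D;

// Illustrative helper mirroring the selection rule added to both configure()
// and validate(): keep the 2x2 output tile for 3x3 kernels on very small
// inputs (width and height <= 4), otherwise use the larger 4x4 output tile.
inline Size2D select_winograd_output_tile(unsigned int kernel_w, unsigned int kernel_h,
                                          unsigned int input_w, unsigned int input_h)
{
    const bool is_3x3        = (Size2D(kernel_w, kernel_h) == Size2D(3U, 3U));
    const bool is_tiny_input = (input_w <= 4 && input_h <= 4);
    return (is_3x3 && is_tiny_input) ? Size2D(2U, 2U) : Size2D(4U, 4U);
}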
Diffstat (limited to 'src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp')
-rw-r--r--  src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp | 18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index 0aa7f8d1b5..86ccddac88 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -44,13 +44,18 @@ void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *we
const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape
- const TensorShape input_shape = input->info()->tensor_shape();
+ const TensorShape input_shape = input->info()->tensor_shape();
+ const unsigned int input_w = input->info()->tensor_shape()[idx_width];
+ const unsigned int input_h = input->info()->tensor_shape()[idx_height];
// Kernel size
const unsigned int kernel_w = weights->info()->tensor_shape()[idx_width];
const unsigned int kernel_h = weights->info()->tensor_shape()[idx_height];
- const WinogradInfo winograd_info = WinogradInfo(Size2D(2, 2),
+ //Winograd output tile
+ const Size2D output_tile = (Size2D(kernel_w, kernel_h) == Size2D(3U, 3U) && input_w <= 4 && input_h <= 4) ? Size2D(2U, 2U) : Size2D(4U, 4U);
+
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
Size2D(kernel_w, kernel_h),
Size2D(input_shape[idx_width], input_shape[idx_height]),
conv_info,
@@ -95,13 +100,18 @@ Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
// Input shape
- const TensorShape input_shape = input->tensor_shape();
+ const TensorShape input_shape = input->tensor_shape();
+ const unsigned int input_w = input->tensor_shape()[idx_width];
+ const unsigned int input_h = input->tensor_shape()[idx_height];
// Kernel size
const unsigned int kernel_w = weights->tensor_shape()[idx_width];
const unsigned int kernel_h = weights->tensor_shape()[idx_height];
- const WinogradInfo winograd_info = WinogradInfo(Size2D(2, 2),
+ //Winograd output tile
+ const Size2D output_tile = (Size2D(kernel_w, kernel_h) == Size2D(3U, 3U) && input_w <= 4 && input_h <= 4) ? Size2D(2U, 2U) : Size2D(4U, 4U);
+
+ const WinogradInfo winograd_info = WinogradInfo(output_tile,
Size2D(kernel_w, kernel_h),
Size2D(input_shape[idx_width], input_shape[idx_height]),
conv_info,
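For context on why the larger tile matters (this is general Winograd tiling arithmetic, not code from the patch): the input transform produces one entry per output tile, so the tile count sets one dimension of the batched GEMM. Growing the tile from 2x2 to 4x4 cuts that dimension by roughly 4x, at the cost of larger transform tiles (6x6 for F(4x4, 3x3) versus 4x4 for F(2x2, 3x3)). A small standalone sketch of the tile count, with an arbitrarily chosen 56x56 output used purely for illustration:

#include <cstdio>

// Number of Winograd tiles needed to cover an out_w x out_h output with a
// tile_w x tile_h output tile (ceiling division in each dimension).
static unsigned int num_tiles(unsigned int out_w, unsigned int out_h,
                              unsigned int tile_w, unsigned int tile_h)
{
    return ((out_w + tile_w - 1) / tile_w) * ((out_h + tile_h - 1) / tile_h);
}

int main()
{
    std::printf("2x2 tiles: %u\n", num_tiles(56, 56, 2, 2)); // 784 tiles
    std::printf("4x4 tiles: %u\n", num_tiles(56, 56, 4, 4)); // 196 tiles
    return 0;
}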