From 2d9de0a3fa6ad858e70040124f362799a962bb6a Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Thu, 15 Mar 2018 17:58:20 +0000
Subject: COMPMID-1009 Support 4x4 output tile for Winograd Filter Transform on OpenCL.

Change-Id: I68c6453e0f192de659582404f109a89616b9fbb9
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/124811
Tested-by: Jenkins
Reviewed-by: Georgios Pinitas
Reviewed-by: Gian Marco Iodice
---
 .../CL/kernels/CLWinogradFilterTransformKernel.h   |  18 +--
 arm_compute/core/Size2D.h                          |  16 +++
 arm_compute/core/utils/misc/ShapeCalculator.h      |  16 +--
 src/core/CL/CLKernelLibrary.cpp                    |   1 +
 src/core/CL/cl_kernels/winograd.cl                 | 138 +++++++++++++++++++++
 .../CL/kernels/CLWinogradFilterTransformKernel.cpp |  25 ++--
 .../CL/functions/CLWinogradConvolutionLayer.cpp    |   6 +-
 tests/validation/CL/Winograd.cpp                   |  37 ++++--
 tests/validation/fixtures/WinogradLayerFixture.h   |  16 +--
 tests/validation/reference/Winograd.cpp            | 116 ++++++++++-------
 tests/validation/reference/Winograd.h              |   2 +-
 11 files changed, 292 insertions(+), 99 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h b/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h
index ec5e51482a..c4ae5745b8 100644
--- a/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h
+++ b/arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h
@@ -48,20 +48,22 @@ public:
     ~CLWinogradFilterTransformKernel() = default;
     /** Set the input and output tensor.
      *
-     * @param[in]  input  Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
-     *                    kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
-     * @param[out] output Destination tensor. The output is a 3D tensor with dimensions [OFM, IFM, 16]. Data type supported: same as @p input
+     * @param[in]  input       Source tensor. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
+     *                         kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
+     * @param[out] output      Destination tensor. The output is a 3D tensor with dimensions [OFM, IFM, 16 or 36]. Data type supported: same as @p input
+     * @param[in]  output_tile Output tile. Currently only 2x2 and 4x4 tiles are supported.
      */
-    void configure(const ICLTensor *input, ICLTensor *output);
+    void configure(const ICLTensor *input, ICLTensor *output, const Size2D &output_tile);
     /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradFilterTransformKernel
      *
-     * @param[in] input  Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
-     *                   kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
-     * @param[in] output Destination tensor info. The output is a 3D tensor with dimensions [OFM, IFM, 16]. Data type supported: same as @p input
+     * @param[in] input       Source tensor info. The input is a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] (NCHW data layout).
+     *                        kernel_x must be 3 and equal to kernel_y. Data types supported: F32.
+     * @param[in] output      Destination tensor info. The output is a 3D tensor with dimensions [OFM, IFM, 16 or 36]. Data type supported: same as @p input
+     * @param[in] output_tile Output tile. Currently only 2x2 and 4x4 tiles are supported.
* * @return a status */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output); + static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile); // Inherited methods overridden: void run(const Window &window, cl::CommandQueue &queue) override; diff --git a/arm_compute/core/Size2D.h b/arm_compute/core/Size2D.h index 3840771cd1..37c4ebd041 100644 --- a/arm_compute/core/Size2D.h +++ b/arm_compute/core/Size2D.h @@ -24,6 +24,7 @@ #ifndef __ARM_COMPUTE_SIZE2D_H__ #define __ARM_COMPUTE_SIZE2D_H__ +#include "support/ToolchainSupport.h" #include #include @@ -54,6 +55,21 @@ public: return (width * height); } + bool operator==(const Size2D &other) const + { + return (width == other.width) && (height == other.height); + } + + bool operator!=(const Size2D &other) const + { + return !(*this == other); + } + + std::string to_string() const + { + return support::cpp11::to_string(width) + std::string("x") + support::cpp11::to_string(height); + } + public: size_t width = {}; /**< Width of the image region or rectangle */ size_t height = {}; /**< Height of the image region or rectangle */ diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h index 5344ce7e74..383fc6cda6 100644 --- a/arm_compute/core/utils/misc/ShapeCalculator.h +++ b/arm_compute/core/utils/misc/ShapeCalculator.h @@ -196,23 +196,17 @@ inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorI return output_shape; } -inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input) +inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const Size2D &output_tile) { - // COMPMID-984 (giaiod01) TensorShape tensor_shape{ input.tensor_shape() }; + tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH)); + tensor_shape.set(Window::DimY, input.dimension(2)); + tensor_shape.set(Window::DimZ, (output_tile.width == 2) ? 
16 : 36); + if(input.data_layout() == DataLayout::NCHW) { - tensor_shape.remove_dimension(0); tensor_shape.set(Window::DimX, input.dimension(3)); - tensor_shape.set(Window::DimY, input.dimension(2)); - tensor_shape.set(Window::DimZ, 16); - } - else - { - tensor_shape.remove_dimension(1); - tensor_shape.set(Window::DimY, input.dimension(2)); - tensor_shape.set(Window::DimZ, 16); } return tensor_shape; diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 9df2dcbacd..740a98bbac 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -352,6 +352,7 @@ const std::map CLKernelLibrary::_kernel_program_map = { "warp_perspective_nearest_neighbour", "warp_perspective.cl" }, { "warp_perspective_bilinear", "warp_perspective.cl" }, { "winograd_filter_transform_2x2_3x3_nchw", "winograd.cl" }, + { "winograd_filter_transform_4x4_3x3_nchw", "winograd.cl" }, { "winograd_input_transform_2x2_3x3_stepz1_nchw", "winograd.cl" }, { "winograd_input_transform_2x2_3x3_stepz2_nchw", "winograd.cl" }, { "winograd_output_transform_2x2_3x3_nchw", "winograd.cl" }, diff --git a/src/core/CL/cl_kernels/winograd.cl b/src/core/CL/cl_kernels/winograd.cl index 25c129d0aa..bd51db6b03 100644 --- a/src/core/CL/cl_kernels/winograd.cl +++ b/src/core/CL/cl_kernels/winograd.cl @@ -116,6 +116,144 @@ __kernel void winograd_filter_transform_2x2_3x3_nchw( *(__global float *)(dst_addr + 14 * dst_stride_z) = out3.s2; *(__global float *)(dst_addr + 15 * dst_stride_z) = out3.s3; } + +/** This OpenCL kernel performs Winograd filter transform 3x3 when the data format is NCHW and the output tile is 4x4 + * + * @note The number of channels must be passed at compile time using -DNUM_CHANNELS: e.g. -DNUM_CHANNELS=64 + * + * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32 + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) + * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) + * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[out] dst_ptr Pointer to the destination tensor. 
Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                        Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                          dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                        Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                          dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                        Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                          dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes   The offset of the first element in the destination tensor
+ */
+__kernel void winograd_filter_transform_4x4_3x3_nchw(
+    TENSOR4D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst))
+{
+    Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, NUM_CHANNELS);
+
+    const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
+
+    // Load the values from the input tensor
+    float3 w0 = vload3(0, (__global float *)(src_addr + 0 * src_stride_y));
+    float3 w1 = vload3(0, (__global float *)(src_addr + 1 * src_stride_y));
+    float3 w2 = vload3(0, (__global float *)(src_addr + 2 * src_stride_y));
+
+    // Transform the 3x3 tile into a 6x6 tile
+    float8 out0 = 0.0f;
+    float8 out1 = 0.0f;
+    float8 out2 = 0.0f;
+    float8 out3 = 0.0f;
+    float8 out4 = 0.0f;
+    float8 out5 = 0.0f;
+
+    // Row 0
+    out0.s0 = (w0.s0) / 16.f;
+    out0.s1 = (-w0.s0 - w0.s1 - w0.s2) / 24.f;
+    out0.s2 = (-w0.s0 + w0.s1 - w0.s2) / 24.f;
+    out0.s3 = (w0.s0 + 2 * w0.s1 + 4 * w0.s2) / 96.f;
+    out0.s4 = (w0.s0 - 2 * w0.s1 + 4 * w0.s2) / 96.f;
+    out0.s5 = (w0.s2) / 4.f;
+
+    // Row 1
+    out1.s0 = (-w0.s0 - w1.s0 - w2.s0) / 24.f;
+    out1.s1 = (w0.s0 + w1.s0 + w2.s0 + w0.s1 + w1.s1 + w2.s1 + w0.s2 + w1.s2 + w2.s2) / 36.f;
+    out1.s2 = (w0.s0 + w1.s0 + w2.s0 - w0.s1 - w1.s1 - w2.s1 + w0.s2 + w1.s2 + w2.s2) / 36.f;
+    out1.s3 = (-w0.s0 - w1.s0 - w2.s0 + 2 * (-w0.s1 - w1.s1 - w2.s1) + 4 * (-w0.s2 - w1.s2 - w2.s2)) / 144.f;
+    out1.s4 = (-w0.s0 - w1.s0 - w2.s0 + 2 * (w0.s1 + w1.s1 + w2.s1) + 4 * (-w0.s2 - w1.s2 - w2.s2)) / 144.f;
+    out1.s5 = (-w0.s2 - w1.s2 - w2.s2) / 6.f;
+
+    // Row 2
+    out2.s0 = (-w0.s0 + w1.s0 - w2.s0) / 24.f;
+    out2.s1 = (w0.s0 - w1.s0 + w2.s0 + w0.s1 - w1.s1 + w2.s1 + w0.s2 - w1.s2 + w2.s2) / 36.f;
+    out2.s2 = (w0.s0 - w1.s0 + w2.s0 - w0.s1 + w1.s1 - w2.s1 + w0.s2 - w1.s2 + w2.s2) / 36.f;
+    out2.s3 = (-w0.s0 + w1.s0 - w2.s0 + 2 * (-w0.s1 + w1.s1 - w2.s1) + 4 * (-w0.s2 + w1.s2 - w2.s2)) / 144.f;
+    out2.s4 = (-w0.s0 + w1.s0 - w2.s0 + 2 * (w0.s1 - w1.s1 + w2.s1) + 4 * (-w0.s2 + w1.s2 - w2.s2)) / 144.f;
+    out2.s5 = (-w0.s2 + w1.s2 - w2.s2) / 6.f;
+
+    // Row 3
+    out3.s0 = (w0.s0 + 2 * w1.s0 + 4 * w2.s0) / 96.f;
+    out3.s1 = (-w0.s0 - 2 * w1.s0 - 4 * w2.s0 - w0.s1 - 2 * w1.s1 - 4 * w2.s1 - w0.s2 - 2 * w1.s2 - 4 * w2.s2) / 144.f;
+    out3.s2 = (-w0.s0 - 2 * w1.s0 - 4 * w2.s0 + w0.s1 + 2 * w1.s1 + 4 * w2.s1 - w0.s2 - 2 * w1.s2 - 4 * w2.s2) / 144.f;
+    out3.s3 = ((w0.s0 + 2 * w1.s0 + 4 * w2.s0) + 2 * (w0.s1 + 2 * w1.s1 + 4 * w2.s1) + 4 * (w0.s2 + 2 * w1.s2 + 4 * w2.s2)) / 576.f;
+    out3.s4 = ((w0.s0 + 2 * w1.s0 + 4 * w2.s0) + 2 * (-w0.s1 - 2 * w1.s1 - 4 * w2.s1) + 4 * (w0.s2 + 2 * w1.s2 + 4 * w2.s2)) / 576.f;
+    out3.s5 = (w0.s2 + 2 * w1.s2 + 4 * w2.s2) / 24.f;
+
+    // Row 4
+    out4.s0 = (w0.s0 - 2 * w1.s0 + 4 * w2.s0) / 96.f;
+    out4.s1 = (-w0.s0 + 2 * w1.s0 - 4 * w2.s0 - w0.s1 + 2 * w1.s1 - 4 * w2.s1 - w0.s2 + 2 * w1.s2 - 4 * w2.s2) / 144.f;
+    out4.s2 = (-w0.s0 + 2 * w1.s0 - 4 * w2.s0 + w0.s1 - 2 * w1.s1 + 4 * w2.s1 - w0.s2
+ 2 * w1.s2 - 4 * w2.s2) / 144.f; + out4.s3 = ((w0.s0 - 2 * w1.s0 + 4 * w2.s0) + 2 * (w0.s1 - 2 * w1.s1 + 4 * w2.s1) + 4 * (w0.s2 - 2 * w1.s2 + 4 * w2.s2)) / 576.f; + out4.s4 = ((w0.s0 - 2 * w1.s0 + 4 * w2.s0) + 2 * (-w0.s1 + 2 * w1.s1 - 4 * w2.s1) + 4 * (w0.s2 - 2 * w1.s2 + 4 * w2.s2)) / 576.f; + out4.s5 = (w0.s2 - 2 * w1.s2 + 4 * w2.s2) / 24.f; + + // Row 5 + out5.s0 = (w2.s0) / 4.f; + out5.s1 = (-w2.s0 - w2.s1 - w2.s2) / 6.f; + out5.s2 = (-w2.s0 + w2.s1 - w2.s2) / 6.f; + out5.s3 = (w2.s0 + 2 * w2.s1 + 4 * w2.s2) / 24.f; + out5.s4 = (w2.s0 - 2 * w2.s1 + 4 * w2.s2) / 24.f; + out5.s5 = (w2.s2); + + int z = get_global_id(2); + int x0 = z / NUM_CHANNELS; // idx filter + int y0 = z % NUM_CHANNELS; // idx channel + + // Get output address + __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x0 * dst_stride_x + y0 * dst_stride_y; + + // Store the 36 values across the 36 channels + *(__global float *)(dst_addr + 0 * dst_stride_z) = out0.s0; + *(__global float *)(dst_addr + 1 * dst_stride_z) = out0.s1; + *(__global float *)(dst_addr + 2 * dst_stride_z) = out0.s2; + *(__global float *)(dst_addr + 3 * dst_stride_z) = out0.s3; + *(__global float *)(dst_addr + 4 * dst_stride_z) = out0.s4; + *(__global float *)(dst_addr + 5 * dst_stride_z) = out0.s5; + *(__global float *)(dst_addr + 6 * dst_stride_z) = out1.s0; + *(__global float *)(dst_addr + 7 * dst_stride_z) = out1.s1; + *(__global float *)(dst_addr + 8 * dst_stride_z) = out1.s2; + *(__global float *)(dst_addr + 9 * dst_stride_z) = out1.s3; + *(__global float *)(dst_addr + 10 * dst_stride_z) = out1.s4; + *(__global float *)(dst_addr + 11 * dst_stride_z) = out1.s5; + *(__global float *)(dst_addr + 12 * dst_stride_z) = out2.s0; + *(__global float *)(dst_addr + 13 * dst_stride_z) = out2.s1; + *(__global float *)(dst_addr + 14 * dst_stride_z) = out2.s2; + *(__global float *)(dst_addr + 15 * dst_stride_z) = out2.s3; + *(__global float *)(dst_addr + 16 * dst_stride_z) = out2.s4; + *(__global float *)(dst_addr + 17 * dst_stride_z) = out2.s5; + *(__global float *)(dst_addr + 18 * dst_stride_z) = out3.s0; + *(__global float *)(dst_addr + 19 * dst_stride_z) = out3.s1; + *(__global float *)(dst_addr + 20 * dst_stride_z) = out3.s2; + *(__global float *)(dst_addr + 21 * dst_stride_z) = out3.s3; + *(__global float *)(dst_addr + 22 * dst_stride_z) = out3.s4; + *(__global float *)(dst_addr + 23 * dst_stride_z) = out3.s5; + *(__global float *)(dst_addr + 24 * dst_stride_z) = out4.s0; + *(__global float *)(dst_addr + 25 * dst_stride_z) = out4.s1; + *(__global float *)(dst_addr + 26 * dst_stride_z) = out4.s2; + *(__global float *)(dst_addr + 27 * dst_stride_z) = out4.s3; + *(__global float *)(dst_addr + 28 * dst_stride_z) = out4.s4; + *(__global float *)(dst_addr + 29 * dst_stride_z) = out4.s5; + *(__global float *)(dst_addr + 30 * dst_stride_z) = out5.s0; + *(__global float *)(dst_addr + 31 * dst_stride_z) = out5.s1; + *(__global float *)(dst_addr + 32 * dst_stride_z) = out5.s2; + *(__global float *)(dst_addr + 33 * dst_stride_z) = out5.s3; + *(__global float *)(dst_addr + 34 * dst_stride_z) = out5.s4; + *(__global float *)(dst_addr + 35 * dst_stride_z) = out5.s5; +} #endif // defined(NUM_CHANNELS) #if defined(NUM_TILES_X) && defined(PAD_LEFT) && defined(PAD_TOP) diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp index 655b82bf66..5a03332e99 100644 --- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp +++ b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp 
@@ -44,17 +44,18 @@ using namespace arm_compute::misc::shape_calculator;
 
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != 3);
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != input->dimension(1));
     ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+    ARM_COMPUTE_RETURN_ERROR_ON(output_tile != Size2D(2U, 2U) && output_tile != Size2D(4U, 4U));
 
     // Checks performed when output is configured
     if(output->total_size() != 0)
     {
-        const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input));
+        const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input, output_tile));
 
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -63,8 +64,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
     return Status{};
 }
 
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const Size2D &output_tile)
 {
+    ARM_COMPUTE_UNUSED(output_tile);
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
     constexpr unsigned int num_elems_processed_per_iteration_x = 3;
@@ -90,35 +92,36 @@ CLWinogradFilterTransformKernel::CLWinogradFilterTransformKernel()
 {
 }
 
-void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output)
+void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &output_tile)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
     // Output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info())));
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info(), output_tile)));
 
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), output_tile));
 
     // Set build options
     CLBuildOptions build_opts;
     build_opts.add_option("-DNUM_CHANNELS=" + support::cpp11::to_string(input->info()->dimension(2)));
 
     // Create kernel
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("winograd_filter_transform_2x2_3x3_nchw", build_opts.options()));
+    std::string kernel_name = std::string("winograd_filter_transform_") + output_tile.to_string() + std::string("_3x3_nchw");
+    _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
 
     _input  = input;
     _output = output;
 
     // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info());
+    auto win_config = validate_and_configure_window(input->info(), output->info(), output_tile);
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure(win_config.second);
 }
 
-Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile)
 {
-   
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, output_tile)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), output_tile).first); return Status{}; } diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp index 5081cbac4e..a861e0072e 100644 --- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp @@ -64,7 +64,7 @@ void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *we _input_transform.configure(input, &_input0, conv_info, Size2D(kernel_w, kernel_h)); // Configure filter transform - _filter_transform.configure(weights, &_input1); + _filter_transform.configure(weights, &_input1, Size2D(2U, 2U)); // Configure batched matrix multiply _batched_mm.configure(&_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/)); @@ -103,9 +103,9 @@ Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransform::validate(input, &input0, conv_info, Size2D(kernel_w, kernel_h))); // Validate filter transform - const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights); + const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, Size2D(2U, 2U)); const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape); - ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1)); + ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, Size2D(2U, 2U))); // Configure batched matrix multiply TensorShape batched_mm_output_shape = input0.tensor_shape(); diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp index aa668fa575..07a52f8ebc 100644 --- a/tests/validation/CL/Winograd.cpp +++ b/tests/validation/CL/Winograd.cpp @@ -147,12 +147,12 @@ TEST_SUITE_END() // InputTransform TEST_SUITE(FilterTransform) // *INDENT-OFF* // clang-format off -DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( framework::dataset::make("InputInfo",{ TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F16), // F16 not supported TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported TensorInfo(TensorShape(5U, 5U, 5U, 3U), 1, DataType::F32), // Kernel size not supported - TensorInfo(TensorShape(3U, 3U), 1, DataType::F32), // valid + TensorInfo(TensorShape(3U, 3U), 1, DataType::F32), // Output tile not supported TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F32), // valid TensorInfo(TensorShape(3U, 3U, 37U, 2U), 1, DataType::F32), // valid TensorInfo(TensorShape(3U, 3U, 37U, 22U), 1, DataType::F32) // valid @@ -164,12 +164,21 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( TensorInfo(TensorShape(1U, 1U, 16U), 1, DataType::F32), TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32), TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32), - TensorInfo(TensorShape(22U, 37U, 16U), 1, DataType::F32) + TensorInfo(TensorShape(22U, 37U, 36U), 1, DataType::F32) })), - 
framework::dataset::make("Expected", { false, false, false, true, true, true, true })), - input_info, output_info, expected) + framework::dataset::make("OutputTile", { + Size2D(2U, 2U), + Size2D(2U, 2U), + Size2D(2U, 2U), + Size2D(3U, 3U), + Size2D(2U, 2U), + Size2D(2U, 2U), + Size2D(4U, 4U) + })), + framework::dataset::make("Expected", { false, false, false, false, true, true, true })), + input_info, output_info, output_tile, expected) { - ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), output_tile)) == expected, framework::LogLevel::ERRORS); } // clang-format on // *INDENT-ON* @@ -177,13 +186,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder; using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture; -DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()), +DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()), + framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })), framework::dataset::make("DataType", { DataType::F32 })), - shape_a, is_nchw_format, data_type) + shape_a, is_nchw_format, output_tile, data_type) { ARM_COMPUTE_UNUSED(is_nchw_format); - TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type)); + TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), output_tile); // Create tensors CLTensor a = create_tensor(shape_a, data_type); @@ -194,16 +204,19 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da // Create and configure function CLWinogradFilterTransform winograd_filter_transform; - winograd_filter_transform.configure(&a, &b); + winograd_filter_transform.configure(&a, &b, output_tile); } -FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("DataType", { DataType::F32 }))) +FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })), + framework::dataset::make("DataType", { DataType::F32 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradFilterTransformDataset(), framework::dataset::make("DataType", { DataType::F32 }))) +FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradFilterTransformDataset(), + framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })), + framework::dataset::make("DataType", { DataType::F32 }))) { // Validate output validate(CLAccessor(_target), 
_reference, tolerance_f32); diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradLayerFixture.h index 9811c28008..c427f8d20e 100644 --- a/tests/validation/fixtures/WinogradLayerFixture.h +++ b/tests/validation/fixtures/WinogradLayerFixture.h @@ -225,12 +225,12 @@ class WinogradFilterTransformValidationFixture : public framework::Fixture { public: template - void setup(TensorShape input_shape, bool is_nchw_format, DataType data_type) + void setup(TensorShape input_shape, bool is_nchw_format, Size2D output_tile, DataType data_type) { - TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type)); + TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), output_tile); - _target = compute_target(input_shape, output_shape, is_nchw_format, data_type); - _reference = compute_reference(input_shape, output_shape, is_nchw_format, data_type); + _target = compute_target(input_shape, output_shape, is_nchw_format, output_tile, data_type); + _reference = compute_reference(input_shape, output_shape, is_nchw_format, output_tile, data_type); } protected: @@ -254,7 +254,7 @@ protected: } } - TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, DataType data_type) + TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, const Size2D &output_tile, DataType data_type) { ARM_COMPUTE_UNUSED(is_nchw_format); @@ -264,7 +264,7 @@ protected: // Create and configure function FunctionType filter_transform; - filter_transform.configure(&src, &dst); + filter_transform.configure(&src, &dst, output_tile); ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -284,7 +284,7 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, DataType data_type) + SimpleTensor compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, const Size2D &output_tile, DataType data_type) { ARM_COMPUTE_UNUSED(is_nchw_format); @@ -294,7 +294,7 @@ protected: // Fill reference fill(src, 0, -1.f, 1.f); - return reference::winograd_filter_transform(src, output_shape); + return reference::winograd_filter_transform(src, output_shape, output_tile); } TensorType _target{}; diff --git a/tests/validation/reference/Winograd.cpp b/tests/validation/reference/Winograd.cpp index c760663b22..ad0dcbd958 100644 --- a/tests/validation/reference/Winograd.cpp +++ b/tests/validation/reference/Winograd.cpp @@ -39,40 +39,74 @@ namespace reference namespace { template -void winograd_filter_transform3x3(const SimpleTensor &in, SimpleTensor &out) +void winograd_filter_transform3x3(const SimpleTensor &in, SimpleTensor &out, const Size2D &output_tile) { + const bool is_2x2 = (output_tile.width == 2); + const unsigned int transf_side = is_2x2 ? 
4u : 6u; + // Simple tensor for the 3x3 input tile SimpleTensor input_tile{ TensorShape(3u, 3u), in.data_type(), 1 }; // Simple tensor for the transformation matrix - SimpleTensor trans_matrix{ TensorShape(3u, 4u), in.data_type(), 1 }; + SimpleTensor trans_matrix{ TensorShape(3u, transf_side), in.data_type(), 1 }; // Simple tensor for the transformation matrix transpose - SimpleTensor trans_matrix_transposed{ TensorShape(4u, 3u), in.data_type(), 1 }; + SimpleTensor trans_matrix_transposed{ TensorShape(transf_side, 3u), in.data_type(), 1 }; - // Simple tensor for the 4x3 temporary tile - SimpleTensor tmp_tile{ TensorShape(3u, 4u), in.data_type(), 1 }; + // Simple tensor for the 3xSide temporary tile + SimpleTensor tmp_tile{ TensorShape(3u, transf_side), in.data_type(), 1 }; - // Simple tensor for the 4x4 output tile - SimpleTensor output_tile{ TensorShape(4u, 4u), in.data_type(), 1 }; + // Simple tensor for the SidexSide output tile + SimpleTensor transf_tile{ TensorShape(transf_side, transf_side), in.data_type(), 1 }; - // Initialize transformation matrix - // 1 | 0 | 0 - // 0.5 | 0.5 | 0.5 - // 0.5 |-0.5 | 0.5 - // 0 | 0 | 1 - trans_matrix[0 + 0 * 3] = 1.0f; - trans_matrix[1 + 0 * 3] = 0.0f; - trans_matrix[2 + 0 * 3] = 0.0f; - trans_matrix[0 + 1 * 3] = 0.5f; - trans_matrix[1 + 1 * 3] = 0.5f; - trans_matrix[2 + 1 * 3] = 0.5f; - trans_matrix[0 + 2 * 3] = 0.5f; - trans_matrix[1 + 2 * 3] = -0.5f; - trans_matrix[2 + 2 * 3] = 0.5f; - trans_matrix[0 + 3 * 3] = 0.0f; - trans_matrix[1 + 3 * 3] = 0.0f; - trans_matrix[2 + 3 * 3] = 1.0f; + if(is_2x2) + { + // Initialize 3x4 transformation matrix + // 1 | 0 | 0 + // 0.5 | 0.5 | 0.5 + // 0.5 |-0.5 | 0.5 + // 0 | 0 | 1 + trans_matrix[0 + 0 * 3] = 1.0f; + trans_matrix[1 + 0 * 3] = 0.0f; + trans_matrix[2 + 0 * 3] = 0.0f; + trans_matrix[0 + 1 * 3] = 0.5f; + trans_matrix[1 + 1 * 3] = 0.5f; + trans_matrix[2 + 1 * 3] = 0.5f; + trans_matrix[0 + 2 * 3] = 0.5f; + trans_matrix[1 + 2 * 3] = -0.5f; + trans_matrix[2 + 2 * 3] = 0.5f; + trans_matrix[0 + 3 * 3] = 0.0f; + trans_matrix[1 + 3 * 3] = 0.0f; + trans_matrix[2 + 3 * 3] = 1.0f; + } + else + { + // Initialize 3x6 transformation matrix + // 1/4 | 0 | 0 + // -1/6 | -1/6 | -1/6 + // -1/6 | 1/6 | -1/6 + // 1/24 | 1/12 | 1/6 + // 1/24 | -1/12 | 1/6 + // 0 | 0 | 1 + trans_matrix[0 + 0 * 3] = 1.0f / 4.0f; + trans_matrix[1 + 0 * 3] = 0.0f; + trans_matrix[2 + 0 * 3] = 0.0f; + trans_matrix[0 + 1 * 3] = -1.0f / 6.0f; + trans_matrix[1 + 1 * 3] = -1.0f / 6.0f; + trans_matrix[2 + 1 * 3] = -1.0f / 6.0f; + trans_matrix[0 + 2 * 3] = -1.0f / 6.0f; + trans_matrix[1 + 2 * 3] = 1.0f / 6.0f; + trans_matrix[2 + 2 * 3] = -1.0f / 6.0f; + trans_matrix[0 + 3 * 3] = 1.0f / 24.0f; + trans_matrix[1 + 3 * 3] = 1.0f / 12.0f; + trans_matrix[2 + 3 * 3] = 1.0f / 6.0f; + trans_matrix[0 + 4 * 3] = 1.0f / 24.0f; + trans_matrix[1 + 4 * 3] = -1.0f / 12.0f; + trans_matrix[2 + 4 * 3] = 1.0f / 6.0f; + trans_matrix[0 + 5 * 3] = 0.0f; + trans_matrix[1 + 5 * 3] = 0.0f; + trans_matrix[2 + 5 * 3] = 1.0f; + } // Transpose the transformation matrix transpose_matrix(trans_matrix, trans_matrix_transposed); @@ -94,26 +128,18 @@ void winograd_filter_transform3x3(const SimpleTensor &in, SimpleTensor &ou matrix_multiply(trans_matrix, input_tile, tmp_tile); // Second transformation - matrix_multiply(tmp_tile, trans_matrix_transposed, output_tile); + matrix_multiply(tmp_tile, trans_matrix_transposed, transf_tile); // Store the 4x4 output tile across the 16 channels - const int output_offset = w + z * num_filters; - out[output_offset + 0 * num_filters * num_channels] = 
output_tile[0 + 0 * 4]; - out[output_offset + 1 * num_filters * num_channels] = output_tile[1 + 0 * 4]; - out[output_offset + 2 * num_filters * num_channels] = output_tile[2 + 0 * 4]; - out[output_offset + 3 * num_filters * num_channels] = output_tile[3 + 0 * 4]; - out[output_offset + 4 * num_filters * num_channels] = output_tile[0 + 1 * 4]; - out[output_offset + 5 * num_filters * num_channels] = output_tile[1 + 1 * 4]; - out[output_offset + 6 * num_filters * num_channels] = output_tile[2 + 1 * 4]; - out[output_offset + 7 * num_filters * num_channels] = output_tile[3 + 1 * 4]; - out[output_offset + 8 * num_filters * num_channels] = output_tile[0 + 2 * 4]; - out[output_offset + 9 * num_filters * num_channels] = output_tile[1 + 2 * 4]; - out[output_offset + 10 * num_filters * num_channels] = output_tile[2 + 2 * 4]; - out[output_offset + 11 * num_filters * num_channels] = output_tile[3 + 2 * 4]; - out[output_offset + 12 * num_filters * num_channels] = output_tile[0 + 3 * 4]; - out[output_offset + 13 * num_filters * num_channels] = output_tile[1 + 3 * 4]; - out[output_offset + 14 * num_filters * num_channels] = output_tile[2 + 3 * 4]; - out[output_offset + 15 * num_filters * num_channels] = output_tile[3 + 3 * 4]; + const int output_offset = w + z * num_filters; + + for(unsigned int out_h = 0, out_pos = 0; out_h < transf_side; ++out_h) + { + for(unsigned int out_w = 0; out_w < transf_side; ++out_w, ++out_pos) + { + out[output_offset + out_pos * num_filters * num_channels] = transf_tile[out_w + out_h * transf_side]; + } + } } } } @@ -314,7 +340,7 @@ SimpleTensor winograd_input_transform(const SimpleTensor &src, const Tenso } template -SimpleTensor winograd_filter_transform(const SimpleTensor &in, const TensorShape &output_shape) +SimpleTensor winograd_filter_transform(const SimpleTensor &in, const TensorShape &output_shape, const Size2D &output_tile) { ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only supported NCHW data format"); @@ -324,7 +350,7 @@ SimpleTensor winograd_filter_transform(const SimpleTensor &in, const Tenso switch(in.shape()[0]) { case 3: - winograd_filter_transform3x3(in, out); + winograd_filter_transform3x3(in, out, output_tile); break; default: ARM_COMPUTE_ERROR("Only supported 3x3 kernel"); @@ -358,7 +384,7 @@ SimpleTensor winograd_output_transform(const SimpleTensor &in, const Tenso } template SimpleTensor winograd_input_transform(const SimpleTensor &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims); -template SimpleTensor winograd_filter_transform(const SimpleTensor &in, const TensorShape &output_shape); +template SimpleTensor winograd_filter_transform(const SimpleTensor &in, const TensorShape &output_shape, const Size2D &output_tile); template SimpleTensor winograd_output_transform(const SimpleTensor &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles); } // namespace reference } // namespace validation diff --git a/tests/validation/reference/Winograd.h b/tests/validation/reference/Winograd.h index fa1a7f3f61..62e136b09d 100644 --- a/tests/validation/reference/Winograd.h +++ b/tests/validation/reference/Winograd.h @@ -40,7 +40,7 @@ template SimpleTensor winograd_input_transform(const SimpleTensor &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims); template -SimpleTensor winograd_filter_transform(const SimpleTensor &in, const TensorShape &output_shape); +SimpleTensor winograd_filter_transform(const SimpleTensor &in, const 
TensorShape &output_shape, const Size2D &output_tile); template SimpleTensor winograd_output_transform(const SimpleTensor &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles); -- cgit v1.2.1
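For reference, the coefficients hard-coded in winograd_filter_transform_4x4_3x3_nchw and the trans_matrix initialised in the updated reference implementation (6 rows by 3 columns) are the standard Winograd filter transform for F(4x4, 3x3): each 3x3 filter tile g is expanded into the 6x6 tile U = G g G^T and stored across 36 output channels. In LaTeX notation, the transform matrix read off the code above is:

U = G \, g \, G^{\top}, \qquad
G =
\begin{bmatrix}
 \tfrac{1}{4}  & 0              & 0             \\
-\tfrac{1}{6}  & -\tfrac{1}{6}  & -\tfrac{1}{6} \\
-\tfrac{1}{6}  &  \tfrac{1}{6}  & -\tfrac{1}{6} \\
 \tfrac{1}{24} &  \tfrac{1}{12} &  \tfrac{1}{6} \\
 \tfrac{1}{24} & -\tfrac{1}{12} &  \tfrac{1}{6} \\
 0             & 0              & 1
\end{bmatrix}

Expanding U element by element reproduces the divisors used in the OpenCL kernel, e.g. U_{00} = g_{00} / 16 (out0.s0) and U_{01} = -(g_{00} + g_{01} + g_{02}) / 24 (out0.s1).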
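The new configure()/validate() signatures take the output tile explicitly. The C++ sketch below mirrors the Configuration test case above; the weight dimensions (3x3 kernels, IFM=64, OFM=32), the tensor names and the surrounding CL runtime setup are illustrative assumptions, not part of this patch.

#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    // Set up the default CL context and queue used by the CL kernels
    CLScheduler::get().default_init();

    // Hypothetical 3x3 weights laid out as [kernel_x, kernel_y, IFM, OFM] = [3, 3, 64, 32] (NCHW)
    CLTensor weights;
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 64U, 32U), 1, DataType::F32));

    // For a 4x4 output tile the transformed weights become [OFM, IFM, 36] = [32, 64, 36]
    const TensorShape transformed_shape =
        misc::shape_calculator::compute_winograd_filter_transform_shape(*weights.info(), Size2D(4U, 4U));
    CLTensor transformed;
    transformed.allocator()->init(TensorInfo(transformed_shape, 1, DataType::F32));

    // Validate first, then configure: a 4x4 tile selects winograd_filter_transform_4x4_3x3_nchw
    const Status status = CLWinogradFilterTransformKernel::validate(weights.info(), transformed.info(), Size2D(4U, 4U));
    ARM_COMPUTE_ERROR_THROW_ON(status);

    CLWinogradFilterTransformKernel transform;
    transform.configure(&weights, &transformed, Size2D(4U, 4U));

    weights.allocator()->allocate();
    transformed.allocator()->allocate();

    // ... fill the weights (e.g. via map()/unmap()) before running ...
    CLScheduler::get().enqueue(transform);
    CLScheduler::get().sync();

    return 0;
}

Passing Size2D(2U, 2U) instead keeps the existing 2x2 path ([OFM, IFM, 16] and winograd_filter_transform_2x2_3x3_nchw); any other tile size is rejected by validate().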