author    | Gian Marco Iodice <gianmarco.iodice@arm.com> | 2018-03-22 11:24:56 +0000
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:49:16 +0000
commit    | 247f52cfe337f7b2542b900e3d8cf122e9d4f11c (patch)
tree      | bcbabb7f1eea588a5d37566829763506d328e7a9 /src
parent    | eb8a399ba655b85c6854676832eb11b0af4108fe (diff)
download  | ComputeLibrary-247f52cfe337f7b2542b900e3d8cf122e9d4f11c.tar.gz
COMPMID-1013 - Create WinogradInfo data structure
COMPMID-1014 - Refactoring Winograd's dataset
Change-Id: I6abdcbf9a90d663f4db666cd410afece9f1d034d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125899
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src')
 src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
 src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
 src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
 src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
 src/runtime/CL/functions/CLWinogradInputTransform.cpp
5 files changed, 159 insertions, 102 deletions
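Every kernel touched by this patch now takes a single WinogradInfo descriptor instead of loose Size2D/PadStrideInfo arguments. The struct itself is defined outside the 'src' tree, so it does not appear in this diff; the sketch below is only an assumed shape, reconstructed from the fields the kernels read (output_tile_size, kernel_size, input_dimensions, convolution_info, output_data_layout) and from the constructor call used in CLWinogradConvolutionLayer.

```cpp
// Assumed layout of the new descriptor, inferred from how this patch uses it;
// the real definition lives in the library's public headers, not in 'src'.
struct WinogradInfo
{
    WinogradInfo(Size2D output_tile, Size2D kernel, Size2D input_dims, PadStrideInfo conv, DataLayout layout)
        : output_tile_size(output_tile), kernel_size(kernel), input_dimensions(input_dims), convolution_info(conv), output_data_layout(layout)
    {
    }

    Size2D        output_tile_size{};                     // e.g. 2x2 for the F(2x2, 3x3) variant
    Size2D        kernel_size{};                          // e.g. 3x3
    Size2D        input_dimensions{};                     // width/height of the convolution input
    PadStrideInfo convolution_info{};                     // pads and strides of the original convolution
    DataLayout    output_data_layout{ DataLayout::NCHW }; // only NCHW is accepted by these kernels
};
```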
diff --git a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
index 5a03332e99..5b8921b8e4 100644
--- a/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradFilterTransformKernel.cpp
@@ -44,18 +44,26 @@ using namespace arm_compute::misc::shape_calculator;
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != 3);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != input->dimension(1));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() != DataLayout::NCHW);
+
+    const Size2D kernel_size = winograd_info.kernel_size;
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+
+    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+
+    ARM_COMPUTE_RETURN_ERROR_ON(kernel_size != Size2D(3U, 3U));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_w) != kernel_size.width || input->dimension(idx_h) != kernel_size.height);
     ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
-    ARM_COMPUTE_RETURN_ERROR_ON(output_tile != Size2D(2U, 2U) && output_tile != Size2D(4U, 4U));

     // Checks performed when output is configured
     if(output->total_size() != 0)
     {
-        const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input, output_tile));
+        const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input, winograd_info));
+    ARM_COMPUTE_RETURN_ERROR_ON(output_tile_size != Size2D(2U, 2U) && output_tile_size != Size2D(4U, 4U));

         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -64,9 +72,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     return Status{};
 }

-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const Size2D &output_tile)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
 {
-    ARM_COMPUTE_UNUSED(output_tile);
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

     constexpr unsigned int num_elems_processed_per_iteration_x = 3;
@@ -92,36 +99,41 @@ CLWinogradFilterTransformKernel::CLWinogradFilterTransformKernel()
 {
 }

-void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &output_tile)
+void CLWinogradFilterTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

-    // Output tensor auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info(), output_tile)));
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_filter_transform_shape(*input->info(), winograd_info)));

-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), output_tile));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
+
+    const size_t idx_c = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);

     // Set build options
     CLBuildOptions build_opts;
-    build_opts.add_option("-DNUM_CHANNELS=" + support::cpp11::to_string(input->info()->dimension(2)));
+    build_opts.add_option("-DNUM_CHANNELS=" + support::cpp11::to_string(input->info()->dimension(idx_c)));
+
+    const Size2D kernel_size = winograd_info.kernel_size;
+    const Size2D output_tile_size = winograd_info.output_tile_size;

     // Create kernel
-    std::string kernel_name = std::string("winograd_filter_transform_") + output_tile.to_string() + std::string("_3x3_nchw");
+    std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_nchw";
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

     _input = input;
     _output = output;

     // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info(), output_tile);
+    auto win_config = validate_and_configure_window(input->info(), output->info());
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure(win_config.second);
 }

-Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &output_tile)
+Status CLWinogradFilterTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, output_tile));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), output_tile).first);
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);

     return Status{};
 }
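With the descriptor in place, the filter-transform kernel name is assembled from output_tile_size and kernel_size rather than hard-coded. A small illustration of what the concatenation in configure() produces for the only configuration accepted here (2x2 tile, 3x3 kernel); it assumes Size2D::to_string() prints "2x2" / "3x3".

```cpp
// Illustration only: for the 2x2/3x3 case the generated name matches the
// string the old code hard-coded.
const Size2D output_tile_size(2U, 2U);
const Size2D kernel_size(3U, 3U);
const std::string kernel_name = "winograd_filter_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_nchw";
// kernel_name == "winograd_filter_transform_2x2_3x3_nchw"
```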
diff --git a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
index 3b9350f9ba..df7ffe83a0 100644
--- a/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradInputTransformKernel.cpp
@@ -37,17 +37,25 @@ using namespace arm_compute;
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() != DataLayout::NCHW);
+
+    const PadStrideInfo conv_info = winograd_info.convolution_info;
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+    const Size2D kernel_size = winograd_info.kernel_size;
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd input transform only supports unit strides");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(kernel_dims.width != 3 || kernel_dims.height != 3, "Winograd input transform only supports 3x3 kernels");
-    ARM_COMPUTE_UNUSED(kernel_dims);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(kernel_size != Size2D(3U, 3U), "Winograd input transform only supports 3x3 kernels");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_tile_size != Size2D(2U, 2U), "Winograd input transform only supports 2x2 output tile size");
+    ARM_COMPUTE_UNUSED(conv_info);
+    ARM_COMPUTE_UNUSED(output_tile_size);
+    ARM_COMPUTE_UNUSED(kernel_size);

     // Validate configured output
     if(output->total_size() != 0)
     {
-        const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, conv_info, kernel_dims);
+        const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);

         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -56,15 +64,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     return Status{};
 }

-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_UNUSED(output);
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

-    ARM_COMPUTE_ERROR_ON(kernel_dims.width != 3 || kernel_dims.height != 3);
-    ARM_COMPUTE_UNUSED(kernel_dims);
+    const PadStrideInfo conv_info = winograd_info.convolution_info;
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+    const Size2D kernel_size = winograd_info.kernel_size;

-    constexpr unsigned int num_elems_read_per_iteration_x = 4u;
-    constexpr unsigned int num_elems_read_per_iteration_y = 4u;
+    const unsigned int num_elems_read_per_iteration_x = output_tile_size.width + kernel_size.width - 1;
+    const unsigned int num_elems_read_per_iteration_y = output_tile_size.height + kernel_size.height - 1;

     Window win = calculate_max_window(*input, Steps(1, 1));
@@ -87,28 +96,33 @@ BorderSize CLWinogradInputTransformKernel::border_size() const
     return _border_size;
 }

-void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
+void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), conv_info, kernel_dims));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), winograd_info));
+
+    const PadStrideInfo conv_info = winograd_info.convolution_info;
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+    const Size2D kernel_size = winograd_info.kernel_size;

     // Compute number of elements to process in the X and Y direction
-    const int num_elements_x = input->info()->dimension(0) - 2 + conv_info.pad_left() + conv_info.pad_right();
-    const int num_elements_y = input->info()->dimension(1) - 2 + conv_info.pad_top() + conv_info.pad_bottom();
+    const int num_elements_x = input->info()->dimension(0) - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
+    const int num_elements_y = input->info()->dimension(1) - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom();

     // Check if we need to extend the right or bottom border
-    const unsigned int extra_border_right = (num_elements_x % 2 == 0) ? 0u : 1u;
-    const unsigned int extra_border_bottom = (num_elements_y % 2 == 0) ? 0u : 1u;
+    // FIXME: This actually is not needed. Added just for validating the result;
+    const unsigned int extra_border_right = ((num_elements_x % output_tile_size.width) == 0) ? 0u : static_cast<unsigned int>(output_tile_size.width - 1);
+    const unsigned int extra_border_bottom = ((num_elements_y % output_tile_size.height) == 0) ? 0u : static_cast<unsigned int>(output_tile_size.height - 1);

     _input = input;
     _output = output;
     _border_size = BorderSize(conv_info.pad_top(), conv_info.pad_right() + extra_border_right, conv_info.pad_bottom() + extra_border_bottom, conv_info.pad_left());
-    _num_tiles_x = std::ceil(num_elements_x / 2.0f);
-    _num_tiles_y = std::ceil(num_elements_y / 2.0f);
+    _num_tiles_x = std::ceil(num_elements_x / static_cast<float>(output_tile_size.width));
+    _num_tiles_y = std::ceil(num_elements_y / static_cast<float>(output_tile_size.height));

-    const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input->info(), conv_info, Size2D(3U, 3U));
+    const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input->info(), winograd_info);

-    // Output auto inizialitation if not yet initialized
+    // Output auto initialization if not yet initialized
     auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));

     ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(output->info()->dimension(1)));
@@ -119,24 +133,35 @@ void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor
     build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));

     // Create kernel
-    if((_input->info()->dimension(2) % 2) != 0)
-    {
-        _step_z = 1;
-        _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("winograd_input_transform_2x2_3x3_stepz1_nchw", build_opts.options()));
-    }
-    else
+    std::string kernel_name = "winograd_input_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string();
+
+    // Check optimized kernel if output_dims == 2x2
+    if(output_tile_size.width == 2 && output_tile_size.height == 2)
     {
-        _step_z = 2;
-        _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("winograd_input_transform_2x2_3x3_stepz2_nchw", build_opts.options()));
-        _lws_hint = cl::NDRange(1, 1, 8);
+        if((_input->info()->dimension(2) % 2) != 0)
+        {
+            _step_z = 1;
+        }
+        else
+        {
+            _step_z = 2;
+            _lws_hint = cl::NDRange(1, 1, 8);
+        }
     }
+    // Append stepz and data layout
+    kernel_name += "_stepz";
+    kernel_name += support::cpp11::to_string(_step_z);
+    kernel_name += "_nchw";
+
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+
     // Create window and update padding
-    auto win_config = validate_and_configure_window(input->info(), output->info(), conv_info, kernel_dims);
+    auto win_config = validate_and_configure_window(input->info(), output->info(), winograd_info);
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure(win_config.second);

-    _config_id = "winograd_transform_input_2x2_3x3_";
+    _config_id = kernel_name;
     _config_id += support::cpp11::to_string(input->info()->dimension(0));
     _config_id += "_";
     _config_id += support::cpp11::to_string(input->info()->dimension(1));
@@ -148,11 +173,11 @@ void CLWinogradInputTransformKernel::configure(const ICLTensor *input, ICLTensor
     _config_id += support::cpp11::to_string(conv_info.pad_top());
 }

-Status CLWinogradInputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
+Status CLWinogradInputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, conv_info, kernel_dims));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), conv_info, kernel_dims).first);
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, winograd_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), winograd_info).first);

     return Status{};
 }
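The input transform now derives its tile counts and border extension from winograd_info instead of the hard-coded 2x2/3x3 constants. A worked example of the arithmetic above, using an assumed 224x224 NCHW input, 3x3 kernel, pad 1 and 2x2 output tile (the figures are illustrative, not taken from this patch):

```cpp
#include <cmath>

// Hypothetical configuration, only to show how the formulas in configure() behave.
const int input_w = 224, input_h = 224;               // assumed input size
const int kernel_w = 3, kernel_h = 3;                 // 3x3 kernel
const int tile_w = 2, tile_h = 2;                     // 2x2 output tile
const int pad_l = 1, pad_r = 1, pad_t = 1, pad_b = 1; // pad 1 on every side

// Same formulas as CLWinogradInputTransformKernel::configure()
const int num_elements_x = input_w - (kernel_w - 1) + pad_l + pad_r; // 224
const int num_elements_y = input_h - (kernel_h - 1) + pad_t + pad_b; // 224
const int num_tiles_x = static_cast<int>(std::ceil(num_elements_x / static_cast<float>(tile_w))); // 112
const int num_tiles_y = static_cast<int>(std::ceil(num_elements_y / static_cast<float>(tile_h))); // 112

// 224 is a multiple of the tile width, so no extra right/bottom border is added;
// a 7x7 input with the same parameters would give 7 elements -> 4 tiles and one
// extra border pixel (tile_w - 1 = 1) on the right and bottom.
```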
diff --git a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
index c9823275eb..b59bc79327 100644
--- a/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
+++ b/src/core/CL/kernels/CLWinogradOutputTransformKernel.cpp
@@ -46,13 +46,27 @@ using namespace arm_compute::misc::shape_calculator;
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims, const Size2D &num_tiles)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != num_tiles.area());
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(kernel_dims.width != 3 || kernel_dims.height != 3, "Only 3x3 kernels are supported");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(static_cast<unsigned int>(std::sqrt(input->dimension(2))) != 4, "Only 2x2 output tile is supported");
-    ARM_COMPUTE_UNUSED(kernel_dims);
+    ARM_COMPUTE_RETURN_ERROR_ON(winograd_info.output_data_layout != DataLayout::NCHW);
+
+    const PadStrideInfo conv_info = winograd_info.convolution_info;
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+    const Size2D kernel_size = winograd_info.kernel_size;
+    const Size2D input_dimensions = winograd_info.input_dimensions;
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(kernel_size != Size2D(3U, 3U), "Only 3x3 kernels are supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(2) != 16, "Only 2x2 output tile is supported");
+
+    // Compute number of elements to process in the X and Y direction
+    const int num_elements_x = input_dimensions.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
+    const int num_elements_y = input_dimensions.height - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom();
+    const int num_tiles_x = std::ceil(num_elements_x / static_cast<float>(output_tile_size.width));
+    const int num_tiles_y = std::ceil(num_elements_y / static_cast<float>(output_tile_size.height));
+
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != static_cast<unsigned int>((num_tiles_x * num_tiles_y)));
+    ARM_COMPUTE_UNUSED(output_tile_size);

     if(bias != nullptr)
     {
@@ -63,7 +77,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
     // Checks performed when output is configured
     if(output->total_size() != 0)
     {
-        const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input, output_convolved_dims, DataLayout::NCHW));
+        const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input, winograd_info));

         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
@@ -72,7 +86,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
     return Status{};
 }

-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output, const Size2D &output_tile_size)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
@@ -82,7 +96,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
     bool window_changed = false;

     AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration, num_elems_processed_per_iteration);
-    AccessWindowStatic output_access(output, 0, 0, ceil_to_multiple(output->dimension(0), 2), ceil_to_multiple(output->dimension(1), 2));
+    AccessWindowStatic output_access(output, 0, 0, ceil_to_multiple(output->dimension(0), output_tile_size.width), ceil_to_multiple(output->dimension(1), output_tile_size.height));

     if(bias != nullptr)
     {
@@ -105,36 +119,44 @@ CLWinogradOutputTransformKernel::CLWinogradOutputTransformKernel()
 {
 }

-void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims,
-                                                const Size2D &num_tiles)
+void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_UNUSED(kernel_dims);

     // Output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input->info(), output_convolved_dims, DataLayout::NCHW)));
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_winograd_output_transform_shape(*input->info(), winograd_info)));

-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), kernel_dims, output_convolved_dims, num_tiles));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info));

     _input = input;
     _bias = bias;
     _output = output;

+    // Compute num_tiles_x
+    const Size2D input_dimensions = winograd_info.input_dimensions;
+    const Size2D kernel_size = winograd_info.kernel_size;
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+    const PadStrideInfo conv_info = winograd_info.convolution_info;
+    const int num_elements_x = input_dimensions.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
+    const int num_tiles_x = std::ceil(num_elements_x / static_cast<float>(output_tile_size.width));
+
     // Set build options
     CLBuildOptions build_opts;
     build_opts.add_option_if(_bias != nullptr, std::string("-DHAS_BIAS"));
-    build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(num_tiles.width));
+    build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(num_tiles_x));

     // Create kernel
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("winograd_output_transform_2x2_3x3_nchw", build_opts.options()));
+    std::string kernel_name = "winograd_output_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string() + "_nchw";
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

     // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info());
+    auto win_config = validate_and_configure_window(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), winograd_info.output_tile_size);
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure(win_config.second);

     // Set config_id for enabling LWS tuning
-    _config_id = "winograd_output_transform_2x2_3x3";
+    _config_id = kernel_name;
+    _config_id += "_";
     _config_id += lower_string(string_from_data_type(input->info()->data_type()));
     _config_id += "_";
     _config_id += support::cpp11::to_string(input->info()->dimension(0));
@@ -146,11 +168,10 @@ void CLWinogradOutputTransformKernel::configure(const ICLTensor *input, const IC
     _config_id += support::cpp11::to_string(output->info()->dimension(1));
 }

-Status CLWinogradOutputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims,
-                                                 const Size2D &num_tiles)
+Status CLWinogradOutputTransformKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, (bias != nullptr ? bias->clone().get() : nullptr), output, kernel_dims, output_convolved_dims, num_tiles));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (bias != nullptr ? bias->clone().get() : nullptr), output->clone().get()).first);
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, (bias != nullptr ? bias->clone().get() : nullptr), output, winograd_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (bias != nullptr ? bias->clone().get() : nullptr), output->clone().get(), winograd_info.output_tile_size).first);

     return Status{};
 }
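The new check input->dimension(2) != 16 in the output transform encodes the same constraint as the old sqrt(dimension(2)) != 4: for F(2x2, 3x3) every output tile comes from a (2 + 3 - 1) x (2 + 3 - 1) = 4x4 transformed patch, i.e. 16 values along the Z axis of the batched-GEMM result. A minimal sketch of that relationship, with the tile and kernel sizes as the only inputs:

```cpp
// Number of transformed values per tile for a square Winograd configuration:
// the input tile edge is output_tile + kernel - 1.
constexpr unsigned int output_tile_edge = 2;                                  // 2x2 output tile
constexpr unsigned int kernel_edge      = 3;                                  // 3x3 kernel
constexpr unsigned int input_tile_edge  = output_tile_edge + kernel_edge - 1; // 4
constexpr unsigned int z_dimension      = input_tile_edge * input_tile_edge;  // 16, as checked above
static_assert(z_dimension == 16, "F(2x2, 3x3) yields 16 transformed values per tile");
```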
diff --git a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
index 7af36bf06b..0aa7f8d1b5 100644
--- a/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLWinogradConvolutionLayer.cpp
@@ -39,21 +39,22 @@ CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryMa
 void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
 {
-    // TODO(COMPMID-1013): This part will be removed
-    // Get indeces for the width and height
+    // Get indices for the width and height
     const size_t idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
     const size_t idx_height = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);

+    // Input shape
+    const TensorShape input_shape = input->info()->tensor_shape();
+
     // Kernel size
     const unsigned int kernel_w = weights->info()->tensor_shape()[idx_width];
     const unsigned int kernel_h = weights->info()->tensor_shape()[idx_height];

-    // Number of tiles along the X and Y direction
-    const unsigned int num_tiles_x = std::ceil((input->info()->tensor_shape().x() - (kernel_w - 1) + conv_info.pad_left() + conv_info.pad_right()) / 2.f);
-    const unsigned int num_tiles_y = std::ceil((input->info()->tensor_shape().y() - (kernel_h - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / 2.f);
-
-    // Compute output shape
-    const TensorShape output_convolved_shape = misc::shape_calculator::compute_deep_convolution_shape(*input->info(), *weights->info(), conv_info);
+    const WinogradInfo winograd_info = WinogradInfo(Size2D(2, 2),
+                                                    Size2D(kernel_w, kernel_h),
+                                                    Size2D(input_shape[idx_width], input_shape[idx_height]),
+                                                    conv_info,
+                                                    input->info()->data_layout());

     // Manage intermediate tensors
     _memory_group.manage(&_input0);
@@ -62,17 +63,16 @@ void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *we
     // Do not manage _input1 as it contains the weights

     // Configure input transform
-    _input_transform.configure(input, &_input0, conv_info, Size2D(kernel_w, kernel_h));
+    _input_transform.configure(input, &_input0, winograd_info);

     // Configure filter transform
-    _filter_transform.configure(weights, &_input1, Size2D(2U, 2U));
+    _filter_transform.configure(weights, &_input1, winograd_info);

     // Configure batched matrix multiply
     _batched_mm.configure(&_input0, &_input1, nullptr, &_batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/));

     // Configure output transform
-    _output_transform.configure(&_batched_mm_output, biases, output, Size2D(kernel_w, kernel_h), Size2D(output_convolved_shape[idx_width], output_convolved_shape[idx_height]), Size2D(num_tiles_x,
-                                num_tiles_y));
+    _output_transform.configure(&_batched_mm_output, biases, output, winograd_info);

     // Configure activation layer
     _is_activationlayer_enabled = act_info.enabled();
@@ -90,31 +90,32 @@ void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *we
 Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
 {
-    // TODO(COMPMID-1013): This part will be removed
     // Get indeces for the width and height
     const size_t idx_width = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
     const size_t idx_height = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);

+    // Input shape
+    const TensorShape input_shape = input->tensor_shape();
+
     // Kernel size
     const unsigned int kernel_w = weights->tensor_shape()[idx_width];
     const unsigned int kernel_h = weights->tensor_shape()[idx_height];

-    // Number of tiles along the X and Y direction
-    const unsigned int num_tiles_x = std::ceil((input->tensor_shape().x() - (kernel_w - 1) + conv_info.pad_left() + conv_info.pad_right()) / 2.f);
-    const unsigned int num_tiles_y = std::ceil((input->tensor_shape().y() - (kernel_h - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / 2.f);
-
-    // Compute output shape
-    const TensorShape output_convolved_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info);
+    const WinogradInfo winograd_info = WinogradInfo(Size2D(2, 2),
+                                                    Size2D(kernel_w, kernel_h),
+                                                    Size2D(input_shape[idx_width], input_shape[idx_height]),
+                                                    conv_info,
+                                                    input->data_layout());

     // Validate input transform
-    const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, conv_info, Size2D(kernel_w, kernel_h));
+    const TensorShape input0_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);
     const TensorInfo input0 = input->clone()->set_tensor_shape(input0_shape);
-    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransform::validate(input, &input0, conv_info, Size2D(kernel_w, kernel_h)));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransform::validate(input, &input0, winograd_info));

     // Validate filter transform
-    const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, Size2D(2U, 2U));
+    const TensorShape input1_shape = misc::shape_calculator::compute_winograd_filter_transform_shape(*weights, winograd_info);
     const TensorInfo input1 = weights->clone()->set_tensor_shape(input1_shape);
-    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, Size2D(2U, 2U)));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradFilterTransformKernel::validate(weights, &input1, winograd_info));

     // Validate batched matrix multiply
     TensorShape batched_mm_output_shape = input0.tensor_shape();
@@ -122,10 +123,8 @@ Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITen
     const TensorInfo batched_mm_output = input0.clone()->set_tensor_shape(batched_mm_output_shape);
     ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input0, &input1, nullptr, &batched_mm_output, 1.0f, 0.0f, GEMMInfo(false, false, true /* Reshape weights only for the first run*/)));

-    // Validate output transform
-    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradOutputTransformKernel::validate(&batched_mm_output, biases, output, Size2D(kernel_w, kernel_h), Size2D(output_convolved_shape[idx_width], output_convolved_shape[idx_height]),
-                                                                          Size2D(num_tiles_x, num_tiles_y)));
+    // Configure output transform
+    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradOutputTransformKernel::validate(&batched_mm_output, biases, output, winograd_info));

     // Validate Activation Layer
     if(act_info.enabled())
diff --git a/src/runtime/CL/functions/CLWinogradInputTransform.cpp b/src/runtime/CL/functions/CLWinogradInputTransform.cpp
index 0499d4cd2f..09e84564e2 100644
--- a/src/runtime/CL/functions/CLWinogradInputTransform.cpp
+++ b/src/runtime/CL/functions/CLWinogradInputTransform.cpp
@@ -30,16 +30,16 @@ using namespace arm_compute;
-void CLWinogradInputTransform::configure(ICLTensor *input, ICLTensor *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
+void CLWinogradInputTransform::configure(ICLTensor *input, ICLTensor *output, const WinogradInfo &winograd_info)
 {
     auto k = arm_compute::support::cpp14::make_unique<CLWinogradInputTransformKernel>();
-    k->configure(input, output, conv_info, kernel_dims);
+    k->configure(input, output, winograd_info);
     _kernel = std::move(k);

     _border_handler.configure(input, _kernel->border_size(), BorderMode::CONSTANT, PixelValue(0));
 }

-Status CLWinogradInputTransform::validate(const ITensorInfo *input, const ITensorInfo *output, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
+Status CLWinogradInputTransform::validate(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransformKernel::validate(input, output, conv_info, kernel_dims));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLWinogradInputTransformKernel::validate(input, output, winograd_info));

     return Status{};
 }
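Taken together, a caller that previously had to hand conv_info plus kernel and tile sizes to every stage now builds one WinogradInfo and reuses it throughout. A minimal sketch of the new validate path, mirroring what the refactored CLWinogradConvolutionLayer::validate() does internally; the tensor shape and padding values are assumptions for the example only, and the usual arm_compute headers are assumed to be included.

```cpp
using namespace arm_compute;

// Assumed example configuration: 64-channel 224x224 F32 input, 3x3 kernel,
// stride 1, pad 1, 2x2 output tile, NCHW layout.
const PadStrideInfo conv_info(1, 1, 1, 1);
const WinogradInfo  winograd_info(Size2D(2U, 2U),     // output tile size
                                  Size2D(3U, 3U),     // kernel size
                                  Size2D(224U, 224U), // input width x height
                                  conv_info,
                                  DataLayout::NCHW);

const TensorInfo src(TensorShape(224U, 224U, 64U), 1, DataType::F32);

// Mirrors the validate path above: derive the transformed shape from the same
// descriptor, then pass that descriptor to the function-level validate().
const TensorShape dst_shape = misc::shape_calculator::compute_winograd_input_transform_shape(src, winograd_info);
const TensorInfo  dst       = src.clone()->set_tensor_shape(dst_shape);
const Status      status    = CLWinogradInputTransform::validate(&src, &dst, winograd_info);
```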