From 46a49a0a8206f0efa7afd514940e180a88ffd732 Mon Sep 17 00:00:00 2001
From: giuros01
Date: Mon, 1 Apr 2019 13:50:22 +0100
Subject: COMPMID-1635: Optimize CLDeconvolutionLayer - Part III

Change-Id: Id2661e093a669ef3eaf2a5116cd278a80c1d5a89
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/935
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Gian Marco Iodice
Reviewed-by: Isabella Gottardi
Comments-Addressed: Isabella Gottardi
Tested-by: Isabella Gottardi
Tested-by: Arm Jenkins
---
 src/core/CL/CLKernelLibrary.cpp                    |   3 +-
 src/core/CL/cl_kernels/deconvolution_layer.cl      |  78 ++++-
 .../kernels/CLDeconvolutionReshapeOutputKernel.cpp | 195 +++++++++++
 ...GEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp |   2 +-
 src/runtime/CL/functions/CLDeconvolutionLayer.cpp  |  65 +++-
 .../CL/functions/CLDirectDeconvolutionLayer.cpp    |  13 +-
 src/runtime/CL/functions/CLGEMM.cpp                |   2 +-
 .../CL/functions/CLGEMMDeconvolutionLayer.cpp      | 373 +++++++++++++++++++++
 8 files changed, 719 insertions(+), 12 deletions(-)
 create mode 100644 src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
 create mode 100644 src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 322ff517d9..df60001343 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -214,6 +214,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "copy_planes_3p", "channel_combine.cl" },
    { "copy_to_keypoint", "fast_corners.cl" },
    { "crop_tensor", "crop_tensor.cl" },
+    { "deconvolution_reshape", "deconvolution_layer.cl" },
    { "deconvolution_upsample", "deconvolution_layer.cl" },
    { "depthwise_convolution_3x3", "depthwise_convolution.cl" },
    { "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
@@ -1093,7 +1094,7 @@ Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name, const Stri
     return Kernel(kernel_name, cl_program);
 }
 
-void CLKernelLibrary::add_built_program(const std::string &built_program_name, cl::Program program)
+void CLKernelLibrary::add_built_program(const std::string &built_program_name, const cl::Program &program)
 {
     _built_programs_map.emplace(built_program_name, program);
 }
diff --git a/src/core/CL/cl_kernels/deconvolution_layer.cl b/src/core/CL/cl_kernels/deconvolution_layer.cl
index e5169f983f..ea2455c613 100644
--- a/src/core/CL/cl_kernels/deconvolution_layer.cl
+++ b/src/core/CL/cl_kernels/deconvolution_layer.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -52,3 +52,79 @@ __kernel void deconvolution_upsample(
     // Store result
     *((__global DATA_TYPE *)dst.ptr) = *((__global DATA_TYPE *)src.ptr);
 }
+
+#if defined(FILTER_WIDTH) && defined(FILTER_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
+/** This kernel reshapes the deconvolution output tensor before returning the result of the Deconvolution. The deconvolution output tensor
+ * is the result of a @ref CLGEMM operation between the deconvolution input and the deconvolution filter
+ *
+ * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type, e.g., -DDATA_TYPE=F32
+ * @note The width of the filter should be given as a preprocessor argument using -DFILTER_WIDTH=width, e.g., -DFILTER_WIDTH=2
+ * @note The height of the filter should be given as a preprocessor argument using -DFILTER_HEIGHT=height, e.g., -DFILTER_HEIGHT=2
+ * @note The width of the input should be given as a preprocessor argument using -DSRC_WIDTH=width, e.g., -DSRC_WIDTH=10
+ * @note The height of the input should be given as a preprocessor argument using -DSRC_HEIGHT=height, e.g., -DSRC_HEIGHT=10
+ * @note The output data layout is NCHW if the preprocessor argument NUM_FILTERS is defined, NHWC if NUM_FILTERS is not defined
+ *
+ * @param[in]  src_ptr                            Pointer to the source image. Supported data types: QASYMM8/F16/F32
+ * @param[in]  src_stride_x                       Stride of the source image in X dimension (in bytes)
+ * @param[in]  src_step_x                         src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                       Stride of the source image in Y dimension (in bytes)
+ * @param[in]  src_step_y                         src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                         src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes  The offset of the first element in the source image
+ * @param[out] dst_ptr                            Pointer to the destination image. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                       Stride of the destination image in X dimension (in bytes)
+ * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                       Stride of the destination image in Y dimension (in bytes)
+ * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                         dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination image
+ * @param[in]  bias_ptr                           (Optional) Pointer to the biases vector. Supported data types: F16/F32/S32
+ * @param[in]  bias_stride_x                      (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in]  bias_step_x                        (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  bias_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ */
+__kernel void deconvolution_reshape(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst)
+#if defined(ADD_BIAS)
+    ,
+    VECTOR_DECLARATION(bias)
+#endif // defined(ADD_BIAS)
+)
+{
+#define FILTER_AREA ((FILTER_WIDTH) * (FILTER_HEIGHT))
+
+    Tensor3D        src  = CONVERT_TO_TENSOR3D_STRUCT(src);
+    Tensor3D        dst  = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(dst);
+    const DATA_TYPE data = *(__global DATA_TYPE *)src.ptr;
+
+    // Store result
+    const int x_in = get_global_id(0);
+    const int y_in = get_global_id(1);
+    const int z_in = get_global_id(2);
+
+#if defined(NUM_FILTERS)
+    const int bias_index = x_in / (FILTER_AREA);
+    const int z_out      = bias_index + (NUM_FILTERS) * (z_in / (SRC_HEIGHT));
+    const int x_out      = x_in % (FILTER_WIDTH) + y_in * (FILTER_WIDTH);
+    const int y_out      = (FILTER_HEIGHT) * (z_in % (SRC_HEIGHT)) + ((x_in % (FILTER_AREA)) / (FILTER_WIDTH));
+#else  // defined(NUM_FILTERS)
+    const int x_out      = x_in / (FILTER_AREA);
+    const int y_out      = x_in % (FILTER_WIDTH) + y_in * (FILTER_WIDTH);
+    const int z_out      = (FILTER_HEIGHT) * z_in + ((x_in % (FILTER_AREA)) / (FILTER_WIDTH));
+    const int bias_index = x_out;
+#endif // defined(NUM_FILTERS)
+
+#if defined(ADD_BIAS)
+    Vector          bias     = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+    const DATA_TYPE bias_val = *(__global DATA_TYPE *)vector_offset(&bias, bias_index);
+    *((__global DATA_TYPE *)tensor3D_offset(&dst, x_out, y_out, z_out)) = data + bias_val;
+#else  // defined(ADD_BIAS)
+    *((__global DATA_TYPE *)tensor3D_offset(&dst, x_out, y_out, z_out)) = data;
+#endif // defined(ADD_BIAS)
+
+#undef FILTER_AREA
+}
+#endif // defined(FILTER_WIDTH) && defined(FILTER_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
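
As a reading aid for the index arithmetic in deconvolution_reshape: each GEMM output element (x_in, y_in, z_in) maps to one deconvolution output element, where x_in spans filter-position-times-filter, y_in the input width and z_in the input height (times batch). An equivalent scalar reference is sketched below in C++ (illustrative only; Coords, reshape_nhwc and reshape_nchw are hypothetical names, not part of the patch):

    // Scalar reference of the deconvolution_reshape index mapping.
    struct Coords
    {
        int x, y, z, bias_index;
    };

    // NHWC output (NUM_FILTERS not defined): x picks the filter/channel, (y, z) span the image.
    Coords reshape_nhwc(int x_in, int y_in, int z_in, int filter_w, int filter_h)
    {
        const int filter_area = filter_w * filter_h;
        Coords    c{};
        c.x          = x_in / filter_area;                                // one output channel per filter
        c.y          = x_in % filter_w + y_in * filter_w;                 // output column
        c.z          = filter_h * z_in + (x_in % filter_area) / filter_w; // output row
        c.bias_index = c.x;
        return c;
    }

    // NCHW output (NUM_FILTERS defined): z interleaves filters and batches.
    Coords reshape_nchw(int x_in, int y_in, int z_in, int filter_w, int filter_h, int num_filters, int src_height)
    {
        const int filter_area = filter_w * filter_h;
        Coords    c{};
        c.bias_index = x_in / filter_area;
        c.z          = c.bias_index + num_filters * (z_in / src_height);
        c.x          = x_in % filter_w + y_in * filter_w;
        c.y          = filter_h * (z_in % src_height) + (x_in % filter_area) / filter_w;
        return c;
    }
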
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
new file mode 100644
index 0000000000..71218f5b52
--- /dev/null
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+                          const PadStrideInfo &deconv_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
+    const DataLayout   data_layout = input_info->data_layout();
+    const unsigned int stride_x    = deconv_info.stride().first;
+    const unsigned int stride_y    = deconv_info.stride().second;
+
+    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+    const bool is_qasymm = is_data_type_quantized_asymmetric(input_info->data_type());
+
+    ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_w) != deconv_info.stride().first);
+    ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_h) != deconv_info.stride().second);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::S32);
+    if(!is_qasymm)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_info, weights_info);
+    }
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_info->dimension(idx_w) * weights_info->dimension(idx_h) * weights_info->dimension(idx_b));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != input_info->dimension(idx_w));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != input_info->dimension(idx_h));
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(3) != input_info->dimension(idx_b));
+
+    if(bias != nullptr)
+    {
+        if(is_qasymm)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
+        }
+        else
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, input);
+        }
+        ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights_info->dimension(idx_b));
+    }
+
+    if(output->total_size() != 0)
+    {
+        auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+                                                        0, 0, stride_x, stride_y);
+
+        const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
+    }
+    return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input, ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    const DataLayout data_layout = input_info->data_layout();
+
+    const unsigned int stride_x = deconv_info.stride().first;
+    const unsigned int stride_y = deconv_info.stride().second;
+    const size_t       idx_w    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const size_t       idx_h    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+    auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+                                                    0, 0, stride_x, stride_y);
+
+    const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+
+    auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout).set_quantization_info(input->quantization_info()));
+
+    Window win = calculate_max_window(*input);
+
+    return std::make_pair(Status{}, win);
+}
+} // namespace
+
+CLDeconvolutionReshapeOutputKernel::CLDeconvolutionReshapeOutputKernel()
+    : _add_bias(false),
+      _bias(nullptr)
+{
+}
+
+void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+                                                   const PadStrideInfo &deconv_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), input_info, weights_info, deconv_info));
+
+    // Configure kernel window
+    auto win_config = validate_and_configure_window(input->info(), output->info(), input_info, weights_info, deconv_info);
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+    const DataLayout data_layout = input_info->data_layout();
+    const size_t     idx_w       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const size_t     idx_h       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const size_t     idx_b       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+    _input    = input;
+    _output   = output;
+    _add_bias = (bias != nullptr);
+    _bias     = bias;
+
+    const int filter_w = weights_info->dimension(idx_w);
+    const int filter_h = weights_info->dimension(idx_h);
+    const int filter_b = weights_info->dimension(idx_b);
+    const int img_w    = input_info->dimension(idx_w);
+    const int img_h    = input_info->dimension(idx_h);
+
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+    build_opts.add_option("-DFILTER_WIDTH=" + support::cpp11::to_string(filter_w));
+    build_opts.add_option("-DFILTER_HEIGHT=" + support::cpp11::to_string(filter_h));
+    build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(img_w));
+    build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(img_h));
+    build_opts.add_option_if(data_layout == DataLayout::NCHW, "-DNUM_FILTERS=" + support::cpp11::to_string(filter_b));
+    build_opts.add_option_if(_add_bias, "-DADD_BIAS");
+
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("deconvolution_reshape", build_opts.options()));
+    ICLKernel::configure_internal(win_config.second);
+
+    // Set config_id for enabling LWS tuning
+    _config_id = "deconvolution_reshape_output_";
+    _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+    _config_id += "_";
+    _config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(input->info()->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(input->info()->dimension(1));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(output->info()->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(output->info()->dimension(1));
+}
+
+Status CLDeconvolutionReshapeOutputKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+                                                    const PadStrideInfo &deconv_info)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, input_info, weights_info, deconv_info));
+    return Status{};
+}
+
+void CLDeconvolutionReshapeOutputKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+
+    unsigned int idx = 0;
+    add_3D_tensor_argument(idx, _input, collapsed);
+    add_3D_tensor_argument(idx, _output, collapsed);
+    if(_add_bias)
+    {
+        add_1D_tensor_argument(idx, _bias, collapsed);
+    }
+    enqueue(queue, *this, collapsed, lws_hint());
+}
+} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index eca24169b9..923b9529fa 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -321,4 +321,4 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl
     }
     while(window.slide_window_slice_3D(slice));
 }
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
index 2c17473fc7..c6f79d341f 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
@@ -44,11 +44,29 @@ CLDeconvolutionLayer::CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memor
 void CLDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
                                      unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info)
 {
-    ARM_COMPUTE_UNUSED(inner_border_right, inner_border_top);
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
-    auto f = arm_compute::support::cpp14::make_unique<CLDirectDeconvolutionLayer>();
-    f->configure(input, weights, bias, output, deconv_info, weights_info);
-    _function = std::move(f);
+    ARM_COMPUTE_UNUSED(inner_border_right, inner_border_top);
+
+    switch(CLDeconvolutionLayer::get_deconvolution_method(input->info(), weights->info(), nullptr, output->info(), deconv_info, weights_info))
+    {
+        case DeconvolutionMethod::DIRECT:
+        {
+            auto f = arm_compute::support::cpp14::make_unique<CLDirectDeconvolutionLayer>();
+            f->configure(input, weights, bias, output, deconv_info, weights_info);
+            _function = std::move(f);
+            break;
+        }
+        case DeconvolutionMethod::GEMM:
+        {
+            auto f = arm_compute::support::cpp14::make_unique<CLGEMMDeconvolutionLayer>(_memory_manager);
+            f->configure(input, weights, bias, output, deconv_info);
+            _function = std::move(f);
+            break;
+        }
+        default:
+            ARM_COMPUTE_ERROR("Not supported.");
+            break;
+    }
 }
 
 Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
@@ -56,10 +74,47 @@ Status CLDeconvolutionLayer::validate(const ITensorInf
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_UNUSED(inner_border_right, inner_border_top);
-    ARM_COMPUTE_RETURN_ON_ERROR(CLDirectDeconvolutionLayer::validate(input, weights, bias, output, deconv_info, weights_info));
+
+    switch(CLDeconvolutionLayer::get_deconvolution_method(input, weights, bias, output, deconv_info, weights_info))
+    {
+        case DeconvolutionMethod::DIRECT:
+        {
+            // Validate direct convolution layer
+            ARM_COMPUTE_RETURN_ON_ERROR(CLDirectDeconvolutionLayer::validate(input, weights, bias, output, deconv_info, weights_info));
+            break;
+        }
+        case DeconvolutionMethod::GEMM:
+        {
+            // Validate gemm-based convolution layer
+            ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMDeconvolutionLayer::validate(input, weights, bias, output, deconv_info));
+            break;
+        }
+        default:
+            ARM_COMPUTE_ERROR("Not supported.");
+            break;
+    }
+
+    return Status{};
 }
 
+DeconvolutionMethod CLDeconvolutionLayer::get_deconvolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, ITensorInfo *output, const PadStrideInfo &deconv_info,
+                                                                   const WeightsInfo &weights_info)
+{
+    ARM_COMPUTE_UNUSED(output, bias, weights_info);
+
+    const DataLayout data_layout = input->data_layout();
+
+    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+    if(weights->dimension(idx_w) != deconv_info.stride().first || weights->dimension(idx_h) != deconv_info.stride().second)
+    {
+        return DeconvolutionMethod::DIRECT;
+    }
+
+    return DeconvolutionMethod::GEMM;
+}
+
 void CLDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info,
                                      const WeightsInfo &weights_info)
 {
diff --git a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
index 721054dcf3..6e14e26cbd 100644
--- a/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectDeconvolutionLayer.cpp
@@ -28,7 +28,6 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "utils/TypePrinter.h"
 
 #include <memory>
 #include <tuple>
@@ -161,8 +160,16 @@ void CLDirectDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights,
     _flip_axis.allocator()->allocate();
     _flip_axis.map(true);
     auto axis_data = reinterpret_cast<uint32_t *>(_flip_axis.buffer());
-    axis_data[0] = 0;
-    axis_data[1] = 1;
+    if(weights->info()->data_layout() == DataLayout::NHWC)
+    {
+        axis_data[0] = 1;
+        axis_data[1] = 2;
+    }
+    else
+    {
+        axis_data[0] = 0;
+        axis_data[1] = 1;
+    }
     _flip_axis.unmap();
 }
 
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 60bfbf24e5..492709f0d0 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -206,7 +206,7 @@ void CLGEMM::configure_reshaped_v2(const ICLTensor *a, const ICLTensor *b, const
     _reshape_lhs_kernel.set_target(gpu_target);
     _mm_kernel.set_target(gpu_target);
 
-    GEMMReshapeInfo reshape_info(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);
+    GEMMReshapeInfo reshape_info(m, n, k, 1, 1, depth_output_gemm3d, false);
 
     // Manage intermediate buffers
     _memory_group.manage(&_tmp_a);
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
new file mode 100644
index 0000000000..bcb91e052c
--- /dev/null
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "utils/TypePrinter.h"
+
+#include <memory>
+#include <tuple>
+
+namespace arm_compute
+{
+namespace
+{
+std::pair<Coordinates, Coordinates> compute_start_end_slice_coordinates(const ITensorInfo &output_info, const PadStrideInfo &deconv_info, bool is_nchw)
+{
+    Coordinates start;
+    Coordinates end;
+
+    if(is_nchw)
+    {
+        start.set(0, deconv_info.pad_left());
+        start.set(1, deconv_info.pad_top());
+        end.set(0, output_info.dimension(0) - deconv_info.pad_right());
+        end.set(1, output_info.dimension(1) - deconv_info.pad_bottom());
+    }
+    else
+    {
+        start.set(0, 0);
+        start.set(1, deconv_info.pad_left());
+        start.set(2, deconv_info.pad_top());
+
+        end.set(0, output_info.dimension(0));
+        end.set(1, output_info.dimension(1) - deconv_info.pad_right());
+        end.set(2, output_info.dimension(2) - deconv_info.pad_bottom());
+    }
+
+    return { start, end };
+}
+} // namespace
+
+CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
+    : _memory_group(std::move(memory_manager)),
+      _mm_gemm(),
+      _mm_gemmlowp(),
+      _gemmlowp_output_stage(),
+      _permute_input_to_nhwc(),
+      _permute_weights_to_nhwc(),
+      _reshape_weights(),
+      _transpose_weights(),
+      _deconv_reshape(),
+      _slice_gemm(),
+      _gemmlowp_final(),
+      _reshaped_weights(),
+      _reshaped_weights_t(),
+      _permuted_input(),
+      _permuted_weights(),
+      _gemm_output(),
+      _slice_gemm_input(),
+      _original_weights(),
+      _is_prepared(false),
+      _padded_input(false),
+      _is_nchw(false),
+      _is_quantized(false)
+{
+}
+
+Status CLGEMMDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
+
+    DataLayout data_layout  = input->data_layout();
+    const bool padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
+    const bool is_nchw      = input->data_layout() == DataLayout::NCHW;
+    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
+
+    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != deconv_info.stride().first);
+    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) != deconv_info.stride().second);
+
+    TensorShape nhwc_weights_shape = weights->tensor_shape();
+    TensorShape nhwc_input_shape   = input->tensor_shape();
+
+    if(is_nchw)
+    {
+        permute(nhwc_weights_shape, PermutationVector(2, 0, 1));
+        permute(nhwc_input_shape, PermutationVector(2, 0, 1));
+
+        TensorInfo nhwc_input_info = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_input_shape).set_data_layout(DataLayout::NCHW);
+
+        TensorInfo nhwc_weights_info = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_weights_shape).set_data_layout(DataLayout::NCHW);
+
+        CLPermute::validate(weights, &nhwc_weights_info, PermutationVector(2, 0, 1));
+        CLPermute::validate(input, &nhwc_input_info, PermutationVector(2, 0, 1));
+    }
+
+    const TensorShape reshaped_shape = TensorShape(nhwc_weights_shape[0], nhwc_weights_shape[1] * nhwc_weights_shape[2] * nhwc_weights_shape[3]);
+    const TensorInfo  reshaped_info  = weights->clone()->set_tensor_shape(reshaped_shape).set_data_layout(DataLayout::NCHW).set_is_resizable(true);
+    ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(weights, &reshaped_info));
+
+    TensorShape      transposed_shape(reshaped_shape[1], reshaped_shape[0]);
+    const TensorInfo reshaped_t_info = reshaped_info.clone()->set_is_resizable(true).set_tensor_shape(transposed_shape);
+    ARM_COMPUTE_RETURN_ON_ERROR(CLTranspose::validate(&reshaped_info, &reshaped_t_info));
+
+    TensorShape gemm_output_shape(weights->dimension(idx_w) * weights->dimension(idx_h) * weights->dimension(idx_b),
+                                  input->dimension(idx_w),
+                                  input->dimension(idx_h),
+                                  input->dimension(idx_b));
+
+    TensorInfo gemm_output_info = reshaped_t_info.clone()->set_tensor_shape(gemm_output_shape).set_is_resizable(true);
+    GEMMInfo   gemm_info(false, false, true, input->dimension(idx_h), true);
+
+    if(is_quantized)
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_tensor_shape(nhwc_input_shape), &reshaped_t_info, nullptr, &gemm_output_info.set_data_type(DataType::S32),
+                                                                           gemm_info));
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input->clone()->set_tensor_shape(nhwc_input_shape).set_is_resizable(true), &reshaped_t_info, nullptr, &gemm_output_info, 1.0f, 0.0f, gemm_info));
+    }
+
+    auto out_dims = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h),
+                                                    0, 0, deconv_info.stride().first, deconv_info.stride().second);
+    const TensorShape deconv_shape       = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights);
+    TensorInfo        col2im_output_info = gemm_output_info.clone()->set_tensor_shape(deconv_shape).set_is_resizable(true);
+
+    if(padded_input && is_quantized)
+    {
+        const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
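+        // Padded and quantized: the reshaped output stays S32, is requantized to
+        // QASYMM8 by the GEMMLowp output stage, and is finally cropped with CLSlice.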
+        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&col2im_output_info, nullptr,
+                                                                                                  &col2im_output_info.clone()->set_is_resizable(true).set_data_type(DataType::QASYMM8)));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info.clone()->set_is_resizable(true).set_data_type(DataType::QASYMM8), output, start_end.first, start_end.second));
+    }
+    else if(padded_input)
+    {
+        const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
+        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info, output, start_end.first, start_end.second));
+    }
+    else if(is_quantized)
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&col2im_output_info, nullptr, output));
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, output, input, weights, deconv_info));
+    }
+
+    return Status{};
+}
+
+void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
+    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMDeconvolutionLayer::validate(input->info(),
+                                                                  weights->info(),
+                                                                  bias != nullptr ? bias->info() : nullptr,
+                                                                  output->info(),
+                                                                  deconv_info));
+
+    _original_weights = weights;
+    _padded_input     = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
+    _is_nchw          = input->info()->data_layout() == DataLayout::NCHW;
+    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());
+
+    const ICLTensor *input_to_use   = input;
+    const ICLTensor *weights_to_use = weights;
+
+    // If the data layout is NCHW, transform everything to NHWC. Another alternative could be to
+    // do an outer product in NCHW and then an accumulation through a reduction. This would have two
+    // drawbacks: first, the outer product is less efficient than a full GEMM. Second, the reduction
+    // might be slower than GEMM.
+    if(_is_nchw)
+    {
+        _memory_group.manage(&_permuted_input);
+        _permute_input_to_nhwc.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U));
+
+        _permute_weights_to_nhwc.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));
+
+        input_to_use   = &_permuted_input;
+        weights_to_use = &_permuted_weights;
+    }
+
+    // Reshape the input weights. The weights will be reshaped only once during the call to prepare().
+    _reshaped_weights.allocator()->init(TensorInfo(TensorShape(weights_to_use->info()->dimension(0),
+                                                               weights_to_use->info()->dimension(1) * weights_to_use->info()->dimension(2) * weights_to_use->info()->dimension(3)),
+                                                   1,
+                                                   input->info()->data_type(), weights->info()->quantization_info()));
+
+    _reshape_weights.configure(weights_to_use, &_reshaped_weights);
+    _transpose_weights.configure(&_reshaped_weights, &_reshaped_weights_t);
+
+    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+    GEMMInfo     gemm_info(false, false, true, input->info()->dimension(idx_h), true);
+
+    // Configure output stage for asymmetric quantized types
+    if(_is_quantized)
+    {
+        _mm_gemmlowp.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);
+    }
+    else
+    {
+        _mm_gemm.configure(input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, 1.f, 0.0f, gemm_info);
+    }
+
+    if(_is_nchw)
+    {
+        _permuted_input.allocator()->allocate();
+    }
+
+    ICLTensor *deconv_reshape_output = nullptr;
+    ICLTensor *slice_output          = nullptr;
+    ICLTensor *output_stage_output   = nullptr;
+
+    if(_padded_input && _is_quantized)
+    {
+        _memory_group.manage(&_slice_gemm_input);
+        _memory_group.manage(&_gemmlowp_final);
+        deconv_reshape_output = &_gemmlowp_final;
+        output_stage_output   = &_slice_gemm_input;
+        slice_output          = output;
+    }
+    else if(_padded_input)
+    {
+        _memory_group.manage(&_slice_gemm_input);
+        deconv_reshape_output = &_slice_gemm_input;
+        slice_output          = output;
+    }
+    else if(_is_quantized)
+    {
+        _memory_group.manage(&_gemmlowp_final);
+        deconv_reshape_output = &_gemmlowp_final;
+        output_stage_output   = output;
+    }
+    else
+    {
+        deconv_reshape_output = output;
+    }
+
+    // Configure a Col2Im call to reshape the output of GEMM
+    _deconv_reshape.configure(&_gemm_output, bias, deconv_reshape_output, input->info(), weights->info(), deconv_info);
+    _gemm_output.allocator()->allocate();
+
+    if(_is_quantized)
+    {
+        // Requantize: the fixed-point multiplier/shift approximate input_scale * weights_scale / output_scale
+        float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / _gemmlowp_final.info()->quantization_info().scale;
+        int   output_multiplier(0);
+        int   output_shift(0);
+        quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+        _gemmlowp_output_stage.configure(&_gemmlowp_final, nullptr, output_stage_output, output_multiplier, output_shift, _gemmlowp_final.info()->quantization_info().offset);
+        _gemmlowp_final.allocator()->allocate();
+    }
+
+    // If the input was padded, the output needs to be sliced.
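+    // The slice crops deconv_info.pad_* elements from each border of the reshaped
+    // output, e.g. in NCHW: start = (pad_left, pad_top), end = (width - pad_right, height - pad_bottom).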
+    if(_padded_input)
+    {
+        const auto start_end = compute_start_end_slice_coordinates(*deconv_reshape_output->info(), deconv_info, _is_nchw);
+        _slice_gemm.configure(&_slice_gemm_input, slice_output, start_end.first, start_end.second);
+        _slice_gemm_input.allocator()->allocate();
+    }
+}
+
+void CLGEMMDeconvolutionLayer::run()
+{
+    prepare();
+
+    MemoryGroupResourceScope scope_mg(_memory_group);
+
+    if(_is_nchw)
+    {
+        _permute_input_to_nhwc.run();
+    }
+
+    if(_is_quantized)
+    {
+        _mm_gemmlowp.run();
+    }
+    else
+    {
+        _mm_gemm.run();
+    }
+
+    CLScheduler::get().enqueue(_deconv_reshape, false);
+
+    if(_is_quantized)
+    {
+        _gemmlowp_output_stage.run();
+    }
+
+    if(_padded_input)
+    {
+        _slice_gemm.run();
+    }
+}
+
+void CLGEMMDeconvolutionLayer::prepare()
+{
+    if(!_is_prepared)
+    {
+        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
+
+        if(_is_nchw)
+        {
+            _permuted_weights.allocator()->allocate();
+            _permute_weights_to_nhwc.run();
+        }
+
+        _reshaped_weights.allocator()->allocate();
+        _reshape_weights.run();
+
+        if(_is_nchw)
+        {
+            _permuted_weights.allocator()->free();
+        }
+
+        _reshaped_weights_t.allocator()->allocate();
+        _transpose_weights.run();
+
+        // Prepare gemm
+        if(!_is_quantized)
+        {
+            _mm_gemm.prepare();
+        }
+        else
+        {
+            _mm_gemmlowp.prepare();
+        }
+
+        // Free resources
+        if(!_reshaped_weights_t.is_used())
+        {
+            _reshaped_weights_t.allocator()->free();
+        }
+
+        _original_weights->mark_as_unused();
+        _is_prepared = true;
+    }
+}
+} // namespace arm_compute
-- 
cgit v1.2.1
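
For reference, a minimal host-side sketch of how the new path is reached: when the filter size equals the stride (e.g. a 2x2 filter with stride 2), CLDeconvolutionLayer::get_deconvolution_method() returns DeconvolutionMethod::GEMM and CLDeconvolutionLayer dispatches to CLGEMMDeconvolutionLayer. The sketch below assumes the 19.05-era API shown in this patch; tensor shapes are illustrative and data filling/error handling is omitted.

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // NCHW FP32: 8x8 input with 16 channels, 4 filters of size 2x2 (== stride),
        // so get_deconvolution_method() selects the GEMM-based path added here.
        CLTensor input, weights, bias, output;
        input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 16U, 1U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(2U, 2U, 16U, 4U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(16U, 16U, 4U, 1U), 1, DataType::F32)); // (8 - 1) * 2 + 2 = 16

        CLDeconvolutionLayer deconv; // default memory manager
        deconv.configure(&input, &weights, &bias, &output, PadStrideInfo(2, 2, 0, 0));

        input.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input/weights/bias ...

        deconv.run(); // reshape + transpose weights, GEMM, then the deconvolution_reshape kernel
        CLScheduler::get().sync();
        return 0;
    }

Since no padding is set, the reshape kernel writes directly into the output and the CLSlice crop stage is skipped.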