author    giuros01 <giuseppe.rossini@arm.com>            2019-04-01 13:50:22 +0100
committer Gian Marco Iodice <gianmarco.iodice@arm.com>   2019-05-10 08:26:44 +0000
commit    46a49a0a8206f0efa7afd514940e180a88ffd732 (patch)
tree      0ec53af4ef65037e357b1d8f6a1d1f65075659f7 /src/core
parent    879e8dd2fc8523e4059ba9ced9ea0edb57103778 (diff)
download  ComputeLibrary-46a49a0a8206f0efa7afd514940e180a88ffd732.tar.gz
COMPMID-1635: Optimize CLDeconvolutionLayer - Part III
Change-Id: Id2661e093a669ef3eaf2a5116cd278a80c1d5a89
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/935
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Isabella Gottardi <isabella.gottardi@arm.com>
Comments-Addressed: Isabella Gottardi <isabella.gottardi@arm.com>
Tested-by: Isabella Gottardi <isabella.gottardi@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                                         |   3
-rw-r--r--  src/core/CL/cl_kernels/deconvolution_layer.cl                           |  78
-rw-r--r--  src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp             | 195
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp  |   2
4 files changed, 275 insertions, 3 deletions
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 322ff517d9..df60001343 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -214,6 +214,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "copy_planes_3p", "channel_combine.cl" },
{ "copy_to_keypoint", "fast_corners.cl" },
{ "crop_tensor", "crop_tensor.cl" },
+ { "deconvolution_reshape", "deconvolution_layer.cl" },
{ "deconvolution_upsample", "deconvolution_layer.cl" },
{ "depthwise_convolution_3x3", "depthwise_convolution.cl" },
{ "depthwise_convolution_3x3_f16", "depthwise_convolution.cl" },
@@ -1093,7 +1094,7 @@ Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name, const Stri
return Kernel(kernel_name, cl_program);
}
-void CLKernelLibrary::add_built_program(const std::string &built_program_name, cl::Program program)
+void CLKernelLibrary::add_built_program(const std::string &built_program_name, const cl::Program &program)
{
_built_programs_map.emplace(built_program_name, program);
}
diff --git a/src/core/CL/cl_kernels/deconvolution_layer.cl b/src/core/CL/cl_kernels/deconvolution_layer.cl
index e5169f983f..ea2455c613 100644
--- a/src/core/CL/cl_kernels/deconvolution_layer.cl
+++ b/src/core/CL/cl_kernels/deconvolution_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,3 +52,79 @@ __kernel void deconvolution_upsample(
// Store result
*((__global DATA_TYPE *)dst.ptr) = *((__global DATA_TYPE *)src.ptr);
}
+
+#if defined(FILTER_WIDTH) && defined(FILTER_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
+/** This kernel reshapes the deconvolution output tensor before returning the result of the Deconvolution. The deconvolution output tensor
+ * is the result of a @ref CLGEMM operation between the deconvolution input and the deconvolution filter
+ *
+ * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type, e.g., -DDATA_TYPE=F32
+ * @note The width of the filter should be given as a preprocessor argument using -DFILTER_WIDTH=width, e.g., -DFILTER_WIDTH=2
+ * @note The height of the filter should be given as a preprocessor argument using -DFILTER_HEIGHT=height, e.g., -DFILTER_HEIGHT=2
+ * @note The width of the input should be given as a preprocessor argument using -DSRC_WIDTH=width, e.g., -DSRC_WIDTH=10
+ * @note The height of the input should be given as a preprocessor argument using -DSRC_HEIGHT=height, e.g., -DSRC_HEIGHT=10
+ * @note The output data layout is NCHW if the preprocessor argument NUM_FILTERS is defined, NHWC if NUM_FILTERS is not defined
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8/F16/F32
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] dst_ptr Pointer to the destination image. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination image
+ * @param[in] bias_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32/S32
+ * @param[in] bias_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ */
+__kernel void deconvolution_reshape(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst)
+#if defined(ADD_BIAS)
+ ,
+ VECTOR_DECLARATION(bias)
+#endif // defined(ADD_BIAS)
+)
+{
+#define FILTER_AREA ((FILTER_WIDTH) * (FILTER_HEIGHT))
+
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(dst);
+ const DATA_TYPE data = *(__global DATA_TYPE *)src.ptr;
+
+ // Store result
+ const int x_in = get_global_id(0);
+ const int y_in = get_global_id(1);
+ const int z_in = get_global_id(2);
+
+#if defined(NUM_FILTERS)
+ const int bias_index = x_in / (FILTER_AREA);
+ const int z_out = bias_index + (NUM_FILTERS) * (z_in / (SRC_HEIGHT));
+ const int x_out = x_in % (FILTER_WIDTH) + y_in * (FILTER_WIDTH);
+ const int y_out = (FILTER_HEIGHT) * (z_in % (SRC_HEIGHT)) + ((x_in % (FILTER_AREA)) / (FILTER_WIDTH));
+#else // defined(NUM_FILTERS)
+ const int x_out = x_in / (FILTER_AREA);
+ const int y_out = x_in % (FILTER_WIDTH) + y_in * (FILTER_WIDTH);
+ const int z_out = (FILTER_HEIGHT) * z_in + ((x_in % (FILTER_AREA)) / (FILTER_WIDTH));
+ const int bias_index = x_out;
+#endif // defined(NUM_FILTERS)
+
+#if defined(ADD_BIAS)
+ Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+ const DATA_TYPE bias_val = *(__global DATA_TYPE *)vector_offset(&bias, bias_index);
+ *((__global DATA_TYPE *)tensor3D_offset(&dst, x_out, y_out, z_out)) = data + bias_val;
+#else // defined(ADD_BIAS)
+ *((__global DATA_TYPE *)tensor3D_offset(&dst, x_out, y_out, z_out)) = data;
+#endif // defined(ADD_BIAS)
+
+#undef FILTER_AREA
+}
+#endif // defined(FILTER_WIDTH) && defined(FILTER_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE)
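
For readers following the index arithmetic in the kernel above, the following is a hypothetical host-side C++ re-implementation of the coordinate remapping (not part of this patch). The FILTER_*, SRC_* and NUM_FILTERS values are illustrative stand-ins for the -D build options; the program prints where one GEMM-output element would land in the reshaped tensor for each layout branch.

// Hypothetical host-side emulation of the deconvolution_reshape index mapping.
// The constants below stand in for the kernel's preprocessor defines.
#include <cstdio>

int main()
{
    const int filter_width  = 2; // FILTER_WIDTH
    const int filter_height = 2; // FILTER_HEIGHT
    const int src_height    = 3; // SRC_HEIGHT (deconvolution input height)
    const int num_filters   = 4; // NUM_FILTERS (defined only for NCHW output)
    const int filter_area   = filter_width * filter_height;

    // One example GEMM-output coordinate, i.e. what the kernel reads as x_in/y_in/z_in.
    const int x_in = 5, y_in = 1, z_in = 2;

    // Branch with NUM_FILTERS defined: the channel index is folded into dim 2 (NCHW-style output).
    {
        const int bias_index = x_in / filter_area;
        const int z_out      = bias_index + num_filters * (z_in / src_height);
        const int x_out      = x_in % filter_width + y_in * filter_width;
        const int y_out      = filter_height * (z_in % src_height) + (x_in % filter_area) / filter_width;
        std::printf("NUM_FILTERS defined: (%d,%d,%d) -> (%d,%d,%d), bias %d\n", x_in, y_in, z_in, x_out, y_out, z_out, bias_index);
    }

    // Branch without NUM_FILTERS: the channel index becomes dim 0 (NHWC-style output).
    {
        const int x_out      = x_in / filter_area;
        const int y_out      = x_in % filter_width + y_in * filter_width;
        const int z_out      = filter_height * z_in + (x_in % filter_area) / filter_width;
        const int bias_index = x_out;
        std::printf("NUM_FILTERS not defined: (%d,%d,%d) -> (%d,%d,%d), bias %d\n", x_in, y_in, z_in, x_out, y_out, z_out, bias_index);
    }
    return 0;
}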
diff --git a/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
new file mode 100644
index 0000000000..71218f5b52
--- /dev/null
+++ b/src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
+ const DataLayout data_layout = input_info->data_layout();
+ const unsigned int stride_x = deconv_info.stride().first;
+ const unsigned int stride_y = deconv_info.stride().second;
+
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ const bool is_qasymm = is_data_type_quantized_asymmetric(input_info->data_type());
+
+ ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_w) != deconv_info.stride().first);
+ ARM_COMPUTE_RETURN_ERROR_ON(weights_info->dimension(idx_h) != deconv_info.stride().second);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::S32);
+ if(!is_qasymm)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_info, weights_info);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) != weights_info->dimension(idx_w) * weights_info->dimension(idx_h) * weights_info->dimension(idx_b));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(1) != input_info->dimension(idx_w));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != input_info->dimension(idx_h));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(3) != input_info->dimension(idx_b));
+
+ if(bias != nullptr)
+ {
+ if(is_qasymm)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(bias, input);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON(bias->dimension(0) != weights_info->dimension(idx_b));
+ }
+
+ if(output->total_size() != 0)
+ {
+ auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+ 0, 0, stride_x, stride_y);
+
+ const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
+ }
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input, ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info, const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ const DataLayout data_layout = input_info->data_layout();
+
+ const unsigned int stride_x = deconv_info.stride().first;
+ const unsigned int stride_y = deconv_info.stride().second;
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+
+ auto out_dims = deconvolution_output_dimensions(input_info->dimension(idx_w), input_info->dimension(idx_h), weights_info->dimension(idx_w), weights_info->dimension(idx_h),
+ 0, 0, stride_x, stride_y);
+
+ const TensorShape output_shape = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input_info, *weights_info);
+
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_layout(data_layout).set_quantization_info(input->quantization_info()));
+
+ Window win = calculate_max_window(*input);
+
+ return std::make_pair(Status{}, win);
+}
+} // namespace
+
+CLDeconvolutionReshapeOutputKernel::CLDeconvolutionReshapeOutputKernel()
+ : _add_bias(false),
+ _bias(nullptr)
+{
+}
+
+void CLDeconvolutionReshapeOutputKernel::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, input_info, weights_info);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias != nullptr ? bias->info() : nullptr), output->info(), input_info, weights_info, deconv_info));
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), output->info(), input_info, weights_info, deconv_info);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+ const DataLayout data_layout = input_info->data_layout();
+ const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ _input = input;
+ _output = output;
+ _add_bias = (bias != nullptr);
+ _bias = bias;
+
+ const int filter_w = weights_info->dimension(idx_w);
+ const int filter_h = weights_info->dimension(idx_h);
+ const int filter_b = weights_info->dimension(idx_b);
+ const int img_w = input_info->dimension(idx_w);
+ const int img_h = input_info->dimension(idx_h);
+
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DFILTER_WIDTH=" + support::cpp11::to_string(filter_w));
+ build_opts.add_option("-DFILTER_HEIGHT=" + support::cpp11::to_string(filter_h));
+ build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(img_w));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(img_h));
+ build_opts.add_option_if(data_layout == DataLayout::NCHW, "-DNUM_FILTERS=" + support::cpp11::to_string(filter_b));
+ build_opts.add_option_if(_add_bias, "-DADD_BIAS");
+
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("deconvolution_reshape", build_opts.options()));
+ ICLKernel::configure_internal(win_config.second);
+
+ // Set config_id for enabling LWS tuning
+ _config_id = "deconvolution_reshape_output_";
+ _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+ _config_id += "_";
+ _config_id += lower_string(string_from_data_layout(input->info()->data_layout()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(1));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(1));
+}
+
+Status CLDeconvolutionReshapeOutputKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const ITensorInfo *input_info, const ITensorInfo *weights_info,
+ const PadStrideInfo &deconv_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output, input_info, weights_info, deconv_info));
+ return Status{};
+}
+
+void CLDeconvolutionReshapeOutputKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, collapsed);
+ add_3D_tensor_argument(idx, _output, collapsed);
+ if(_add_bias)
+ {
+ add_1D_tensor_argument(idx, _bias, collapsed);
+ }
+ enqueue(queue, *this, collapsed, lws_hint());
+}
+} // namespace arm_compute
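
The sketch below is a minimal, hypothetical host-side usage of the new CLDeconvolutionReshapeOutputKernel (not part of this patch). It assumes NCHW FP32 data, a 2x2 stride-2 filter with 3 input and 5 output feature maps, and no bias; the tensor shapes follow the checks in validate_arguments() above, and the kernel is enqueued through CLScheduler, the runtime's usual scheduling entry point.

// Hypothetical usage sketch under the assumptions stated above.
#include "arm_compute/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Metadata of the original deconvolution input and of the 2x2 weights (NCHW: W, H, IFM, OFM).
    TensorInfo deconv_input_info(TensorShape(4U, 4U, 3U, 1U), 1, DataType::F32);
    TensorInfo weights_info(TensorShape(2U, 2U, 3U, 5U), 1, DataType::F32);
    deconv_input_info.set_data_layout(DataLayout::NCHW);
    weights_info.set_data_layout(DataLayout::NCHW);

    // GEMM output to be reshaped: dim0 = filter_w * filter_h * OFM, then input W, H, batches.
    CLTensor gemm_output{};
    CLTensor reshaped_output{};
    gemm_output.allocator()->init(TensorInfo(TensorShape(20U, 4U, 4U, 1U), 1, DataType::F32));

    // The stride must match the filter size, as enforced by validate_arguments().
    const PadStrideInfo deconv_info(2, 2, 0, 0);

    CLDeconvolutionReshapeOutputKernel reshape_kernel;
    reshape_kernel.configure(&gemm_output, nullptr, &reshaped_output, &deconv_input_info, &weights_info, deconv_info);

    // The output info is auto-initialised during configure(); allocate backing storage afterwards.
    gemm_output.allocator()->allocate();
    reshaped_output.allocator()->allocate();

    CLScheduler::get().enqueue(reshape_kernel);
    CLScheduler::get().sync();
    return 0;
}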
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index eca24169b9..923b9529fa 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -321,4 +321,4 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::run(const Window &window, cl
}
while(window.slide_window_slice_3D(slice));
}
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute