From 3175fcf63249673f33fd1638879adad4baab545b Mon Sep 17 00:00:00 2001 From: giuros01 Date: Wed, 21 Nov 2018 09:59:17 +0000 Subject: COMPMID-1720: CL: Implement Tile Change-Id: I2a18f0acea382960a8bc71a8f56928a5998f0dd6 --- src/core/CL/CLKernelLibrary.cpp | 5 ++ src/core/CL/cl_kernels/tile.cl | 97 ++++++++++++++++++++++ src/core/CL/kernels/CLTileKernel.cpp | 152 +++++++++++++++++++++++++++++++++++ 3 files changed, 254 insertions(+) create mode 100644 src/core/CL/cl_kernels/tile.cl create mode 100644 src/core/CL/kernels/CLTileKernel.cpp (limited to 'src/core') diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 12944061a9..6e5e97e3e1 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -410,6 +410,7 @@ const std::map CLKernelLibrary::_kernel_program_map = { "tablelookup_S16", "tablelookup.cl" }, { "threshold_binary", "threshold.cl" }, { "threshold_range", "threshold.cl" }, + { "tile", "tile.cl" }, { "transpose", "transpose.cl" }, { "UYVY422_to_IYUV_bt709", "color_convert.cl" }, { "UYVY422_to_NV12_bt709", "color_convert.cl" }, @@ -847,6 +848,10 @@ const std::map CLKernelLibrary::_program_source_map = { "threshold.cl", #include "./cl_kernels/threshold.clembed" + }, + { + "tile.cl", +#include "./cl_kernels/tile.clembed" }, { "transpose.cl", diff --git a/src/core/CL/cl_kernels/tile.cl b/src/core/CL/cl_kernels/tile.cl new file mode 100644 index 0000000000..ae625d99b1 --- /dev/null +++ b/src/core/CL/cl_kernels/tile.cl @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#if defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(SRC_DEPTH) && defined(DST_DEPTH)
+/** Perform a tile operation on an input tensor, replicating the input across the (larger) output tensor.
+ *
+ * @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note All data types can be used; the data type is selected via the -DDATA_TYPE compile flag.
+ *
+ * @param[in] input_ptr Pointer to the source image. 
Supported data types: All
+ * @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void tile(
+    TENSOR4D_DECLARATION(input),
+    TENSOR4D_DECLARATION(output))
+{
+    Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DST_DEPTH);
+    Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, SRC_DEPTH);
+
+    // For all coordinates but x, each tile copies from the input
+    const int y = get_global_id(1);
+    const int z = get_global_id(2) % DST_DEPTH;
+    const int batch = get_global_id(2) / DST_DEPTH;
+
+#if defined(VEC_SIZE) && defined(OFFSET)
+    // If we are loading/storing 
multiple elements at time, we need to + // not exceed the input boundaries. The last threads need to backtrack + // of OFFSET elements. Those elements cumulates for previous tiles + const int id = (int)(get_global_id(0)); + int x = id * VEC_SIZE; + + // Shift x based on the previous offsets + const int tile_number = x / SRC_WIDTH; + x -= (tile_number) * OFFSET; + int x_input = x % SRC_WIDTH; + + // Shift x based on being the last tile + const int last_tile = (int)(x_input + VEC_SIZE > SRC_WIDTH); + x -= last_tile * OFFSET; + x_input = x % SRC_WIDTH; + output.ptr -= (tile_number + last_tile) * OFFSET * output_stride_x; + + // Update the input pointer + input.ptr = tensor4D_offset(&input, x_input, y % SRC_HEIGHT, z % SRC_DEPTH, batch % SRC_BATCHES); + + // Copy the data + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) + data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr); + + VSTORE(VEC_SIZE) + (data, 0, (__global DATA_TYPE *)output.ptr); +#else // !defined(VEC_SIZE) || !defined(OFFSET) + const int x = get_global_id(0); + + // Update the input pointer + input.ptr = tensor4D_offset(&input, x % SRC_WIDTH, y % SRC_HEIGHT, z % SRC_DEPTH, batch % SRC_BATCHES); + + *((__global DATA_TYPE *)(output.ptr)) = *((__global DATA_TYPE *)(input.ptr)); +#endif // defined(VEC_SIZE) && defined(OFFSET) +} +#endif // defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(SRC_DEPTH) && defined(DST_DEPTH) diff --git a/src/core/CL/kernels/CLTileKernel.cpp b/src/core/CL/kernels/CLTileKernel.cpp new file mode 100644 index 0000000000..7559e7ae72 --- /dev/null +++ b/src/core/CL/kernels/CLTileKernel.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/CL/kernels/CLTileKernel.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/CL/CLValidate.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/IAccessWindow.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" + +namespace arm_compute +{ +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Multiples &multiples) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(multiples.size() > 4); + ARM_COMPUTE_RETURN_ERROR_ON(multiples.empty()); + ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(multiples.begin(), multiples.end(), [](uint32_t e) + { + return e == 0; + })); + + // Validate output if initialized + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_tiled_shape(input->tensor_shape(), multiples), output->tensor_shape()); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + } + + return Status{}; +} +} // namespace + +CLTileKernel::CLTileKernel() + : _input(nullptr), _output(nullptr) +{ +} + +void CLTileKernel::configure(const ICLTensor *input, ICLTensor *output, const Multiples &multiples) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + + // Auto initialize output + TensorShape tiled_shape = misc::shape_calculator::compute_tiled_shape(input->info()->tensor_shape(), multiples); + auto_init_if_empty(*output->info(), tiled_shape, 1, input->info()->data_type()); + + // Validate + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), multiples)); + + _input = input; + _output = output; + + const DataType data_type = input->info()->data_type(); + const int 
vec_size_x = 16 / input->info()->element_size(); + const int input_width_x = input->info()->tensor_shape().x(); + const unsigned int offset = ceil_to_multiple(input_width_x, vec_size_x) - input_width_x; + const bool multi_access_x = (input_width_x / vec_size_x > 0); + + // Create kernel + CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)); + build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input_width_x)); + build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1))); + build_opts.add_option("-DSRC_DEPTH=" + support::cpp11::to_string(input->info()->dimension(2))); + build_opts.add_option("-DSRC_BATCHES=" + support::cpp11::to_string(input->info()->dimension(3))); + build_opts.add_option("-DDST_DEPTH=" + support::cpp11::to_string(output->info()->dimension(2))); + build_opts.add_option_if(multi_access_x, "-DOFFSET=" + support::cpp11::to_string(offset)); + build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x)); + _kernel = static_cast(CLKernelLibrary::get().create_kernel("tile", build_opts.options())); + + // Configure window without padding + Window win = calculate_max_window(*output->info()); + + if(multi_access_x) + { + // If multi-access is enabled, no thread should cross the tile boundaries. This means we need + // as many threads as those to cover a single tile times multiples[0]. 
Note that if threads + // do not cross the boundaries of the tiles, they won't cross the boundaries of the last tile, and + // we don't need to pad the output + const unsigned int size_win_x = ceil_to_multiple(input->info()->dimension(0), vec_size_x) * multiples[0]; + win.set(Window::DimX, + Window::Dimension(win.x().start(), size_win_x, vec_size_x)); + } + + ICLKernel::configure_internal(win); + + // Set config_id for enabling LWS tuning + _config_id = "tile"; + _config_id += "_"; + _config_id += lower_string(string_from_data_type(input->info()->data_type())); + for(unsigned int i = 0; i < multiples.size(); ++i) + { + _config_id += "_"; + _config_id += support::cpp11::to_string(input->info()->dimension(i)); + _config_id += "_"; + _config_id += support::cpp11::to_string(multiples[i]); + } +} + +Status CLTileKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Multiples &multiples) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, multiples)); + return Status{}; +} + +void CLTileKernel::run(const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); + + Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); + Window slice = collapsed.first_slice_window_4D(); + + do + { + unsigned int idx = 0; + add_4D_tensor_argument(idx, _input, slice); + add_4D_tensor_argument(idx, _output, slice); + enqueue(queue, *this, slice); + } + while(collapsed.slide_window_slice_4D(slice)); +} +} // namespace arm_compute -- cgit v1.2.1