From 5daeffdd96c5e46ac2482431d578dfaf9b300cde Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Mon, 26 Nov 2018 10:01:15 +0000
Subject: COMPMID-1723: CL: Implement Reverse

Change-Id: Id0d4a07af24e2331161996083b0c1bab072bd405
Reviewed-on: https://review.mlplatform.org/322
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
---
 src/core/CL/CLKernelLibrary.cpp         |   5 ++
 src/core/CL/cl_kernels/reverse.cl       | 102 ++++++++++++++++++++++++
 src/core/CL/kernels/CLReverseKernel.cpp | 135 ++++++++++++++++++++++++++++++++
 src/runtime/CL/functions/CLReverse.cpp  |  43 ++++++++++
 4 files changed, 285 insertions(+)
 create mode 100644 src/core/CL/cl_kernels/reverse.cl
 create mode 100644 src/core/CL/kernels/CLReverseKernel.cpp
 create mode 100644 src/runtime/CL/functions/CLReverse.cpp

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index e48ff03e05..1361d02f74 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -387,6 +387,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "reorg_layer_nhwc", "reorg_layer.cl" },
     { "reshape_layer", "reshape_layer.cl" },
     { "reshape_to_columns", "convolution_layer.cl" },
+    { "reverse", "reverse.cl" },
     { "RGB888_to_IYUV_bt709", "color_convert.cl" },
     { "RGB888_to_NV12_bt709", "color_convert.cl" },
     { "RGB888_to_RGBA8888_bt709", "color_convert.cl" },
@@ -820,6 +821,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
     {
         "reshape_layer.cl",
 #include "./cl_kernels/reshape_layer.clembed"
+    },
+    {
+        "reverse.cl",
+#include "./cl_kernels/reverse.clembed"
     },
     {
         "roi_align_layer.cl",
diff --git a/src/core/CL/cl_kernels/reverse.cl b/src/core/CL/cl_kernels/reverse.cl
new file mode 100644
index 0000000000..6afd382fec
--- /dev/null
+++ b/src/core/CL/cl_kernels/reverse.cl
@@ -0,0 +1,102 @@
+/*
+* Copyright (c) 2018 ARM Limited.
+*
+* SPDX-License-Identifier: MIT
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in all
+* copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*/
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(NUM_REVERSE_DIMS)
+
+#if NUM_REVERSE_DIMS > 4
+#error("Reversing more than 4 dimensions is not currently supported")
+#endif /* NUM_REVERSE_DIMS > 4 */
+
+/** Performs reverse along the specified axis.
+ *
+ * @note The data type must be given as a preprocessor argument using -DDATA_TYPE=type, e.g. -DDATA_TYPE=uint
+ * @note The number of dimensions to reverse must be given as a preprocessor argument using -DNUM_REVERSE_DIMS=num, e.g. -DNUM_REVERSE_DIMS=3
+ *
+ * @param[in]  src_ptr                             Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in]  src_stride_x                        Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                          src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                        Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                          src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_stride_z                        Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                          src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  src_stride_w                        Stride of the source tensor in W dimension (in bytes)
+ * @param[in]  src_step_w                          src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes   The offset of the first element in the source tensor
+ * @param[in]  axis_ptr                            Pointer to the axis tensor. Supported data types: U32
+ * @param[in]  axis_stride_x                       Stride of the axis tensor in X dimension (in bytes)
+ * @param[in]  axis_step_x                         axis_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  axis_offset_first_element_in_bytes  The offset of the first element in the axis tensor
+ * @param[out] dst_ptr                             Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                        Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                          output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                        Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                          output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                        Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                          output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_stride_w                        Stride of the destination tensor in W dimension (in bytes)
+ * @param[in]  dst_step_w                          output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes   The offset of the first element in the destination tensor
+ */
+__kernel void reverse(TENSOR4D_DECLARATION(src),
+                      VECTOR_DECLARATION(axis),
+                      TENSOR4D_DECLARATION(dst),
+                      const uint width,
+                      const uint height,
+                      const uint depth,
+                      const uint batches)
+{
+    Tensor4D src  = CONVERT_TO_TENSOR4D_STRUCT(src, depth);
+    Vector   axis = CONVERT_TO_VECTOR_STRUCT_NO_STEP(axis);
+    Tensor4D dst  = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(dst, depth);
+
+    const uint x_in = get_global_id(0);
+    const uint y_in = get_global_id(1);
+    const uint z_in = get_global_id(2) % depth;
+    const uint w_in = get_global_id(2) / depth;
+
+    const uint4 dims       = (uint4)(0, 1, 2, 3);
+    int4        to_reverse = (int4)(0, 0, 0, 0);
+#if NUM_REVERSE_DIMS == 1
+    const uint index = *((__global uint *)axis.ptr);
+    to_reverse       = (uint4)index == dims;
+#elif NUM_REVERSE_DIMS == 2
+    const uint2 indices = vload2(0, (__global uint *)axis.ptr);
+    to_reverse          = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims);
+#elif NUM_REVERSE_DIMS == 3
+    const uint2 indices01 = vload2(0, (__global uint *)axis.ptr);
+    const uint  index2    = *((__global uint *)axis.ptr + 2);
+    to_reverse            = ((uint4)indices01.s0 == dims) || ((uint4)indices01.s1 == dims) || ((uint4)index2 == dims);
+#else /* NUM_REVERSE_DIMS == 3 */
+    const uint4 indices = vload4(0, (__global uint *)axis.ptr);
+    to_reverse          = ((uint4)indices.s0 == dims) || ((uint4)indices.s1 == dims) || ((uint4)indices.s2 == dims) || ((uint4)indices.s3 == dims);
+#endif /* NUM_REVERSE_DIMS == 1 */
+    const uint x_out = to_reverse.s0 ? width - x_in - 1 : x_in;
+    const uint y_out = to_reverse.s1 ? height - y_in - 1 : y_in;
+    const uint z_out = to_reverse.s2 ? depth - z_in - 1 : z_in;
+    const uint w_out = to_reverse.s3 ? batches - w_in - 1 : w_in;
+
+    *((__global DATA_TYPE *)tensor4D_offset(&dst, x_out, y_out, z_out, w_out)) = *((__global DATA_TYPE *)src.ptr);
+}
+#endif // defined(DATA_TYPE) && defined(NUM_REVERSE_DIMS)
diff --git a/src/core/CL/kernels/CLReverseKernel.cpp b/src/core/CL/kernels/CLReverseKernel.cpp
new file mode 100644
index 0000000000..2859a51ce1
--- /dev/null
+++ b/src/core/CL/kernels/CLReverseKernel.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLReverseKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis);
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8,
+                                                         DataType::U16, DataType::S16,
+                                                         DataType::U32, DataType::S32,
+                                                         DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(axis, 1, DataType::U32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis->num_dimensions() > 1, "Axis must be a 1D tensor");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis->dimension(0) > 4, "Only up to 4 dimensions can be reversed");
+
+    // Checks performed when output is configured
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+
+    return Status{};
+}
+} // namespace
+
+CLReverseKernel::CLReverseKernel()
+    : _input(nullptr), _output(nullptr), _axis(nullptr)
+{
+}
+
+void CLReverseKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, axis);
+
+    _input  = input;
+    _output = output;
+    _axis   = axis;
+
+    // Output tensor auto initialization if not yet initialized
+    auto_init_if_empty(*output->info(), *input->info()->clone());
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), axis->info()));
+
+    // Set kernel build options
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DNUM_REVERSE_DIMS=" + support::cpp11::to_string(axis->info()->dimension(0)));
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+
+    // Create kernel
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reverse", build_opts.options()));
+
+    // Set static kernel arguments
+    unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_1D_tensor();
+    add_argument(idx, input->info()->dimension(0));
+    add_argument(idx, input->info()->dimension(1));
+    add_argument(idx, input->info()->dimension(2));
+    add_argument(idx, input->info()->dimension(3));
+
+    // Configure kernel window
+    Window win = calculate_max_window(*output->info(), Steps());
+    ICLKernel::configure_internal(win);
+
+    // Set config_id for enabling LWS tuning
+    _config_id += "reverse_";
+    _config_id += lower_string(string_from_data_type(input->info()->data_type()));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(input->info()->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(input->info()->dimension(1));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(input->info()->dimension(2));
+}
+
+Status CLReverseKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis));
+    return Status{};
+}
+
+void CLReverseKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    Window collapsed  = window.collapse(ICLKernel::window(), Window::DimZ);
+    Window slice      = collapsed.first_slice_window_4D();
+    Window axis_slice = collapsed.first_slice_window_1D();
+
+    do
+    {
+        unsigned int idx = 0;
+        add_4D_tensor_argument(idx, _input, slice);
+        add_1D_tensor_argument(idx, _axis, axis_slice);
+        add_4D_tensor_argument(idx, _output, slice);
+        enqueue(queue, *this, slice, lws_hint());
+    }
+    while(collapsed.slide_window_slice_4D(slice));
+}
+} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLReverse.cpp b/src/runtime/CL/functions/CLReverse.cpp
new file mode 100644
index 0000000000..0f86b9f326
--- /dev/null
+++ b/src/runtime/CL/functions/CLReverse.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLReverse.h"
+
+#include "arm_compute/core/CL/kernels/CLReverseKernel.h"
+#include "arm_compute/core/Types.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+void CLReverse::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *axis)
+{
+    auto k = arm_compute::support::cpp14::make_unique<CLReverseKernel>();
+    k->configure(input, output, axis);
+    _kernel = std::move(k);
+}
+
+Status CLReverse::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *axis)
+{
+    return CLReverseKernel::validate(input, output, axis);
+}
+} // namespace arm_compute
--
cgit v1.2.1
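
The reverse.cl kernel above turns the entries of the U32 axis tensor into a per-dimension mask (to_reverse) and then mirrors every coordinate that lies on a reversed dimension, e.g. x_out = width - x_in - 1. The C++ sketch below only illustrates that index mapping and is not part of the patch; the function name and the use of std::array/std::vector are assumptions made for the example.

// Illustrative host-side reference of the coordinate flip done by reverse.cl.
// "axis" holds the dimensions to reverse, exactly like the U32 axis tensor.
#include <array>
#include <cstdint>
#include <vector>

std::array<uint32_t, 4> reversed_coordinate(std::array<uint32_t, 4> coord,        // x, y, z, w of the input element
                                            const std::array<uint32_t, 4> &shape, // width, height, depth, batches
                                            const std::vector<uint32_t> &axis)    // e.g. {0, 2} to flip width and depth
{
    for(uint32_t dim : axis)
    {
        // Same flip as x_out = width - x_in - 1 in the kernel
        coord[dim] = shape[dim] - coord[dim] - 1;
    }
    return coord;
}

For shape {4, 2, 1, 1} and axis {0}, the element at x = 0 is written to x = 3, which matches the NUM_REVERSE_DIMS == 1 path of the kernel.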
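
CLReverseKernel::validate() rejects unsupported configurations before any OpenCL objects are created: the axis must be a 1D U32 tensor with at most four entries, and an already initialised output must match the input in shape and data type. A minimal sketch of querying this through CLReverse::validate() follows; the shapes and data types are arbitrary examples and the helper function is made up for the illustration.

// Hedged sketch: ask the library whether a reverse configuration is supported,
// using only TensorInfo descriptors and no tensor allocations.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLReverse.h"

using namespace arm_compute;

bool reverse_is_supported() // hypothetical helper, not part of the library
{
    const TensorInfo input(TensorShape(16U, 8U, 4U), 1, DataType::F16);
    const TensorInfo output(TensorShape(16U, 8U, 4U), 1, DataType::F16);
    const TensorInfo axis(TensorShape(2U), 1, DataType::U32); // two dimensions to reverse

    // An axis that was not U32, was not 1D or had more than four entries
    // would make validate() return an error status instead of OK.
    const Status status = CLReverse::validate(&input, &output, &axis);
    return status.error_code() == ErrorCode::OK;
}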
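
At the runtime level, CLReverse only creates a CLReverseKernel in configure() and hands it to the base class through _kernel, so the library's usual simple-function workflow applies. The sketch below shows one plausible way to use it, assuming the standard CLTensor/CLScheduler setup used elsewhere in the library; the shapes, the two-element axis and the omitted fill step are placeholders, not something mandated by the patch.

// Hedged usage sketch for the new CLReverse function.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLReverse.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // create the CL context and queue

    CLTensor input, output, axis;
    input.allocator()->init(TensorInfo(TensorShape(8U, 4U, 2U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(8U, 4U, 2U), 1, DataType::F32));
    axis.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32)); // will hold e.g. {0, 2}

    CLReverse reverse;
    reverse.configure(&input, &output, &axis); // signature added by this patch

    input.allocator()->allocate();
    output.allocator()->allocate();
    axis.allocator()->allocate();

    // ... map the tensors, fill the input data and the axis values, unmap ...

    reverse.run();             // enqueues CLReverseKernel
    CLScheduler::get().sync(); // wait for the OpenCL queue to finish
    return 0;
}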