From e03802edd37229a1868bacedd7571cc443810caf Mon Sep 17 00:00:00 2001
From: Usama Arif
Date: Mon, 11 Mar 2019 12:20:20 +0000
Subject: COMPMID-1936: Add support for QASYMM8 in CLQuantizeLayer.

Change-Id: I9aa1f1f1753bcdee6a74ec15b4fb366f823788b4
Signed-off-by: Usama Arif
Reviewed-on: https://review.mlplatform.org/c/850
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
---
 src/core/CL/cl_kernels/quantization_layer.cl      | 80 +++++++++++---------
 src/core/CL/kernels/CLQuantizationLayerKernel.cpp | 90 +++++++++++------------
 2 files changed, 90 insertions(+), 80 deletions(-)

(limited to 'src/core')

diff --git a/src/core/CL/cl_kernels/quantization_layer.cl b/src/core/CL/cl_kernels/quantization_layer.cl
index 80ea54012f..7ae34ef71a 100644
--- a/src/core/CL/cl_kernels/quantization_layer.cl
+++ b/src/core/CL/cl_kernels/quantization_layer.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,53 +23,63 @@
  */
 #include "helpers.h"
 
+#define CONVERT_RTE(x, type) (convert_##type##_rte((x)))
+#define CONVERT_RTE_VEC_STR(x, type, size) (convert_##type##size##_rte((x)))
+#define CONVERT_RTE_VEC(x, type, size) CONVERT_RTE_VEC_STR(x, type, size)
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(SCALE) && defined(OFFSET)
+
 /** This performs the quantization of floating point inputs to 8-bit unsigned integers.
  *
- * @param[in]  input_ptr                             Pointer to the source image. Supported data types: F32
- * @param[in]  input_stride_x                        Stride of the source image in X dimension (in bytes)
- * @param[in]  input_step_x                          input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  input_stride_y                        Stride of the source image in Y dimension (in bytes)
- * @param[in]  input_step_y                          input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  input_stride_z                        Stride of the source tensor in Z dimension (in bytes)
- * @param[in]  input_step_z                          input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  input_offset_first_element_in_bytes   The offset of the first element in the source image
- * @param[out] output_ptr                            Pointer to the destination image. Supported data types: U8
- * @param[in]  output_stride_x                       Stride of the destination image in X dimension (in bytes)
- * @param[in]  output_step_x                         output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  output_stride_y                       Stride of the destination image in Y dimension (in bytes)
- * @param[in]  output_step_y                         output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                       Stride of the source tensor in Z dimension (in bytes)
- * @param[in]  output_step_z                         output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  output_offset_first_element_in_bytes  The offset of the first element in the destination image
- * @param[in]  min_max_ptr                           Pointer to the min/max vector. Minimum value in position 0, maximum value in position 1. Supported data types: F32.
- * @param[in]  min_max_stride_x                      Stride of the min/max vector in X dimension (in bytes)
- * @param[in]  min_max_step_x                        min_max_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  min_max_offset_first_element_in_bytes The offset of the first element in the min/max vector
+ * @param[in]  input_ptr                             Pointer to the source tensor. Supported data types: F32
+ * @param[in]  input_stride_x                        Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  input_step_x                          input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input_stride_y                        Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  input_step_y                          input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input_stride_z                        Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input_step_z                          input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input_offset_first_element_in_bytes   The offset of the first element in the source tensor
+ * @param[out] output_ptr                            Pointer to the destination tensor. Supported data types: U8
+ * @param[in]  output_stride_x                       Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  output_step_x                         output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  output_stride_y                       Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  output_step_y                         output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  output_step_z                         output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  output_offset_first_element_in_bytes  The offset of the first element in the destination tensor
  */
 __kernel void quantization_layer(
     TENSOR3D_DECLARATION(input),
-    TENSOR3D_DECLARATION(output),
-    VECTOR_DECLARATION(min_max))
+    TENSOR3D_DECLARATION(output))
 {
     // Get pixels pointer
     Tensor3D input  = CONVERT_TO_TENSOR3D_STRUCT(input);
     Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
 
-    // min_max_value.s0 = min, min_max_value.s1 = max
-    const float2 min_max_value = vload2(0, (__global float *)(min_max_ptr + min_max_offset_first_element_in_bytes));
-
-    const float4 vmin   = (float4)min_max_value.s0;
-    const float4 vrange = (float4)(min_max_value.s1 - min_max_value.s0);
+#if defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
+    // Check if access on width gets out of bounds
+    // If it does shift access vector to access elements within bounds
+    const int xi = (int)(get_global_id(0) * VEC_SIZE);
+    input.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * input_stride_x;
+    output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
 
     // Load data
-    float4 data = vload4(0, (__global float *)input.ptr);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
 
-    // Map float values to range [0.0, 1.0]
-    data = (data - vmin) / vrange;
+    // Create scale and offset vectors
+    const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) vscale  = SCALE;
+    const VEC_DATA_TYPE(int, VEC_SIZE)       voffset = OFFSET;
 
-    // Quantize and saturate
-    uchar4 res = convert_uchar4_sat(data * 256.0f);
+    // Quantize
+    VEC_DATA_TYPE(int, VEC_SIZE)
+    res = CLAMP(CONVERT_RTE_VEC(val / vscale, int, VEC_SIZE) + voffset, 0, 255);
 
-    // Store result
-    vstore4(res, 0, (__global uchar *)output.ptr);
+    // Store result
+    VSTORE(VEC_SIZE)
+    (CONVERT(res, VEC_DATA_TYPE(uchar, VEC_SIZE)), 0, (__global uchar *)output.ptr);
+#else  // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
+    *((__global uchar *)(output.ptr)) = (uchar)CLAMP(CONVERT_RTE(((float) * (__global DATA_TYPE *)input.ptr) / ((float)SCALE), int) + (int)OFFSET, 0, 255);
+#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
 }
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(SCALE) && defined(OFFSET)
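
The rewritten kernel no longer normalises against a runtime min/max tensor; it applies the affine QASYMM8 mapping q = clamp(rte(x / SCALE) + OFFSET, 0, 255), with SCALE and OFFSET baked into the program as build options. When the width is not a multiple of VEC_SIZE, the final vector is shifted left so it starts at LAST_ACCESSED_X (the last in-bounds starting column); the overlapping elements are simply rewritten, which avoids any padding requirement. A minimal host-side sketch of the per-element mapping, for reference only; the helper name quantize_qasymm8_ref is illustrative and not part of the patch:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Reference model of the kernel's per-element computation:
// q = clamp(rte(x / scale) + offset, 0, 255). Illustrative only.
static inline std::uint8_t quantize_qasymm8_ref(float x, float scale, int offset)
{
    // std::nearbyint under the default FP environment rounds to nearest even,
    // matching the kernel's convert_..._rte conversion.
    const int q = static_cast<int>(std::nearbyint(x / scale)) + offset;
    return static_cast<std::uint8_t>(std::min(std::max(q, 0), 255));
}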
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
index 9028b0f604..374b22eab1 100644
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,6 +26,7 @@
 #include "arm_compute/core/AccessWindowStatic.h"
 #include "arm_compute/core/CL/CLHelpers.h"
 #include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
 #include "arm_compute/core/CL/ICLTensor.h"
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Utils.h"
@@ -36,73 +37,76 @@ using namespace arm_compute;
 
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output, min_max);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() < 3);
-
-    if(output->tensor_shape().total_size() > 0)
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+    if((output != nullptr) && (output->total_size() != 0))
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
     }
 
     return Status{};
 }
 
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *min_max)
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
 {
-    // Output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output, input->tensor_shape(), 1, DataType::U8);
-
-    constexpr unsigned int num_elems_processed_per_iteration = 4;
-
-    // Configure window
-    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
-    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-    AccessWindowStatic     min_max_access(min_max, 0, 0, 2, min_max->dimension(1));
+    // Configure kernel window
+    Window win = calculate_max_window(*input, Steps());
 
-    // Update window and padding
-    bool window_changed = update_window_and_padding(win, input_access, output_access, min_max_access);
+    // Output tensor auto initialization if not yet initialized
+    auto_init_if_empty(*output, input->tensor_shape(), 1, DataType::QASYMM8);
 
-    output_access.set_valid_region(win, input->valid_region());
+    Coordinates coord;
+    coord.set_num_dimensions(output->num_dimensions());
+    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
 
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_tuple(err, win);
+    return std::make_tuple(Status{}, win);
 }
 } // namespace
 
 CLQuantizationLayerKernel::CLQuantizationLayerKernel()
-    : _input(nullptr), _output(nullptr), _min_max(nullptr)
+    : _input(nullptr), _output(nullptr)
 {
 }
 
-void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ICLTensor *min_max)
+void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, min_max);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), min_max->info()));
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
 
-    _input   = input;
-    _output  = output;
-    _min_max = min_max;
+    _input  = input;
+    _output = output;
 
-    // Create kernel
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("quantization_layer"));
+    const int  vec_size_x     = 16 / input->info()->element_size();
+    const int  input_width_x  = input->info()->tensor_shape().x();
+    const bool multi_access_x = (input_width_x / vec_size_x > 0);
 
-    // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info(), min_max->info());
-
-    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+    // Create and update the window (if needed)
+    Window win = calculate_max_window(*input->info());
+    if(multi_access_x)
+    {
+        win.set(Window::DimX,
+                Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+    }
+    ICLKernel::configure_internal(win);
 
-    ICLKernel::configure_internal(std::get<1>(win_config));
+    // Create kernel
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(output->info()->quantization_info().scale));
+    build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(output->info()->quantization_info().offset));
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+    build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max(input_width_x - vec_size_x, 0)));
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("quantization_layer", build_opts.options()));
 }
 
-Status CLQuantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *min_max)
+Status CLQuantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, min_max));
-    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), min_max->clone().get())));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
+    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get())));
 
     return Status{};
 }
@@ -117,13 +121,9 @@ void CLQuantizationLayerKernel::run(const Window &window, cl::CommandQueue &queu
 
     do
     {
-        Window slice_min_max = slice.shift_dimensions(2);
-        slice_min_max.set(Window::DimX, Window::Dimension(0, 1, 1));
-
         unsigned int idx = 0;
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _output, slice);
-        add_1D_tensor_argument(idx, _min_max, slice_min_max);
        enqueue(queue, *this, slice);
    }
    while(window_collapsed.slide_window_slice_3D(slice));
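
On the host side, configure() now derives SCALE and OFFSET from the output tensor's QuantizationInfo and passes them as build options, so the separate min/max tensor (and its extra kernel argument in run()) disappears. A minimal sketch of the new two-argument interface under the API of this release; the tensor shape and (scale, offset) values are made up for illustration, and in practice the CLQuantizationLayer runtime function wraps this kernel:

#include "arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

void configure_quantization_example()
{
    CLScheduler::get().default_init(); // CL context/queue must exist before kernels are built

    // F32 input and QASYMM8 output; shape and quantization parameters are illustrative.
    CLTensor input, output;
    input.allocator()->init(TensorInfo(TensorShape(17U, 4U, 2U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(17U, 4U, 2U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));

    // Both validate() and configure() now take just the input and output.
    const Status status = CLQuantizationLayerKernel::validate(input.info(), output.info());
    ARM_COMPUTE_ERROR_THROW_ON(status);

    CLQuantizationLayerKernel kernel;
    kernel.configure(&input, &output);
}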
--
cgit v1.2.1