From 4622ac15aa009b3ce90ddc6c2ad6c918d6776103 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Wed, 27 Jun 2018 16:41:17 +0100
Subject: COMPMID-1336: Add CLArithmeticAddition support for QASYMM8

Change-Id: Ice2bb644841fdea4e776872ff5481eb927e66bd1
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137714
Reviewed-by: Anthony Barbier
Tested-by: Jenkins
---
 src/core/CL/CLKernelLibrary.cpp                    |  5 ++
 src/core/CL/cl_kernels/arithmetic_op_quantized.cl  | 89 ++++++++++++++++++++++
 src/core/CL/kernels/CLArithmeticAdditionKernel.cpp | 25 +++++-
 3 files changed, 115 insertions(+), 4 deletions(-)
 create mode 100644 src/core/CL/cl_kernels/arithmetic_op_quantized.cl

(limited to 'src/core/CL')

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 712a1179a6..aa11edf9ec 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -149,6 +149,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "accumulate_weighted", "accumulate.cl" },
     { "activation_layer", "activation_layer.cl" },
     { "activation_layer_qa8", "activation_layer_qa8.cl" },
+    { "arithmetic_add_quantized", "arithmetic_op_quantized.cl" },
     { "arithmetic_add", "arithmetic_op.cl" },
     { "arithmetic_sub", "arithmetic_op.cl" },
     { "arithmetic_div", "arithmetic_op.cl" },
@@ -414,6 +415,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
     {
         "arithmetic_op.cl",
 #include "./cl_kernels/arithmetic_op.clembed"
+    },
+    {
+        "arithmetic_op_quantized.cl",
+#include "./cl_kernels/arithmetic_op_quantized.clembed"
     },
     {
         "bitwise_op.cl",
diff --git a/src/core/CL/cl_kernels/arithmetic_op_quantized.cl b/src/core/CL/cl_kernels/arithmetic_op_quantized.cl
new file mode 100644
index 0000000000..82e92e32a8
--- /dev/null
+++ b/src/core/CL/cl_kernels/arithmetic_op_quantized.cl
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#ifdef SATURATE
+#define ADD(x, y) add_sat((x), (y))
+#define SUB(x, y) sub_sat((x), (y))
+#else /* SATURATE */
+#define ADD(x, y) (x) + (y)
+#define SUB(x, y) (x) - (y)
+#endif /* SATURATE */
+
+#if defined(OFFSET)
+
+/** This function adds two quantized (QASYMM8) tensors.
+ *
+ * @attention The quantization offset must be passed at compile time using -DOFFSET, e.g. -DOFFSET=10
+ * @attention To perform a saturating operation, -DSATURATE has to be passed to the compiler, otherwise the wrapping policy is used.
+ *
+ * @param[in]  in1_ptr                           Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in]  in1_stride_x                      Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  in1_step_x                        in1_stride_x * number of elements along X processed per work item (in bytes)
+ * @param[in]  in1_stride_y                      Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  in1_step_y                        in1_stride_y * number of elements along Y processed per work item (in bytes)
+ * @param[in]  in1_stride_z                      Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  in1_step_z                        in1_stride_z * number of elements along Z processed per work item (in bytes)
+ * @param[in]  in1_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in]  in2_ptr                           Pointer to the source tensor. Supported data types: same as @p in1_ptr
+ * @param[in]  in2_stride_x                      Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  in2_step_x                        in2_stride_x * number of elements along X processed per work item (in bytes)
+ * @param[in]  in2_stride_y                      Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  in2_step_y                        in2_stride_y * number of elements along Y processed per work item (in bytes)
+ * @param[in]  in2_stride_z                      Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  in2_step_z                        in2_stride_z * number of elements along Z processed per work item (in bytes)
+ * @param[in]  in2_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] out_ptr                           Pointer to the destination tensor. Supported data types: same as @p in1_ptr
+ * @param[in]  out_stride_x                      Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  out_step_x                        out_stride_x * number of elements along X processed per work item (in bytes)
+ * @param[in]  out_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  out_step_y                        out_stride_y * number of elements along Y processed per work item (in bytes)
+ * @param[in]  out_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per work item (in bytes)
+ * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void arithmetic_add_quantized(
+    TENSOR3D_DECLARATION(in1),
+    TENSOR3D_DECLARATION(in2),
+    TENSOR3D_DECLARATION(out))
+{
+    // Get pixels pointer
+    Tensor3D in1 = CONVERT_TO_TENSOR3D_STRUCT(in1);
+    Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
+    Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
+
+    // Load values
+    const short16 in_a   = CONVERT(vload16(0, (__global uchar *)in1.ptr), short16);
+    const short16 in_b   = CONVERT(vload16(0, (__global uchar *)in2.ptr), short16);
+    const short16 offset = OFFSET;
+
+    // Calculate result
+    short16 res = ADD(in_a, SUB(in_b, offset));
+
+    res = max((short16)0, min(res, (short16)255));
+
+    // Store result
+    vstore16(CONVERT(res, uchar16), 0, (__global uchar *)out.ptr);
+}
+#endif /* defined(OFFSET) */
diff --git a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
index b0177ab8b6..011807ad88 100644
--- a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
@@ -37,9 +37,15 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2,
 {
     ARM_COMPUTE_UNUSED(policy);
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input1);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&input2);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+
+    const bool is_qasymm = is_data_type_quantized_asymmetric(input1.data_type()) || is_data_type_quantized_asymmetric(input2.data_type());
+    if(is_qasymm)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);
+    }
 
     const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
 
@@ -50,12 +56,16 @@ Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2,
     if(output.total_size() > 0)
     {
         ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&output);
-        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
         ARM_COMPUTE_RETURN_ERROR_ON_MSG((output.data_type() == DataType::U8) && ((input1.data_type() != DataType::U8) || (input2.data_type() != DataType::U8)),
                                         "Output can only be U8 if both inputs are U8");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                         "Wrong shape for output");
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &output);
+        if(is_qasymm)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
+        }
     }
 
     return Status{};
@@ -124,6 +134,8 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
 
     const bool has_float_out = is_data_type_float(output->info()->data_type());
 
+    std::string kernel_name = "arithmetic_add";
+
     // Set kernel build options
     std::set<std::string> build_opts;
     build_opts.emplace((policy == ConvertPolicy::WRAP || has_float_out) ? "-DWRAP" : "-DSATURATE");
@@ -134,9 +146,14 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
     {
         build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input1->info()->fixed_point_position()));
     }
+    else if(is_data_type_quantized_asymmetric(input1->info()->data_type()))
+    {
+        build_opts.emplace("-DOFFSET=" + support::cpp11::to_string(input1->info()->quantization_info().offset));
+        kernel_name += "_quantized";
+    }
 
     // Create kernel
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("arithmetic_add", build_opts));
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
 
     ICLKernel::configure(win_config.second);
 }
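
Reviewer note: for reference, a minimal host-side sketch of the arithmetic the new kernel performs. It assumes that in1, in2 and the output all share the same QuantizationInfo (scale and offset), which is what passing only input1's offset via -DOFFSET implies; the helper name quantized_add_ref and its signature are illustrative only and not part of the library. With real = scale * (q - offset), out_real = in1_real + in2_real reduces to q_out = q1 + q2 - offset, clamped to the QASYMM8 range.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Scalar reference for arithmetic_add_quantized under a shared quantization
// offset across in1, in2 and out (hypothetical helper, for illustration only).
std::vector<uint8_t> quantized_add_ref(const std::vector<uint8_t> &in1,
                                       const std::vector<uint8_t> &in2,
                                       int offset)
{
    std::vector<uint8_t> out(in1.size());
    for(std::size_t i = 0; i < in1.size(); ++i)
    {
        // Widen to int (the kernel widens uchar to short16), then compute
        // q1 + (q2 - offset) so the duplicated offset is removed once.
        const int res = static_cast<int>(in1[i]) + (static_cast<int>(in2[i]) - offset);
        // Clamp to the QASYMM8 range [0, 255], as the kernel does before storing.
        out[i] = static_cast<uint8_t>(std::max(0, std::min(res, 255)));
    }
    return out;
}

Note that the final clamp to [0, 255] is applied by the kernel under both convert policies; -DSATURATE only changes whether the intermediate 16-bit add/sub saturates.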