From 11d8415aa57b69fb6c83e86a37e3026c22d1d37d Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 28 Apr 2021 10:20:18 +0100
Subject: Port DepthConvert to new API

- Renames DepthConvert to Cast
- Ports both NEDepthConvertLayer and CLDepthConvert variants
- Removes legacy shift capability from DepthConvert, allowing only shifts of 0

Signed-off-by: Georgios Pinitas
Change-Id: I806a0f8eb23d23502b632c529fda7edde19c8176
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5565
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 src/core/CL/CLKernels.h                            |    1 -
 src/core/CL/cl_kernels/cast.cl                     |  134 ++
 src/core/CL/cl_kernels/depth_convert.cl            |  138 --
 src/core/CL/kernels/CLDepthConvertLayerKernel.cpp  |  152 ---
 src/core/CL/kernels/CLDepthConvertLayerKernel.h    |   91 --
 src/core/NEON/NEKernels.h                          |    1 -
 .../NEON/kernels/NEDepthConvertLayerKernel.cpp     | 1410 --------------------
 src/core/NEON/kernels/NEDepthConvertLayerKernel.h  |   96 --
 src/core/cpu/kernels/CpuCastKernel.cpp             | 1367 +++++++++++++++++++
 src/core/cpu/kernels/CpuCastKernel.h               |   82 ++
 src/core/gpu/cl/ClKernelLibrary.cpp                |    8 +-
 src/core/gpu/cl/kernels/ClCastKernel.cpp           |  163 +++
 src/core/gpu/cl/kernels/ClCastKernel.h             |   79 ++
 13 files changed, 1829 insertions(+), 1893 deletions(-)
 create mode 100644 src/core/CL/cl_kernels/cast.cl
 delete mode 100644 src/core/CL/cl_kernels/depth_convert.cl
 delete mode 100644 src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
 delete mode 100644 src/core/CL/kernels/CLDepthConvertLayerKernel.h
 delete mode 100644 src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
 delete mode 100644 src/core/NEON/kernels/NEDepthConvertLayerKernel.h
 create mode 100644 src/core/cpu/kernels/CpuCastKernel.cpp
 create mode 100644 src/core/cpu/kernels/CpuCastKernel.h
 create mode 100644 src/core/gpu/cl/kernels/ClCastKernel.cpp
 create mode 100644 src/core/gpu/cl/kernels/ClCastKernel.h
(limited to 'src/core')

diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 1302d52180..c59eebacbb 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -35,7 +35,6 @@
 #include "src/core/CL/kernels/CLComparisonKernel.h"
 #include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
 #include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
diff --git a/src/core/CL/cl_kernels/cast.cl b/src/core/CL/cl_kernels/cast.cl
new file mode 100644
index 0000000000..036a683ec7
--- /dev/null
+++ b/src/core/CL/cl_kernels/cast.cl
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#ifdef SATURATE
+#define CONVERT_DOWN(x, type) CONVERT_SAT(x, type)
+#else /* SATURATE */
+#define CONVERT_DOWN(x, type) CONVERT(x, type)
+#endif /* SATURATE */
+
+#define CONVERT_UP(x, type) CONVERT(x, type)
+
+/** This function performs a down-casting
+ *
+ * @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is the user's responsibility to keep track of the quantization info.
+ *
+ * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
+ * e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
+ *
+ * @param[in]  in_ptr                            Pointer to the source image. Supported data types: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32
+ * @param[in]  in_stride_x                       Stride of the source image in X dimension (in bytes)
+ * @param[in]  in_step_x                         in_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  in_stride_y                       Stride of the source image in Y dimension (in bytes)
+ * @param[in]  in_step_y                         in_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  in_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  in_step_z                         in_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  in_offset_first_element_in_bytes  The offset of the first element in the source image
+ * @param[out] out_ptr                           Pointer to the destination image.
+ *                                               Supported data types: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in]  out_stride_x                      Stride of the destination image in X dimension (in bytes)
+ * @param[in]  out_step_x                        out_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  out_stride_y                      Stride of the destination image in Y dimension (in bytes)
+ * @param[in]  out_step_y                        out_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  out_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void cast_down(
+    TENSOR3D_DECLARATION(in),
+    TENSOR3D_DECLARATION(out))
+{
+    int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+
+    __global uchar *in_addr  = in_ptr + in_offset_first_element_in_bytes + sizeof(DATA_TYPE_IN) * x_offs + get_global_id(1) * in_stride_y + get_global_id(2) * in_stride_z;
+    __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + sizeof(DATA_TYPE_OUT) * x_offs + get_global_id(1) * out_stride_y + get_global_id(2) * out_stride_z;
+
+    // Load data
+    VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE)
+    in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)in_addr);
+
+#if defined(IS_DATA_TYPE_QUANTIZED)
+    in_data ^= (VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE))0x80;
+#endif // defined(IS_DATA_TYPE_QUANTIZED)
+
+#if defined(IS_DATA_TYPE_FLOAT)
+    VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+    res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+    STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+#else /* defined(IS_DATA_TYPE_FLOAT) */
+    VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+    res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+    STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+#endif /* defined(IS_DATA_TYPE_FLOAT) */
+}
+
+/** This function performs an up-casting
+ *
+ * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
+ * e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
+ *
+ * @param[in]  in_ptr                            Pointer to the source image. Supported data types: U8/S8/U16/S16/U32/S32/F16/F32
+ * @param[in]  in_stride_x                       Stride of the source image in X dimension (in bytes)
+ * @param[in]  in_step_x                         in_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  in_stride_y                       Stride of the source image in Y dimension (in bytes)
+ * @param[in]  in_step_y                         in_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  in_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  in_step_z                         in_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  in_offset_first_element_in_bytes  The offset of the first element in the source image
+ * @param[out] out_ptr                           Pointer to the destination image.
+ *                                               Supported data types: U8/U16/S16/U32/S32/F16/F32
+ * @param[in]  out_stride_x                      Stride of the destination image in X dimension (in bytes)
+ * @param[in]  out_step_x                        out_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  out_stride_y                      Stride of the destination image in Y dimension (in bytes)
+ * @param[in]  out_step_y                        out_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  out_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void cast_up(
+    TENSOR3D_DECLARATION(in),
+    TENSOR3D_DECLARATION(out))
+{
+    int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+
+    __global uchar *in_addr  = in_ptr + in_offset_first_element_in_bytes + sizeof(DATA_TYPE_IN) * x_offs + get_global_id(1) * in_stride_y + get_global_id(2) * in_stride_z;
+    __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + sizeof(DATA_TYPE_OUT) * x_offs + get_global_id(1) * out_stride_y + get_global_id(2) * out_stride_z;
+
+    // Load data
+    VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE)
+    in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)in_addr);
+
+#if defined(IS_DATA_TYPE_FLOAT)
+    VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+    res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+    STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+#else /* defined(IS_DATA_TYPE_FLOAT) */
+    VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+    res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+    STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
+#endif /* defined(IS_DATA_TYPE_FLOAT) */
+}
diff --git a/src/core/CL/cl_kernels/depth_convert.cl b/src/core/CL/cl_kernels/depth_convert.cl
deleted file mode 100644
index a888d7b9bc..0000000000
--- a/src/core/CL/cl_kernels/depth_convert.cl
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
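The x_offs arithmetic shared by cast_down and cast_up above is what makes the leftover handling work: when the first dimension is not a multiple of VEC_SIZE, work-item 0 stores only VEC_SIZE_LEFTOVER lanes and every later work-item is pulled back so that its full vector ends exactly at the tensor edge. A minimal plain-C++ sketch of how the indices fall out, using assumed example values (dim0 = 19, VEC_SIZE = 16):

#include <algorithm>
#include <cstdio>

int main()
{
    const int dim0      = 19; // first tensor dimension (example value)
    const int vec_size  = 16; // -DVEC_SIZE
    const int leftover  = dim0 % vec_size;                  // -DVEC_SIZE_LEFTOVER == 3
    const int num_items = (dim0 + vec_size - 1) / vec_size; // global work size in X

    for(int gid = 0; gid < num_items; ++gid)
    {
        // Same expression as x_offs in the kernels above
        const int x_offs = std::max(gid * vec_size - (vec_size - leftover) % vec_size, 0);
        // STORE_VECTOR_SELECT writes only 'leftover' lanes for work-item 0, a full vector otherwise
        const int lanes = (leftover != 0 && gid == 0) ? leftover : vec_size;
        std::printf("work-item %d writes [%d, %d)\n", gid, x_offs, x_offs + lanes);
    }
    return 0; // prints [0, 3) then [3, 19): every element is written exactly once
}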
- */ -#include "helpers.h" - -#ifdef SATURATE -#define CONVERT_DOWN(x, type) CONVERT_SAT(x, type) -#else /* SATURATE */ -#define CONVERT_DOWN(x, type) CONVERT(x, type) -#endif /* SATURATE */ - -#define CONVERT_UP(x, type) CONVERT(x, type) - -/** This function performs a down-scaling depth conversion. - * - * @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is user's responsibility to keep track of the quantization info. - * - * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT: - * e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short - * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 - * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE - * - * @param[in] in_ptr Pointer to the source image. Supported data types: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32 - * @param[in] in_stride_x Stride of the source image in X dimension (in bytes) - * @param[in] in_step_x in_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes) - * @param[in] in_step_y in_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] in_step_z in_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image - * @param[out] out_ptr Pointer to the destination image. Supported data types: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 - * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes) - * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes) - * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] out_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image - * @param[in] shift The integer shift amount value. 
Supported data types: S32 - */ -__kernel void convert_depth_down( - TENSOR3D_DECLARATION(in), - TENSOR3D_DECLARATION(out), - const int shift) -{ - int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); - - __global uchar *in_addr = in_ptr + in_offset_first_element_in_bytes + sizeof(DATA_TYPE_IN) * x_offs + get_global_id(1) * in_stride_y + get_global_id(2) * in_stride_z; - __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + sizeof(DATA_TYPE_OUT) * x_offs + get_global_id(1) * out_stride_y + get_global_id(2) * out_stride_z; - - // Load data - VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) - in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)in_addr); - -#if defined(IS_DATA_TYPE_QUANTIZED) - in_data ^= (VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE))0x80; -#endif // defined(IS_DATA_TYPE_QUANTIZED) - -#if defined(IS_DATA_TYPE_FLOAT) - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE) - res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)); - STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) -#else /* defined(IS_DATA_TYPE_FLOAT) */ - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE) - res0 = CONVERT_DOWN(in_data >> shift, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)); - STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) -#endif /* defined(IS_DATA_TYPE_FLOAT) */ -} - -/** This function performs a up-scaling depth conversion. - * - * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT: - * e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short - * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 - * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE - * - * @param[in] in_ptr Pointer to the source image. Supported data types: U8/S8/U16/S16/U32/S32/F16/F32 - * @param[in] in_stride_x Stride of the source image in X dimension (in bytes) - * @param[in] in_step_x in_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] in_stride_y Stride of the source image in Y dimension (in bytes) - * @param[in] in_step_y in_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] in_step_z in_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image - * @param[out] out_ptr Pointer to the destination image. 
Supported data types: U8/U16/S16/U32/S32/F16/F32 - * @param[in] out_stride_x Stride of the destination image in X dimension (in bytes) - * @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes) - * @param[in] out_step_y out_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] out_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] out_step_z out_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] out_offset_first_element_in_bytes The offset of the first element in the destination image - * @param[in] shift The integer shift amount value. Supported data types: S32 - */ -__kernel void convert_depth_up( - TENSOR3D_DECLARATION(in), - TENSOR3D_DECLARATION(out), - const int shift) -{ - int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); - - __global uchar *in_addr = in_ptr + in_offset_first_element_in_bytes + sizeof(DATA_TYPE_IN) * x_offs + get_global_id(1) * in_stride_y + get_global_id(2) * in_stride_z; - __global uchar *out_addr = out_ptr + out_offset_first_element_in_bytes + sizeof(DATA_TYPE_OUT) * x_offs + get_global_id(1) * out_stride_y + get_global_id(2) * out_stride_z; - - // Load data - VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) - in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)in_addr); - -#if defined(IS_DATA_TYPE_FLOAT) - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE) - res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)); - STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) -#else /* defined(IS_DATA_TYPE_FLOAT) */ - VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE) - res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)) << shift; - STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) -#endif /* defined(IS_DATA_TYPE_FLOAT) */ -} diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp deleted file mode 100644 index 0d5c7a4881..0000000000 --- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
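The deleted depth_convert.cl kernels above are the last place the shift argument had any effect on the OpenCL side: integer down-conversions shifted right before narrowing, integer up-conversions shifted left after widening, and the float paths ignored it. A scalar sketch of those legacy semantics (hypothetical helper names), which the new cast kernels drop by accepting only shift == 0:

#include <cstdint>

// Legacy convert_depth_down for integers: shift in the wide type, then narrow (WRAP policy shown)
int8_t legacy_down_s16_to_s8(int16_t v, int shift)
{
    return static_cast<int8_t>(v >> shift); // SATURATE would clamp to [-128, 127] instead
}

// Legacy convert_depth_up for integers: widen first, then shift left
int16_t legacy_up_s8_to_s16(int8_t v, int shift)
{
    return static_cast<int16_t>(static_cast<int16_t>(v) << shift);
}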
- */ -#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Utils.h" -#include "arm_compute/core/Validate.h" -#include "src/core/CL/CLValidate.h" -#include "src/core/helpers/AutoConfiguration.h" -#include "src/core/helpers/WindowHelpers.h" -#include "support/StringSupport.h" - -#include -#include -#include - -namespace arm_compute -{ -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_UNUSED(policy); - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON(input == output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, - 1, - DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16, - DataType::U16, DataType::U32, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, - 1, - DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16, - DataType::U16, DataType::U32, DataType::S32, DataType::F16, - DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == output->data_type(), "Input and output data types must be different"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs"); - ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8); - - // Validate in case of configured output - if(output->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); - } - - return Status{}; -} -} // namespace - -void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift) -{ - configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift); -} - -void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - _input = input; - _output = output; - - // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given) - set_shape_if_empty(*output->info(), input->info()->tensor_shape()); - - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift)); - - auto padding_info = get_padding_info({ input, output }); - - // Get data sizes - const size_t input_size = data_size_from_type(input->info()->data_type()); - const size_t output_size = data_size_from_type(output->info()->data_type()); - - // Get number of elements to process per iterations - const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0)); - - // Set build options - CLBuildOptions build_opts; - build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)); - build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration)); - build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type())); - 
build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type())); - // Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined - build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE"); - build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || is_data_type_float(output->info()->data_type()), "-DIS_DATA_TYPE_FLOAT"); - build_opts.add_option_if(is_data_type_quantized(input->info()->data_type()), "-DIS_DATA_TYPE_QUANTIZED"); - - // Create kernel - const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up"; - _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); - - // Set shift arg - unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters - _kernel.setArg(idx++, shift); - - // Configure kernel - Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration)); - ICLKernel::configure_internal(win); - - // Collapse window - const Window &full_window = window(); - Window collapsed_window = full_window.collapse_if_possible(full_window, Window::DimZ); - ICLKernel::configure_internal(collapsed_window); - - ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info)); - - // Set config_id for enabling LWS tuning - _config_id = kernel_name; - _config_id += "_"; - _config_id += lower_string(string_from_data_type(input->info()->data_type())); - _config_id += "_"; - _config_id += support::cpp11::to_string(input->info()->dimension(0)); - _config_id += "_"; - _config_id += support::cpp11::to_string(input->info()->dimension(1)); - _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(0)); - _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(1)); -} - -Status CLDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift)); - - return Status{}; -} -} // namespace arm_compute diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.h b/src/core/CL/kernels/CLDepthConvertLayerKernel.h deleted file mode 100644 index 8b511c6707..0000000000 --- a/src/core/CL/kernels/CLDepthConvertLayerKernel.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2016-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H -#define ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H - -#include "arm_compute/core/Types.h" -#include "src/core/CL/ICLSimple3DKernel.h" - -#include - -namespace arm_compute -{ -class ICLTensor; - -/** Interface for the depth conversion kernel. */ -class CLDepthConvertLayerKernel : public ICLSimple3DKernel -{ -public: - /** Set the input and output of the kernel. - * - * Valid conversions Input -> Output : - * - * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data) - * - U8 -> S8, U16, S16, U32, S32, F16, F32 - * - U16 -> U8, S8, S16, U32, S32, F16, F32 - * - S16 -> U8, S8, U16, U32, S32, F16, F32 - * - U32 -> U8, S8, U16, S16, S32, F16, F32 - * - S32 -> U8, S8, U16, S16, U32, F16, F32 - * - F16 -> U8, S8, U16, S16, U32, F32 - * - F32 -> U8, S8, U16, S16, U32, F16 - * - * @param[in] input The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32. - * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32. - * @param[in] policy Conversion policy - * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8. - */ - void configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift); - /** Set the input and output of the kernel. - * - * Valid conversions Input -> Output : - * - * - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data) - * - U8 -> S8, U16, S16, U32, S32, F16, F32 - * - U16 -> U8, S8, S16, U32, S32, F16, F32 - * - S16 -> U8, S8, U16, U32, S32, F16, F32 - * - U32 -> U8, S8, U16, S16, S32, F16, F32 - * - S32 -> U8, S8, U16, S16, U32, F16, F32 - * - F16 -> U8, S8, U16, S16, U32, F32 - * - F32 -> U8, S8, U16, S16, U32, F16 - * - * @param[in] compile_context The compile context to be used. - * @param[in] input The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32. - * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32. - * @param[in] policy Conversion policy - * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8. - */ - void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift); - /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel - * - * @param[in] input Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32. - * @param[in] output Destination tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32. - * @param[in] policy Conversion policy - * @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8. 
- * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift); -}; -} // namespace arm_compute -#endif /*ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H */ diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h index b11e135a0d..ea15f4eddd 100644 --- a/src/core/NEON/NEKernels.h +++ b/src/core/NEON/NEKernels.h @@ -35,7 +35,6 @@ #include "src/core/NEON/kernels/NECol2ImKernel.h" #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h" #include "src/core/NEON/kernels/NECropKernel.h" -#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h" #include "src/core/NEON/kernels/NEDepthToSpaceLayerKernel.h" #include "src/core/NEON/kernels/NEFFTDigitReverseKernel.h" #include "src/core/NEON/kernels/NEFFTRadixStageKernel.h" diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp deleted file mode 100644 index 4b5208eeb6..0000000000 --- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp +++ /dev/null @@ -1,1410 +0,0 @@ -/* - * Copyright (c) 2016-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
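For context, the configure()/validate() pair declared in the deleted CL header above follows the library-wide contract: validate() is a static dry run over tensor infos, configure() binds actual tensors. A minimal usage sketch (allocation elided; src and dst are assumed pre-initialised CLTensor objects, not code taken from this patch):

// Check support first, then configure; shift is passed as 0, the only value this port keeps legal
CLDepthConvertLayerKernel cast_kernel;
const Status status = CLDepthConvertLayerKernel::validate(src.info(), dst.info(), ConvertPolicy::SATURATE, 0);
if(status.error_code() == ErrorCode::OK)
{
    cast_kernel.configure(&src, &dst, ConvertPolicy::SATURATE, 0);
}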
- */ -#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h" - -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/ITensor.h" -#include "arm_compute/core/TensorInfo.h" -#include "arm_compute/core/Validate.h" -#include "src/core/CPP/Validate.h" -#include "src/core/NEON/NEFixedPoint.h" -#include "src/core/NEON/NEMath.h" -#include "src/core/NEON/wrapper/wrapper.h" -#include "src/core/helpers/AutoConfiguration.h" -#include "src/core/helpers/WindowHelpers.h" -#include "support/SaturateCast.h" - -using namespace arm_compute; - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(output); - ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(output); - ARM_COMPUTE_UNUSED(policy); - ARM_COMPUTE_RETURN_ERROR_ON(input == output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8, - DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16, - DataType::F32, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8, - DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16, - DataType::U32, DataType::S32, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8_SIGNED && (output->data_type() != DataType::S16 && output->data_type() != DataType::S32 - && output->data_type() != DataType::F16 && output->data_type() != DataType::F32), - "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16 - && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32), - "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16 - && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32), - "Only data_types supported [in] U8 -> [out] U16, S16, S32, F16, F32"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U16 && (output->data_type() != DataType::U8 && output->data_type() != DataType::U32), - "Only data_types supported [in] U16 -> [out] U8, U32"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::U8 && output->data_type() != DataType::S32), - "Only data_types supported [in] S16 -> [out] U8, S32"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::BFLOAT16 && output->data_type() != DataType::F32, - "Only data_types supported [in] BFLOAT16 -> [out] F32"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8 - && output->data_type() != DataType::U8 - && output->data_type() != DataType::F32 && output->data_type() != DataType::S32), - "Only data_types supported [in] 
F16 -> [out] QASYMM8, F32, S32, U8"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8 - && output->data_type() != DataType::F16 && output->data_type() != DataType::BFLOAT16 - && output->data_type() != DataType::S32 && output->data_type() != DataType::U8), - "Only data_types supported [in] F32 -> [out] QASYMM8, BFLOAT16, F16, S32, U8"); - - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8 - && output->data_type() != DataType::F16 - && output->data_type() != DataType::F32 && output->data_type() != DataType::U8), - "Only data_types supported [in] S32 -> [out] QASYMM8, F16, F32, U8"); - - // Validate in case of configured output - if(output->total_size() > 0) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); - } - - return Status{}; -} -} // namespace - -NEDepthConvertLayerKernel::NEDepthConvertLayerKernel() - : _input(nullptr), _output(nullptr), _policy(), _shift(0) -{ -} - -void NEDepthConvertLayerKernel::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given) - set_shape_if_empty(*output->info(), input->info()->tensor_shape()); - - _input = input; - _output = output; - _policy = policy; - _shift = shift; - - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift)); - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps()); - - ICPPKernel::configure(win); -} - -Status NEDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift)); - return Status{}; -} - -void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info) -{ - ARM_COMPUTE_UNUSED(info); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); - ARM_COMPUTE_ERROR_ON_NULLPTR(_input, _output); - ARM_COMPUTE_ERROR_ON(_input == _output); - - const auto window_start_x = static_cast(window.x().start()); - const auto window_end_x = static_cast(window.x().end()); - const int window_step_x = 16; - - Window win{ window }; - win.set(Window::DimX, Window::Dimension(0, 1, 1)); - - Iterator input(_input, win); - Iterator output(_output, win); - - switch(_input->info()->data_type()) - { - case DataType::QASYMM8_SIGNED: - { - const int16x8_t b = vdupq_n_s16(_shift); - - switch(_output->info()->data_type()) - { - case DataType::S16: - { - /* Up-conversion QASYMM8_SIGNED -> S16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - int x = window_start_x; - - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int8x16_t texels_s8 = vld1q_s8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b), - vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b) - } - }; - - vst1q_s16(output_ptr + x, texels.val[0]); - vst1q_s16(output_ptr + x + 8, texels.val[1]); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - 
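// For reference: each of these up-conversion branches follows the same
// load / widen / shift / store idiom. A self-contained NEON sketch of the core
// step for one 16-lane block (standalone helper, not part of this file; both
// buffers are assumed to hold at least 16 elements):
#include <arm_neon.h>
#include <cstdint>

void widen_s8_to_s16(const int8_t *in, int16_t *out, int shift)
{
    const int16x8_t b = vdupq_n_s16(shift); // vshlq_s16 shifts each lane left by 'shift'
    const int8x16_t v = vld1q_s8(in);       // 16 input lanes
    vst1q_s16(out, vshlq_s16(vmovl_s8(vget_low_s8(v)), b));      // lower 8 lanes widened
    vst1q_s16(out + 8, vshlq_s16(vmovl_s8(vget_high_s8(v)), b)); // upper 8 lanes widened
}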
*(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - }, - input, output); - break; - } - case DataType::S32: - { - /* Up-conversion QASYMM8_SIGNED -> S32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - int x = window_start_x; - - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int8x16_t texels_s8 = vld1q_s8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b), - vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b) - } - }; - - vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0]))); - vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0]))); - vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1]))); - vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - }, - input, output); - break; - } - case DataType::F32: - { - /* Up-conversion QASYMM8_SIGNED -> F32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast(input.ptr())); - - const int16x8x2_t texels = - { - { - vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b), - vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b) - } - }; - vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0])))); - vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0])))); - vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1])))); - vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - }, - input, output); - break; - } -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - { - /* Up-conversion QASYMM8_SIGNED -> F16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - int x = window_start_x; - - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int8x16_t texels_s8 = vld1q_s8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b), - vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b) - } - }; - vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0])); - vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1])); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - }, - input, output); - break; - } -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; - } - - case DataType::QASYMM8: - case DataType::U8: - { - const int16x8_t b = vdupq_n_s16(_shift); - - switch(_output->info()->data_type()) - { - case DataType::S16: - { - /* Up-conversion U8 -> S16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = 
reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b), - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b) - } - }; - - vst1q_s16(output_ptr + x, texels.val[0]); - vst1q_s16(output_ptr + x + 8, texels.val[1]); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - auto in = static_cast(*(input_ptr + x)); - *(output_ptr + x) = in << _shift; - } - }, - input, output); - break; - } - case DataType::S32: - { - /* Up-conversion U8 -> S32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b), - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b) - } - }; - - vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0]))); - vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0]))); - vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1]))); - vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - auto in = static_cast(*(input_ptr + x)); - *(output_ptr + x) = in << _shift; - } - }, - input, output); - break; - } - case DataType::F32: - { - /* Up-conversion U8 -> F32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b), - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b) - } - }; - vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0])))); - vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0])))); - vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1])))); - vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - auto in = static_cast(*(input_ptr + x)); - *(output_ptr + x) = static_cast(in << _shift); - } - }, - input, output); - break; - } -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - { - /* Up-conversion U8 -> F16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x); - - const int16x8x2_t texels = - { - { - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b), - vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b) - } - }; - vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0])); - 
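// For reference: the U8 branches reuse the signed-shift helpers through a
// reinterpret. vmovl_u8 zero-extends, and since every U8 value fits in [0, 255]
// the u16 -> s16 reinterpret is value-preserving before the shift is applied.
// A standalone sketch of that step (shift omitted; assumed 16-element buffers):
#include <arm_neon.h>
#include <cstdint>

void widen_u8_to_s16(const uint8_t *in, int16_t *out)
{
    const uint8x16_t v = vld1q_u8(in);
    vst1q_s16(out, vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(v))));
    vst1q_s16(out + 8, vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(v))));
}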
vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1])); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - }, - input, output); - break; - } -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::U16: - { - /* Up-conversion U8 -> U16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x); - - const uint16x8x2_t texels = - { - { - vshlq_u16(vmovl_u8(vget_low_u8(texels_u8)), b), - vshlq_u16(vmovl_u8(vget_high_u8(texels_u8)), b) - } - }; - - vst1q_u16(output_ptr + x, texels.val[0]); - vst1q_u16(output_ptr + x + 8, texels.val[1]); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x)) << _shift; - } - }, - input, output); - break; - } - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; - } - case DataType::S16: - { - switch(_output->info()->data_type()) - { - case DataType::QASYMM8_SIGNED: - { - const int16x8_t b = vdupq_n_s16(-static_cast(_shift)); - - /* Down-conversion S16 -> QASYMM8_SIGNED */ - if(ConvertPolicy::SATURATE == _policy) - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int16x8x2_t texels = - { - { - vqshlq_s16(vld1q_s16(input_ptr + x), b), - vqshlq_s16(vld1q_s16(input_ptr + x + 8), b) - } - }; - - vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - else - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int16x8x2_t texels = - { - { - vshlq_s16(vld1q_s16(input_ptr + x), b), - vshlq_s16(vld1q_s16(input_ptr + x + 8), b) - } - }; - - vst1q_s8(output_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - break; - } - case DataType::U8: - { - const int16x8_t b = vdupq_n_s16(-static_cast(_shift)); - - /* Down-conversion S16 -> U8 */ - if(ConvertPolicy::SATURATE == _policy) - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int16x8x2_t texels = - { - { - vqshlq_s16(vld1q_s16(input_ptr + x), b), - vqshlq_s16(vld1q_s16(input_ptr + x + 8), b) - } - }; - - vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = 
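// For reference: the two S16 -> U8 paths differ only in the narrowing step,
// which is where ConvertPolicy lands. A small sketch isolating the two
// flavours for one 8-lane block (standalone helper; shift omitted):
#include <arm_neon.h>
#include <cstdint>

void narrow_s16_to_u8(const int16_t *in, uint8_t *out, bool saturate)
{
    const int16x8_t v = vld1q_s16(in);
    const uint8x8_t r = saturate ? vqmovun_s16(v)                       // SATURATE: clamp to [0, 255]
                                 : vmovn_u16(vreinterpretq_u16_s16(v)); // WRAP: keep the low 8 bits
    vst1_u8(out, r);
}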
utils::cast::saturate_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - else - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int16x8x2_t texels = - { - { - vshlq_s16(vld1q_s16(input_ptr + x), b), - vshlq_s16(vld1q_s16(input_ptr + x + 8), b) - } - }; - - vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])), - vmovn_u16(vreinterpretq_u16_s16(texels.val[1])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - break; - } - case DataType::S32: - { - const int32x4_t b = vdupq_n_s32(_shift); - - /* Up-conversion S16 -> S32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int16x8x2_t texels = - { - { - vld1q_s16(input_ptr + x), - vld1q_s16(input_ptr + x + 8) - } - }; - - const int32x4x4_t texels_s32 = - { - { - vshlq_s32(vmovl_s16(vget_low_s16(texels.val[0])), b), - vshlq_s32(vmovl_s16(vget_high_s16(texels.val[0])), b), - vshlq_s32(vmovl_s16(vget_low_s16(texels.val[1])), b), - vshlq_s32(vmovl_s16(vget_high_s16(texels.val[1])), b) - } - }; - - vst1q_s32(output_ptr + x, texels_s32.val[0]); - vst1q_s32(output_ptr + x + 4, texels_s32.val[1]); - vst1q_s32(output_ptr + x + 8, texels_s32.val[2]); - vst1q_s32(output_ptr + x + 12, texels_s32.val[3]); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - }, - input, output); - break; - } - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; - } - case DataType::U16: - { - switch(_output->info()->data_type()) - { - case DataType::U8: - { - const int16x8_t b = vdupq_n_s16(-static_cast(_shift)); - - /* Down-conversion U16 -> U8 */ - if(ConvertPolicy::SATURATE == _policy) - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint16x8x2_t texels = - { - { - vqshlq_u16(vld1q_u16(input_ptr + x), b), - vqshlq_u16(vld1q_u16(input_ptr + x + 8), b) - } - }; - - vst1q_u8(output_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - else - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint16x8x2_t texels = - { - { - vshlq_u16(vld1q_u16(input_ptr + x), b), - vshlq_u16(vld1q_u16(input_ptr + x + 8), b) - } - }; - - vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) 
= static_cast(*(input_ptr + x) >> _shift); - } - - }, - input, output); - } - break; - } - case DataType::U32: - { - const int32x4_t b = vdupq_n_s32(_shift); - - /* Up-conversion U16 -> U32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint16x8x2_t texels = - { - { - vld1q_u16(input_ptr + x), - vld1q_u16(input_ptr + x + 8) - } - }; - - vst1q_u32(output_ptr + x, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[0])), b)); - vst1q_u32(output_ptr + x + 4, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[0])), b)); - vst1q_u32(output_ptr + x + 8, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[1])), b)); - vst1q_u32(output_ptr + x + 12, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[1])), b)); - } - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) << _shift); - } - - }, - input, output); - break; - } - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; - } -#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) - case DataType::BFLOAT16: - switch(_output->info()->data_type()) - { - case DataType::F32: - { - /* Up-conversion BFLOAT16 -> F32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const uint16x8x2_t texels = - { - { - vld1q_u16(reinterpret_cast(input.ptr())), - vld1q_u16(reinterpret_cast(input.ptr()) + 8) - } - }; - - vst1q_f32(reinterpret_cast(output.ptr()), - vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16))); - vst1q_f32(reinterpret_cast(output.ptr()) + 4, - vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16))); - vst1q_f32(reinterpret_cast(output.ptr()) + 8, - vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16))); - vst1q_f32(reinterpret_cast(output.ptr()) + 12, - vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16))); - } - - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = float(*(input_ptr + x)); - } - }, - input, output); - break; - } - default: - ARM_COMPUTE_ERROR("Output data type unsupported"); - } - break; -#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */ -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - switch(_output->info()->data_type()) - { - case DataType::QASYMM8_SIGNED: - { - const float16_t scale_s = 1 << _shift; - const float16x8_t scale = vdupq_n_f16(scale_s); - - /* Down-conversion F16 -> QASYMM8_SIGNED (Always saturating) */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float16x8x2_t texels = - { - { - vmulq_f16(vld1q_f16(input_ptr + x), scale), - vmulq_f16(vld1q_f16(input_ptr + x + 8), scale), - } - }; - - vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - 
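// For reference: the BFLOAT16 -> F32 branch relies on bfloat16 being exactly
// the top 16 bits of an IEEE-754 float32, so widening is a pure 16-bit left
// shift (the vshlq_n_u32(vmovl_u16(...), 16) above). The scalar equivalent,
// as a sketch:
#include <cstdint>
#include <cstring>

float bf16_to_f32(uint16_t b)
{
    const uint32_t bits = static_cast<uint32_t>(b) << 16; // move into the float's high half
    float f;
    std::memcpy(&f, &bits, sizeof(f)); // bit-exact, strict-aliasing-safe conversion
    return f;
}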
*(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - case DataType::QASYMM8: - case DataType::U8: - { - const float16_t scale_s = 1 << _shift; - const float16x8_t scale = vdupq_n_f16(scale_s); - - /* Down-conversion F16 -> QASYMM8/U8 (Always saturating) */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float16x8x2_t texels = - { - { - vmulq_f16(vld1q_f16(input_ptr + x), scale), - vmulq_f16(vld1q_f16(input_ptr + x + 8), scale), - } - }; - - vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) * scale_s); - } - - }, - input, output); - break; - } - case DataType::F32: - { - const float scale_s = 1 << _shift; - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Up-conversion F16 -> F32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float16x8x2_t texels = - { - { - vld1q_f16(input_ptr + x), - vld1q_f16(input_ptr + x + 8) - } - }; - vst1q_f32(output_ptr + x, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale)); - vst1q_f32(output_ptr + x + 4, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale)); - vst1q_f32(output_ptr + x + 8, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale)); - vst1q_f32(output_ptr + x + 12, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale)); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - case DataType::S32: - { - const float scale_s = 1 << _shift; - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Up-conversion F16 -> S32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float16x8x2_t texels = - { - { - vld1q_f16(input_ptr + x), - vld1q_f16(input_ptr + x + 8) - } - }; - - vst1q_s32(output_ptr + x, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale))); - vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale))); - vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale))); - vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - case DataType::F32: - switch(_output->info()->data_type()) - { -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - { - const float scale_s = 
1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Down-conversion F32 -> F16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float32x4x4_t texels = - { - { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale) - } - }; - - vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); - vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ -#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) - case DataType::BFLOAT16: - { - /* Down-conversion F32 -> BFLOAT16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - wrapper::vcvt_bf16_f32(reinterpret_cast(input.ptr()), - reinterpret_cast(output.ptr())); - wrapper::vcvt_bf16_f32(reinterpret_cast(input.ptr()) + 8, - reinterpret_cast(output.ptr()) + 8); - } - - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = *(input_ptr + x); - } - }, - input, output); - break; - } -#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */ - case DataType::S32: - { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Conversion F32 -> S32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float32x4x4_t texels = - { - { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale), - } - }; - - vst1q_s32(output_ptr + x, vcvtq_s32_f32(texels.val[0])); - vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(texels.val[1])); - vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(texels.val[2])); - vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(texels.val[3])); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - case DataType::QASYMM8: - case DataType::U8: - { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Down-conversion F32 -> U8 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float32x4x4_t texels = - { - { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), 
scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale), - } - }; - - vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1]))))); - vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3]))))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - case DataType::QASYMM8_SIGNED: - { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Down-conversion F32 -> QASYMM8_SIGNED */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float32x4x4_t texels = - { - { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale), - } - }; - - vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1]))))); - vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3]))))); - } - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; - - case DataType::S32: - switch(_output->info()->data_type()) - { -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - - /* Down-conversion S32 -> F16 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const float32x4x4_t texels = - { - { - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x)), scale), - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 4)), scale), - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 8)), scale), - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 12)), scale) - } - }; - - vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); - vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - case DataType::F32: - { - const int scale_s = 1.f / (1 << _shift); - const int32x4_t scale = vdupq_n_s32(scale_s); - - /* Conversion S32 -> F32 */ - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int32x4x4_t texels 
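[Editorial note] The F32 -> U8 and F32 -> QASYMM8_SIGNED hunks above chain a float-to-int conversion with two saturating narrows. A scalar model of the unsigned chain, after the legacy 1/(1 << shift) scale has been applied (assumes the scaled value fits in int32; the NEON intrinsics also saturate in that case):

    #include <algorithm>
    #include <cstdint>

    // vcvtq_s32_f32 truncates toward zero; vqmovun_s32 then vqmovn_u16
    // together clamp the result into the U8 range [0, 255].
    uint8_t f32_to_u8_sat(float v)
    {
        const int32_t i = static_cast<int32_t>(v);
        return static_cast<uint8_t>(std::clamp(i, 0, 255));
    }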
= - { - { - vmulq_s32(vld1q_s32(input_ptr + x), scale), - vmulq_s32(vld1q_s32(input_ptr + x + 4), scale), - vmulq_s32(vld1q_s32(input_ptr + x + 8), scale), - vmulq_s32(vld1q_s32(input_ptr + x + 12), scale), - } - }; - - vst1q_f32(output_ptr + x, vcvtq_f32_s32(texels.val[0])); - vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(texels.val[1])); - vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(texels.val[2])); - vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(texels.val[3])); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) * scale_s); - } - }, - input, output); - break; - } - case DataType::QASYMM8_SIGNED: - { - const int32x4_t b = vdupq_n_s32(-static_cast(_shift)); - - /* Down-conversion S32 -> QASYMM8_SIGNED */ - if(ConvertPolicy::SATURATE == _policy) - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int32x4x4_t texels = - { - { - vqshlq_s32(vld1q_s32(input_ptr + x), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 12), b) - } - }; - vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1])))); - vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - else - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int32x4x4_t texels = - { - { - vshlq_s32(vld1q_s32(input_ptr + x), b), - vshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vshlq_s32(vld1q_s32(input_ptr + x + 12), b) - } - }; - - vst1_s8(output_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1])))); - vst1_s8(output_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3])))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - break; - } - case DataType::QASYMM8: - case DataType::U8: - { - const int32x4_t b = vdupq_n_s32(-static_cast(_shift)); - - /* Down-conversion S32 -> U8 */ - if(ConvertPolicy::SATURATE == _policy) - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int32x4x4_t texels = - { - { - vqshlq_s32(vld1q_s32(input_ptr + x), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 12), b) - } - }; - vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1])))); - vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3])))); - } - - // Compute 
left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = utils::cast::saturate_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - else - { - execute_window_loop(win, [&](const Coordinates &) - { - const auto input_ptr = reinterpret_cast(input.ptr()); - const auto output_ptr = reinterpret_cast(output.ptr()); - - int x = window_start_x; - for(; x <= (window_end_x - window_step_x); x += window_step_x) - { - const int32x4x4_t texels = - { - { - vshlq_s32(vld1q_s32(input_ptr + x), b), - vshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vshlq_s32(vld1q_s32(input_ptr + x + 12), b) - } - }; - - vst1_u8(output_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1]))))); - vst1_u8(output_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3]))))); - } - - // Compute left-over elements - for(; x < window_end_x; ++x) - { - *(output_ptr + x) = static_cast(*(input_ptr + x) >> _shift); - } - }, - input, output); - } - break; - } - default: - ARM_COMPUTE_ERROR("Output data type not supported"); - } - break; - default: - ARM_COMPUTE_ERROR("Not supported"); - } -} diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h b/src/core/NEON/kernels/NEDepthConvertLayerKernel.h deleted file mode 100644 index 30fe1ed2e6..0000000000 --- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2016-2020 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_DEPTHCONVERTKERNEL_H -#define ARM_COMPUTE_DEPTHCONVERTKERNEL_H - -#include "src/core/NEON/INEKernel.h" - -namespace arm_compute -{ -class ITensor; - -/** Depth conversion kernel - * This function ignores the scale and zeroPoint of quanized tensors, i.e. QASYMM8 input is treated as uint8 values. 
- */ -class NEDepthConvertLayerKernel : public INEKernel -{ -public: - const char *name() const override - { - return "NEDepthConvertLayerKernel"; - } - /** Default constructor*/ - NEDepthConvertLayerKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEDepthConvertLayerKernel(const NEDepthConvertLayerKernel &) = delete; - /** Default move constructor */ - NEDepthConvertLayerKernel(NEDepthConvertLayerKernel &&) = default; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - NEDepthConvertLayerKernel &operator=(const NEDepthConvertLayerKernel &) = delete; - /** Default move assignment operator */ - NEDepthConvertLayerKernel &operator=(NEDepthConvertLayerKernel &&) = default; - /** Default destructor */ - ~NEDepthConvertLayerKernel() = default; - /** Set the input and output of the kernel - * - * Valid conversions Input -> Output : - * - * - QASYMM8_SIGNED -> S16, S32, F32, F16 - * - QASYMM8 -> U16, S16, S32, F32, F16 - * - U8 -> U16, S16, S32, F32, F16 - * - U16 -> U8, U32 - * - S16 -> QASYMM8_SIGNED, U8, S32 - * - BFLOAT16 -> F32 - * - F16 -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8 - * - S32 -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8 - * - F32 -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8 - * - * @param[in] input The input tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32. - * @param[out] output The output tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32. - * @param[in] policy Conversion policy. - * @param[in] shift (Optional) Value for down/up conversions. Must be 0 <= shift < 8. - */ - void configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0); - /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConvertLayerKernel - * - * @param[in] input Source tensor info. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32. - * @param[in] output Destination tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32. - * @param[in] policy Conversion policy - * @param[in] shift (Optional) Value for down/up conversions. Must be 0 <= shift < 8. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift = 0); - - // Inherited methods overridden: - void run(const Window &window, const ThreadInfo &info) override; - -private: - const ITensor *_input; - ITensor *_output; - ConvertPolicy _policy; - uint32_t _shift; -}; -} // namespace arm_compute -#endif /*ARM_COMPUTE_NEDEPTHCONVERTKERNEL_H */ diff --git a/src/core/cpu/kernels/CpuCastKernel.cpp b/src/core/cpu/kernels/CpuCastKernel.cpp new file mode 100644 index 0000000000..46f3c330ef --- /dev/null +++ b/src/core/cpu/kernels/CpuCastKernel.cpp @@ -0,0 +1,1367 @@ +/* + * Copyright (c) 2016-2021 Arm Limited. 
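[Editorial note] The header above is the legacy interface being deleted; the replacement below takes ITensorInfo pointers and drops the shift parameter entirely. Call sites migrate roughly as follows (sketch only; src_tensor/dst_tensor are assumed already created and allocated, and the operator-level wrappers are outside this hunk):

    // Before: ITensor-based kernel, shift now constrained to 0 anyway.
    NEDepthConvertLayerKernel legacy;
    legacy.configure(&src_tensor, &dst_tensor, ConvertPolicy::SATURATE, 0 /* shift */);

    // After: ITensorInfo-based, no shift parameter.
    cpu::kernels::CpuCastKernel cast;
    cast.configure(src_tensor.info(), dst_tensor.info(), ConvertPolicy::SATURATE);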
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "src/core/cpu/kernels/CpuCastKernel.h" + +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Validate.h" +#include "src/core/CPP/Validate.h" +#include "src/core/NEON/NEFixedPoint.h" +#include "src/core/NEON/NEMath.h" +#include "src/core/NEON/wrapper/wrapper.h" +#include "src/core/helpers/AutoConfiguration.h" +#include "src/core/helpers/WindowHelpers.h" +#include "support/SaturateCast.h" + +namespace arm_compute +{ +namespace cpu +{ +namespace kernels +{ +namespace +{ +Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy) +{ + ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src); + ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(dst); + ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(src); + ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(dst); + ARM_COMPUTE_UNUSED(policy); + ARM_COMPUTE_RETURN_ERROR_ON(src == dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8, + DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16, + DataType::F32, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8, + DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16, + DataType::U32, DataType::S32, DataType::F32); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8_SIGNED && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::S32 + && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32), + "Only data_types supported [in] QASYMM8_SIGNED -> [out] S16, S32, F16, F32"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16 + && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32), + "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16 + && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32), + "Only data_types supported [in]
U8 -> [out] U16, S16, S32, F16, F32"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U16 && (dst->data_type() != DataType::U8 && dst->data_type() != DataType::U32), + "Only data_types supported [in] U16 -> [out] U8, U32"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::U8 && dst->data_type() != DataType::S32), + "Only data_types supported [in] S16 -> [out] U8, S32"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::BFLOAT16 && dst->data_type() != DataType::F32, + "Only data_types supported [in] BFLOAT16 -> [out] F32"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8 + && dst->data_type() != DataType::U8 + && dst->data_type() != DataType::F32 && dst->data_type() != DataType::S32), + "Only data_types supported [in] F16 -> [out] QASYMM8, F32, S32, U8"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8 + && dst->data_type() != DataType::F16 && dst->data_type() != DataType::BFLOAT16 + && dst->data_type() != DataType::S32 && dst->data_type() != DataType::U8), + "Only data_types supported [in] F32 -> [out] QASYMM8, BFLOAT16, F16, S32, U8"); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8 + && dst->data_type() != DataType::F16 + && dst->data_type() != DataType::F32 && dst->data_type() != DataType::U8), + "Only data_types supported [in] S32 -> [out] QASYMM8, F16, F32, U8"); + + // Validate in case of configured dst + if(dst->total_size() > 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); + } + + return Status{}; +} +} // namespace + +void CpuCastKernel::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); + + // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given) + set_shape_if_empty(*dst, src->tensor_shape()); + + _policy = policy; + + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy)); + + // Configure kernel window + Window win = calculate_max_window(*src, Steps()); + + ICPPKernel::configure(win); +} + +Status CpuCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy)); + return Status{}; +} + +void CpuCastKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); + + const auto window_start_x = static_cast(window.x().start()); + const auto window_end_x = static_cast(window.x().end()); + const int window_step_x = 16; + + const ITensor *_src = tensors.get_const_tensor(TensorType::ACL_SRC); + ITensor *_dst = tensors.get_tensor(TensorType::ACL_DST); + ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst); + ARM_COMPUTE_ERROR_ON(_src == _dst); + + ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst); + + Window win{ window }; + win.set(Window::DimX, Window::Dimension(0, 1, 1)); + + Iterator src(_src, win); + Iterator dst(_dst, win); + + switch(_src->info()->data_type()) + { + case DataType::QASYMM8_SIGNED: + { + 
switch(_dst->info()->data_type()) + { + case DataType::S16: + { + /* Up-conversion QASYMM8_SIGNED -> S16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + int x = window_start_x; + + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int8x16_t texels_s8 = vld1q_s8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vmovl_s8(vget_low_s8(texels_s8)), + vmovl_s8(vget_high_s8(texels_s8)) + } + }; + + vst1q_s16(dst_ptr + x, texels.val[0]); + vst1q_s16(dst_ptr + x + 8, texels.val[1]); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::S32: + { + /* Up-conversion QASYMM8_SIGNED -> S32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + int x = window_start_x; + + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int8x16_t texels_s8 = vld1q_s8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vmovl_s8(vget_low_s8(texels_s8)), + vmovl_s8(vget_high_s8(texels_s8)) + } + }; + + vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0]))); + vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0]))); + vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1]))); + vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::F32: + { + /* Up-conversion QASYMM8_SIGNED -> F32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast(src.ptr())); + + const int16x8x2_t texels = + { + { + vmovl_s8(vget_low_s8(texels_s8)), + vmovl_s8(vget_high_s8(texels_s8)) + } + }; + vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0])))); + vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0])))); + vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1])))); + vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + { + /* Up-conversion QASYMM8_SIGNED -> F16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + int x = window_start_x; + + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int8x16_t texels_s8 = vld1q_s8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vmovl_s8(vget_low_s8(texels_s8)), + vmovl_s8(vget_high_s8(texels_s8)) + } + }; + vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0])); + vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1])); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } +#endif 
// __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; + } + + case DataType::QASYMM8: + case DataType::U8: + { + switch(_dst->info()->data_type()) + { + case DataType::S16: + { + /* Up-conversion U8 -> S16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), + vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))) + } + }; + + vst1q_s16(dst_ptr + x, texels.val[0]); + vst1q_s16(dst_ptr + x + 8, texels.val[1]); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::S32: + { + /* Up-conversion U8 -> S32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), + vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))) + } + }; + + vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0]))); + vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0]))); + vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1]))); + vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::F32: + { + /* Up-conversion U8 -> F32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), + vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))) + } + }; + vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0])))); + vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0])))); + vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1])))); + vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + { + /* Up-conversion U8 -> F16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x); + + const int16x8x2_t texels = + { + { + vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), + 
vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))) + } + }; + vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0])); + vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1])); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::U16: + { + /* Up-conversion U8 -> U16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x); + + const uint16x8x2_t texels = + { + { + vmovl_u8(vget_low_u8(texels_u8)), + vmovl_u8(vget_high_u8(texels_u8)) + } + }; + + vst1q_u16(dst_ptr + x, texels.val[0]); + vst1q_u16(dst_ptr + x + 8, texels.val[1]); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; + } + case DataType::S16: + { + switch(_dst->info()->data_type()) + { + case DataType::QASYMM8_SIGNED: + { + /* Down-conversion S16 -> QASYMM8_SIGNED */ + if(ConvertPolicy::SATURATE == _policy) + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int16x8x2_t texels = + { + { + vld1q_s16(src_ptr + x), + vld1q_s16(src_ptr + x + 8) + } + }; + + vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + } + else + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int16x8x2_t texels = + { + { + vld1q_s16(src_ptr + x), + vld1q_s16(src_ptr + x + 8) + } + }; + + vst1q_s8(dst_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + } + break; + } + case DataType::U8: + { + /* Down-conversion S16 -> U8 */ + if(ConvertPolicy::SATURATE == _policy) + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int16x8x2_t texels = + { + { + vld1q_s16(src_ptr + x), + vld1q_s16(src_ptr + x + 8) + } + }; + + vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + } + else + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + 
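[Editorial note] The two S16 -> QASYMM8_SIGNED branches above are where ConvertPolicy actually changes results. Element-wise, they behave like this scalar sketch (vqmovn_s16 saturates, vmovn_s16 keeps the low byte):

    #include <algorithm>
    #include <cstdint>

    int8_t s16_to_s8(int16_t v, bool saturate)
    {
        if(saturate)
        {
            return static_cast<int8_t>(std::clamp<int16_t>(v, -128, 127)); // 300 -> 127
        }
        return static_cast<int8_t>(static_cast<uint8_t>(v)); // 300 -> 44 (wraps)
    }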
for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int16x8x2_t texels = + { + { + vld1q_s16(src_ptr + x), + vld1q_s16(src_ptr + x + 8) + } + }; + + vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])), + vmovn_u16(vreinterpretq_u16_s16(texels.val[1])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + } + break; + } + case DataType::S32: + { + /* Up-conversion S16 -> S32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int16x8x2_t texels = + { + { + vld1q_s16(src_ptr + x), + vld1q_s16(src_ptr + x + 8) + } + }; + + const int32x4x4_t texels_s32 = + { + { + vmovl_s16(vget_low_s16(texels.val[0])), + vmovl_s16(vget_high_s16(texels.val[0])), + vmovl_s16(vget_low_s16(texels.val[1])), + vmovl_s16(vget_high_s16(texels.val[1])) + } + }; + + vst1q_s32(dst_ptr + x, texels_s32.val[0]); + vst1q_s32(dst_ptr + x + 4, texels_s32.val[1]); + vst1q_s32(dst_ptr + x + 8, texels_s32.val[2]); + vst1q_s32(dst_ptr + x + 12, texels_s32.val[3]); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; + } + case DataType::U16: + { + switch(_dst->info()->data_type()) + { + case DataType::U8: + { + /* Down-conversion U16 -> U8 */ + if(ConvertPolicy::SATURATE == _policy) + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint16x8x2_t texels = + { + { + vld1q_u16(src_ptr + x), + vld1q_u16(src_ptr + x + 8) + } + }; + + vst1q_u8(dst_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + } + else + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint16x8x2_t texels = + { + { + vld1q_u16(src_ptr + x), + vld1q_u16(src_ptr + x + 8) + } + }; + + vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + + }, + src, dst); + } + break; + } + case DataType::U32: + { + /* Up-conversion U16 -> U32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint16x8x2_t texels = + { + { + vld1q_u16(src_ptr + x), + vld1q_u16(src_ptr + x + 8) + } + }; + + vst1q_u32(dst_ptr + x, vmovl_u16(vget_low_u16(texels.val[0]))); + vst1q_u32(dst_ptr + x + 4, vmovl_u16(vget_high_u16(texels.val[0]))); + vst1q_u32(dst_ptr + x + 8, 
vmovl_u16(vget_low_u16(texels.val[1]))); + vst1q_u32(dst_ptr + x + 12, vmovl_u16(vget_high_u16(texels.val[1]))); + } + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + + }, + src, dst); + break; + } + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; + } +#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) + case DataType::BFLOAT16: + switch(_dst->info()->data_type()) + { + case DataType::F32: + { + /* Up-conversion BFLOAT16 -> F32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const uint16x8x2_t texels = + { + { + vld1q_u16(reinterpret_cast(src.ptr())), + vld1q_u16(reinterpret_cast(src.ptr()) + 8) + } + }; + + vst1q_f32(reinterpret_cast(dst.ptr()), + vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16))); + vst1q_f32(reinterpret_cast(dst.ptr()) + 4, + vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16))); + vst1q_f32(reinterpret_cast(dst.ptr()) + 8, + vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16))); + vst1q_f32(reinterpret_cast(dst.ptr()) + 12, + vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16))); + } + + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = float(*(src_ptr + x)); + } + }, + src, dst); + break; + } + default: + ARM_COMPUTE_ERROR("dst data type unsupported"); + } + break; +#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */ +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + switch(_dst->info()->data_type()) + { + case DataType::QASYMM8_SIGNED: + { + /* Down-conversion F16 -> QASYMM8_SIGNED (Always saturating) */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float16x8x2_t texels = + { + { + vld1q_f16(src_ptr + x), + vld1q_f16(src_ptr + x + 8), + } + }; + + vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::QASYMM8: + case DataType::U8: + { + /* Down-conversion F16 -> QASYMM8/U8 (Always saturating) */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float16x8x2_t texels = + { + { + vld1q_f16(src_ptr + x), + vld1q_f16(src_ptr + x + 8), + } + }; + + vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + + }, + src, dst); + break; + } + case DataType::F32: + { + /* Up-conversion F16 -> F32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = 
reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float16x8x2_t texels = + { + { + vld1q_f16(src_ptr + x), + vld1q_f16(src_ptr + x + 8) + } + }; + vst1q_f32(dst_ptr + x, vcvt_f32_f16(vget_low_f16(texels.val[0]))); + vst1q_f32(dst_ptr + x + 4, vcvt_f32_f16(vget_high_f16(texels.val[0]))); + vst1q_f32(dst_ptr + x + 8, vcvt_f32_f16(vget_low_f16(texels.val[1]))); + vst1q_f32(dst_ptr + x + 12, vcvt_f32_f16(vget_high_f16(texels.val[1]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::S32: + { + /* Up-conversion F16 -> S32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float16x8x2_t texels = + { + { + vld1q_f16(src_ptr + x), + vld1q_f16(src_ptr + x + 8) + } + }; + + vst1q_s32(dst_ptr + x, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])))); + vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])))); + vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])))); + vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + case DataType::F32: + switch(_dst->info()->data_type()) + { +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + { + /* Down-conversion F32 -> F16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float32x4x4_t texels = + { + { + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12) + } + }; + + vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); + vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ +#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) + case DataType::BFLOAT16: + { + /* Down-conversion F32 -> BFLOAT16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + wrapper::vcvt_bf16_f32(reinterpret_cast(src.ptr()), + reinterpret_cast(dst.ptr())); + wrapper::vcvt_bf16_f32(reinterpret_cast(src.ptr()) + 8, + reinterpret_cast(dst.ptr()) + 8); + } + + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = *(src_ptr + x); + } + }, + src, dst); + break; + } +#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || 
defined(ARM_COMPUTE_FORCE_BF16) */ + case DataType::S32: + { + /* Conversion F32 -> S32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float32x4x4_t texels = + { + { + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12), + } + }; + + vst1q_s32(dst_ptr + x, vcvtq_s32_f32(texels.val[0])); + vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(texels.val[1])); + vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(texels.val[2])); + vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(texels.val[3])); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::QASYMM8: + case DataType::U8: + { + /* Down-conversion F32 -> U8 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float32x4x4_t texels = + { + { + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12), + } + }; + + vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1]))))); + vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3]))))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::QASYMM8_SIGNED: + { + /* Down-conversion F32 -> QASYMM8_SIGNED */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float32x4x4_t texels = + { + { + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12), + } + }; + + vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1]))))); + vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3]))))); + } + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; + + case DataType::S32: + switch(_dst->info()->data_type()) + { +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + { + /* Down-conversion S32 -> F16 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const float32x4x4_t texels = + { + { + vcvtq_f32_s32(vld1q_s32(src_ptr + x)), + vcvtq_f32_s32(vld1q_s32(src_ptr + x + 4)), + vcvtq_f32_s32(vld1q_s32(src_ptr + x + 8)), + vcvtq_f32_s32(vld1q_s32(src_ptr + x + 12)) + } + }; + + 
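[Editorial note] One subtlety in the F32 -> S32 case above: vcvtq_s32_f32 rounds toward zero, so the cast truncates rather than rounding to nearest. A scalar check (assumes the value fits in int32):

    #include <cstdint>

    int32_t f32_to_s32(float v) { return static_cast<int32_t>(v); } // 1.9f -> 1, -1.9f -> -1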
vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); + vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + case DataType::F32: + { + /* Conversion S32 -> F32 */ + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int32x4x4_t texels = + { + { + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12), + } + }; + + vst1q_f32(dst_ptr + x, vcvtq_f32_s32(texels.val[0])); + vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(texels.val[1])); + vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(texels.val[2])); + vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(texels.val[3])); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + break; + } + case DataType::QASYMM8_SIGNED: + { + /* Down-conversion S32 -> QASYMM8_SIGNED */ + if(ConvertPolicy::SATURATE == _policy) + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int32x4x4_t texels = + { + { + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12), + } + }; + vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1])))); + vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + } + else + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int32x4x4_t texels = + { + { + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12) + } + }; + + vst1_s8(dst_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1])))); + vst1_s8(dst_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + } + break; + } + case DataType::QASYMM8: + case DataType::U8: + { + /* Down-conversion S32 -> U8 */ + if(ConvertPolicy::SATURATE == _policy) + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int32x4x4_t texels = + { + { + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12) + } + }; + 
vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1])))); + vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3])))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = utils::cast::saturate_cast(*(src_ptr + x)); + } + }, + src, dst); + } + else + { + execute_window_loop(win, [&](const Coordinates &) + { + const auto src_ptr = reinterpret_cast(src.ptr()); + const auto dst_ptr = reinterpret_cast(dst.ptr()); + + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + const int32x4x4_t texels = + { + { + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12) + } + }; + + vst1_u8(dst_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1]))))); + vst1_u8(dst_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3]))))); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dst_ptr + x) = static_cast(*(src_ptr + x)); + } + }, + src, dst); + } + break; + } + default: + ARM_COMPUTE_ERROR("dst data type not supported"); + } + break; + default: + ARM_COMPUTE_ERROR("Not supported"); + } +} + +const char *CpuCastKernel::name() const +{ + return "CpuCastKernel.cpp"; +} +} // namespace kernels +} // namespace cpu +} // namespace arm_compute diff --git a/src/core/cpu/kernels/CpuCastKernel.h b/src/core/cpu/kernels/CpuCastKernel.h new file mode 100644 index 0000000000..2a75c5850e --- /dev/null +++ b/src/core/cpu/kernels/CpuCastKernel.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2016-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
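[Editorial note] With CpuCastKernel.cpp complete above, a minimal usage sketch of the new tensor-pack interface (tensor creation and allocation elided; the schedule_op call follows the pattern other ported operators use and is shown here as an assumption, not patch content):

    Tensor src, dst; // assumed configured (e.g. U8 -> S16) and allocated

    cpu::kernels::CpuCastKernel cast;
    cast.configure(src.info(), dst.info(), ConvertPolicy::SATURATE);

    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);

    NEScheduler::get().schedule_op(&cast, Window::DimY, cast.window(), pack);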
+ */
+#ifndef ARM_COMPUTE_CPU_CAST_KERNEL_H
+#define ARM_COMPUTE_CPU_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zeroPoint are ignored
+ */
+class CpuCastKernel : public ICpuKernel
+{
+public:
+    CpuCastKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuCastKernel);
+    /** Set the src and dst of the kernel
+     *
+     * Valid conversions src -> dst :
+     *
+     *   - QASYMM8_SIGNED -> S16, S32, F32, F16
+     *   - QASYMM8        -> U16, S16, S32, F32, F16
+     *   - U8             -> U16, S16, S32, F32, F16
+     *   - U16            -> U8, U32
+     *   - S16            -> QASYMM8_SIGNED, U8, S32
+     *   - BFLOAT16       -> F32
+     *   - F16            -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8
+     *   - S32            -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8
+     *   - F32            -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
+     *
+     * @param[in]  src    The src tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
+     * @param[out] dst    The dst tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
+     * @param[in]  policy Conversion policy.
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref CpuCastKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    ConvertPolicy _policy{ ConvertPolicy::SATURATE };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_CAST_KERNEL_H */
diff --git a/src/core/gpu/cl/ClKernelLibrary.cpp b/src/core/gpu/cl/ClKernelLibrary.cpp
index 286ed4c021..b0458d7c3a 100644
--- a/src/core/gpu/cl/ClKernelLibrary.cpp
+++ b/src/core/gpu/cl/ClKernelLibrary.cpp
@@ -216,8 +216,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
     { "concatenate_width_x2", "concatenate.cl" },
     { "concatenate_width_x4", "concatenate.cl" },
     { "col2im", "col2im.cl" },
-    { "convert_depth_down", "depth_convert.cl" },
-    { "convert_depth_up", "depth_convert.cl" },
+    { "cast_down", "cast.cl" },
+    { "cast_up", "cast.cl" },
     { "convert_fc_weights", "convert_fc_weights.cl" },
     { "copy_tensor", "copy_tensor.cl" },
     { "crop_tensor", "crop_tensor.cl" },
@@ -565,8 +565,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
 #include "./cl_kernels/deconvolution_layer.clembed"
     },
     {
-        "depth_convert.cl",
-#include "./cl_kernels/depth_convert.clembed"
+        "cast.cl",
+#include "./cl_kernels/cast.clembed"
     },
     {
         "depth_to_space.cl",
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.cpp b/src/core/gpu/cl/kernels/ClCastKernel.cpp
new file mode 100644
index 0000000000..7a1d5c2824
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClCastKernel.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include "support/Cast.h"
+#include "support/StringSupport.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_UNUSED(policy);
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
+                                                         1,
+                                                         DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
+                                                         DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+                                                         DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst,
+                                                         1,
+                                                         DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
+                                                         DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+                                                         DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == dst->data_type(), "src and dst data types must be different");
+
+    // Validate in case of configured dst
+    if(dst->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+    }
+
+    return Status{};
+}
+} // namespace
+
+void ClCastKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given)
+    set_shape_if_empty(*dst, src->tensor_shape());
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
+
+    auto padding_info = get_padding_info({ src, dst });
+
+    // Get data sizes
+    const size_t src_size = data_size_from_type(src->data_type());
+    const size_t dst_size = data_size_from_type(dst->data_type());
+
+    // Get number of elements to process per iterations
+    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
+
+    // Set build options
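+    // VEC_SIZE is the per-work-item vector width chosen above; VEC_SIZE_LEFTOVER (first dimension % VEC_SIZE)
+    // lets the compiled kernel handle a partial vector at the end of each row without requiring padding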
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
+    build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(src->data_type()));
+    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst->data_type()));
+    // Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined
+    build_opts.add_option_if(is_data_type_float(src->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
+    build_opts.add_option_if(is_data_type_float(src->data_type()) || is_data_type_float(dst->data_type()), "-DIS_DATA_TYPE_FLOAT");
+    build_opts.add_option_if(is_data_type_quantized(src->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
+
+    // Create kernel
+    const std::string kernel_name = (src_size >= dst_size) ? "cast_down" : "cast_up";
+    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
+    // Configure kernel
+    Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
+    ICLKernel::configure_internal(win);
+
+    // Collapse window
+    const Window &full_window      = window();
+    Window        collapsed_window = full_window.collapse_if_possible(full_window, Window::DimZ);
+    ICLKernel::configure_internal(collapsed_window);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+
+    // Set config_id for enabling LWS tuning
+    _config_id = kernel_name;
+    _config_id += "_";
+    _config_id += lower_string(string_from_data_type(src->data_type()));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(src->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(src->dimension(1));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(dst->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(dst->dimension(1));
+}
+
+Status ClCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy));
+    return Status{};
+}
+
+void ClCastKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+    auto       dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+    Window slice     = collapsed.first_slice_window_3D();
+
+    do
+    {
+        unsigned int idx = 0;
+        add_3D_tensor_argument(idx, src, slice);
+        add_3D_tensor_argument(idx, dst, slice);
+        enqueue(queue, *this, slice, lws_hint());
+    }
+    while(collapsed.slide_window_slice_3D(slice));
+}
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.h b/src/core/gpu/cl/kernels/ClCastKernel.h
new file mode 100644
index 0000000000..451aa9c1ab
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClCastKernel.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_CAST_KERNEL_H
+#define ARM_COMPUTE_CL_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zeroPoint are ignored
+ */
+class ClCastKernel : public IClKernel
+{
+public:
+    ClCastKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCastKernel);
+    /** Set the src and dst of the kernel.
+     *
+     * Valid conversions src -> dst :
+     *
+     *   - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
+     *   - U8  -> S8, U16, S16, U32, S32, F16, F32
+     *   - U16 -> U8, S8, S16, U32, S32, F16, F32
+     *   - S16 -> U8, S8, U16, U32, S32, F16, F32
+     *   - U32 -> U8, S8, U16, S16, S32, F16, F32
+     *   - S32 -> U8, S8, U16, S16, U32, F16, F32
+     *   - F16 -> U8, S8, U16, S16, U32, F32
+     *   - F32 -> U8, S8, U16, S16, U32, F16
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  src             The source tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
+     * @param[out] dst             The destination tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
+     * @param[in]  policy          Conversion policy
+     */
+    void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref ClCastKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_CAST_KERNEL_H */
-- 
cgit v1.2.1
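
For completeness, a minimal sketch of how the ported CL path can be exercised end to end. It assumes the
public runtime function CLCast (which dispatches to ClCastKernel) keeps its configure(input, output, policy)
signature; the shapes, data types and variable names below are illustrative only and are not part of the patch.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLCast.h"

    using namespace arm_compute;

    int main()
    {
        // Create the default OpenCL context and queue for the scheduler
        CLScheduler::get().default_init();

        // S32 -> F32 is one of the conversions listed as valid for ClCastKernel
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::S32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        // configure() should run the kernel's validation and select cast_down/cast_up
        CLCast cast;
        cast.configure(&src, &dst, ConvertPolicy::SATURATE);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        cast.run();
        CLScheduler::get().sync();
        return 0;
    }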