Diffstat (limited to 'src/core')

 src/core/CL/CLKernels.h                                                                                   |   1
 src/core/CL/cl_kernels/cast.cl (renamed from src/core/CL/cl_kernels/depth_convert.cl)                     |  20
 src/core/CL/kernels/CLDepthConvertLayerKernel.h                                                           |  91
 src/core/NEON/NEKernels.h                                                                                 |   1
 src/core/NEON/kernels/NEDepthConvertLayerKernel.h                                                         |  96
 src/core/cpu/kernels/CpuCastKernel.cpp (renamed from src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp) | 831
 src/core/cpu/kernels/CpuCastKernel.h                                                                      |  82
 src/core/gpu/cl/ClKernelLibrary.cpp                                                                       |   8
 src/core/gpu/cl/kernels/ClCastKernel.cpp (renamed from src/core/CL/kernels/CLDepthConvertLayerKernel.cpp) | 119
 src/core/gpu/cl/kernels/ClCastKernel.h                                                                    |  79

10 files changed, 632 insertions, 696 deletions
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 1302d52180..c59eebacbb 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -35,7 +35,6 @@
 #include "src/core/CL/kernels/CLComparisonKernel.h"
 #include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
 #include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
diff --git a/src/core/CL/cl_kernels/depth_convert.cl b/src/core/CL/cl_kernels/cast.cl
index a888d7b9bc..036a683ec7 100644
--- a/src/core/CL/cl_kernels/depth_convert.cl
+++ b/src/core/CL/cl_kernels/cast.cl
@@ -31,7 +31,7 @@
 #define CONVERT_UP(x, type) CONVERT(x, type)
 
-/** This function performs a down-scaling depth conversion.
+/** This function performs a down-casting
  *
  * @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is user's responsibility to keep track of the quantization info.
  *
@@ -56,12 +56,10 @@
  * @param[in]  out_stride_z                      Stride of the source tensor in Z dimension (in bytes)
  * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem(in bytes)
  * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in]  shift                             The integer shift amount value. Supported data types: S32
  */
-__kernel void convert_depth_down(
+__kernel void cast_down(
     TENSOR3D_DECLARATION(in),
-    TENSOR3D_DECLARATION(out),
-    const int shift)
+    TENSOR3D_DECLARATION(out))
 {
     int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -82,12 +80,12 @@ __kernel void convert_depth_down(
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #else  /* defined(IS_DATA_TYPE_FLOAT) */
     VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
-    res0 = CONVERT_DOWN(in_data >> shift, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+    res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #endif /* defined(IS_DATA_TYPE_FLOAT) */
 }
 
-/** This function performs a up-scaling depth conversion.
+/** This function performs an up-casting
  *
  * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
  * e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
@@ -110,12 +108,10 @@ __kernel void convert_depth_down(
  * @param[in]  out_stride_z                      Stride of the source tensor in Z dimension (in bytes)
  * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem(in bytes)
  * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in]  shift                             The integer shift amount value. Supported data types: S32
  */
-__kernel void convert_depth_up(
+__kernel void cast_up(
     TENSOR3D_DECLARATION(in),
-    TENSOR3D_DECLARATION(out),
-    const int shift)
+    TENSOR3D_DECLARATION(out))
 {
     int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
@@ -132,7 +128,7 @@ __kernel void convert_depth_up(
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #else  /* defined(IS_DATA_TYPE_FLOAT) */
     VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
-    res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)) << shift;
+    res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
    STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #endif /* defined(IS_DATA_TYPE_FLOAT) */
 }
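Both OpenCL kernels now compile down to a plain conversion with no shift applied; which flavour of conversion runs is still decided by the ConvertPolicy, presumably through the CONVERT_DOWN macro defined at the top of cast.cl (its definition is not shown in this hunk). As a reference point, a minimal scalar sketch of the two policies when narrowing, e.g. int32 -> uint8 (illustrative only, not taken from the patch):

    #include <algorithm>
    #include <cstdint>

    uint8_t cast_down_wrap(int32_t v)
    {
        return static_cast<uint8_t>(v); // WRAP: keeps the low 8 bits, value wraps modulo 2^8
    }

    uint8_t cast_down_saturate(int32_t v)
    {
        return static_cast<uint8_t>(std::clamp(v, 0, 255)); // SATURATE: clamps to [0, 255] first
    }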
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.h b/src/core/CL/kernels/CLDepthConvertLayerKernel.h
deleted file mode 100644
index 8b511c6707..0000000000
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLSimple3DKernel.h"
-
-#include <cstdint>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the depth conversion kernel. */
-class CLDepthConvertLayerKernel : public ICLSimple3DKernel
-{
-public:
-    /** Set the input and output of the kernel.
-     *
-     * Valid conversions Input -> Output :
-     *
-     *   - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
-     *   - U8  -> S8, U16, S16, U32, S32, F16, F32
-     *   - U16 -> U8, S8, S16, U32, S32, F16, F32
-     *   - S16 -> U8, S8, U16, U32, S32, F16, F32
-     *   - U32 -> U8, S8, U16, S16, S32, F16, F32
-     *   - S32 -> U8, S8, U16, S16, U32, F16, F32
-     *   - F16 -> U8, S8, U16, S16, U32, F32
-     *   - F32 -> U8, S8, U16, S16, U32, F16
-     *
-     * @param[in]  input  The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
-     * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
-     * @param[in]  policy Conversion policy
-     * @param[in]  shift  Value for down/up conversions. Must be 0 <= shift < 8.
-     */
-    void configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
-    /** Set the input and output of the kernel.
-     *
-     * Valid conversions Input -> Output :
-     *
-     *   - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
-     *   - U8  -> S8, U16, S16, U32, S32, F16, F32
-     *   - U16 -> U8, S8, S16, U32, S32, F16, F32
-     *   - S16 -> U8, S8, U16, U32, S32, F16, F32
-     *   - U32 -> U8, S8, U16, S16, S32, F16, F32
-     *   - S32 -> U8, S8, U16, S16, U32, F16, F32
-     *   - F16 -> U8, S8, U16, S16, U32, F32
-     *   - F32 -> U8, S8, U16, S16, U32, F16
-     *
-     * @param[in]  compile_context The compile context to be used.
-     * @param[in]  input           The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
-     * @param[out] output          The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
-     * @param[in]  policy          Conversion policy
-     * @param[in]  shift           Value for down/up conversions. Must be 0 <= shift < 8.
-     */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel
-     *
-     * @param[in] input  Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
-     * @param[in] output Destination tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
-     * @param[in] policy Conversion policy
-     * @param[in] shift  Value for down/up conversions. Must be 0 <= shift < 8.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift);
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index b11e135a0d..ea15f4eddd 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -35,7 +35,6 @@
 #include "src/core/NEON/kernels/NECol2ImKernel.h"
 #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
 #include "src/core/NEON/kernels/NECropKernel.h"
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
 #include "src/core/NEON/kernels/NEDepthToSpaceLayerKernel.h"
 #include "src/core/NEON/kernels/NEFFTDigitReverseKernel.h"
 #include "src/core/NEON/kernels/NEFFTRadixStageKernel.h"
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h b/src/core/NEON/kernels/NEDepthConvertLayerKernel.h
deleted file mode 100644
index 30fe1ed2e6..0000000000
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_DEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_DEPTHCONVERTKERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Depth conversion kernel
- *  This function ignores the scale and zeroPoint of quanized tensors, i.e. QASYMM8 input is treated as uint8 values.
- */
-class NEDepthConvertLayerKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEDepthConvertLayerKernel";
-    }
-    /** Default constructor*/
-    NEDepthConvertLayerKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEDepthConvertLayerKernel(const NEDepthConvertLayerKernel &) = delete;
-    /** Default move constructor */
-    NEDepthConvertLayerKernel(NEDepthConvertLayerKernel &&) = default;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEDepthConvertLayerKernel &operator=(const NEDepthConvertLayerKernel &) = delete;
-    /** Default move assignment operator */
-    NEDepthConvertLayerKernel &operator=(NEDepthConvertLayerKernel &&) = default;
-    /** Default destructor */
-    ~NEDepthConvertLayerKernel() = default;
-    /** Set the input and output of the kernel
-     *
-     * Valid conversions Input -> Output :
-     *
-     *   - QASYMM8_SIGNED -> S16, S32, F32, F16
-     *   - QASYMM8        -> U16, S16, S32, F32, F16
-     *   - U8             -> U16, S16, S32, F32, F16
-     *   - U16            -> U8, U32
-     *   - S16            -> QASYMM8_SIGNED, U8, S32
-     *   - BFLOAT16       -> F32
-     *   - F16            -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8
-     *   - S32            -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8
-     *   - F32            -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
-     *
-     * @param[in]  input  The input tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
-     * @param[out] output The output tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
-     * @param[in]  policy Conversion policy.
-     * @param[in]  shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
-     */
-    void configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConvertLayerKernel
-     *
-     * @param[in] input  Source tensor info. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
-     * @param[in] output Destination tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
-     * @param[in] policy Conversion policy
-     * @param[in] shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift = 0);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    const ITensor *_input;
-    ITensor       *_output;
-    ConvertPolicy  _policy;
-    uint32_t       _shift;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEDEPTHCONVERTKERNEL_H */
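With both legacy kernel headers deleted, the conversion tables they documented are now enforced only by validate_arguments() in the new kernels. A hedged sketch of a pre-flight check a caller might perform (the call site below is assumed for illustration, not part of this patch; CpuCastKernel::validate itself appears further down):

    // Probe whether a cast pair is supported before configuring anything.
    using namespace arm_compute;

    TensorInfo src_info(TensorShape(16U, 16U), 1, DataType::U8);
    TensorInfo dst_info(TensorShape(16U, 16U), 1, DataType::S16);

    const Status st = cpu::kernels::CpuCastKernel::validate(&src_info, &dst_info, ConvertPolicy::SATURATE);
    if(st.error_code() != ErrorCode::OK)
    {
        // U8 -> S16 cast not available on this build/target
    }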
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp b/src/core/cpu/kernels/CpuCastKernel.cpp
index 4b5208eeb6..46f3c330ef 100644
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
+++ b/src/core/cpu/kernels/CpuCastKernel.cpp
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
+#include "src/core/cpu/kernels/CpuCastKernel.h"
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
@@ -36,161 +36,159 @@
 #include "src/core/helpers/WindowHelpers.h"
 #include "support/SaturateCast.h"
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(output);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(output);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(dst);
     ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_RETURN_ERROR_ON(input == output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
+    ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
                                                          DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
                                                          DataType::F32, DataType::S32);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
                                                          DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
                                                          DataType::U32, DataType::S32, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8_SIGNED && (output->data_type() != DataType::S16 && output->data_type() != DataType::S32
-                                                                                       && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8_SIGNED && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::S32
                                                                                      && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
                                     "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16
-                                                                                && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16
                                                                               && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
                                     "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16
-                                                                           && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16
                                                                          && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
                                     "Only data_types supported [in] U8 -> [out] U16, S16, S32, F16, F32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U16 && (output->data_type() != DataType::U8 && output->data_type() != DataType::U32),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U16 && (dst->data_type() != DataType::U8 && dst->data_type() != DataType::U32),
                                     "Only data_types supported [in] U16 -> [out] U8, U32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::U8 && output->data_type() != DataType::S32),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::U8 && dst->data_type() != DataType::S32),
                                     "Only data_types supported [in] S16 -> [out] U8, S32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::BFLOAT16 && output->data_type() != DataType::F32,
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::BFLOAT16 && dst->data_type() != DataType::F32,
                                     "Only data_types supported [in] BFLOAT16 -> [out] F32");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
-                                                                            && output->data_type() != DataType::U8
-                                                                            && output->data_type() != DataType::F32 && output->data_type() != DataType::S32),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
                                                                           && dst->data_type() != DataType::U8
                                                                           && dst->data_type() != DataType::F32 && dst->data_type() != DataType::S32),
                                     "Only data_types supported [in] F16 -> [out] QASYMM8, F32, S32, U8");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
-                                                                            && output->data_type() != DataType::F16 && output->data_type() != DataType::BFLOAT16
-                                                                            && output->data_type() != DataType::S32 && output->data_type() != DataType::U8),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
                                                                           && dst->data_type() != DataType::F16 && dst->data_type() != DataType::BFLOAT16
                                                                           && dst->data_type() != DataType::S32 && dst->data_type() != DataType::U8),
                                     "Only data_types supported [in] F32 -> [out] QASYMM8, BFLOAT16, F16, S32, U8");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
-                                                                            && output->data_type() != DataType::F16
-                                                                            && output->data_type() != DataType::F32 && output->data_type() != DataType::U8),
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
                                                                           && dst->data_type() != DataType::F16
                                                                           && dst->data_type() != DataType::F32 && dst->data_type() != DataType::U8),
                                     "Only data_types supported [in] S32 -> [out] QASYMM8, F16, F32, U8");
 
-    // Validate in case of configured output
-    if(output->total_size() > 0)
+    // Validate in case of configured dst
+    if(dst->total_size() > 0)
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
     }
 
     return Status{};
 }
 } // namespace
 
-NEDepthConvertLayerKernel::NEDepthConvertLayerKernel()
-    : _input(nullptr), _output(nullptr), _policy(), _shift(0)
+void CpuCastKernel::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
 {
-}
-
-void NEDepthConvertLayerKernel::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
 
-    // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
+    // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given)
+    set_shape_if_empty(*dst, src->tensor_shape());
 
-    _input  = input;
-    _output = output;
     _policy = policy;
-    _shift  = shift;
 
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
 
     // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps());
+    Window win = calculate_max_window(*src, Steps());
 
     ICPPKernel::configure(win);
 }
 
-Status NEDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
+Status CpuCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy));
     return Status{};
 }
 
-void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info)
+void CpuCastKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(_input, _output);
-    ARM_COMPUTE_ERROR_ON(_input == _output);
 
     const auto window_start_x = static_cast<int>(window.x().start());
     const auto window_end_x   = static_cast<int>(window.x().end());
     const int  window_step_x  = 16;
 
+    const ITensor *_src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *_dst = tensors.get_tensor(TensorType::ACL_DST);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+    ARM_COMPUTE_ERROR_ON(_src == _dst);
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+
     Window win{ window };
     win.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    Iterator input(_input, win);
-    Iterator output(_output, win);
+    Iterator src(_src, win);
+    Iterator dst(_dst, win);
 
-    switch(_input->info()->data_type())
+    switch(_src->info()->data_type())
     {
         case DataType::QASYMM8_SIGNED:
        {
-            const int16x8_t b = vdupq_n_s16(_shift);
-
-            switch(_output->info()->data_type())
+            switch(_dst->info()->data_type())
             {
                 case DataType::S16:
                 {
                     /* Up-conversion QASYMM8_SIGNED -> S16 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
-                        int        x          = window_start_x;
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int16_t *>(dst.ptr());
+                        int        x       = window_start_x;
 
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
                                 }
                            };
 
-                            vst1q_s16(output_ptr + x, texels.val[0]);
-                            vst1q_s16(output_ptr + x + 8, texels.val[1]);
+                            vst1q_s16(dst_ptr + x, texels.val[0]);
+                            vst1q_s16(dst_ptr + x + 8, texels.val[1]);
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<int16_t>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<int16_t>(*(src_ptr + x));
                        }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 case DataType::S32:
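The hunk above is the heart of the port: the kernel no longer caches _input/_output members, and run_op() receives its tensors through an ITensorPack at execution time. A sketch of how such a stateless kernel is typically driven (the scheduler invocation is commented out because its exact form is outside this patch; names here follow the signatures shown above):

    // Illustrative usage of the stateless pattern introduced above.
    // configure() sees only ITensorInfo; real tensors are bound per run.
    arm_compute::cpu::kernels::CpuCastKernel cast;
    cast.configure(&src_info, &dst_info, arm_compute::ConvertPolicy::SATURATE);

    arm_compute::ITensorPack pack;
    pack.add_const_tensor(arm_compute::TensorType::ACL_SRC, &src_tensor); // read-only source
    pack.add_tensor(arm_compute::TensorType::ACL_DST, &dst_tensor);      // writable destination
    // NEScheduler::get().schedule_op(&cast, IScheduler::Hints(Window::DimY), cast.window(), pack);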
@@ -198,35 +196,35 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                     /* Up-conversion QASYMM8_SIGNED -> S32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
-                        int        x          = window_start_x;
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+                        int        x       = window_start_x;
 
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
                                 }
                             };
 
-                            vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
-                            vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                            vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+                            vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 case DataType::F32:
@@ -234,34 +232,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                     /* Up-conversion QASYMM8_SIGNED -> F32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
                         int        x       = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
+                            const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(src.ptr()));
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
                                 }
                             };
-                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
-                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                            vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+                            vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                 case DataType::F16:
@@ -270,38 +268,38 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                     /* Up-conversion QASYMM8_SIGNED -> F16 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-                        int        x          = window_start_x;
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
+                        int        x       = window_start_x;
 
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
                                 }
                             };
-                            vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
-                            vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+                            vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0]));
+                            vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                }
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                 default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
+                    ARM_COMPUTE_ERROR("dst data type not supported");
             }
             break;
         }
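Every up-conversion in this file follows the same mechanical change: the vdupq_n_*/vshlq_* shift plumbing disappears and only the widening moves remain. A standalone sketch of the idiom, isolated from the kernel (assumes an AArch64/NEON toolchain; illustrative, not taken from the patch):

    #include <arm_neon.h>
    #include <cstdint>

    // Widen 16 x int8 to 16 x int32 via the vmovl chain, no shifts involved.
    void widen_s8_to_s32(const int8_t *in, int32_t *out)
    {
        const int8x16_t v  = vld1q_s8(in);
        const int16x8_t lo = vmovl_s8(vget_low_s8(v));   // elements 0..7  -> s16
        const int16x8_t hi = vmovl_s8(vget_high_s8(v));  // elements 8..15 -> s16
        vst1q_s32(out + 0,  vmovl_s16(vget_low_s16(lo)));
        vst1q_s32(out + 4,  vmovl_s16(vget_high_s16(lo)));
        vst1q_s32(out + 8,  vmovl_s16(vget_low_s16(hi)));
        vst1q_s32(out + 12, vmovl_s16(vget_high_s16(hi)));
    }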
@@ -309,43 +307,40 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
         case DataType::QASYMM8:
         case DataType::U8:
         {
-            const int16x8_t b = vdupq_n_s16(_shift);
-
-            switch(_output->info()->data_type())
+            switch(_dst->info()->data_type())
             {
                 case DataType::S16:
                 {
                     /* Up-conversion U8 -> S16 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int16_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
                                 }
                             };
 
-                            vst1q_s16(output_ptr + x, texels.val[0]);
-                            vst1q_s16(output_ptr + x + 8, texels.val[1]);
+                            vst1q_s16(dst_ptr + x, texels.val[0]);
+                            vst1q_s16(dst_ptr + x + 8, texels.val[1]);
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            auto in           = static_cast<int32_t>(*(input_ptr + x));
-                            *(output_ptr + x) = in << _shift;
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 case DataType::S32:
@@ -353,36 +348,35 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                     /* Up-conversion U8 -> S32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
                                 }
                             };
 
-                            vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
-                            vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                            vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+                            vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            auto in           = static_cast<uint32_t>(*(input_ptr + x));
-                            *(output_ptr + x) = in << _shift;
+                            *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 case DataType::F32:
@@ -390,35 +384,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                     /* Up-conversion U8 -> F32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
                                }
                             };
-                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
-                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                            vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+                            vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            auto in           = static_cast<uint32_t>(*(input_ptr + x));
-                            *(output_ptr + x) = static_cast<float>(in << _shift);
+                            *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                 case DataType::F16:
@@ -427,32 +420,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                     /* Up-conversion U8 -> F16 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
 
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
                                 }
                             };
-                            vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
-                            vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+                            vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0]));
+                            vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
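The down-conversions that follow differentiate on ConvertPolicy: the SATURATE branches use the saturating narrows (vqmovn/vqmovun), while the WRAP branches use the plain vmovn. A minimal side-by-side sketch of the two flavours (illustrative only; NEON target assumed):

    #include <arm_neon.h>
    #include <cstdint>

    // Narrow 16 x int16 to 16 x uint8 with both policies.
    void narrow_s16_to_u8(const int16_t *in, uint8_t *out_sat, uint8_t *out_wrap)
    {
        const int16x8_t lo = vld1q_s16(in);
        const int16x8_t hi = vld1q_s16(in + 8);

        // SATURATE: vqmovun clamps signed lanes into [0, 255]
        vst1q_u8(out_sat, vcombine_u8(vqmovun_s16(lo), vqmovun_s16(hi)));

        // WRAP: plain vmovn keeps the low byte of each lane
        vst1q_u8(out_wrap, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(lo)),
                                       vmovn_u16(vreinterpretq_u16_s16(hi))));
    }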
@@ -461,55 +454,53 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                 case DataType::U16:
                 {
                     /* Up-conversion U8 -> U16 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint16_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint16_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
 
                             const uint16x8x2_t texels =
                             {
                                 {
-                                    vshlq_u16(vmovl_u8(vget_low_u8(texels_u8)), b),
-                                    vshlq_u16(vmovl_u8(vget_high_u8(texels_u8)), b)
+                                    vmovl_u8(vget_low_u8(texels_u8)),
+                                    vmovl_u8(vget_high_u8(texels_u8))
                                 }
                             };
 
-                            vst1q_u16(output_ptr + x, texels.val[0]);
-                            vst1q_u16(output_ptr + x + 8, texels.val[1]);
+                            vst1q_u16(dst_ptr + x, texels.val[0]);
+                            vst1q_u16(dst_ptr + x + 8, texels.val[1]);
                        }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<uint16_t>(*(input_ptr + x)) << _shift;
+                            *(dst_ptr + x) = static_cast<uint16_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
+                    ARM_COMPUTE_ERROR("dst data type not supported");
             }
             break;
         }
         case DataType::S16:
         {
-            switch(_output->info()->data_type())
+            switch(_dst->info()->data_type())
             {
                 case DataType::QASYMM8_SIGNED:
                 {
-                    const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
                     /* Down-conversion S16 -> QASYMM8_SIGNED */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
                         execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
 
                             int x = window_start_x;
                             for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -517,28 +508,28 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                                 const int16x8x2_t texels =
                                 {
                                     {
-                                        vqshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
                                     }
                                 };
 
-                                vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
+                                vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
                             }
 
                             // Compute left-over elements
                             for(; x < window_end_x; ++x)
                             {
-                                *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
+                                *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
                             }
                         },
-                        input, output);
+                        src, dst);
                     }
                     else
                     {
                         execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
 
                             int x = window_start_x;
                             for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -546,35 +537,33 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                                 const int16x8x2_t texels =
                                 {
                                     {
-                                        vshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
                                     }
                                 };
 
-                                vst1q_s8(output_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
+                                vst1q_s8(dst_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
                             }
 
                             // Compute left-over elements
                             for(; x < window_end_x; ++x)
                             {
-                                *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
+                                *(dst_ptr + x) = static_cast<int8_t>(*(src_ptr + x));
                             }
                         },
-                        input, output);
+                        src, dst);
                     }
                     break;
                 }
                 case DataType::U8:
                 {
-                    const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
                     /* Down-conversion S16 -> U8 */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
                         execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
 
                             int x = window_start_x;
                             for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -582,28 +571,28 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                                 const int16x8x2_t texels =
                                 {
                                     {
-                                        vqshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
                                     }
                                 };
 
-                                vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
                             }
 
                             // Compute left-over elements
                             for(; x < window_end_x; ++x)
                             {
-                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                                *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
                             }
                         },
-                        input, output);
+                        src, dst);
                     }
                     else
                     {
                         execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
 
                             int x = window_start_x;
                             for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -611,34 +600,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                                 const int16x8x2_t texels =
                                 {
                                     {
-                                        vshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
                                     }
                                 };
 
-                                vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
-                                                                     vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
+                                                                  vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
                             }
 
                             // Compute left-over elements
                             for(; x < window_end_x; ++x)
                             {
-                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                                *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
                             }
                         },
-                        input, output);
+                        src, dst);
                     }
                     break;
                 }
                 case DataType::S32:
                 {
-                    const int32x4_t b = vdupq_n_s32(_shift);
-
                     /* Up-conversion S16 -> S32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -646,56 +633,54 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const int16x8x2_t texels =
                             {
                                 {
-                                    vld1q_s16(input_ptr + x),
-                                    vld1q_s16(input_ptr + x + 8)
+                                    vld1q_s16(src_ptr + x),
+                                    vld1q_s16(src_ptr + x + 8)
                                 }
                             };
 
                             const int32x4x4_t texels_s32 =
                             {
                                 {
-                                    vshlq_s32(vmovl_s16(vget_low_s16(texels.val[0])), b),
-                                    vshlq_s32(vmovl_s16(vget_high_s16(texels.val[0])), b),
-                                    vshlq_s32(vmovl_s16(vget_low_s16(texels.val[1])), b),
-                                    vshlq_s32(vmovl_s16(vget_high_s16(texels.val[1])), b)
+                                    vmovl_s16(vget_low_s16(texels.val[0])),
+                                    vmovl_s16(vget_high_s16(texels.val[0])),
+                                    vmovl_s16(vget_low_s16(texels.val[1])),
+                                    vmovl_s16(vget_high_s16(texels.val[1]))
                                 }
                             };
 
-                            vst1q_s32(output_ptr + x, texels_s32.val[0]);
-                            vst1q_s32(output_ptr + x + 4, texels_s32.val[1]);
-                            vst1q_s32(output_ptr + x + 8, texels_s32.val[2]);
-                            vst1q_s32(output_ptr + x + 12, texels_s32.val[3]);
+                            vst1q_s32(dst_ptr + x, texels_s32.val[0]);
+                            vst1q_s32(dst_ptr + x + 4, texels_s32.val[1]);
+                            vst1q_s32(dst_ptr + x + 8, texels_s32.val[2]);
+                            vst1q_s32(dst_ptr + x + 12, texels_s32.val[3]);
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
+                    ARM_COMPUTE_ERROR("dst data type not supported");
             }
             break;
         }
         case DataType::U16:
         {
-            switch(_output->info()->data_type())
+            switch(_dst->info()->data_type())
            {
                 case DataType::U8:
                 {
-                    const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
                     /* Down-conversion U16 -> U8 */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
                         execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+                            const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
 
                             int x = window_start_x;
                             for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -703,28 +688,28 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                                 const uint16x8x2_t texels =
                                 {
                                     {
-                                        vqshlq_u16(vld1q_u16(input_ptr + x), b),
-                                        vqshlq_u16(vld1q_u16(input_ptr + x + 8), b)
+                                        vld1q_u16(src_ptr + x),
+                                        vld1q_u16(src_ptr + x + 8)
                                     }
                                 };
 
-                                vst1q_u8(output_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
                             }
 
                             // Compute left-over elements
                             for(; x < window_end_x; ++x)
                             {
-                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                                *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
                             }
                         },
-                        input, output);
+                        src, dst);
                     }
                     else
                     {
                         execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+                            const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
 
                             int x = window_start_x;
                             for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -732,34 +717,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                                 const uint16x8x2_t texels =
                                 {
                                     {
-                                        vshlq_u16(vld1q_u16(input_ptr + x), b),
-                                        vshlq_u16(vld1q_u16(input_ptr + x + 8), b)
+                                        vld1q_u16(src_ptr + x),
+                                        vld1q_u16(src_ptr + x + 8)
                                     }
                                 };
 
-                                vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
                             }
 
                             // Compute left-over elements
                             for(; x < window_end_x; ++x)
                             {
-                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                                *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
                             }
                         },
-                        input, output);
+                        src, dst);
                     }
                     break;
                 }
                 case DataType::U32:
                 {
-                    const int32x4_t b = vdupq_n_s32(_shift);
-
                     /* Up-conversion U16 -> U32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint32_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint32_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -767,42 +750,42 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const uint16x8x2_t texels =
                             {
                                 {
-                                    vld1q_u16(input_ptr + x),
-                                    vld1q_u16(input_ptr + x + 8)
+                                    vld1q_u16(src_ptr + x),
+                                    vld1q_u16(src_ptr + x + 8)
                                 }
                             };
 
-                            vst1q_u32(output_ptr + x, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[0])), b));
-                            vst1q_u32(output_ptr + x + 4, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[0])), b));
-                            vst1q_u32(output_ptr + x + 8, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[1])), b));
-                            vst1q_u32(output_ptr + x + 12, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[1])), b));
+                            vst1q_u32(dst_ptr + x, vmovl_u16(vget_low_u16(texels.val[0])));
+                            vst1q_u32(dst_ptr + x + 4, vmovl_u16(vget_high_u16(texels.val[0])));
+                            vst1q_u32(dst_ptr + x + 8, vmovl_u16(vget_low_u16(texels.val[1])));
+                            vst1q_u32(dst_ptr + x + 12, vmovl_u16(vget_high_u16(texels.val[1])));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<uint32_t>(*(input_ptr + x) << _shift);
+                            *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
+                    ARM_COMPUTE_ERROR("dst data type not supported");
             }
             break;
         }
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
         case DataType::BFLOAT16:
-            switch(_output->info()->data_type())
+            switch(_dst->info()->data_type())
            {
                 case DataType::F32:
                 {
                     /* Up-conversion BFLOAT16 -> F32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const bfloat16 *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const bfloat16 *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -810,48 +793,45 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const uint16x8x2_t texels =
                             {
                                 {
-                                    vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr())),
-                                    vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr()) + 8)
+                                    vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr())),
+                                    vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr()) + 8)
                                 }
                             };
 
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()),
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()),
                                       vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16)));
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4,
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 4,
                                       vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16)));
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8,
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 8,
                                       vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16)));
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12,
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 12,
                                       vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16)));
                         }
 
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = float(*(input_ptr + x));
+                            *(dst_ptr + x) = float(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 default:
-                    ARM_COMPUTE_ERROR("Output data type unsupported");
+                    ARM_COMPUTE_ERROR("dst data type unsupported");
             }
             break;
 #endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
         case DataType::F16:
-            switch(_output->info()->data_type())
+            switch(_dst->info()->data_type())
             {
                 case DataType::QASYMM8_SIGNED:
                 {
-                    const float16_t   scale_s = 1 << _shift;
-                    const float16x8_t scale   = vdupq_n_f16(scale_s);
-
                     /* Down-conversion F16 -> QASYMM8_SIGNED (Always saturating) */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -859,34 +839,31 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const float16x8x2_t texels =
                             {
                                 {
-                                    vmulq_f16(vld1q_f16(input_ptr + x), scale),
-                                    vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8),
                                 }
                             };
 
-                            vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
+                            vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s);
+                            *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 case DataType::QASYMM8:
                 case DataType::U8:
                 {
-                    const float16_t   scale_s = 1 << _shift;
-                    const float16x8_t scale   = vdupq_n_f16(scale_s);
-
                     /* Down-conversion F16 -> QASYMM8/U8 (Always saturating) */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -894,34 +871,31 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const float16x8x2_t texels =
                             {
                                 {
-                                    vmulq_f16(vld1q_f16(input_ptr + x), scale),
-                                    vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8),
                                 }
                             };
 
-                            vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
+                            vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s);
+                            *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                }
                 case DataType::F32:
                 {
-                    const float       scale_s = 1 << _shift;
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
                     /* Up-conversion F16 -> F32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -929,35 +903,32 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const float16x8x2_t texels =
                             {
                                 {
-                                    vld1q_f16(input_ptr + x),
-                                    vld1q_f16(input_ptr + x + 8)
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8)
                                 }
                             };
-                            vst1q_f32(output_ptr + x, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale));
-                            vst1q_f32(output_ptr + x + 4, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale));
-                            vst1q_f32(output_ptr + x + 8, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale));
-                            vst1q_f32(output_ptr + x + 12, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale));
+                            vst1q_f32(dst_ptr + x, vcvt_f32_f16(vget_low_f16(texels.val[0])));
+                            vst1q_f32(dst_ptr + x + 4, vcvt_f32_f16(vget_high_f16(texels.val[0])));
+                            vst1q_f32(dst_ptr + x + 8, vcvt_f32_f16(vget_low_f16(texels.val[1])));
+                            vst1q_f32(dst_ptr + x + 12, vcvt_f32_f16(vget_high_f16(texels.val[1])));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
+                            *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 case DataType::S32:
                 {
-                    const float       scale_s = 1 << _shift;
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
                     /* Up-conversion F16 -> S32 */
                     execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
 
                         int x = window_start_x;
                         for(; x <= (window_end_x - window_step_x); x += window_step_x)
@@ -965,45 +936,42 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
                             const float16x8x2_t texels =
                             {
                                 {
-                                    vld1q_f16(input_ptr + x),
-                                    vld1q_f16(input_ptr + x + 8)
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8)
                                 }
                             };
 
-                            vst1q_s32(output_ptr + x, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale)));
-                            vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale)));
-                            vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale)));
-                            vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale)));
+                            vst1q_s32(dst_ptr + x, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[0]))));
+                            vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[0]))));
+                            vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[1]))));
+                            vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[1]))));
                         }
 
                         // Compute left-over elements
                         for(; x < window_end_x; ++x)
                         {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
                         }
                     },
-                    input, output);
+                    src, dst);
                     break;
                 }
                 default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
+                    ARM_COMPUTE_ERROR("dst data type not supported");
             }
             break;
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
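With the scale = 1 << shift multiplier gone, the F16 paths above reduce to bare vcvt conversions. A standalone sketch of the core of that path (guarded in the real code by __ARM_FEATURE_FP16_VECTOR_ARITHMETIC; illustrative only, not taken from the patch):

    #include <arm_neon.h>

    // Convert 8 half-precision values to single precision, no scaling.
    void f16_to_f32(const float16_t *in, float *out)
    {
        const float16x8_t v = vld1q_f16(in);
        vst1q_f32(out + 0, vcvt_f32_f16(vget_low_f16(v)));   // lanes 0..3
        vst1q_f32(out + 4, vcvt_f32_f16(vget_high_f16(v)));  // lanes 4..7
    }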
*>(output.ptr()); + const auto src_ptr = reinterpret_cast<const float *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1011,24 +979,24 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const float32x4x4_t texels = { { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale) + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12) } }; - vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); - vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); + vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); + vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s); + *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x)); } }, - input, output); + src, dst); break; } #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ @@ -1038,37 +1006,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info /* Down-conversion F32 -> BFLOAT16 */ execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const float *>(input.ptr()); - const auto output_ptr = reinterpret_cast<bfloat16 *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const float *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<bfloat16 *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) { - wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(input.ptr()), - reinterpret_cast<uint16_t *>(output.ptr())); - wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(input.ptr()) + 8, - reinterpret_cast<uint16_t *>(output.ptr()) + 8); + wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()), + reinterpret_cast<uint16_t *>(dst.ptr())); + wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()) + 8, + reinterpret_cast<uint16_t *>(dst.ptr()) + 8); } for(; x < window_end_x; ++x) { - *(output_ptr + x) = *(input_ptr + x); + *(dst_ptr + x) = *(src_ptr + x); } }, - input, output); + src, dst); break; } #endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */ case DataType::S32: { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - /* Conversion F32 -> S32 */ execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const float *>(input.ptr()); - const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const float *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1076,39 +1041,36 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const float32x4x4_t texels = { { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale), + 
vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12), } }; - vst1q_s32(output_ptr + x, vcvtq_s32_f32(texels.val[0])); - vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(texels.val[1])); - vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(texels.val[2])); - vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(texels.val[3])); + vst1q_s32(dst_ptr + x, vcvtq_s32_f32(texels.val[0])); + vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(texels.val[1])); + vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(texels.val[2])); + vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(texels.val[3])); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s); + *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x)); } }, - input, output); + src, dst); break; } case DataType::QASYMM8: case DataType::U8: { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - /* Down-conversion F32 -> U8 */ execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const float *>(input.ptr()); - const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const float *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1116,36 +1078,33 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const float32x4x4_t texels = { { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale), + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12), } }; - vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1]))))); - vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3]))))); + vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1]))))); + vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3]))))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s); + *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x)); } }, - input, output); + src, dst); break; } case DataType::QASYMM8_SIGNED: { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - /* Down-conversion F32 -> QASYMM8_SIGNED */ execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const float *>(input.ptr()); - const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const float *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1153,45 +1112,42 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const float32x4x4_t texels = { { - vmulq_f32(vld1q_f32(input_ptr + x), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 4), scale), - vmulq_f32(vld1q_f32(input_ptr 
+ x + 8), scale), - vmulq_f32(vld1q_f32(input_ptr + x + 12), scale), + vld1q_f32(src_ptr + x), + vld1q_f32(src_ptr + x + 4), + vld1q_f32(src_ptr + x + 8), + vld1q_f32(src_ptr + x + 12), } }; - vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1]))))); - vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3]))))); + vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1]))))); + vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3]))))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s); + *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x)); } }, - input, output); + src, dst); break; } default: - ARM_COMPUTE_ERROR("Output data type not supported"); + ARM_COMPUTE_ERROR("dst data type not supported"); } break; case DataType::S32: - switch(_output->info()->data_type()) + switch(_dst->info()->data_type()) { #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: { - const float scale_s = 1.f / (1 << _shift); - const float32x4_t scale = vdupq_n_f32(scale_s); - /* Down-conversion S32 -> F16 */ execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr()); - const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1199,37 +1155,34 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const float32x4x4_t texels = { { - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x)), scale), - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 4)), scale), - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 8)), scale), - vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 12)), scale) + vcvtq_f32_s32(vld1q_s32(src_ptr + x)), + vcvtq_f32_s32(vld1q_s32(src_ptr + x + 4)), + vcvtq_f32_s32(vld1q_s32(src_ptr + x + 8)), + vcvtq_f32_s32(vld1q_s32(src_ptr + x + 12)) } }; - vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); - vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); + vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1]))); + vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3]))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s); + *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x)); } }, - input, output); + src, dst); break; } #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ case DataType::F32: { - const int scale_s = 1.f / (1 << _shift); - const int32x4_t scale = vdupq_n_s32(scale_s); - /* Conversion S32 -> F32 */ execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr()); - const auto output_ptr = reinterpret_cast<float *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr()); + const auto dst_ptr = 
reinterpret_cast<float *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1237,39 +1190,37 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const int32x4x4_t texels = { { - vmulq_s32(vld1q_s32(input_ptr + x), scale), - vmulq_s32(vld1q_s32(input_ptr + x + 4), scale), - vmulq_s32(vld1q_s32(input_ptr + x + 8), scale), - vmulq_s32(vld1q_s32(input_ptr + x + 12), scale), + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12), } }; - vst1q_f32(output_ptr + x, vcvtq_f32_s32(texels.val[0])); - vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(texels.val[1])); - vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(texels.val[2])); - vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(texels.val[3])); + vst1q_f32(dst_ptr + x, vcvtq_f32_s32(texels.val[0])); + vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(texels.val[1])); + vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(texels.val[2])); + vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(texels.val[3])); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s); + *(dst_ptr + x) = static_cast<float>(*(src_ptr + x)); } }, - input, output); + src, dst); break; } case DataType::QASYMM8_SIGNED: { - const int32x4_t b = vdupq_n_s32(-static_cast<int32_t>(_shift)); - /* Down-conversion S32 -> QASYMM8_SIGNED */ if(ConvertPolicy::SATURATE == _policy) { execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr()); - const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1277,30 +1228,30 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const int32x4x4_t texels = { { - vqshlq_s32(vld1q_s32(input_ptr + x), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 12), b) + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12), } }; - vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1])))); - vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3])))); + vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1])))); + vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3])))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift); + *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x)); } }, - input, output); + src, dst); } else { execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr()); - const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1308,39 +1259,37 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const 
int32x4x4_t texels = { { - vshlq_s32(vld1q_s32(input_ptr + x), b), - vshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vshlq_s32(vld1q_s32(input_ptr + x + 12), b) + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12) } }; - vst1_s8(output_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1])))); - vst1_s8(output_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3])))); + vst1_s8(dst_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1])))); + vst1_s8(dst_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3])))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift); + *(dst_ptr + x) = static_cast<int8_t>(*(src_ptr + x)); } }, - input, output); + src, dst); } break; } case DataType::QASYMM8: case DataType::U8: { - const int32x4_t b = vdupq_n_s32(-static_cast<int32_t>(_shift)); - /* Down-conversion S32 -> U8 */ if(ConvertPolicy::SATURATE == _policy) { execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr()); - const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1348,30 +1297,30 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const int32x4x4_t texels = { { - vqshlq_s32(vld1q_s32(input_ptr + x), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vqshlq_s32(vld1q_s32(input_ptr + x + 12), b) + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + vld1q_s32(src_ptr + x + 12) } }; - vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1])))); - vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3])))); + vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1])))); + vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3])))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift); + *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x)); } }, - input, output); + src, dst); } else { execute_window_loop(win, [&](const Coordinates &) { - const auto input_ptr = reinterpret_cast<const int32_t *>(input.ptr()); - const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr()); + const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr()); + const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr()); int x = window_start_x; for(; x <= (window_end_x - window_step_x); x += window_step_x) @@ -1379,32 +1328,40 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info const int32x4x4_t texels = { { - vshlq_s32(vld1q_s32(input_ptr + x), b), - vshlq_s32(vld1q_s32(input_ptr + x + 4), b), - vshlq_s32(vld1q_s32(input_ptr + x + 8), b), - vshlq_s32(vld1q_s32(input_ptr + x + 12), b) + vld1q_s32(src_ptr + x), + vld1q_s32(src_ptr + x + 4), + vld1q_s32(src_ptr + x + 8), + 
vld1q_s32(src_ptr + x + 12) } }; - vst1_u8(output_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1]))))); - vst1_u8(output_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3]))))); + vst1_u8(dst_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1]))))); + vst1_u8(dst_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3]))))); } // Compute left-over elements for(; x < window_end_x; ++x) { - *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift); + *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x)); } }, - input, output); + src, dst); } break; } default: - ARM_COMPUTE_ERROR("Output data type not supported"); + ARM_COMPUTE_ERROR("dst data type not supported"); } break; default: ARM_COMPUTE_ERROR("Not supported"); } } + +const char *CpuCastKernel::name() const +{ + return "CpuCastKernel.cpp"; +} +} // namespace kernels +} // namespace cpu +} // namespace arm_compute diff --git a/src/core/cpu/kernels/CpuCastKernel.h b/src/core/cpu/kernels/CpuCastKernel.h new file mode 100644 index 0000000000..2a75c5850e --- /dev/null +++ b/src/core/cpu/kernels/CpuCastKernel.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2016-2021 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef ARM_COMPUTE_CPU_CAST_KERNEL_H +#define ARM_COMPUTE_CPU_CAST_KERNEL_H + +#include "src/core/common/Macros.h" +#include "src/core/cpu/ICpuKernel.h" + +namespace arm_compute +{ +namespace cpu +{ +namespace kernels +{ +/** Casts a given tensor to a new type + * + * @note When casting between quantized types the scale and zeroPoint are ignored + */ +class CpuCastKernel : public ICpuKernel +{ +public: + CpuCastKernel() = default; + ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuCastKernel); + /** Set the src and dst of the kernel + * + * Valid conversions src -> dst : + * + * - QASYMM8_SIGNED -> S16, S32, F32, F16 + * - QASYMM8 -> U16, S16, S32, F32, F16 + * - U8 -> U16, S16, S32, F32, F16 + * - U16 -> U8, U32 + * - S16 -> QASYMM8_SIGNED, U8, S32 + * - BFLOAT16 -> F32 + * - F16 -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8 + * - S32 -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8 + * - F32 -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8 + * + * @param[in] src The src tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32. + * @param[out] dst The dst tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32. + * @param[in] policy Conversion policy. + */ + void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy); + /** Static function to check if given info will lead to a valid configuration + * + * Similar to @ref CpuCastKernel::configure() + * + * @return a status + */ + static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy); + + // Inherited methods overridden: + void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; + const char *name() const override; + +private: + ConvertPolicy _policy{ ConvertPolicy::SATURATE }; +}; +} // namespace kernels +} // namespace cpu +} // namespace arm_compute +#endif /* ARM_COMPUTE_CPU_CAST_KERNEL_H */ diff --git a/src/core/gpu/cl/ClKernelLibrary.cpp b/src/core/gpu/cl/ClKernelLibrary.cpp index 286ed4c021..b0458d7c3a 100644 --- a/src/core/gpu/cl/ClKernelLibrary.cpp +++ b/src/core/gpu/cl/ClKernelLibrary.cpp @@ -216,8 +216,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map = { "concatenate_width_x2", "concatenate.cl" }, { "concatenate_width_x4", "concatenate.cl" }, { "col2im", "col2im.cl" }, - { "convert_depth_down", "depth_convert.cl" }, - { "convert_depth_up", "depth_convert.cl" }, + { "cast_down", "cast.cl" }, + { "cast_up", "cast.cl" }, { "convert_fc_weights", "convert_fc_weights.cl" }, { "copy_tensor", "copy_tensor.cl" }, { "crop_tensor", "crop_tensor.cl" }, @@ -565,8 +565,8 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map = #include "./cl_kernels/deconvolution_layer.clembed" }, { - "depth_convert.cl", -#include "./cl_kernels/depth_convert.clembed" + "cast.cl", +#include "./cl_kernels/cast.clembed" }, { "depth_to_space.cl", diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/gpu/cl/kernels/ClCastKernel.cpp index 0d5c7a4881..7a1d5c2824 100644 --- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp +++ b/src/core/gpu/cl/kernels/ClCastKernel.cpp @@ -21,7 +21,7 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h" +#include "src/core/gpu/cl/kernels/ClCastKernel.h" #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLKernelLibrary.h" @@ -33,93 +33,80 @@ #include "src/core/CL/CLValidate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" -#include "support/StringSupport.h" -#include <cstddef> -#include <set> -#include <string> +#include "support/Cast.h" +#include "support/StringSupport.h" namespace arm_compute { +namespace opencl +{ +namespace kernels +{ namespace { -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift) +Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy) { ARM_COMPUTE_UNUSED(policy); - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON(input == output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, + ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); + ARM_COMPUTE_RETURN_ERROR_ON(src == dst); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16, DataType::U16, DataType::U32, DataType::S32, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16, DataType::U16, DataType::U32, DataType::S32, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == output->data_type(), "Input and output data types must be different"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs"); - ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == dst->data_type(), "src and dst data types must be different"); - // Validate in case of configured output - if(output->total_size() > 0) + // Validate in case of configured dst + if(dst->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); } return Status{}; } } // namespace -void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift) -{ - configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift); -} - -void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift) +void ClCastKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy) { - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - _input = input; - _output = output; + ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); - // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given) - set_shape_if_empty(*output->info(), input->info()->tensor_shape()); + // Auto initialize dst shape if not initialized (We can only auto-configure the shape, datatype must be given) + set_shape_if_empty(*dst, src->tensor_shape()); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift)); + 
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy)); - auto padding_info = get_padding_info({ input, output }); + auto padding_info = get_padding_info({ src, dst }); // Get data sizes - const size_t input_size = data_size_from_type(input->info()->data_type()); - const size_t output_size = data_size_from_type(output->info()->data_type()); + const size_t src_size = data_size_from_type(src->data_type()); + const size_t dst_size = data_size_from_type(dst->data_type()); // Get number of elements to process per iterations - const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0)); + const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0)); // Set build options CLBuildOptions build_opts; build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)); - build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration)); - build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type())); + build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration)); + build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(src->data_type())); + build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst->data_type())); // Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined - build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE"); - build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || is_data_type_float(output->info()->data_type()), "-DIS_DATA_TYPE_FLOAT"); - build_opts.add_option_if(is_data_type_quantized(input->info()->data_type()), "-DIS_DATA_TYPE_QUANTIZED"); + build_opts.add_option_if(is_data_type_float(src->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE"); + build_opts.add_option_if(is_data_type_float(src->data_type()) || is_data_type_float(dst->data_type()), "-DIS_DATA_TYPE_FLOAT"); + build_opts.add_option_if(is_data_type_quantized(src->data_type()), "-DIS_DATA_TYPE_QUANTIZED"); // Create kernel - const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up"; + const std::string kernel_name = (src_size >= dst_size) ? 
"cast_down" : "cast_up"; _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); - // Set shift arg - unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters - _kernel.setArg(idx++, shift); - // Configure kernel - Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration)); + Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration)); ICLKernel::configure_internal(win); // Collapse window @@ -132,21 +119,45 @@ void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_contex // Set config_id for enabling LWS tuning _config_id = kernel_name; _config_id += "_"; - _config_id += lower_string(string_from_data_type(input->info()->data_type())); + _config_id += lower_string(string_from_data_type(src->data_type())); _config_id += "_"; - _config_id += support::cpp11::to_string(input->info()->dimension(0)); + _config_id += support::cpp11::to_string(src->dimension(0)); _config_id += "_"; - _config_id += support::cpp11::to_string(input->info()->dimension(1)); + _config_id += support::cpp11::to_string(src->dimension(1)); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(0)); + _config_id += support::cpp11::to_string(dst->dimension(0)); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(1)); + _config_id += support::cpp11::to_string(dst->dimension(1)); } -Status CLDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift) +Status ClCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy) { - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift)); - + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy)); return Status{}; } + +void ClCastKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); + + const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC)); + auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST)); + + ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); + + Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); + Window slice = collapsed.first_slice_window_3D(); + + do + { + unsigned int idx = 0; + add_3D_tensor_argument(idx, src, slice); + add_3D_tensor_argument(idx, dst, slice); + enqueue(queue, *this, slice, lws_hint()); + } + while(collapsed.slide_window_slice_3D(slice)); +} +} // namespace kernels +} // namespace opencl } // namespace arm_compute diff --git a/src/core/gpu/cl/kernels/ClCastKernel.h b/src/core/gpu/cl/kernels/ClCastKernel.h new file mode 100644 index 0000000000..451aa9c1ab --- /dev/null +++ b/src/core/gpu/cl/kernels/ClCastKernel.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016-2021 Arm Limited. 
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_CAST_KERNEL_H
+#define ARM_COMPUTE_CL_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zeroPoint are ignored
+ */
+class ClCastKernel : public IClKernel
+{
+public:
+    ClCastKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCastKernel);
+    /** Set the src and dst of the kernel.
+     *
+     * Valid conversions src -> dst :
+     *
+     *  - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
+     *  - U8 -> S8, U16, S16, U32, S32, F16, F32
+     *  - U16 -> U8, S8, S16, U32, S32, F16, F32
+     *  - S16 -> U8, S8, U16, U32, S32, F16, F32
+     *  - U32 -> U8, S8, U16, S16, S32, F16, F32
+     *  - S32 -> U8, S8, U16, S16, U32, F16, F32
+     *  - F16 -> U8, S8, U16, S16, U32, F32
+     *  - F32 -> U8, S8, U16, S16, U32, F16
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  src             The source tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
+     * @param[out] dst             The destination tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
+     * @param[in]  policy          Conversion policy
+     */
+    void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref ClCastKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_CAST_KERNEL_H */
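
A note on the conversion semantics exercised above: with the shift argument gone, the S32 down-conversion paths pick between saturating narrows (vqmovn/vqmovun) when the policy is ConvertPolicy::SATURATE and plain truncating narrows (vmovn) otherwise, while conversions from float always saturate. A scalar sketch of the S32 -> U8 case, matching what the leftover-element loops compute; the helper name is illustrative and not part of the patch:

#include <cstdint>
#include <limits>

// Scalar reference for the S32 -> U8 cast, mirroring the vector paths above:
// SATURATE clamps to the destination range (the vqmovun/vqmovn path), while
// the non-saturating path keeps the low byte, as the vmovn narrowing does.
uint8_t cast_s32_to_u8(int32_t v, bool saturate)
{
    if(saturate)
    {
        const int32_t lo = std::numeric_limits<uint8_t>::min(); // 0
        const int32_t hi = std::numeric_limits<uint8_t>::max(); // 255
        return static_cast<uint8_t>(v < lo ? lo : (v > hi ? hi : v));
    }
    return static_cast<uint8_t>(v); // modulo-256 truncation, like vmovn
}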
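With the member tensor pointers removed, the ported CPU kernel is configured from ITensorInfo objects and receives the actual tensors at run time through an ITensorPack. A minimal usage sketch, assuming allocated src/dst tensors and the current NEScheduler::schedule_op() signature (the helper name and the DimY split hint are illustrative, not part of the patch):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/core/cpu/kernels/CpuCastKernel.h"

using namespace arm_compute;

// Hypothetical helper: src and dst are allocated tensors with matching shapes
// and one of the documented src -> dst data type pairs.
void cast_tensor(ITensor &src, ITensor &dst)
{
    cpu::kernels::CpuCastKernel cast;
    ARM_COMPUTE_ERROR_THROW_ON(cpu::kernels::CpuCastKernel::validate(src.info(), dst.info(), ConvertPolicy::SATURATE));
    cast.configure(src.info(), dst.info(), ConvertPolicy::SATURATE);

    // The kernel keeps no tensor state; the run-time inputs travel in a pack
    // keyed by ACL_SRC / ACL_DST, which run_op() unpacks.
    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    NEScheduler::get().schedule_op(&cast, Window::DimY, cast.window(), pack);
}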
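The OpenCL kernel follows the same stateless pattern: configure() only builds the program, build options, and window, and run_op() receives the tensors together with the command queue, normally via CLScheduler::enqueue_op(). A sketch under the same assumptions (allocated CL tensors, default scheduler and compile context; the helper name is illustrative):

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/gpu/cl/kernels/ClCastKernel.h"

using namespace arm_compute;

// Hypothetical helper: src and dst are allocated CL tensors with distinct,
// supported data types and matching shapes.
void cast_tensor_cl(ICLTensor &src, ICLTensor &dst)
{
    opencl::kernels::ClCastKernel cast;
    ARM_COMPUTE_ERROR_THROW_ON(opencl::kernels::ClCastKernel::validate(src.info(), dst.info(), ConvertPolicy::SATURATE));
    cast.configure(CLKernelLibrary::get().get_compile_context(), src.info(), dst.info(), ConvertPolicy::SATURATE);

    ITensorPack pack;
    pack.add_const_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);

    // enqueue_op() dispatches run_op() with the scheduler's queue; the final
    // argument controls whether the queue is flushed immediately.
    CLScheduler::get().enqueue_op(cast, pack, true);
}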