From ee939fb58e3fc50ae7c92c895f8abd1dd9f20eb3 Mon Sep 17 00:00:00 2001
From: Luca Foschiani
Date: Tue, 28 Jan 2020 10:38:07 +0000
Subject: COMPMID-2774: Add support for QASYMM8_SIGNED in NEReductionOperation,
 NEReduceMean and NEArgMinMaxLayer

Signed-off-by: Luca Foschiani
Change-Id: Icf198a983c8ce2c6cd8451a1190bb99115eac3af
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2652
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Giorgio Arena
Comments-Addressed: Arm Jenkins
---
 .../core/NEON/kernels/NEReductionOperationKernel.h |  6 +--
 arm_compute/core/NEON/wrapper/intrinsics/cvt.h     | 61 ++++++++++++++++++++++
 .../core/NEON/wrapper/intrinsics/intrinsics.h      |  1 +
 arm_compute/core/Utils.h                           |  6 +++
 4 files changed, 71 insertions(+), 3 deletions(-)
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/cvt.h

(limited to 'arm_compute/core')

diff --git a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
index 36792abee3..28cca4987b 100644
--- a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -59,7 +59,7 @@ public:
 
     /** Set the source, destination of the kernel
      *
-     * @param[in]  input  Source tensor. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW.
+     * @param[in]  input  Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW.
      * @param[out] output Destination tensor.Data types and data layouts supported: same as @p input, S32 for ARG_MIX/ARG_MAX.
      *                    Output will have the same number of dimensions as input.
      * @param[in]  axis   Axis along which to reduce. Supported reduction axis : 0
@@ -69,7 +69,7 @@ public:
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperationKernel.
      *
-     * @param[in] input  Source tensor info. Data type supported: QASYMM8/F16/F32. Data layouts supported: NCHW.
+     * @param[in] input  Source tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW.
      * @param[in] output Destination tensor info.Data types and data layouts supported: same as @p input, S32 for ARG_MIX/ARG_MAX.
      *                   Output will have the same number of dimensions as input.
      * @param[in] axis   Axis along which to reduce. Supported reduction axis : 0
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/cvt.h b/arm_compute/core/NEON/wrapper/intrinsics/cvt.h
new file mode 100644
index 0000000000..1f22e09a11
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/cvt.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CVT_H
+#define ARM_COMPUTE_WRAPPER_CVT_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCVT_TO_F32_IMPL(ptype, vtype, prefix, postfix1, postfix2)                   \
+    template <typename T>                                                            \
+    inline typename std::enable_if<std::is_same<T, float>::value, float32x4_t>::type \
+    vcvt(const vtype &a)                                                             \
+    {                                                                                \
+        return prefix##_##postfix1##_##postfix2(a);                                  \
+    }
+
+VCVT_TO_F32_IMPL(float32x4_t, uint32x4_t, vcvtq, f32, u32)
+VCVT_TO_F32_IMPL(float32x4_t, int32x4_t, vcvtq, f32, s32)
+#undef VCVT_TO_F32_IMPL
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+    return vcvtq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int32x4_t>::type
+vcvt(const float32x4_t &a)
+{
+    return vcvtq_s32_f32(a);
+}
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CVT_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index a7af352c76..51b1fcc1bd 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -33,6 +33,7 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/cgt.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/clt.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/combine.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/cvt.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/div.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/eor.h"
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 7ab78be908..4a3b01d21f 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -596,6 +596,12 @@ inline std::tuple<PixelValue, PixelValue> get_min_max(DataType dt)
             max = PixelValue(std::numeric_limits<int32_t>::max());
             break;
         }
+        case DataType::F16:
+        {
+            min = PixelValue(std::numeric_limits<half>::lowest());
+            max = PixelValue(std::numeric_limits<half>::max());
+            break;
+        }
         case DataType::F32:
         {
             min = PixelValue(std::numeric_limits<float>::lowest());
-- 
cgit v1.2.1
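
For context, the new wrapper::vcvt<T> overloads introduced in cvt.h give kernel code a single call for moving between integer accumulators and float lanes, which is what the quantized reduction paths need when a mean is computed in float and then converted back. The sketch below is illustrative only: it assumes the vcvt<float> / vcvt<int8_t> selectors shown in cvt.h above, and the function name average_lanes_s32 and its arguments are hypothetical, not taken from this commit.

    #include <arm_neon.h>

    #include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"

    // Illustrative sketch: average four 32-bit lane sums in float and convert the
    // result back to a signed 32-bit vector, as a QASYMM8_SIGNED mean path might.
    inline int32x4_t average_lanes_s32(const int32x4_t &lane_sums, float inv_count)
    {
        using namespace arm_compute;

        // int32x4_t -> float32x4_t via the vcvt<float> overload
        float32x4_t sums_f = wrapper::vcvt<float>(lane_sums);

        // Scale by 1/N to turn the sums into means
        sums_f = vmulq_n_f32(sums_f, inv_count);

        // float32x4_t -> int32x4_t via the vcvt<int8_t> overload, the selector
        // used when the destination element type is signed 8-bit
        return wrapper::vcvt<int8_t>(sums_f);
    }

Dispatching on the destination element type through std::enable_if keeps call sites uniform: the same wrapper::vcvt<T>(...) expression serves both the QASYMM8 (T = uint8_t) and QASYMM8_SIGNED (T = int8_t) code paths.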