From 18e20ff8bc31bd835e96b46c0beb5435c76572bd Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 6 May 2020 17:03:59 +0100
Subject: COMPMID-3460: Refactor NEElementwiseUnaryKernel

Removed most of the templates and refactored the code.
Performance is the same but the library size dropped by 52Kb.

Change-Id: I41ff0c0853c923d925cdaeb05f4a58c9086fff94
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3190
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 .../core/NEON/kernels/NEElementwiseUnaryKernel.h   | 29 ++++++++++++----------
 arm_compute/core/NEON/wrapper/intrinsics/exp.h     | 10 +++++++-
 arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h | 10 +++++++-
 arm_compute/core/NEON/wrapper/intrinsics/log.h     | 11 +++++++-
 arm_compute/core/NEON/wrapper/intrinsics/round.h   | 10 +++++++-
 arm_compute/core/NEON/wrapper/intrinsics/sin.h     | 11 +++++++-
 6 files changed, 63 insertions(+), 18 deletions(-)

(limited to 'arm_compute/core/NEON')

diff --git a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
index 2a4a8f8e46..9a41cecf19 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -78,23 +78,26 @@ public:
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
+private:
     /** Common signature for all the specialised arithmetic functions
      *
-     * @param[in] input An input tensor. Data types supported: F16/F32.
-     * @param[out] output The output tensor. Data types supported: F16/F32.
-     * @param[in] window Region on which to execute the kernel.
+     * @param[in] window Region on which to execute the kernel.
      */
-    using ElementwiseUnaryFunction = void(const ITensor *input, ITensor *output, const Window &window);
+    using ElementwiseUnaryPtr = void (NEElementwiseUnaryKernel::*)(const Window &window);
 
-protected:
-    // Inherited methods overridden:
-    static Status validate_arguments(ElementWiseUnary op, const ITensorInfo &input, const ITensorInfo &output);
-
-    /** Function to use for the particular tensor types passed to configure() */
-    std::function<ElementwiseUnaryFunction> _function;
+    /** Template function to run elementwise unary operation
+     *
+     * @tparam ScalarType Scalar datatype
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
+     */
+    template <typename ScalarType>
+    void elementwise_op(const Window &window);
 
-    const ITensor *_input;
-    ITensor *_output;
+    ElementwiseUnaryPtr _func;
+    const ITensor *_input;
+    ITensor *_output;
+    ElementWiseUnary _op;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEELEMENTWISEUNARYKERNEL_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/exp.h b/arm_compute/core/NEON/wrapper/intrinsics/exp.h
index f079af0ae2..4b17ebd93f 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/exp.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/exp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,18 @@ namespace wrapper
         return vexpq_##postfix(a); \
     }
 
+#define VEXPQ_IMPL_INT(vtype, postfix) \
+    inline vtype vexpq(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
 VEXPQ_IMPL(float32x4_t, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VEXPQ_IMPL(float16x8_t, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VEXPQ_IMPL_INT(int32x4_t, s32)
 
 #undef VEXPQ_IMPL
 } // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
index 2bf9f52dbe..77adcf7b8c 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,18 @@ namespace wrapper
         return prefix##_##postfix(a); \
     }
 
+#define VINVSQRT_IMPL_INT(stype, vtype, prefix, postfix) \
+    inline vtype vinvsqrt(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
 VINVSQRT_IMPL(float, float32x2_t, vinvsqrt, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VINVSQRT_IMPL(float16_t, float16x4_t, vinvsqrt, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VINVSQRT_IMPL_INT(int, int32x4_t, vinvsqrt, s32)
 
 VINVSQRT_IMPL(float, float32x4_t, vinvsqrtq, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/log.h b/arm_compute/core/NEON/wrapper/intrinsics/log.h
index bb4181ec93..682830c122 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/log.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/log.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,19 @@ namespace wrapper
         return prefix##_##postfix(a); \
     }
 
+#define VLOG_IMPL_INT(vtype, prefix, postfix) \
+    inline vtype vlog(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
 VLOG_IMPL(float32x4_t, vlogq, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VLOG_IMPL(float16x8_t, vlogq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VLOG_IMPL_INT(int32x4_t, vlogq, s32)
+
 #undef VLOG_IMPL
 } // namespace wrapper
 } // namespace arm_compute
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/round.h b/arm_compute/core/NEON/wrapper/intrinsics/round.h
index f3e0fe1ed8..d6f5a88689 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/round.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/round.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,10 +37,18 @@ namespace wrapper
         return vroundq_rte_##postfix(a); \
     }
 
+#define VROUNDQ_IMPL_INT(vtype, postfix) \
+    inline vtype vround(const vtype &a) \
+    { \
+        ARM_COMPUTE_UNUSED(a); \
+        ARM_COMPUTE_ERROR("Not supported"); \
+    }
+
 VROUNDQ_IMPL(float32x4_t, f32)
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 VROUNDQ_IMPL(float16x8_t, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VROUNDQ_IMPL_INT(int32x4_t, s32)
 
 #undef VROUNDQ_IMPL
 } // namespace wrapper
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sin.h b/arm_compute/core/NEON/wrapper/intrinsics/sin.h
index e0fe5fbff3..bca72db38a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/sin.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/sin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,11 +37,20 @@ namespace wrapper return prefix##_##postfix(a); \ } +#define VSIN_IMPL_INT(vtype, prefix, postfix) \ + inline vtype vsin(const vtype &a) \ + { \ + ARM_COMPUTE_UNUSED(a); \ + ARM_COMPUTE_ERROR("Not supported"); \ + } + VSIN_IMPL(float32x4_t, vsinq, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC VSIN_IMPL(float16x8_t, vsinq, f16) #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VSIN_IMPL_INT(int32x4_t, vsinq, s32) + #undef vsub_IMPL } // namespace wrapper } // namespace arm_compute -- cgit v1.2.1
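Note on the dispatch pattern in the kernel header hunk above: the `std::function<ElementwiseUnaryFunction>` member is replaced by `ElementwiseUnaryPtr`, a pointer-to-member-function that configure() can point at a single instantiation of the templated `elementwise_op<ScalarType>`, so run() performs one indirect call with no type erasure. The following is a minimal standalone sketch of that pattern only; the names used here (UnaryKernelSketch, exp_op, configure(bool)) are invented for illustration and are not part of the Compute Library API.

// Sketch: pick a member-function specialisation once at configure() time,
// then dispatch through a plain pointer-to-member in run().
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

class UnaryKernelSketch
{
public:
    // Select the ScalarType specialisation up front, analogous to binding
    // ElementwiseUnaryPtr to elementwise_op<ScalarType> in the refactor.
    void configure(bool use_double)
    {
        _func = use_double ? &UnaryKernelSketch::exp_op<double>
                           : &UnaryKernelSketch::exp_op<float>;
    }

    void run(const std::vector<float> &in, std::vector<float> &out)
    {
        (this->*_func)(in, out); // single indirect call, no std::function wrapper
    }

private:
    using OpPtr = void (UnaryKernelSketch::*)(const std::vector<float> &, std::vector<float> &);

    template <typename ScalarType>
    void exp_op(const std::vector<float> &in, std::vector<float> &out)
    {
        out.resize(in.size());
        for (std::size_t i = 0; i < in.size(); ++i)
        {
            // Compute in the selected scalar type, store back as float.
            out[i] = static_cast<float>(std::exp(static_cast<ScalarType>(in[i])));
        }
    }

    OpPtr _func{ nullptr };
};

int main()
{
    UnaryKernelSketch kernel;
    kernel.configure(false);
    std::vector<float> out;
    kernel.run({ 0.F, 1.F }, out);
    std::printf("%f %f\n", static_cast<double>(out[0]), static_cast<double>(out[1]));
    return 0;
}

Selecting the specialisation once at configure time keeps the templates out of run() and drops the allocation and indirection machinery of std::function, which is likely consistent with the code-size reduction reported in the commit message.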