From 18e20ff8bc31bd835e96b46c0beb5435c76572bd Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Wed, 6 May 2020 17:03:59 +0100 Subject: COMPMID-3460: Refactor NEElementwiseUnaryKernel Removed most of the templates and refactored the code. Performance is the same but the library size dropped by 52Kb. Change-Id: I41ff0c0853c923d925cdaeb05f4a58c9086fff94 Signed-off-by: Michalis Spyrou Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3190 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio Comments-Addressed: Arm Jenkins --- .../core/NEON/kernels/NEElementwiseUnaryKernel.h | 29 +++-- arm_compute/core/NEON/wrapper/intrinsics/exp.h | 10 +- arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h | 10 +- arm_compute/core/NEON/wrapper/intrinsics/log.h | 11 +- arm_compute/core/NEON/wrapper/intrinsics/round.h | 10 +- arm_compute/core/NEON/wrapper/intrinsics/sin.h | 11 +- src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp | 142 +++++---------------- 7 files changed, 97 insertions(+), 126 deletions(-) diff --git a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h index 2a4a8f8e46..9a41cecf19 100644 --- a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h +++ b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 ARM Limited. + * Copyright (c) 2018-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -78,23 +78,26 @@ public: // Inherited methods overridden: void run(const Window &window, const ThreadInfo &info) override; +private: /** Common signature for all the specialised arithmetic functions * - * @param[in] input An input tensor. Data types supported: F16/F32. - * @param[out] output The output tensor. Data types supported: F16/F32. - * @param[in] window Region on which to execute the kernel. + * @param[in] window Region on which to execute the kernel. 
*/ - using ElementwiseUnaryFunction = void(const ITensor *input, ITensor *output, const Window &window); + using ElementwiseUnaryPtr = void (NEElementwiseUnaryKernel::*)(const Window &window); -protected: - // Inherited methods overridden: - static Status validate_arguments(ElementWiseUnary op, const ITensorInfo &input, const ITensorInfo &output); - - /** Function to use for the particular tensor types passed to configure() */ - std::function _function; + /** Template function to run elementwise unary operation + * + * @tparam ScalarType Scalar datatype + * + * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). + */ + template + void elementwise_op(const Window &window); - const ITensor *_input; - ITensor *_output; + ElementwiseUnaryPtr _func; + const ITensor *_input; + ITensor *_output; + ElementWiseUnary _op; }; } // namespace arm_compute #endif /* ARM_COMPUTE_NEELEMENTWISEUNARYKERNEL_H */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/exp.h b/arm_compute/core/NEON/wrapper/intrinsics/exp.h index f079af0ae2..4b17ebd93f 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/exp.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/exp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 ARM Limited. + * Copyright (c) 2018-2020 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -37,10 +37,18 @@ namespace wrapper return vexpq_##postfix(a); \ } +#define VEXPQ_IMPL_INT(vtype, postfix) \ + inline vtype vexpq(const vtype &a) \ + { \ + ARM_COMPUTE_UNUSED(a); \ + ARM_COMPUTE_ERROR("Not supported"); \ + } + VEXPQ_IMPL(float32x4_t, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC VEXPQ_IMPL(float16x8_t, f16) #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VEXPQ_IMPL_INT(int32x4_t, s32) #undef VEXPQ_IMPL } // namespace wrapper diff --git a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h index 2bf9f52dbe..77adcf7b8c 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/invsqrt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 ARM Limited. + * Copyright (c) 2018-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,10 +37,18 @@ namespace wrapper return prefix##_##postfix(a); \ } +#define VINVSQRT_IMPL_INT(stype, vtype, prefix, postfix) \ + inline vtype vinvsqrt(const vtype &a) \ + { \ + ARM_COMPUTE_UNUSED(a); \ + ARM_COMPUTE_ERROR("Not supported"); \ + } + VINVSQRT_IMPL(float, float32x2_t, vinvsqrt, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC VINVSQRT_IMPL(float16_t, float16x4_t, vinvsqrt, f16) #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VINVSQRT_IMPL_INT(int, int32x4_t, vinvsqrt, s32) VINVSQRT_IMPL(float, float32x4_t, vinvsqrtq, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC diff --git a/arm_compute/core/NEON/wrapper/intrinsics/log.h b/arm_compute/core/NEON/wrapper/intrinsics/log.h index bb4181ec93..682830c122 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/log.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/log.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 ARM Limited. + * Copyright (c) 2018-2020 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -37,10 +37,19 @@ namespace wrapper return prefix##_##postfix(a); \ } +#define VLOG_IMPL_INT(vtype, prefix, postfix) \ + inline vtype vlog(const vtype &a) \ + { \ + ARM_COMPUTE_UNUSED(a); \ + ARM_COMPUTE_ERROR("Not supported"); \ + } + VLOG_IMPL(float32x4_t, vlogq, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC VLOG_IMPL(float16x8_t, vlogq, f16) #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VLOG_IMPL_INT(int32x4_t, vlogq, s32) + #undef VLOG_IMPL } // namespace wrapper } // namespace arm_compute diff --git a/arm_compute/core/NEON/wrapper/intrinsics/round.h b/arm_compute/core/NEON/wrapper/intrinsics/round.h index f3e0fe1ed8..d6f5a88689 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/round.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/round.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 ARM Limited. + * Copyright (c) 2019-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -37,10 +37,18 @@ namespace wrapper return vroundq_rte_##postfix(a); \ } +#define VROUNDQ_IMPL_INT(vtype, postfix) \ + inline vtype vround(const vtype &a) \ + { \ + ARM_COMPUTE_UNUSED(a); \ + ARM_COMPUTE_ERROR("Not supported"); \ + } + VROUNDQ_IMPL(float32x4_t, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC VROUNDQ_IMPL(float16x8_t, f16) #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VROUNDQ_IMPL_INT(int32x4_t, s32) #undef VROUNDQ_IMPL } // namespace wrapper diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sin.h b/arm_compute/core/NEON/wrapper/intrinsics/sin.h index e0fe5fbff3..bca72db38a 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/sin.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/sin.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 ARM Limited. + * Copyright (c) 2019-2020 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -37,11 +37,20 @@ namespace wrapper return prefix##_##postfix(a); \ } +#define VSIN_IMPL_INT(vtype, prefix, postfix) \ + inline vtype vsin(const vtype &a) \ + { \ + ARM_COMPUTE_UNUSED(a); \ + ARM_COMPUTE_ERROR("Not supported"); \ + } + VSIN_IMPL(float32x4_t, vsinq, f32) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC VSIN_IMPL(float16x8_t, vsinq, f16) #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VSIN_IMPL_INT(int32x4_t, vsinq, s32) + #undef vsub_IMPL } // namespace wrapper } // namespace arm_compute diff --git a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp index 5d3af3b03d..ba93c37ba1 100644 --- a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp +++ b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 ARM Limited. + * Copyright (c) 2018-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -26,30 +26,17 @@ #include "arm_compute/core/CPP/Validate.h" #include "arm_compute/core/Error.h" #include "arm_compute/core/Helpers.h" -#include "arm_compute/core/IAccessWindow.h" #include "arm_compute/core/ITensor.h" -#include "arm_compute/core/NEON/NEAsymm.h" -#include "arm_compute/core/NEON/NEFixedPoint.h" -#include "arm_compute/core/NEON/NEMath.h" #include "arm_compute/core/NEON/wrapper/wrapper.h" -#include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Validate.h" #include "support/ToolchainSupport.h" -#include -#include -#include -#include -#include - namespace arm_compute { -class Coordinates; - namespace { -template -inline ScalarType elementwise_op_scalar(const ScalarType &a) +template +inline ScalarType elementwise_op_scalar_imp(ElementWiseUnary op, const ScalarType &a) { switch(op) { @@ -72,9 +59,8 @@ inline ScalarType elementwise_op_scalar(const ScalarType &a) } } -/* Elementwise operations that are supported for float */ -template ::type = 0> -inline VectorType elementwise_op(const VectorType &a) +template 
+inline VectorType elementwise_op_imp(ElementWiseUnary op, const VectorType &a) { switch(op) { @@ -96,24 +82,10 @@ inline VectorType elementwise_op(const VectorType &a) ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); } } +} // namespace -/* Elementwise operations that are supported for non floats */ -template < ElementWiseUnary op, typename ScalarType, bool is_float, typename VectorType, typename std::enable_if < !is_float, int >::type = 0 > -inline VectorType elementwise_op(const VectorType &a) -{ - switch(op) - { - case ElementWiseUnary::NEG: - return wrapper::vneg(a); - case ElementWiseUnary::ABS: - return wrapper::vabs(a); - default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); - } -} - -template -void elementwise_op(const ITensor *in, ITensor *out, const Window &window) +template +void NEElementwiseUnaryKernel::elementwise_op(const Window &window) { const int window_step_x = 16 / sizeof(ScalarType); const auto window_start_x = static_cast(window.x().start()); @@ -122,8 +94,8 @@ void elementwise_op(const ITensor *in, ITensor *out, const Window &window) Window win = window; win.set(Window::DimX, Window::Dimension(0, 1, 1)); - Iterator input(in, win); - Iterator output(out, win); + Iterator input(_input, win); + Iterator output(_output, win); execute_window_loop(win, [&](const Coordinates &) { @@ -133,55 +105,24 @@ void elementwise_op(const ITensor *in, ITensor *out, const Window &window) int x = window_start_x; for(; x <= window_end_x - window_step_x; x += window_step_x) { - wrapper::vstore(output_ptr + x, elementwise_op(wrapper::vloadq(input_ptr + x))); + wrapper::vstore(output_ptr + x, elementwise_op_imp(_op, wrapper::vloadq(input_ptr + x))); } for(; x < window_end_x; ++x) { - *(output_ptr + x) = elementwise_op_scalar(*(input_ptr + x)); + *(output_ptr + x) = elementwise_op_scalar_imp(_op, *(input_ptr + x)); } }, input, output); } -template -std::function -configure_func(const ITensor *input, ITensor *output) -{ - std::string function_to_call("op_"); - function_to_call += 
string_from_data_type(input->info()->data_type()) + "_"; - function_to_call += string_from_data_type(output->info()->data_type()); - - static std::map map_function = - { - { "op_F32_F32", &elementwise_op }, - { "op_S32_S32", &elementwise_op }, - }; -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - map_function["op_F16_F16"] = &elementwise_op; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - - auto it = map_function.find(function_to_call); - - if(it != map_function.end()) - { - auto func = it->second; - return [func](const ITensor * input, ITensor * output, const Window & window) - { - func(input, output, window); - }; - } - return nullptr; -} -} // namespace - NEElementwiseUnaryKernel::NEElementwiseUnaryKernel() - : _function(nullptr), _input(nullptr), _output(nullptr) + : _func(nullptr), _input(nullptr), _output(nullptr), _op() { } void NEElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensor *input, ITensor *output) { - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(op, *input->info(), *output->info())); + ARM_COMPUTE_ERROR_THROW_ON(validate(op, input->info(), output->info())); ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); // Configure kernel window @@ -196,40 +137,32 @@ void NEElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensor *inp _input = input; _output = output; + _op = op; INEKernel::configure(win); - switch(op) + switch(input->info()->data_type()) { - case ElementWiseUnary::RSQRT: - _function = configure_func(input, output); + case DataType::F32: + _func = &NEElementwiseUnaryKernel::elementwise_op; break; - case ElementWiseUnary::EXP: - _function = configure_func(input, output); - break; - case ElementWiseUnary::NEG: - _function = configure_func(input, output); - break; - case ElementWiseUnary::LOG: - _function = configure_func(input, output); - break; - case ElementWiseUnary::ABS: - _function = configure_func(input, output); - break; - case ElementWiseUnary::ROUND: - _function = configure_func(input, output); +#ifdef 
__ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + _func = &NEElementwiseUnaryKernel::elementwise_op; +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ break; - case ElementWiseUnary::SIN: - _function = configure_func(input, output); + case DataType::S32: + _func = &NEElementwiseUnaryKernel::elementwise_op; break; default: - ARM_COMPUTE_ERROR("NOT_SUPPORTED!"); + ARM_COMPUTE_ERROR("DataType not supported"); } } -Status NEElementwiseUnaryKernel::validate_arguments(ElementWiseUnary op, const ITensorInfo &input, const ITensorInfo &output) +Status NEElementwiseUnaryKernel::validate(ElementWiseUnary op, const ITensorInfo *input, const ITensorInfo *output) { - ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); switch(op) { case ElementWiseUnary::EXP: @@ -237,37 +170,30 @@ Status NEElementwiseUnaryKernel::validate_arguments(ElementWiseUnary op, const I case ElementWiseUnary::LOG: case ElementWiseUnary::ROUND: case ElementWiseUnary::SIN: - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); break; case ElementWiseUnary::NEG: case ElementWiseUnary::ABS: - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::F16, DataType::F32, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::S32); break; default: ARM_COMPUTE_ERROR("ElementWiseUnary operation not supported"); } // Validate in case of configured output - if(output.total_size() > 0) + if(output->total_size() > 0) { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input, &output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } return Status{}; } -Status NEElementwiseUnaryKernel::validate(ElementWiseUnary op, const ITensorInfo *input, const 
ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(op, *input, *output)); - return Status{}; -} - void NEElementwiseUnaryKernel::run(const Window &window, const ThreadInfo &info) { ARM_COMPUTE_UNUSED(info); ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); - ARM_COMPUTE_ERROR_ON(_function == nullptr); - _function(_input, _output, window); + ARM_COMPUTE_ERROR_ON(_func == nullptr); + (this->*_func)(window); } } // namespace arm_compute -- cgit v1.2.1