From 5f39091e502b0805f292d79a2a7da66d485f70ac Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 13 May 2020 00:12:08 +0100
Subject: COMPMID-3176: Remove padding from NEArithmeticSubtractionKernel
 COMPMID-3487: Refactor NEArithmeticSubtractionKernel

Refactored the code in order to remove padding. This resulted in a big
increase in library size, so after some rework the total size dropped by 4KB.

Change-Id: I4e3014c2ae49c29c6090b195ea16620afcf6c09f
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3206
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 .../NEON/kernels/NEArithmeticSubtractionKernel.h   |  7 +--
 .../core/NEON/wrapper/intrinsics/intrinsics.h      |  1 +
 arm_compute/core/NEON/wrapper/intrinsics/qmov.h    | 49 +++++++++++++++++
 arm_compute/core/NEON/wrapper/intrinsics/sub.h     | 11 +++-
 arm_compute/core/NEON/wrapper/scalar/scalar.h      |  3 +-
 arm_compute/core/NEON/wrapper/scalar/sub.h         | 62 ++++++++++++++++++++++
 6 files changed, 128 insertions(+), 5 deletions(-)
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/qmov.h
 create mode 100644 arm_compute/core/NEON/wrapper/scalar/sub.h

(limited to 'arm_compute/core/NEON')

diff --git a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
index 919c685886..f75c6bfb98 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
@@ -52,7 +52,7 @@ public:
     /** Default destructor */
     ~NEArithmeticSubtractionKernel() = default;
 
-    /** Initialise the kernel's input, output and border mode.
+    /** Initialise the kernel's input and output.
      *
      * Valid configurations (Input1,Input2) -> Output :
      *
@@ -87,7 +87,6 @@ public:
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
 
 private:
     /** Common signature for all the specialised sub functions
@@ -96,13 +95,15 @@ private:
      * @param[in]  input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/QSYMM16/S16/F16/F32
      * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/QSYMM16/S16/F16/F32.
      * @param[in]  window Region on which to execute the kernel.
+     * @param[in]  is_sat Flag to indicate if the policy is SATURATE.
      */
-    using SubFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
+    using SubFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window, bool is_sat);
     /** Sub function to use for the particular tensor types passed to configure() */
     SubFunction   *_func;
     const ITensor *_input1;
     const ITensor *_input2;
     ITensor       *_output;
+    ConvertPolicy  _policy;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEARITHMETICSUBTRACTIONKERNEL_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 51b1fcc1bd..1150daa073 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -58,6 +58,7 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/pmax.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/qmov.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/qmovun.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/reinterpret.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/rev64.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/qmov.h b/arm_compute/core/NEON/wrapper/intrinsics/qmov.h
new file mode 100644
index 0000000000..bb64bef1e9
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/qmov.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_QMOV_H
+#define ARM_COMPUTE_WRAPPER_QMOV_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint8x8_t>::type
+vqmov(const int16x8_t &a)
+{
+    return vqmovun_s16(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int8_t>::value, int8x8_t>::type
+vqmov(const int16x8_t &a)
+{
+    return vqmovn_s16(a);
+}
+
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_QMOV_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sub.h b/arm_compute/core/NEON/wrapper/intrinsics/sub.h
index 2c6c96125a..f46b57c815 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/sub.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/sub.h
@@ -64,6 +64,7 @@ VSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
 
 #undef VSUB_IMPL
 
+// VQSUB: Vector saturating sub (No notion of saturation for floating point)
 #define VQSUB_IMPL(stype, vtype, prefix, postfix)      \
     inline vtype vqsub(const vtype &a, const vtype &b) \
     {                                                  \
@@ -78,6 +79,10 @@ VQSUB_IMPL(uint32x2_t, uint32x2_t, vqsub, u32)
 VQSUB_IMPL(int32x2_t, int32x2_t, vqsub, s32)
 VQSUB_IMPL(uint64x1_t, uint64x1_t, vqsub, u64)
 VQSUB_IMPL(int64x1_t, int64x1_t, vqsub, s64)
+VQSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQSUB_IMPL(float16x4_t, float16x4_t, vsub, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 VQSUB_IMPL(uint8x16_t, uint8x16_t, vqsubq, u8)
 VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8)
@@ -87,8 +92,12 @@ VQSUB_IMPL(uint32x4_t, uint32x4_t, vqsubq, u32)
 VQSUB_IMPL(int32x4_t, int32x4_t, vqsubq, s32)
 VQSUB_IMPL(uint64x2_t, uint64x2_t, vqsubq, u64)
 VQSUB_IMPL(int64x2_t, int64x2_t, vqsubq, s64)
-
+VQSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VQSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 #undef VQSUB_IMPL
+
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_WRAPPER_SUB_H */
diff --git a/arm_compute/core/NEON/wrapper/scalar/scalar.h b/arm_compute/core/NEON/wrapper/scalar/scalar.h
index c8bd47385e..ff2d807c0e 100644
--- a/arm_compute/core/NEON/wrapper/scalar/scalar.h
+++ b/arm_compute/core/NEON/wrapper/scalar/scalar.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,5 +25,6 @@
 #define ARM_COMPUTE_WRAPPER_SCALAR_H
 
 #include "arm_compute/core/NEON/wrapper/scalar/add.h"
+#include "arm_compute/core/NEON/wrapper/scalar/sub.h"
 
 #endif /* ARM_COMPUTE_WRAPPER_SCALAR_H */
diff --git a/arm_compute/core/NEON/wrapper/scalar/sub.h b/arm_compute/core/NEON/wrapper/scalar/sub.h
new file mode 100644
index 0000000000..5b4cab93d3
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/scalar/sub.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SCALAR_SUB_H
+#define ARM_COMPUTE_WRAPPER_SCALAR_SUB_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+inline uint8_t sub_sat(const uint8_t &a, const uint8_t &b)
+{
+    const uint8x8_t va = { a, 0, 0, 0, 0, 0, 0, 0 };
+    const uint8x8_t vb = { b, 0, 0, 0, 0, 0, 0, 0 };
+    return vget_lane_u8(vqsub_u8(va, vb), 0);
+}
+
+inline int16_t sub_sat(const int16_t &a, const int16_t &b)
+{
+    const int16x4_t va = { a, 0, 0, 0 };
+    const int16x4_t vb = { b, 0, 0, 0 };
+    return vget_lane_s16(vqsub_s16(va, vb), 0);
+}
+
+inline float sub_sat(const float &a, const float &b)
+{
+    // No notion of saturation exists in floating point
+    return a - b;
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+inline float16_t sub_sat(const float16_t &a, const float16_t &b)
+{
+    // No notion of saturation exists in floating point
+    return a - b;
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_SCALAR_SUB_H */
-- 
cgit v1.2.1
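
The sketch below is not part of the patch; it is a minimal illustration, under assumed names, of the pattern these wrappers support once kernel padding is removed: a full-width vector loop built on wrapper::vqsub, followed by a scalar leftover loop built on the new wrapper::sub_sat so that trailing elements keep the same saturating semantics. In the quantized paths, the new wrapper::vqmov<T> template presumably plays a similar supporting role, narrowing an int16x8_t accumulator back to uint8_t or int8_t with saturation. The function name, signature, and loop structure here are illustrative assumptions, not code from this commit.

// Minimal sketch, assuming a plain C-array interface; only the wrapper calls
// and headers come from the library, everything else is illustrative.
#include <arm_neon.h>
#include <cstdint>

#include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"
#include "arm_compute/core/NEON/wrapper/scalar/sub.h"

void sub_saturate_s16(const int16_t *a, const int16_t *b, int16_t *dst, int num_elems)
{
    int       x    = 0;
    const int step = 8; // int16x8_t holds 8 lanes

    // Vector loop over full 8-lane blocks, saturating via wrapper::vqsub.
    for(; x <= num_elems - step; x += step)
    {
        const int16x8_t va = vld1q_s16(a + x);
        const int16x8_t vb = vld1q_s16(b + x);
        vst1q_s16(dst + x, arm_compute::wrapper::vqsub(va, vb));
    }

    // Scalar leftover loop: without padding the trailing elements cannot be
    // loaded as a full vector, so the scalar sub_sat() keeps the same
    // saturating behaviour element by element.
    for(; x < num_elems; ++x)
    {
        dst[x] = arm_compute::wrapper::sub_sat(a[x], b[x]);
    }
}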