From 25ef7217ec4e13682bf37c87c0c6075a799ba1c0 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Tue, 2 Jun 2020 23:00:41 +0100
Subject: COMPMID-3180: Remove padding from NEThreshold

- Removes padding from NEThresholdKernel
- Alters configuration interface to use a descriptor

Change-Id: I394d5e1375454813856d9d206e61dc9a87c2cadc
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3300
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 arm_compute/core/NEON/kernels/NEThresholdKernel.h | 39 +++++++------
 arm_compute/core/NEON/wrapper/intrinsics/cle.h    | 64 ++++++++++++++++++++++
 .../core/NEON/wrapper/intrinsics/intrinsics.h     |  1 +
 3 files changed, 84 insertions(+), 20 deletions(-)
 create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/cle.h

(limited to 'arm_compute/core/NEON')

diff --git a/arm_compute/core/NEON/kernels/NEThresholdKernel.h b/arm_compute/core/NEON/kernels/NEThresholdKernel.h
index a6d1e9071c..fc97c3a75f 100644
--- a/arm_compute/core/NEON/kernels/NEThresholdKernel.h
+++ b/arm_compute/core/NEON/kernels/NEThresholdKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,18 +24,15 @@
 #ifndef ARM_COMPUTE_NETHRESHOLDKERNEL_H
 #define ARM_COMPUTE_NETHRESHOLDKERNEL_H
 
+#include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/NEON/INEKernel.h"
 #include "arm_compute/core/Types.h"
 
-#include <cstdint>
-
 namespace arm_compute
 {
 class ITensor;
 
-/** Interface for the thresholding kernel
- *
- */
+/** Interface for the thresholding kernel */
 class NEThresholdKernel : public INEKernel
 {
 public:
@@ -53,15 +50,20 @@ public:
     NEThresholdKernel &operator=(const NEThresholdKernel &) = delete;
     /** Initialise the kernel's input, output and threshold parameters.
      *
-     * @param[in]  input       An input tensor. Data type supported: U8
-     * @param[out] output      The output tensor. Data type supported: U8.
-     * @param[in]  threshold   Threshold. When the threhold type is RANGE, this is used as the lower threshold.
-     * @param[in]  false_value value to set when the condition is not respected.
-     * @param[in]  true_value  value to set when the condition is respected.
-     * @param[in]  type        Thresholding type. Either RANGE or BINARY.
-     * @param[in]  upper       Upper threshold. Only used when the thresholding type is RANGE.
+     * @param[in]  input  An input tensor. Data type supported: U8
+     * @param[out] output The output tensor. Data type supported: U8.
+     * @param[in]  info   Threshold kernel descriptor
+     */
+    void configure(const ITensor *input, ITensor *output, const ThresholdKernelInfo &info);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEThresholdKernel
+     *
+     * @param[in] input  Input tensor info. Data type supported: U8
+     * @param[in] output Output tensor info. Data type supported: U8
+     * @param[in] info   Threshold kernel descriptor
+     *
+     * @return A status containing an error code in case of failure
      */
-    void configure(const ITensor *input, ITensor *output, uint8_t threshold, uint8_t false_value, uint8_t true_value, ThresholdType type, uint8_t upper);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ThresholdKernelInfo &info);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
@@ -74,12 +76,9 @@ private:
 
     void (NEThresholdKernel::*_func)(const Window &window);
 
-    const ITensor *_input;  /**< Input */
-    ITensor       *_output; /**< Output */
-    uint8_t        _threshold;
-    uint8_t        _false_value;
-    uint8_t        _true_value;
-    uint8_t        _upper;
+    const ITensor      *_input;  /**< Input */
+    ITensor            *_output; /**< Output */
+    ThresholdKernelInfo _info;   /**< Threshold descriptor */
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_NETHRESHOLDKERNEL_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/cle.h b/arm_compute/core/NEON/wrapper/intrinsics/cle.h
new file mode 100644
index 0000000000..83c92d6891
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/cle.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_CLE_H
+#define ARM_COMPUTE_WRAPPER_CLE_H
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VCLE_IMPL(stype, vtype, rtype, prefix, postfix) \
+    inline rtype vcle(const vtype &a, const vtype &b)   \
+    {                                                   \
+        return prefix##_##postfix(a, b);                \
+    }
+
+VCLE_IMPL(uint8_t, uint8x8_t, uint8x8_t, vcle, u8)
+VCLE_IMPL(int8_t, int8x8_t, uint8x8_t, vcle, s8)
+VCLE_IMPL(uint16_t, uint16x4_t, uint16x4_t, vcle, u16)
+VCLE_IMPL(int16_t, int16x4_t, uint16x4_t, vcle, s16)
+VCLE_IMPL(uint32_t, uint32x2_t, uint32x2_t, vcle, u32)
+VCLE_IMPL(int32_t, int32x2_t, uint32x2_t, vcle, s32)
+VCLE_IMPL(float32x2_t, float32x2_t, uint32x2_t, vcle, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLE_IMPL(float16x4_t, float16x4_t, uint16x4_t, vcle, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VCLE_IMPL(uint8_t, uint8x16_t, uint8x16_t, vcleq, u8)
+VCLE_IMPL(int8_t, int8x16_t, uint8x16_t, vcleq, s8)
+VCLE_IMPL(uint16_t, uint16x8_t, uint16x8_t, vcleq, u16)
+VCLE_IMPL(int16_t, int16x8_t, uint16x8_t, vcleq, s16)
+VCLE_IMPL(uint32_t, uint32x4_t, uint32x4_t, vcleq, u32)
+VCLE_IMPL(int32_t, int32x4_t, uint32x4_t, vcleq, s32)
+VCLE_IMPL(float32x4_t, float32x4_t, uint32x4_t, vcleq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VCLE_IMPL(float16x8_t, float16x8_t, uint16x8_t, vcleq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VCLE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_WRAPPER_CLE_H */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 1150daa073..14c5d615be 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -31,6 +31,7 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/ceq.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/cge.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/cgt.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/cle.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/clt.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/combine.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/cvt.h"
--
cgit v1.2.1
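
Usage note (not part of the patch): the parameters that used to be passed individually to configure() now travel in a ThresholdKernelInfo descriptor pulled in from KernelDescriptors.h. The descriptor's definition is not shown in this header-only view, so the sketch below assumes it exposes the same fields as the old signature (threshold, false_value, true_value, type, upper) and a matching constructor; the helper function name is illustrative.

    #include "arm_compute/core/KernelDescriptors.h"
    #include "arm_compute/core/NEON/kernels/NEThresholdKernel.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void run_threshold(Tensor &src, Tensor &dst)
    {
        // Assumed field order mirrors the old configure():
        // lower threshold, false_value, true_value, type, upper threshold.
        const ThresholdKernelInfo info(100, 0, 255, ThresholdType::BINARY, 0);

        // validate() lets callers reject an unsupported configuration up front.
        ARM_COMPUTE_ERROR_THROW_ON(NEThresholdKernel::validate(src.info(), dst.info(), info));

        NEThresholdKernel kernel;
        kernel.configure(&src, &dst, info);
    }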
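The new cle.h rounds out the wrapper's comparison set (ceq/cge/cgt/clt) with a type-overloaded less-than-or-equal, so kernel code can stay generic over the element type instead of naming vcleq_u8 and friends directly. A minimal sketch of how such an overload is typically consumed, here for a U8 RANGE-style check (lower <= in <= upper); the helper and the surrounding AND/select intrinsics are illustrative and not taken from the kernel's actual implementation, which lives in the .cpp outside this view.

    #include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"
    #include <arm_neon.h>

    // out[i] = (lower <= in[i] && in[i] <= upper) ? true_value : false_value
    // wrapper::vcle resolves to vcleq_u8 for uint8x16_t operands (see cle.h above).
    inline uint8x16_t threshold_range_u8(uint8x16_t in, uint8_t lower, uint8_t upper,
                                         uint8_t true_value, uint8_t false_value)
    {
        const uint8x16_t ge_lower = arm_compute::wrapper::vcle(vdupq_n_u8(lower), in); // lower <= in
        const uint8x16_t le_upper = arm_compute::wrapper::vcle(in, vdupq_n_u8(upper)); // in <= upper
        const uint8x16_t in_range = vandq_u8(ge_lower, le_upper);
        return vbslq_u8(in_range, vdupq_n_u8(true_value), vdupq_n_u8(false_value));
    }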