From a6825a427a51da815a805d66ce65c98de282d89b Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Thu, 13 Sep 2018 12:24:03 +0100 Subject: COMPMID-1540 Implement YOLOLayer on NEON Change-Id: Ice28996959dc666fff5e8ae486c1ff8093db083f Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/148367 Reviewed-by: Georgios Pinitas Tested-by: bsgcomp --- arm_compute/core/NEON/NEKernels.h | 1 + arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h | 116 ++++++++++ .../kernels/detail/NEActivationFunctionDetail.h | 91 ++++++++ arm_compute/core/NEON/wrapper/intrinsics/add.h | 68 ++++++ arm_compute/core/NEON/wrapper/intrinsics/exp.h | 47 ++++ .../core/NEON/wrapper/intrinsics/intrinsics.h | 6 + arm_compute/core/NEON/wrapper/intrinsics/inv.h | 53 +++++ arm_compute/core/NEON/wrapper/intrinsics/mla.h | 58 +++++ arm_compute/core/NEON/wrapper/intrinsics/mul.h | 58 +++++ arm_compute/core/NEON/wrapper/intrinsics/neg.h | 65 ++++++ arm_compute/runtime/NEON/NEFunctions.h | 1 + arm_compute/runtime/NEON/functions/NEYOLOLayer.h | 64 ++++++ src/core/NEON/kernels/NEYOLOLayerKernel.cpp | 250 +++++++++++++++++++++ src/runtime/NEON/functions/NEYOLOLayer.cpp | 42 ++++ tests/validation/NEON/YOLOLayer.cpp | 105 +++++++++ 15 files changed, 1025 insertions(+) create mode 100644 arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/add.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/exp.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/inv.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/mla.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/mul.h create mode 100644 arm_compute/core/NEON/wrapper/intrinsics/neg.h create mode 100644 arm_compute/runtime/NEON/functions/NEYOLOLayer.h create mode 100644 src/core/NEON/kernels/NEYOLOLayerKernel.cpp create mode 100644 src/runtime/NEON/functions/NEYOLOLayer.cpp create mode 100644 tests/validation/NEON/YOLOLayer.cpp diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h index bbeb604140..2b76c0bd29 100644 --- a/arm_compute/core/NEON/NEKernels.h +++ b/arm_compute/core/NEON/NEKernels.h @@ -117,5 +117,6 @@ #include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h" #include "arm_compute/core/NEON/kernels/NEWidthConcatenateLayerKernel.h" #include "arm_compute/core/NEON/kernels/NEWinogradConvolutionLayerKernel.h" +#include "arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h" #endif /* __ARM_COMPUTE_NEKERNELS_H__ */ diff --git a/arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h b/arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h new file mode 100644 index 0000000000..c0cfcc049e --- /dev/null +++ b/arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEYOLOLAYERKERNEL_H__
+#define __ARM_COMPUTE_NEYOLOLAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Interface for the YOLO layer kernel. */
+class NEYOLOLayerKernel : public INEKernel
+{
+public:
+    const char *name() const override
+    {
+        return "NEYOLOLayerKernel";
+    }
+    /** Constructor */
+    NEYOLOLayerKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEYOLOLayerKernel(const NEYOLOLayerKernel &) = delete;
+    /** Default move constructor */
+    NEYOLOLayerKernel(NEYOLOLayerKernel &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEYOLOLayerKernel &operator=(const NEYOLOLayerKernel &) = delete;
+    /** Default move assignment operator */
+    NEYOLOLayerKernel &operator=(NEYOLOLayerKernel &&) = default;
+    /** Default destructor */
+    ~NEYOLOLayerKernel() = default;
+    /** Set the input and output tensor.
+     *
+     * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place
+     *
+     * @param[in, out] input       Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
+     *                             of the activation function. Data types supported: F16/F32.
+     * @param[out]     output      Destination tensor. Data type supported: same as @p input
+     * @param[in]      act_info    Activation layer parameters.
+     * @param[in]      num_classes Number of classes to activate (must be submultiple of @p input channels)
+     */
+    void configure(ITensor *input, ITensor *output, const ActivationLayerInfo &act_info, int32_t num_classes);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEYOLOLayerKernel
+     *
+     * @param[in] input       Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
+     *                        of the activation function. Data types supported: F16/F32.
+     * @param[in] output      Destination tensor info. Data type supported: same as @p input
+     * @param[in] act_info    Activation layer information.
+     * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels)
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+    /** Function to run YOLO layer on fp16
+     *
+     * @param[in] window Region on which to execute the kernel.
+     */
+    void yolo_layer_fp16_nchw(const Window &window);
+    /** Function to run YOLO layer on fp16 on tensors with NHWC format
+     *
+     * @param[in] window Region on which to execute the kernel.
+     */
+    void yolo_layer_fp16_nhwc(const Window &window);
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+    /** Function to run YOLO layer on fp32
+     *
+     * @param[in] window Region on which to execute the kernel.
+ */ + void yolo_layer_fp32_nchw(const Window &window); + /** Function to run YOLO layer on fp32 on tensors with NHWC format + * + * @param[in] window Region on which to execute the kernel. + */ + void yolo_layer_fp32_nhwc(const Window &window); + /** Common signature for all the yolo layer functions + * + * @param[in] window Region on which to execute the kernel. + */ + using YOLOFunctionPtr = void (NEYOLOLayerKernel::*)(const Window &window); + +private: + YOLOFunctionPtr _func; + ITensor *_input; + ITensor *_output; + ActivationLayerInfo _act_info; + int32_t _num_classes; +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_NEYOLOLAYERKERNEL_H__ */ diff --git a/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h b/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h index 71d5a9eef7..9344235d09 100644 --- a/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h +++ b/arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h @@ -54,6 +54,97 @@ struct dummy ARM_COMPUTE_UNUSED(vval); } }; +/** Linear activation object */ +template +struct linear +{ + /** NEON vector type. */ + using ExactType = typename wrapper::traits::neon_vector::type; + /** NEON vector tag type. */ + using ExactTagType = typename wrapper::traits::neon_vector::tag_type; + + /** Construct a Linear activation object. + * + * @param[in] act_info Activation layer information. + */ + explicit linear(ActivationLayerInfo act_info) + : valpha(wrapper::vdup_n(static_cast(act_info.a()), ExactTagType{})), + vbeta(wrapper::vdup_n(static_cast(act_info.b()), ExactTagType{})) + { + } + + /** Run activation function. + * + * @param[in] vval Vector of values. + */ + void operator()(ExactType &vval) + { + vval = wrapper::vmla(vval, valpha, vbeta); + } + + /** Vector of alphas. */ + const ExactType valpha; + /** Vector of betas. */ + const ExactType vbeta; +}; +/** Square activation object */ +template +struct square +{ + /** NEON vector type. */ + using ExactType = typename wrapper::traits::neon_vector::type; + /** NEON vector tag type. */ + using ExactTagType = typename wrapper::traits::neon_vector::tag_type; + + /** Construct a Square activation object. + * + * @param[in] act_info Activation layer information. + */ + explicit square(ActivationLayerInfo act_info) + { + ARM_COMPUTE_UNUSED(act_info); + } + + /** Run activation function. + * + * @param[in] vval Vector of values. + */ + void operator()(ExactType &vval) + { + vval = wrapper::vmul(vval, vval); + } +}; +/** Logistic activation object */ +template +struct logistic +{ + /** NEON vector type. */ + using ExactType = typename wrapper::traits::neon_vector::type; + /** NEON vector tag type. */ + using ExactTagType = typename wrapper::traits::neon_vector::tag_type; + + /** Construct a Logistic activation object. + * + * @param[in] act_info Activation layer information. + */ + explicit logistic(ActivationLayerInfo act_info) + : vone(wrapper::vdup_n(static_cast(1.f), ExactTagType{})) + { + ARM_COMPUTE_UNUSED(act_info); + } + + /** Run activation function. + * + * @param[in] vval Vector of values. + */ + void operator()(ExactType &vval) + { + vval = wrapper::vinv(wrapper::vadd(vone, wrapper::vexpq(wrapper::vnegq(vval)))); + } + + /** Vector of ones. 
*/ + const ExactType vone; +}; /** RELU activation object */ template struct relu diff --git a/arm_compute/core/NEON/wrapper/intrinsics/add.h b/arm_compute/core/NEON/wrapper/intrinsics/add.h new file mode 100644 index 0000000000..da730f133c --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/add.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_WRAPPER_ADD_H__ +#define __ARM_COMPUTE_WRAPPER_ADD_H__ + +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VADD_IMPL(stype, vtype, prefix, postfix) \ + inline vtype vadd(const vtype &a, const vtype &b) \ + { \ + return prefix##_##postfix(a, b); \ + } + +VADD_IMPL(uint8x8_t, uint8x8_t, vadd, u8) +VADD_IMPL(int8x8_t, int8x8_t, vadd, s8) +VADD_IMPL(uint16x4_t, uint16x4_t, vadd, u16) +VADD_IMPL(int16x4_t, int16x4_t, vadd, s16) +VADD_IMPL(uint32x2_t, uint32x2_t, vadd, u32) +VADD_IMPL(int32x2_t, int32x2_t, vadd, s32) +VADD_IMPL(uint64x1_t, uint64x1_t, vadd, u64) +VADD_IMPL(int64x1_t, int64x1_t, vadd, s64) +VADD_IMPL(float32x2_t, float32x2_t, vadd, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VADD_IMPL(float16x4_t, float16x4_t, vadd, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +VADD_IMPL(uint8x16_t, uint8x16_t, vaddq, u8) +VADD_IMPL(int8x16_t, int8x16_t, vaddq, s8) +VADD_IMPL(uint16x8_t, uint16x8_t, vaddq, u16) +VADD_IMPL(int16x8_t, int16x8_t, vaddq, s16) +VADD_IMPL(uint32x4_t, uint32x4_t, vaddq, u32) +VADD_IMPL(int32x4_t, int32x4_t, vaddq, s32) +VADD_IMPL(uint64x2_t, uint64x2_t, vaddq, u64) +VADD_IMPL(int64x2_t, int64x2_t, vaddq, s64) +VADD_IMPL(float32x4_t, float32x4_t, vaddq, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VADD_IMPL(float16x8_t, float16x8_t, vaddq, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#undef VADD_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_ADD_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/exp.h b/arm_compute/core/NEON/wrapper/intrinsics/exp.h new file mode 100644 index 0000000000..85aa9c739a --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/exp.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_WRAPPER_EXP_H__ +#define __ARM_COMPUTE_WRAPPER_EXP_H__ + +#include "arm_compute/core/NEON/NEMath.h" +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VEXPQ_IMPL(vtype, postfix) \ + inline vtype vexpq(const vtype &a) \ + { \ + return vexpq_##postfix(a); \ + } + +VEXPQ_IMPL(float32x4_t, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VEXPQ_IMPL(float16x8_t, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#undef VEXPQ_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_EXP_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h index b302b366cd..58bfba9645 100644 --- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h +++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h @@ -24,11 +24,17 @@ #ifndef __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ #define __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ +#include "arm_compute/core/NEON/wrapper/intrinsics/add.h" #include "arm_compute/core/NEON/wrapper/intrinsics/and.h" #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h" +#include "arm_compute/core/NEON/wrapper/intrinsics/exp.h" +#include "arm_compute/core/NEON/wrapper/intrinsics/inv.h" #include "arm_compute/core/NEON/wrapper/intrinsics/load.h" #include "arm_compute/core/NEON/wrapper/intrinsics/max.h" #include "arm_compute/core/NEON/wrapper/intrinsics/min.h" +#include "arm_compute/core/NEON/wrapper/intrinsics/mla.h" +#include "arm_compute/core/NEON/wrapper/intrinsics/mul.h" +#include "arm_compute/core/NEON/wrapper/intrinsics/neg.h" #include "arm_compute/core/NEON/wrapper/intrinsics/store.h" #endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/inv.h b/arm_compute/core/NEON/wrapper/intrinsics/inv.h new file mode 100644 index 0000000000..a86a9d4671 --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/inv.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_WRAPPER_INV_H__ +#define __ARM_COMPUTE_WRAPPER_INV_H__ + +#include "arm_compute/core/NEON/NEMath.h" +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VINV_IMPL(vtype, prefix, postfix) \ + inline vtype vinv(const vtype &a) \ + { \ + return prefix##_##postfix(a); \ + } + +VINV_IMPL(float32x2_t, vinv, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VINV_IMPL(float16x4_t, vinv, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +VINV_IMPL(float32x4_t, vinvq, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VINV_IMPL(float16x8_t, vinvq, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#undef VINV_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_INV_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/mla.h b/arm_compute/core/NEON/wrapper/intrinsics/mla.h new file mode 100644 index 0000000000..32a650b57f --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/mla.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_WRAPPER_MLA_H__ +#define __ARM_COMPUTE_WRAPPER_MLA_H__ + +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VMLA_IMPL(stype, vtype, prefix, postfix) \ + inline vtype vmla(const vtype &a, const vtype &b, const vtype &c) \ + { \ + return prefix##_##postfix(a, b, c); \ + } + +VMLA_IMPL(uint8x8_t, uint8x8_t, vmla, u8) +VMLA_IMPL(int8x8_t, int8x8_t, vmla, s8) +VMLA_IMPL(uint16x4_t, uint16x4_t, vmla, u16) +VMLA_IMPL(int16x4_t, int16x4_t, vmla, s16) +VMLA_IMPL(uint32x2_t, uint32x2_t, vmla, u32) +VMLA_IMPL(int32x2_t, int32x2_t, vmla, s32) +VMLA_IMPL(float32x2_t, float32x2_t, vmla, f32) + +VMLA_IMPL(uint8x16_t, uint8x16_t, vmlaq, u8) +VMLA_IMPL(int8x16_t, int8x16_t, vmlaq, s8) +VMLA_IMPL(uint16x8_t, uint16x8_t, vmlaq, u16) +VMLA_IMPL(int16x8_t, int16x8_t, vmlaq, s16) +VMLA_IMPL(uint32x4_t, uint32x4_t, vmlaq, u32) +VMLA_IMPL(int32x4_t, int32x4_t, vmlaq, s32) +VMLA_IMPL(float32x4_t, float32x4_t, vmlaq, f32) + +#undef VMLA_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_MLA_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/mul.h b/arm_compute/core/NEON/wrapper/intrinsics/mul.h new file mode 100644 index 0000000000..c1908fc7b3 --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/mul.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_WRAPPER_MUL_H__ +#define __ARM_COMPUTE_WRAPPER_MUL_H__ + +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VMUL_IMPL(stype, vtype, prefix, postfix) \ + inline vtype vmul(const vtype &a, const vtype &b) \ + { \ + return prefix##_##postfix(a, b); \ + } + +VMUL_IMPL(uint8x8_t, uint8x8_t, vmul, u8) +VMUL_IMPL(int8x8_t, int8x8_t, vmul, s8) +VMUL_IMPL(uint16x4_t, uint16x4_t, vmul, u16) +VMUL_IMPL(int16x4_t, int16x4_t, vmul, s16) +VMUL_IMPL(uint32x2_t, uint32x2_t, vmul, u32) +VMUL_IMPL(int32x2_t, int32x2_t, vmul, s32) +VMUL_IMPL(float32x2_t, float32x2_t, vmul, f32) + +VMUL_IMPL(uint8_t, uint8x16_t, vmulq, u8) +VMUL_IMPL(int8_t, int8x16_t, vmulq, s8) +VMUL_IMPL(uint16_t, uint16x8_t, vmulq, u16) +VMUL_IMPL(int16_t, int16x8_t, vmulq, s16) +VMUL_IMPL(uint32_t, uint32x4_t, vmulq, u32) +VMUL_IMPL(int32_t, int32x4_t, vmulq, s32) +VMUL_IMPL(float32x4_t, float32x4_t, vmulq, f32) + +#undef VMUL_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_MUL_H__ */ diff --git a/arm_compute/core/NEON/wrapper/intrinsics/neg.h b/arm_compute/core/NEON/wrapper/intrinsics/neg.h new file mode 100644 index 0000000000..0ea1d429fe --- /dev/null +++ b/arm_compute/core/NEON/wrapper/intrinsics/neg.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __ARM_COMPUTE_WRAPPER_NEG_H__ +#define __ARM_COMPUTE_WRAPPER_NEG_H__ + +#include + +namespace arm_compute +{ +namespace wrapper +{ +#define VNEG_IMPL(vtype, postfix) \ + inline vtype vneg(const vtype &a) \ + { \ + return vneg_##postfix(a); \ + } + +VNEG_IMPL(int8x8_t, s8) +VNEG_IMPL(int16x4_t, s16) +VNEG_IMPL(int32x2_t, s32) +VNEG_IMPL(float32x2_t, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VNEG_IMPL(float16x4_t, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#undef VNEG_IMPL +#define VNEGQ_IMPL(vtype, postfix) \ + inline vtype vnegq(const vtype &a) \ + { \ + return vnegq_##postfix(a); \ + } + +VNEGQ_IMPL(int8x16_t, s8) +VNEGQ_IMPL(int16x8_t, s16) +VNEGQ_IMPL(int32x4_t, s32) +VNEGQ_IMPL(float32x4_t, f32) +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +VNEGQ_IMPL(float16x8_t, f16) +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + +#undef VNEGQ_IMPL +} // namespace wrapper +} // namespace arm_compute +#endif /* __ARM_COMPUTE_WRAPPER_NEG_H__ */ diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h index 89a5c4aaaf..d9b7269efc 100644 --- a/arm_compute/runtime/NEON/NEFunctions.h +++ b/arm_compute/runtime/NEON/NEFunctions.h @@ -119,5 +119,6 @@ #include "arm_compute/runtime/NEON/functions/NEWarpPerspective.h" #include "arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h" #include "arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h" +#include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h" #endif /* __ARM_COMPUTE_NEFUNCTIONS_H__ */ \ No newline at end of file diff --git a/arm_compute/runtime/NEON/functions/NEYOLOLayer.h b/arm_compute/runtime/NEON/functions/NEYOLOLayer.h new file mode 100644 index 0000000000..e09dd42f8f --- /dev/null +++ b/arm_compute/runtime/NEON/functions/NEYOLOLayer.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_NEYOLOLAYER_H__ +#define __ARM_COMPUTE_NEYOLOLAYER_H__ + +#include "arm_compute/runtime/NEON/INESimpleFunction.h" + +#include "arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h" +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +class ITensor; + +/** Basic function to run @ref NEYOLOLayerKernel */ +class NEYOLOLayer : public INESimpleFunction +{ +public: + /** Set the input and output tensor. 
+ * + * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place + * + * @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result + * of the activation function. Data types supported: F16/F32. + * @param[out] output Destination tensor. Data type supported: same as @p input + * @param[in] act_info Activation layer parameters. + * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) + */ + void configure(ITensor *input, ITensor *output, const ActivationLayerInfo &act_info, int32_t num_classes); + /** Static function to check if given info will lead to a valid configuration of @ref NEYOLOLayer + * + * @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result + * of the activation function. Data types supported: F16/F32. + * @param[in] output Destination tensor info. Data type supported: same as @p input + * @param[in] act_info Activation layer information. + * @param[in] num_classes Number of classes to activate (must be submultiple of @p input channels) + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes); +}; +} // namespace arm_compute +#endif /* __ARM_COMPUTE_NEYOLOLAYER_H__ */ diff --git a/src/core/NEON/kernels/NEYOLOLayerKernel.cpp b/src/core/NEON/kernels/NEYOLOLayerKernel.cpp new file mode 100644 index 0000000000..009562b89c --- /dev/null +++ b/src/core/NEON/kernels/NEYOLOLayerKernel.cpp @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h" + +#include "arm_compute/core/CPP/Validate.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/NEON/NEAsymm.h" +#include "arm_compute/core/NEON/NEFixedPoint.h" +#include "arm_compute/core/NEON/NEMath.h" +#include "arm_compute/core/NEON/kernels/detail/NEActivationFunctionDetail.h" +#include "arm_compute/core/QAsymm8.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/Window.h" + +#include + +using namespace arm_compute; +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + ARM_COMPUTE_UNUSED(act_info); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN); + ARM_COMPUTE_RETURN_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationFunction::LOGISTIC); + + const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); + ARM_COMPUTE_RETURN_ERROR_ON(num_classes <= 0); + ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(channel_idx) % (num_classes + 5)) != 0); + + // Checks performed when output is configured + if((output != nullptr) && (output->total_size() != 0)) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + } + + return Status{}; +} + +std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) +{ + if(output != nullptr) + { + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + + // Output auto inizialitation if not yet initialized + auto_init_if_empty(*output, *input); + } + + const bool is_nchw = input->data_layout() == DataLayout::NCHW; + const unsigned int num_elems_processed_per_iteration = is_nchw ? 16 / input->element_size() : 1; + + Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration)); + bool window_changed = false; + + if(output != nullptr) + { + AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); + AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); + window_changed = update_window_and_padding(win, input_access, output_access); + output_access.set_valid_region(win, input->valid_region()); + } + else + { + window_changed = update_window_and_padding(win, AccessWindowHorizontal(input, 0, num_elems_processed_per_iteration)); + } + + Status err = (window_changed) ? 
ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; + return std::make_pair(err, win); +} +} // namespace + +NEYOLOLayerKernel::NEYOLOLayerKernel() + : _func(nullptr), _input(nullptr), _output(nullptr), _act_info(), _num_classes() +{ +} + +void NEYOLOLayerKernel::yolo_layer_fp32_nchw(const Window &window) +{ + Iterator input(_input, window); + Iterator output(_output, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + float32x4_t res = vld1q_f32(reinterpret_cast(input.ptr())); + + const int box_ch_id = id.z() % (_num_classes + 5); + const bool activate = box_ch_id != 2 && box_ch_id != 3; + + // Perform activation + if(activate) + { + auto activation = ::detail::logistic(_act_info); + activation(res); + } + + // Store results + vst1q_f32(reinterpret_cast(output.ptr()), res); + }, + input, output); +} + +void NEYOLOLayerKernel::yolo_layer_fp32_nhwc(const Window &window) +{ + Iterator input(_input, window); + Iterator output(_output, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + float res = *(reinterpret_cast(input.ptr())); + + const int box_ch_id = id.x() % (_num_classes + 5); + const bool activate = box_ch_id != 2 && box_ch_id != 3; + + // Perform activation + if(activate) + { + res = 1.f / (1.f + std::exp(-res)); + } + + // Store result + *(reinterpret_cast(output.ptr())) = res; + }, + input, output); +} + +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +void NEYOLOLayerKernel::yolo_layer_fp16_nchw(const Window &window) +{ + Iterator input(_input, window); + Iterator output(_output, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + float16x8_t res = vld1q_f16(reinterpret_cast(input.ptr())); + + const int box_ch_id = id.z() % (_num_classes + 5); + const bool activate = box_ch_id != 2 && box_ch_id != 3; + + // Perform activation + if(activate) + { + auto activation = ::detail::logistic(_act_info); + activation(res); + } + + // Store results + vst1q_f16(reinterpret_cast(output.ptr()), res); + }, + input, output); +} + +void NEYOLOLayerKernel::yolo_layer_fp16_nhwc(const Window &window) +{ + Iterator input(_input, window); + Iterator output(_output, window); + + execute_window_loop(window, [&](const Coordinates & id) + { + float16_t res = *(reinterpret_cast(input.ptr())); + + const int box_ch_id = id.x() % (_num_classes + 5); + const bool activate = box_ch_id != 2 && box_ch_id != 3; + + // Perform activation + if(activate) + { + res = 1.f / (1.f + std::exp(-res)); + } + + // Store result + *(reinterpret_cast(output.ptr())) = res; + }, + input, output); +} +#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ + +void NEYOLOLayerKernel::configure(ITensor *input, ITensor *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, act_info, num_classes)); + + _input = input; + _output = output; + _act_info = act_info; + _num_classes = num_classes; + + switch(_input->info()->data_type()) + { +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F16: + _func = (_input->info()->data_layout() == DataLayout::NHWC) ? &NEYOLOLayerKernel::yolo_layer_fp16_nhwc : &NEYOLOLayerKernel::yolo_layer_fp16_nchw; + break; +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + case DataType::F32: + _func = (_input->info()->data_layout() == DataLayout::NHWC) ? 
&NEYOLOLayerKernel::yolo_layer_fp32_nhwc : &NEYOLOLayerKernel::yolo_layer_fp32_nchw; + break; + default: + ARM_COMPUTE_ERROR("Element size not supported"); + break; + } + + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), (output == nullptr) ? nullptr : output->info()); + ARM_COMPUTE_ERROR_THROW_ON(win_config.first); + ICPPKernel::configure(win_config.second); +} + +Status NEYOLOLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, act_info, num_classes)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (output == nullptr) ? nullptr : output->clone().get()).first); + + return Status{}; +} + +void NEYOLOLayerKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); + ARM_COMPUTE_ERROR_ON(_func == nullptr); + + (this->*_func)(window); +} diff --git a/src/runtime/NEON/functions/NEYOLOLayer.cpp b/src/runtime/NEON/functions/NEYOLOLayer.cpp new file mode 100644 index 0000000000..e52d054673 --- /dev/null +++ b/src/runtime/NEON/functions/NEYOLOLayer.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h" + +#include "arm_compute/core/NEON/kernels/NEYOLOLayerKernel.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +void NEYOLOLayer::configure(ITensor *input, ITensor *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(input, output, act_info, num_classes); + _kernel = std::move(k); +} + +Status NEYOLOLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info, int32_t num_classes) +{ + return NEYOLOLayerKernel::validate(input, output, act_info, num_classes); +} +} // namespace arm_compute diff --git a/tests/validation/NEON/YOLOLayer.cpp b/tests/validation/NEON/YOLOLayer.cpp new file mode 100644 index 0000000000..926a2dad86 --- /dev/null +++ b/tests/validation/NEON/YOLOLayer.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ActivationFunctionsDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/YOLOLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Tolerance */
+constexpr AbsoluteTolerance<float> tolerance_f32(1e-6f);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr RelativeTolerance<float> tolerance_f16(0.001f);
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+/** Floating point data sets. */
+const auto YOLODataset = combine(combine(combine(combine(framework::dataset::make("InPlace", { false, true }), framework::dataset::make("ActivationFunction",
+                                                                                                                                         ActivationLayerInfo::ActivationFunction::LOGISTIC)),
+                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f })),
+                                         framework::dataset::make("Classes", 40)),
+                                 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }));
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(YOLOLayer)
+
+template <typename T>
+using NEYOLOLayerFixture = YOLOValidationFixture<Tensor, Accessor, NEYOLOLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEYOLOLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallYOLOShapes(), YOLODataset), framework::dataset::make("DataType",
+                                                                                                                DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEYOLOLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeYOLOShapes(), YOLODataset), framework::dataset::make("DataType",
+                                                                                                              DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEYOLOLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallYOLOShapes(), YOLODataset), framework::dataset::make("DataType",
+                                                                                                               DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEYOLOLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeYOLOShapes(), YOLODataset), framework::dataset::make("DataType",
+                                                                                                             DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // YOLOLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
--
cgit v1.2.1
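
Reviewer note: a minimal usage sketch of the new NEYOLOLayer runtime function follows. It is a sketch only; the feature-map shape, the number of boxes and the class count are illustrative assumptions, not values taken from this patch. LOGISTIC is used because it is the only activation accepted by validate_arguments().

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEYOLOLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative dimensions: 16x16 feature map, 4 boxes x (40 classes + 5) channels, NCHW layout
    const int32_t      num_classes  = 40;
    const unsigned int num_channels = 4 * (num_classes + 5);

    Tensor input;
    Tensor output;
    input.allocator()->init(TensorInfo(TensorShape(16U, 16U, num_channels), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(16U, 16U, num_channels), 1, DataType::F32));

    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::LOGISTIC);

    // Optional up-front check of the configuration, then configure and run
    const Status status = NEYOLOLayer::validate(input.info(), output.info(), act_info, num_classes);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1;
    }

    NEYOLOLayer yolo;
    yolo.configure(&input, &output, act_info, num_classes);

    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input with the raw detector output here ...

    yolo.run();
    return 0;
}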
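
The per-element rule both kernel paths implement (logistic on every channel except the two raw box-size channels of each box) can be summarised by the scalar reference below; the dense NHWC buffer layout and the helper name are illustrative assumptions.

#include <cmath>
#include <cstddef>
#include <vector>

// Scalar reference of the YOLO activation: logistic on box x/y, objectness and
// class scores; identity on the raw box width/height channels (box_ch_id 2 and 3).
// Assumes a dense NHWC buffer, i.e. the channel index is the fastest-moving one.
void yolo_layer_reference(const std::vector<float> &in, std::vector<float> &out,
                          int num_channels, int num_classes)
{
    const int box_size = num_classes + 5;
    out.resize(in.size());
    for(std::size_t i = 0; i < in.size(); ++i)
    {
        const int  ch        = static_cast<int>(i % static_cast<std::size_t>(num_channels));
        const int  box_ch_id = ch % box_size;
        const bool activate  = (box_ch_id != 2) && (box_ch_id != 3);
        out[i] = activate ? 1.f / (1.f + std::exp(-in[i])) : in[i];
    }
}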