From aaba4c626bcc6365e0108130633ce43fafe9da45 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 22 Aug 2018 16:20:21 +0100
Subject: COMPMID-1188: Add support for activation in NEBatchNormalization.

Change-Id: I1e206574dac6433218db6e138adb7bf5f66a536d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/145222
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 arm_compute/core/NEON/wrapper/intrinsics/min.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arm_compute/core/NEON/wrapper/intrinsics/min.h')

diff --git a/arm_compute/core/NEON/wrapper/intrinsics/min.h b/arm_compute/core/NEON/wrapper/intrinsics/min.h
index ae79631190..5ea2068f24 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/min.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/min.h
@@ -43,6 +43,9 @@ VMIN_IMPL(int16_t, int16x4_t, vmin, s16)
 VMIN_IMPL(uint32_t, uint32x2_t, vmin, u32)
 VMIN_IMPL(int32_t, int32x2_t, vmin, s32)
 VMIN_IMPL(float, float32x2_t, vmin, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMIN_IMPL(float16_t, float16x4_t, vmin, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 VMIN_IMPL(uint8_t, uint8x16_t, vminq, u8)
 VMIN_IMPL(int8_t, int8x16_t, vminq, s8)
@@ -51,6 +54,9 @@ VMIN_IMPL(int16_t, int16x8_t, vminq, s16)
 VMIN_IMPL(uint32_t, uint32x4_t, vminq, u32)
 VMIN_IMPL(int32_t, int32x4_t, vminq, s32)
 VMIN_IMPL(float, float32x4_t, vminq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VMIN_IMPL(float16_t, float16x8_t, vminq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 #undef VMIN_IMPL
 } // namespace wrapper
-- 
cgit v1.2.1
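
Editor's note: the patch above instantiates the wrapper's VMIN_IMPL macro for float16 vectors, gated on __ARM_FEATURE_FP16_VECTOR_ARITHMETIC, so the type-generic wrapper::vmin overload set also covers F16 (needed for the bounded activations fused into NEBatchNormalization). The sketch below is a minimal, self-contained illustration of that pattern; the macro body and the clamp_upper helper are assumptions for illustration, not code copied from arm_compute.

// Minimal sketch of the VMIN_IMPL pattern (assumed expansion, not verbatim
// from arm_compute/core/NEON/wrapper/intrinsics/min.h).
#include <arm_neon.h>

namespace wrapper
{
// Each instantiation emits an inline overload that forwards to the matching
// NEON intrinsic, e.g. vminq_f32 or vminq_f16.
#define VMIN_IMPL(stype, vtype, prefix, postfix)      \
    inline vtype vmin(const vtype &a, const vtype &b) \
    {                                                 \
        return prefix##_##postfix(a, b);              \
    }

VMIN_IMPL(float, float32x4_t, vminq, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// Only available when the compiler exposes FP16 vector arithmetic.
VMIN_IMPL(float16_t, float16x8_t, vminq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

#undef VMIN_IMPL
} // namespace wrapper

// Hypothetical usage: clamp each lane of an F32 vector to an upper bound via
// the type-generic wrapper, the per-lane operation a capped activation performs.
float32x4_t clamp_upper(float32x4_t x, float upper_bound)
{
    return wrapper::vmin(x, vdupq_n_f32(upper_bound));
}

Because the overloads are selected on the vector type, kernel templates can call wrapper::vmin uniformly and pick up the F16 path automatically on targets where __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is defined.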