diff options
author | Murray Kornelsen <murray.kornelsen@mail.mcgill.ca> | 2022-07-13 21:22:39 -0400 |
---|---|---|
committer | Pablo Marquez Tello <pablo.tello@arm.com> | 2022-09-14 09:15:03 +0000 |
commit | 926f502ca731fa49bcdf949408ce25728616e5f2 (patch) | |
tree | 7e221103a9c0c5c0e4c054abc07cbdf11c7c7b4e /src/core/NEON/NEMath.inl | |
parent | 6e09e1404c635d948cf20eb6b4b5747dfb6656f2 (diff) | |
download | ComputeLibrary-926f502ca731fa49bcdf949408ce25728616e5f2.tar.gz |
Adding GELU activation
OpenCL implementation uses built in erf.
NEON implementation requires new vectorized erf.
Uses the following approximation:
erf(x) ≈ 1 - 1 / (1 + a1*x + a2*x^2 + a3*x^3 + a4*x^4)^4, valid for x >= 0; negative inputs are handled via the odd symmetry erf(-x) = -erf(x)
a1 = 0.278393, a2 = 0.230389, a3 = 0.000972, a4 = 0.078108
From https://en.wikipedia.org/wiki/Error_function#Numerical_approximations
Signed-off-by: Murray Kornelsen <murray.kornelsen@mail.mcgill.ca>
Change-Id: I2d3964b2c26a4334166b17135f9104bc6324fad2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7921
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Pablo Marquez Tello <pablo.tello@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/NEMath.inl')
-rw-r--r-- | src/core/NEON/NEMath.inl | 50 |
1 files changed, 49 insertions, 1 deletions
diff --git a/src/core/NEON/NEMath.inl b/src/core/NEON/NEMath.inl
index 05cf3013bc..1b0b894153 100644
--- a/src/core/NEON/NEMath.inl
+++ b/src/core/NEON/NEMath.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -166,6 +166,43 @@ inline float32x4_t vexpq_f32(float32x4_t x)
return poly;
}
#ifdef __aarch64__
/** Vectorized error function: computes erf(x) per lane for four floats.
 *
 * Uses the rational approximation (Abramowitz & Stegun 7.1.25):
 *   erf(x) ~= 1 - 1 / (1 + a1*x + a2*x^2 + a3*x^3 + a4*x^4)^4,  x >= 0
 * with a1 = 0.278393, a2 = 0.230389, a3 = 0.000972, a4 = 0.078108,
 * extended to negative inputs through the odd symmetry erf(-x) = -erf(x).
 * Maximum absolute error of the approximation is roughly 5e-4.
 *
 * @param[in] x Input vector.
 * @return Per-lane erf(x).
 */
inline float32x4_t verfq_f32(float32x4_t x)
{
    // Coefficients a1..a4. Deliberately plain const (not static): a
    // function-local static initialized from a NEON intrinsic call is
    // ill-formed in C, and in C++ it would run a thread-safe static-init
    // guard on every call of this hot kernel. As consts the compiler
    // constant-folds the loads anyway.
    const float erffdata[4] = { 0.278393f, 0.230389f, 0.000972f, 0.078108f };
    const float32x4_t coeffdata = vld1q_f32(erffdata);
    const float32x4_t onev      = vdupq_n_f32(1.0f);

    // Lanes with x < 0: final result is negated there (erf is odd).
    const uint32x4_t selector = vcltzq_f32(x);

    // Powers of |x| used by the polynomial.
    const float32x4_t absx  = vabsq_f32(x);
    const float32x4_t absx2 = vmulq_f32(absx, absx);
    const float32x4_t absx3 = vmulq_f32(absx2, absx);
    const float32x4_t absx4 = vmulq_f32(absx2, absx2);

    // denom = 1 + a1*|x| + a2*|x|^2 + a3*|x|^3 + a4*|x|^4
    float32x4_t denom = onev;
    denom = vfmaq_laneq_f32(denom, absx, coeffdata, 0);
    denom = vfmaq_laneq_f32(denom, absx2, coeffdata, 1);
    denom = vfmaq_laneq_f32(denom, absx3, coeffdata, 2);
    denom = vfmaq_laneq_f32(denom, absx4, coeffdata, 3);

    // Raise to the 4th power by squaring twice.
    denom = vmulq_f32(denom, denom);
    denom = vmulq_f32(denom, denom);

    // erf(|x|) = 1 - 1/denom; flip sign on the negative-input lanes.
    const float32x4_t result = vsubq_f32(onev, vdivq_f32(onev, denom));
    return vbslq_f32(selector, vnegq_f32(result), result);
}
#endif // #ifdef __aarch64__
inline float32x4_t vlogq_f32(float32x4_t x)
{
static const int32x4_t CONST_127 = vdupq_n_s32(127); // 127
@@ -517,6 +554,17 @@ inline float16x8_t vexpq_f16(float16x8_t x)
return res;
}
#ifdef __aarch64__
/** Vectorized error function for eight half-precision floats.
 *
 * Widens each f16 half-vector to f32, evaluates verfq_f32, and narrows
 * the results back to f16.
 *
 * @param[in] x Input vector.
 * @return Per-lane erf(x).
 */
inline float16x8_t verfq_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x8_t res = vcombine_f16(vcvt_f16_f32(verfq_f32(x_low)), vcvt_f16_f32(verfq_f32(x_high)));
    return res;
}
#endif // #ifdef __aarch64__
inline
float16x8_t vlogq_f16(float16x8_t x) { const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x)); |