Diffstat (limited to 'src/core/NEON/NEMath.inl')
-rw-r--r--  src/core/NEON/NEMath.inl  351
1 file changed, 264 insertions(+), 87 deletions(-)
diff --git a/src/core/NEON/NEMath.inl b/src/core/NEON/NEMath.inl
index 5ac62badcc..a5aba0bf23 100644
--- a/src/core/NEON/NEMath.inl
+++ b/src/core/NEON/NEMath.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,6 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#include "src/core/utils/Math.h"
#include "support/ToolchainSupport.h"
#include <cmath>
@@ -28,35 +30,17 @@
namespace arm_compute
{
-/** Exponent polynomial coefficients */
-const std::array<float32x4_t, 8> exp_tab =
-{
- {
- vdupq_n_f32(1.f),
- vdupq_n_f32(0.0416598916054f),
- vdupq_n_f32(0.500000596046f),
- vdupq_n_f32(0.0014122662833f),
- vdupq_n_f32(1.00000011921f),
- vdupq_n_f32(0.00833693705499f),
- vdupq_n_f32(0.166665703058f),
- vdupq_n_f32(0.000195780929062f),
- }
-};
-
/** Logarithm polynomial coefficients */
-const std::array<float32x4_t, 8> log_tab =
-{
- {
- vdupq_n_f32(-2.29561495781f),
- vdupq_n_f32(-2.47071170807f),
- vdupq_n_f32(-5.68692588806f),
- vdupq_n_f32(-0.165253549814f),
- vdupq_n_f32(5.17591238022f),
- vdupq_n_f32(0.844007015228f),
- vdupq_n_f32(4.58445882797f),
- vdupq_n_f32(0.0141278216615f),
- }
-};
+const std::array<float32x4_t, 8> log_tab = {{
+ vdupq_n_f32(-2.29561495781f),
+ vdupq_n_f32(-2.47071170807f),
+ vdupq_n_f32(-5.68692588806f),
+ vdupq_n_f32(-0.165253549814f),
+ vdupq_n_f32(5.17591238022f),
+ vdupq_n_f32(0.844007015228f),
+ vdupq_n_f32(4.58445882797f),
+ vdupq_n_f32(0.0141278216615f),
+}};
/** Sin polynomial coefficients */
constexpr float te_sin_coeff2 = 0.166666666666f; // 1/(2*3)
@@ -65,6 +49,15 @@ constexpr float te_sin_coeff4 = 0.023809523810f; // 1/(6*7)
constexpr float te_sin_coeff5 = 0.013888888889f; // 1/(8*9)
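For reference, these coefficients factor the Taylor series sin(x) = x - x^3/3! + x^5/5! - ... into a nested product, where each factor 1/(2k*(2k+1)) turns the x^(2k-1)/(2k-1)! term into x^(2k+1)/(2k+1)!. A minimal scalar sketch of that nesting, using the literal fractions from the comments (an illustration, not the library's NEON path):

// Scalar sketch:
// sin(x) ~= x * (1 - x^2/(2*3) * (1 - x^2/(4*5) * (1 - x^2/(6*7) * (1 - x^2/(8*9)))))
inline float sin_taylor_sketch(float x)
{
    const float x2  = x * x;
    float       res = 1.f - x2 * (1.f / 72.f);       // 1/(8*9), te_sin_coeff5
    res             = 1.f - x2 * (1.f / 42.f) * res; // 1/(6*7), te_sin_coeff4
    res             = 1.f - x2 * (1.f / 20.f) * res; // 1/(4*5)
    res             = 1.f - x2 * (1.f / 6.f) * res;  // 1/(2*3), te_sin_coeff2
    return x * res;
}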
#ifndef DOXYGEN_SKIP_THIS
+inline float32x4_t prefer_vfmaq_f32(float32x4_t a, float32x4_t b, float32x4_t c)
+{
+#if __ARM_FEATURE_FMA
+ return vfmaq_f32(a, b, c);
+#else // __ARM_FEATURE_FMA
+ return vmlaq_f32(a, b, c);
+#endif // __ARM_FEATURE_FMA
+}
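The fused form rounds once per multiply-add, while vmlaq_f32 may lower to a separate multiply and add with two roundings. A minimal scalar illustration of the difference using std::fmaf, with values chosen (as an example, not from the source) so the intermediate rounding matters:

#include <cmath>
#include <cstdio>

int main()
{
    const float a = 1.f;
    const float b = 1.f + 0x1.0p-12f;
    // b * b is not exactly representable in FP32, so the two forms differ:
    const float unfused = a - b * b;           // product rounded first, then subtracted
    const float fused   = std::fmaf(-b, b, a); // single rounding at the end
    std::printf("unfused = %a, fused = %a\n", unfused, fused);
    return 0;
}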
+
inline float32x4_t vfloorq_f32(float32x4_t val)
{
static const float32x4_t CONST_1 = vdupq_n_f32(1.f);
@@ -85,14 +78,32 @@ inline float32x4_t vroundq_rte_f32(float32x4_t val)
static const int32x4_t CONST_1_INT = vdupq_n_s32(1);
const float32x4_t floor_val = vfloorq_f32(val);
const float32x4_t diff = vsubq_f32(val, floor_val);
+ const float32x4_t fp32_upper_limit =
+ vreinterpretq_f32_u32(vdupq_n_u32(0x4B000000)); // 0x4B000000 = (23U + 127U) << 23U
/*
- * Select the floor value when (diff<0.5 || (diff==0.5 && floor_val%2==0).
- * This condition is checked by vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT) ,vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT) , vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT),CONST_1_INT))))
+ * 1. Select the floor value when (diff < 0.5 || (diff == 0.5 && floor_val % 2 == 0)).
+ * This condition is checked by vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT) ,vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT) , vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT),CONST_1_INT))))
+ *
+ * 2. If the input value (val) is outside the signed int32 range, simply use the input value as the rounded value.
+ * Because:
+ * in this case converting to int32 would saturate.
+ * If the input float value is >= 2^23, the rounded value is exactly equal to the input value.
+ * Because:
+ * in IEEE single-precision floating-point representation the fraction part is 23 bits wide, so once the exponent reaches 23 no digits after the decimal point can be represented; the value is already an integer.
+ * Hence, rounding has no effect:
+ * Threshold upper limit with format |S|E(8 bits)|Fraction(23 bits)| = (23 + 127) << 23 (assuming positive sign). 127 is added because it is the exponent bias, i.e. the encoding of exponent zero.
*/
- return vbslq_f32(vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT), vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT), vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT)))),
- floor_val, vaddq_f32(floor_val, CONST_1_FLOAT));
+ float32x4_t rounded_val = vbslq_f32(
+ vorrq_u32(vcltq_f32(diff, CONST_HALF_FLOAT),
+ vandq_u32(vceqq_f32(diff, CONST_HALF_FLOAT),
+ vmvnq_u32(vtstq_s32(vandq_s32(vcvtq_s32_f32(floor_val), CONST_1_INT), CONST_1_INT)))),
+ floor_val, vaddq_f32(floor_val, CONST_1_FLOAT));
+
+ float32x4_t result = vbslq_f32(vcgeq_f32(vabsq_f32(val), fp32_upper_limit), val, rounded_val);
+
+ return result;
#endif // __aarch64__
}
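A minimal scalar sketch of the same logic, assuming round-half-to-even and the 2^23 guard described in the comments (illustrative, not the vector code above):

#include <cmath>
#include <cstdint>

inline float round_rte_sketch(float val)
{
    // Beyond 2^23 there are no fractional bits left, and the s32 conversion
    // below would saturate, so return the input unchanged.
    if (std::fabs(val) >= 8388608.f) // 2^23
        return val;

    const float floor_val     = std::floor(val);
    const float diff          = val - floor_val;
    const bool  floor_is_even = (static_cast<int32_t>(floor_val) & 1) == 0;

    // Select the floor value when diff < 0.5, or when diff == 0.5 and the
    // floor value is even (ties-to-even); otherwise round up.
    return (diff < 0.5f || (diff == 0.5f && floor_is_even)) ? floor_val : floor_val + 1.f;
}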
@@ -108,8 +119,8 @@ inline float32x2_t vinvsqrt_f32(float32x2_t x)
inline float32x4_t vinvsqrtq_f32(float32x4_t x)
{
float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
- sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
- sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
+ sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
+ sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
return sqrt_reciprocal;
}
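Each refinement line above is one Newton-Raphson step for 1/sqrt(x): vrsqrtsq_f32(x*y, y) evaluates (3 - x*y*y)/2, so the update is y' = y * (3 - x*y*y)/2, roughly doubling the number of accurate bits. A scalar sketch with a crude bit-trick seed standing in for vrsqrteq_f32:

#include <cstdint>
#include <cstring>

inline float invsqrt_sketch(float x)
{
    // Crude initial estimate (a stand-in for the ~8-bit accurate vrsqrteq_f32).
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits = 0x5f3759df - (bits >> 1);
    float y;
    std::memcpy(&y, &bits, sizeof(y));

    // Two Newton-Raphson steps, mirroring the two vrsqrtsq_f32 lines above.
    y = y * (3.f - x * y * y) * 0.5f;
    y = y * (3.f - x * y * y) * 0.5f;
    return y;
}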
@@ -142,30 +153,140 @@ inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const std::array<float32x4_t
return res;
}
+static const uint32_t exp_f32_coeff[] = {
+ 0x3f7ffff6, // x^1: 0x1.ffffecp-1f
+ 0x3efffedb, // x^2: 0x1.fffdb6p-2f
+ 0x3e2aaf33, // x^3: 0x1.555e66p-3f
+ 0x3d2b9f17, // x^4: 0x1.573e2ep-5f
+ 0x3c072010, // x^5: 0x1.0e4020p-7f
+};
+
inline float32x4_t vexpq_f32(float32x4_t x)
{
- static const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)
- static const float32x4_t CONST_INV_LN2 = vdupq_n_f32(1.4426950408f); // 1/ln(2)
- static const float32x4_t CONST_INF = vdupq_n_f32(std::numeric_limits<float>::infinity());
- static const float32x4_t CONST_MAX_INPUT = vdupq_n_f32(88.7f);
- static const float32x4_t CONST_0 = vdupq_n_f32(0.f);
- static const int32x4_t CONST_NEGATIVE_126 = vdupq_n_s32(-126);
-
- // Perform range reduction [-log(2),log(2)]
- int32x4_t m = vcvtq_s32_f32(vmulq_f32(x, CONST_INV_LN2));
- float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), CONST_LN2);
-
- // Polynomial Approximation
- float32x4_t poly = vtaylor_polyq_f32(val, exp_tab);
-
- // Reconstruct
- poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23)));
- poly = vbslq_f32(vcltq_s32(m, CONST_NEGATIVE_126), CONST_0, poly); // Handle underflow
- poly = vbslq_f32(vcgtq_f32(x, CONST_MAX_INPUT), CONST_INF, poly); // Handle overflow
+ const auto c1 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[0]));
+ const auto c2 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[1]));
+ const auto c3 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[2]));
+ const auto c4 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[3]));
+ const auto c5 = vreinterpretq_f32_u32(vdupq_n_u32(exp_f32_coeff[4]));
+
+ const auto shift = vreinterpretq_f32_u32(vdupq_n_u32(0x4b00007f)); // 2^23 + 127 = 0x1.0000fep23f
+ const auto inv_ln2 = vreinterpretq_f32_u32(vdupq_n_u32(0x3fb8aa3b)); // 1 / ln(2) = 0x1.715476p+0f
+ const auto neg_ln2_hi =
+ vreinterpretq_f32_u32(vdupq_n_u32(0xbf317200)); // -ln(2) from bits -1 to -19: -0x1.62e400p-1f
+ const auto neg_ln2_lo =
+ vreinterpretq_f32_u32(vdupq_n_u32(0xb5bfbe8e)); // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f
+
+ const auto inf = vdupq_n_f32(std::numeric_limits<float>::infinity());
+ const auto max_input = vdupq_n_f32(88.37f); // Approximately ln(2^127.5)
+ const auto zero = vdupq_n_f32(0.f);
+ const auto min_input = vdupq_n_f32(-86.64f); // Approximately ln(2^-125)
+
+ // Range reduction:
+ // e^x = 2^n * e^r
+ // where:
+ // n = floor(x / ln(2))
+ // r = x - n * ln(2)
+ //
+ // By adding 2^23 + 127 (shift) to x / ln(2):
+ // * As the FP32 fraction part only has 23 bits, the addition of 2^23 + 127 forces the decimal
+ // part of x / ln(2) out of the result. The integer part of x / ln(2) (i.e. n) + 127 will occupy
+ // the whole fraction part of z in FP32 format.
+ // Subtracting 2^23 + 127 (shift) from z results in the integer part of x / ln(2)
+ // (i.e. n) because the decimal part has been pushed out and lost.
+ // * The addition of 127 makes the FP32 fraction part of z ready to be used as the exponent
+ // in FP32 format. Left-shifting z by 23 bits results in 2^n.
+ const auto z = prefer_vfmaq_f32(shift, x, inv_ln2);
+ const auto n = z - shift;
+ const auto scale = vreinterpretq_f32_u32(vreinterpretq_u32_f32(z) << 23); // 2^n
+
+ // The calculation of n * ln(2) is done using 2 steps to achieve accuracy beyond FP32.
+ // This outperforms a longer Taylor series (3-4 more terms) in both accuracy and performance.
+ const auto r_hi = prefer_vfmaq_f32(x, n, neg_ln2_hi);
+ const auto r = prefer_vfmaq_f32(r_hi, n, neg_ln2_lo);
+
+ // Compute the truncated Taylor series of e^r.
+ // poly = scale * (1 + c1 * r + c2 * r^2 + c3 * r^3 + c4 * r^4 + c5 * r^5)
+ const auto r2 = r * r;
+
+ const auto p1 = c1 * r;
+ const auto p23 = prefer_vfmaq_f32(c2, c3, r);
+ const auto p45 = prefer_vfmaq_f32(c4, c5, r);
+ const auto p2345 = prefer_vfmaq_f32(p23, p45, r2);
+ const auto p12345 = prefer_vfmaq_f32(p1, p2345, r2);
+
+ auto poly = prefer_vfmaq_f32(scale, p12345, scale);
+
+ // Handle underflow and overflow.
+ poly = vbslq_f32(vcltq_f32(x, min_input), zero, poly);
+ poly = vbslq_f32(vcgtq_f32(x, max_input), inf, poly);
return poly;
}
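A scalar sketch of the whole scheme, with plain Taylor coefficients standing in for the tuned exp_f32_coeff values and the under/overflow clamps omitted (illustrative only, not bit-exact):

#include <cmath>
#include <cstdint>
#include <cstring>

inline float exp_sketch(float x)
{
    // n = round(x / ln(2)), extracted via the 2^23 + 127 shift trick.
    const float shift = 0x1.0000fep23f; // 2^23 + 127
    const float z     = std::fmaf(x, 0x1.715476p+0f, shift);
    const float n     = z - shift;

    // scale = 2^n: the low 23 bits of z now hold n + 127, so shifting them
    // into the exponent field yields the FP32 encoding of 2^n.
    uint32_t zbits;
    std::memcpy(&zbits, &z, sizeof(zbits));
    const uint32_t sbits = zbits << 23;
    float scale;
    std::memcpy(&scale, &sbits, sizeof(scale));

    // r = x - n * ln(2), with -ln(2) split into hi/lo parts (Cody-Waite).
    const float r_hi = std::fmaf(n, -0x1.62e400p-1f, x);
    const float r    = std::fmaf(n, -0x1.7f7d1cp-20f, r_hi);

    // e^r on the reduced range via a degree-5 polynomial, then scale by 2^n.
    const float p = r * (1.f + r * (1.f / 2 + r * (1.f / 6 + r * (1.f / 24 + r * (1.f / 120)))));
    return std::fmaf(scale, p, scale); // scale * (1 + p)
}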
+#ifdef __aarch64__
+inline float32x4_t verfq_f32(float32x4_t x)
+{
+ const float32x4_t max_value = vdupq_n_f32(3.9375); // 4 - 8/128
+ const float32x4_t shift = vdupq_n_f32(65536); // 2^16
+ const float32x4_t third = vdupq_n_f32(0.3333333333); // 1/3
+ const float32x4_t one = vdupq_n_f32(1.f);
+ const uint32x4_t max_index = vdupq_n_u32(512);
+ const uint32x4_t sign_mask = vdupq_n_u32(0x7fffffff);
+
+ const float32x4_t x_abs = vabsq_f32(x);
+
+ // erf(x) for x in [0, 3.9375] is approximated as follows:
+ //
+ // erf(x) = erf(r) + scale(r) * d * (1 - r * d - 1/3 * d^2)
+ //
+ // where:
+ // r = floor(x * 128) / 128
+ // d = x - r
+ //
+ // erf(r) and scale(r) are stored in a 513-entry lookup table.
+ // The LUT covers the range from 0 to 4 with a step of 1/128.
+ //
+ // Special cases:
+ // erf(x) = 1 for x > 3.9375
+ // erf(x) = -1 for x < -3.9375
+
+ // Find the LUT indices by rounding the input value to the nearest step of 1/128.
+ //
+ // `shift` is used to push out the 16 LSBs of the input value. Only 7 bits of the fraction part
+ // of the input value are preserved.
+ const float32x4_t z = x_abs + shift;
+ const float32x4_t r = z - shift;
+
+ uint32x4_t index = vreinterpretq_u32_f32(z) - vreinterpretq_u32_f32(shift);
+ index = vminq_u32(index, max_index);
+
+ // Lookup erf(r) and scale(r).
+ const float64_t entry_0 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[0]]);
+ const float64_t entry_1 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[1]]);
+ const float64_t entry_2 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[2]]);
+ const float64_t entry_3 = *reinterpret_cast<const float64_t *>(&erf_f32_lut[index[3]]);
+
+ const float32x4_t entry_01 = vreinterpretq_f32_f64(float64x2_t{entry_0, entry_1});
+ const float32x4_t entry_23 = vreinterpretq_f32_f64(float64x2_t{entry_2, entry_3});
+
+ const float32x4_t erf_r = vuzp1q_f32(entry_01, entry_23);
+ const float32x4_t scale_r = vuzp2q_f32(entry_01, entry_23);
+
+ // Approximate erf(x) = erf(r) + scale(r) * d * (1 - r * d - 1/3 * d^2).
+ const float32x4_t d = x_abs - r;
+ const float32x4_t d2 = d * d;
+
+ const float32x4_t t0 = vfmaq_f32(r, third, d); // t0 = r + 1/3 * d.
+ const float32x4_t t1 = vfmsq_f32(d, d2, t0); // t1 = d - d2 * t0 = d * (1 - r * d - 1/3 * d^2).
+ const float32x4_t erf_x = vfmaq_f32(erf_r, scale_r, t1);
+
+ const float32x4_t clamped = vbslq_f32(x_abs > max_value, one, erf_x);
+ const float32x4_t result = vbslq_f32(sign_mask, clamped, x);
+
+ return result;
+}
+#endif // #ifdef __aarch64__
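A scalar sketch of the scheme described in the comments. The real 513-entry erf_f32_lut is defined elsewhere; here its two per-entry values are rebuilt on the fly, under the assumption that scale(r) is the derivative erf'(r) = 2/sqrt(pi) * exp(-r^2) (an assumption for illustration, not a statement about the table's actual contents):

#include <cmath>

inline float erf_sketch(float x)
{
    const float x_abs = std::fabs(x);
    if (x_abs > 3.9375f)
        return std::copysign(1.f, x); // erf(x) = +/-1 beyond the LUT range

    const float r = std::floor(x_abs * 128.f) / 128.f; // LUT step of 1/128
    const float d = x_abs - r;

    // In the real code these two values come from erf_f32_lut[index].
    const float erf_r   = std::erf(r);
    const float scale_r = 1.1283791671f * std::exp(-r * r); // 2/sqrt(pi) * e^(-r^2), assumed

    const float erf_abs = erf_r + scale_r * d * (1.f - r * d - d * d / 3.f);
    return std::copysign(erf_abs, x);
}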
+
inline float32x4_t vlogq_f32(float32x4_t x)
{
static const int32x4_t CONST_127 = vdupq_n_s32(127); // 127
@@ -193,12 +314,14 @@ inline float32x4_t vtanhq_f32(float32x4_t val)
static const float32x4_t CONST_THR = vdupq_n_f32(5.e-3);
static const float32x4_t CONST_1_3 = vdupq_n_f32(0.3333333f);
- float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);
+ float32x4_t x = vminq_f32(vmaxq_f32(val, CONST_MIN_TANH), CONST_MAX_TANH);
// x * (1 - x^2/3) if |x| < 5.e-3 or (exp2x - 1) / (exp2x + 1) otherwise
- float32x4_t exp2x = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vexpq_f32(vmulq_f32(CONST_2, x)), vmulq_f32(x, x));
- float32x4_t num = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vsubq_f32(exp2x, CONST_1), vmulq_f32(CONST_1_3, exp2x));
- float32x4_t den = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vaddq_f32(exp2x, CONST_1), vsubq_f32(CONST_1, num));
- float32x4_t tanh = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vmulq_f32(num, vinvq_f32(den)), vmulq_f32(x, den));
+ float32x4_t exp2x =
+ vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vexpq_f32(vmulq_f32(CONST_2, x)), vmulq_f32(x, x));
+ float32x4_t num =
+ vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vsubq_f32(exp2x, CONST_1), vmulq_f32(CONST_1_3, exp2x));
+ float32x4_t den = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vaddq_f32(exp2x, CONST_1), vsubq_f32(CONST_1, num));
+ float32x4_t tanh = vbslq_f32(vcgtq_f32(vabsq_f32(x), CONST_THR), vmulq_f32(num, vinvq_f32(den)), vmulq_f32(x, den));
return tanh;
}
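A scalar sketch of the selection logic above (the vector code computes both branches and picks per lane with vbslq_f32; here it is written as a plain branch):

#include <cmath>

inline float tanh_sketch(float val)
{
    const float x = std::fmin(std::fmax(val, -10.f), 10.f);
    if (std::fabs(x) <= 5e-3f)
        return x * (1.f - x * x / 3.f); // small-|x| series: x * (1 - x^2/3)
    const float exp2x = std::exp(2.f * x);
    return (exp2x - 1.f) / (exp2x + 1.f); // (e^(2x) - 1) / (e^(2x) + 1)
}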
@@ -364,30 +487,23 @@ inline float32x4x4_t convert_to_float32x4x4(const int8x16_t &in)
inline void convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const float32x4x3_t &in2, uint8x8x3_t &out)
{
- out.val[0] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[0])),
- vqmovn_u32(vcvtq_u32_f32(in2.val[0]))));
- out.val[1] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[1])),
- vqmovn_u32(vcvtq_u32_f32(in2.val[1]))));
- out.val[2] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[2])),
- vqmovn_u32(vcvtq_u32_f32(in2.val[2]))));
+ out.val[0] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[0])), vqmovn_u32(vcvtq_u32_f32(in2.val[0]))));
+ out.val[1] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[1])), vqmovn_u32(vcvtq_u32_f32(in2.val[1]))));
+ out.val[2] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[2])), vqmovn_u32(vcvtq_u32_f32(in2.val[2]))));
}
inline void convert_float32x4x4_to_uint8x16(const float32x4x4_t &in, uint8x16_t &out)
{
- const auto low = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[0])),
- vqmovn_u32(vcvtq_u32_f32(in.val[1])));
- const auto high = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[2])),
- vqmovn_u32(vcvtq_u32_f32(in.val[3])));
- out = vcombine_u8(vqmovn_u16(low), vqmovn_u16(high));
+ const auto low = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[0])), vqmovn_u32(vcvtq_u32_f32(in.val[1])));
+ const auto high = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[2])), vqmovn_u32(vcvtq_u32_f32(in.val[3])));
+ out = vcombine_u8(vqmovn_u16(low), vqmovn_u16(high));
}
inline void convert_float32x4x4_to_int8x16(const float32x4x4_t &in, int8x16_t &out)
{
- const auto low = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[0])),
- vqmovn_s32(vcvtq_s32_f32(in.val[1])));
- const auto high = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[2])),
- vqmovn_s32(vcvtq_s32_f32(in.val[3])));
- out = vcombine_s8(vqmovn_s16(low), vqmovn_s16(high));
+ const auto low = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[0])), vqmovn_s32(vcvtq_s32_f32(in.val[1])));
+ const auto high = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[2])), vqmovn_s32(vcvtq_s32_f32(in.val[3])));
+ out = vcombine_s8(vqmovn_s16(low), vqmovn_s16(high));
}
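Each of these conversions is a saturating narrowing chain; a scalar sketch of what a single lane goes through in the int8 case (f32 to s32 via the truncating vcvtq_s32_f32, then two saturating vqmovn steps):

#include <algorithm>
#include <cstdint>

inline int8_t narrow_f32_to_s8_sketch(float v)
{
    // Truncating convert; note the real vcvtq_s32_f32 also saturates on
    // overflow, which this plain cast does not.
    const int32_t s32 = static_cast<int32_t>(v);
    const int32_t s16 = std::clamp<int32_t>(s32, INT16_MIN, INT16_MAX); // vqmovn_s32
    const int32_t s8  = std::clamp<int32_t>(s16, INT8_MIN, INT8_MAX);   // vqmovn_s16
    return static_cast<int8_t>(s8);
}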
template <>
@@ -418,6 +534,18 @@ inline float32x4x4_t convert_int_to_float<float32x4x4_t, int8x16_t>(const int8x1
return convert_int8x16_to_float32x4x4(in);
}
+inline float vreduce(const float32x4_t &v)
+{
+ const float32x2_t v0 = vget_high_f32(v);
+ const float32x2_t v1 = vget_low_f32(v);
+ const float32x2_t v_out = vadd_f32(v0, v1);
+
+ const float a = vget_lane_f32(v_out, 0);
+ const float b = vget_lane_f32(v_out, 1);
+
+ return a + b;
+}
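A usage sketch (relying on the intrinsics already in scope in this file); on AArch64 the single instruction vaddvq_f32 computes the same horizontal sum in one step:

// Usage sketch: horizontal sum of a 4-lane partial dot product.
inline float dot4_sketch(const float *a, const float *b)
{
    const float32x4_t prod = vmulq_f32(vld1q_f32(a), vld1q_f32(b));
    return vreduce(prod); // equivalently vaddvq_f32(prod) on AArch64
}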
+
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Exponent polynomial coefficients */
/** Logarithm polynomial coefficients */
@@ -448,8 +576,8 @@ inline float16x4_t vinvsqrt_f16(float16x4_t x)
inline float16x8_t vinvsqrtq_f16(float16x8_t x)
{
float16x8_t sqrt_reciprocal = vrsqrteq_f16(x);
- sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
- sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
+ sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
+ sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
return sqrt_reciprocal;
}
@@ -469,19 +597,44 @@ inline float16x8_t vinvq_f16(float16x8_t x)
return recip;
}
-inline float16x8_t vtanhq_f16(float16x8_t val)
+inline float16x4_t vtanh_rational_approx_f16(float16x4_t x16)
{
- const float16x8_t CONST_1 = vdupq_n_f16(1.f);
- const float16x8_t CONST_2 = vdupq_n_f16(2.f);
- const float16x8_t CONST_MIN_TANH = vdupq_n_f16(-10.f);
- const float16x8_t CONST_MAX_TANH = vdupq_n_f16(10.f);
+ // Calculate the rational approximation part of tanh exactly on a half-register of F16 using F32s
+ // Note: doesn't handle overflows, needs truncating at |x| = 4.508
+ const float32x4_t x = vcvt_f32_f16(x16);
- const float16x8_t x = vminq_f16(vmaxq_f16(val, CONST_MIN_TANH), CONST_MAX_TANH);
- const float16x8_t exp2x = vexpq_f16(vmulq_f16(CONST_2, x));
- const float16x8_t num = vsubq_f16(exp2x, CONST_1);
- const float16x8_t den = vaddq_f16(exp2x, CONST_1);
- const float16x8_t tanh = vmulq_f16(num, vinvq_f16(den));
- return tanh;
+ const float32x4_t ONE = vdupq_n_f32(1.0f);
+ const float32x4_t C1 = vdupq_n_f32(0.43760237f);
+ const float32x4_t C2 = vdupq_n_f32(0.104402f);
+ const float32x4_t C3 = vdupq_n_f32(0.013442706f);
+ const float32x4_t C4 = vdupq_n_f32(0.00073561433f);
+
+ const float32x4_t x2 = vmulq_f32(x, x);
+
+ // Denominator polynomial 1 + C1*x^2 + C3*x^4
+ float32x4_t denom = vfmaq_f32(C1, C3, x2);
+ denom = vfmaq_f32(ONE, x2, denom);
+
+ // Numerator polynomial x*(1 + C2*x^2 + C4*x^4)
+ float32x4_t numer = vfmaq_f32(C2, C4, x2);
+ numer = vfmaq_f32(ONE, x2, numer);
+ numer = vmulq_f32(numer, x);
+
+ return vcvt_f16_f32(vdivq_f32(numer, denom));
+}
+
+inline float16x8_t vtanhq_f16(float16x8_t x)
+{
+ // Split into high/low and use rational approximation on both parts exactly
+ const float16x8_t tanh =
+ vcombine_f16(vtanh_rational_approx_f16(vget_low_f16(x)), vtanh_rational_approx_f16(vget_high_f16(x)));
+
+ // tanh(x) == sign(x) to F16 precision for |x| >= 4.508, use sign after this
+ const float16x8_t ONE = vdupq_n_f16(1.0f);
+ const float16x8_t MAX_X = vdupq_n_f16(4.508f);
+ const auto at_limit = vcageq_f16(x, MAX_X); // |x| >= 4.508
+ const float16x8_t sign_x = vbslq_f16(vclezq_f16(x), -ONE, ONE);
+ return vbslq_f16(at_limit, sign_x, tanh);
}
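A scalar form of the same rational approximation, in F32 arithmetic with the sign fix-up folded in (a sketch mirroring the two functions above):

#include <cmath>

inline float tanh_rational_sketch(float x)
{
    if (std::fabs(x) >= 4.508f)
        return std::copysign(1.f, x); // tanh(x) == sign(x) at F16 precision here

    const float x2    = x * x;
    const float denom = 1.f + x2 * (0.43760237f + x2 * 0.013442706f);       // 1 + C1*x^2 + C3*x^4
    const float numer = x * (1.f + x2 * (0.104402f + x2 * 0.00073561433f)); // x * (1 + C2*x^2 + C4*x^4)
    return numer / denom;
}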
inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const std::array<float16x8_t, 8> &coeffs)
@@ -505,6 +658,17 @@ inline float16x8_t vexpq_f16(float16x8_t x)
return res;
}
+#ifdef __aarch64__
+inline float16x8_t verfq_f16(float16x8_t x)
+{
+ const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
+ const float32x4_t x_low = vcvt_f32_f16(vget_low_f16(x));
+
+ const float16x8_t res = vcombine_f16(vcvt_f16_f32(verfq_f32(x_low)), vcvt_f16_f32(verfq_f32(x_high)));
+ return res;
+}
+#endif // #ifdef __aarch64__
+
inline float16x8_t vlogq_f16(float16x8_t x)
{
const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
@@ -550,6 +714,19 @@ inline float16x4_t vsin_f16(float16x4_t val)
return vcvt_f16_f32(vcombine_f32(res_low, res_high));
}
+inline float16_t vreduce(const float16x8_t &v)
+{
+ const float16x4_t v0 = vget_high_f16(v);
+ const float16x4_t v1 = vget_low_f16(v);
+ const float16x4_t v_out = vadd_f16(v0, v1);
+
+ const float16_t a = vget_lane_f16(v_out, 0);
+ const float16_t b = vget_lane_f16(v_out, 1);
+ const float16_t c = vget_lane_f16(v_out, 2);
+ const float16_t d = vget_lane_f16(v_out, 3);
+
+ return a + b + c + d;
+}
#endif /* DOXYGEN_SKIP_THIS */
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace arm_compute