From e7280585b317f695a932be5981895377e174946d Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Tue, 13 Oct 2020 23:34:09 +0100
Subject: COMPMID-3805: Fix SQRT non-zero output for zero input

- For AArch64, NEActivationLayerKernel uses vsqrt rather than vinvsqrt.
- For non-AArch64, it masks values to ensure zero input results in zero
  output without producing NaN.
- Test cases for FP16 and FP32 positive boundary values are added.

Change-Id: Ic0104ee5d7045059c2e9bd052616a4a3b43a315d
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4150
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 src/core/NEON/kernels/NEActivationLayerKernel.cpp | 39 +++++++++++++---
 src/core/NEON/wrapper/intrinsics/intrinsics.h     |  1 +
 src/core/NEON/wrapper/intrinsics/sqrt.h           | 56 +++++++++++++++++++++++
 tests/validation/NEON/ActivationLayer.cpp         | 51 ++++++++++++++++++++-
 4 files changed, 138 insertions(+), 9 deletions(-)
 create mode 100644 src/core/NEON/wrapper/intrinsics/sqrt.h
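For context, a minimal scalar sketch (not part of the patch) of the defect being fixed: the previous non-AArch64 SQRT path unconditionally added a small delta before the reciprocal-square-root, so a zero input came out slightly non-zero. The delta value is taken from the old code; everything else here is illustrative only.

    // Illustrative sketch of the old behaviour, not library code.
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float delta = 1e-24f;                              // FP32 delta used by the old path
        const float x     = 0.f;
        const float out   = 1.f / (1.f / std::sqrt(x + delta));  // old "1/invsqrt(x + delta)" idea
        std::printf("sqrt(0) ~= %g\n", out);                     // prints ~1e-12 instead of 0
        return 0;
    }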
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 621af51f3c..d80aab7069 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -108,6 +108,23 @@ std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *input
 
     return std::make_pair(Status{}, win);
 }
+
+#ifndef __aarch64__
+inline float32x4_t mask_float_vector(const float32x4_t &in, const uint32x4_t &mask)
+{
+    auto int_in = vreinterpretq_u32_f32(in);
+    return vreinterpretq_f32_u32(wrapper::vand(int_in, mask));
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &mask)
+{
+    auto int_in = vreinterpretq_u16_f16(in);
+    return vreinterpretq_f16_u16(wrapper::vand(int_in, mask));
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* __aarch64__ */
+
 } // namespace
 
 NEActivationLayerKernel::NEActivationLayerKernel()
@@ -252,12 +269,12 @@ NEActivationLayerKernel::activation(const ITensor *src, ITensor *dst, const Wind
     Iterator input(src, win_collapsed);
     Iterator output(dst, win_collapsed);
 
-    // A small delta added to the input to prevent NAN values caused by zeros in inputs to SQRT
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-    const auto delta = wrapper::vdup_n(static_cast<T>(1e-7), ExactTagType {});
-#else  /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-    const auto delta = wrapper::vdup_n(static_cast<T>(1e-24), ExactTagType {});
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+    // In case of non-aarch64, a small delta value is added to the input
+    // to prevent NAN values caused by zeros in inputs to SQRT.
+    // In case of aarch64, we call vsqrt directly, so we don't use delta.
+#ifndef __aarch64__
+    const auto delta = wrapper::vdup_n(static_cast<T>((src->info()->data_type() == DataType::F32 ? 1e-24 : 1e-7)), ExactTagType {});
+#endif /* __aarch64__ */
     const auto const_1 = wrapper::vdup_n(static_cast<T>(1.f), ExactTagType {});
     const auto const_0 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
     const auto const_6 = wrapper::vdup_n(static_cast<T>(6.f), ExactTagType{});
@@ -310,7 +327,15 @@ NEActivationLayerKernel::activation(const ITensor *src, ITensor *dst, const Wind
                     tmp = wrapper::vbsl(wrapper::vcge(vin, const_0), vin, wrapper::vmul(va, wrapper::vsub(wrapper::vexpq(vin), const_1)));
                     break;
                 case ActivationFunction::SQRT:
-                    tmp = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, delta)));
+#ifdef __aarch64__
+                    tmp = wrapper::vsqrt(vin);
+#else  /* __aarch64__ */
+                {
+                    const auto bitmask = wrapper::vceq(vin, wrapper::vdup_n(T(0), ExactTagType{}));
+                    tmp                = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, mask_float_vector(delta, bitmask))));
+                    tmp                = mask_float_vector(tmp, wrapper::vnot(bitmask));
+                }
+#endif /* __aarch64__ */
                     break;
                 case ActivationFunction::SQUARE:
                     tmp = wrapper::vmul(vin, vin);
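To make the non-AArch64 branch easier to follow, here is a self-contained sketch of the same masking idea written with plain NEON intrinsics. It is illustrative only: the kernel composes the library's refined wrapper::vinv/vinvsqrt helpers over templated types, and the function name below is hypothetical.

    #include <arm_neon.h>

    // Sketch: square root that returns exactly 0 for zero lanes, without the
    // Inf/NaN that a reciprocal-square-root estimate produces on 0.
    inline float32x4_t sqrt_zero_safe(float32x4_t vin)
    {
        const uint32x4_t  is_zero = vceqq_f32(vin, vdupq_n_f32(0.f)); // all-ones where vin == 0
        const float32x4_t delta   = vdupq_n_f32(1e-24f);              // delta value from the patch

        // Add delta only to the zero lanes, so the estimate never sees a 0 input.
        const float32x4_t masked_delta =
            vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(delta), is_zero));
        const float32x4_t x = vaddq_f32(vin, masked_delta);

        // sqrt(x) ~= x * rsqrt(x); the kernel instead uses the refined
        // vinv(vinvsqrt(x)) composition, this is just the estimate-level idea.
        const float32x4_t r = vmulq_f32(x, vrsqrteq_f32(x));

        // Clear the lanes that were zero so the output there is exactly 0.
        return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(r), is_zero));
    }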
diff --git a/src/core/NEON/wrapper/intrinsics/intrinsics.h b/src/core/NEON/wrapper/intrinsics/intrinsics.h
index 495321a6a1..070f3c7065 100644
--- a/src/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/src/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -66,6 +66,7 @@
 #include "src/core/NEON/wrapper/intrinsics/round.h"
 #include "src/core/NEON/wrapper/intrinsics/setlane.h"
 #include "src/core/NEON/wrapper/intrinsics/sin.h"
+#include "src/core/NEON/wrapper/intrinsics/sqrt.h"
 #include "src/core/NEON/wrapper/intrinsics/store.h"
 #include "src/core/NEON/wrapper/intrinsics/sub.h"
 #include "src/core/NEON/wrapper/intrinsics/tanh.h"
diff --git a/src/core/NEON/wrapper/intrinsics/sqrt.h b/src/core/NEON/wrapper/intrinsics/sqrt.h
new file mode 100644
index 0000000000..11954cf6c9
--- /dev/null
+++ b/src/core/NEON/wrapper/intrinsics/sqrt.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_WRAPPER_SQRT_H
+#define ARM_COMPUTE_WRAPPER_SQRT_H
+
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VSQRT_IMPL(type, prefix, postfix) \
+    inline type vsqrt(const type &a)      \
+    {                                     \
+        return prefix##_##postfix(a);     \
+    }
+
+VSQRT_IMPL(float32x2_t, vsqrt, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSQRT_IMPL(float16x4_t, vsqrt, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+VSQRT_IMPL(float32x4_t, vsqrtq, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VSQRT_IMPL(float16x8_t, vsqrtq, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+} // namespace wrapper
+} // namespace arm_compute
+
+#endif // __aarch64__
+
+#endif /* ARM_COMPUTE_WRAPPER_SQRT_H */
\ No newline at end of file
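For clarity, this is roughly what one of the VSQRT_IMPL instantiations above expands to (a sketch). The underlying vsqrtq_f32 intrinsic maps to the AArch64 vector FSQRT instruction and is not available on 32-bit NEON, which is why the whole header is guarded by __aarch64__.

    // Approximate expansion of VSQRT_IMPL(float32x4_t, vsqrtq, f32):
    inline float32x4_t vsqrt(const float32x4_t &a)
    {
        return vsqrtq_f32(a); // per-lane square root, AArch64 only
    }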
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 5af78d41f8..9b2cad1db2 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -22,6 +22,8 @@
  * SOFTWARE.
  */
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/Requires.h"
+#include "arm_compute/core/utils/misc/Traits.h"
 #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
 #include "arm_compute/runtime/RuntimeContext.h"
 #include "arm_compute/runtime/Tensor.h"
@@ -123,6 +125,43 @@ const auto NeonActivationFunctionsDataset = concat(datasets::ActivationFunctions
 /** Input data sets. */
 const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), NeonActivationFunctionsDataset),
                                        framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+template <typename T, REQUIRES_TA(arm_compute::utils::traits::is_floating_point<T>::value)>
+void test_float_sqrt_boundary_value()
+{
+    constexpr auto vector_size = uint32_t{ 16 };
+
+    auto data_type = DataType::F32;
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+    data_type = std::is_same<T, half>::value ? DataType::F16 : data_type;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+    const auto boundary_value_vector = std::vector<T>
+    {
+        std::numeric_limits<T>::min(),
+        T(0),
+        std::numeric_limits<T>::epsilon(),
+        std::numeric_limits<T>::max(),
+    };
+
+    // The following size ensures that the whole logic (vector + left-over) is tested
+    // using all boundary values if boundary_value_vector.size() is smaller than vector_size.
+    auto shape = TensorShape{ vector_size + boundary_value_vector.size() };
+    auto info  = ActivationLayerInfo{ ActivationLayerInfo::ActivationFunction::SQRT };
+    auto src   = create_tensor<Tensor>(shape, data_type);
+
+    auto act = NEActivationLayer{};
+    act.configure(&src, nullptr, info);
+    src.allocator()->allocate();
+    library->fill_static_values(Accessor(src), boundary_value_vector);
+    act.run();
+
+    auto reference_src = SimpleTensor<T>{ shape, data_type };
+    library->fill_static_values(reference_src, boundary_value_vector);
+    auto reference_dst = reference::activation_layer(reference_src, info);
+
+    validate(Accessor(src), reference_dst);
+}
 } // namespace
 
 TEST_SUITE(NEON)
@@ -158,6 +197,10 @@ using NEActivationLayerFixture = ActivationValidationFixture<Tensor, Accessor, NEActivationLayer, T>;
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
+TEST_CASE(SqrtBoundaryValue, framework::DatasetMode::ALL)
+{
+    test_float_sqrt_boundary_value<half>();
+}
 FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
                                                                                                       framework::dataset::make("DataType", DataType::F16)))
@@ -165,10 +208,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::Data
     // Validate output
     validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
-TEST_SUITE_END()
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 
 TEST_SUITE(FP32)
+TEST_CASE(SqrtBoundaryValue, framework::DatasetMode::ALL)
+{
+    test_float_sqrt_boundary_value<float>();
+}
 FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
                                                                                                        framework::dataset::make("DataType", DataType::F32)))
-- 
cgit v1.2.1