author    | Georgios Pinitas <georgios.pinitas@arm.com> | 2020-05-12 21:03:56 +0100
committer | TeresaARM <teresa.charlinreyes@arm.com>     | 2020-05-13 08:52:14 +0000
commit    | 2cd7a37af612723b712ca37d5fa99e17a84d7425 (patch)
tree      | a50372dc04aef20d49afffe027b9a6364031b8d2 /arm_compute/core/NEON/wrapper/intrinsics
parent    | a208a808363195978188803b27a0de2a57d3e77d (diff)
download  | ComputeLibrary-2cd7a37af612723b712ca37d5fa99e17a84d7425.tar.gz
COMPMID-3464: Address NESoftmaxLayer failures for QASYMM8_SIGNED
Normalization with the maximum value was causing results to wrap around.
As a work-around, we use saturating intrinsics to perform the operation.
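
To illustrate the failure mode on QASYMM8_SIGNED (int8) data, a minimal standalone sketch (assuming an AArch64/NEON target; the values are illustrative, not taken from the kernel):

#include <arm_neon.h>
#include <cstdio>

int main()
{
    // Softmax first normalizes the input as x - max. For QASYMM8_SIGNED,
    // x = -128 and max = 127 give -255, which does not fit in int8_t.
    const int8x8_t x       = vdup_n_s8(-128);
    const int8x8_t max_val = vdup_n_s8(127);

    const int8x8_t wrapped   = vsub_s8(x, max_val);  // modular arithmetic: wraps to +1
    const int8x8_t saturated = vqsub_s8(x, max_val); // saturating: clamps to -128

    std::printf("vsub_s8:  %d\n", vget_lane_s8(wrapped, 0));   // prints 1
    std::printf("vqsub_s8: %d\n", vget_lane_s8(saturated, 0)); // prints -128
    return 0;
}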
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I719b7ac7ad274dc2ae339bc4a055f9200134ed97
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3184
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/wrapper/intrinsics')
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/sub.h | 30
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sub.h b/arm_compute/core/NEON/wrapper/intrinsics/sub.h
index 870908d253..2c6c96125a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/sub.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/sub.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -62,7 +62,33 @@ VSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
 VSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
-#undef vsub_IMPL
+#undef VSUB_IMPL
+
+#define VQSUB_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vqsub(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VQSUB_IMPL(uint8x8_t, uint8x8_t, vqsub, u8)
+VQSUB_IMPL(int8x8_t, int8x8_t, vqsub, s8)
+VQSUB_IMPL(uint16x4_t, uint16x4_t, vqsub, u16)
+VQSUB_IMPL(int16x4_t, int16x4_t, vqsub, s16)
+VQSUB_IMPL(uint32x2_t, uint32x2_t, vqsub, u32)
+VQSUB_IMPL(int32x2_t, int32x2_t, vqsub, s32)
+VQSUB_IMPL(uint64x1_t, uint64x1_t, vqsub, u64)
+VQSUB_IMPL(int64x1_t, int64x1_t, vqsub, s64)
+
+VQSUB_IMPL(uint8x16_t, uint8x16_t, vqsubq, u8)
+VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8)
+VQSUB_IMPL(uint16x8_t, uint16x8_t, vqsubq, u16)
+VQSUB_IMPL(int16x8_t, int16x8_t, vqsubq, s16)
+VQSUB_IMPL(uint32x4_t, uint32x4_t, vqsubq, u32)
+VQSUB_IMPL(int32x4_t, int32x4_t, vqsubq, s32)
+VQSUB_IMPL(uint64x2_t, uint64x2_t, vqsubq, u64)
+VQSUB_IMPL(int64x2_t, int64x2_t, vqsubq, s64)
+
+#undef VQSUB_IMPL
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_WRAPPER_SUB_H */
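
For reference, each VQSUB_IMPL invocation expands to a type-dispatched overload of wrapper::vqsub; for example, VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8) produces:

inline int8x16_t vqsub(const int8x16_t &a, const int8x16_t &b)
{
    return vqsubq_s8(a, b);
}

A minimal usage sketch of the new wrapper (the normalize helper is hypothetical; the header path and namespaces are as in the patch):

#include "arm_compute/core/NEON/wrapper/intrinsics/sub.h"

using namespace arm_compute;

// Hypothetical helper: overload resolution on int8x16_t selects the
// VQSUB_IMPL-generated wrapper, which forwards to vqsubq_s8 and
// saturates instead of wrapping around.
int8x16_t normalize(int8x16_t data, int8x16_t max_val)
{
    return wrapper::vqsub(data, max_val);
}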