author    Georgios Pinitas <georgios.pinitas@arm.com>  2020-05-12 21:03:56 +0100
committer TeresaARM <teresa.charlinreyes@arm.com>      2020-05-13 08:52:14 +0000
commit    2cd7a37af612723b712ca37d5fa99e17a84d7425 (patch)
tree      a50372dc04aef20d49afffe027b9a6364031b8d2
parent    a208a808363195978188803b27a0de2a57d3e77d (diff)
download  ComputeLibrary-2cd7a37af612723b712ca37d5fa99e17a84d7425.tar.gz
COMPMID-3464: Address NESoftmaxLayer failures for QASYMM8_SIGNED
Normalization with the maximum value was causing results to wrap around.
As a work-around, we use saturating intrinsics to perform the operation.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I719b7ac7ad274dc2ae339bc4a055f9200134ed97
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3184
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
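A minimal standalone illustration (not part of the patch) of the wrap-around the commit message describes, using the underlying NEON intrinsics directly:

#include <arm_neon.h>
#include <cstdio>

int main()
{
    // With QASYMM8_SIGNED data, max - element can exceed the int8 range:
    // 127 - (-128) = 255, which wraps to -1 under plain subtraction.
    int8x8_t vmax  = vdup_n_s8(127);
    int8x8_t velem = vdup_n_s8(-128);

    int8x8_t wrapped   = vsub_s8(vmax, velem);  // wrapping subtract  -> -1
    int8x8_t saturated = vqsub_s8(vmax, velem); // saturating subtract -> 127

    std::printf("wrapped: %d  saturated: %d\n",
                vget_lane_s8(wrapped, 0), vget_lane_s8(saturated, 0));
}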
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/sub.h  | 30
-rw-r--r--  src/core/NEON/kernels/NESoftmaxLayerKernel.cpp  |  2
2 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/sub.h b/arm_compute/core/NEON/wrapper/intrinsics/sub.h
index 870908d253..2c6c96125a 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/sub.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/sub.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,7 +62,33 @@ VSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
VSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-#undef vsub_IMPL
+#undef VSUB_IMPL
+
+#define VQSUB_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vqsub(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VQSUB_IMPL(uint8x8_t, uint8x8_t, vqsub, u8)
+VQSUB_IMPL(int8x8_t, int8x8_t, vqsub, s8)
+VQSUB_IMPL(uint16x4_t, uint16x4_t, vqsub, u16)
+VQSUB_IMPL(int16x4_t, int16x4_t, vqsub, s16)
+VQSUB_IMPL(uint32x2_t, uint32x2_t, vqsub, u32)
+VQSUB_IMPL(int32x2_t, int32x2_t, vqsub, s32)
+VQSUB_IMPL(uint64x1_t, uint64x1_t, vqsub, u64)
+VQSUB_IMPL(int64x1_t, int64x1_t, vqsub, s64)
+
+VQSUB_IMPL(uint8x16_t, uint8x16_t, vqsubq, u8)
+VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8)
+VQSUB_IMPL(uint16x8_t, uint16x8_t, vqsubq, u16)
+VQSUB_IMPL(int16x8_t, int16x8_t, vqsubq, s16)
+VQSUB_IMPL(uint32x4_t, uint32x4_t, vqsubq, u32)
+VQSUB_IMPL(int32x4_t, int32x4_t, vqsubq, s32)
+VQSUB_IMPL(uint64x2_t, uint64x2_t, vqsubq, u64)
+VQSUB_IMPL(int64x2_t, int64x2_t, vqsubq, s64)
+
+#undef VQSUB_IMPL
} // namespace wrapper
} // namespace arm_compute
#endif /* ARM_COMPUTE_WRAPPER_SUB_H */
diff --git a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
index 790c8bacc5..41bf03ad1d 100644
--- a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
@@ -311,7 +311,7 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons
for(; x <= (input_width - vec_size); x += vec_size)
{
auto vec_elements = wrapper::vloadq(in_ptr + x);
- vec_elements = wrapper::vsub(vec_max, vec_elements);
+ vec_elements = wrapper::vqsub(vec_max, vec_elements);
auto vec_elements_flt = convert_int_to_float<float32x4x4_t>(vec_elements);
if(is_log)
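Each VQSUB_IMPL invocation above generates a wrapper::vqsub overload dispatched on the vector type; for example, the s8 quad-register line expands to:

// Expansion of VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8):
inline int8x16_t vqsub(const int8x16_t &a, const int8x16_t &b)
{
    return vqsubq_s8(a, b); // saturating: clamps to [-128, 127] instead of wrapping
}

At the changed call site in NESoftmaxLayerKernel.cpp, wrapper::vqsub(vec_max, vec_elements) therefore resolves to the saturating intrinsic for the element type in use, so for QASYMM8_SIGNED input the max-minus-element normalization saturates at 127 rather than wrapping to a negative value.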