author     Gunes Bayir <gunes.bayir@arm.com>   2022-09-11 15:59:19 +0100
committer  Gunes Bayir <gunes.bayir@arm.com>   2022-09-16 12:38:59 +0000
commit     c4f2743951473f8d97f5a43767fdbb31a4df967c (patch)
tree       26c49b1af8113bb169931f3af5e502904d455a25 /src/core/NEON
parent     0d05b6690fe69c57f63ca43d59b551f074613062 (diff)
download   ComputeLibrary-c4f2743951473f8d97f5a43767fdbb31a4df967c.tar.gz
Optimize Quantized/Integer Bilinear Scale for Neon™
This patch introduces several performance optimizations for the Bilinear
Scale operator with REPLICATE border mode. The changes apply only to NHWC.

This patch
- Reduces the memory footprint by disabling precomputation of indices
  and weights when they are not used
- Rewrites the kernels for QASYMM8/QASYMM8_SIGNED/U8(Uint8)
- Adds S8(Int8) Bilinear Scale for border mode REPLICATE
- Removes the Bilinear Scale SVE kernels for Quantized and Integer types
  and adjusts the heuristics to choose the Neon™ implementation
- Adds new test cases where the input and output of the Bilinear Scale
  operator have different quantization scale and offset

Resolves: COMPMID-5453, COMPMID-5454
Change-Id: I3d251e76e0c6978fd5a0a1795ec62ab536bec93c
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8250
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
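For context, the following is a minimal scalar sketch, not the library's Neon™ kernel, of what bilinear sampling with a REPLICATE border and distinct input/output quantization parameters involves. The QuantInfo struct and all function names here are illustrative only:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Asymmetric quantization parameters: real = scale * (q - offset)
struct QuantInfo
{
    float   scale;
    int32_t offset;
};

inline float dequantize(uint8_t q, const QuantInfo &qi)
{
    return qi.scale * (static_cast<int32_t>(q) - qi.offset);
}

// Requantize with round-to-nearest, ties away from zero (std::lround), then
// clamp to the QASYMM8 range; this mirrors the per-lane effect of a
// vcvta-based vector conversion.
inline uint8_t quantize(float v, const QuantInfo &qi)
{
    const int32_t q = static_cast<int32_t>(std::lround(v / qi.scale)) + qi.offset;
    return static_cast<uint8_t>(std::clamp(q, 0, 255));
}

// Bilinear sample at (x, y) with REPLICATE border: the sampling coordinate is
// clamped to the valid range, so out-of-bounds reads repeat the edge pixels.
uint8_t sample_bilinear_qasymm8(const uint8_t *src, int w, int h, int stride,
                                float x, float y,
                                const QuantInfo &in_q, const QuantInfo &out_q)
{
    x = std::clamp(x, 0.f, static_cast<float>(w - 1));
    y = std::clamp(y, 0.f, static_cast<float>(h - 1));

    const int   x0 = static_cast<int>(x);
    const int   y0 = static_cast<int>(y);
    const int   x1 = std::min(x0 + 1, w - 1);
    const int   y1 = std::min(y0 + 1, h - 1);
    const float dx = x - static_cast<float>(x0);
    const float dy = y - static_cast<float>(y0);

    // Dequantize the four neighbours with the *input* parameters
    const float v00 = dequantize(src[y0 * stride + x0], in_q);
    const float v01 = dequantize(src[y0 * stride + x1], in_q);
    const float v10 = dequantize(src[y1 * stride + x0], in_q);
    const float v11 = dequantize(src[y1 * stride + x1], in_q);

    const float top    = v00 + dx * (v01 - v00);
    const float bottom = v10 + dx * (v11 - v10);

    // Requantize with the *output* parameters, which may differ from the input's
    return quantize(top + dy * (bottom - top), out_q);
}

The last step is where the new test cases apply: in_q and out_q may carry different scale and offset, so the requantization step cannot be folded away.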
Diffstat (limited to 'src/core/NEON')
-rw-r--r--  src/core/NEON/wrapper/intrinsics/cvt.h  20
1 file changed, 18 insertions, 2 deletions
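The hunk below widens the wrapper::vcvt overloads to also accept 32-bit element types and adds AArch64-only vcvta overloads. The distinction matters for requantization: vcvtq_s32_f32 rounds toward zero (truncates), while vcvtaq_s32_f32 rounds to nearest with ties away from zero. As a hedged sketch of how a kernel might use the latter to write back interpolated S8 values (the helper name is illustrative, not the library's API; the intrinsics are standard AArch64 Neon™):

#include <arm_neon.h>
#include <cstdint>

#ifdef __aarch64__
// Convert eight interpolated float lanes to int8 with round-to-nearest
// (ties away from zero) and saturation.
inline int8x8_t float_to_s8_rounded(float32x4_t lo, float32x4_t hi)
{
    const int32x4_t lo_s32 = vcvtaq_s32_f32(lo); // round to nearest, ties away
    const int32x4_t hi_s32 = vcvtaq_s32_f32(hi);
    const int16x8_t s16    = vcombine_s16(vqmovn_s32(lo_s32), vqmovn_s32(hi_s32));
    return vqmovn_s16(s16); // saturating narrow to int8
}
#endif // __aarch64__

Rounding to nearest rather than truncating keeps each requantized lane as close as possible to the float interpolation result, which is presumably why the patch introduces vcvta alongside the truncating vcvt.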
diff --git a/src/core/NEON/wrapper/intrinsics/cvt.h b/src/core/NEON/wrapper/intrinsics/cvt.h
index e52e3dd0c4..baad1319b2 100644
--- a/src/core/NEON/wrapper/intrinsics/cvt.h
+++ b/src/core/NEON/wrapper/intrinsics/cvt.h
@@ -59,19 +59,35 @@ VCVT_TO_F16_IMPL(float16x4_t, float32x4_t, vcvt, f16, f32)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 template <typename T>
-inline typename std::enable_if<std::is_same<T, uint8_t>::value, uint32x4_t>::type
+inline typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, uint32_t>::value, uint32x4_t >::type
 vcvt(const float32x4_t &a)
 {
     return vcvtq_u32_f32(a);
 }
 
 template <typename T>
-inline typename std::enable_if<std::is_same<T, int8_t>::value, int32x4_t>::type
+inline typename std::enable_if < std::is_same<T, int8_t>::value || std::is_same<T, int32_t>::value, int32x4_t >::type
 vcvt(const float32x4_t &a)
 {
     return vcvtq_s32_f32(a);
 }
 
+#ifdef __aarch64__
+template <typename T>
+inline typename std::enable_if<std::is_same<T, uint32_t>::value, uint32x4_t>::type
+vcvta(const float32x4_t &a)
+{
+    return vcvtaq_u32_f32(a);
+}
+
+template <typename T>
+inline typename std::enable_if<std::is_same<T, int32_t>::value, int32x4_t>::type
+vcvta(const float32x4_t &a)
+{
+    return vcvtaq_s32_f32(a);
+}
+#endif //__aarch64__
+
 #if defined(ARM_COMPUTE_ENABLE_BF16)
 /** Convert 2x128-bit floating point vectors into 1x128-bit bfloat16 vector
  *