author     Georgios Pinitas <georgios.pinitas@arm.com>  2018-12-03 14:30:05 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-01-14 17:53:22 +0000
commit     5a5945387e70f62e6e1e95a177fae261d7570443 (patch)
tree       ff8bd61c2e071b5a0b923f4a0d1bef72486435e9 /arm_compute/core/NEON/wrapper/intrinsics/bsl.h
parent     dea2d2d58fe3a742e6f66fe50befbe0044e15ad1 (diff)
download   ComputeLibrary-5a5945387e70f62e6e1e95a177fae261d7570443.tar.gz
COMPMID-1809: Remove padding in NEGEMMConvolutionLayer 64-bit path.
Change-Id: I1806591a2c73a1f057f13d8c6107d7b9796a82c8
Reviewed-on: https://review.mlplatform.org/370
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/wrapper/intrinsics/bsl.h')
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/bsl.h | 40
1 file changed, 20 insertions(+), 20 deletions(-)
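
As a quick orientation before the diff (a hand-expanded sketch, not text from the patch): under the new five-argument form of the macro, an instantiation such as VBSL_IMPL(float32x2_t, float32x2_t, uint32x2_t, vbsl, f32) expands to

    inline float32x2_t vbsl(const uint32x2_t &a, const float32x2_t &b, const float32x2_t &c)
    {
        return vbsl_f32(a, b, c);
    }

so the select mask a is always the unsigned integer vector of matching width, as the NEON vbsl/vbslq intrinsics require.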
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/bsl.h b/arm_compute/core/NEON/wrapper/intrinsics/bsl.h
index 9831b4b842..38f9d5f171 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/bsl.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/bsl.h
@@ -30,32 +30,32 @@ namespace arm_compute
{
namespace wrapper
{
-#define VBSL_IMPL(vctype, vtype, prefix, postfix) \
- inline vtype vbsl(const vctype &a, const vtype &b, const vtype &c) \
- { \
- return prefix##_##postfix(a, b, c); \
+#define VBSL_IMPL(stype, vtype, ctype, prefix, postfix) \
+ inline vtype vbsl(const ctype &a, const vtype &b, const vtype &c) \
+ { \
+ return prefix##_##postfix(a, b, c); \
}
-VBSL_IMPL(uint8x8_t, uint8x8_t, vbsl, u8)
-VBSL_IMPL(uint8x8_t, int8x8_t, vbsl, s8)
-VBSL_IMPL(uint16x4_t, uint16x4_t, vbsl, u16)
-VBSL_IMPL(uint16x4_t, int16x4_t, vbsl, s16)
-VBSL_IMPL(uint32x2_t, uint32x2_t, vbsl, u32)
-VBSL_IMPL(uint32x2_t, int32x2_t, vbsl, s32)
-VBSL_IMPL(uint32x2_t, float32x2_t, vbsl, f32)
+VBSL_IMPL(uint8_t, uint8x8_t, uint8x8_t, vbsl, u8)
+VBSL_IMPL(int8_t, int8x8_t, uint8x8_t, vbsl, s8)
+VBSL_IMPL(uint16_t, uint16x4_t, uint16x4_t, vbsl, u16)
+VBSL_IMPL(int16_t, int16x4_t, uint16x4_t, vbsl, s16)
+VBSL_IMPL(uint32_t, uint32x2_t, uint32x2_t, vbsl, u32)
+VBSL_IMPL(int32_t, int32x2_t, uint32x2_t, vbsl, s32)
+VBSL_IMPL(float32x2_t, float32x2_t, uint32x2_t, vbsl, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-VBSL_IMPL(uint16x4_t, float16x4_t, vbsl, f16)
+VBSL_IMPL(float16x4_t, float16x4_t, uint16x4_t, vbsl, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-VBSL_IMPL(uint8x16_t, uint8x16_t, vbslq, u8)
-VBSL_IMPL(uint8x16_t, int8x16_t, vbslq, s8)
-VBSL_IMPL(uint16x8_t, uint16x8_t, vbslq, u16)
-VBSL_IMPL(uint16x8_t, int16x8_t, vbslq, s16)
-VBSL_IMPL(uint32x4_t, uint32x4_t, vbslq, u32)
-VBSL_IMPL(uint32x4_t, int32x4_t, vbslq, s32)
-VBSL_IMPL(uint32x4_t, float32x4_t, vbslq, f32)
+VBSL_IMPL(uint8_t, uint8x16_t, uint8x16_t, vbslq, u8)
+VBSL_IMPL(int8_t, int8x16_t, uint8x16_t, vbslq, s8)
+VBSL_IMPL(uint16_t, uint16x8_t, uint16x8_t, vbslq, u16)
+VBSL_IMPL(int16_t, int16x8_t, uint16x8_t, vbslq, s16)
+VBSL_IMPL(uint32_t, uint32x4_t, uint32x4_t, vbslq, u32)
+VBSL_IMPL(int32_t, int32x4_t, uint32x4_t, vbslq, s32)
+VBSL_IMPL(float32x4_t, float32x4_t, uint32x4_t, vbslq, f32)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-VBSL_IMPL(uint16x8_t, float16x8_t, vbslq, f16)
+VBSL_IMPL(float16x8_t, float16x8_t, uint16x8_t, vbslq, f16)
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#undef VBSL_IMPL
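
For reference, a minimal usage sketch of the reworked wrapper (hypothetical caller code, not part of this commit; clamp_to_zero is an invented name). It shows why the control type (ctype) differs from the value type: the NEON bitwise-select intrinsics take an unsigned integer mask of matching width, e.g. vbslq_f32(uint32x4_t, float32x4_t, float32x4_t).

    #include <arm_neon.h>
    #include "arm_compute/core/NEON/wrapper/intrinsics/bsl.h"

    // Per-lane max(v, 0): keep lanes of v where the mask is all-ones,
    // take zero where it is all-zeros.
    float32x4_t clamp_to_zero(float32x4_t v)
    {
        const float32x4_t zero = vdupq_n_f32(0.f);
        // vcgtq_f32 yields an all-ones/all-zeros uint32x4_t mask per lane.
        const uint32x4_t  mask = vcgtq_f32(v, zero);
        // Resolves to the f32 VBSL_IMPL instantiation above, i.e. vbslq_f32.
        return arm_compute::wrapper::vbsl(mask, v, zero);
    }

Note that the new first macro argument (stype) is not referenced in the expansion; it appears to be kept for uniformity with the other wrapper intrinsic macros touched by this change.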