From 168d6a83b8c5c66c6a961c2b9d747685319b56dc Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Tue, 3 May 2022 17:15:42 +0100
Subject: Use svcreate instead of list initializations.

Partially resolves COMPMID-5250 when building with SVE2.

Change-Id: I16bd74d4cd6c70371efd8235c507ba5e7f8f906f
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7498
Tested-by: Arm Jenkins
Reviewed-by: Pablo Marquez Tello
Comments-Addressed: Arm Jenkins
---
 .../kernels/activation/generic/sve2/qasymm8.cpp    | 60 ++++++++--------
 .../activation/generic/sve2/qasymm8_signed.cpp     | 60 ++++++++--------
 .../kernels/activation/generic/sve2/qsymm16.cpp    | 30 +++--------
 src/cpu/kernels/cast/generic/neon/bfloat16.cpp     |  1 +
 .../elementwise_binary/generic/sve/impl.cpp        |  2 +-
 5 files changed, 48 insertions(+), 105 deletions(-)

(limited to 'src/cpu/kernels')

diff --git a/src/cpu/kernels/activation/generic/sve2/qasymm8.cpp b/src/cpu/kernels/activation/generic/sve2/qasymm8.cpp
index de513679d5..2fa8dee5f1 100644
--- a/src/cpu/kernels/activation/generic/sve2/qasymm8.cpp
+++ b/src/cpu/kernels/activation/generic/sve2/qasymm8.cpp
@@ -125,16 +125,11 @@ void sve2_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
                     // De-quantize
                     const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                     // Perform activation
-                    const svfloat32x4_t tmp_dep =
-                    {
-                        { {
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 0))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 1))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 2))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 3))))),
-                        }
-                        }
-                    };
+                    const svfloat32x4_t tmp_dep = svcreate4_f32(svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 0))))),
+                                                                svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 1))))),
+                                                                svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 2))))),
+                                                                svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 3))))));
+
                     // Re-quantize to new output space
                     tmp = svquantize_z(pg, tmp_dep, qi_out);
                 }
@@ -143,16 +138,11 @@ void sve2_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
                     // De-quantize
                     const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                     // Perform activation
-                    const svfloat32x4_t tmp_dep =
-                    {
-                        { {
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 0), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 1), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 2), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 3), vb_f32))),
-                        }
-                        }
-                    };
+                    const svfloat32x4_t tmp_dep = svcreate4_f32(svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 0), vb_f32))),
+                                                                svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 1), vb_f32))),
+                                                                svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 2), vb_f32))),
+                                                                svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 3), vb_f32))));
+
                     // Re-quantize to new output space
                     tmp = svquantize_z(pg, tmp_dep, qi_out);
                 }
@@ -161,16 +151,11 @@ void sve2_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
                     // De-quantize
                     const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                     // Perform activation
-                    const svfloat32x4_t tmp_dep =
-                    {
-                        { {
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 0), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 0), const_3_f32))))),
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 1), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 1), const_3_f32))))),
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 2), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 2), const_3_f32))))),
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 3), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 3), const_3_f32))))),
-                        }
-                        }
-                    };
+                    const svfloat32x4_t tmp_dep = svcreate4_f32(svmul_f32_z(pg, svget4_f32(vin_deq, 0), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg,
+                                                                svget4_f32(vin_deq, 0), const_3_f32))))),
+                                                                svmul_f32_z(pg, svget4_f32(vin_deq, 1), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 1), const_3_f32))))),
+                                                                svmul_f32_z(pg, svget4_f32(vin_deq, 2), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 2), const_3_f32))))),
+                                                                svmul_f32_z(pg, svget4_f32(vin_deq, 3), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 3), const_3_f32))))));
                     // Re-quantize to new output space
                     tmp = svquantize_z(pg, tmp_dep, qi_out);
                 }
@@ -180,16 +165,11 @@ void sve2_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
                     svint32x4_t tmp_dep;

                     // Expand to int32
-                    const svint32x4_t vin_s32 =
-                    {
-                        { {
-                            svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(vin))),
-                            svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(vin))),
-                            svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(vin))),
-                            svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(vin))),
-                        }
-                        }
-                    };
+                    const svint32x4_t vin_s32 = svcreate4_s32(
+                        svreinterpret_s32_u32(svmovlb_u32(svmovlb_u16(vin))),
+                        svreinterpret_s32_u32(svmovlt_u32(svmovlb_u16(vin))),
+                        svreinterpret_s32_u32(svmovlb_u32(svmovlt_u16(vin))),
+                        svreinterpret_s32_u32(svmovlt_u32(svmovlt_u16(vin))));

                     // Compare elements to input offset
                     if(qi_in.scale >= 0)
diff --git a/src/cpu/kernels/activation/generic/sve2/qasymm8_signed.cpp b/src/cpu/kernels/activation/generic/sve2/qasymm8_signed.cpp
index 906ec877f9..da4f5e222c 100644
--- a/src/cpu/kernels/activation/generic/sve2/qasymm8_signed.cpp
+++ b/src/cpu/kernels/activation/generic/sve2/qasymm8_signed.cpp
@@ -125,16 +125,11 @@ void sve2_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
                     // De-quantize
                     const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                     // Perform activation
-                    const svfloat32x4_t tmp_dep =
-                    {
-                        { {
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 0))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 1))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 2))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 3))))),
-                        }
-                        }
-                    };
+                    const svfloat32x4_t tmp_dep = svcreate4_f32(
+                        svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 0))))),
+                        svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 1))))),
+                        svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 2))))),
+                        svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget4_f32(vin_deq, 3))))));
                     // Re-quantize to new output space
                     tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
                 }
@@ -143,16 +138,11 @@ void sve2_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
                     // De-quantize
                     const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                     // Perform activation
-                    const svfloat32x4_t tmp_dep =
-                    {
-                        { {
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 0), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 1), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 2), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 3), vb_f32))),
-                        }
-                        }
-                    };
+                    const svfloat32x4_t tmp_dep = svcreate4_f32(
+                        svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 0), vb_f32))),
+                        svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 1), vb_f32))),
+                        svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 2), vb_f32))),
+                        svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget4_f32(vin_deq, 3), vb_f32))));
                     // Re-quantize to new output space
                     tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
                 }
@@ -161,16 +151,11 @@ void sve2_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
                     // De-quantize
                     const auto vin_deq = svdequantize_z(pg, vin, qi_in);
                     // Perform activation
-                    const svfloat32x4_t tmp_dep =
-                    {
-                        { {
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 0), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 0), const_3_f32))))),
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 1), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 1), const_3_f32))))),
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 2), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 2), const_3_f32))))),
-                            svmul_f32_z(pg, svget4_f32(vin_deq, 3), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 3), const_3_f32))))),
-                        }
-                        }
-                    };
+                    const svfloat32x4_t tmp_dep = svcreate4_f32(
+                        svmul_f32_z(pg, svget4_f32(vin_deq, 0), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 0), const_3_f32))))),
+                        svmul_f32_z(pg, svget4_f32(vin_deq, 1), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 1), const_3_f32))))),
+                        svmul_f32_z(pg, svget4_f32(vin_deq, 2), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 2), const_3_f32))))),
+                        svmul_f32_z(pg, svget4_f32(vin_deq, 3), svmul_f32_z(pg, const_inv_6_f32, svmin_f32_z(pg, const_6_f32, svmax_f32_z(pg, const_0_f32, svadd_f32_z(pg, svget4_f32(vin_deq, 3), const_3_f32))))));
                     // Re-quantize to new output space
                     tmp = svquantize_signed_z(pg, tmp_dep, qi_out);
                 }
@@ -180,16 +165,11 @@ void sve2_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
                     svint32x4_t tmp_dep;

                     // Expand to int32
-                    const svint32x4_t vin_s32 =
-                    {
-                        { {
-                            svmovlb_s32(svmovlb_s16(vin)),
-                            svmovlt_s32(svmovlb_s16(vin)),
-                            svmovlb_s32(svmovlt_s16(vin)),
-                            svmovlt_s32(svmovlt_s16(vin)),
-                        }
-                        }
-                    };
+                    const svint32x4_t vin_s32 = svcreate4_s32(
+                        svmovlb_s32(svmovlb_s16(vin)),
+                        svmovlt_s32(svmovlb_s16(vin)),
+                        svmovlb_s32(svmovlt_s16(vin)),
+                        svmovlt_s32(svmovlt_s16(vin)));

                     // Compare elements to input offset
                     if(qi_in.scale >= 0)
diff --git a/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp b/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
index ca6534604f..1d6f68273a 100644
--- a/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
+++ b/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
@@ -72,14 +72,8 @@ void sve2_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
                     // De-quantize
                     auto vin_deq = svdequantize_qsymm16_z(pg, vin, qi_in.scale);
                     // Perform activation
-                    const svfloat32x2_t tmp_dep =
-                    {
-                        { {
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget2_f32(vin_deq, 0))))),
-                            svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget2_f32(vin_deq, 1))))),
-                        }
-                        }
-                    };
+                    const svfloat32x2_t tmp_dep = svcreate2_f32(svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget2_f32(vin_deq, 0))))),
+                                                                svdiv_f32_z(pg, vconst_1, svadd_f32_z(pg, vconst_1, svexp_f32_z(pg, svneg_f32_z(pg, svget2_f32(vin_deq, 1))))));
                     // Re-quantize to new output space
                     tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
                 }
@@ -88,14 +82,8 @@ void sve2_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
                     // De-quantize
                     auto vin_deq = svdequantize_qsymm16_z(pg, vin, qi_in.scale);
                     // Perform activation
-                    const svfloat32x2_t tmp_dep =
-                    {
-                        { {
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget2_f32(vin_deq, 0), vb_f32))),
-                            svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget2_f32(vin_deq, 1), vb_f32))),
-                        }
-                        }
-                    };
+                    const svfloat32x2_t tmp_dep = svcreate2_f32(svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget2_f32(vin_deq, 0), vb_f32))),
+                                                                svmul_f32_z(pg, va_f32, svtanh_f32_z(pg, svmul_f32_z(pg, svget2_f32(vin_deq, 1), vb_f32))));
                     // Re-quantize to new output space
                     tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
                 }
@@ -104,14 +92,8 @@ void sve2_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
                     // De-quantize
                     auto vin_deq = svdequantize_qsymm16_z(pg, vin, qi_in.scale);
                     // Perform activation
-                    const svfloat32x2_t tmp_dep =
-                    {
-                        { {
-                            svmin_f32_z(pg,va_f32, svmax_f32_z(pg,vb_f32, svget2_f32(vin_deq, 0))),
-                            svmin_f32_z(pg,va_f32, svmax_f32_z(pg,vb_f32, svget2_f32(vin_deq, 1))),
-                        }
-                        }
-                    };
+                    const svfloat32x2_t tmp_dep = svcreate2_f32(svmin_f32_z(pg, va_f32, svmax_f32_z(pg, vb_f32, svget2_f32(vin_deq, 0))),
+                                                                svmin_f32_z(pg, va_f32, svmax_f32_z(pg, vb_f32, svget2_f32(vin_deq, 1))));
                     // Re-quantize to new output space
                     tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
                 }
diff --git a/src/cpu/kernels/cast/generic/neon/bfloat16.cpp b/src/cpu/kernels/cast/generic/neon/bfloat16.cpp
index b15584b0aa..aac4ef4ca0 100644
--- a/src/cpu/kernels/cast/generic/neon/bfloat16.cpp
+++ b/src/cpu/kernels/cast/generic/neon/bfloat16.cpp
@@ -24,6 +24,7 @@
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)

 #include "arm_compute/core/TensorInfo.h"
+#include "src/core/NEON/wrapper/wrapper.h"
 #include "src/cpu/kernels/CpuCastKernel.h"
 #include "src/cpu/kernels/cast/list.h"
 #include "support/SaturateCast.h"
diff --git a/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp b/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp
index 40564d25f9..2a8b155d14 100644
--- a/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp
+++ b/src/cpu/kernels/elementwise_binary/generic/sve/impl.cpp
@@ -244,7 +244,7 @@ template void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window);
 template void elementwise_arithmetic_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window);

-template
+template
 void elementwise_comparison_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
 {
     static_assert(sizeof(InputScalarType) >= sizeof(OutputScalarType), "input data type's width should be equal to or greater than output data type's width");
-- 
cgit v1.2.1
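
Context for the pattern above: Arm SVE vector and tuple types such as svfloat32x4_t are
sizeless, so the brace initialization this patch removes is a compiler extension rather
than portable C++, whereas the ACLE svcreateN/svgetN intrinsics exist precisely to build
and unpack these tuples. Below is a minimal, hypothetical sketch of the change, mirroring
the qsymm16 bounded-ReLU hunk; clamp2 and its bounds are invented for illustration and
are not library code, and an SVE-capable toolchain is assumed.

    // sketch.cpp - hypothetical illustration, not part of the patch.
    // Build: g++ -c -march=armv8.2-a+sve sketch.cpp
    #include <arm_sve.h>

    // Clamps both vectors of a two-vector tuple to [b, a] under predicate pg.
    svfloat32x2_t clamp2(svbool_t pg, svfloat32x2_t vin_deq, float a, float b)
    {
        const svfloat32_t va_f32 = svdup_n_f32(a); // upper bound
        const svfloat32_t vb_f32 = svdup_n_f32(b); // lower bound

        // Form the patch removes (brace-initializing a sizeless tuple type,
        // which not every compiler accepts):
        //     const svfloat32x2_t tmp_dep = { { { x0, x1 } } };
        // Portable ACLE construction the patch introduces instead:
        return svcreate2_f32(svmin_f32_z(pg, va_f32, svmax_f32_z(pg, vb_f32, svget2_f32(vin_deq, 0))),
                             svmin_f32_z(pg, va_f32, svmax_f32_z(pg, vb_f32, svget2_f32(vin_deq, 1))));
    }

The same substitution applies mechanically to the four-vector cases: each aggregate
initializer becomes one svcreate4_f32 or svcreate4_s32 call with the former tuple
members as arguments, leaving the computed values unchanged.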