aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/core/NEON/NEAsymm.inl
diff options
context:
space:
mode:
authorMichalis Spyrou <michalis.spyrou@arm.com>2019-11-28 11:31:23 +0000
committerMichalis Spyrou <michalis.spyrou@arm.com>2019-12-05 11:58:51 +0000
commit8d4d1b85bc57d5f76f3939bb422e44df68dc2342 (patch)
tree8de9dd3c7bec7ea59caa4d6e70b3bbeac877c8b8 /arm_compute/core/NEON/NEAsymm.inl
parent25a6b67cd8188e5a968c0c89adf99f874c7eecb4 (diff)
downloadComputeLibrary-8d4d1b85bc57d5f76f3939bb422e44df68dc2342.tar.gz
COMPMID-2796: Add support for QASYMM8_SIGNED in NEActivationLayer and NEPReluLayer
Change-Id: I089fd19a6beab7779d690bc9ace327f661c2753d Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com> Reviewed-on: https://review.mlplatform.org/c/2407 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/NEAsymm.inl')
-rw-r--r--arm_compute/core/NEON/NEAsymm.inl33
1 files changed, 33 insertions, 0 deletions
diff --git a/arm_compute/core/NEON/NEAsymm.inl b/arm_compute/core/NEON/NEAsymm.inl
index a98c6aa390..71205e0403 100644
--- a/arm_compute/core/NEON/NEAsymm.inl
+++ b/arm_compute/core/NEON/NEAsymm.inl
@@ -56,4 +56,37 @@ inline qasymm8x16_t vmlaq_qasymm8(qasymm8x16_t vd, float32x4_t vs, float32x4_t v
// convert uint16 vectors to uint8 vectors (with saturation)
return vcombine_u8(vqmovn_u16(vd_low_u16x8), vqmovn_u16(vd_high_u16x8));
}
+inline qasymm8x16_signed_t vmlaq_qasymm8_signed(qasymm8x16_signed_t vd, float32x4_t vs, float32x4_t vo)
+{
+    // Convert int8 vectors to int16 vectors (split into low/high halves; signed widening)
+    const int8x8_t vd_low = vget_low_s8(vd);
+    const int8x8_t vd_high = vget_high_s8(vd);
+    int16x8_t vd_low_s16x8 = vmovl_s8(vd_low);
+    int16x8_t vd_high_s16x8 = vmovl_s8(vd_high);
+    // Convert int16 vectors to int32 vectors (four lanes of 4 elements each)
+    int32x4_t A_s32x4 = vmovl_s16(vget_low_s16(vd_low_s16x8));
+    int32x4_t B_s32x4 = vmovl_s16(vget_high_s16(vd_low_s16x8));
+    int32x4_t C_s32x4 = vmovl_s16(vget_low_s16(vd_high_s16x8));
+    int32x4_t D_s32x4 = vmovl_s16(vget_high_s16(vd_high_s16x8));
+    // Convert int32 vectors to float32 vectors
+    float32x4_t A_f32x4 = vcvtq_f32_s32(A_s32x4);
+    float32x4_t B_f32x4 = vcvtq_f32_s32(B_s32x4);
+    float32x4_t C_f32x4 = vcvtq_f32_s32(C_s32x4);
+    float32x4_t D_f32x4 = vcvtq_f32_s32(D_s32x4);
+    // vd = vd*vs + vo (vmlaq_f32(acc, x, y) computes acc + x*y)
+    A_f32x4 = vmlaq_f32(vo, A_f32x4, vs);
+    B_f32x4 = vmlaq_f32(vo, B_f32x4, vs);
+    C_f32x4 = vmlaq_f32(vo, C_f32x4, vs);
+    D_f32x4 = vmlaq_f32(vo, D_f32x4, vs);
+    // Convert float32 vectors back to int32 vectors (truncation toward zero)
+    A_s32x4 = vcvtq_s32_f32(A_f32x4);
+    B_s32x4 = vcvtq_s32_f32(B_f32x4);
+    C_s32x4 = vcvtq_s32_f32(C_f32x4);
+    D_s32x4 = vcvtq_s32_f32(D_f32x4);
+    // Convert int32 vectors to int16 vectors (with saturation)
+    vd_low_s16x8 = vcombine_s16(vqmovn_s32(A_s32x4), vqmovn_s32(B_s32x4));
+    vd_high_s16x8 = vcombine_s16(vqmovn_s32(C_s32x4), vqmovn_s32(D_s32x4));
+    // Convert int16 vectors to int8 vectors (with saturation)
+    return vcombine_s8(vqmovn_s16(vd_low_s16x8), vqmovn_s16(vd_high_s16x8));
+}
} // namespace arm_compute