From a84faffd290139be54e4a52ab11da0369262e889 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 5 Dec 2018 18:17:24 +0000
Subject: COMPMID-1826: Add support for QASYMM8 in NEArithmeticAdditionKernel

Change-Id: Ia7fb128e1f3944d0d831e1d125a6db3e1d257106
Reviewed-on: https://review.mlplatform.org/355
Tested-by: Arm Jenkins
Reviewed-by: Isabella Gottardi
Reviewed-by: Anthony Barbier
---
 .../core/NEON/kernels/NEArithmeticAdditionKernel.h | 33 +++++++++++----------
 .../runtime/NEON/functions/NEArithmeticAddition.h  | 14 ++++-----
 2 files changed, 24 insertions(+), 23 deletions(-)
(limited to 'arm_compute')

diff --git a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
index 8cf21eae9d..73beca6ded 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
@@ -56,25 +56,26 @@ public:
      *
      * Valid configurations (Input1,Input2) -> Output :
      *
-     * - (U8,U8)   -> U8
-     * - (U8,U8)   -> S16
-     * - (S16,U8)  -> S16
-     * - (U8,S16)  -> S16
-     * - (S16,S16) -> S16
-     * - (F16,F16) -> F16
-     * - (F32,F32) -> F32
+     * - (U8,U8)           -> U8
+     * - (U8,U8)           -> S16
+     * - (S16,U8)          -> S16
+     * - (U8,S16)          -> S16
+     * - (S16,S16)         -> S16
+     * - (F16,F16)         -> F16
+     * - (F32,F32)         -> F32
+     * - (QASYMM8,QASYMM8) -> QASYMM8
      *
-     * @param[in]  input1 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in]  input2 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32.
+     * @param[in]  input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in]  input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
      * @param[in]  policy Overflow policy.
      */
     void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAdditionKernel
      *
-     * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in] input2 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in] output The output tensor. Data types supported: U8/S16/F16/F32.
+     * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
      * @param[in] policy Overflow policy.
      *
      * @return a status
@@ -88,9 +89,9 @@ public:
 private:
     /** Common signature for all the specialised add functions
      *
-     * @param[in]  input1 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[in]  input2 An input tensor. Data types supported: U8/S16/F16/F32
-     * @param[out] output The output tensor. Data types supported: U8/S16/F16/F32.
+     * @param[in]  input1 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in]  input2 An input tensor. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[out] output The output tensor. Data types supported: U8/QASYMM8/S16/F16/F32.
      * @param[in]  window Region on which to execute the kernel.
      */
     using AddFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index c29646397c..e35f2fa0cd 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -37,22 +37,22 @@ class NEArithmeticAddition : public INESimpleFunction
 public:
     /** Initialise the kernel's inputs, output and conversion policy.
      *
-     * @param[in]  input1 First tensor input. Data types supported: U8/S16/F16/F32
-     * @param[in]  input2 Second tensor input. Data types supported: U8/S16/F16/F32
-     * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
+     * @param[in]  input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in]  input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAddition
      *
-     * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
-     * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
-     * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
+     * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32
+     * @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/F16/F32
      * @param[in] policy Policy to use to handle overflow.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEARITHMETICADDITION_H__ */
-- 
cgit v1.2.1
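
For reference only (not part of the commit above): a minimal sketch of how the QASYMM8 path documented in this patch might be exercised through NEArithmeticAddition, using the configure()/validate() signatures shown in the diff. The tensor shape, QuantizationInfo values, and the surrounding Tensor/TensorInfo/allocator setup are illustrative choices following the usual arm_compute runtime pattern; they are not defined by this change.

// Sketch: element-wise addition of two QASYMM8 tensors via NEArithmeticAddition.
// The shape and QuantizationInfo below are arbitrary example values.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Two 16x16 QASYMM8 inputs and a QASYMM8 output. The same QuantizationInfo is
    // reused for all three tensors purely to keep the example short.
    const TensorShape      shape(16U, 16U);
    const QuantizationInfo qinfo(0.5f, 10);

    Tensor input1, input2, output;
    input1.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));
    input2.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));
    output.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));

    // Check that this configuration is supported before configuring the function.
    Status status = NEArithmeticAddition::validate(input1.info(), input2.info(), output.info(), ConvertPolicy::SATURATE);
    if(!bool(status))
    {
        return 1;
    }

    NEArithmeticAddition add;
    add.configure(&input1, &input2, &output, ConvertPolicy::SATURATE);

    // Allocate backing memory (fill the inputs here in real code), then run the addition.
    input1.allocator()->allocate();
    input2.allocator()->allocate();
    output.allocator()->allocate();
    add.run();

    return 0;
}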