From 448a81fcec04333364a1e3266d5081596d3a0477 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 21 Nov 2019 14:10:25 +0000
Subject: COMPMID-2805: Add QASYMM8_SIGNED support in NEGEMMLowpOutputStage

Add support for requantizing down from S32 to Int8 with fixed-point
requantization. This involves the following steps:
- Compute the fixed-point multiplication of each input entry by
  result_fixedpoint_multiplier
- Add bias to the result if the bias tensor is not a nullptr
- Round to nearest while dividing by a power of two given by result_shift
- Add offset to each result
- Clamp the value between the specified min and max bounds
- Cast to the int8 data type

Change-Id: I641b3fac0833c568d8565ccb859bbc561a24c17d
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/2340
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'src/runtime/NEON')

diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
index b89e7a168e..3ef9351b78 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
@@ -25,6 +25,7 @@
 #include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
 #include "support/ToolchainSupport.h"
@@ -56,6 +57,19 @@ Status NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITens
     return NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
 }
 
+void NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift,
+                                                                   int result_offset_after_shift, int min, int max)
+{
+    auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel>();
+    k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
+    _kernel = std::move(k);
+}
+
+Status NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+{
+    return NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
+}
+
 void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift,
                                                                     int min, int max)
 {
     auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
-- 
cgit v1.2.1
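
Note: for reference, the per-element arithmetic of the new output stage can be
sketched in plain scalar C++. This is an illustrative sketch only, transcribing
the steps in the order listed in the commit message; the function names below
(saturating_rounding_doubling_high_mul, rounding_divide_by_pow2,
requantize_s32_to_s8) are hypothetical and not part of the library, whose
NEGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel implements the
corresponding logic with NEON intrinsics. A non-negative result_shift is
assumed.

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Fixed-point multiply: round-to-nearest (a * b * 2) >> 31 with saturation,
    // mirroring the semantics of the Arm VQRDMULH instruction.
    static int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
    {
        if(a == b && a == std::numeric_limits<int32_t>::min())
        {
            return std::numeric_limits<int32_t>::max(); // only input pair that overflows
        }
        const int64_t ab    = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        const int32_t nudge = (ab >= 0) ? (1 << 30) : (1 - (1 << 30));
        return static_cast<int32_t>((ab + nudge) / (int64_t(1) << 31));
    }

    // Round-to-nearest division by 2^shift, with shift >= 0.
    static int32_t rounding_divide_by_pow2(int32_t x, int shift)
    {
        const int32_t mask      = (int32_t(1) << shift) - 1;
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
        return (x >> shift) + ((remainder > threshold) ? 1 : 0);
    }

    // One output element, following the steps in the order listed above.
    static int8_t requantize_s32_to_s8(int32_t in, int32_t bias,
                                       int32_t result_fixedpoint_multiplier,
                                       int result_shift,
                                       int32_t result_offset_after_shift,
                                       int32_t min, int32_t max)
    {
        int32_t v = saturating_rounding_doubling_high_mul(in, result_fixedpoint_multiplier);
        v += bias;                                    // bias is 0 when no bias tensor is given
        v = rounding_divide_by_pow2(v, result_shift); // round-to-nearest division by 2^result_shift
        v += result_offset_after_shift;               // add the output offset
        v = std::min(std::max(v, min), max);          // clamp to [min, max]
        return static_cast<int8_t>(v);                // narrow to int8
    }

With min = -128 and max = 127 the clamp spans the full int8 range; narrower
bounds can be passed to fuse an activation clipping into the output stage.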