From 1b14c75c0d591c4abe4d2d41b7e4e165fbf58382 Mon Sep 17 00:00:00 2001
From: Sheri Zhang
Date: Mon, 9 Mar 2020 14:29:52 +0000
Subject: COMPMID-2968: Add support for QASYMM8_SIGNED in CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel

Signed-off-by: Sheri Zhang
Change-Id: I37e6e76dbd5546c0eaedfacd01ea905c37148e8a
Signed-off-by: Sheri Zhang
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2861
Comments-Addressed: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Tested-by: Arm Jenkins
---
 src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)

(limited to 'src/runtime/CL')

diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
index e86f303ff4..fbd1820098 100644
--- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
@@ -24,11 +24,11 @@
 #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
 
 #include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ScaleKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
-#include "arm_compute/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.h"
 #include "support/MemorySupport.h"
 
 namespace arm_compute
@@ -90,15 +90,24 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat::configure(const ICLTensor *
                                                                float multiplier, int offset, int min, int max)
 {
-    auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel>();
-    k->configure(input, bias, output, multiplier, offset, min, max);
+    GEMMLowpOutputStageInfo info = GEMMLowpOutputStageInfo();
+    info.gemmlowp_offset          = offset;
+    info.gemmlowp_real_multiplier = multiplier;
+    info.gemmlowp_min_bound       = min;
+    info.gemmlowp_max_bound       = max;
+
+    auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel>();
+    k->configure(input, bias, output, &info);
     _kernel = std::move(k);
 }
 
 Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
                                                                 int min, int max)
 {
-    return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::validate(input, bias, output, min, max);
+    GEMMLowpOutputStageInfo info = GEMMLowpOutputStageInfo();
+    info.gemmlowp_min_bound = min;
+    info.gemmlowp_max_bound = max;
+    return CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(input, bias, output, &info);
 }
 
 void CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
@@ -165,6 +174,13 @@ void CLGEMMLowpOutputStage::configure(const ICLTensor *input, const ICLTensor *b
             }
             break;
         }
+        case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT:
+        {
+            auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel>();
+            k->configure(input, bias, output, &info);
+            _kernel = std::move(k);
+            break;
+        }
         default:
             ARM_COMPUTE_ERROR("Unsupported GEMMLowpOutputStage type.");
     }
@@ -202,6 +218,10 @@ Status CLGEMMLowpOutputStage::validate(const ITensorInfo *input, const ITensorIn
                     return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported output data type.");
             }
         }
+        case GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT:
+        {
+            return CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(input, bias, output, &info);
+        }
         default:
             return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported GEMMLowpOutputStage type.");
     }
-- 
cgit v1.2.1
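
The patch funnels the float requantization parameters (offset, real multiplier, clamping bounds) into a GEMMLowpOutputStageInfo and dispatches the QUANTIZE_DOWN_FLOAT stage to the new CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel. Below is a minimal usage sketch of that path through the generic CLGEMMLowpOutputStage function, assuming the configure(input, bias, output, info) signature seen in this file; the tensor shapes, the requantization constants and the nullptr bias are illustrative assumptions, not values taken from the patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"

using namespace arm_compute;

int main()
{
    // Set up the default OpenCL context and queue for the CL runtime.
    CLScheduler::get().default_init();

    // Hypothetical tensors: 64x16 S32 GEMM accumulators in, QASYMM8_SIGNED out.
    CLTensor acc, dst;
    acc.allocator()->init(TensorInfo(TensorShape(64U, 16U), 1, DataType::S32));
    dst.allocator()->init(TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8_SIGNED));

    // Describe the float requantization stage; the numeric values are made up.
    GEMMLowpOutputStageInfo info = GEMMLowpOutputStageInfo();
    info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT;
    info.gemmlowp_real_multiplier = 0.0034f;
    info.gemmlowp_offset          = -5;
    info.gemmlowp_min_bound       = -128;
    info.gemmlowp_max_bound       = 127;

    // With this change, QUANTIZE_DOWN_FLOAT is routed to
    // CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel; the bias is optional, so nullptr is passed here.
    CLGEMMLowpOutputStage output_stage;
    output_stage.configure(&acc, nullptr, &dst, info);

    acc.allocator()->allocate();
    dst.allocator()->allocate();

    output_stage.run();
    CLScheduler::get().sync();
    return 0;
}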