From bc415af5ee9517fd113e9ea0f01fdc84f9693dc4 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 13 Jun 2019 15:58:32 +0100
Subject: COMPMID-2406: Create a new GEMMLowpQuantizeDownInt32ToInt16ScaleKernel
 for NEON

Change-Id: I3f3e247728fd6dafca066e41835f0ef9442d9b7a
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/1379
Comments-Addressed: Arm Jenkins
Reviewed-by: Georgios Pinitas
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 .../NEON/functions/NEGEMMLowpOutputStage.cpp  | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
index ce69fa0bfd..b89e7a168e 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,12 +24,13 @@
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
 
 #include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.h"
 #include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
 #include "support/ToolchainSupport.h"
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
 void NEGEMMLowpQuantizeDownInt32ToUint8Scale::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min, int max)
 {
     auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel>();
@@ -53,4 +54,17 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ITenso
 Status NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
 {
     return NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
-}
\ No newline at end of file
+}
+
+void NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_fixedpoint_multiplier, int result_shift, int min, int max)
+{
+    auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel>();
+    k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, min, max);
+    _kernel = std::move(k);
+}
+
+Status NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+{
+    return NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel::validate(input, bias, output, min, max);
+}
+} // namespace arm_compute
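
Usage note (not part of the patch): a minimal sketch of how the new Int16 output stage might be driven end to end. Only the configure()/validate() signatures are taken from the diff above; the tensor shapes, the QSYMM16 output type, the requantization values, and the min/max bounds are illustrative assumptions about the surrounding runtime API.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // S32 accumulators produced by a GEMMLowp matrix multiplication,
    // quantized down to a 16-bit output. Shapes are illustrative.
    Tensor acc;
    Tensor dst;
    acc.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::S32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::QSYMM16));

    // Illustrative fixed-point requantization parameters; real values are
    // derived from the quantization scales of the surrounding GEMM.
    const int result_fixedpoint_multiplier = 1073741824; // 0.5 in Q0.31
    const int result_shift                 = 1;

    // Saturating bounds on the 16-bit result (assumed here to be the full range).
    const int min_bound = -32768;
    const int max_bound = 32767;

    // The static validate() mirrors the signature added in this patch.
    ARM_COMPUTE_ERROR_THROW_ON(NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(
        acc.info(), nullptr, dst.info(), min_bound, max_bound));

    NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint output_stage;
    output_stage.configure(&acc, nullptr /* no bias */, &dst,
                           result_fixedpoint_multiplier, result_shift,
                           min_bound, max_bound);

    acc.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill acc with the raw S32 GEMM results ...

    output_stage.run();
    return 0;
}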