author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2018-10-18 10:21:02 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:55:45 +0000
commit     4b90865ab985d571f70c60583cdfb8c7a65f1670
tree       f116a4ffef5f5e823689dd00c1e5c9d987f3d295 /src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
parent     c55beee7ef70fa08a5d217619083b288a74fcb27
COMPMID-1413 - Improve the performance of GEMMLowp with 8 bit dot product on OpenCL
COMPMID-1424 - Add dot product support for CLDepthwise QASYMM8 3x3 NHWC non-unit stride
With this patch we are able to improve the performance of MobileNet v1-qasymm8 by 37%.
I also tried to use the dot product instruction in CLDepthwise QASYMM8 3x3 NHWC with
non-unit stride, but I have not seen any benefit (maybe because we have few arithmetic
operations and we do not have more load instructions). However, depthwise convolution
has been improved by 30%.
Change-Id: Id768a99c2e53a04276707e427af5d0ec93419ada
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/155082
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp')
-rw-r--r--   src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp   8
1 file changed, 4 insertions, 4 deletions
diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
index f5dc655776..f1c24626dc 100644
--- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
@@ -45,17 +45,17 @@ Status CLGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *inpu
 
 void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
                                                                     int result_offset_after_shift,
-                                                                    int min, int max, unsigned int output_3d_depth)
+                                                                    int min, int max)
 {
     auto k = arm_compute::support::cpp14::make_unique<CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel>();
-    k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max, output_3d_depth);
+    k->configure(input, bias, output, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
     _kernel = std::move(k);
 }
 
 Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
-                                                                     int min, int max, unsigned int output_3d_depth)
+                                                                     int min, int max)
 {
-    return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max, output_3d_depth);
+    return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
 }
 
 void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat::configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output,
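For context, a minimal sketch of how the simplified output stage is driven from the host side after this change, i.e. without the removed output_3d_depth argument. The helper name run_output_stage and the requantisation parameter values are illustrative assumptions and are not part of the patch; tensor allocation and CL scheduler initialisation are assumed to have been done by the caller.

    // Sketch only: post-patch usage of the GEMMLowp fixed-point output stage, which
    // no longer takes an output_3d_depth argument. The parameter values and the
    // helper name run_output_stage are illustrative assumptions, not from the patch.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"

    using namespace arm_compute;

    void run_output_stage(const CLTensor &gemm_acc_s32, const CLTensor &bias_s32, CLTensor &dst_qasymm8)
    {
        // Hypothetical fixed-point requantisation parameters (multiplier in Q0.31).
        const int result_fixedpoint_multiplier = 1073741824;
        const int result_shift                 = 1;
        const int result_offset_after_shift    = 128; // output zero point
        const int min                          = 0;
        const int max                          = 255;

        // Static validation of the tensor infos: output_3d_depth is no longer passed.
        const Status status = CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(
            gemm_acc_s32.info(), bias_s32.info(), dst_qasymm8.info(), min, max);

        if(status.error_code() == ErrorCode::OK)
        {
            CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint output_stage;
            output_stage.configure(&gemm_acc_s32, &bias_s32, &dst_qasymm8,
                                   result_fixedpoint_multiplier, result_shift, result_offset_after_shift,
                                   min, max);
            output_stage.run(); // enqueues the underlying CL kernel
        }
    }

The wrapped kernel class, CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel, is configured with the same argument list, as shown in the diff above.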