author      Gunes Bayir <gunes.bayir@arm.com>    2022-09-11 15:59:19 +0100
committer   Gunes Bayir <gunes.bayir@arm.com>    2022-09-16 12:38:59 +0000
commit      c4f2743951473f8d97f5a43767fdbb31a4df967c (patch)
tree        26c49b1af8113bb169931f3af5e502904d455a25 /src/cpu/kernels/scale/neon/list.h
parent      0d05b6690fe69c57f63ca43d59b551f074613062 (diff)
download    ComputeLibrary-c4f2743951473f8d97f5a43767fdbb31a4df967c.tar.gz
Optimize Quantized/Integer Bilinear Scale for Neon™
This patch introduces several performance optimizations for the Bilinear Scale operator with the REPLICATE border mode. Changes apply only to NHWC.

This patch:
- Reduces the memory footprint by disabling precomputation of indices and weights when they're not used
- Rewrites the kernels for QASYMM8/QASYMM8_SIGNED/U8 (Uint8)
- Adds an S8 (Int8) Bilinear Scale for border mode REPLICATE
- Removes the Bilinear Scale SVE kernels for quantized and integer types and adjusts the heuristics to choose the Neon™ implementation
- Adds new test cases where the input and output of the Bilinear Scale operator have different quantization scale and offset

Resolves: COMPMID-5453, COMPMID-5454

Change-Id: I3d251e76e0c6978fd5a0a1795ec62ab536bec93c
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8250
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
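For context on what the rewritten kernels compute: each bilinear output sample is a weighted sum of its four input neighbours, with per-corner weights derived from the fractional sampling position. Below is a minimal scalar sketch under REPLICATE border semantics. It is a hypothetical standalone helper for illustration only, not code from this patch; bilinear_sample and its parameters are invented names.

    // Scalar bilinear sample with REPLICATE border handling.
    // Mirrors the in00..in11 / s00..s11 naming used in the kernel below.
    #include <algorithm>
    #include <cmath>

    template <typename T>
    float bilinear_sample(const T *in, int width, int height, int stride,
                          float x, float y)
    {
        const int xi0 = static_cast<int>(std::floor(x));
        const int yi0 = static_cast<int>(std::floor(y));
        // REPLICATE border mode: clamp neighbour indices to the valid range.
        const int x0 = std::clamp(xi0, 0, width - 1);
        const int x1 = std::clamp(xi0 + 1, 0, width - 1);
        const int y0 = std::clamp(yi0, 0, height - 1);
        const int y1 = std::clamp(yi0 + 1, 0, height - 1);
        // Fractional distances to the top-left neighbour.
        const float dx = x - static_cast<float>(xi0);
        const float dy = y - static_cast<float>(yi0);
        // Per-corner weights; they sum to 1.
        const float s00 = (1.f - dx) * (1.f - dy);
        const float s01 = dx * (1.f - dy);
        const float s10 = (1.f - dx) * dy;
        const float s11 = dx * dy;
        return in[y0 * stride + x0] * s00 + in[y0 * stride + x1] * s01 +
               in[y1 * stride + x0] * s10 + in[y1 * stride + x1] * s11;
    }

Skipping the precomputation of these indices and weights when they are unused is where the memory-footprint reduction described above comes from.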
Diffstat (limited to 'src/cpu/kernels/scale/neon/list.h')
-rw-r--r--   src/cpu/kernels/scale/neon/list.h   20
1 file changed, 9 insertions, 11 deletions
diff --git a/src/cpu/kernels/scale/neon/list.h b/src/cpu/kernels/scale/neon/list.h
index 17ff4bb676..28a1087224 100644
--- a/src/cpu/kernels/scale/neon/list.h
+++ b/src/cpu/kernels/scale/neon/list.h
@@ -25,11 +25,8 @@
#define SRC_CORE_NEON_KERNELS_SCALE_LIST_H
#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
-#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
-#include "src/core/helpers/ScaleHelpers.h"
#include "src/core/utils/ScaleUtils.h"
#include "support/Rounding.h"
@@ -44,6 +41,7 @@ namespace cpu
DECLARE_SCALE_KERNEL(s16_neon_scale);
DECLARE_SCALE_KERNEL(u8_neon_scale);
+DECLARE_SCALE_KERNEL(s8_neon_scale);
DECLARE_SCALE_KERNEL(qasymm8_neon_scale);
DECLARE_SCALE_KERNEL(qasymm8_signed_neon_scale);
@@ -360,10 +358,10 @@ void bilinear_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offset
int cout = 0;
for(; cout <= (out_dim_ch - step_cout); cout += step_cout)
{
- auto in00 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
- auto in01 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
- auto in10 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
- auto in11 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
+ const auto in00 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
+ const auto in01 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
+ const auto in10 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
+ const auto in11 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
auto out0 = wrapper::vmul(in00, s00);
out0 = wrapper::vmla(out0, in01, s01);
@@ -374,10 +372,10 @@ void bilinear_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offset
for(; cout < out_dim_ch; ++cout)
{
- T in00 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
- T in01 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
- T in10 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
- T in11 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
+ const T in00 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
+ const T in01 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
+ const T in10 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
+ const T in11 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
T out0 = in00 * s00_s;
out0 += in01 * s01_s;
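The two hunks above show the kernel's usual structure: a vector main loop that processes step_cout channels per iteration through wrapper::vloadq/vmul/vmla, followed by a scalar tail loop for the channels that do not fill a full vector. As a rough illustration of the accumulation chain, here is the same four-tap sum written with raw Neon intrinsics; this is a hypothetical float variant for clarity, whereas the patch itself operates on integer and quantized lanes through the wrapper:: layer.

    // out0 = in00*s00 + in01*s01 + in10*s10 + in11*s11, matching the
    // wrapper::vmul / wrapper::vmla chain in the diff above.
    #include <arm_neon.h>

    float32x4_t bilinear_accum(float32x4_t in00, float32x4_t in01,
                               float32x4_t in10, float32x4_t in11,
                               float32x4_t s00, float32x4_t s01,
                               float32x4_t s10, float32x4_t s11)
    {
        float32x4_t out0 = vmulq_f32(in00, s00);   // in00 * s00
        out0 = vmlaq_f32(out0, in01, s01);         // += in01 * s01
        out0 = vmlaq_f32(out0, in10, s10);         // += in10 * s10
        out0 = vmlaq_f32(out0, in11, s11);         // += in11 * s11
        return out0;
    }

Chaining multiply-accumulates keeps the four weighted taps in registers without materialising intermediate sums, which is why the kernel uses wrapper::vmla rather than separate multiplies and adds.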