author    Gunes Bayir <gunes.bayir@arm.com>  2022-09-04 21:00:10 +0100
committer Gunes Bayir <gunes.bayir@arm.com>  2022-09-09 09:29:43 +0000
commit    0eed305680ade0c48d07f592c4c4a8aaaad077b7
tree      ec4aa5c2e66135d377b5a34f5cf03f97462424c0
parent    d11de9861e6c32fa389f503e037098f50ffed156
Optimize FP32/16 Bilinear Scale Kernel for Neon™
This patch removes index and weight pre-computations where they are not
used and reduces some calculations inside the inner-most loop of Scale.

Resolves: COMPMID-5452
Change-Id: Ie149b1b76a90a8cb659ada0f97aef78caf69932f
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8220
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
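The heart of the change is ordinary loop-invariant code motion: byte offsets that depend only on the output row (yo) are now computed once per row, and those that depend only on the output column (xo) once per pixel, instead of being re-derived for every channel in the inner-most loop. The following is a minimal standalone sketch of that pattern, with illustrative names (copy_plane, the strides, the loop bounds) rather than the kernel's actual code:

#include <cstddef>
#include <cstdint>

// Sketch only: hoist the yo-dependent offset out of the xo loop and the
// xo-dependent offset out of the channel loop, mirroring the shape of the
// bilinear_neon_scale() change below.
void copy_plane(const uint8_t *in_ptr, uint8_t *out_ptr,
                int out_h, int out_w, int channels,
                ptrdiff_t stride_y, ptrdiff_t stride_z)
{
    for(int yo = 0; yo < out_h; ++yo)
    {
        const ptrdiff_t y_offset = yo * stride_z;              // once per row
        for(int xo = 0; xo < out_w; ++xo)
        {
            const ptrdiff_t offset = xo * stride_y + y_offset; // once per pixel
            for(int c = 0; c < channels; ++c)
            {
                // The inner-most loop now only adds c * sizeof(float) to a
                // precomputed base instead of multiplying xo and yo by the
                // strides on every iteration.
                *reinterpret_cast<float *>(out_ptr + offset + c * sizeof(float)) =
                    *reinterpret_cast<const float *>(in_ptr + offset + c * sizeof(float));
            }
        }
    }
}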
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/kernels/CpuScaleKernel.cpp |  4
-rw-r--r--  src/cpu/kernels/scale/neon/list.h  | 73
-rw-r--r--  src/cpu/operators/CpuScale.cpp     | 47
3 files changed, 67 insertions, 57 deletions
diff --git a/src/cpu/kernels/CpuScaleKernel.cpp b/src/cpu/kernels/CpuScaleKernel.cpp
index c9e858fc02..e7386a385a 100644
--- a/src/cpu/kernels/CpuScaleKernel.cpp
+++ b/src/cpu/kernels/CpuScaleKernel.cpp
@@ -140,12 +140,12 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dx, const I
ARM_COMPUTE_RETURN_ERROR_ON(output_width == 0);
ARM_COMPUTE_RETURN_ERROR_ON(output_height == 0);
- if(info.interpolation_policy == InterpolationPolicy::NEAREST_NEIGHBOR)
+ if(info.interpolation_policy == InterpolationPolicy::NEAREST_NEIGHBOR && offsets != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(offsets, 1, DataType::S32);
}
- if(info.interpolation_policy == InterpolationPolicy::BILINEAR)
+ if(info.interpolation_policy == InterpolationPolicy::BILINEAR && offsets != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(offsets, 1, DataType::S32);
if(dx != nullptr && dy != nullptr)
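This validation change follows from the new prepare() logic in CpuScale.cpp further down: when pre-computation is skipped, offsets (and dx/dy) are never allocated and reach the kernel as nullptr, so the data-type checks must be guarded. A hedged sketch of the pattern, using a stand-in struct and assert rather than the library's ITensorInfo API:

#include <cassert>

enum class DataType { S32, F32 };
struct TensorInfoStub { DataType data_type; };   // stand-in for ITensorInfo

// Only validate auxiliary tensors the caller actually provided; the
// optimized path may legitimately skip pre-computing them.
void validate_offsets(const TensorInfoStub *offsets)
{
    if(offsets != nullptr)
    {
        assert(offsets->data_type == DataType::S32);
    }
}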
diff --git a/src/cpu/kernels/scale/neon/list.h b/src/cpu/kernels/scale/neon/list.h
index 9679f161e7..17ff4bb676 100644
--- a/src/cpu/kernels/scale/neon/list.h
+++ b/src/cpu/kernels/scale/neon/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -131,7 +131,7 @@ void nearest_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets
for(; cout < out_dim_ch; ++cout)
{
- auto out0 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T)));
+ auto out0 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T)));
*(reinterpret_cast<T *>(out_ptr + cout * sizeof(T))) = out0;
}
}
@@ -322,9 +322,13 @@ void bilinear_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offset
const auto a1 = (yi_f - static_cast<float>(yi));
const auto b1 = (1.f - a1);
- const auto yi0 = utility::clamp<int>(yi, 0, in_dim_h - 1);
- const auto yi1 = utility::clamp<int>(yi + 1, 0, in_dim_h - 1);
+ const int yi0 = utility::clamp<int>(yi, 0, in_dim_h - 1);
+ const int yi1 = utility::clamp<int>(yi + 1, 0, in_dim_h - 1);
+ const int yi0_offset = yi0 * in_stride_z;
+ const int yi1_offset = yi1 * in_stride_z;
+
+ const int y_offset = yo * out_stride_z;
for(int xo = xo_start; xo < xo_end; xo += xo_step)
{
// Floating-point coordinate
@@ -340,49 +344,46 @@ void bilinear_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offset
const auto s10_s = static_cast<T>(b * a1);
const auto s11_s = static_cast<T>(a * a1);
- const auto xi0 = utility::clamp<int>(xi, 0, in_dim_w - 1);
- const auto xi1 = utility::clamp<int>(xi + 1, 0, in_dim_w - 1);
+ const auto s00 = wrapper::vdup_n(s00_s, ExactTagType{});
+ const auto s01 = wrapper::vdup_n(s01_s, ExactTagType{});
+ const auto s10 = wrapper::vdup_n(s10_s, ExactTagType{});
+ const auto s11 = wrapper::vdup_n(s11_s, ExactTagType{});
+
+ const int xi0 = utility::clamp<int>(xi, 0, in_dim_w - 1);
+ const int xi1 = utility::clamp<int>(xi + 1, 0, in_dim_w - 1);
+
+ const int xi0_offset = xi0 * in_stride_y;
+ const int xi1_offset = xi1 * in_stride_y;
+
+ const int offset = xo * out_stride_y + y_offset;
int cout = 0;
for(; cout <= (out_dim_ch - step_cout); cout += step_cout)
{
- auto in00 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- auto in01 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- auto in10 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- auto in11 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- in00 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi0) * in_stride_z));
- in01 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi0) * in_stride_z));
- in10 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi1) * in_stride_z));
- in11 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi1) * in_stride_z));
-
- const auto s00 = wrapper::vdup_n(s00_s, ExactTagType{});
- const auto s01 = wrapper::vdup_n(s01_s, ExactTagType{});
- const auto s10 = wrapper::vdup_n(s10_s, ExactTagType{});
- const auto s11 = wrapper::vdup_n(s11_s, ExactTagType{});
- auto out0 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- out0 = wrapper::vmla(out0, in00, s00);
- out0 = wrapper::vmla(out0, in01, s01);
- out0 = wrapper::vmla(out0, in10, s10);
- out0 = wrapper::vmla(out0, in11, s11);
- wrapper::vstore(reinterpret_cast<T *>(out_ptr + cout * sizeof(T) + xo * out_stride_y + yo * out_stride_z), out0);
+ auto in00 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
+ auto in01 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
+ auto in10 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
+ auto in11 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
+
+ auto out0 = wrapper::vmul(in00, s00);
+ out0 = wrapper::vmla(out0, in01, s01);
+ out0 = wrapper::vmla(out0, in10, s10);
+ out0 = wrapper::vmla(out0, in11, s11);
+ wrapper::vstore(reinterpret_cast<T *>(out_ptr + offset + cout * sizeof(T)), out0);
}
for(; cout < out_dim_ch; ++cout)
{
- auto in00 = static_cast<T>(0);
- auto in01 = static_cast<T>(0);
- auto in10 = static_cast<T>(0);
- auto in11 = static_cast<T>(0);
- in00 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi0) * in_stride_z));
- in01 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi0) * in_stride_z));
- in10 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi1) * in_stride_z));
- in11 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi1) * in_stride_z));
- auto out0 = static_cast<T>(0);
- out0 += in00 * s00_s;
+ T in00 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
+ T in01 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
+ T in10 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
+ T in11 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
+
+ T out0 = in00 * s00_s;
out0 += in01 * s01_s;
out0 += in10 * s10_s;
out0 += in11 * s11_s;
- *(reinterpret_cast<T *>(out_ptr + cout * sizeof(T) + xo * out_stride_y + yo * out_stride_z)) = out0;
+ *(reinterpret_cast<T *>(out_ptr + offset + cout * sizeof(T))) = out0;
}
}
}
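Two further micro-optimizations are visible in this hunk: the interpolation weights are broadcast (vdup) once per pixel before the channel loop rather than inside it, and the first term of the weighted sum is now a plain multiply (vmul) instead of a multiply-accumulate into a zero-initialized vector, removing one vdup and one vmla per output vector. Below is a standalone sketch of the accumulation pattern using raw Neon intrinsics instead of the library's wrapper:: layer; the function name and the scalar-weight (_n) variants are illustrative simplifications:

#include <arm_neon.h>

// Weighted sum of the four bilinear neighbours for one vector of 4 FP32
// channels; s00..s11 are the per-pixel interpolation weights.
float32x4_t bilinear_sum(const float *p00, const float *p01,
                         const float *p10, const float *p11,
                         float s00, float s01, float s10, float s11)
{
    float32x4_t acc = vmulq_n_f32(vld1q_f32(p00), s00); // plain vmul, no zero vdup
    acc = vmlaq_n_f32(acc, vld1q_f32(p01), s01);        // then three vmla
    acc = vmlaq_n_f32(acc, vld1q_f32(p10), s10);
    acc = vmlaq_n_f32(acc, vld1q_f32(p11), s11);
    return acc;
}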
diff --git a/src/cpu/operators/CpuScale.cpp b/src/cpu/operators/CpuScale.cpp
index 27da238c16..fdb52e5ede 100644
--- a/src/cpu/operators/CpuScale.cpp
+++ b/src/cpu/operators/CpuScale.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,9 +23,6 @@
*/
#include "src/cpu/operators/CpuScale.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/utils/ScaleUtils.h"
@@ -218,26 +215,38 @@ void CpuScale::prepare(ITensorPack &tensors)
_scale_info.interpolation_policy;
const SamplingPolicy sampling_policy = _scale_info.sampling_policy;
- switch(policy_to_use)
+ bool precompute_indices_weights = arm_compute::scale_utils::is_precomputation_required(_data_layout, src->info()->data_type(), policy_to_use);
+
+ if(precompute_indices_weights == true)
{
- case InterpolationPolicy::NEAREST_NEIGHBOR:
- {
- // Pre-compute offsets for nearest interpolation
- precompute_dx_dy_offsets(nullptr, nullptr, offsets, wr, hr, sampling_policy, is_align_corners_used);
- break;
- }
- case InterpolationPolicy::BILINEAR:
+ switch(policy_to_use)
{
- // Pre-compute dx, dy and offsets for bilinear interpolation
- precompute_dx_dy_offsets(dx, dy, offsets, wr, hr, sampling_policy, is_align_corners_used);
- break;
+ case InterpolationPolicy::NEAREST_NEIGHBOR:
+ {
+ // Pre-compute offsets for nearest interpolation
+ precompute_dx_dy_offsets(nullptr, nullptr, offsets, wr, hr, sampling_policy, is_align_corners_used);
+ break;
+ }
+ case InterpolationPolicy::BILINEAR:
+ {
+ // Pre-compute dx, dy and offsets for bilinear interpolation
+ precompute_dx_dy_offsets(dx, dy, offsets, wr, hr, sampling_policy, is_align_corners_used);
+ break;
+ }
+ case InterpolationPolicy::AREA:
+ {
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported interpolation mode");
}
- case InterpolationPolicy::AREA:
+ }
+ else
+ {
+ if(policy_to_use != InterpolationPolicy::NEAREST_NEIGHBOR && policy_to_use != InterpolationPolicy::BILINEAR && policy_to_use != InterpolationPolicy::AREA)
{
- break;
- }
- default:
ARM_COMPUTE_ERROR("Unsupported interpolation mode");
+ }
}
}
}
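For reference, the predicate driving this new branch is arm_compute::scale_utils::is_precomputation_required(), declared in src/core/utils/ScaleUtils.h; its exact rule is not shown in this diff. The sketch below is an assumption for illustration only, capturing the idea that the optimized FP32/FP16 bilinear Neon path computes indices and weights on the fly and therefore needs no offsets/dx/dy tensors:

// Assumed shape of the predicate; the enum values and the rule below are
// illustrative, not the library's actual implementation.
enum class DataLayout { NCHW, NHWC };
enum class DataType { F16, F32, U8 };
enum class InterpolationPolicy { NEAREST_NEIGHBOR, BILINEAR, AREA };

bool is_precomputation_required(DataLayout layout, DataType type,
                                InterpolationPolicy policy)
{
    // The optimized kernel derives indices and weights inside the loop, so
    // the offsets/dx/dy tensors would be dead weight for this configuration.
    const bool is_fp = (type == DataType::F32 || type == DataType::F16);
    return !(layout == DataLayout::NHWC && is_fp
             && policy == InterpolationPolicy::BILINEAR);
}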