author    Gunes Bayir <gunes.bayir@arm.com>  2022-09-04 21:00:10 +0100
committer Gunes Bayir <gunes.bayir@arm.com>  2022-09-09 09:29:43 +0000
commit    0eed305680ade0c48d07f592c4c4a8aaaad077b7 (patch)
tree      ec4aa5c2e66135d377b5a34f5cf03f97462424c0
parent    d11de9861e6c32fa389f503e037098f50ffed156 (diff)
download  ComputeLibrary-0eed305680ade0c48d07f592c4c4a8aaaad077b7.tar.gz
Optimize FP32/16 Bilinear Scale Kernel for Neon™
This patch removes index and weight pre-computations where they are not used
and reduces some calculations inside the inner-most loop of Scale.

Resolves: COMPMID-5452
Change-Id: Ie149b1b76a90a8cb659ada0f97aef78caf69932f
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8220
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/utils/ScaleUtils.cpp          | 18
-rw-r--r--  src/core/utils/ScaleUtils.h            | 13
-rw-r--r--  src/cpu/kernels/CpuScaleKernel.cpp     |  4
-rw-r--r--  src/cpu/kernels/scale/neon/list.h      | 73
-rw-r--r--  src/cpu/operators/CpuScale.cpp         | 47
-rw-r--r--  src/runtime/NEON/functions/NEScale.cpp | 58
6 files changed, 130 insertions(+), 83 deletions(-)
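The heart of the change is loop-invariant hoisting in the bilinear kernel: the
four source offsets depend only on the output coordinates, not on the channel,
so they can be lifted out of the inner-most (channel) loop. A minimal scalar
sketch of the idea, assuming element-based strides and illustrative names
rather than the library's byte-based pointer arithmetic:

#include <cstddef>

// Sketch of the hoisting applied in bilinear_neon_scale: the xi*/yi* offsets
// are computed once per output pixel instead of once per channel.
void bilinear_pixel(const float *in, float *out,
                    int xi0, int xi1, int yi0, int yi1,
                    float s00, float s01, float s10, float s11,
                    std::size_t in_stride_y, std::size_t in_stride_z,
                    std::size_t out_offset, int channels)
{
    // Hoisted out of the channel loop (previously re-derived per iteration)
    const std::size_t xi0_off = xi0 * in_stride_y;
    const std::size_t xi1_off = xi1 * in_stride_y;
    const std::size_t yi0_off = yi0 * in_stride_z;
    const std::size_t yi1_off = yi1 * in_stride_z;

    for(int c = 0; c < channels; ++c)
    {
        const float in00 = in[c + xi0_off + yi0_off];
        const float in01 = in[c + xi1_off + yi0_off];
        const float in10 = in[c + xi0_off + yi1_off];
        const float in11 = in[c + xi1_off + yi1_off];
        out[out_offset + c] = in00 * s00 + in01 * s01 + in10 * s10 + in11 * s11;
    }
}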
diff --git a/src/core/utils/ScaleUtils.cpp b/src/core/utils/ScaleUtils.cpp
index d46ca0ea8e..82c6405e89 100644
--- a/src/core/utils/ScaleUtils.cpp
+++ b/src/core/utils/ScaleUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,12 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
#include "src/core/utils/ScaleUtils.h"
-#include "arm_compute/core/Helpers.h"
+#include "src/common/cpuinfo/CpuIsaInfo.h"
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/TensorInfo.h"
float arm_compute::scale_utils::calculate_resize_ratio(size_t input_size, size_t output_size, bool align_corners)
{
@@ -34,4 +38,14 @@ float arm_compute::scale_utils::calculate_resize_ratio(size_t input_size, size_t
ARM_COMPUTE_ERROR_ON(out == 0);
return static_cast<float>(in) / static_cast<float>(out);
+}
+
+bool arm_compute::scale_utils::is_precomputation_required(DataLayout data_layout, DataType data_type, InterpolationPolicy policy)
+{
+    // Whether to precompute indices & weights:
+    // The Neon™ kernels (which are preferred over SVE when the policy is BILINEAR) do not use
+    // precomputed indices and weights when the data type is FP32/16.
+    // If the policy is NEAREST_NEIGHBOR and SVE is available, precompute, since the SVE kernel uses them.
+    // To be revised in COMPMID-5453/5454
+ return data_layout != DataLayout::NHWC || (data_type != DataType::F32 && data_type != DataType::F16) || (CPUInfo::get().get_isa().sve == true && policy == InterpolationPolicy::NEAREST_NEIGHBOR);
+}
\ No newline at end of file
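
For readability, the predicate can be restated with plain stand-in types (a
sketch assuming a has_sve flag in place of CPUInfo::get().get_isa().sve; these
are not the library's actual declarations):

#include <cstdio>

enum class DataLayout { NCHW, NHWC };
enum class DataType { U8, F16, F32 };
enum class InterpolationPolicy { NEAREST_NEIGHBOR, BILINEAR, AREA };

// Equivalent restatement: precomputation is skipped only for NHWC FP32/FP16,
// unless SVE is present and the policy is NEAREST_NEIGHBOR (the SVE kernel
// still consumes the precomputed offsets).
bool is_precomputation_required(DataLayout layout, DataType type,
                                InterpolationPolicy policy, bool has_sve)
{
    const bool fp_nhwc = layout == DataLayout::NHWC
                         && (type == DataType::F32 || type == DataType::F16);
    return !fp_nhwc || (has_sve && policy == InterpolationPolicy::NEAREST_NEIGHBOR);
}

int main()
{
    // NHWC + F32 + BILINEAR on a Neon-only target: no precomputation needed
    std::printf("%d\n", is_precomputation_required(DataLayout::NHWC, DataType::F32,
                                                   InterpolationPolicy::BILINEAR, false)); // prints 0
    return 0;
}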
diff --git a/src/core/utils/ScaleUtils.h b/src/core/utils/ScaleUtils.h
index 3cc986b1db..c09509253c 100644
--- a/src/core/utils/ScaleUtils.h
+++ b/src/core/utils/ScaleUtils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020, 2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,6 +53,17 @@ inline bool is_align_corners_allowed_sampling_policy(SamplingPolicy sampling_pol
{
return sampling_policy != SamplingPolicy::CENTER;
}
+
+/** Returns whether precomputation of indices and/or weights is required or not
+ *
+ * @param[in] data_layout Data layout
+ * @param[in] data_type Data type
+ * @param[in] policy Interpolation policy
+ *
+ * @return True if precomputation is required
+ */
+bool is_precomputation_required(DataLayout data_layout, DataType data_type, InterpolationPolicy policy);
+
} // namespace scale_utils
} // namespace arm_compute
#endif /* UTILS_CORE_SCALEUTILS_H */
\ No newline at end of file
diff --git a/src/cpu/kernels/CpuScaleKernel.cpp b/src/cpu/kernels/CpuScaleKernel.cpp
index c9e858fc02..e7386a385a 100644
--- a/src/cpu/kernels/CpuScaleKernel.cpp
+++ b/src/cpu/kernels/CpuScaleKernel.cpp
@@ -140,12 +140,12 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dx, const I
ARM_COMPUTE_RETURN_ERROR_ON(output_width == 0);
ARM_COMPUTE_RETURN_ERROR_ON(output_height == 0);
- if(info.interpolation_policy == InterpolationPolicy::NEAREST_NEIGHBOR)
+ if(info.interpolation_policy == InterpolationPolicy::NEAREST_NEIGHBOR && offsets != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(offsets, 1, DataType::S32);
}
- if(info.interpolation_policy == InterpolationPolicy::BILINEAR)
+ if(info.interpolation_policy == InterpolationPolicy::BILINEAR && offsets != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(offsets, 1, DataType::S32);
if(dx != nullptr && dy != nullptr)
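The nullptr guards above matter because the optimized NHWC FP32/FP16 path now
invokes the kernel without precomputed offsets. A self-contained restatement
of the guarded check, using hypothetical stand-in types:

#include <stdexcept>

enum class DataType { S32, F16, F32 };
struct TensorInfo { DataType data_type; };

// Validate the offsets tensor only when the caller actually provides one;
// the fast path passes nullptr and skips the check entirely.
void validate_offsets(const TensorInfo *offsets)
{
    if(offsets != nullptr && offsets->data_type != DataType::S32)
    {
        throw std::runtime_error("offsets must be S32 when provided");
    }
}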
diff --git a/src/cpu/kernels/scale/neon/list.h b/src/cpu/kernels/scale/neon/list.h
index 9679f161e7..17ff4bb676 100644
--- a/src/cpu/kernels/scale/neon/list.h
+++ b/src/cpu/kernels/scale/neon/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -131,7 +131,7 @@ void nearest_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offsets
for(; cout < out_dim_ch; ++cout)
{
- auto out0 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T)));
+ auto out0 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T)));
*(reinterpret_cast<T *>(out_ptr + cout * sizeof(T))) = out0;
}
}
@@ -322,9 +322,13 @@ void bilinear_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offset
const auto a1 = (yi_f - static_cast<float>(yi));
const auto b1 = (1.f - a1);
- const auto yi0 = utility::clamp<int>(yi, 0, in_dim_h - 1);
- const auto yi1 = utility::clamp<int>(yi + 1, 0, in_dim_h - 1);
+ const int yi0 = utility::clamp<int>(yi, 0, in_dim_h - 1);
+ const int yi1 = utility::clamp<int>(yi + 1, 0, in_dim_h - 1);
+ const int yi0_offset = yi0 * in_stride_z;
+ const int yi1_offset = yi1 * in_stride_z;
+
+ const int y_offset = yo * out_stride_z;
for(int xo = xo_start; xo < xo_end; xo += xo_step)
{
// Floating-point coordinate
@@ -340,49 +344,46 @@ void bilinear_neon_scale(const ITensor *src, ITensor *dst, const ITensor *offset
const auto s10_s = static_cast<T>(b * a1);
const auto s11_s = static_cast<T>(a * a1);
- const auto xi0 = utility::clamp<int>(xi, 0, in_dim_w - 1);
- const auto xi1 = utility::clamp<int>(xi + 1, 0, in_dim_w - 1);
+ const auto s00 = wrapper::vdup_n(s00_s, ExactTagType{});
+ const auto s01 = wrapper::vdup_n(s01_s, ExactTagType{});
+ const auto s10 = wrapper::vdup_n(s10_s, ExactTagType{});
+ const auto s11 = wrapper::vdup_n(s11_s, ExactTagType{});
+
+ const int xi0 = utility::clamp<int>(xi, 0, in_dim_w - 1);
+ const int xi1 = utility::clamp<int>(xi + 1, 0, in_dim_w - 1);
+
+ const int xi0_offset = xi0 * in_stride_y;
+ const int xi1_offset = xi1 * in_stride_y;
+
+ const int offset = xo * out_stride_y + y_offset;
int cout = 0;
for(; cout <= (out_dim_ch - step_cout); cout += step_cout)
{
- auto in00 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- auto in01 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- auto in10 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- auto in11 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- in00 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi0) * in_stride_z));
- in01 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi0) * in_stride_z));
- in10 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi1) * in_stride_z));
- in11 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi1) * in_stride_z));
-
- const auto s00 = wrapper::vdup_n(s00_s, ExactTagType{});
- const auto s01 = wrapper::vdup_n(s01_s, ExactTagType{});
- const auto s10 = wrapper::vdup_n(s10_s, ExactTagType{});
- const auto s11 = wrapper::vdup_n(s11_s, ExactTagType{});
- auto out0 = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- out0 = wrapper::vmla(out0, in00, s00);
- out0 = wrapper::vmla(out0, in01, s01);
- out0 = wrapper::vmla(out0, in10, s10);
- out0 = wrapper::vmla(out0, in11, s11);
- wrapper::vstore(reinterpret_cast<T *>(out_ptr + cout * sizeof(T) + xo * out_stride_y + yo * out_stride_z), out0);
+ auto in00 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
+ auto in01 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
+ auto in10 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
+ auto in11 = wrapper::vloadq(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
+
+ auto out0 = wrapper::vmul(in00, s00);
+ out0 = wrapper::vmla(out0, in01, s01);
+ out0 = wrapper::vmla(out0, in10, s10);
+ out0 = wrapper::vmla(out0, in11, s11);
+ wrapper::vstore(reinterpret_cast<T *>(out_ptr + offset + cout * sizeof(T)), out0);
}
for(; cout < out_dim_ch; ++cout)
{
- auto in00 = static_cast<T>(0);
- auto in01 = static_cast<T>(0);
- auto in10 = static_cast<T>(0);
- auto in11 = static_cast<T>(0);
- in00 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi0) * in_stride_z));
- in01 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi0) * in_stride_z));
- in10 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi0) * in_stride_y + (yi1) * in_stride_z));
- in11 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + (xi1) * in_stride_y + (yi1) * in_stride_z));
- auto out0 = static_cast<T>(0);
- out0 += in00 * s00_s;
+ T in00 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi0_offset));
+ T in01 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi0_offset));
+ T in10 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi0_offset + yi1_offset));
+ T in11 = *(reinterpret_cast<const T *>(in_ptr + cout * sizeof(T) + xi1_offset + yi1_offset));
+
+ T out0 = in00 * s00_s;
out0 += in01 * s01_s;
out0 += in10 * s10_s;
out0 += in11 * s11_s;
- *(reinterpret_cast<T *>(out_ptr + cout * sizeof(T) + xo * out_stride_y + yo * out_stride_z)) = out0;
+ *(reinterpret_cast<T *>(out_ptr + offset + cout * sizeof(T))) = out0;
}
}
}
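Two micro-optimizations stand out in the vector loop: the weight vectors
s00..s11 are broadcast once per output pixel (hoisted out of the channel
loop), and the accumulator is seeded with a multiply instead of a zero vector
followed by a fused multiply-add. A sketch of the new accumulation using raw
arm_neon.h intrinsics (the library goes through its wrapper:: layer; the FP32
intrinsic spelling here is illustrative):

#include <arm_neon.h>

// Seeding out0 with vmul removes the vdup(0) and one vmla per output vector
// compared to the previous zero-initialise-then-accumulate sequence.
static inline float32x4_t bilinear_blend(float32x4_t in00, float32x4_t in01,
                                         float32x4_t in10, float32x4_t in11,
                                         float32x4_t s00, float32x4_t s01,
                                         float32x4_t s10, float32x4_t s11)
{
    float32x4_t out0 = vmulq_f32(in00, s00);   // was: vdup_n(0) then vmla
    out0 = vmlaq_f32(out0, in01, s01);         // out0 += in01 * s01
    out0 = vmlaq_f32(out0, in10, s10);         // out0 += in10 * s10
    out0 = vmlaq_f32(out0, in11, s11);         // out0 += in11 * s11
    return out0;
}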
diff --git a/src/cpu/operators/CpuScale.cpp b/src/cpu/operators/CpuScale.cpp
index 27da238c16..fdb52e5ede 100644
--- a/src/cpu/operators/CpuScale.cpp
+++ b/src/cpu/operators/CpuScale.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,9 +23,6 @@
*/
#include "src/cpu/operators/CpuScale.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/utils/ScaleUtils.h"
@@ -218,26 +215,38 @@ void CpuScale::prepare(ITensorPack &tensors)
_scale_info.interpolation_policy;
const SamplingPolicy sampling_policy = _scale_info.sampling_policy;
- switch(policy_to_use)
+ bool precompute_indices_weights = arm_compute::scale_utils::is_precomputation_required(_data_layout, src->info()->data_type(), policy_to_use);
+
+ if(precompute_indices_weights == true)
{
- case InterpolationPolicy::NEAREST_NEIGHBOR:
- {
- // Pre-compute offsets for nearest interpolation
- precompute_dx_dy_offsets(nullptr, nullptr, offsets, wr, hr, sampling_policy, is_align_corners_used);
- break;
- }
- case InterpolationPolicy::BILINEAR:
+ switch(policy_to_use)
{
- // Pre-compute dx, dy and offsets for bilinear interpolation
- precompute_dx_dy_offsets(dx, dy, offsets, wr, hr, sampling_policy, is_align_corners_used);
- break;
+ case InterpolationPolicy::NEAREST_NEIGHBOR:
+ {
+ // Pre-compute offsets for nearest interpolation
+ precompute_dx_dy_offsets(nullptr, nullptr, offsets, wr, hr, sampling_policy, is_align_corners_used);
+ break;
+ }
+ case InterpolationPolicy::BILINEAR:
+ {
+ // Pre-compute dx, dy and offsets for bilinear interpolation
+ precompute_dx_dy_offsets(dx, dy, offsets, wr, hr, sampling_policy, is_align_corners_used);
+ break;
+ }
+ case InterpolationPolicy::AREA:
+ {
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported interpolation mode");
}
- case InterpolationPolicy::AREA:
+ }
+ else
+ {
+ if(policy_to_use != InterpolationPolicy::NEAREST_NEIGHBOR && policy_to_use != InterpolationPolicy::BILINEAR && policy_to_use != InterpolationPolicy::AREA)
{
- break;
- }
- default:
ARM_COMPUTE_ERROR("Unsupported interpolation mode");
+ }
}
}
}
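Condensed, prepare() now branches on the new predicate: the precompute branch
keeps the original switch, while the fast path only validates the policy. An
illustrative outline (precompute_for is a hypothetical stand-in for the switch
shown in the diff above):

if(scale_utils::is_precomputation_required(_data_layout, src->info()->data_type(), policy_to_use))
{
    precompute_for(policy_to_use); // NEAREST_NEIGHBOR/BILINEAR precompute; AREA is a no-op
}
else if(policy_to_use != InterpolationPolicy::NEAREST_NEIGHBOR
        && policy_to_use != InterpolationPolicy::BILINEAR
        && policy_to_use != InterpolationPolicy::AREA)
{
    ARM_COMPUTE_ERROR("Unsupported interpolation mode");
}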
diff --git a/src/runtime/NEON/functions/NEScale.cpp b/src/runtime/NEON/functions/NEScale.cpp
index 9f48e78a5a..74ab860d91 100644
--- a/src/runtime/NEON/functions/NEScale.cpp
+++ b/src/runtime/NEON/functions/NEScale.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2021 Arm Limited.
+ * Copyright (c) 2016-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -75,34 +75,46 @@ void NEScale::configure(ITensor *input, ITensor *output, const ScaleKernelInfo &
TensorShape shape(output->info()->dimension(idx_width));
shape.set(1, output->info()->dimension(idx_height), false);
- const TensorInfo tensor_info_dxdy(shape, Format::F32);
- const TensorInfo tensor_info_offsets(shape, Format::S32);
+ bool precompute_indices_weights = arm_compute::scale_utils::is_precomputation_required(data_layout, input->info()->data_type(), policy_to_use);
- _impl->dx.allocator()->init(tensor_info_dxdy);
- _impl->dy.allocator()->init(tensor_info_dxdy);
- _impl->offsets.allocator()->init(tensor_info_offsets);
- switch(policy_to_use)
+ if(precompute_indices_weights == true)
{
- case InterpolationPolicy::NEAREST_NEIGHBOR:
- {
- // Allocate once the configure methods have been called
- _impl->offsets.allocator()->allocate();
- break;
- }
- case InterpolationPolicy::BILINEAR:
+ const TensorInfo tensor_info_dxdy(shape, Format::F32);
+ const TensorInfo tensor_info_offsets(shape, Format::S32);
+
+ _impl->dx.allocator()->init(tensor_info_dxdy);
+ _impl->dy.allocator()->init(tensor_info_dxdy);
+ _impl->offsets.allocator()->init(tensor_info_offsets);
+ switch(policy_to_use)
{
- // Allocate once the configure methods have been called
- _impl->dx.allocator()->allocate();
- _impl->dy.allocator()->allocate();
- _impl->offsets.allocator()->allocate();
- break;
+ case InterpolationPolicy::NEAREST_NEIGHBOR:
+ {
+ // Allocate once the configure methods have been called
+ _impl->offsets.allocator()->allocate();
+ break;
+ }
+ case InterpolationPolicy::BILINEAR:
+ {
+ // Allocate once the configure methods have been called
+ _impl->dx.allocator()->allocate();
+ _impl->dy.allocator()->allocate();
+ _impl->offsets.allocator()->allocate();
+ break;
+ }
+ case InterpolationPolicy::AREA:
+ {
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported interpolation mode");
}
- case InterpolationPolicy::AREA:
+ }
+ else
+ {
+ if(policy_to_use != InterpolationPolicy::NEAREST_NEIGHBOR && policy_to_use != InterpolationPolicy::BILINEAR && policy_to_use != InterpolationPolicy::AREA)
{
- break;
- }
- default:
ARM_COMPUTE_ERROR("Unsupported interpolation mode");
+ }
}
}
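
A practical side effect of the NEScale change is that the dx/dy/offsets
auxiliary tensors are never allocated on the fast path. A back-of-the-envelope
estimate under assumed (not measured) conditions:

#include <cstddef>
#include <cstdio>

// dx and dy are F32 planes and offsets is an S32 plane, i.e. 12 bytes per
// output pixel. A 1920x1080 output is an assumed example size.
int main()
{
    const std::size_t w = 1920, h = 1080;
    const std::size_t bytes = w * h * (4 /*dx:F32*/ + 4 /*dy:F32*/ + 4 /*offsets:S32*/);
    std::printf("~%.1f MiB not allocated\n", bytes / (1024.0 * 1024.0)); // ~23.7 MiB
    return 0;
}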