author     Gunes Bayir <gunes.bayir@arm.com>   2022-08-11 12:15:39 +0100
committer  Gunes Bayir <gunes.bayir@arm.com>   2022-08-18 16:31:44 +0000
commit     53929b1fd4dd3c27f5afb5b8626e27605ebe62cf (patch)
tree       39d12e25aa5b9d7552f14b5e4838da0e8a4230c8
parent     9ee8a3e542a8f4fa05816f1a4b82543c0deffbba (diff)
download   ComputeLibrary-53929b1fd4dd3c27f5afb5b8626e27605ebe62cf.tar.gz
Use Neon™ kernels for FP Bilinear Resize for SVE
Removes FP Bilinear SVE kernels and uses Neon™ kernels instead.

Resolves: COMPMID-5449
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Change-Id: I8e01de44bd884cb6578ca0b9358509b69bc31ca2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8100
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/cpu/kernels/CpuKernelSelectionTypes.h  |  8
-rw-r--r--  src/cpu/kernels/CpuScaleKernel.cpp         | 34
-rw-r--r--  src/cpu/kernels/CpuScaleKernel.h           |  6
-rw-r--r--  src/cpu/kernels/scale/sve/fp16.cpp         | 80
-rw-r--r--  src/cpu/kernels/scale/sve/fp32.cpp         | 78
5 files changed, 41 insertions(+), 165 deletions(-)
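
The core of the change is that kernel selection for scale now keys on the interpolation policy in addition to data type and ISA, so FP bilinear requests no longer match the SVE entries and fall through to the Neon™ kernels later in the same table. Below is a minimal, self-contained sketch of that first-match selection pattern; the simplified enums, structs and the pick_kernel() helper are illustrative stand-ins, not the actual ComputeLibrary API.

// Sketch of the selection pattern introduced by this patch.
// The types and pick_kernel() below are simplified, hypothetical stand-ins.
#include <cstdio>
#include <functional>
#include <vector>

enum class DataType { F16, F32 };
enum class InterpolationPolicy { NEAREST_NEIGHBOR, BILINEAR };

struct IsaInfo { bool sve; bool fp16; };

// Mirrors the idea of ScaleKernelDataTypeISASelectorData: the interpolation
// policy is now part of the selection key, not just data type and ISA.
struct SelectorData
{
    DataType            dt;
    IsaInfo             isa;
    InterpolationPolicy interpolation_policy;
};

struct ScaleKernel
{
    const char                                *name;
    std::function<bool(const SelectorData &)>  is_selected;
};

// First match wins, as in the available_kernels table: the SVE FP entries now
// reject BILINEAR, so bilinear FP requests fall through to the Neon entries.
const ScaleKernel *pick_kernel(const std::vector<ScaleKernel> &kernels, const SelectorData &data)
{
    for(const auto &k : kernels)
    {
        if(k.is_selected(data))
        {
            return &k;
        }
    }
    return nullptr;
}

int main()
{
    const std::vector<ScaleKernel> kernels =
    {
        { "sve_fp32_scale", [](const SelectorData &d) { return d.dt == DataType::F32 && d.isa.sve && d.interpolation_policy != InterpolationPolicy::BILINEAR; } },
        { "neon_fp32_scale", [](const SelectorData &d) { return d.dt == DataType::F32; } },
    };

    const SelectorData bilinear{ DataType::F32, { true, true }, InterpolationPolicy::BILINEAR };
    const SelectorData nearest{ DataType::F32, { true, true }, InterpolationPolicy::NEAREST_NEIGHBOR };

    std::printf("%s\n", pick_kernel(kernels, bilinear)->name); // neon_fp32_scale
    std::printf("%s\n", pick_kernel(kernels, nearest)->name);  // sve_fp32_scale
    return 0;
}

With this pattern the SVE FP entries stay first in the table for nearest-neighbor, while dropping their bilinear paths is enough to redirect bilinear FP work to the Neon™ kernels without touching any callers, which is what the diff below does.
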
diff --git a/src/cpu/kernels/CpuKernelSelectionTypes.h b/src/cpu/kernels/CpuKernelSelectionTypes.h
index 19c41f9fcd..e3ecc4e709 100644
--- a/src/cpu/kernels/CpuKernelSelectionTypes.h
+++ b/src/cpu/kernels/CpuKernelSelectionTypes.h
@@ -90,6 +90,13 @@ struct CpuAddKernelDataTypeISASelectorData
bool can_interpret_inputs_as_1d_array;
};
+struct ScaleKernelDataTypeISASelectorData
+{
+ DataType dt;
+ cpuinfo::CpuIsaInfo isa;
+ InterpolationPolicy interpolation_policy;
+};
+
// Selector pointer types
using DataTypeISASelectorPtr = std::add_pointer<bool(const DataTypeISASelectorData &data)>::type;
using DataTypeDataLayoutSelectorPtr = std::add_pointer<bool(const DataTypeDataLayoutISASelectorData &data)>::type;
@@ -99,6 +106,7 @@ using DepthwiseConv2dNativeDataTypeISASelectorPtr = std::add_pointer<bool(const
using CastDataTypeISASelectorDataPtr = std::add_pointer<bool(const CastDataTypeISASelectorData &data)>::type;
using ActivationDataTypeISASelectorDataPtr = std::add_pointer<bool(const ActivationDataTypeISASelectorData &data)>::type;
using CpuAddKernelDataTypeISASelectorDataPtr = std::add_pointer<bool(const CpuAddKernelDataTypeISASelectorData &data)>::type;
+using ScaleKernelDataTypeISASelectorDataPtr = std::add_pointer<bool(const ScaleKernelDataTypeISASelectorData &data)>::type;
} // namespace kernels
} // namespace cpu
diff --git a/src/cpu/kernels/CpuScaleKernel.cpp b/src/cpu/kernels/CpuScaleKernel.cpp
index e230dfa938..c9e858fc02 100644
--- a/src/cpu/kernels/CpuScaleKernel.cpp
+++ b/src/cpu/kernels/CpuScaleKernel.cpp
@@ -52,62 +52,68 @@ static const std::vector<CpuScaleKernel::ScaleKernel> available_kernels =
{
{
"sve_fp16_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16; },
+ [](const ScaleKernelDataTypeISASelectorData & data)
+ {
+ return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16 && data.interpolation_policy != InterpolationPolicy::BILINEAR;
+ },
REGISTER_FP16_SVE(arm_compute::cpu::fp16_sve_scale)
},
{
"sve_fp32_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F32 && data.isa.sve; },
+ [](const ScaleKernelDataTypeISASelectorData & data)
+ {
+ return data.dt == DataType::F32 && data.isa.sve && data.interpolation_policy != InterpolationPolicy::BILINEAR;
+ },
REGISTER_FP32_SVE(arm_compute::cpu::fp32_sve_scale)
},
{
"sve_qu8_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve; },
REGISTER_QASYMM8_SVE(arm_compute::cpu::qasymm8_sve_scale)
},
{
"sve_qs8_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve; },
REGISTER_QASYMM8_SIGNED_SVE(arm_compute::cpu::qasymm8_signed_sve_scale)
},
{
"sve_u8_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::U8 && data.isa.sve; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::U8 && data.isa.sve; },
REGISTER_INTEGER_SVE(arm_compute::cpu::u8_sve_scale)
},
{
"sve_s16_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::S16 && data.isa.sve; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::S16 && data.isa.sve; },
REGISTER_INTEGER_SVE(arm_compute::cpu::s16_sve_scale)
},
{
"neon_fp16_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; },
REGISTER_FP16_NEON(arm_compute::cpu::common_neon_scale<float16_t>)
},
{
"neon_fp32_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F32; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::common_neon_scale<float>)
},
{
"neon_qu8_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; },
REGISTER_QASYMM8_NEON(arm_compute::cpu::qasymm8_neon_scale)
},
{
"neon_qs8_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; },
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::qasymm8_signed_neon_scale)
},
{
"neon_u8_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::U8; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::U8; },
REGISTER_INTEGER_NEON(arm_compute::cpu::u8_neon_scale)
},
{
"neon_s16_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::S16; },
+ [](const ScaleKernelDataTypeISASelectorData & data) { return data.dt == DataType::S16; },
REGISTER_INTEGER_NEON(arm_compute::cpu::s16_neon_scale)
},
};
@@ -115,7 +121,7 @@ static const std::vector<CpuScaleKernel::ScaleKernel> available_kernels =
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dx, const ITensorInfo *dy,
const ITensorInfo *offsets, ITensorInfo *dst, const ScaleKernelInfo &info)
{
- const auto *uk = CpuScaleKernel::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
+ const auto *uk = CpuScaleKernel::get_implementation(ScaleKernelDataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa(), info.interpolation_policy });
ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
@@ -174,7 +180,7 @@ void CpuScaleKernel::configure(const ITensorInfo *src, const ITensorInfo *dx, co
dst,
info));
- const auto *uk = CpuScaleKernel::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
+ const auto *uk = CpuScaleKernel::get_implementation(ScaleKernelDataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa(), info.interpolation_policy });
ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
_run_method = uk->ukernel;
diff --git a/src/cpu/kernels/CpuScaleKernel.h b/src/cpu/kernels/CpuScaleKernel.h
index e0e9e387bd..416e115796 100644
--- a/src/cpu/kernels/CpuScaleKernel.h
+++ b/src/cpu/kernels/CpuScaleKernel.h
@@ -75,9 +75,9 @@ public:
struct ScaleKernel
{
- const char *name;
- const DataTypeISASelectorPtr is_selected;
- ScaleKernelPtr ukernel;
+ const char *name;
+ const ScaleKernelDataTypeISASelectorDataPtr is_selected;
+ ScaleKernelPtr ukernel;
};
static const std::vector<ScaleKernel> &get_available_kernels();
diff --git a/src/cpu/kernels/scale/sve/fp16.cpp b/src/cpu/kernels/scale/sve/fp16.cpp
index d08bfd8cdf..ceda19f366 100644
--- a/src/cpu/kernels/scale/sve/fp16.cpp
+++ b/src/cpu/kernels/scale/sve/fp16.cpp
@@ -84,77 +84,6 @@ void fp16_sve_scale_nearest(const ITensor *src, ITensor *dst, const ITensor *off
},
out);
}
-
-void fp16_sve_scale_bilinear(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy,
- BorderMode border_mode, PixelValue constant_border_value, float sampling_offset,
- bool align_corners, const Window &window)
-{
- // Compute the ratio between source height and destination height
- const auto hr = scale_utils::calculate_resize_ratio(src->info()->dimension(2), dst->info()->dimension(2), align_corners);
-
- Iterator out(dst, window);
- const int in_stride_c = src->info()->dimension(0) + src->info()->padding().left + src->info()->padding().right;
- const int in_dim_w = src->info()->dimension(1);
- const int in_dim_h = src->info()->dimension(2);
- const int in_stride_wc = in_stride_c * (in_dim_w + src->info()->padding().top + src->info()->padding().bottom);
-
- // Don't increment in Y and Z direction for the input tensor
- // A pointer to the start of this plane is needed as base for the precomputed offsets
- Window win_in(window);
- win_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- win_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
- Iterator in(src, win_in);
-
- if(border_mode == BorderMode::CONSTANT)
- {
- using ConstType = typename std::conditional<std::is_same<float16_t, float16_t>::value, half, float16_t>::type;
-
- const float16_t const_border_value = static_cast<float16_t>(constant_border_value.get<ConstType>());
- execute_window_loop(window, [&](const Coordinates & id)
- {
- const auto offset = *reinterpret_cast<const int32_t *>(offsets->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dx_val = *reinterpret_cast<const float *>(dx->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dy_val = *reinterpret_cast<const float *>(dy->ptr_to_element(Coordinates(id.y(), id.z())));
- const int32_t in_hi = std::floor((id.z() + sampling_offset) * hr - sampling_offset);
- const float16_t *in_ptr = reinterpret_cast<const float16_t *>(in.ptr()) + offset * in_stride_c + in_hi * in_stride_wc;
-
- const auto a00 = (0 <= offset && offset < in_dim_w && 0 <= in_hi && in_hi < in_dim_h) ? *in_ptr : const_border_value;
- const auto a01 = (-1 <= offset && offset < in_dim_w - 1 && 0 <= in_hi && in_hi < in_dim_h) ? *(in_ptr + in_stride_c) : const_border_value;
- const auto a10 = (0 <= offset && offset < in_dim_w && -1 <= in_hi && in_hi < in_dim_h - 1) ? *(in_ptr + in_stride_wc) : const_border_value;
- const auto a11 = (-1 <= offset && offset < in_dim_w - 1 && -1 <= in_hi && in_hi < in_dim_h - 1) ? *(in_ptr + in_stride_c + in_stride_wc) : const_border_value;
-
- *reinterpret_cast<float16_t *>(out.ptr()) = static_cast<float16_t>(scale_helpers::delta_bilinear(a00, a01, a10, a11, dx_val, dy_val));
- },
- in, out);
- }
- else if(border_mode == BorderMode::REPLICATE)
- {
- execute_window_loop(window, [&](const Coordinates & id)
- {
- const auto offset = *reinterpret_cast<const int32_t *>(offsets->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dx_val = *reinterpret_cast<const float *>(dx->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dy_val = *reinterpret_cast<const float *>(dy->ptr_to_element(Coordinates(id.y(), id.z())));
- const int in_hi = std::floor((id.z() + sampling_offset) * hr - sampling_offset);
-
- auto clamped_w = utility::clamp<int>(offset, 0, in_dim_w - 1);
- auto clamped_w1 = utility::clamp<int>(offset + 1, 0, in_dim_w - 1);
- auto clamped_h = utility::clamp<int>(in_hi, 0, in_dim_h - 1);
- auto clamped_h1 = utility::clamp<int>(in_hi + 1, 0, in_dim_h - 1);
-
- const auto a00 = *(reinterpret_cast<const float16_t *>(in.ptr()) + clamped_w * in_stride_c + clamped_h * in_stride_wc);
- const auto a01 = *(reinterpret_cast<const float16_t *>(in.ptr()) + clamped_w1 * in_stride_c + clamped_h * in_stride_wc);
- const auto a10 = *(reinterpret_cast<const float16_t *>(in.ptr()) + clamped_w * in_stride_c + clamped_h1 * in_stride_wc);
- const auto a11 = *(reinterpret_cast<const float16_t *>(in.ptr()) + clamped_w1 * in_stride_c + clamped_h1 * in_stride_wc);
-
- *reinterpret_cast<float16_t *>(out.ptr()) = static_cast<float16_t>(scale_helpers::delta_bilinear(a00, a01, a10, a11, dx_val, dy_val));
- },
- in, out);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not implemented");
- }
-}
}
namespace cpu
{
@@ -162,13 +91,14 @@ void fp16_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, co
InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset,
bool align_corners, const Window &window)
{
- if(policy == InterpolationPolicy::BILINEAR)
+ ARM_COMPUTE_UNUSED(dx, dy, border_mode, constant_border_value);
+ if(policy == InterpolationPolicy::NEAREST_NEIGHBOR)
{
- fp16_sve_scale_bilinear(src, dst, offsets, dx, dy, border_mode, constant_border_value, sampling_offset, align_corners, window);
+ fp16_sve_scale_nearest(src, dst, offsets, sampling_offset, align_corners, window);
}
- else if(policy == InterpolationPolicy::NEAREST_NEIGHBOR)
+ else
{
- fp16_sve_scale_nearest(src, dst, offsets, sampling_offset, align_corners, window);
+ ARM_COMPUTE_ERROR("Not implemented");
}
}
} // namespace cpu
diff --git a/src/cpu/kernels/scale/sve/fp32.cpp b/src/cpu/kernels/scale/sve/fp32.cpp
index 98b343870f..f3472f1efd 100644
--- a/src/cpu/kernels/scale/sve/fp32.cpp
+++ b/src/cpu/kernels/scale/sve/fp32.cpp
@@ -83,75 +83,6 @@ void fp32_sve_scale_nearest(const ITensor *src, ITensor *dst, const ITensor *off
},
out);
}
-
-void fp32_sve_scale_bilinear(const ITensor *src, ITensor *dst, const ITensor *offsets, const ITensor *dx, const ITensor *dy,
- BorderMode border_mode, PixelValue constant_border_value, float sampling_offset,
- bool align_corners, const Window &window)
-{
- // Compute the ratio between source height and destination height
- const auto hr = scale_utils::calculate_resize_ratio(src->info()->dimension(2), dst->info()->dimension(2), align_corners);
-
- Iterator out(dst, window);
- const int in_stride_c = src->info()->dimension(0) + src->info()->padding().left + src->info()->padding().right;
- const int in_dim_w = src->info()->dimension(1);
- const int in_dim_h = src->info()->dimension(2);
- const int in_stride_wc = in_stride_c * (in_dim_w + src->info()->padding().top + src->info()->padding().bottom);
-
- // Don't increment in Y and Z direction for the input tensor
- // A pointer to the start of this plane is needed as base for the precomputed offsets
- Window win_in(window);
- win_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- win_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
- Iterator in(src, win_in);
-
- if(border_mode == BorderMode::CONSTANT)
- {
- const float const_border_value = static_cast<float>(constant_border_value.get<float>());
- execute_window_loop(window, [&](const Coordinates & id)
- {
- const auto offset = *reinterpret_cast<const int32_t *>(offsets->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dx_val = *reinterpret_cast<const float *>(dx->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dy_val = *reinterpret_cast<const float *>(dy->ptr_to_element(Coordinates(id.y(), id.z())));
- const int32_t in_hi = std::floor((id.z() + sampling_offset) * hr - sampling_offset);
- const float *in_ptr = reinterpret_cast<const float *>(in.ptr()) + offset * in_stride_c + in_hi * in_stride_wc;
-
- const auto a00 = (0 <= offset && offset < in_dim_w && 0 <= in_hi && in_hi < in_dim_h) ? *in_ptr : const_border_value;
- const auto a01 = (-1 <= offset && offset < in_dim_w - 1 && 0 <= in_hi && in_hi < in_dim_h) ? *(in_ptr + in_stride_c) : const_border_value;
- const auto a10 = (0 <= offset && offset < in_dim_w && -1 <= in_hi && in_hi < in_dim_h - 1) ? *(in_ptr + in_stride_wc) : const_border_value;
- const auto a11 = (-1 <= offset && offset < in_dim_w - 1 && -1 <= in_hi && in_hi < in_dim_h - 1) ? *(in_ptr + in_stride_c + in_stride_wc) : const_border_value;
-
- *reinterpret_cast<float *>(out.ptr()) = static_cast<float>(scale_helpers::delta_bilinear(a00, a01, a10, a11, dx_val, dy_val));
- },
- in, out);
- }
- else if(border_mode == BorderMode::REPLICATE)
- {
- execute_window_loop(window, [&](const Coordinates & id)
- {
- const auto offset = *reinterpret_cast<const int32_t *>(offsets->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dx_val = *reinterpret_cast<const float *>(dx->ptr_to_element(Coordinates(id.y(), id.z())));
- const auto dy_val = *reinterpret_cast<const float *>(dy->ptr_to_element(Coordinates(id.y(), id.z())));
- const int in_hi = std::floor((id.z() + sampling_offset) * hr - sampling_offset);
-
- auto clamped_w = utility::clamp<int>(offset, 0, in_dim_w - 1);
- auto clamped_w1 = utility::clamp<int>(offset + 1, 0, in_dim_w - 1);
- auto clamped_h = utility::clamp<int>(in_hi, 0, in_dim_h - 1);
- auto clamped_h1 = utility::clamp<int>(in_hi + 1, 0, in_dim_h - 1);
-
- const auto a00 = *(reinterpret_cast<const float *>(in.ptr()) + clamped_w * in_stride_c + clamped_h * in_stride_wc);
- const auto a01 = *(reinterpret_cast<const float *>(in.ptr()) + clamped_w1 * in_stride_c + clamped_h * in_stride_wc);
- const auto a10 = *(reinterpret_cast<const float *>(in.ptr()) + clamped_w * in_stride_c + clamped_h1 * in_stride_wc);
- const auto a11 = *(reinterpret_cast<const float *>(in.ptr()) + clamped_w1 * in_stride_c + clamped_h1 * in_stride_wc);
-
- *reinterpret_cast<float *>(out.ptr()) = static_cast<float>(scale_helpers::delta_bilinear(a00, a01, a10, a11, dx_val, dy_val));
- },
- in, out);
- }
- else
- {
- ARM_COMPUTE_ERROR("Not implemented");
- }
-}
}
namespace cpu
{
@@ -159,13 +90,14 @@ void fp32_sve_scale(const ITensor *src, ITensor *dst, const ITensor *offsets, co
InterpolationPolicy policy, BorderMode border_mode, PixelValue constant_border_value, float sampling_offset,
bool align_corners, const Window &window)
{
- if(policy == InterpolationPolicy::BILINEAR)
+ ARM_COMPUTE_UNUSED(dx, dy, border_mode, constant_border_value);
+ if(policy == InterpolationPolicy::NEAREST_NEIGHBOR)
{
- fp32_sve_scale_bilinear(src, dst, offsets, dx, dy, border_mode, constant_border_value, sampling_offset, align_corners, window);
+ fp32_sve_scale_nearest(src, dst, offsets, sampling_offset, align_corners, window);
}
- else if(policy == InterpolationPolicy::NEAREST_NEIGHBOR)
+ else
{
- fp32_sve_scale_nearest(src, dst, offsets, sampling_offset, align_corners, window);
+ ARM_COMPUTE_ERROR("Not implemented");
}
}
} // namespace cpu