From ded3663274db0e4359461659fb3c813792df16e3 Mon Sep 17 00:00:00 2001
From: Freddie Liardet <frederick.liardet@arm.com>
Date: Fri, 3 Sep 2021 15:08:23 +0100
Subject: Remove padding in cpuPool2d NCHW

Remove padding from all cpuPool2d NCHW kernels (FP16, FP32 & Quantized).

Resolves: COMPMID-4728, COMPMID-4823
Signed-off-by: Freddie Liardet <frederick.liardet@arm.com>
Change-Id: Ida619f67cd6606b33828f2d9dee925aeb794cc50
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6358
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
---
 arm_compute/core/Types.h                 |   2 +-
 src/cpu/kernels/CpuPool2dKernel.cpp      | 109 ++-------
 src/cpu/kernels/CpuPool2dKernel.h        |   2 -
 src/cpu/kernels/pool2d/neon/nchw/all.cpp | 369 ++++++++++++++++++-------------
 src/cpu/kernels/pool2d/neon/quantized.h  | 201 ++++++++++-------
 src/cpu/operators/CpuPool2d.cpp          |  28 ---
 src/cpu/operators/CpuPool2d.h            |   1 -
 tests/validation/NEON/PoolingLayer.cpp   |   4 +-
 8 files changed, 359 insertions(+), 357 deletions(-)

diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 37ba9f93bf..b2b09825c1 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -675,8 +675,8 @@ public:
      * @param[in] stride_x   Stride, in elements, across x.
      * @param[in] stride_y   Stride, in elements, across y.
      * @param[in] pad_left   Padding across x on the left, in elements.
-     * @param[in] pad_top    Padding across y on the top, in elements.
      * @param[in] pad_right  Padding across x on the right, in elements.
+     * @param[in] pad_top    Padding across y on the top, in elements.
      * @param[in] pad_bottom Padding across y on the bottom, in elements.
      * @param[in] round      Dimensions rounding.
      */
diff --git a/src/cpu/kernels/CpuPool2dKernel.cpp b/src/cpu/kernels/CpuPool2dKernel.cpp
index d7fb75ee60..90ddefa599 100644
--- a/src/cpu/kernels/CpuPool2dKernel.cpp
+++ b/src/cpu/kernels/CpuPool2dKernel.cpp
@@ -239,7 +239,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, const PoolingLayerInfo &pool_info,
                                                         unsigned int &num_elems_processed_per_iteration,
-                                                        BorderSize &border_size,
                                                         int pool_size_x, int pool_size_y)
 {
     // dst auto inizialitation if not yet initialized
@@ -251,29 +250,22 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
                                                                                         pool_info)))
                            .set_data_type(DataType::U32) /* we store the offset to the element */);
     }
-    const auto data_layout                    = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
-    unsigned int num_elems_read_per_iteration = 0;
-    unsigned int num_elems_horizontal_window  = 0;
-    int pool_stride_x                         = 0;
-    int pool_stride_y                         = 0;
-    const int idx_width                       = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
-    const int idx_height                      = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-    const int src_width                       = src->dimension(idx_width);
-    const int src_height                      = src->dimension(idx_height);
-    const PadStrideInfo pad_stride_info       = pool_info.pad_stride_info;
+    const auto data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
+    ARM_COMPUTE_ERROR_ON(src->data_layout() != DataLayout::NCHW);
+
+    int pool_stride_x                   = 0;
+    int pool_stride_y                   = 0;
+    const int idx_width                 = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const int idx_height                = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
+
     std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
-    const int pool_pad_right  = pad_stride_info.pad_right();
-    const int pool_pad_top    = pad_stride_info.pad_top();
-    const int pool_pad_left   = pad_stride_info.pad_left();
-    const int pool_pad_bottom = pad_stride_info.pad_bottom();
-    const bool is_square = pool_size_x == pool_size_y;
-    const unsigned int pooled_w = dst->dimension(idx_width);
-    const unsigned int pooled_h = dst->dimension(idx_height);
+    const bool is_square        = pool_size_x == pool_size_y;
+    const unsigned int pooled_w = dst->dimension(idx_width);
+    const unsigned int pooled_h = dst->dimension(idx_height);
 
     //If it's not squared and optimized will be executed the MxN
-    num_elems_read_per_iteration      = 1;
     num_elems_processed_per_iteration = 1;
-    num_elems_horizontal_window       = 1;
 
     if(is_square)
     {
@@ -284,14 +276,10 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
                 switch(pool_size_x)
                 {
                     case 2:
-                        num_elems_read_per_iteration      = 16;
                         num_elems_processed_per_iteration = (pool_stride_x == 2) ? 8 : 15;
-                        num_elems_horizontal_window       = (pool_stride_x == 2) ? 8 : 16;
                         break;
                     case 3:
-                        num_elems_read_per_iteration      = 16;
                         num_elems_processed_per_iteration = (pool_stride_x == 2) ? 7 : 14;
-                        num_elems_horizontal_window       = (pool_stride_x == 2) ? 8 : 16;
                         break;
                     default:
                         break;
@@ -299,36 +287,11 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
                 break;
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
             case DataType::F16:
-                switch(pool_size_x)
-                {
-                    case 2:
-                    case 3:
-                        num_elems_read_per_iteration      = 4;
-                        num_elems_processed_per_iteration = 1;
-                        num_elems_horizontal_window       = 1;
-                        break;
-                    default:
-                        break;
-                }
+                num_elems_processed_per_iteration = 1;
                 break;
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
             case DataType::F32:
-                switch(pool_size_x)
-                {
-                    case 2:
-                        num_elems_read_per_iteration = 2;
-                        break;
-                    case 3:
-                        num_elems_read_per_iteration = 4; // We use vload4 for pooling3
-                        break;
-                    case 7:
-                        num_elems_read_per_iteration = 8; // We use vload8 for pooling7
-                        break;
-                    default:
-                        break;
-                }
                 num_elems_processed_per_iteration = 1;
-                num_elems_horizontal_window       = 1;
                 break;
             default:
                 ARM_COMPUTE_ERROR("Element size not supported");
@@ -338,47 +301,18 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src, ITenso
     bool window_changed = false;
     Window win{};
-    if(data_layout == DataLayout::NCHW)
-    {
-        // Number of iterations in X dimension
-        const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
-        // Upper limit for the number of right/bottom border elements that are accessed
-        const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - src_width;
-        const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - src_height;
-        border_size        = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
-        border_size.right  = std::max(upper_bound_w, pool_pad_right);
-        border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
-        TensorShape dst_shape{ src->tensor_shape() };
-        dst_shape.set(0, pooled_w);
-        dst_shape.set(1, pooled_h);
-        TensorInfo dst_info(src->clone()->set_tensor_shape(dst_shape));
-        win = calculate_max_window(dst_info, Steps(num_elems_processed_per_iteration));
-        AccessWindowStatic     src_access(src, -pool_pad_left, -pool_pad_top, ceil_to_multiple(src_width + border_size.right, pool_size_x), src_height + border_size.bottom);
-        AccessWindowHorizontal dst_access(dst, 0, num_elems_horizontal_window);
-        if(indices)
-        {
-            AccessWindowHorizontal indices_access(indices, 0, num_elems_horizontal_window);
-            window_changed = update_window_and_padding(win, src_access, dst_access, indices_access);
-        }
-        else
-        {
-            window_changed = update_window_and_padding(win, src_access, dst_access);
-        }
-        dst_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
-
-        border_size = src->padding();
-    }
+    // Upper limit for the number of right/bottom border elements that are accessed
+    TensorShape dst_shape{ src->tensor_shape() };
+    dst_shape.set(0, pooled_w);
+    dst_shape.set(1, pooled_h);
+    TensorInfo dst_info(src->clone()->set_tensor_shape(dst_shape));
+    win = calculate_max_window(dst_info, Steps(num_elems_processed_per_iteration));
 
     Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
     return std::make_pair(err, win);
 }
 } // namespace
 
-BorderSize CpuPool2dKernel::border_size() const
-{
-    return _border_size;
-}
-
 void CpuPool2dKernel::configure(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
@@ -419,7 +353,7 @@ void CpuPool2dKernel::configure(ITensorInfo *src, ITensorInfo *dst, const Poolin
     {
         // Configure kernel window
         auto win_config = validate_and_configure_window(src, dst, indices, pool_info, _num_elems_processed_per_iteration,
-                                                        _border_size, pool_size.x(), pool_size.y());
+                                                        pool_size.x(), pool_size.y());
         ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
         ICpuKernel::configure(win_config.second);
     }
@@ -430,7 +364,6 @@ Status CpuPool2dKernel::validate(const ITensorInfo *src, const ITensorInfo *dst,
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
 
     unsigned int num_elems_processed_per_iteration = 0;
-    BorderSize   border_size(0);
 
     const bool is_global_pooling = pool_info.is_global_pooling;
 
@@ -444,7 +377,7 @@ Status CpuPool2dKernel::validate(const ITensorInfo *src, const ITensorInfo *dst,
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices, Size2D(pool_size_x, pool_size_y)));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(),
-                                                              (indices) ? indices->clone().get() : nullptr, pool_info, num_elems_processed_per_iteration, border_size,
+                                                              (indices) ? indices->clone().get() : nullptr, pool_info, num_elems_processed_per_iteration,
                                                               pool_size_x, pool_size_y)
                                 .first);
diff --git a/src/cpu/kernels/CpuPool2dKernel.h b/src/cpu/kernels/CpuPool2dKernel.h
index 70fe52d29c..aedeb7fbe9 100644
--- a/src/cpu/kernels/CpuPool2dKernel.h
+++ b/src/cpu/kernels/CpuPool2dKernel.h
@@ -60,7 +60,6 @@ public:
 
     // Inherited methods overridden:
     void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
-    BorderSize border_size() const override;
     const char *name() const override;
 
 private:
@@ -70,7 +69,6 @@ private:
     PoolingLayerInfo _pool_info{};
     DataLayout       _data_layout{ DataLayout::UNKNOWN };
     unsigned int     _num_elems_processed_per_iteration{ 0 };
-    BorderSize       _border_size{ 0 };
    Size2D           _pool_size{};
     int              _pool_stride_x{};
     PoolingKernelPtr _run_method{ nullptr };
diff --git a/src/cpu/kernels/pool2d/neon/nchw/all.cpp b/src/cpu/kernels/pool2d/neon/nchw/all.cpp
index 3ca7701087..109fc1b283 100644
--- a/src/cpu/kernels/pool2d/neon/nchw/all.cpp
+++ b/src/cpu/kernels/pool2d/neon/nchw/all.cpp
@@ -28,18 +28,55 @@
 #include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
 #include "src/core/helpers/WindowHelpers.h"
 #include "src/cpu/kernels/pool2d/neon/list.h"
+#include <limits>
 
 #ifdef ENABLE_NCHW_KERNELS
 namespace arm_compute
 {
 namespace cpu
 {
+#define READ_2_RIGHT_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval) \
+    (x == width + pad_left - 1) ? vset_lane_f32(*(ptr), vdup_n_f32(fval), 0) : vld1_f32(ptr)
+#define READ_2_LEFT_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval) \
+    (x == pad_left - 1) ? vset_lane_f32(*(1 + ptr), vdup_n_f32(fval), 1) : READ_2_RIGHT_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval)
+#define READ_2_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval) \
+    ((y < pad_top) || (x < pad_left - 1) || (y >= height + pad_top) || (x > width + pad_left - 1)) ? vdup_n_f32(fval) : READ_2_LEFT_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval)
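+// READ_2_BOUNDARY_AWARE yields a 2-lane f32 vector in which any lane that falls
+// outside the unpadded source region is replaced by fval; the helpers below
+// compose it into 4- and 8-element boundary-aware loads.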
+
+#define READ_4_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval)           \
+    vcombine_f32(READ_2_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval), \
+                 READ_2_BOUNDARY_AWARE(height, width, pad_left, pad_top, (x + 2), y, (ptr + 2), fval))
+
+float32x4x2_t read_8_boundary_aware(int height, int width, int pad_left, int pad_top, int x, int y, const float *ptr, float fval)
+{
+    float32x4x2_t vec;
+    vec.val[0] = READ_4_BOUNDARY_AWARE(height, width, pad_left, pad_top, x, y, ptr, fval);
+    vec.val[1] = READ_4_BOUNDARY_AWARE(height, width, pad_left, pad_top, (x + 4), y, (ptr + 4), fval);
+    return vec;
+}
+
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+float16x4_t read_4_boundary_aware_fp16(int srcw, int srch, int pad_l, int pad_t, int x, int y, const float16_t *ptr, float16_t fval)
+{
+    float16_t  vec[4];
+    const bool row_in_bounds((y >= pad_t) && (y < (srch + pad_t)));
+    for(int i = 0; i < 4; i++)
+    {
+        if(row_in_bounds && (x + i >= pad_l) && (x + i < (srcw + pad_l)))
+        {
+            vec[i] = *(ptr + i);
+        }
+        else
+        {
+            vec[i] = fval;
+        }
+    }
+    return wrapper::vload(vec);
+}
+
 void pooling3_fp16_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, PoolingLayerInfo &pool_info, const Window &window_src, const Window &window)
 {
     ARM_COMPUTE_UNUSED(dst1);
-    ARM_COMPUTE_UNUSED(pool_info.pool_type);
-    ARM_COMPUTE_UNUSED(pool_info.exclude_padding);
 
     Iterator in(src, window_src);
     Iterator out(dst0, window);
@@ -52,19 +89,29 @@ void pooling3_fp16_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
-
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    constexpr float16_t fp16_min   = -100.0f;
+    const float16_t     fill_value = (pool_info.pool_type == PoolingType::MAX) ? fp16_min : 0.f;
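+    // Padded taps read back fill_value: zero for average/L2 pooling, a large
+    // negative constant for max pooling, so no allocated border is needed.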
     const unsigned char *const src_top_ptr    = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top)));
     const unsigned char *const src_middle_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1));
     const unsigned char *const src_bottom_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 2));
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        float16x4_t top_data    = vld1_f16(reinterpret_cast<const float16_t *>(src_top_ptr + in.offset()));
-        float16x4_t middle_data = vld1_f16(reinterpret_cast<const float16_t *>(src_middle_ptr + in.offset()));
-        float16x4_t bottom_data = vld1_f16(reinterpret_cast<const float16_t *>(src_bottom_ptr + in.offset()));
-        float16x4_t res         = {};
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+        const auto y_val_2 = (id.y() * pool_stride_y) + 2;
+        float16x4_t top_data = read_4_boundary_aware_fp16(src_w, src_h, pool_pad_left, pool_pad_top,
+                                                          x_val, y_val_0, reinterpret_cast<const float16_t *>(src_top_ptr + in.offset()), fill_value);
+        float16x4_t middle_data = read_4_boundary_aware_fp16(src_w, src_h, pool_pad_left, pool_pad_top,
+                                                             x_val, y_val_1, reinterpret_cast<const float16_t *>(src_middle_ptr + in.offset()), fill_value);
+        float16x4_t bottom_data = read_4_boundary_aware_fp16(src_w, src_h, pool_pad_left, pool_pad_top,
+                                                             x_val, y_val_2, reinterpret_cast<const float16_t *>(src_bottom_ptr + in.offset()), fill_value);
+        float16x4_t res = {};
 
         // Get power of 2 in case of l2 pooling
         if(pool_info.pool_type == PoolingType::L2)
@@ -88,7 +135,7 @@ void pooling3_fp16_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
         else
         {
             const float16x4_t max_data = vmax_f16(vmax_f16(top_data, bottom_data), middle_data);
-            res                        = vpmax_f16(vset_lane_f16(-std::numeric_limits<float16_t>::max(), max_data, 3), max_data);
+            res                        = vpmax_f16(vset_lane_f16(fp16_min, max_data, 3), max_data);
             res = vpmax_f16(res, res);
         }
 
@@ -119,6 +166,25 @@ f16_to_f32(float32x2_t in)
     return in;
 }
 
+template <typename T>
+auto read_2_boundary_aware(int srcw, int srch, int pad_l, int pad_t, int x, int y, const T *ptr, T fval)
+{
+    T          vec[2];
+    const bool row_in_bounds((y >= pad_t) && (y < (srch + pad_t)));
+    for(int i = 0; i < 2; i++)
+    {
+        if(row_in_bounds && (x + i >= pad_l) && (x + i < (srcw + pad_l)))
+        {
+            vec[i] = *(ptr + i);
+        }
+        else
+        {
+            vec[i] = fval;
+        }
+    }
+    return wrapper::vload(vec);
+}
+
 template <typename T>
 void pooling2_nchw_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *dst1, PoolingLayerInfo &pool_info, const Window &window_src, const Window &window)
 {
@@ -130,16 +196,25 @@ void pooling2_nchw_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *d
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
+    const int src_w = src->info()->dimension(0);
+    const int src_h = src->info()->dimension(1);
     const uint8_t *const src_top_ptr    = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top)));
     const uint8_t *const src_bottom_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1));
     const int pad_left    = src->info()->padding().left;
     const int pad_right   = src->info()->padding().right;
     const int in_stride_y = static_cast<int>(src->info()->strides_in_bytes().y());
+    constexpr T float_min  = -100.0f;
+    const T     fill_value = (pool_info.pool_type == PoolingType::MAX) ? float_min : 0.f;
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        auto top_data    = wrapper::vload(reinterpret_cast<const T *>(src_top_ptr + in.offset()));
-        auto bottom_data = wrapper::vload(reinterpret_cast<const T *>(src_bottom_ptr + in.offset()));
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+        auto top_data = read_2_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_top,
+                                              x_val, y_val_0, reinterpret_cast<const T *>(src_top_ptr + in.offset()), fill_value);
+        auto bottom_data = read_2_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_top,
+                                                 x_val, y_val_1, reinterpret_cast<const T *>(src_bottom_ptr + in.offset()), fill_value);
 
         float32x2_t top_data_f32    = f16_to_f32<T>(top_data);
         float32x2_t bottom_data_f32 = f16_to_f32<T>(bottom_data);
 
@@ -180,17 +255,29 @@ void pooling2_fp16_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     const int pool_pad_bottom = pool_info.pad_stride_info.pad_bottom();
     int pool_stride_x, pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    constexpr float16_t fp16_min   = -100.0f;
+    const float16_t     fill_value = (pool_info.pool_type == PoolingType::MAX) ? fp16_min : 0.0f;
 
     const unsigned char *const src_top_ptr    = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top)));
     const unsigned char *const src_bottom_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1));
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        float16x4_t top_data    = vld1_f16(reinterpret_cast<const float16_t *>(src_top_ptr + in.offset()));
-        float16x4_t bottom_data = vld1_f16(reinterpret_cast<const float16_t *>(src_bottom_ptr + in.offset()));
-        float16x4_t res         = {};
+        const auto in_top_ptr    = reinterpret_cast<const float16_t *>(src_top_ptr + in.offset());
+        const auto in_bottom_ptr = reinterpret_cast<const float16_t *>(src_bottom_ptr + in.offset());
+
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+        float16x4_t top_data = read_4_boundary_aware_fp16(src_w, src_h, pool_pad_left, pool_pad_top,
+                                                          x_val, y_val_0, in_top_ptr, fill_value);
+        float16x4_t bottom_data = read_4_boundary_aware_fp16(src_w, src_h, pool_pad_left, pool_pad_top,
+                                                             x_val, y_val_1, in_bottom_ptr, fill_value);
+        float16x4_t res = {};
 
         // Get power of 2 in case of l2 pooling
         if(pool_info.pool_type == PoolingType::L2)
@@ -242,48 +329,35 @@ void poolingMxN_fp16_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1,
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    constexpr float16_t fp16_min   = -100.0f;
+    const float16_t     fill_value = (pool_info.pool_type == PoolingType::MAX) ? fp16_min : 0.0f;
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        float16_t   res  = 0.0f;
-        float16x8_t vres = vdupq_n_f16(0.0f);
+        float16_t res = 0.0f;
 
         if(pool_info.pool_type != PoolingType::MAX)
         {
             // Calculate scale
-            const float scale = calculate_avg_scale(pool_info.exclude_padding, DataLayout::NCHW, id, pool_size_x, pool_size_y, upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x,
-                                                    pool_stride_y);
+            const float16_t scale = calculate_avg_scale(pool_info.exclude_padding, DataLayout::NCHW, id, pool_size_x, pool_size_y, upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x,
+                                                        pool_stride_y);
 
             // Perform pooling
-
             for(int y = 0; y < pool_size_y; ++y)
             {
-                int x = 0;
-                for(; x <= (pool_size_x - 8); x += 8)
+                for(int x = 0; x < pool_size_x; ++x)
                 {
-                    const float16x8_t data = vld1q_f16(reinterpret_cast<const float16_t *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                       (src->info()->strides_in_bytes().y())));
-
-                    // Get power of 2 in case of l2 pooling and accumulate
-                    if(pool_info.pool_type == PoolingType::L2)
-                    {
-                        vres = vaddq_f16(vres, vmulq_f16(data, data));
-                    }
-                    else
-                    {
-                        vres = vaddq_f16(vres, data);
-                    }
-                }
+                    const auto ptr = reinterpret_cast<const float16_t *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) +
+                                                                         (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().y()));
 
-                // Leftover for loop
-                for(; x < pool_size_x; ++x)
-                {
-                    float16_t data = *(reinterpret_cast<const float16_t *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x())
-                                                                           + (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().y())));
+                    const int idx  = x + id.x() * pool_stride_x - pool_pad_left;
+                    const int idy  = y + id.y() * pool_stride_y - pool_pad_top;
+                    float16_t data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *ptr;
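+                    // (idx, idy) is the tap position in the unpadded source; taps in the
+                    // padded region contribute fill_value instead of reading memory.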
 
-                    // Get power of 2 in case of l2 pooling
                     if(pool_info.pool_type == PoolingType::L2)
                     {
                         data *= data;
@@ -293,45 +367,26 @@ void poolingMxN_fp16_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1,
                 }
             }
 
-            // Reduction
-            float16x4_t tmp = vpadd_f16(vget_high_f16(vres), vget_low_f16(vres));
-            res += vget_lane_f16(tmp, 0);
-            res += vget_lane_f16(tmp, 1);
-            res += vget_lane_f16(tmp, 2);
-            res += vget_lane_f16(tmp, 3);
-
             // Divide by scale
             res *= scale;
         }
-        else
+        else // if max pooling
        {
-            float16x8_t vres = vdupq_n_f16(std::numeric_limits<float16_t>::lowest());
-            res              = std::numeric_limits<float16_t>::lowest();
+            res = fp16_min;
 
             for(int y = 0; y < pool_size_y; ++y)
             {
-                int x = 0;
-                for(; x <= (pool_size_x - 8); x += 8)
+                for(int x = 0; x < pool_size_x; ++x)
                 {
-                    const float16x8_t data = vld1q_f16(reinterpret_cast<const float16_t *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                       (src->info()->strides_in_bytes().y())));
-                    vres = vmaxq_f16(vres, data);
-                }
+                    const auto ptr = reinterpret_cast<const float16_t *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) +
+                                                                         (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().y()));
 
-                // Leftover for loop
-                for(; x < pool_size_x; ++x)
-                {
-                    const float16_t data = *(reinterpret_cast<const float16_t *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x())
-                                                                                 + (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().y())));
-                    res = std::max(res, data);
+                    const int idx  = x + id.x() * pool_stride_x - pool_pad_left;
+                    const int idy  = y + id.y() * pool_stride_y - pool_pad_top;
+                    float16_t data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *ptr;
+                    res            = std::max(res, data);
                 }
             }
-
-            float16x4_t tmp = vpmax_f16(vget_high_f16(vres), vget_low_f16(vres));
-            res = std::max(res, vget_lane_f16(tmp, 0));
-            res = std::max(res, vget_lane_f16(tmp, 1));
-            res = std::max(res, vget_lane_f16(tmp, 2));
-            res = std::max(res, vget_lane_f16(tmp, 3));
         }
 
         // Calculate square-root in case of l2 pooling
@@ -362,8 +417,11 @@ void poolingMxN_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1,
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const float fill_value  = (pool_info.pool_type == PoolingType::MAX) ? -std::numeric_limits<float>::max() : 0.0f;
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
         float res = 0.0f;
 
@@ -372,38 +430,21 @@ void poolingMxN_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1,
         if(pool_info.pool_type != PoolingType::MAX)
         {
             // Calculate scale
-            const float scale = calculate_avg_scale(pool_info.exclude_padding, DataLayout::NCHW, id, pool_size_x, pool_size_y, upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x,
-                                                    pool_stride_y);
+            const float scale = calculate_avg_scale(pool_info.exclude_padding, DataLayout::NCHW, id, pool_size_x, pool_size_y, upper_bound_w, upper_bound_h,
+                                                    pool_pad_left, pool_pad_top, pool_stride_x, pool_stride_y);
 
             // Perform pooling
-            float32x4_t vres = vdupq_n_f32(0.0f);
-
             for(int y = 0; y < pool_size_y; ++y)
             {
-                int x = 0;
-                for(; x <= (pool_size_x - 4); x += 4)
+                for(int x = 0; x < pool_size_x; ++x)
                 {
-                    const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                       (src->info()->strides_in_bytes().y())));
+                    const auto ptr = reinterpret_cast<const float *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) +
+                                                                     (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().y()));
 
-                    // Get power of 2 in case of l2 pooling and accumulate
-                    if(pool_info.pool_type == PoolingType::L2)
-                    {
-                        vres = vmlaq_f32(vres, data, data);
-                    }
-                    else
-                    {
-                        vres = vaddq_f32(vres, data);
-                    }
-                }
+                    const int idx  = x + id.x() * pool_stride_x - pool_pad_left;
+                    const int idy  = y + id.y() * pool_stride_y - pool_pad_top;
+                    float     data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *ptr;
 
-                // Leftover for loop
-                for(; x < pool_size_x; ++x)
-                {
-                    float data = *(reinterpret_cast<const float *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                                   (src->info()->strides_in_bytes().y())));
-
-                    // Get power of 2 in case of l2 pooling
                     if(pool_info.pool_type == PoolingType::L2)
                     {
                         data *= data;
@@ -413,51 +454,26 @@ void poolingMxN_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1,
                 }
             }
 
-#if defined(__aarch64__)
-            // Reduction operation available on 64 bit architectures only
-            res += vaddvq_f32(vres);
-#else // __aarch64__
-            // Reduction
-            float32x2_t tmp = vpadd_f32(vget_high_f32(vres), vget_low_f32(vres));
-            tmp             = vpadd_f32(tmp, tmp);
-
-            res += vget_lane_f32(tmp, 0);
-#endif // __aarch64__
             // Divide by scale
             res *= scale;
         }
-        else
+        else // if max pooling
         {
-            float32x4_t vres = vdupq_n_f32(std::numeric_limits<float>::lowest());
-            res              = std::numeric_limits<float>::lowest();
+            res = std::numeric_limits<float>::lowest();
 
             for(int y = 0; y < pool_size_y; ++y)
             {
-                int x = 0;
-                for(; x <= (pool_size_x - 4); x += 4)
+                for(int x = 0; x < pool_size_x; ++x)
                 {
-                    const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                       (src->info()->strides_in_bytes().y())));
-                    vres = vmaxq_f32(vres, data);
-                }
+                    const auto ptr = reinterpret_cast<const float *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) +
+                                                                     (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().y()));
 
-                // Leftover for loop
-                for(; x < pool_size_x; ++x)
-                {
-                    const float data = *(reinterpret_cast<const float *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                                         (src->info()->strides_in_bytes().y())));
-                    res = std::max(res, data);
+                    const int idx  = x + id.x() * pool_stride_x - pool_pad_left;
+                    const int idy  = y + id.y() * pool_stride_y - pool_pad_top;
+                    float     data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *ptr;
+                    res            = std::max(res, data);
                 }
             }
 
-#if defined(__aarch64__)
-            // Reduction operation available on 64 bit architectures only
-            res = std::max(vmaxvq_f32(vres), res);
-#else // __aarch64__
-            float32x2_t tmp = vpmax_f32(vget_high_f32(vres), vget_low_f32(vres));
-            tmp             = vpmax_f32(tmp, tmp);
-
-            res = std::max(res, vget_lane_f32(tmp, 0));
-#endif // __aarch64__
         }
 
         // Calculate square-root in case of l2 pooling
@@ -490,20 +506,28 @@ void pooling2_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const float fill_value  = (pool_info.pool_type == PoolingType::MAX) ? -std::numeric_limits<float>::max() : 0.0f;
 
     const uint8_t *const src_top_ptr    = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top)));
     const uint8_t *const src_bottom_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1));
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        const auto  in_top_ptr    = reinterpret_cast<const float *>(src_top_ptr + in.offset());
-        const auto  in_bottom_ptr = reinterpret_cast<const float *>(src_bottom_ptr + in.offset());
-        float32x2_t top_data      = vld1_f32(in_top_ptr);
-        float32x2_t bottom_data   = vld1_f32(in_bottom_ptr);
-        float32x2_t res           = {};
-        float       final_res     = 0;
+        const auto in_top_ptr    = reinterpret_cast<const float *>(src_top_ptr + in.offset());
+        const auto in_bottom_ptr = reinterpret_cast<const float *>(src_bottom_ptr + in.offset());
+
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+        auto top_data    = READ_2_BOUNDARY_AWARE(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val_0, in_top_ptr, fill_value);
+        auto bottom_data = READ_2_BOUNDARY_AWARE(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val_1, in_bottom_ptr, fill_value);
+        float32x2_t res       = {};
+        float       final_res = 0;
+
         // Get power of 2 in case of l2 pooling
         if(pool_info.pool_type == PoolingType::L2)
         {
@@ -556,8 +580,11 @@ void pooling3_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const float fill_value  = (pool_info.pool_type == PoolingType::MAX) ? -std::numeric_limits<float>::max() : 0.0f;
 
     const uint8_t *const src_top_ptr    = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top)));
     const uint8_t *const src_middle_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1));
     const uint8_t *const src_bottom_ptr = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 2));
 
@@ -565,11 +592,20 @@ void pooling3_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        float32x4_t top_data    = vld1q_f32(reinterpret_cast<const float *>(src_top_ptr + in.offset()));
-        float32x4_t middle_data = vld1q_f32(reinterpret_cast<const float *>(src_middle_ptr + in.offset()));
-        float32x4_t bottom_data = vld1q_f32(reinterpret_cast<const float *>(src_bottom_ptr + in.offset()));
-        float32x2_t res         = {};
-        float       final_res   = 0;
+        const auto in_top_ptr    = reinterpret_cast<const float *>(src_top_ptr + in.offset());
+        const auto in_middle_ptr = reinterpret_cast<const float *>(src_middle_ptr + in.offset());
+        const auto in_bottom_ptr = reinterpret_cast<const float *>(src_bottom_ptr + in.offset());
+
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+        const auto y_val_2 = (id.y() * pool_stride_y) + 2;
+        auto top_data    = READ_4_BOUNDARY_AWARE(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val_0, in_top_ptr, fill_value);
+        auto middle_data = READ_4_BOUNDARY_AWARE(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val_1, in_middle_ptr, fill_value);
+        auto bottom_data = READ_4_BOUNDARY_AWARE(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val_2, in_bottom_ptr, fill_value);
+
+        float32x2_t res       = {};
+        float       final_res = 0;
 
         // Get power of 2 in case of l2 pooling
         if(pool_info.pool_type == PoolingType::L2)
@@ -625,8 +661,11 @@ void pooling7_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     int pool_stride_x = 0;
     int pool_stride_y = 0;
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const int src_w         = src->info()->dimension(0);
+    const int src_h         = src->info()->dimension(1);
+    const int upper_bound_w = src_w + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src_h + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const float fill_value  = (pool_info.pool_type == PoolingType::MAX) ? -std::numeric_limits<float>::max() : 0.0f;
 
     std::array<const uint8_t *, pool_size> src_ptrs{ {} };
     for(int i = 0; i < pool_size; ++i)
     {
         src_ptrs[i] = src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + i));
     }
 
@@ -636,8 +675,15 @@ void pooling7_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
     execute_window_loop(window, [&](const Coordinates & id)
     {
+        auto in_ptr = reinterpret_cast<const float *>(src_ptrs[0] + in.offset());
+
+        auto x_val = id.x() * pool_stride_x;
+        auto y_val = id.y() * pool_stride_y;
+        float32x4x2_t data = read_8_boundary_aware(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val, in_ptr, fill_value);
+
         float32x2_t res       = {};
         float       final_res = 0.f;
+
         if(pool_info.pool_type != PoolingType::MAX)
         {
             // Calculate scale
@@ -645,8 +691,6 @@ void pooling7_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
                                                     pool_stride_y);
             const float32x2_t scale_v = vdup_n_f32(scale);
 
-            // Perform pooling
-            float32x4x2_t data = vld2q_f32(reinterpret_cast<const float *>(src_ptrs[0] + in.offset()));
             // Get power of 2 in case of l2 pooling
             if(pool_info.pool_type == PoolingType::L2)
             {
@@ -656,7 +700,11 @@ void pooling7_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
             float32x4_t sum_data = vaddq_f32(data.val[0], vsetq_lane_f32(0.f, data.val[1], 3));
             for(int i = 1; i < pool_size; ++i)
             {
-                data = vld2q_f32(reinterpret_cast<const float *>(src_ptrs[i] + in.offset()));
+                in_ptr = reinterpret_cast<const float *>(src_ptrs[i] + in.offset());
+
+                x_val = id.x() * pool_stride_x;
+                y_val = (id.y() * pool_stride_y) + i;
+                data  = read_8_boundary_aware(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val, in_ptr, fill_value);
                 // Get power of 2 in case of l2 pooling
                 if(pool_info.pool_type == PoolingType::L2)
                 {
@@ -671,14 +719,17 @@ void pooling7_fp32_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, P
         }
         else
         {
-            float32x4x2_t max_data = vld2q_f32(reinterpret_cast<const float *>(src_ptrs[0] + in.offset()));
             for(int i = 1; i < pool_size; ++i)
             {
-                const float32x4x2_t data = vld2q_f32(reinterpret_cast<const float *>(src_ptrs[i] + in.offset()));
-                max_data                 = vmax2q_f32(max_data, data);
+                in_ptr = reinterpret_cast<const float *>(src_ptrs[i] + in.offset());
+
+                x_val = id.x() * pool_stride_x;
+                y_val = (id.y() * pool_stride_y) + i;
+                float32x4x2_t temp = read_8_boundary_aware(src_h, src_w, pool_pad_left, pool_pad_top, x_val, y_val, in_ptr, fill_value);
+                data               = vmax2q_f32(data, temp);
             }
-            res = vpmax_f32(vget_high_f32(vsetq_lane_f32(-std::numeric_limits<float>::max(), max_data.val[1], 3)), vget_low_f32(max_data.val[1]));
-            res = vpmax_f32(res, vpmax_f32(vget_high_f32(max_data.val[0]), vget_low_f32(max_data.val[0])));
+            res = vpmax_f32(vget_high_f32(vsetq_lane_f32(-std::numeric_limits<float>::max(), data.val[1], 3)), vget_low_f32(data.val[1]));
+            res = vpmax_f32(res, vpmax_f32(vget_high_f32(data.val[0]), vget_low_f32(data.val[0])));
             res = vpmax_f32(res, res);
         }
         final_res = vget_lane_f32(res, 0);
diff --git a/src/cpu/kernels/pool2d/neon/quantized.h b/src/cpu/kernels/pool2d/neon/quantized.h
index a16960a205..386e043984 100644
--- a/src/cpu/kernels/pool2d/neon/quantized.h
+++ b/src/cpu/kernels/pool2d/neon/quantized.h
@@ -466,6 +466,63 @@ inline void scale_vector_q16x8(bool exclude_padding, TVec &v, const Coordinates
     v = wrapper::vsetlane(elems[7], v, 7);
 }
 
+template <typename T>
+auto load16_boundary_aware(int srcw, int srch, int pad_l, int pad_r, int pad_t, int pad_b, int x, int y, const T *ptr, T fval)
+{
+    ARM_COMPUTE_UNUSED(pad_b, pad_r);
+    T vec[16];
+    //handle reading a row out of the tensor
+    const bool row_in_bounds((y >= pad_t) && (y < (srch + pad_t)));
+    for(int i = 0; i < 16; i++)
+    {
+        if(row_in_bounds && (x + i >= pad_l) && (x + i < (srcw + pad_l)))
+        {
+            vec[i] = *(ptr + i);
+        }
+        else
+        {
+            vec[i] = fval;
+        }
+    }
+    return wrapper::vloadq(vec);
+}
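+// write16/write8_boundary_aware clamp vector stores at the right-hand edge of
+// the destination row, writing only lanes with (x + lane) < dst_w; with
+// deinterleave set, the lower/upper lanes go to even/odd output positions.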
+
+template <typename T, typename V, bool deinterleave>
+inline void write16_boundary_aware(int x, int dst_w, const V &lower, const V &upper, T *ptr)
+{
+    if(deinterleave)
+    {
+        for(int i = 0; i < 8 && (i * 2 + x) < dst_w; ++i)
+        {
+            *(ptr + i * 2) = lower[i];
+        }
+        for(int i = 0; i < 8 && (i * 2 + x + 1) < dst_w; ++i)
+        {
+            *(ptr + 1 + i * 2) = upper[i];
+        }
+    }
+    else
+    {
+        for(int i = 0; i < 8 && (i + x) < dst_w; ++i)
+        {
+            *(ptr + i) = lower[i];
+        }
+        for(int i = 0; i < 8 && (i + x + 8) < dst_w; ++i)
+        {
+            *(ptr + i + 8) = upper[i];
+        }
+    }
+}
+
+template <typename T, typename V>
+inline void write8_boundary_aware(int x, int dst_w, const V &v, T *ptr)
+{
+    for(int i = 0; i < 8 && (i + x) < dst_w; ++i)
+    {
+        *(ptr + i) = v[i];
+    }
+}
+
 template <typename T>
 void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *dst1, PoolingLayerInfo &pool_info, const Window &window_src, const Window &window)
 {
@@ -474,9 +531,8 @@ void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
     Iterator out(dst0, window);
 
     /** SIMD vector types */
-    using q8x8_t   = typename wrapper::traits::neon_vector<T, 8>::type;
-    using q8x16_t  = typename wrapper::traits::neon_vector<T, 16>::type;
-    using q8x8x2_t = typename std::conditional<std::is_same<T, uint8_t>::value, uint8x8x2_t, int8x8x2_t>::type;
+    using q8x8_t  = typename wrapper::traits::neon_vector<T, 8>::type;
+    using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
     using q16_t   = typename wrapper::traits::promote_t<T>;
     using q16x4_t = typename wrapper::traits::neon_vector<q16_t, 4>::type;
     using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
@@ -490,14 +546,11 @@ void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
     const int pool_pad_left   = pool_info.pad_stride_info.pad_left();
     const int pool_pad_bottom = pool_info.pad_stride_info.pad_bottom();
     std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
-    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
-    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
-
-    const T *const src_top_ptr    = reinterpret_cast<const T *>(src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top))));
-    const T *const src_bottom_ptr = reinterpret_cast<const T *>(src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1)));
-
-    const int scale_step_x = (pool_stride_x == 1) ? 2 : 1;
-
+    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
+    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
+    const T *const src_top_ptr    = reinterpret_cast<const T *>(src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top))));
+    const T *const src_bottom_ptr = reinterpret_cast<const T *>(src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1)));
+    const int scale_step_x = (pool_stride_x == 1) ? 2 : 1;
     const UniformQuantizationInfo src_qinfo = src->info()->quantization_info().uniform();
     const UniformQuantizationInfo dst_qinfo = dst0->info()->quantization_info().uniform();
     const bool have_different_qinfo         = src_qinfo != dst_qinfo;
@@ -505,13 +558,25 @@ void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
     const float                   requant_scale  = dst_qinfo.scale / src_qinfo.scale;
     const int32_t                 requant_offset = dst_qinfo.offset - static_cast<int32_t>(static_cast<float>(src_qinfo.offset) / requant_scale);
     const UniformQuantizationInfo requant_qinfo  = UniformQuantizationInfo(requant_scale, requant_offset);
+    const int src_w = src->info()->dimension(0);
+    const int src_h = src->info()->dimension(1);
+    const int dst_w = dst0->info()->dimension(0);
+
+    const T fill_value = (pool_info.pool_type == PoolingType::MAX) ? std::numeric_limits<T>::min() : T(0);
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        const auto top_data    = wrapper::vloadq(src_top_ptr + in.offset());
-        const auto bottom_data = wrapper::vloadq(src_bottom_ptr + in.offset());
-        q8x8_t     lower_res   = {};
-        q8x8_t     upper_res   = {};
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+
+        auto top_data = load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom,
+                                              x_val, y_val_0, reinterpret_cast<const T *>(src_top_ptr + in.offset()), fill_value);
+        auto bottom_data = load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom,
+                                                 x_val, y_val_1, reinterpret_cast<const T *>(src_bottom_ptr + in.offset()), fill_value);
+
+        q8x8_t lower_res = {};
+        q8x8_t upper_res = {};
 
         if(pool_info.pool_type != PoolingType::MAX)
         {
@@ -580,16 +645,15 @@ void pooling2_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
             lower_res = wrapper::vgetlow(requantized_dst);
             upper_res = wrapper::vgethigh(requantized_dst);
         }
-
+        auto out_ptr = reinterpret_cast<T *>(out.ptr());
         // Store result
         if(pool_stride_x == 1)
        {
-            const q8x8x2_t res = { { lower_res, upper_res } };
-            wrapper::vstore(reinterpret_cast<T *>(out.ptr()), res);
+            write16_boundary_aware<T, q8x8_t, true>(id.x(), dst_w, lower_res, upper_res, out_ptr);
         }
         else
         {
-            wrapper::vstore(reinterpret_cast<T *>(out.ptr()), lower_res);
+            write8_boundary_aware<T, q8x8_t>(id.x(), dst_w, lower_res, out_ptr);
         }
     },
     in, out);
@@ -632,13 +696,27 @@ void pooling3_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
     const T *const src_middle_ptr = reinterpret_cast<const T *>(src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1)));
     const T *const src_bottom_ptr = reinterpret_cast<const T *>(src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 2)));
 
+    const int src_w = src->info()->dimension(0);
+    const int src_h = src->info()->dimension(1);
+    const T fill_value = (pool_info.pool_type == PoolingType::AVG) ? T(0) : std::numeric_limits<T>::min();
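+    // For quantized data, padded taps use the type minimum for MAX pooling and
+    // zero for AVG pooling; the averaging scale already handles exclude_padding.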
+    const int dst_w = dst0->info()->dimension(0);
+
     execute_window_loop(window, [&](const Coordinates & id)
     {
-        const auto top_data    = wrapper::vloadq(src_top_ptr + in.offset());
-        const auto middle_data = wrapper::vloadq(src_middle_ptr + in.offset());
-        const auto bottom_data = wrapper::vloadq(src_bottom_ptr + in.offset());
-        q8x8_t     fres        = {};
-        q8x16_t    fqres       = {};
+        const auto x_val   = id.x() * pool_stride_x;
+        const auto y_val_0 = id.y() * pool_stride_y;
+        const auto y_val_1 = (id.y() * pool_stride_y) + 1;
+        const auto y_val_2 = (id.y() * pool_stride_y) + 2;
+
+        auto top_data = load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom,
+                                              x_val, y_val_0, reinterpret_cast<const T *>(src_top_ptr + in.offset()), fill_value);
+        auto middle_data = load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom,
+                                                 x_val, y_val_1, reinterpret_cast<const T *>(src_middle_ptr + in.offset()), fill_value);
+        auto bottom_data = load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom,
+                                                 x_val, y_val_2, reinterpret_cast<const T *>(src_bottom_ptr + in.offset()), fill_value);
+
+        q8x8_t  fres  = {};
+        q8x16_t fqres = {};
 
         if(pool_info.pool_type == PoolingType::AVG)
         {
@@ -735,7 +813,7 @@ void pooling3_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
             {
                 fqres = vrequantize_pooling<q8x8_t, q8x16_t>(wrapper::vgetlow(fqres), wrapper::vgethigh(fqres), requant_qinfo);
             }
-            wrapper::vstore(reinterpret_cast<T *>(out.ptr()), fqres);
+            write16_boundary_aware<T, q8x8_t, false>(id.x(), dst_w, wrapper::vgetlow(fqres), wrapper::vgethigh(fqres), reinterpret_cast<T *>(out.ptr()));
         }
         else
         {
@@ -743,7 +821,7 @@ void pooling3_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *ds
             {
                 fres = vrequantize_pooling<q8x8_t>(fres, requant_qinfo);
             }
-            wrapper::vstore(reinterpret_cast<T *>(out.ptr()), fres);
+            write8_boundary_aware<T, q8x8_t>(id.x(), dst_w, fres, reinterpret_cast<T *>(out.ptr()));
         }
     },
     in, out);
@@ -757,11 +835,8 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
     Iterator out(dst0, window);
 
     /** SIMD vector types */
-    using q8x8_t  = typename wrapper::traits::neon_vector<T, 8>::type;
-    using q16_t   = typename wrapper::traits::promote_t<T>;
-    using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
-    using q32_t   = typename wrapper::traits::promote_t<q16_t>;
-    using q32x4_t = typename wrapper::traits::neon_vector<q32_t, 4>::type;
+    using q16_t = typename wrapper::traits::promote_t<T>;
+    using q32_t = typename wrapper::traits::promote_t<q16_t>;
 
     const int pool_size_x = pool_info.is_global_pooling ? src->info()->tensor_shape().x() : pool_info.pool_size.width;
     const int pool_size_y = pool_info.is_global_pooling ? src->info()->tensor_shape().y() : pool_info.pool_size.height;
@@ -775,8 +850,13 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
     const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
     const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
 
-    const UniformQuantizationInfo &src_qinfo = src->info()->quantization_info().uniform();
-    const UniformQuantizationInfo &dst_qinfo = dst0->info()->quantization_info().uniform();
+    const UniformQuantizationInfo &src_qinfo = src->info()->quantization_info().uniform();
+    const UniformQuantizationInfo &dst_qinfo = dst0->info()->quantization_info().uniform();
+    const int src_w = src->info()->dimension(0);
+    const int src_h = src->info()->dimension(1);
+    const T fill_value = (pool_info.pool_type == PoolingType::AVG) ? T(0) : std::numeric_limits<T>::min();
+    const int stridex_in_bytes = static_cast<int>(src->info()->strides_in_bytes().x());
+    const int stridey_in_bytes = static_cast<int>(src->info()->strides_in_bytes().y());
 
     execute_window_loop(window, [&](const Coordinates & id)
     {
@@ -784,8 +864,7 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
         T res = std::numeric_limits<T>::min();
 
         if(pool_info.pool_type != PoolingType::MAX)
         {
-            q32x4_t vres = wrapper::vdup_n(static_cast<q32_t>(0.f), wrapper::traits::vector_128_tag{});
-            q32_t   sres = 0;
+            q32_t sres = 0;
 
             // Calculate scale
             const float scale = calculate_avg_scale(pool_info.exclude_padding, DataLayout::NCHW, id, pool_size_x, pool_size_y, upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x,
@@ -794,61 +873,33 @@ void poolingMxN_quantized_neon_nchw(const ITensor *src, ITensor *dst0, ITensor *
                                                     pool_stride_y);
 
             // Perform pooling
             for(int y = 0; y < pool_size_y; ++y)
             {
-                int x = 0;
-                for(; x <= (pool_size_x - 8); x += 8)
+                for(int x = 0; x < pool_size_x; ++x)
                 {
-                    const q8x8_t data = wrapper::vload(reinterpret_cast<const T *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                       (src->info()->strides_in_bytes().y())));
-
-                    const q16x8_t data_q16 = wrapper::vmovl(data);
-                    vres                   = wrapper::vadd(vres, wrapper::vaddl(wrapper::vgethigh(data_q16), wrapper::vgetlow(data_q16)));
-                }
+                    const auto in_ptr = reinterpret_cast<const T *>(in.ptr() + (x - pool_pad_left) * stridex_in_bytes + (y - pool_pad_top) * stridey_in_bytes);
 
-                // Leftover for loop
-                for(; x < pool_size_x; ++x)
-                {
-                    T data = *(reinterpret_cast<const T *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                           (src->info()->strides_in_bytes().y())));
+                    const int idx  = x + id.x() * pool_stride_x - pool_pad_left;
+                    const int idy  = y + id.y() * pool_stride_y - pool_pad_top;
+                    const T   data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *in_ptr;
                     sres += data;
                 }
             }
-
-            // Reduction
-            const auto tmp = wrapper::vpadd(wrapper::vgethigh(vres), wrapper::vgetlow(vres));
-            sres += wrapper::vgetlane(tmp, 0) + wrapper::vgetlane(tmp, 1);
-
             // Divide by scale
             res = static_cast<T>(support::cpp11::round(sres * scale));
         }
         else
        {
-            q8x8_t vres = wrapper::vdup_n(std::numeric_limits<T>::min(), wrapper::traits::vector_64_tag{});
-
             for(int y = 0; y < pool_size_y; ++y)
             {
-                int x = 0;
-                for(; x <= (pool_size_x - 8); x += 8)
-                {
-                    const q8x8_t data = wrapper::vload(reinterpret_cast<const T *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                       (src->info()->strides_in_bytes().y())));
-                    vres = wrapper::vmax(vres, data);
-                }
-                // Leftover for loop
-                for(; x < pool_size_x; ++x)
+                for(int x = 0; x < pool_size_x; ++x)
                 {
-                    const T data = *(reinterpret_cast<const T *>(in.ptr() + (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
-                                                                 (src->info()->strides_in_bytes().y())));
-                    res = std::max(res, data);
+                    const auto in_ptr = reinterpret_cast<const T *>(in.ptr() + (x - pool_pad_left) * stridex_in_bytes + (y - pool_pad_top) * stridey_in_bytes);
+
+                    const int idx  = x + id.x() * pool_stride_x - pool_pad_left;
+                    const int idy  = y + id.y() * pool_stride_y - pool_pad_top;
+                    const T   data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *in_ptr;
+                    res            = std::max(res, data);
                 }
             }
-
-            // Reduce max
-            vres = wrapper::vpmax(vres, vres);
-            vres = wrapper::vpmax(vres, vres);
-            vres = wrapper::vpmax(vres, vres);
-
-            // Get max value
-            res = std::max(res, wrapper::vgetlane(vres, 0));
         }
 
         // Store result
         res = (src_qinfo != dst_qinfo) ? Qasymm8QuantizationHelper<T>::quantize(Qasymm8QuantizationHelper<T>::dequantize(res, src_qinfo), dst_qinfo) : res;
diff --git a/src/cpu/operators/CpuPool2d.cpp b/src/cpu/operators/CpuPool2d.cpp
index a4ac871d48..eabbd5e0cc 100644
--- a/src/cpu/operators/CpuPool2d.cpp
+++ b/src/cpu/operators/CpuPool2d.cpp
@@ -39,7 +39,6 @@ namespace cpu
 {
 CpuPool2d::CpuPool2d()
     : _pooling_layer_kernel(),
-      _border_handler(),
       _asm_glue(),
       _is_global_pooling_layer(false),
      _data_layout(DataLayout::NCHW),
@@ -86,28 +85,6 @@ void CpuPool2d::configure(ITensorInfo *src, ITensorInfo *dst, const PoolingLayer
         auto k = std::make_unique<kernels::CpuPool2dKernel>();
         k->configure(src, dst, pool_info, indices);
         _pooling_layer_kernel = std::move(k);
-
-        switch(_data_layout)
-        {
-            case DataLayout::NCHW:
-            {
-                // Configure border depending on operation required (quantize border in case of asymmetric data_type)
-                BorderMode border_mode = (!indices && pool_info.pool_type == PoolingType::MAX) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
-                PixelValue zero_value((indices) ? std::numeric_limits<int>::min() : 0.f);
-                if(is_data_type_quantized_asymmetric(src->data_type()) && !pool_info.exclude_padding)
-                {
-                    zero_value = PixelValue(0, src->data_type(), src->quantization_info());
-                }
-                auto b = std::make_unique<NEFillBorderKernel>();
-                b->configure(src, _pooling_layer_kernel->border_size(), border_mode, zero_value);
-                _border_handler = std::move(b);
-                break;
-            }
-            case DataLayout::NHWC:
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Data layout not supported");
-        }
     }
 }
 
@@ -137,14 +114,9 @@ void CpuPool2d::run(ITensorPack &tensors)
     switch(_data_layout)
     {
         case DataLayout::NCHW:
-            // Fill border
-            NEScheduler::get().schedule_op(_border_handler.get(), Window::DimY, _border_handler->window(), tensors);
-
-            // Run pooling layer
             NEScheduler::get().schedule_op(_pooling_layer_kernel.get(), _is_global_pooling_layer ? Window::DimZ : Window::DimY, _pooling_layer_kernel->window(), tensors);
             break;
         case DataLayout::NHWC:
-            // Run pooling layer
             NEScheduler::get().schedule_op(_pooling_layer_kernel.get(), Window::DimX, _pooling_layer_kernel->window(), tensors);
             break;
         default:
diff --git a/src/cpu/operators/CpuPool2d.h b/src/cpu/operators/CpuPool2d.h
index 471637164f..02c2609a6a 100644
--- a/src/cpu/operators/CpuPool2d.h
+++ b/src/cpu/operators/CpuPool2d.h
@@ -73,7 +73,6 @@ public:
 
 private:
     std::unique_ptr<INEKernel> _pooling_layer_kernel;
-    std::unique_ptr<INEKernel> _border_handler;
     std::unique_ptr<INEKernel> _asm_glue;
 
     bool _is_global_pooling_layer;
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index a6ba5de11b..77a501582c 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -193,9 +193,7 @@ TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunIndices, NEPoolingLayerIndicesFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(), combine(PoolingLayerIndicesDatasetFPSmall,
                                                                                                                   framework::dataset::make("DataType", DataType::F16))),
-                                                                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })
-
-                                                                                                                 ))
+                                                                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f16);
-- 
cgit v1.2.1
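
For reference, the pattern shared by all the kernels in this change replaces reads
from an allocated tensor border with a bounds check that substitutes a fill value.
A minimal scalar sketch, assuming a row-major float tensor; read_boundary_aware and
max_pool_3x3 are hypothetical helper names, not ComputeLibrary APIs:

    #include <algorithm>
    #include <limits>
    #include <vector>

    // Return the pooling input at tap (idx, idy), or fill_value when the tap
    // falls in the virtual padded region outside the source tensor.
    static float read_boundary_aware(const std::vector<float> &src, int src_w, int src_h,
                                     int idx, int idy, float fill_value)
    {
        if(idx < 0 || idy < 0 || idx >= src_w || idy >= src_h)
        {
            return fill_value;
        }
        return src[idy * src_w + idx];
    }

    // 3x3 max pooling for one output element; stride and pad as in PadStrideInfo.
    static float max_pool_3x3(const std::vector<float> &src, int src_w, int src_h,
                              int out_x, int out_y, int stride, int pad)
    {
        float res = -std::numeric_limits<float>::max();
        for(int y = 0; y < 3; ++y)
        {
            for(int x = 0; x < 3; ++x)
            {
                // Map the (x, y) kernel tap to a source coordinate, as the NCHW
                // kernels above do with idx/idy.
                const int idx = out_x * stride + x - pad;
                const int idy = out_y * stride + y - pad;
                res = std::max(res, read_boundary_aware(src, src_w, src_h, idx, idy,
                                                        -std::numeric_limits<float>::max()));
            }
        }
        return res;
    }

The NEON kernels apply the same check per vector lane (the read_*_boundary_aware
helpers) or per scalar tap (the idx/idy tests), which is what lets the change drop
the BorderSize machinery and the NEFillBorderKernel pass entirely.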