author    Sheri Zhang <sheri.zhang@arm.com>    2021-04-15 12:58:20 +0100
committer Sheri Zhang <sheri.zhang@arm.com>    2021-04-19 09:59:51 +0000
commit    4f1650f0c9919f0bac5024b8e31c0f754d25aec3 (patch)
tree      9c434cd214c21cda7533ba1447608237afe77ac9
parent    c6fcfb4adc37a6cf09472168dc177234d4fabdfa (diff)
download  ComputeLibrary-4f1650f0c9919f0bac5024b8e31c0f754d25aec3.tar.gz
Remove padding from CLNormalizePlanarYUVLayerKernel
Resolve: COMPMID-3911

Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: Id5615b6a8b52030fb611a1a04bcd4664b8232e90
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5451
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r-- src/core/CL/cl_kernels/helpers.h                               |  5
-rw-r--r-- src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl           | 24
-rw-r--r-- src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl | 54
-rw-r--r-- src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp        | 52
4 files changed, 80 insertions(+), 55 deletions(-)
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index d5e8352438..2eae5ee1c9 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -502,6 +502,11 @@
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
+#define convert_uchar2_sat convert_uchar2_sat
+#define convert_uchar3_sat convert_uchar3_sat
+#define convert_uchar4_sat convert_uchar4_sat
+#define convert_uchar8_sat convert_uchar8_sat
+#define convert_uchar16_sat convert_uchar16_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
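[Editor's note] The five new defines above are deliberate no-ops. Size-generic helpers in helpers.h build builtin names by token-pasting the vector width (for example CONVERT_SAT, used by the quantized kernels below), and the identity defines keep every width-suffixed uchar name resolvable, mirroring the existing width-1 mapping convert_uchar1_sat -> convert_uchar_sat. A minimal standalone C++ sketch of the pasting idiom, using stub names rather than the library's macros:

    #include <cstdio>

    // Stands in for the OpenCL builtin convert_uchar_sat (scalar, saturating)
    unsigned char convert_uchar_sat_stub(float x)
    {
        return x < 0.f ? 0 : (x > 255.f ? 255 : (unsigned char)(x + 0.5f));
    }
    #define convert_uchar1_sat_stub convert_uchar_sat_stub // width 1 -> scalar name
    #define CONVERT_SAT_STUB(x, w) convert_uchar##w##_sat_stub(x)

    int main()
    {
        // The paste produces convert_uchar1_sat_stub, which the identity-style
        // define rewrites to the scalar stub; in helpers.h the same mechanism
        // needs a define (or builtin) to exist for every width a kernel may paste.
        printf("%u\n", (unsigned)CONVERT_SAT_STUB(300.f, 1)); // prints 255 (saturated)
        return 0;
    }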
diff --git a/src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl b/src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl
index f803f5288e..0a098356b4 100644
--- a/src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl
+++ b/src/core/CL/cl_kernels/normalize_planar_yuv_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -84,6 +84,7 @@ __kernel void normalize_planar_yuv_layer_nchw(TENSOR3D_DECLARATION(src),
*
* @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE e.g. -DVEC_SIZE=8
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* @param[in] src_ptr Pointer to the first source tensor. Supported data types: F16/F32
* @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
@@ -115,20 +116,19 @@ __kernel void normalize_planar_yuv_layer_nhwc(TENSOR3D_DECLARATION(src),
VECTOR_DECLARATION(mean),
VECTOR_DECLARATION(std))
{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
- Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
- Vector std = CONVERT_TO_VECTOR_STRUCT(std);
+ uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
- const uint current_slice = get_global_id(0);
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
+ __global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
+ __global uchar *std_addr = std_ptr + std_offset_first_element_in_bytes + x_offs;
- const TYPE curr_mean = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(mean.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE)));
- const TYPE curr_std = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(std.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE)));
+ const TYPE curr_mean = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr);
+ const TYPE curr_std = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)std_addr);
- TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr);
- TYPE res = (data - curr_mean) / curr_std;
+ TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr);
+ TYPE res0 = (data - curr_mean) / curr_std;
- VSTORE(VEC_SIZE)
- (res, 0, (__global DATA_TYPE *)dst.ptr);
+ STORE_VECTOR_SELECT(res, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif // defined(DATA_TYPE) && defined(VEC_SIZE)
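[Editor's note] The rewritten NHWC kernel drops the padding requirement by combining two pieces: x_offs shifts every work-item after the first back by (VEC_SIZE - VEC_SIZE_LEFTOVER) elements so their full-width loads and stores stay in bounds, and STORE_VECTOR_SELECT makes work-item 0 store only VEC_SIZE_LEFTOVER lanes when the first dimension is not a multiple of VEC_SIZE. A standalone C++ sketch of that arithmetic (element units, sizeof(DATA_TYPE) factored out), assuming a first dimension of 10 and VEC_SIZE=4:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int total    = 10;               // input's first dimension, in elements
        const int vec_size = 4;                // -DVEC_SIZE
        const int leftover = total % vec_size; // -DVEC_SIZE_LEFTOVER = 2

        const int num_work_items = (total + vec_size - 1) / vec_size; // 3
        for(int gid = 0; gid < num_work_items; ++gid)
        {
            // Same arithmetic as x_offs in the kernel, in element units
            const int x = std::max(gid * vec_size - (vec_size - leftover) % vec_size, 0);
            // STORE_VECTOR_SELECT stores only the leftover lanes for work-item 0
            const int n = (leftover != 0 && gid == 0) ? leftover : vec_size;
            printf("work-item %d stores elements [%d, %d)\n", gid, x, x + n);
        }
        // work-item 0 stores elements [0, 2)  <- partial store of the leftover
        // work-item 1 stores elements [2, 6)  <- full store, rewound by 2
        // work-item 2 stores elements [6, 10) <- full store, ends exactly in bounds
        return 0;
    }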
diff --git a/src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl b/src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl
index 27017a08ca..d660fffb58 100644
--- a/src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/normalize_planar_yuv_layer_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,17 +76,21 @@ __kernel void normalize_planar_yuv_layer_q8_nchw(TENSOR3D_DECLARATION(src),
const uint current_slice = get_global_id(2) % NUM_CHANNELS;
- float16 curr_mean_flt = (float16)(*((__global DATA_TYPE *)(mean.ptr + current_slice * sizeof(DATA_TYPE))));
- curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_mean_flt = (VEC_DATA_TYPE(float, VEC_SIZE))(*((__global DATA_TYPE *)(mean.ptr + current_slice * sizeof(DATA_TYPE))));
+ curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
- float16 curr_std_flt = (float16)(*((__global DATA_TYPE *)(std.ptr + current_slice * sizeof(DATA_TYPE))));
- curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_std_flt = (VEC_DATA_TYPE(float, VEC_SIZE))(*((__global DATA_TYPE *)(std.ptr + current_slice * sizeof(DATA_TYPE))));
+ curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
- float16 data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr), float16);
- data_flt = round(data_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr), VEC_DATA_TYPE(float, VEC_SIZE));
+ data_flt = round(data_flt - OFFSET_FLT) * SCALE_FLT;
// Perform normalization
- float16 res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
const TYPE res_u8 = CONVERT_SAT(round(res_flt / SCALE_FLT) + OFFSET_FLT, TYPE);
VSTORE(VEC_SIZE)
@@ -101,6 +105,7 @@ __kernel void normalize_planar_yuv_layer_q8_nchw(TENSOR3D_DECLARATION(src),
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE e.g. -DVEC_SIZE=8
* @note The quantization offset should be given as a preprocessor argument using -DOFFSET e.g. -DOFFSET=8
* @note The quantization scale should be given as a preprocessor argument using -DSCALE e.g. -DSCALE=8
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* @param[in] src_ptr Pointer to the first source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
* @param[in] src_stride_x Stride of the first source tensor in X dimension (in bytes)
@@ -132,27 +137,30 @@ __kernel void normalize_planar_yuv_layer_q8_nhwc(TENSOR3D_DECLARATION(src),
VECTOR_DECLARATION(mean),
VECTOR_DECLARATION(std))
{
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
- Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
- Vector std = CONVERT_TO_VECTOR_STRUCT(std);
+ uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
- const uint current_slice = get_global_id(0);
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs + get_global_id(1) * src_stride_y + get_global_id(2) * src_stride_z;
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs + get_global_id(1) * dst_stride_y + get_global_id(2) * dst_stride_z;
+ __global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
+ __global uchar *std_addr = std_ptr + std_offset_first_element_in_bytes + x_offs;
- float16 curr_mean_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(mean.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE))), float16);
- curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_mean_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr), VEC_DATA_TYPE(float, VEC_SIZE));
+ curr_mean_flt = round(curr_mean_flt - OFFSET_FLT) * SCALE_FLT;
- float16 curr_std_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(std.ptr + current_slice * VEC_SIZE * sizeof(DATA_TYPE))), float16);
- curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ curr_std_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)std_addr), VEC_DATA_TYPE(float, VEC_SIZE));
+ curr_std_flt = round(curr_std_flt - OFFSET_FLT) * SCALE_FLT;
- float16 data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr), float16);
- data_flt = round(data_flt - OFFSET_FLT) * (SCALE_FLT);
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data_flt = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr), VEC_DATA_TYPE(float, VEC_SIZE));
+ data_flt = round(data_flt - OFFSET_FLT) * (SCALE_FLT);
// Perform normalization
- float16 res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ res_flt = (data_flt - curr_mean_flt) / curr_std_flt;
- const TYPE res_u8 = CONVERT_SAT(round(res_flt / SCALE_FLT) + OFFSET_FLT, TYPE);
- VSTORE(VEC_SIZE)
- (res_u8, 0, (__global DATA_TYPE *)dst.ptr);
+ const TYPE res0 = CONVERT_SAT(round(res_flt / SCALE_FLT) + OFFSET_FLT, TYPE);
+ STORE_VECTOR_SELECT(res, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(OFFSET) && defined(SCALE)
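[Editor's note] The quantized kernels keep the same leftover handling but do the normalization in float: each VEC_SIZE-wide block is dequantized as round(q - OFFSET) * SCALE, normalized, then requantized as round(res / SCALE) + OFFSET with a saturating convert back to the 8-bit type. A scalar C++ sketch of that round trip; the offset and scale values are made-up example quantization parameters:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        // Made-up example quantization parameters (-DOFFSET / -DSCALE)
        const float offset = 128.0f;
        const float scale  = 0.0125f;

        const unsigned char q_in = 200, q_mean = 140, q_std = 160;

        // Dequantize: round(q - OFFSET_FLT) * SCALE_FLT, as in the kernels
        const float data = std::round((float)q_in - offset) * scale;   // 0.9
        const float mean = std::round((float)q_mean - offset) * scale; // 0.15
        const float stdv = std::round((float)q_std - offset) * scale;  // 0.4

        // Normalize in float
        const float res = (data - mean) / stdv; // 1.875

        // Requantize: round(res / SCALE_FLT) + OFFSET_FLT, saturated to uchar
        const float q = std::round(res / scale) + offset; // 278 -> saturates
        const unsigned char q_out =
            (unsigned char)std::min(std::max(q, 0.0f), 255.0f);

        printf("q_out = %u\n", (unsigned)q_out); // 255
        return 0;
    }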
diff --git a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
index e78d906a9d..cf2511adec 100644
--- a/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizePlanarYUVLayerKernel.cpp
@@ -64,11 +64,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, ITensorInfo *mean, ITensorInfo *std)
+std::pair<Status, Window> validate_and_configure_window_nchw(ITensorInfo *input, ITensorInfo *output)
{
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, *input->clone());
-
const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -78,13 +75,6 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
bool window_changed = update_window_and_padding(win, input_access, output_access);
- if(input->data_layout() == DataLayout::NHWC)
- {
- AccessWindowHorizontal mean_access(mean, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal std_access(std, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, mean_access, std_access);
- }
-
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
@@ -106,19 +96,30 @@ void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, mean, std);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), mean->info(), std->info()));
+ // Output tensor auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), *input->info()->clone());
+
+ auto padding_info = get_padding_info({ input, output });
+
_input = input;
_output = output;
_mean = mean;
_std = std;
- const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
- const unsigned int channel_idx = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);
- const DataType dt = input->info()->data_type();
+ const DataLayout data_layout = input->info()->data_layout();
+
+ // Get number of elements to process per iterations
+ const unsigned int num_elems_processed_per_iteration = (data_layout == DataLayout::NHWC) ? adjust_vec_size(16 / input->info()->element_size(),
+ input->info()->dimension(0)) :
+ (16 / input->info()->element_size());
+ const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const DataType dt = input->info()->data_type();
// Set build options
CLBuildOptions build_opts;
build_opts.add_option(("-DDATA_TYPE=" + get_cl_type_from_data_type(dt)));
build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ build_opts.add_option(("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration)));
build_opts.add_option(("-DNUM_CHANNELS=" + support::cpp11::to_string(input->info()->dimension(channel_idx))));
std::string kernel_name = "normalize_planar_yuv_layer_";
@@ -131,13 +132,22 @@ void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_
}
// Create kernel
- kernel_name += lower_string(string_from_data_layout(input->info()->data_layout()));
+ kernel_name += lower_string(string_from_data_layout(data_layout));
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), mean->info(), std->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ if(data_layout == DataLayout::NHWC)
+ {
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+ }
+ else
+ {
+ auto win_config = validate_and_configure_window_nchw(input->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
+ }
// Set config_id for enabling LWS tuning
_config_id = "normalize_planar_yuv_layer_";
@@ -155,8 +165,10 @@ void CLNormalizePlanarYUVLayerKernel::configure(const CLCompileContext &compile_
Status CLNormalizePlanarYUVLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, std));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), mean->clone().get(), std->clone().get()).first);
-
+ if(input->data_layout() == DataLayout::NCHW)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_nchw(input->clone().get(), output->clone().get()).first);
+ }
return Status{};
}
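[Editor's note] On the host side, the NHWC path now derives its vector size from the actual first dimension instead of requiring padded buffers: adjust_vec_size() clamps the preferred 16-bytes-per-iteration width to the tensor, and the remainder is handed to the kernel as -DVEC_SIZE_LEFTOVER. A simplified standalone C++ sketch; adjust_vec_size_sketch is a stand-in (assumed here to halve the width until it fits) rather than the library's exact implementation:

    #include <cstdio>

    // Stand-in for adjust_vec_size(): keep the vector width no larger than
    // the dimension being vectorized (assumed behavior, simplified)
    unsigned int adjust_vec_size_sketch(unsigned int vec_size, unsigned int dim0)
    {
        while(vec_size > dim0)
        {
            vec_size /= 2;
        }
        return vec_size;
    }

    int main()
    {
        const unsigned int element_size = 4;  // e.g. F32
        const unsigned int dim0         = 10; // first dimension (channels in NHWC)

        const unsigned int vec_size = adjust_vec_size_sketch(16 / element_size, dim0); // 4
        const unsigned int leftover = dim0 % vec_size;                                 // 2

        // Mirrors the -DVEC_SIZE / -DVEC_SIZE_LEFTOVER build options in configure()
        printf("-DVEC_SIZE=%u -DVEC_SIZE_LEFTOVER=%u\n", vec_size, leftover);
        return 0;
    }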