author    Manuel Bottini <manuel.bottini@arm.com>   2021-04-08 12:50:12 +0100
committer Manuel Bottini <manuel.bottini@arm.com>   2021-04-14 10:37:16 +0000
commit    21c28957f9c6fe1a28ef934e711bb7474b8d65ee
tree      220293c48b9705ed8a9a828825503442f414672d
parent    91b7f7423a97f0ae713a13182f289621dad17c43
download  ComputeLibrary-21c28957f9c6fe1a28ef934e711bb7474b8d65ee.tar.gz
Remove OpenCL padding: CLNormalizationLayerKernel
Only for NHWC data layout

Resolves: COMPMID-3910
Change-Id: Ie2d71482b3e3b55ac155e9af152032a5de8bbd50
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5388
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
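
The patch removes the padding requirement by letting the kernel write a partial vector at the row boundary. Below is a small, self-contained C++ sketch (illustrative only, not the library's own helpers; the 16-byte maximum CL vector width is an assumption) of how the new configure() path derives the VEC_SIZE and VEC_SIZE_LEFTOVER build options that make this possible:

#include <algorithm>
#include <cstdio>

int main()
{
    const unsigned int element_size = 4;   // F32
    const unsigned int dim0         = 27;  // innermost (channel) dimension for NHWC

    // Pick a vector width no larger than the innermost dimension,
    // then compute the tail the boundary work-item has to handle.
    const unsigned int vec_size = std::min(16u / element_size, dim0); // -> 4
    const unsigned int leftover = dim0 % vec_size;                    // -> 3

    // These values end up in the kernel build options; the kernel's
    // STORE_VECTOR_SELECT then writes only 'leftover' lanes on the last
    // work-item, so the output tensor needs no right padding.
    std::printf("-DVEC_SIZE=%u -DVEC_SIZE_LEFTOVER=%u\n", vec_size, leftover);
    return 0;
}
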
-rw-r--r--  arm_compute/runtime/CL/functions/CLNormalizationLayer.h |   4
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                         |   3
-rw-r--r--  src/core/CL/cl_kernels/normalization_layer.cl           | 145
-rw-r--r--  src/core/CL/kernels/CLNormalizationLayerKernel.cpp      | 103
-rw-r--r--  src/runtime/CL/functions/CLNormalizationLayer.cpp       |  21
-rw-r--r--  tests/validation/CL/NormalizationLayer.cpp              |   9
6 files changed, 188 insertions, 97 deletions
diff --git a/arm_compute/runtime/CL/functions/CLNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
index 389b21e5c8..706cb6f152 100644
--- a/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,5 +99,5 @@ private:
std::unique_ptr<CLNormalizationLayerKernel> _norm_kernel; /**< Normalization layer kernel to run */
std::unique_ptr<CLFillBorderKernel> _border_handler; /**< Kernel to handle borders */
};
-}
+} // namespace arm_compute
#endif /* ARM_COMPUTE_CLNORMALIZATIONLAYER_H */
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 2652884912..eef204fde9 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -364,7 +364,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "memset", "memset.cl" },
{ "minmax_layer", "minmax_layer.cl" },
{ "non_max_suppression", "nonmax.cl" },
- { "normalization_layer_cross_map", "normalization_layer.cl" },
+ { "normalization_layer_cross_map_nchw", "normalization_layer.cl" },
+ { "normalization_layer_cross_map_nhwc", "normalization_layer.cl" },
{ "normalization_layer_in_map_nchw", "normalization_layer.cl" },
{ "normalization_layer_in_map_nhwc", "normalization_layer.cl" },
{ "normalize_planar_yuv_layer_nchw", "normalize_planar_yuv_layer.cl" },
diff --git a/src/core/CL/cl_kernels/normalization_layer.cl b/src/core/CL/cl_kernels/normalization_layer.cl
index ff4dc8ec38..4569208824 100644
--- a/src/core/CL/cl_kernels/normalization_layer.cl
+++ b/src/core/CL/cl_kernels/normalization_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "helpers.h"
+#include "tile_helpers.h"
#define MUL_OP(x, y) ((x) * (y))
#define ADD_OP(x, y) ((x) + (y))
@@ -29,9 +30,6 @@
#define POW_OP(x, y) pow((x), (y))
#define SQCVT_SAT(a) (a)
-#define LOAD_OP(offset, ptr) vload4(offset, ptr)
-#define STORE_OP(data, offset, ptr) vstore4(data, offset, ptr)
-
#if defined(NUM_SLICES)
/** Apply cross-map normalization.
*
@@ -58,8 +56,8 @@
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
+__kernel void normalization_layer_cross_map_nchw(TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
{
Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
@@ -80,7 +78,7 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
for(int i = left_slice; i <= right_slice; i++)
{
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, 0, i));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, 0, i));
acc = ADD_OP(acc, MUL_OP(values, values));
}
@@ -88,19 +86,84 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
normalized = POW_OP(acc, beta_v);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized_pixel = DIV_OP(LOAD_OP(0, (__global DATA_TYPE *)in.ptr), normalized);
+ normalized_pixel = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr), normalized);
- STORE_OP(normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
+ VSTORE(VEC_SIZE)
+ (normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
}
#endif /* defined(NUM_SLICES) */
#if defined(WIDTH_SIZE)
+/** Apply cross-map normalization.
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
+ * @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
+ * @note The number of slices should be given as a preprocessor argument using -DNUM_SLICES=size. e.g. -DNUM_SLICES=192
+ * @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DALPHA and -DKAPPA
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void normalization_layer_cross_map_nhwc(TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ // Offset computation
+ const uint x_offs = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ acc = 0;
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ coeff_v = SQCVT_SAT(COEFF);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ beta_v = SQCVT_SAT(BETA);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ kappa_v = SQCVT_SAT(KAPPA);
+
+ const int left_slice = max((int)0, (int)x_offs - (int)RADIUS);
+ const int right_slice = min((int)WIDTH_SIZE - 1, (int)x_offs + (int)RADIUS);
+
+ for(int i = left_slice; i <= right_slice; ++i)
+ {
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + i * sizeof(DATA_TYPE)));
+ acc = ADD_OP(acc, MUL_OP(values, values));
+ }
+
+ acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ normalized = POW_OP(acc, beta_v);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ normalized_pixel0 = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + x_offs * sizeof(DATA_TYPE))), normalized);
+
+ STORE_VECTOR_SELECT(normalized_pixel, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+
/** Apply in-map normalization when tensors are in the NCHW data layout format.
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
* @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
* @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DALPHA and -DKAPPA
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER, i.e. x_dimension % VEC_SIZE. e.g. -DVEC_SIZE_LEFTOVER=1
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
* @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
@@ -126,13 +189,13 @@ __kernel void normalization_layer_in_map_nchw(TENSOR3D_DECLARATION(input),
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- acc = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0;
+ acc = 0;
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- coeff_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(COEFF);
+ coeff_v = SQCVT_SAT(COEFF);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(BETA);
+ beta_v = SQCVT_SAT(BETA);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- kappa_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(KAPPA);
+ kappa_v = SQCVT_SAT(KAPPA);
const int current_col = get_global_id(0) << 2;
const int left_pos = max(-(int)RADIUS, -3 - current_col);
@@ -152,10 +215,10 @@ __kernel void normalization_layer_in_map_nchw(TENSOR3D_DECLARATION(input),
{
#if defined(IN_MAP_2D)
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, j, 0));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, j, 0));
#else /* defined(IN_MAP_2D) */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, 0, 0));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor3D_offset(&in, i, 0, 0));
#endif /* defined(IN_MAP_2D) */
acc = ADD_OP(acc, MUL_OP(values, values));
}
@@ -167,13 +230,14 @@ __kernel void normalization_layer_in_map_nchw(TENSOR3D_DECLARATION(input),
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
normalized = POW_OP(acc, beta_v);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized_pixel = DIV_OP(LOAD_OP(0, (__global DATA_TYPE *)in.ptr), normalized);
+ normalized_pixel = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr), normalized);
- STORE_OP(normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
+ VSTORE(VEC_SIZE)
+ (normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
}
#endif // defined(WIDTH_SIZE)
-#if defined(NUM_SLICES)
+#if defined(NUM_SLICES) && defined(DIM1_SIZE)
/** Apply in-map normalization when tensors are in the NHWC data layout format.
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
@@ -202,42 +266,43 @@ __kernel void normalization_layer_in_map_nchw(TENSOR3D_DECLARATION(input),
__kernel void normalization_layer_in_map_nhwc(TENSOR3D_DECLARATION(input),
TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ // Offset computation
+ const uint x_offs = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER);
+ const int current_cols = get_global_id(1);
+ const int current_rows = get_global_id(2);
+
+ // Address computation
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE);
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + current_cols * output_stride_y + current_rows * output_stride_z;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- acc = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0;
+ acc = 0;
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- coeff_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(COEFF);
+ coeff_v = SQCVT_SAT(COEFF);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(BETA);
+ beta_v = SQCVT_SAT(BETA);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- kappa_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(KAPPA);
+ kappa_v = SQCVT_SAT(KAPPA);
- const int current_cols = get_global_id(1);
- const int first_col = max(-(int)RADIUS, -current_cols);
- const int last_col = min((int)RADIUS, (int)get_global_size(1) - 1 - current_cols);
+ const int first_col = max(0, current_cols - (int)RADIUS);
+ const int last_col = min((int)DIM1_SIZE - 1, current_cols + (int)RADIUS);
#if defined(IN_MAP_2D)
- const int current_rows = get_global_id(2);
- const int first_row = max(-(int)RADIUS, -current_rows);
- const int last_row = min((int)RADIUS, (int)NUM_SLICES - 1 - current_rows);
+ const int first_row = max(0, current_rows - (int)RADIUS);
+ const int last_row = min((int)NUM_SLICES - 1, current_rows + (int)RADIUS);
#endif /* defined(IN_MAP_2D) */
#if defined(IN_MAP_2D)
for(int j = first_row; j <= last_row; ++j)
{
+#else // defined(IN_MAP_2D)
+ const int j = current_rows;
#endif /* defined(IN_MAP_2D) */
for(int i = first_col; i <= last_col; ++i)
{
-#if defined(IN_MAP_2D)
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, i, j));
-#else /* defined(IN_MAP_2D) */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, i, 0));
-#endif /* defined(IN_MAP_2D) */
- acc = ADD_OP(acc, MUL_OP(values, values));
+ values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + i * input_stride_y + j * input_stride_z));
+ acc = ADD_OP(acc, MUL_OP(values, values));
}
#if defined(IN_MAP_2D)
}
@@ -247,8 +312,8 @@ __kernel void normalization_layer_in_map_nhwc(TENSOR3D_DECLARATION(input),
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
normalized = POW_OP(acc, beta_v);
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- normalized_pixel = DIV_OP(LOAD_OP(0, (__global DATA_TYPE *)in.ptr), normalized);
+ normalized_pixel0 = DIV_OP(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + current_cols * output_stride_y + current_rows * output_stride_z)), normalized);
- STORE_OP(normalized_pixel, 0, (__global DATA_TYPE *)out.ptr);
+ STORE_VECTOR_SELECT(normalized_pixel, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
-#endif /* defined(NUM_SLICES) */
+#endif // defined(NUM_SLICES) && defined(DIM1_SIZE)
\ No newline at end of file
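
For reference, the arithmetic the normalization kernels above vectorise can be written as a scalar C++ sketch (illustrative only; coeff, beta and kappa correspond to the -DCOEFF, -DBETA and -DKAPPA build options, and the window clamping mirrors left_slice/right_slice):

#include <algorithm>
#include <cmath>
#include <cstdio>

// out = in[pos] / (kappa + coeff * sum_{i in window} in[i]^2) ^ beta,
// with the window clamped to [0, size) as in the kernels above.
static float normalize_element(const float *in, int pos, int size, int radius,
                               float coeff, float beta, float kappa)
{
    const int first = std::max(0, pos - radius);
    const int last  = std::min(size - 1, pos + radius);

    float acc = 0.0f;
    for(int i = first; i <= last; ++i)
    {
        acc += in[i] * in[i]; // ADD_OP(acc, MUL_OP(values, values))
    }
    return in[pos] / std::pow(kappa + coeff * acc, beta); // POW_OP / DIV_OP
}

int main()
{
    const float x[5] = { 1.f, 2.f, 3.f, 4.f, 5.f };
    // norm_size = 5 -> RADIUS = 2; coeff = alpha / norm_size (alpha = 0.0001 here)
    std::printf("%f\n", normalize_element(x, 2, 5, 2, 0.0001f / 5.f, 0.5f, 1.f));
    return 0;
}
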
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
index 1ea0d2c23d..9242505315 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
@@ -37,11 +37,10 @@
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
namespace
{
-constexpr unsigned int num_elems_processed_per_iteration = 4;
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
@@ -67,31 +66,45 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output, *input->clone());
- const unsigned int norm_idx = get_normalization_dimension_index(input->data_layout(), norm_info);
- const bool is_norm_accross_width = norm_idx == 0;
+ bool window_changed = false;
+ Window win;
+ const DataLayout data_layout = input->data_layout();
+ if(data_layout == DataLayout::NCHW)
+ {
+ const unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / input->element_size(), input->dimension(0));
+ const unsigned int norm_idx = get_normalization_dimension_index(input->data_layout(), norm_info);
+ const bool is_norm_accross_width = norm_idx == 0;
- const unsigned int border_width = is_norm_accross_width ? num_elems_processed_per_iteration - 1 : 0;
- const BorderSize border_size = BorderSize(0, border_width);
+ const unsigned int border_width = is_norm_accross_width ? vec_size_x - 1 : 0;
+ const BorderSize border_size = BorderSize(0, border_width);
- Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
- bool window_changed = false;
+ win = calculate_max_window(*input, Steps(vec_size_x));
- // We do not use a Rectangle window for IN_MAP_2D as we clamp the top and bottom accesses inside the kernel, avoiding padding
- // Reads can occur within the valid region of the input
- if(is_norm_accross_width)
- {
- AccessWindowStatic input_access(input, -border_size.left, 0, input->dimension(0) + border_size.right, 0);
- window_changed = window_changed || update_window_and_padding(win, input_access);
+ // We do not use a Rectangle window for IN_MAP_2D as we clamp the top and bottom accesses inside the kernel, avoiding padding
+ // Reads can occur within the valid region of the input
+ if(is_norm_accross_width)
+ {
+ AccessWindowStatic input_access(input, -border_size.left, 0, input->dimension(0) + border_size.right, 0);
+ window_changed = window_changed || update_window_and_padding(win, input_access);
+ }
+ else
+ {
+ AccessWindowHorizontal input_access(input, -border_size.left, vec_size_x);
+ window_changed = window_changed || update_window_and_padding(win, input_access);
+ }
+
+ AccessWindowHorizontal output_access(output, 0, vec_size_x);
+ window_changed = window_changed || update_window_and_padding(win, output_access);
}
else
{
- AccessWindowHorizontal input_access(input, -border_size.left, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, input_access);
+ unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / input->element_size(), input->dimension(0));
+ if(norm_info.is_cross_map())
+ {
+ vec_size_x = 1;
+ }
+ win = calculate_max_window(*input, Steps(vec_size_x));
}
-
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, output_access);
-
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
@@ -115,21 +128,32 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou
void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), *input->info()->clone());
+ auto padding_info = get_padding_info({ input, output });
// Perform validation step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), norm_info));
+ auto win_config = validate_and_configure_window(input->info(), output->info(), norm_info);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
_input = input;
_output = output;
- const DataLayout data_layout = input->info()->data_layout();
- const unsigned int norm_idx = get_normalization_dimension_index(data_layout, norm_info);
- _is_norm_across_width = norm_idx == 0;
- const unsigned int border_width = _is_norm_across_width ? num_elems_processed_per_iteration - 1 : 0;
- _border_size = BorderSize(0, border_width);
+ const DataLayout data_layout = input->info()->data_layout();
+ unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(0));
+ int vec_size_x_leftovers = input->info()->dimension(0) % vec_size_x;
+ if(norm_info.is_cross_map() && data_layout == DataLayout::NHWC)
+ {
+ vec_size_x = 1;
+ vec_size_x_leftovers = 0;
+ }
+
+ if(data_layout == DataLayout::NCHW)
+ {
+ const unsigned int norm_idx = get_normalization_dimension_index(data_layout, norm_info);
+ _is_norm_across_width = norm_idx == 0;
+ const unsigned int border_width = _is_norm_across_width ? vec_size_x - 1 : 0;
+ _border_size = BorderSize(0, border_width);
+ }
const bool is_in_map_2D = (norm_info.type() == NormType::IN_MAP_2D);
@@ -139,11 +163,13 @@ void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_conte
build_opts.add_option(("-DCOEFF=" + float_to_string_with_full_precision(norm_info.scale_coeff())));
build_opts.add_option(("-DBETA=" + float_to_string_with_full_precision(norm_info.beta())));
build_opts.add_option(("-DKAPPA=" + float_to_string_with_full_precision(norm_info.kappa())));
- build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x)));
+ build_opts.add_option(("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftovers)));
build_opts.add_option(("-DRADIUS=" + support::cpp11::to_string(norm_info.norm_size() / 2)));
build_opts.add_option(("-DNUM_SLICES=" + support::cpp11::to_string(input->info()->dimension(2))));
build_opts.add_option_if(is_in_map_2D, "-DIN_MAP_2D");
build_opts.add_option_if(norm_info.is_in_map() || (data_layout == DataLayout::NHWC && norm_info.is_cross_map()), "-DWIDTH_SIZE=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option_if(norm_info.is_in_map() && data_layout == DataLayout::NHWC, "-DDIM1_SIZE=" + support::cpp11::to_string(input->info()->dimension(1)));
// Create kernel
std::string kernel_name;
@@ -153,21 +179,11 @@ void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_conte
}
else
{
- if(data_layout == DataLayout::NCHW)
- {
- kernel_name = "normalization_layer_cross_map";
- }
- else
- {
- // 1D Cross-Map normalization in NHWC is the same as 1D In-Map normalization in NCHW
- kernel_name = "normalization_layer_in_map_nchw";
- }
+ kernel_name = "normalization_layer_cross_map_" + lower_string(string_from_data_layout(data_layout));
}
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output->info(), norm_info);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
// Set config_id for enabling LWS tuning
@@ -181,6 +197,10 @@ void CLNormalizationLayerKernel::configure(const CLCompileContext &compile_conte
_config_id += support::cpp11::to_string(input->info()->dimension(0));
_config_id += "_";
_config_id += support::cpp11::to_string(input->info()->dimension(1));
+ if(data_layout == DataLayout::NHWC)
+ {
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+ }
}
Status CLNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, NormalizationLayerInfo norm_info)
@@ -209,3 +229,4 @@ void CLNormalizationLayerKernel::run(const Window &window, cl::CommandQueue &que
}
while(window_collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/CL/functions/CLNormalizationLayer.cpp b/src/runtime/CL/functions/CLNormalizationLayer.cpp
index ec6fa803f5..12560f1b02 100644
--- a/src/runtime/CL/functions/CLNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLNormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLNormalizationLayerKernel.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
CLNormalizationLayer::CLNormalizationLayer()
: _norm_kernel(std::make_unique<CLNormalizationLayerKernel>()),
_border_handler(std::make_unique<CLFillBorderKernel>())
@@ -55,8 +55,11 @@ void CLNormalizationLayer::configure(const CLCompileContext &compile_context, IC
// Configure normalization kernel
_norm_kernel->configure(compile_context, input, output, norm_info);
- // Fill the border by 3 elements since we need vload4 in the IN_MAP normalization kernel
- _border_handler->configure(compile_context, input, _norm_kernel->border_size(), BorderMode::CONSTANT, PixelValue());
+ if(!_norm_kernel->border_size().empty())
+ {
+ // Fill the border by 3 elements since we need vload4 in the IN_MAP normalization kernel
+ _border_handler->configure(compile_context, input, _norm_kernel->border_size(), BorderMode::CONSTANT, PixelValue());
+ }
}
Status CLNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const NormalizationLayerInfo &norm_info)
@@ -66,9 +69,13 @@ Status CLNormalizationLayer::validate(const ITensorInfo *input, const ITensorInf
void CLNormalizationLayer::run()
{
- // Run border handler
- CLScheduler::get().enqueue(*_border_handler, false);
+ if(!_norm_kernel->border_size().empty())
+ {
+ // Run border handler
+ CLScheduler::get().enqueue(*_border_handler, false);
+ }
// Run normalization kernel
CLScheduler::get().enqueue(*_norm_kernel);
}
+} // namespace arm_compute
\ No newline at end of file
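
For completeness, a minimal host-side usage sketch of the public function touched in this file (tensor shape, layout and initialisation values are placeholders chosen for the example, not taken from the patch):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLNormalizationLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // NHWC F32 tensors; the shape is arbitrary for the example
    TensorInfo info(TensorShape(27U, 13U, 2U), 1, DataType::F32);
    info.set_data_layout(DataLayout::NHWC);

    CLTensor src, dst;
    src.allocator()->init(info);
    dst.allocator()->init(info);

    CLNormalizationLayer norm;
    norm.configure(&src, &dst, NormalizationLayerInfo(NormType::CROSS_MAP, 5));

    src.allocator()->allocate();
    dst.allocator()->allocate();

    norm.run();               // border handler only runs when border_size() is non-empty
    CLScheduler::get().sync();
    return 0;
}
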
diff --git a/tests/validation/CL/NormalizationLayer.cpp b/tests/validation/CL/NormalizationLayer.cpp
index 1aed2786ff..b1c28ad644 100644
--- a/tests/validation/CL/NormalizationLayer.cpp
+++ b/tests/validation/CL/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,25 +70,22 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Even normalization
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Non implemented IN_MAP_2D
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink
+ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrinking for NCHW
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
})),
framework::dataset::make("NormInfo", { NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
NormalizationLayerInfo(NormType::IN_MAP_1D, 4),
NormalizationLayerInfo(NormType::IN_MAP_2D, 5),
- NormalizationLayerInfo(NormType::IN_MAP_1D, 5),
NormalizationLayerInfo(NormType::CROSS_MAP, 5),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, true })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
input_info, output_info, norm_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)) == expected, framework::LogLevel::ERRORS);