author    Georgios Pinitas <georgios.pinitas@arm.com>  2018-07-20 13:23:44 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:54:54 +0000
commit    e2220551b7a64b929650ba9a60529c31e70c13c5 (patch)
tree      5d609887f15b4392cdade7bb388710ceafc62260 /src/core/CL
parent    eff8d95991205e874091576e2d225f63246dd0bb (diff)
download  ComputeLibrary-e2220551b7a64b929650ba9a60529c31e70c13c5.tar.gz
COMPMID-1367: Enable NHWC in graph examples
Change-Id: Iabc54a3a1bdcd46a9a921cda39c7c85fef672b72
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141449
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
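The change adjusts the CL kernels so the graph examples can also run with NHWC-laid-out tensors. As a sketch of why the kernels have to look up dimension indices per layout instead of hardcoding them (illustrative only, not code from this commit), the same logical element (n, c, h, w) lands at different linear offsets under the two layouts, assuming dense row-major storage with the batch dimension outermost:

#include <cstddef>

// NCHW: width is the innermost (fastest-moving) dimension.
std::size_t offset_nchw(std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                        std::size_t C, std::size_t H, std::size_t W)
{
    return ((n * C + c) * H + h) * W + w;
}

// NHWC: channel is the innermost dimension.
std::size_t offset_nhwc(std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                        std::size_t C, std::size_t H, std::size_t W)
{
    return ((n * H + h) * W + w) * C + c;
}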
Diffstat (limited to 'src/core/CL')
-rw-r--r--  src/core/CL/cl_kernels/pooling_layer.cl             |  4
-rw-r--r--  src/core/CL/kernels/CLIm2ColKernel.cpp              |  1
-rw-r--r--  src/core/CL/kernels/CLNormalizationLayerKernel.cpp  | 22
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp        |  4
4 files changed, 18 insertions, 13 deletions
diff --git a/src/core/CL/cl_kernels/pooling_layer.cl b/src/core/CL/cl_kernels/pooling_layer.cl
index c38a78ce3e..080835348d 100644
--- a/src/core/CL/cl_kernels/pooling_layer.cl
+++ b/src/core/CL/cl_kernels/pooling_layer.cl
@@ -549,10 +549,10 @@ __kernel void pooling_layer_MxN_nhwc(
for(int y = 0; y < POOL_SIZE_Y; ++y)
{
- int y1 = select(y, PAD_Y - idx_height, y + idx_height < PAD_Y || y + idx_height > MAX_HEIGHT);
+ int y1 = select(y, PAD_Y - idx_height, y + idx_height - PAD_Y < 0 || y + idx_height - PAD_Y >= MAX_HEIGHT);
for(int x = 0; x < POOL_SIZE_X; ++x)
{
- int x1 = select(x, PAD_X - idx_width - 1, x + idx_width < PAD_X || x + idx_width > MAX_WIDTH);
+ int x1 = select(x, PAD_X - idx_width - 1, x + idx_width - PAD_X < 0 || x + idx_width - PAD_X >= MAX_WIDTH);
x1 = select(x1, PAD_X - idx_width - 1, y != y1);
VEC_DATA_TYPE(DATA_TYPE, 8)
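The hunk above tightens the out-of-bounds test in the NHWC MxN pooling kernel: the coordinate is first shifted back by the padding and then checked against the half-open range [0, MAX), whereas the old test compared the padded coordinate against MAX with a strict '>', ignoring the padding offset on the upper bound and accepting the element equal to MAX. A scalar C++ sketch of the corrected check (the parameter names mirror the kernel's compile-time defines; illustrative only):

// Decide whether a pooling tap at offset y/x from the window start falls outside
// the valid input rows/columns, given the padding and the unpadded input extent.
inline bool row_is_padding(int y, int idx_height, int pad_y, int max_height)
{
    const int in_row = y + idx_height - pad_y;  // row in the unpadded input
    return in_row < 0 || in_row >= max_height;  // valid rows are [0, max_height)
}

inline bool col_is_padding(int x, int idx_width, int pad_x, int max_width)
{
    const int in_col = x + idx_width - pad_x;
    return in_col < 0 || in_col >= max_width;
}

// The kernel expresses the same test through select(), redirecting padded taps to
// a coordinate known to hold the padding value instead of branching.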
diff --git a/src/core/CL/kernels/CLIm2ColKernel.cpp b/src/core/CL/kernels/CLIm2ColKernel.cpp
index b1290b8edd..a09129bba6 100644
--- a/src/core/CL/kernels/CLIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLIm2ColKernel.cpp
@@ -288,7 +288,6 @@ void CLIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_ERROR_ON(input->info()->data_layout() == DataLayout::UNKNOWN);
- ARM_COMPUTE_ERROR_ON_MSG(output->info()->data_layout() != DataLayout::NCHW, "Special case Im2Col output layout is NCHW");
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), has_bias, dilation));
_input = input;
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
index df01eab240..edc9e9d58c 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
@@ -42,6 +42,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, N
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC && norm_info.type() == NormType::IN_MAP_2D,
+ "Only Cross-map and 1D In-map normalization is supported for NHWC layout");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(norm_info.norm_size() % 2), "Normalization size should be odd");
// Checks performed when output is configured
@@ -59,14 +61,15 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output, *input->clone());
- const unsigned int norm_size = norm_info.norm_size();
- bool is_in_map = norm_info.is_in_map();
+ const unsigned int norm_idx = get_normalization_dimension_index(input->data_layout(), norm_info);
+ const unsigned int norm_size = norm_info.norm_size();
+ bool is_norm_accross_width = norm_idx == 0;
- const unsigned int border_width = is_in_map ? std::min(norm_size / 2, 3U) : 0;
+ const unsigned int border_width = is_norm_accross_width ? std::min(norm_size / 2, 3U) : 0;
const BorderSize border_size = BorderSize(0, border_width);
const unsigned int num_elems_processed_per_iteration = 4;
- const unsigned int num_elems_read_per_iteration = is_in_map ? (num_elems_processed_per_iteration + 2 * (norm_size / 2)) : num_elems_processed_per_iteration;
+ const unsigned int num_elems_read_per_iteration = is_norm_accross_width ? (num_elems_processed_per_iteration + 2 * (norm_size / 2)) : num_elems_processed_per_iteration;
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -84,7 +87,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
} // namespace
CLNormalizationLayerKernel::CLNormalizationLayerKernel()
- : _input(nullptr), _output(nullptr), _border_size(0), _is_in_map(false)
+ : _input(nullptr), _output(nullptr), _border_size(0), _is_norm_across_width(false)
{
}
@@ -106,8 +109,9 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou
_input = input;
_output = output;
- _is_in_map = norm_info.is_in_map();
- const unsigned int border_width = _is_in_map ? std::min(norm_info.norm_size() / 2, 3U) : 0;
+ const unsigned int norm_idx = get_normalization_dimension_index(input->info()->data_layout(), norm_info);
+ _is_norm_across_width = norm_idx == 0;
+ const unsigned int border_width = _is_norm_across_width ? std::min(norm_info.norm_size() / 2, 3U) : 0;
_border_size = BorderSize(0, border_width);
const unsigned int num_elems_processed_per_iteration = 4;
@@ -125,7 +129,7 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *ou
build_opts.add_option_if(is_in_map_2D, "-DIN_MAP_2D");
// Create kernel
- std::string kernel_name = _is_in_map ? "normalization_layer_in_map" : "normalization_layer_cross_map";
+ std::string kernel_name = _is_norm_across_width ? "normalization_layer_in_map" : "normalization_layer_cross_map";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
// Configure kernel window
@@ -159,7 +163,7 @@ void CLNormalizationLayerKernel::run(const Window &window, cl::CommandQueue &que
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
- const int collapsed_dimension = _is_in_map ? Window::DimZ : 4;
+ const int collapsed_dimension = _is_norm_across_width ? Window::DimZ : 4;
Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), collapsed_dimension);
Window slice = window_collapsed.first_slice_window_3D();
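With this change the normalization kernel derives its border requirement from the dimension that is actually normalized rather than from the in-map flag alone: a horizontal border (and the wider read window) is only needed when that dimension is the innermost one, dimension 0, which is the width in NCHW but the channel in NHWC. A hedged sketch of the mapping this relies on; the real helper is get_normalization_dimension_index in the library, and the enum values and exact behaviour below are assumptions for illustration:

// Assumed mapping: in-map normalization runs along WIDTH, cross-map along CHANNEL,
// and the index of each depends on the layout (NCHW orders dimensions W,H,C,N;
// NHWC orders them C,W,H,N).
enum class Layout { NCHW, NHWC };
enum class NormType { IN_MAP_1D, IN_MAP_2D, CROSS_MAP };

unsigned int norm_dimension_index(Layout layout, NormType type)
{
    const bool in_map = (type != NormType::CROSS_MAP);
    if (layout == Layout::NCHW)
    {
        return in_map ? 0U : 2U; // width = dim 0, channel = dim 2
    }
    return in_map ? 1U : 0U;     // NHWC: width = dim 1, channel = dim 0
}

// is_norm_across_width == (norm_dimension_index(layout, type) == 0), i.e. the
// normalized dimension is the kernel's x dimension and needs the border handling.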
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 246ab68130..d5ea092c78 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -154,7 +154,9 @@ std::tuple<Status, Window, CLPoolingConfig> validate_and_configure_window(ITenso
num_elems_processed_per_iteration = 8;
win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
- AccessWindowRectangle input_access(input, 0, -pool_pad_left, num_elems_processed_per_iteration, pool_size_x);
+ AccessWindowStatic input_access(input,
+ 0, -1,
+ ceil_to_multiple(input->dimension(0), num_elems_processed_per_iteration), input->dimension(1));
AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
window_changed = update_window_and_padding(win, input_access, output_access);
output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
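In this last hunk the pooling kernel's input access window is declared statically, spanning from (0, -1) to the first input dimension rounded up to the vector step of 8 elements and the full second dimension, rather than as a rectangle offset by the left padding. ceil_to_multiple rounds a size up to the next multiple of a step; the usual formulation, shown as a sketch (the library ships its own helper, which may differ in signature):

// Round value up to the nearest multiple of divisor, e.g. (13, 8) -> 16.
// Assumes value >= 0 and divisor > 0.
template <typename T>
T ceil_to_multiple_sketch(T value, T divisor)
{
    return ((value + divisor - 1) / divisor) * divisor;
}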