From 215b4ea6c9dee480a22070d5873b0b8cb52531a0 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 28 Jun 2018 16:29:29 +0100
Subject: COMPMID-1277 - Optimizing CLIm2ColKernel for NHWC.

This patch includes:
- Im2Col optimizations for NHWC using a new data layout
- Refactoring of CLIm2ColKernel adding validation method and auto-init
- Removed im2col_reduced from CLIm2ColKernel and created a new kernel
  CLFlattenLayerKernel

Change-Id: I1620640b6796baa268324b33ae92cdd8de53e27c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/141241
Tested-by: Jenkins
Reviewed-by: Giorgio Arena
---
 tests/validation/reference/Im2Col.cpp | 109 +++++++++++++++++++++++++---------
 1 file changed, 81 insertions(+), 28 deletions(-)

diff --git a/tests/validation/reference/Im2Col.cpp b/tests/validation/reference/Im2Col.cpp
index 83ef8b40a5..2459499474 100644
--- a/tests/validation/reference/Im2Col.cpp
+++ b/tests/validation/reference/Im2Col.cpp
@@ -23,8 +23,6 @@
  */
 #include "Im2Col.h"
 
-#include "Permute.h"
-
 #include "arm_compute/core/Types.h"
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/Utils.h"
@@ -41,46 +39,45 @@ template <typename T>
 void im2col_nchw(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias)
 {
     ARM_COMPUTE_ERROR_ON(src.data_layout() != DataLayout::NCHW);
-    // Create reference
-    const int pad_x         = conv_info.pad().first;
-    const int pad_y         = conv_info.pad().second;
     const int stride_x      = conv_info.stride().first;
     const int stride_y      = conv_info.stride().second;
     const int kernel_width  = kernel_dims.width;
     const int kernel_height = kernel_dims.height;
+    const int pad_x         = conv_info.pad().first;
+    const int pad_y         = conv_info.pad().second;
     const int src_width     = src.shape().x();
     const int src_height    = src.shape().y();
-    const int src_depth     = src.shape().z();
+    const int src_channels  = src.shape().z();
     const int batches       = src.shape().total_size_upper(3);
+    const int dst_height    = dst.shape().y();
     const int pad_val       = is_data_type_quantized_asymmetric(src.data_type()) ? src.quantization_info().offset : 0;
+    int       dst_idx       = 0;
 
-    int dst_idx = 0;
-    // dst[dst_idx++] will write out of bounds if kernel_height == kernel_width == 1 because lasty will be the bottom padding row
-    // and this is not present in the dst buffer
-    const int lasty = src_height + (kernel_height > 1 ? pad_y : 0) - kernel_height;
-    const int lastx = src_width + (kernel_width > 1 ? pad_x : 0) - kernel_width;
+    // Compute width and height of the convolved tensors
+    std::pair<unsigned int, unsigned int> convolved_dims = scaled_dimensions(src_width, src_height, kernel_dims.width, kernel_dims.height, conv_info);
 
     for(int b = 0; b < batches; ++b)
     {
-        for(int y = -pad_y; y <= lasty; y += stride_y)
+        for(int yo = 0; yo < dst_height; ++yo)
         {
-            for(int x = -pad_x; x <= lastx; x += stride_x)
+            // Compute input spatial coordinates
+            const int xi = (yo % convolved_dims.first) * stride_x;
+            const int yi = (yo / convolved_dims.first) * stride_y;
+
+            for(int ci = 0; ci < src_channels; ++ci)
             {
-                for(int z = 0; z < src_depth; ++z)
+                for(int yk = 0; yk < kernel_height; ++yk)
                 {
-                    for(int patch_y = y; patch_y < (y + kernel_height); ++patch_y)
+                    for(int xk = 0; xk < kernel_width; ++xk)
                     {
-                        for(int patch_x = x; patch_x < (x + kernel_width); ++patch_x)
-                        {
-                            dst[dst_idx++] = tensor_elem_at(src, Coordinates(patch_x, patch_y, z, b), BorderMode::CONSTANT, static_cast<T>(pad_val));
-                        }
+                        dst[dst_idx++] = tensor_elem_at(src, Coordinates(xi + xk - pad_x, yi + yk - pad_y, ci, b), BorderMode::CONSTANT, static_cast<T>(pad_val));
                     }
                 }
+            }
 
-                if(has_bias)
-                {
-                    dst[dst_idx++] = static_cast<T>(1);
-                }
+            if(has_bias)
+            {
+                dst[dst_idx++] = static_cast<T>(1);
             }
         }
     }
@@ -133,7 +130,56 @@ void im2col_nhwc(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D
 }
 
 template <typename T>
-void im2col(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias)
+void im2col_nhwc_channel_first(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias)
+{
+    ARM_COMPUTE_ERROR_ON(src.data_layout() != DataLayout::NHWC);
+    const int stride_x      = conv_info.stride().first;
+    const int stride_y      = conv_info.stride().second;
+    const int kernel_width  = kernel_dims.width;
+    const int kernel_height = kernel_dims.height;
+    const int pad_x         = conv_info.pad().first;
+    const int pad_y         = conv_info.pad().second;
+    const int src_width     = src.shape().y();
+    const int src_height    = src.shape().z();
+    const int src_channels  = src.shape().x();
+    const int batches       = src.shape().total_size_upper(3);
+    const int dst_width     = has_bias ? dst.shape().x() - 1 : dst.shape().x();
+    const int dst_height    = dst.shape().y();
+    const int pad_val       = is_data_type_quantized_asymmetric(src.data_type()) ? src.quantization_info().offset : 0;
+
+    // Compute width and height of the convolved tensors
+    std::pair<unsigned int, unsigned int> convolved_dims = scaled_dimensions(src_width, src_height, kernel_dims.width, kernel_dims.height, conv_info);
+
+    for(int b = 0; b < batches; ++b)
+    {
+        for(int yo = 0; yo < dst_height; ++yo)
+        {
+            // Compute input spatial coordinates
+            const int xi = (yo % convolved_dims.first) * stride_x;
+            const int yi = (yo / convolved_dims.first) * stride_y;
+
+            for(int ci = 0; ci < src_channels; ++ci)
+            {
+                for(int yk = 0; yk < kernel_height; ++yk)
+                {
+                    for(int xk = 0; xk < kernel_width; ++xk)
+                    {
+                        dst[ci + (xk + yk * kernel_width) * src_channels + yo * dst.shape().x() + b * dst.shape().x() * dst.shape().y()] = tensor_elem_at(src, Coordinates(ci, xi + xk - pad_x, yi + yk - pad_y, b),
+                                                                                                                                                          BorderMode::CONSTANT, static_cast<T>(pad_val));
+                    }
+                }
+            }
+
+            if(has_bias)
+            {
+                dst[dst_width + yo * dst.shape().x() + b * dst.shape().x() * dst.shape().y()] = static_cast<T>(1);
+            }
+        }
+    }
+}
+
+template <typename T>
+void im2col(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, bool channels_first_output_nhwc)
 {
     switch(src.data_layout())
     {
@@ -144,7 +190,14 @@ void im2col(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D &kern
         }
         case DataLayout::NHWC:
         {
-            im2col_nhwc(src, dst, kernel_dims, conv_info, has_bias);
+            if(channels_first_output_nhwc)
+            {
+                im2col_nhwc_channel_first(src, dst, kernel_dims, conv_info, has_bias);
+            }
+            else
+            {
+                im2col_nhwc(src, dst, kernel_dims, conv_info, has_bias);
+            }
             break;
         }
         default:
@@ -155,9 +208,9 @@ void im2col(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const Size2D &kern
     }
 }
 
-template void im2col(const SimpleTensor<uint8_t> &src, SimpleTensor<uint8_t> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
-template void im2col(const SimpleTensor<half> &src, SimpleTensor<half> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
-template void im2col(const SimpleTensor<float> &src, SimpleTensor<float> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias);
+template void im2col(const SimpleTensor<uint8_t> &src, SimpleTensor<uint8_t> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, bool channels_first_output_nhwc);
+template void im2col(const SimpleTensor<half> &src, SimpleTensor<half> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, bool channels_first_output_nhwc);
+template void im2col(const SimpleTensor<float> &src, SimpleTensor<float> &dst, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, bool channels_first_output_nhwc);
 } // namespace reference
 } // namespace validation
 } // namespace test
--
cgit v1.2.1
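
Note on the new reference layout (illustrative sketch, not part of the patch): im2col_nhwc_channel_first() writes each output row with the channel index innermost, i.e. patch element (ci, xk, yk) for output position yo lands at ci + (xk + yk * Kw) * C + yo * row_stride. The standalone program below mirrors that index arithmetic; PatchGeometry, dst_index() and src_coords() are hypothetical helper names for this sketch only, not arm_compute APIs.

// Standalone illustration of the NHWC channel-first im2col indexing used above.
#include <cstdio>

struct PatchGeometry
{
    int kernel_w, kernel_h; // filter size
    int stride_x, stride_y; // convolution strides
    int pad_x, pad_y;       // left/top padding
    int conv_w;             // width of the convolved output (convolved_dims.first)
    int channels;           // number of input channels (C in NHWC)
};

// Destination index for (batch b, output row yo, kernel tap (xk, yk), channel ci),
// assuming one dst row holds C * Kw * Kh values (plus an optional bias column).
int dst_index(const PatchGeometry &g, int b, int yo, int yk, int xk, int ci, int dst_row_len, int dst_rows)
{
    return ci + (xk + yk * g.kernel_w) * g.channels + yo * dst_row_len + b * dst_row_len * dst_rows;
}

// Input spatial coordinates read for output row yo and kernel tap (xk, yk);
// matches xi/yi in the reference: yo is linearised over the convolved plane.
void src_coords(const PatchGeometry &g, int yo, int yk, int xk, int &x, int &y)
{
    const int xi = (yo % g.conv_w) * g.stride_x;
    const int yi = (yo / g.conv_w) * g.stride_y;
    x = xi + xk - g.pad_x; // negative or out-of-range values read the padding value
    y = yi + yk - g.pad_y;
}

int main()
{
    // 3x3 kernel, stride 1, no padding, 4x4 input with 2 channels -> 2x2 convolved output.
    const PatchGeometry g{ 3, 3, 1, 1, 0, 0, 2, 2 };
    const int dst_row_len = g.channels * g.kernel_w * g.kernel_h; // 18, no bias column here
    const int dst_rows    = 4;                                    // 2x2 convolved positions

    int x = 0, y = 0;
    src_coords(g, /*yo=*/3, /*yk=*/2, /*xk=*/2, x, y);
    std::printf("dst[%d] reads src(x=%d, y=%d)\n",
                dst_index(g, /*b=*/0, /*yo=*/3, 2, 2, /*ci=*/1, dst_row_len, dst_rows), x, y);
    return 0;
}

With this assumed 3x3 kernel, stride 1 and no padding on a 4x4x2 NHWC input, the last patch element (yo = 3, yk = xk = 2, ci = 1) maps to dst[71] and reads src(x = 3, y = 3), matching the loop structure of the reference above.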