author    Gian Marco Iodice <gianmarco.iodice@arm.com>  2018-03-22 11:24:56 +0000
committer Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:49:16 +0000
commit    247f52cfe337f7b2542b900e3d8cf122e9d4f11c (patch)
tree      bcbabb7f1eea588a5d37566829763506d328e7a9 /arm_compute/core/utils
parent    eb8a399ba655b85c6854676832eb11b0af4108fe (diff)
download  ComputeLibrary-247f52cfe337f7b2542b900e3d8cf122e9d4f11c.tar.gz
COMPMID-1013 - Create WinogradInfo data structure
COMPMID-1014 - Refactoring Winograd's dataset

Change-Id: I6abdcbf9a90d663f4db666cd410afece9f1d034d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125899
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
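The patch below replaces the loose Size2D/PadStrideInfo parameters of the Winograd shape helpers with a single WinogradInfo argument. Based only on the members read in this diff (output_tile_size, kernel_size, input_dimensions, convolution_info, output_data_layout), the new structure carries roughly the following data; this is a minimal sketch, and the real definition in the library may differ in member order, constructors and defaults:

    // Hypothetical sketch of the WinogradInfo structure, reconstructed from the
    // fields accessed in this patch. Size2D, PadStrideInfo and DataLayout are the
    // existing arm_compute types.
    struct WinogradInfo
    {
        Size2D        output_tile_size{};   // Width/height of the Winograd output tile (e.g. 2x2 or 4x4)
        Size2D        kernel_size{};        // Width/height of the convolution kernel (e.g. 3x3)
        Size2D        input_dimensions{};   // Width/height of the convolution input tensor
        PadStrideInfo convolution_info{};   // Padding and stride of the convolution
        DataLayout    output_data_layout{ DataLayout::NCHW }; // Data layout of the output tensor
    };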
Diffstat (limited to 'arm_compute/core/utils')
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h  |  50
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 8816819bcd..c3d5b64a92 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -196,31 +196,35 @@ inline TensorShape compute_fully_connected_reshaped_weights_shape(const ITensorI
return output_shape;
}
-inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const Size2D &output_tile)
+inline TensorShape compute_winograd_filter_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
TensorShape tensor_shape{ input.tensor_shape() };
- tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
- tensor_shape.set(Window::DimY, input.dimension(2));
- tensor_shape.set(Window::DimZ, (output_tile.width == 2) ? 16 : 36);
+ const Size2D kernel_size = winograd_info.kernel_size;
+ const Size2D output_tile_size = winograd_info.output_tile_size;
+ const Size2D input_tile_size = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);
- if(input.data_layout() == DataLayout::NCHW)
- {
- tensor_shape.set(Window::DimX, input.dimension(3));
- }
+ tensor_shape.remove_dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH));
+ tensor_shape.set(Window::DimX, input.dimension(3));
+ tensor_shape.set(Window::DimY, input.dimension(get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)));
+ tensor_shape.set(Window::DimZ, input_tile_size.area());
return tensor_shape;
}
-
-inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const PadStrideInfo &conv_info, const Size2D &kernel_size)
+inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
+ const PadStrideInfo conv_info = winograd_info.convolution_info;
+ const Size2D kernel_size = winograd_info.kernel_size;
+ const Size2D output_tile_size = winograd_info.output_tile_size;
+ const Size2D input_tile_size = Size2D(output_tile_size.width + kernel_size.width - 1, output_tile_size.height + kernel_size.height - 1);
+
// Compute height
- const unsigned int num_tiles_x = std::ceil((input.tensor_shape().x() - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / 2.f);
- const unsigned int num_tiles_y = std::ceil((input.tensor_shape().y() - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / 2.f);
+ const unsigned int num_tiles_x = std::ceil((input.tensor_shape().x() - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / static_cast<float>(output_tile_size.width));
+ const unsigned int num_tiles_y = std::ceil((input.tensor_shape().y() - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / static_cast<float>(output_tile_size.height));
const unsigned int width = input.tensor_shape()[get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL)];
const unsigned int height = num_tiles_x * num_tiles_y;
- const unsigned int depth = 16; // COMPMID-990
+ const unsigned int depth = input_tile_size.area();
TensorShape output_shape{ input.tensor_shape() };
output_shape.set(0, width);
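Both Winograd helpers above now derive the input tile size as output_tile_size + kernel_size - 1 and use its area as the transform depth, replacing the previously hard-coded 16 (and 16/36) values. The following standalone C++ sketch works through that arithmetic with assumed sample values (3x3 kernel, 2x2 output tile, 224x224 unpadded input), which are illustrative only and not taken from the patch:

    #include <cmath>
    #include <cstdio>

    // Standalone illustration of the tile arithmetic used by the Winograd shape helpers.
    int main()
    {
        const unsigned int kernel_w = 3, kernel_h = 3;    // F(2x2, 3x3) Winograd (assumed)
        const unsigned int out_tile_w = 2, out_tile_h = 2;
        const unsigned int in_w = 224, in_h = 224;        // assumed input dimensions
        const unsigned int pad_l = 0, pad_r = 0, pad_t = 0, pad_b = 0;

        // Input tile size: output tile grown by (kernel - 1) in each dimension
        const unsigned int in_tile_w = out_tile_w + kernel_w - 1;  // 4
        const unsigned int in_tile_h = out_tile_h + kernel_h - 1;  // 4
        const unsigned int depth     = in_tile_w * in_tile_h;      // 16 (36 for a 4x4 output tile)

        // Number of tiles per spatial dimension, as in compute_winograd_input_transform_shape
        const unsigned int num_tiles_x = std::ceil((in_w - (kernel_w - 1) + pad_l + pad_r) / static_cast<float>(out_tile_w));
        const unsigned int num_tiles_y = std::ceil((in_h - (kernel_h - 1) + pad_t + pad_b) / static_cast<float>(out_tile_h));

        std::printf("input tile %ux%u, depth %u, tiles %ux%u\n", in_tile_w, in_tile_h, depth, num_tiles_x, num_tiles_y);
        return 0;
    }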
@@ -229,14 +233,24 @@ inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &inp
return output_shape;
}
-
-inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const Size2D &output_convolved_dims, DataLayout data_layout)
+inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const WinogradInfo &winograd_info)
{
+ const PadStrideInfo conv_info = winograd_info.convolution_info;
+ const Size2D kernel_size = winograd_info.kernel_size;
+ const Size2D input_dimensions = winograd_info.input_dimensions;
+ const DataLayout data_layout = winograd_info.output_data_layout;
+
+ // Compute output shape
+ unsigned int output_width = 0;
+ unsigned int output_height = 0;
+ std::tie(output_width, output_height) = scaled_dimensions(input_dimensions.width, input_dimensions.height,
+ kernel_size.width, kernel_size.height, conv_info);
+
TensorShape tensor_shape{ input.tensor_shape() };
// Output dimension
- const unsigned int out_w = output_convolved_dims.width;
- const unsigned int out_h = output_convolved_dims.height;
+ const unsigned int out_w = output_width;
+ const unsigned int out_h = output_height;
const unsigned int out_c = input.dimension(0);
tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
@@ -245,7 +259,6 @@ inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &in
return tensor_shape;
}
-
inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
{
const TensorShape input_shape{ input.tensor_shape() };
@@ -271,7 +284,6 @@ inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, cons
return output_shape;
}
-
inline TensorShape compute_min_max_shape(const ITensorInfo *input)
{
TensorShape output_shape{ input->tensor_shape() };
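For reference, a hedged usage sketch of the refactored helpers follows. The WinogradInfo constructor arguments and their order are an assumption inferred from the fields read in this patch, as are the tile/kernel/padding values; consult the library header for the real signature.

    // Hypothetical caller of the refactored shape helpers.
    using namespace arm_compute;
    using namespace arm_compute::misc::shape_calculator;

    void example(const ITensorInfo &weights, const ITensorInfo &src)
    {
        const WinogradInfo winograd_info(Size2D(2U, 2U),                              // output tile size (assumed)
                                         Size2D(3U, 3U),                              // kernel size (assumed)
                                         Size2D(src.dimension(0), src.dimension(1)),  // input dimensions
                                         PadStrideInfo(1, 1, 1, 1),                   // convolution info (assumed)
                                         DataLayout::NCHW);                           // output data layout (assumed)

        const TensorShape filter_shape = compute_winograd_filter_transform_shape(weights, winograd_info);
        const TensorShape input_shape  = compute_winograd_input_transform_shape(src, winograd_info);
        // compute_winograd_output_transform_shape(...) takes the batched-GEMM output as its input tensor info
    }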