From d2fab7315bac3a586f2f1b1c8d64f2441f89ca64 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Fri, 2 Mar 2018 11:18:12 +0000
Subject: COMPMID-935 - Implementing Convolution with Winograd on OpenCL (part 4)

Implemented Winograd Output Transform (2x2,3x3) on OpenCL
Implemented CLWinogradConvolutionLayer on OpenCL

Change-Id: I6a113fc5f052ca07f878d2b800d2ab003f84af65
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125148
Reviewed-by: Georgios Pinitas
Tested-by: Jenkins
---
 arm_compute/core/CL/CLKernels.h                    |  1 +
 .../core/CL/kernels/CLGEMMMatrixMultiplyKernel.h   |  1 +
 .../CL/kernels/CLWinogradOutputTransformKernel.h   | 81 ++++++++++++++++++++++
 arm_compute/core/utils/misc/ShapeCalculator.h      | 40 +++++++++--
 4 files changed, 117 insertions(+), 6 deletions(-)
 create mode 100644 arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h

(limited to 'arm_compute/core')

diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index ef629c2e81..6f5c61523f 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -111,5 +111,6 @@
 #include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
 #include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
 #include "arm_compute/core/CL/kernels/CLWinogradInputTransformKernel.h"
+#include "arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h"
 
 #endif /* __ARM_COMPUTE_CLKERNELS_H__ */
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index 7260c4a4f6..ee7e7c0e97 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -84,6 +84,7 @@ private:
     const ICLTensor *_input0;
     const ICLTensor *_input1;
     ICLTensor       *_output;
+    bool             _slide_matrix_b;
 };
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_CLGEMMMATRIXMULTIPLYKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h b/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h
new file mode 100644
index 0000000000..35117c65db
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H__
+#define __ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the Winograd output transform kernel. */
+class CLWinogradOutputTransformKernel : public ICLKernel
+{
+public:
+    /** Default constructor */
+    CLWinogradOutputTransformKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLWinogradOutputTransformKernel(const CLWinogradOutputTransformKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLWinogradOutputTransformKernel &operator=(const CLWinogradOutputTransformKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLWinogradOutputTransformKernel(CLWinogradOutputTransformKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLWinogradOutputTransformKernel &operator=(CLWinogradOutputTransformKernel &&) = default;
+    /** Default destructor */
+    ~CLWinogradOutputTransformKernel() = default;
+    /** Set the input and output tensor.
+     *
+     * @param[in]  input                 Source tensor with shape [C, N, 16, batches]. Data types supported: F32.
+     * @param[in]  bias                  Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
+     * @param[out] output                Destination tensor with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
+     * @param[in]  kernel_dims           Kernel dimensions (Width and height). Currently only 3x3 kernels are supported
+     * @param[in]  output_convolved_dims Output dimensions after the convolution (Width and height)
+     * @param[in]  num_tiles             Number of tiles of size 2x2 in the output tensor along the X and Y directions
+     */
+    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims, const Size2D &num_tiles);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLWinogradOutputTransformKernel
+     *
+     * @param[in]  input                 Source tensor with shape [C, N, 16, batches]. Data types supported: F32.
+     * @param[in]  bias                  Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM]. It can be a nullptr. Data type supported: as @p input
+     * @param[out] output                Destination tensor with shape [output_convolved_dims.width, output_convolved_dims.height, C, batches]. Data type supported: same as @p input
+     * @param[in]  kernel_dims           Kernel dimensions (Width and height). Currently only 3x3 kernels are supported
+     * @param[in]  output_convolved_dims Output dimensions after the convolution (Width and height)
+     * @param[in]  num_tiles             Number of tiles of size 2x2 in the output tensor along the X and Y directions
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const Size2D &kernel_dims, const Size2D &output_convolved_dims, const Size2D &num_tiles);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    const ICLTensor *_input;
+    const ICLTensor *_bias;
+    ICLTensor       *_output;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CLWINOGRADOUTPUTTRANSFORMKERNEL_H__ */
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 1e90927a93..5344ce7e74 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -28,6 +28,8 @@
 #include "arm_compute/core/ITensorInfo.h"
 #include "arm_compute/core/Utils.h"
 
+#include
+
 namespace arm_compute
 {
 namespace misc
@@ -233,19 +235,45 @@ inline TensorShape compute_winograd_input_transform_shape(const ITensorInfo &inp
     return output_shape;
 }
+
+inline TensorShape compute_winograd_output_transform_shape(const ITensorInfo &input, const Size2D &output_convolved_dims, DataLayout data_layout)
+{
+    TensorShape tensor_shape{ input.tensor_shape() };
+
+    // Output dimension
+    const unsigned int out_w = output_convolved_dims.width;
+    const unsigned int out_h = output_convolved_dims.height;
+    const unsigned int out_c = input.dimension(0);
+
+    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH), out_w);
+    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT), out_h);
+    tensor_shape.set(get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL), out_c);
+
+    return tensor_shape;
+}
+
 inline TensorShape compute_deep_convolution_shape(const ITensorInfo &input, const ITensorInfo &weights, PadStrideInfo conv_info)
 {
     const TensorShape input_shape{ input.tensor_shape() };
     const TensorShape weights_shape{ weights.tensor_shape() };
 
-    unsigned int output_width  = 0;
-    unsigned int output_height = 0;
-    std::tie(output_width, output_height) = scaled_dimensions(input_shape.x(), input_shape.y(), weights_shape.x(), weights_shape.y(), conv_info);
+    const size_t idx_width   = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_height  = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::HEIGHT);
+    const size_t idx_channel = get_data_layout_dimension_index(input.data_layout(), DataLayoutDimension::CHANNEL);
+
+    const unsigned int input_width     = input_shape[idx_width];
+    const unsigned int input_height    = input_shape[idx_height];
+    const unsigned int weights_width   = weights_shape[idx_width];
+    const unsigned int weights_height  = weights_shape[idx_height];
+    const unsigned int weights_channel = weights_shape[idx_channel];
+
+    unsigned int output_width  = 0;
+    unsigned int output_height = 0;
+    std::tie(output_width, output_height) = scaled_dimensions(input_width, input_height, weights_width, weights_height, conv_info);
 
     TensorShape output_shape{ input_shape };
-    output_shape.set(0, output_width);
-    output_shape.set(1, output_height);
-    output_shape.set(2, weights_shape[3]);
+    output_shape.set(idx_width, output_width);
+    output_shape.set(idx_height, output_height);
+    output_shape.set(idx_channel, weights_channel);
 
     return output_shape;
 }

-- 
cgit v1.2.1
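
Usage sketch (not part of the patch above): the snippet below shows how the kernel-level API introduced here could be wired up by hand, assuming a 3x3 kernel applied to a 6x6 NCHW input with no padding, which gives a 4x4 convolved output covered by a 2x2 grid of 2x2 Winograd output tiles. Each group of 16 values along the third dimension of the source tensor is one 4x4 Winograd-domain tile; in the standard Lavin-Gray F(2x2,3x3) formulation the output transform computes Y = A^T * M * A per tile and adds the optional bias. The configure()/validate() signatures and compute_winograd_output_transform_shape() come from the headers added in this change; the tensor names, shapes, layout and the helper function itself are illustrative assumptions, since in practice this plumbing is expected to be handled by CLWinogradConvolutionLayer, which the commit message says is implemented at the function level.

// Illustrative sketch only; names, shapes and layout are assumptions, not part of the patch.
#include "arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

// Hypothetical helper: configures the output transform for one convolution.
// batched_gemm_out is expected to have shape [C, N, 16, batches]: C output channels,
// N = 2*2 = 4 tiles and 16 values per tile (one 4x4 Winograd-domain tile).
void configure_winograd_output_transform(CLWinogradOutputTransformKernel &kernel,
                                         CLTensor &batched_gemm_out, CLTensor &bias, CLTensor &dst)
{
    const Size2D kernel_dims(3U, 3U);           // only 3x3 kernels are supported
    const Size2D output_convolved_dims(4U, 4U); // assumed width/height of the convolution output
    const Size2D num_tiles(2U, 2U);             // 2x2 tiles of size 2x2 cover the 4x4 output

    // Derive the destination shape with the helper added to ShapeCalculator.h (NCHW assumed).
    const TensorShape dst_shape = misc::shape_calculator::compute_winograd_output_transform_shape(
        *batched_gemm_out.info(), output_convolved_dims, DataLayout::NCHW);
    dst.allocator()->init(TensorInfo(dst_shape, 1, DataType::F32));

    // Validate the configuration up front, then configure the kernel.
    ARM_COMPUTE_ERROR_THROW_ON(CLWinogradOutputTransformKernel::validate(
        batched_gemm_out.info(), bias.info(), dst.info(), kernel_dims, output_convolved_dims, num_tiles));
    kernel.configure(&batched_gemm_out, &bias, &dst, kernel_dims, output_convolved_dims, num_tiles);
}

Once configured, the kernel would be enqueued like any other CL kernel (for example through CLScheduler); in real code that sequencing, together with the input transform and the batched GEMM that produces batched_gemm_out, is what CLWinogradConvolutionLayer is meant to manage.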