From 368e63507ad62dc1607f752302d8db6b7d603f71 Mon Sep 17 00:00:00 2001 From: Giorgio Arena Date: Mon, 20 Aug 2018 15:06:07 +0100 Subject: COMPMID-1047 Extract Flatten function from Im2Col for NEON Change-Id: I80f3aaadc8cae8c9ca1a5a239e79bda302b89bd8 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144813 Tested-by: Jenkins Reviewed-by: Gian Marco Iodice --- arm_compute/core/NEON/NEKernels.h | 1 + .../core/NEON/kernels/NEFlattenLayerKernel.h | 81 ++++++++++ arm_compute/core/NEON/kernels/NEIm2ColKernel.h | 55 +++---- arm_compute/runtime/CL/functions/CLFlattenLayer.h | 2 +- .../runtime/NEON/functions/NEFlattenLayer.h | 17 ++- .../runtime/NEON/functions/NEFullyConnectedLayer.h | 6 +- arm_compute/runtime/NEON/functions/NEIm2Col.h | 44 +++--- src/core/NEON/NEFlattenLayerKernel.cpp | 137 +++++++++++++++++ src/core/NEON/kernels/NEIm2ColKernel.cpp | 170 +++++++-------------- src/runtime/NEON/functions/NEFlattenLayer.cpp | 11 +- .../NEON/functions/NEFullyConnectedLayer.cpp | 32 ++-- src/runtime/NEON/functions/NEIm2Col.cpp | 9 +- 12 files changed, 357 insertions(+), 208 deletions(-) create mode 100644 arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h create mode 100644 src/core/NEON/NEFlattenLayerKernel.cpp diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h index d99e5b4d3c..8664c7732c 100644 --- a/arm_compute/core/NEON/NEKernels.h +++ b/arm_compute/core/NEON/NEKernels.h @@ -61,6 +61,7 @@ #include "arm_compute/core/NEON/kernels/NEFillArrayKernel.h" #include "arm_compute/core/NEON/kernels/NEFillBorderKernel.h" #include "arm_compute/core/NEON/kernels/NEFillInnerBorderKernel.h" +#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h" #include "arm_compute/core/NEON/kernels/NEFloorKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMAssemblyBaseKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h" diff --git a/arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h 
b/arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h new file mode 100644 index 0000000000..e5c388a95a --- /dev/null +++ b/arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_NEFLATTENLAYERKERNEL_H__ +#define __ARM_COMPUTE_NEFLATTENLAYERKERNEL_H__ + +#include "arm_compute/core/NEON/INEKernel.h" + +namespace arm_compute +{ +class ITensor; + +/** Interface for the flatten layer kernel. 
*/ +class NEFlattenLayerKernel : public INEKernel +{ +public: + const char *name() const override + { + return "NEFlattenLayerKernel"; + } + /** Default constructor */ + NEFlattenLayerKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEFlattenLayerKernel(const NEFlattenLayerKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + NEFlattenLayerKernel &operator=(const NEFlattenLayerKernel &) = delete; + /** Allow instances of this class to be moved */ + NEFlattenLayerKernel(NEFlattenLayerKernel &&) = default; + /** Allow instances of this class to be moved */ + NEFlattenLayerKernel &operator=(NEFlattenLayerKernel &&) = default; + /** Default destructor */ + ~NEFlattenLayerKernel() = default; + + /** Set the input and output of the kernel. + * + * @param[in] input First input tensor to flatten with at least 3 dimensions. + * The dimensions above the third will be interpreted as batches. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[out] output Output tensor with shape [w*h*d, input_batches] where: + * w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input + */ + void configure(const ITensor *input, ITensor *output); + /** Static function to check if given info will lead to a valid configuration of @ref NEFlattenLayerKernel + * + * @param[in] input First input tensor to flatten with at least 3 dimensions. + * The dimensions above the third will be interpreted as batches. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[out] output Output tensor with shape [w*h*d, input_batches] where: + * w = width input tensor, h = height input tensor and d = depth input tensor. 
Data type supported: same as @p input + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output); + + // Inherited methods overridden: + void run(const Window &window, const ThreadInfo &info) override; + +private: + const ITensor *_input; + ITensor *_output; +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_NEFLATTENLAYERKERNEL_H__ */ diff --git a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h index 38bdff0e98..ec89f0f713 100644 --- a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h +++ b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h @@ -76,55 +76,46 @@ public: /** Set the input and output of the kernel. * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 - * Note: QASYMM8 works only for has_bias = false - * @param[out] output The output tensor. Data types supported: Same as @p input - * @param[in] kernel_dims The kernel dimensions (width and height). - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] has_bias In case biases are provided expands the matrix with 1. - * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). - * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported - * @param[in] is_fully_connected (Optional) Determines whether this kernel will be called by @ref NEFullyConnectedLayer in order to validate the arguments - * @param[in] is_flatten (Optional) Determines whether this kernel will be called by @ref NEFlattenLayer in order to validate the arguments + * @param[in] input The input tensor to convert. 
3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 + * Note: QASYMM8 works only for has_bias = false + * @param[out] output The output tensor. Data types supported: Same as @p input + * @param[in] kernel_dims The kernel dimensions (width and height). + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] has_bias In case biases are provided expands the matrix with 1. + * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). + * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported */ void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, - bool has_bias, const Size2D &dilation = Size2D(1U, 1U), unsigned int num_groups = 1, bool is_fully_connected = false, bool is_flatten = false); + bool has_bias, const Size2D &dilation = Size2D(1U, 1U), unsigned int num_groups = 1); /** Static function to check if given info will lead to a valid configuration of @ref NEIm2ColKernel * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 - * Note: QASYMM8 works only for has_bias = false - * @param[in] output The output tensor. Data types supported: Same as @p input - * @param[in] kernel_dims The kernel dimensions (width and height). - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] has_bias In case biases are provided expands the matrix with 1. - * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported - * @param[in] is_fully_connected (Optional)Determines whether this kernel will be called by @ref NEFullyConnectedLayer in order to validate the arguments - * @param[in] is_flatten (Optional) Determines whether this kernel will be called by @ref NEFlattenLayer in order to validate the arguments + * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 + * Note: QASYMM8 works only for has_bias = false + * @param[in] output The output tensor. Data types supported: Same as @p input + * @param[in] kernel_dims The kernel dimensions (width and height). + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] has_bias In case biases are provided expands the matrix with 1. + * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). + * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported * * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, - bool has_bias, const Size2D &dilation = Size2D(1U, 1U), unsigned int num_groups = 1, bool is_fully_connected = false, bool is_flatten = false); + bool has_bias, const Size2D &dilation = Size2D(1U, 1U), unsigned int num_groups = 1); // Inherited methods overridden: void run(const Window &window, const ThreadInfo &info) override; private: - /** Template function to run the im2col optimised for the fully connected and flatten layers case - * - * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). 
- */ - template <typename T> - void run_reduced(const Window &window); - /** Template function to run the im2col used for the convolution layer case + /** Template function to run the im2col * * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()). */ template <typename T, bool has_pads> - void run_generic(const Window &window); + void run_im2col(const Window &window); + /** Common signature for all the specialised im2col functions * * @param[in] window Region on which to execute the kernel. diff --git a/arm_compute/runtime/CL/functions/CLFlattenLayer.h b/arm_compute/runtime/CL/functions/CLFlattenLayer.h index ebc0e5e53f..24ed56f345 100644 --- a/arm_compute/runtime/CL/functions/CLFlattenLayer.h +++ b/arm_compute/runtime/CL/functions/CLFlattenLayer.h @@ -47,7 +47,7 @@ public: * w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input */ void configure(const ICLTensor *input, ICLTensor *output); - /** Static function to check if given info will lead to a valid configuration of @ref CLTranspose + /** Static function to check if given info will lead to a valid configuration of @ref CLFlattenLayer * * @param[in] input First input tensor to flatten with at least 3 dimensions. * The dimensions above the third will be interpreted as batches. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 diff --git a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h index 2c259fa178..26d7c7f636 100644 --- a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h +++ b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h @@ -31,11 +31,7 @@ namespace arm_compute { class ITensor; -/** Basic function to execute flatten. This function calls the following NEON kernel: -* -* -# @ref NEIm2ColKernel -* -*/ +/** Basic function to execute flatten layer kernel. 
*/ class NEFlattenLayer : public INESimpleFunction { public: @@ -46,6 +42,17 @@ public: * w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input */ void configure(const ITensor *input, ITensor *output); + + /** Static function to check if given info will lead to a valid configuration of @ref NEFlattenLayer + * + * @param[in] input First input tensor to flatten with at least 3 dimensions. + * The dimensions above the third will be interpreted as batches. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[out] output Output tensor with shape [w*h*d, input_batches] where: + * w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *output); }; } // namespace arm_compute diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h index fe0f2f03f7..9c9074ceec 100644 --- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h +++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h @@ -26,8 +26,8 @@ #include "arm_compute/runtime/IFunction.h" +#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.h" -#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h" #include "arm_compute/core/NEON/kernels/NETransposeKernel.h" #include "arm_compute/runtime/MemoryGroup.h" #include "arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h" @@ -129,14 +129,14 @@ private: void configure_mm(const ITensor *input, const ITensor *weights, ITensor *output); MemoryGroup _memory_group; - NEIm2ColKernel _im2col_kernel; + NEFlattenLayerKernel _flatten_kernel; NEConvertFullyConnectedWeights _convert_weights; NEFullyConnectedLayerReshapeWeights _reshape_weights_function; NEGEMM 
_mm_gemm; NEGEMMLowpMatrixMultiplyCore _mm_gemmlowp; NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint _gemmlowp_output_stage; NEGEMMMatrixAccumulateBiasesKernel _accumulate_biases_kernel; - Tensor _im2col_output; + Tensor _flatten_output; Tensor _gemmlowp_output; Tensor _converted_weights_output; Tensor _reshape_weights_output; diff --git a/arm_compute/runtime/NEON/functions/NEIm2Col.h b/arm_compute/runtime/NEON/functions/NEIm2Col.h index 9df4f070d8..de4780f8f0 100644 --- a/arm_compute/runtime/NEON/functions/NEIm2Col.h +++ b/arm_compute/runtime/NEON/functions/NEIm2Col.h @@ -42,38 +42,34 @@ public: NEIm2Col(); /** Configure the im2col NEON kernel * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 - * Note: QASYMM8 works only for has_bias = false - * @param[out] output The output tensor. Data types supported: Same as @p input - * @param[in] kernel_dims The kernel dimensions (width and height). - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] has_bias In case biases are provided expands the matrix with 1. - * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). - * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution - * @param[in] is_fully_connected (Optional) Determines whether this function will be called by @ref NEFullyConnectedLayer in order to validate the arguments - * @param[in] is_flatten (Optional) Determines whether this function will be called by @ref NEFlattenLayer in order to validate the arguments + * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. 
Data types supported: QASYMM8/F16/F32 + * Note: QASYMM8 works only for has_bias = false + * @param[out] output The output tensor. Data types supported: Same as @p input + * @param[in] kernel_dims The kernel dimensions (width and height). + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] has_bias In case biases are provided expands the matrix with 1. + * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). + * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution */ void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U), - unsigned int num_groups = 1, bool is_fully_connected = false, bool is_flatten = false); + unsigned int num_groups = 1); /** Static function to check if given info will lead to a valid configuration of @ref NEIm2Col * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 - * Note: QASYMM8 works only for has_bias = false - * @param[in] output The output tensor. Data types supported: Same as @p input - * @param[in] kernel_dims The kernel dimensions (width and height). - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] has_bias In case biases are provided expands the matrix with 1. - * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 
- * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution - * @param[in] is_fully_connected (Optional) Determines whether this function will be called by @ref NEFullyConnectedLayer in order to validate the arguments - * @param[in] is_flatten (Optional) Determines whether this function will be called by @ref NEFlattenLayer in order to validate the arguments + * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], + * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 + * Note: QASYMM8 works only for has_bias = false + * @param[in] output The output tensor. Data types supported: Same as @p input + * @param[in] kernel_dims The kernel dimensions (width and height). + * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. + * @param[in] has_bias In case biases are provided expands the matrix with 1. + * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). + * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution * * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation = Size2D(1U, 1U), - unsigned int num_groups = 1, bool is_fully_connected = false, bool is_flatten = false); + unsigned int num_groups = 1); // Inherited methods overridden: void run() override; diff --git a/src/core/NEON/NEFlattenLayerKernel.cpp b/src/core/NEON/NEFlattenLayerKernel.cpp new file mode 100644 index 0000000000..b8452fb5fc --- /dev/null +++ b/src/core/NEON/NEFlattenLayerKernel.cpp @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2018 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h" + +#include "arm_compute/core/CPP/Validate.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/ITensor.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Types.h" +#include "arm_compute/core/Validate.h" + +#include "arm_compute/core/utils/misc/ShapeCalculator.h" + +#include <cstring> + +using namespace arm_compute; +using namespace misc::shape_calculator; + +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, + DataType::U16, DataType::S16, + DataType::U32, DataType::S32, + DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + + // Checks performed when output is configured + if(output->total_size() != 0) + { + const TensorInfo tensor_info_output = input->clone()->set_tensor_shape(compute_flatten_shape(input)); + + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &tensor_info_output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + } + + return Status{}; +} + +std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) +{ + // Output tensor auto initialization if not yet initialized + auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_flatten_shape(input))); + + Window win = calculate_max_window(*input, Steps()); // Flatten does not need paddings + + output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape())); + + return std::make_pair(Status{}, win); +} +} // namespace + +NEFlattenLayerKernel::NEFlattenLayerKernel() + : _input(nullptr), _output(nullptr) +{ +} + +void NEFlattenLayerKernel::configure(const ITensor *input, ITensor *output) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info())); + 
+ _input = input; + _output = output; + + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), output->info()); + ARM_COMPUTE_ERROR_THROW_ON(win_config.first); + INEKernel::configure(win_config.second); +} + +Status NEFlattenLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first); + return Status{}; +} + +void NEFlattenLayerKernel::run(const Window &window, const ThreadInfo &info) +{ + ARM_COMPUTE_UNUSED(info); + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); + + const size_t in_width = _input->info()->dimension(0); + const size_t in_height = _input->info()->dimension(1); + const size_t out_step_x = in_width * _input->info()->element_size(); + const size_t out_step_y = out_step_x * in_height; + + Window in_window(window); + in_window.set(Window::DimX, Window::Dimension(0, 1, 1)); + + Window out_window; + out_window.use_tensor_dimensions(_output->info()->tensor_shape()); + out_window.set(Window::DimX, Window::Dimension(out_window.x().start(), out_window.x().end(), in_width)); + + Window in_slice = in_window.first_slice_window_3D(); + Window out_slice = out_window.first_slice_window_1D(); + + do + { + Iterator in(_input, in_slice); + Iterator out(_output, out_slice); + + uint8_t *out_ptr = out.ptr(); + + execute_window_loop(in_slice, [&](const Coordinates & id) + { + memcpy(out_ptr + id.y() * out_step_x + id.z() * out_step_y, in.ptr(), out_step_x); + }, + in); + } + while(in_window.slide_window_slice_3D(in_slice) && out_window.slide_window_slice_1D(out_slice)); +} diff --git a/src/core/NEON/kernels/NEIm2ColKernel.cpp b/src/core/NEON/kernels/NEIm2ColKernel.cpp index 98b1488a9d..e5d31289a4 100644 --- a/src/core/NEON/kernels/NEIm2ColKernel.cpp +++ 
b/src/core/NEON/kernels/NEIm2ColKernel.cpp @@ -41,11 +41,12 @@ #include using namespace arm_compute; +using namespace misc::shape_calculator; namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, - bool has_bias, const Size2D &dilation, unsigned int num_groups, bool is_fully_connected, bool is_flatten) + bool has_bias, const Size2D &dilation, unsigned int num_groups) { ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); @@ -55,18 +56,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c if(output->total_size() > 0) { - TensorShape expected_output_shape; - - if(is_flatten || is_fully_connected) - { - expected_output_shape = misc::shape_calculator::compute_flatten_shape(input); - } - else - { - expected_output_shape = misc::shape_calculator::compute_im2col_conv_shape(input, kernel_dims, conv_info, has_bias, dilation, false); - } - - TensorInfo expected_output = output->clone()->set_tensor_shape(expected_output_shape); + TensorInfo expected_output = output->clone()->set_tensor_shape(compute_im2col_conv_shape(input, kernel_dims, conv_info, has_bias, dilation, false)); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output, output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } @@ -74,6 +64,31 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c return Status{}; } +std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, + bool has_bias, const Size2D &dilation) +{ + const unsigned int width_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); + const unsigned int height_idx = get_data_layout_dimension_index(input->data_layout(), 
DataLayoutDimension::HEIGHT); + const unsigned int channel_idx = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); + + std::pair<unsigned int, unsigned int> convolved_dims = scaled_dimensions(input->dimension(width_idx), input->dimension(height_idx), + kernel_dims.width, kernel_dims.height, + conv_info, dilation); + + // Output tensor auto initialization if not yet initialized + auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_im2col_conv_shape(input, kernel_dims, conv_info, has_bias, dilation, false))); + + Window win = calculate_max_window(*input, Steps()); + win.set(width_idx, Window::Dimension(0, convolved_dims.first, 1)); + win.set(height_idx, Window::Dimension(0, convolved_dims.second, 1)); + win.set(channel_idx, Window::Dimension(0, 1, 1)); + + // The NEIm2ColKernel doesn't need padding so update_window_and_padding() can be skipped + output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape())); + + return std::make_pair(Status{}, win); +} + template <typename T, bool has_pads> inline void linearize_volume(const uint8_t *const in_ptr, T *out_ptr, @@ -174,7 +189,7 @@ inline void linearize_volume(const uint8_t *const in_ptr, } // namespace template <typename T, bool has_pads> -void NEIm2ColKernel::run_generic(const Window &window) +void NEIm2ColKernel::run_im2col(const Window &window) { ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); @@ -244,66 +259,21 @@ void NEIm2ColKernel::run_generic(const Window &window) in, out); } -template <typename T> -void NEIm2ColKernel::run_reduced(const Window &window) -{ - const size_t in_width = _input->info()->dimension(0); - const size_t in_height = _input->info()->dimension(1); - const size_t out_step_x = in_width * _input->info()->element_size(); - const size_t out_step_y = out_step_x * in_height; - const size_t out_width = _output->info()->dimension(0); - - Window in_window(window); - in_window.set(Window::DimX, Window::Dimension(0, 1, 1)); - - Window out_window; - 
out_window.use_tensor_dimensions(_output->info()->tensor_shape()); - out_window.set(Window::DimX, Window::Dimension(out_window.x().start(), out_window.x().end(), in_width)); - - Window in_slice = in_window.first_slice_window_3D(); - Window out_slice = out_window.first_slice_window_1D(); - - do - { - Iterator in(_input, in_slice); - Iterator out(_output, out_slice); - - uint8_t *out_ptr = out.ptr(); - - execute_window_loop(in_slice, [&](const Coordinates & id) - { - memcpy(out_ptr + id.y() * out_step_x + id.z() * out_step_y, in.ptr(), out_step_x); - }, - in); - - // Add bias - if(_has_bias) - { - *(reinterpret_cast(out_ptr) + out_width - 1) = static_cast(1); - } - } - while(in_window.slide_window_slice_3D(in_slice) && out_window.slide_window_slice_1D(out_slice)); -} - NEIm2ColKernel::NEIm2ColKernel() : _func(), _input(nullptr), _output(nullptr), _convolved_dims(), _conv_info(), _kernel_width(0), _kernel_height(0), _has_bias(false), _dilation(1U, 1U) { } void NEIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, - bool has_bias, const Size2D &dilation, unsigned int num_groups, bool is_fully_connected, bool is_flatten) + bool has_bias, const Size2D &dilation, unsigned int num_groups) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Perform validation step - ARM_COMPUTE_UNUSED(is_fully_connected, is_flatten); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups)); ARM_COMPUTE_UNUSED(num_groups); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation, num_groups, is_fully_connected, is_flatten)); const DataLayout data_layout = input->info()->data_layout(); const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); const unsigned int height_idx = get_data_layout_dimension_index(data_layout, 
DataLayoutDimension::HEIGHT); - const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); _input = input; _output = output; @@ -316,73 +286,35 @@ void NEIm2ColKernel::configure(const ITensor *input, ITensor *output, const Size _conv_info, _dilation); _has_bias = has_bias; - unsigned int stride_x = 0; - unsigned int stride_y = 0; - std::tie(stride_x, stride_y) = conv_info.stride(); - - bool run_img2col_reduced = (output->info()->dimension(0) == (input->info()->dimension(0) * input->info()->dimension(1) * input->info()->dimension(2))) && (TensorShape::num_max_dimensions >= 4) - && (std::equal(input->info()->tensor_shape().cbegin() + 3, - input->info()->tensor_shape().cend(), - output->info()->tensor_shape().cbegin() + 1)) - && ((stride_x == 1) && (stride_y == 1) && !conv_info.has_padding()) - && ((dilation.x() == 1) && (dilation.y() == 1)); - - Window window = calculate_max_window(*input->info(), Steps()); - - if(run_img2col_reduced) + switch(_input->info()->data_type()) { - switch(_input->info()->data_type()) - { - case DataType::F32: - _func = &NEIm2ColKernel::run_reduced; - break; + case DataType::F32: + _func = (!conv_info.has_padding()) ? &NEIm2ColKernel::run_im2col : &NEIm2ColKernel::run_im2col; + break; #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - _func = &NEIm2ColKernel::run_reduced; - break; + case DataType::F16: + _func = (!conv_info.has_padding()) ? &NEIm2ColKernel::run_im2col : &NEIm2ColKernel::run_im2col; + break; #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - case DataType::QASYMM8: - _func = &NEIm2ColKernel::run_reduced; - break; - default: - ARM_COMPUTE_ERROR("Data type not supported"); - break; - } + case DataType::QASYMM8: + _func = (!conv_info.has_padding()) ? 
&NEIm2ColKernel::run_im2col : &NEIm2ColKernel::run_im2col; + break; + default: + ARM_COMPUTE_ERROR("Data type not supported"); + break; } - else - { - switch(_input->info()->data_type()) - { - case DataType::F32: - _func = (!conv_info.has_padding()) ? &NEIm2ColKernel::run_generic : &NEIm2ColKernel::run_generic; - break; -#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - case DataType::F16: - _func = (!conv_info.has_padding()) ? &NEIm2ColKernel::run_generic : &NEIm2ColKernel::run_generic; - break; -#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ - case DataType::QASYMM8: - _func = (!conv_info.has_padding()) ? &NEIm2ColKernel::run_generic : &NEIm2ColKernel::run_generic; - break; - default: - ARM_COMPUTE_ERROR("Data type not supported"); - break; - } - window.set(width_idx, Window::Dimension(0, _convolved_dims.first, 1)); - window.set(height_idx, Window::Dimension(0, _convolved_dims.second, 1)); - window.set(channel_idx, Window::Dimension(0, 1, 1)); - } - - // The NEIm2ColKernel doesn't need padding so update_window_and_padding() can be skipped - output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape())); - IKernel::configure(window); + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), output->info(), kernel_dims, conv_info, has_bias, dilation); + ARM_COMPUTE_ERROR_THROW_ON(win_config.first); + INEKernel::configure(win_config.second); } Status NEIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, - bool has_bias, const Size2D &dilation, unsigned int num_groups, bool is_fully_connected, bool is_flatten) + bool has_bias, const Size2D &dilation, unsigned int num_groups) { - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups, is_fully_connected, is_flatten)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, 
has_bias, dilation, num_groups)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), kernel_dims, conv_info, has_bias, dilation).first); return Status{}; } diff --git a/src/runtime/NEON/functions/NEFlattenLayer.cpp b/src/runtime/NEON/functions/NEFlattenLayer.cpp index 1814d61e2f..57bef2b933 100644 --- a/src/runtime/NEON/functions/NEFlattenLayer.cpp +++ b/src/runtime/NEON/functions/NEFlattenLayer.cpp @@ -23,7 +23,7 @@ */ #include "arm_compute/runtime/NEON/functions/NEFlattenLayer.h" -#include "arm_compute/core/NEON/kernels/NEIm2ColKernel.h" +#include "arm_compute/core/NEON/kernels/NEFlattenLayerKernel.h" #include "arm_compute/core/Size2D.h" #include "support/ToolchainSupport.h" @@ -31,7 +31,12 @@ using namespace arm_compute; void NEFlattenLayer::configure(const ITensor *input, ITensor *output) { - auto k = arm_compute::support::cpp14::make_unique(); - k->configure(input, output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false, Size2D(1U, 1U), 1, false, true); + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(input, output); _kernel = std::move(k); +} + +Status NEFlattenLayer::validate(const ITensorInfo *input, const ITensorInfo *output) +{ + return NEFlattenLayerKernel::validate(input, output); } \ No newline at end of file diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp index f1606aa93e..60f6294394 100644 --- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp +++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp @@ -74,8 +74,8 @@ Status NEFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, c } NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr memory_manager) - : _memory_group(std::move(memory_manager)), _im2col_kernel(), _convert_weights(), _reshape_weights_function(), _mm_gemm(), _mm_gemmlowp(), _gemmlowp_output_stage(), _accumulate_biases_kernel(), - _im2col_output(), 
_gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false), + : _memory_group(std::move(memory_manager)), _flatten_kernel(), _convert_weights(), _reshape_weights_function(), _mm_gemm(), _mm_gemmlowp(), _gemmlowp_output_stage(), _accumulate_biases_kernel(), + _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false), _is_fc_after_conv(false), _accumulate_biases(false), _is_quantized(false), _is_prepared(false) { } @@ -112,19 +112,19 @@ void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITenso // If the fully connected layer is called after a convolution layer, the input tensor must be linearized - // Initialize output tensor for im2col - TensorShape shape_im2col = compute_flatten_shape(input->info()); - _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col)); + // Initialize output tensor for flatten + TensorShape shape_flatten = compute_flatten_shape(input->info()); + _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten)); - // Configure im2col kernel - _memory_group.manage(&_im2col_output); - _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false, Size2D(1U, 1U), 1, true); + // Configure flatten kernel + _memory_group.manage(&_flatten_output); + _flatten_kernel.configure(input, &_flatten_output); // Configure matrix multiply kernel - configure_mm(&_im2col_output, weights, output); + configure_mm(&_flatten_output, weights, output); - // Allocate the output tensor for im2col once all the configure methods have been called - _im2col_output.allocator()->allocate(); + // Allocate the output tensor for flatten once all the configure 
methods have been called + _flatten_output.allocator()->allocate(); } void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, ITensor *output) @@ -249,7 +249,7 @@ Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn bool is_fc_after_conv = true; bool is_quantized = is_data_type_quantized_asymmetric(input->data_type()); - const ITensorInfo &im2col_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input))); + const ITensorInfo &flatten_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input))); const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights))); const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone()); const ITensorInfo &gemmlowp_output = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32)); @@ -307,9 +307,9 @@ Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn // Fully Connected layer after a Convolution Layer without batches ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2)))); - // Validate im2col kernel - ARM_COMPUTE_RETURN_ON_ERROR(NEIm2ColKernel::validate(input, &im2col_input, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false, Size2D(1U, 1U), 1, true)); - input_to_use = &im2col_input; + // Validate flatten kernel + ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input)); + input_to_use = &flatten_input; } else { @@ -337,7 +337,7 @@ void NEFullyConnectedLayer::run() // Linearize input if it comes from a convolutional layer if(_is_fc_after_conv) { - NEScheduler::get().schedule(&_im2col_kernel, 
Window::DimY); + NEScheduler::get().schedule(&_flatten_kernel, Window::DimY); } // Run matrix multiply diff --git a/src/runtime/NEON/functions/NEIm2Col.cpp b/src/runtime/NEON/functions/NEIm2Col.cpp index 4245b650e2..9102fca7f6 100644 --- a/src/runtime/NEON/functions/NEIm2Col.cpp +++ b/src/runtime/NEON/functions/NEIm2Col.cpp @@ -34,18 +34,17 @@ NEIm2Col::NEIm2Col() { } -void NEIm2Col::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, unsigned int num_groups, - bool is_fully_connected, bool is_flatten) +void NEIm2Col::configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, unsigned int num_groups) { _y_dim = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); - _kernel.configure(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups, is_fully_connected, is_flatten); + _kernel.configure(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups); } Status NEIm2Col::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, const Size2D &dilation, - unsigned int num_groups, bool is_fully_connected, bool is_flatten) + unsigned int num_groups) { - return NEIm2ColKernel::validate(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups, is_fully_connected, is_flatten); + return NEIm2ColKernel::validate(input, output, kernel_dims, conv_info, has_bias, dilation, num_groups); } void NEIm2Col::run() -- cgit v1.2.1