From a046e164b96a8441b2fa14ef578f7db46a0e97da Mon Sep 17 00:00:00 2001 From: Michele Di Giorgio Date: Tue, 8 Oct 2019 09:36:26 +0100 Subject: COMPMID-2600: Implement a new and generic depthwise convolution for CL QASYMM8 NHWC The NCHW case is supported at function level by permuting the inputs/outputs to NHWC. This patch also removes CLDirectConvolutionLayerOutputStageKernel which is deprecated and some kernels which were only used in the generic case of depthwise convolution. Change-Id: I91e0f02d0a2f4a4a352e08c248e648944137fe68 Signed-off-by: Michele Di Giorgio Reviewed-on: https://review.mlplatform.org/c/2056 Reviewed-by: Giorgio Arena Comments-Addressed: Arm Jenkins Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice --- arm_compute/core/CL/CLKernels.h | 4 - .../CLDepthwiseConvolutionLayerNativeKernel.h | 4 +- ...seConvolutionLayerReshapeWeightsGenericKernel.h | 79 ------ .../core/CL/kernels/CLDepthwiseIm2ColKernel.h | 90 ------- .../CL/kernels/CLDepthwiseVectorToTensorKernel.h | 79 ------ .../CLDirectConvolutionLayerOutputStageKernel.h | 91 ------- .../CL/functions/CLDepthwiseConvolutionLayer.h | 47 ++-- docs/00_introduction.dox | 11 +- src/core/CL/CLKernelLibrary.cpp | 5 +- src/core/CL/cl_kernels/activation_quant_helpers.h | 2 +- src/core/CL/cl_kernels/depthwise_convolution.cl | 187 +------------- .../cl_kernels/depthwise_convolution_quantized.cl | 165 ++++++++++++- .../direct_convolution_1x1_3x3_5x5_quantized.cl | 78 +----- src/core/CL/cl_kernels/helpers.h | 22 ++ src/core/CL/cl_kernels/helpers_asymm.h | 2 + .../CLDepthwiseConvolutionLayerNativeKernel.cpp | 56 ++++- ...ConvolutionLayerReshapeWeightsGenericKernel.cpp | 147 ----------- src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp | 148 ------------ .../CL/kernels/CLDepthwiseVectorToTensorKernel.cpp | 125 ---------- .../CLDirectConvolutionOutputStageKernel.cpp | 209 ---------------- ...pQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp | 5 +- .../CL/functions/CLDepthwiseConvolutionLayer.cpp | 268 
++++++++------------- src/runtime/CL/tuners/BifrostTuner.cpp | 24 +- tests/datasets/DepthwiseConvolutionLayerDataset.h | 16 +- 24 files changed, 381 insertions(+), 1483 deletions(-) delete mode 100644 arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h delete mode 100644 arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h delete mode 100644 arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h delete mode 100644 arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h delete mode 100644 src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp delete mode 100644 src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp delete mode 100644 src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp delete mode 100644 src/core/CL/kernels/CLDirectConvolutionOutputStageKernel.cpp diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h index 3d9b2c81cd..c3c485db7c 100644 --- a/arm_compute/core/CL/CLKernels.h +++ b/arm_compute/core/CL/CLKernels.h @@ -56,15 +56,11 @@ #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h" #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h" #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h" -#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h" #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h" -#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h" -#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h" #include "arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h" #include "arm_compute/core/CL/kernels/CLDerivativeKernel.h" #include "arm_compute/core/CL/kernels/CLDilateKernel.h" #include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h" -#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h" #include 
"arm_compute/core/CL/kernels/CLElementWiseUnaryLayerKernel.h" #include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" #include "arm_compute/core/CL/kernels/CLErodeKernel.h" diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h index f8c841ab6a..31ec871123 100644 --- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h +++ b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h @@ -49,7 +49,7 @@ public: CLDepthwiseConvolutionLayerNativeKernel &operator=(CLDepthwiseConvolutionLayerNativeKernel &&) = default; /** Initialize the function's source, destination and parameters * - * @param[in] input Source tensor. Data type supported: FP32/FP16. Data layout supported: NHWC + * @param[in] input Source tensor. Data type supported: QASYMM8/FP32/FP16. Data layout supported: NHWC * @param[in] weights Weights tensor. A 3D tensor with dimensions [IFM, N, M]. Data type supported: Same as @p input. * @param[in] biases Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed. * Data type supported: Same as @p input. @@ -64,7 +64,7 @@ public: const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U)); /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel * - * @param[in] input Source tensor info. Data type supported: FP32/FP16. Data layout supported: NHWC + * @param[in] input Source tensor info. Data type supported: QASYMM8/FP32/FP16. Data layout supported: NHWC * @param[in] weights Weights tensor info. A 3D tensor with dimensions [IFM, N, M]. Data type supported: Same as @p input. * @param[in] biases Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed. * Data type supported: Same as @p input. 
diff --git a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h deleted file mode 100644 index 3f969957e1..0000000000 --- a/arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSGENERICKERNEL_H__ -#define __ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSGENERICKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Interface for the depthwise weights reshape kernel. - * This kernel reshape original weights' low 2D dimensions into a single row and - * have the second dimension as the original depth size. 
- * - **/ -class CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel(const CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel &operator=(const CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel &) = delete; - /** Allow instances of this class to be moved */ - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel(CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel &&) = default; - /** Allow instances of this class to be moved */ - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel &operator=(CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel &&) = default; - /** Set the input and output of the kernel. - * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. - * Data type supported: QASYMM8/F16/F32. - * @param[out] output The output tensor. Data type supported: same as @p input. - * @param[in] biases (Optional) The input biases to add. Shape [IFM]. Data type supported: same as @p input. - */ - void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *biases = nullptr); - /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel - * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM]. - * Data type supported: QASYMM8/F32. - * @param[in] output The output tensor. Data type supported: same as @p input. - * @param[in] biases (Optional) The input biases to add. Shape [IFM]. 
Data type supported: same as @p input. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases = nullptr); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; - const ICLTensor *_biases; - ICLTensor *_output; -}; -} // arm_compute -#endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSGENERICKERNEL_H__ */ diff --git a/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h deleted file mode 100644 index 15798471a8..0000000000 --- a/arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef __ARM_COMPUTE_CLDEPTHWISEIM2COLKERNEL_H__ -#define __ARM_COMPUTE_CLDEPTHWISEIM2COLKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" -#include "arm_compute/core/Size2D.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Interface for the depthwise im2col reshape kernel. - * This kernel reshape the input low 3 dimensions to a new 3D shape where the output's first dimension is - * the linear patch size (FILTER_WIDTH * FILTER_HEIGHT) and second dimension is number of patches per image and third dimension unchanged . - **/ -class CLDepthwiseIm2ColKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLDepthwiseIm2ColKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthwiseIm2ColKernel(const CLDepthwiseIm2ColKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthwiseIm2ColKernel &operator=(const CLDepthwiseIm2ColKernel &) = delete; - /** Allow instances of this class to be moved */ - CLDepthwiseIm2ColKernel(CLDepthwiseIm2ColKernel &&) = default; - /** Allow instances of this class to be moved */ - CLDepthwiseIm2ColKernel &operator=(CLDepthwiseIm2ColKernel &&) = default; - /** Set the input and output of the kernel. - * - * @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32 - * @param[out] output The output tensor. First 3 lower dimensions represent a transform of each 3D input, - * while every dimension above 3 represents a batch. Data types supported: Same as @p input - * @param[in] kernel_dims The kernel dimensions (width and height). - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] has_bias Boolean that specifies if the depthwise convolution has bias. 
- * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1. - * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). - */ - void configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias = false, unsigned int depth_multiplier = 1, - const Size2D &dilation = Size2D(1U, 1U)); - /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseIm2ColKernel - * - * @param[in] input The input tensor info to convert. 3 lower dimensions represent a single input [width, height, IFM], - * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F32 - * @param[in] output The output tensor info. First 3 lower dimensions represent a transform of each 3D input, - * while every dimension above 3 represents a batch. Data types supported: Same as @p input - * @param[in] kernel_dims The kernel dimensions (width and height). - * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. - * @param[in] has_bias Boolean that specifies if the depthwise convolution has bias. - * @param[in] depth_multiplier Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1. - * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 
- * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, - const Size2D &dilation = Size2D(1U, 1U)); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; - ICLTensor *_output; -}; -} // arm_compute -#endif /*__ARM_COMPUTE_CLDEPTHWISEIM2COLKERNEL_H__ */ diff --git a/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h b/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h deleted file mode 100644 index c9ec8e13bf..0000000000 --- a/arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2017-2018 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef __ARM_COMPUTE_CLDEPTHWISEVECTORTOTENSORKERNEL_H__ -#define __ARM_COMPUTE_CLDEPTHWISEVECTORTOTENSORKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ICLTensor; - -/** Interface for the depthwise vector to tensor kernel. - * - * This kernel takes the 1D tensor that's been produced by the MatrixVectorMultiply - * kernel and reshapes it to given width and height (previously calculated, based - * on input/weights dimensions and convolution strides and padding). - * - **/ -class CLDepthwiseVectorToTensorKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLDepthwiseVectorToTensorKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthwiseVectorToTensorKernel(const CLDepthwiseVectorToTensorKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDepthwiseVectorToTensorKernel &operator=(const CLDepthwiseVectorToTensorKernel &) = delete; - /** Allow instances of this class to be moved */ - CLDepthwiseVectorToTensorKernel(CLDepthwiseVectorToTensorKernel &&) = default; - /** Allow instances of this class to be moved */ - CLDepthwiseVectorToTensorKernel &operator=(CLDepthwiseVectorToTensorKernel &&) = default; - /** Set the input and output of the kernel. - * - * @param[in] input The input vector to convert. Data type supported: QASYMM8/S32/F16/F32. - * @param[out] output The output tensor. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: same as @p input. - * @param[in] conv_w The converted tensor's width. - * @param[in] conv_h The converted tensor's height. - */ - void configure(const ICLTensor *input, ICLTensor *output, size_t conv_w, size_t conv_h); - /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseVectorToTensorKernel - * - * @param[in] input The input vector to convert. 
Data type supported: QASYMM8/S32/F16/F32. - * @param[in] output The output tensor. 3 lower dimensions represent a single input [width, height, IFM]. Data type supported: same as @p input. - * @param[in] conv_w The converted tensor's width. - * @param[in] conv_h The converted tensor's height. - * - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *output, size_t conv_w, size_t conv_h); - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - const ICLTensor *_input; - ICLTensor *_output; -}; -} // arm_compute -#endif /*__ARM_COMPUTE_CLDEPTHWISEVECTORTOTENSORKERNEL_H__ */ diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h deleted file mode 100644 index 80bc012d9f..0000000000 --- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2018-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef __ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__ -#define __ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__ - -#include "arm_compute/core/CL/ICLKernel.h" - -namespace arm_compute -{ -class ITensor; -/** OpenCL kernel to accumulate the biases, if provided, or downscale in case of quantized input. - * - * @deprecated This kernel is deprecated and will be removed in release 19.05 - * - * @note We assume bias to be shared - * - */ -class CLDirectConvolutionLayerOutputStageKernel : public ICLKernel -{ -public: - /** Default constructor */ - CLDirectConvolutionLayerOutputStageKernel(); - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDirectConvolutionLayerOutputStageKernel(const CLDirectConvolutionLayerOutputStageKernel &) = delete; - /** Prevent instances of this class from being copied (As this class contains pointers) */ - CLDirectConvolutionLayerOutputStageKernel &operator=(const CLDirectConvolutionLayerOutputStageKernel &) = delete; - /** Allow instances of this class to be moved */ - CLDirectConvolutionLayerOutputStageKernel(CLDirectConvolutionLayerOutputStageKernel &&) = default; - /** Allow instances of this class to be moved */ - CLDirectConvolutionLayerOutputStageKernel &operator=(CLDirectConvolutionLayerOutputStageKernel &&) = default; - /** Default destructor */ - ~CLDirectConvolutionLayerOutputStageKernel() = default; - /** Set the accumulate buffer and the biases of the kernel. - * - * @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place. - * Data type supported: S32/F16/F32 - * @param[in] bias (Optional) The shared bias tensor to add. 
It must be 1D Tensor. Data type supported: Same as @p input - * @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr) - * Required parameter if output is of QASYMM8 type. - * Data types supported: QASYMM8/F16/F32 - * @param[in] result_fixedpoint_multiplier (Optional)Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add - * @param[in] result_shift (Optional)Integer value used to round to nearest division by a power-of-two the result after the fixed point multiplication - * @param[in] result_offset_after_shift (Optional)Offset to be applied to result before converting it back to QASYMM8 - */ - void configure(ICLTensor *input, const ICLTensor *bias = nullptr, ICLTensor *output = nullptr, - int result_fixedpoint_multiplier = 0, int result_shift = 0, int result_offset_after_shift = 0); - /** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerOutputStageKernel - * - * @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place. - * Data type supported: F16/F32 - * @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input - * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. 
(Defaults to nullptr) - * Data type supported: F16/F32 - * @return a status - */ - static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr); - - // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; - -private: - ICLTensor *_input; - const ICLTensor *_bias; - ICLTensor *_output; - int _result_fixedpoint_multiplier; - int _result_shift; - int _result_offset_after_shift; -}; -} // namespace arm_compute -#endif /*__ARM_COMPUTE_CLDIRECTCONVOLUTIONLAYEROUTPUTSTAGEKERNEL_H__ */ diff --git a/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h index d177f4505a..98581a21fe 100644 --- a/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h +++ b/arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h @@ -26,17 +26,12 @@ #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h" #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h" -#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h" +#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h" #include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h" -#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h" -#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h" -#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h" #include "arm_compute/core/CL/kernels/CLFillBorderKernel.h" -#include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h" #include "arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h" #include "arm_compute/core/Types.h" #include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/CL/functions/CLActivationLayer.h" #include "arm_compute/runtime/CL/functions/CLPermute.h" 
#include "arm_compute/runtime/IFunction.h" #include "arm_compute/runtime/MemoryGroup.h" @@ -121,17 +116,15 @@ private: /** Basic function to execute a generic depthwise convolution. This function calls the following OpenCL kernels: * - * -# @ref CLDepthwiseIm2ColKernel - * -# @ref CLGEMMMatrixVectorMultiplyKernel - * -# @ref CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel - * -# @ref CLFillBorderKernel (if pad_x or pad_y > 0) + * -# @ref CLDepthwiseConvolutionLayerNativeKernel + * -# @ref CLPermute (x 3) if the data layout is NCHW * */ class CLDepthwiseConvolutionLayer : public IFunction { public: /** Default constructor */ - CLDepthwiseConvolutionLayer(); + CLDepthwiseConvolutionLayer(std::shared_ptr memory_manager = nullptr); /** Prevent instances of this class from being copied (As this class contains pointers) */ CLDepthwiseConvolutionLayer(const CLDepthwiseConvolutionLayer &) = delete; /** Default move constructor */ @@ -177,23 +170,21 @@ public: void prepare() override; private: - CLDepthwiseIm2ColKernel _im2col_kernel; - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel _weights_reshape_kernel; - CLGEMMMatrixVectorMultiplyKernel _v2mm_kernel; - CLDepthwiseVectorToTensorKernel _vector_to_tensor_kernel; - CLDirectConvolutionLayerOutputStageKernel _output_stage_kernel; - CLActivationLayer _activationlayer_function; - CLFillBorderKernel _v2mm_input_fill_border; - CLFillBorderKernel _v2mm_weights_fill_border; - CLTensor _input_reshaped; - CLTensor _weights_reshaped; - CLTensor _v2mm_output; - CLTensor _output_reshaped; - bool _is_prepared; - bool _is_quantized; - bool _is_activationlayer_enabled; - const ICLTensor *_original_weights; - std::unique_ptr _optimised_function; + MemoryGroup _memory_group; + + std::unique_ptr _optimised_function; + CLDepthwiseConvolutionLayerNativeKernel _dwc_native_kernel; + CLPermute _permute_input_to_nhwc; + CLPermute _permute_weights_to_nhwc; + CLPermute _permute_output_to_nchw; + + CLTensor _permuted_input; + CLTensor 
_permuted_weights; + CLTensor _permuted_output; + const ITensor *_original_weights; + + bool _needs_permute; + bool _is_prepared; }; } // namespace arm_compute #endif /*__ARM_COMPUTE_CLDEPTHWISECONVOLUTION_H__ */ diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox index 6e014e39c9..1210b2bdec 100644 --- a/docs/00_introduction.dox +++ b/docs/00_introduction.dox @@ -236,6 +236,13 @@ If there is more than one release in a month then an extra sequential number is @subsection S2_2_changelog Changelog +v19.11 Public major release + - Deprecated OpenCL kernels / functions + - CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel + - CLDepthwiseIm2ColKernel + - CLDepthwiseVectorToTensorKernel + - CLDirectConvolutionLayerOutputStageKernel + v19.08 Public major release - Various bug fixes. - Various optimisations. @@ -624,7 +631,7 @@ v18.02 Public major release - Added fused batched normalization and activation to @ref CLBatchNormalizationLayer and @ref NEBatchNormalizationLayer - Added support for non-square pooling to @ref NEPoolingLayer and @ref CLPoolingLayer - New OpenCL kernels / functions: - - @ref CLDirectConvolutionLayerOutputStageKernel + - CLDirectConvolutionLayerOutputStageKernel - New NEON kernels / functions - Added name() method to all kernels. - Added support for Winograd 5x5. 
@@ -746,7 +753,7 @@ v17.09 Public major release - @ref NEReshapeLayerKernel / @ref NEReshapeLayer - New OpenCL kernels / functions: - - @ref CLDepthwiseConvolutionLayer3x3NCHWKernel @ref CLDepthwiseConvolutionLayer3x3NHWCKernel @ref CLDepthwiseIm2ColKernel @ref CLDepthwiseVectorToTensorKernel CLDepthwiseWeightsReshapeKernel / @ref CLDepthwiseConvolutionLayer3x3 @ref CLDepthwiseConvolutionLayer CLDepthwiseSeparableConvolutionLayer + - @ref CLDepthwiseConvolutionLayer3x3NCHWKernel @ref CLDepthwiseConvolutionLayer3x3NHWCKernel CLDepthwiseIm2ColKernel CLDepthwiseVectorToTensorKernel CLDepthwiseWeightsReshapeKernel / @ref CLDepthwiseConvolutionLayer3x3 @ref CLDepthwiseConvolutionLayer CLDepthwiseSeparableConvolutionLayer - @ref CLDequantizationLayerKernel / @ref CLDequantizationLayer - @ref CLDirectConvolutionLayerKernel / @ref CLDirectConvolutionLayer - @ref CLFlattenLayer diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index fa5193fde2..7b7263fca7 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -222,6 +222,7 @@ const std::map CLKernelLibrary::_kernel_program_map = { "depthwise_convolution_3x3_nhwc", "depthwise_convolution.cl" }, { "depthwise_convolution_3x3_nhwc_stride1", "depthwise_convolution.cl" }, { "dwc_MxN_native_fp_nhwc", "depthwise_convolution.cl" }, + { "dwc_MxN_native_quantized8_nhwc", "depthwise_convolution_quantized.cl" }, { "dwc_3x3_native_qasymm8_nchw", "depthwise_convolution_quantized.cl" }, { "dwc_3x3_native_qasymm8_dot8_nchw", "depthwise_convolution_quantized.cl" }, { "dwc_3x3_reshaped_qasymm8_nhwc", "depthwise_convolution_quantized.cl" }, @@ -234,9 +235,6 @@ const std::map CLKernelLibrary::_kernel_program_map = { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32", "depthwise_convolution.cl" }, { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32", "depthwise_convolution.cl" }, { "depthwise_convolution_reshape_weights", "depthwise_convolution.cl" }, - { 
"depthwise_convolution_reshape_weights_generic", "depthwise_convolution.cl" }, - { "depthwise_im2col", "depthwise_convolution.cl" }, - { "depthwise_vector_to_tensor", "depthwise_convolution.cl" }, { "dequantization_layer", "dequantization_layer.cl" }, { "dequantization_layer_per_channel_nhwc", "dequantization_layer.cl" }, { "dequantization_layer_per_channel_nchw", "dequantization_layer.cl" }, @@ -413,7 +411,6 @@ const std::map CLKernelLibrary::_kernel_program_map = { "NV21_to_RGB888_bt709", "color_convert.cl" }, { "NV21_to_RGBA8888_bt709", "color_convert.cl" }, { "NV21_to_YUV444_bt709", "color_convert.cl" }, - { "output_stage_quantized", "direct_convolution_1x1_3x3_5x5_quantized.cl" }, { "pad_layer_constant", "pad_layer.cl" }, { "pad_layer_symmetric_reflect", "pad_layer.cl" }, { "permute", "permute.cl" }, diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h index 402e7ac41f..0e4eb2b32e 100644 --- a/src/core/CL/cl_kernels/activation_quant_helpers.h +++ b/src/core/CL/cl_kernels/activation_quant_helpers.h @@ -41,7 +41,7 @@ inline TYPE relu_op(TYPE x) // Bounded RELU Activation inline TYPE brelu_op(TYPE x) { - return min((TYPE)A_VAL, max(CONST_0, x)); + return min((TYPE)A_VAL, max((TYPE)CONST_0, x)); } // Lower Upper Bounded RELU Activation inline TYPE lu_brelu_op(TYPE x) diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl index 1b2f5cccaa..3a227282ff 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution.cl @@ -782,173 +782,6 @@ __kernel void depthwise_convolution_reshape_weights( } #endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH) -#if defined(NCHW) -#define in_stride_x src_stride_x -#define in_stride_y src_stride_y -#define in_stride_z src_stride_z -#define out_stride_x dst_stride_x -#define out_stride_y dst_stride_y -#define out_stride_z dst_stride_z -#else 
//defined(NCHW) -#define in_stride_x src_stride_y -#define in_stride_y src_stride_z -#define in_stride_z src_stride_x -#define out_stride_x dst_stride_y -#define out_stride_y dst_stride_z -#define out_stride_z dst_stride_x -#endif //defined(NCHW) - -#if defined(SRC_WIDTH) && defined(DATA_TYPE) -/** This kernel reshapes each of the tensor's low three dimensions to single rows. - * - * @note Datatype and source width should be given as a preprocessor argument using -DDATA_TYPE=type and -DSRC_WIDTH=width. e.g. -DSRC_WIDTH=128 - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[out] dst_ptr Pointer to the destination tensor. Same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - * @param[in] biases_ptr (Optional) Pointer to the biases vector. 
Supported data types: F16/F32 - * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) - * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector - */ -__kernel void depthwise_convolution_reshape_weights_generic( - TENSOR3D_DECLARATION(src), - IMAGE_DECLARATION(dst) -#ifdef HAS_BIAS - , - VECTOR_DECLARATION(biases) -#endif /* HAS_BIAS */ -) -{ -#ifdef HAS_BIAS - Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); -#endif /* HAS_BIAS */ - - __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * in_stride_y + get_global_id(2) * in_stride_z; - __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + get_global_id(1) * SRC_WIDTH * dst_stride_x + get_global_id(2) * dst_stride_y; - - for(int i = 0; i < SRC_WIDTH; ++i, input_ptr += in_stride_x) - { - *((__global DATA_TYPE *)(output_ptr + i * dst_stride_x)) = *((__global DATA_TYPE *)input_ptr); - } - -#if defined(HAS_BIAS) - if(get_global_id(1) == 0) - { - *((__global DATA_TYPE *)(output_ptr + SRC_WIDTH * get_global_size(1) * dst_stride_x)) = *((__global DATA_TYPE *)(biases.ptr + get_global_id(2) * biases_stride_x)); - } -#endif // defined(HAS_BIAS) -} -#endif //defined(SRC_WIDTH) && defined(DATA_TYPE) - -#if defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE) && defined(PAD_VALUE) && defined(DEPTH_MULTIPLIER) && defined(DILATION_X) && defined(DILATION_Y) -/** This kernel performs a reshaping of the input tensor to a tensor used to perform depthwise convolution using vector to matrix multiplication. 
- * - * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float - * @note The convolution information must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y, -DPAD_LEFT, -DPAD_TOP, -DPAD_RIGHT, -DPAD_BOTTOM, -DKERNEL_WIDHT, -DKERNEL_HEIGHT, -DSRC_WIDTH, -DSRC_HEIGHT, -DDEPTH_MULTIPLIER - * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1 - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[out] dst_ptr Pointer to the destination tensor. 
Supported data types: same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - */ -__kernel void depthwise_im2col(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst)) -{ - Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst); - - const int src_pixel_linear = get_global_id(1) * STRIDE_X; - const int full_length = SRC_WIDTH + PAD_LEFT + PAD_RIGHT; - const int max_initial_x = STRIDE_X * (((full_length - (KERNEL_WIDTH + (KERNEL_WIDTH - 1) * (DILATION_X - 1))) / STRIDE_X) + 1); - - const int src_x = -PAD_LEFT + src_pixel_linear % max_initial_x; - const int src_y = -PAD_TOP + src_pixel_linear / max_initial_x * STRIDE_Y; - const int src_z = get_global_id(2) / DEPTH_MULTIPLIER; - - __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + src_z * in_stride_z; - __global DATA_TYPE *output_ptr = ((__global DATA_TYPE *)(dst.ptr)); - - for(int y = src_y; y < src_y + KERNEL_HEIGHT + (KERNEL_HEIGHT - 1) * (DILATION_Y - 1); y += DILATION_Y) - { - for(int x = src_x; x < src_x + KERNEL_WIDTH + (KERNEL_WIDTH - 1) * (DILATION_X - 1); x += DILATION_X, ++output_ptr) - { - if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT) - { - *output_ptr = PAD_VALUE; - } - else - { - *output_ptr = *((__global DATA_TYPE *)(input_ptr + x * in_stride_x + y * in_stride_y)); - } - } - } -#if defined(HAS_BIAS) - *output_ptr = (DATA_TYPE)(1); -#endif // 
defined(HAS_BIAS) -} - -#endif //defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(DATA_TYPE) && defined(PAD_VALUE) && defined(DEPTH_MULTIPLIER) - -#if defined(CONV_WIDTH) && defined(CONV_HEIGHT) && defined(DATA_TYPE) - -/** This kernel performs a reshaping of the output of the depthwise generic convolution. - * - * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float - * @note The convolution information must be passed at compile time using -DCONV_WIDTH, -DCONV_HEIGHT, e.g -DCONV_WIDTH=32, -DCONV_HEIGHT=42 - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[out] dst_ptr Pointer to the destination tensor. 
Supported data types: same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - */ -__kernel void depthwise_vector_to_tensor( - VECTOR_DECLARATION(src), - TENSOR3D_DECLARATION(dst)) -{ - Vector src = CONVERT_TO_VECTOR_STRUCT(src); - - const int patch_size = CONV_WIDTH * CONV_HEIGHT; - const int id0 = get_global_id(0); - const int z = id0 / patch_size; - const int index2D = id0 - z * patch_size; - - __global uchar *out_ptr = dst_ptr + dst_offset_first_element_in_bytes + index2D % CONV_WIDTH * out_stride_x + index2D / CONV_WIDTH * out_stride_y + z * out_stride_z; - *((__global DATA_TYPE *)out_ptr) = *((__global DATA_TYPE *)src.ptr); -} - -#endif //defined(CONV_WIDTH) && defined(CONV_HEIGHT) && defined(DATA_TYPE) - #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16) #if defined(CONV_STRIDE_X) #if CONV_STRIDE_X == 1 @@ -1478,7 +1311,7 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16( //3x3 Convolution of elements starting in 0th row pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 0, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 2nd row - pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y); + 
pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y); #endif /* DILATION_X==1 && DILATION_Y==1 */ #ifdef HAS_BIAS @@ -1556,23 +1389,17 @@ __kernel void dwc_MxN_native_fp_nhwc( int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch #else // defined(DST_DEPTH) - int z = get_global_id(2); // spatial coordinate y + int z = get_global_id(2); // spatial coordinate y #endif // defined(DST_DEPTH) - __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + - x * sizeof(DATA_TYPE) * (int)N0; + __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)N0; - __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + - x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0 + - y * dst_stride_y + - z * dst_stride_z; + __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0 + y * dst_stride_y + z * dst_stride_z; - __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + - x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0; + __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0; #if defined(HAS_BIAS) - __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + - x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0; + __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0; #endif // defined(HAS_BIAS) #if defined(DST_DEPTH) @@ -1611,7 +1438,7 @@ __kernel void dwc_MxN_native_fp_nhwc( #if GPU_ARCH == GPU_ARCH_MIDGARD res += i * w; #else // GPU_ARCH == GPU_ARCH_MIDGARD - res = fma(i, w, res); + res = fma(i, w, res); #endif // GPU_ARCH == GPU_ARCH_MIDGARD } x_coord_tmp += DILATION_X; diff --git 
a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl index 8f2e441693..10872d460a 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl @@ -24,19 +24,30 @@ #include "helpers_asymm.h" -#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER)) - -#if defined(ACTIVATION_TYPE) && defined(CONST_0) -#define DATA_TYPE uchar #ifndef VEC_SIZE +#if defined(N0) +#define VEC_SIZE N0 +#else /* defined(N0) */ #define VEC_SIZE 8 +#endif /* defined(N0) */ #endif /* VEC_SIZE */ + +#if defined(ACTIVATION_TYPE) && defined(CONST_0) +#define DATA_TYPE uchar #include "activation_layer_quant.cl" #define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x) #else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */ #define ACTIVATION_FUNC(x) (x) #endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */ +#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE) +#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE) +#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE) +#define VEC_USHORT VEC_DATA_TYPE(ushort, VEC_SIZE) +#define VEC_SHORT VEC_DATA_TYPE(short, VEC_SIZE) + +#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER)) + #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) #define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), val); @@ -635,11 +646,6 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( #define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE) -#define VEC_INT VEC_DATA_TYPE(int, 
VEC_SIZE) -#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE) -#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE) -#define VEC_USHORT VEC_DATA_TYPE(ushort, VEC_SIZE) - #define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_USHORT) * CONVERT(y, VEC_USHORT), VEC_INT) #if WEIGHTS_OFFSET != 0 @@ -1375,3 +1381,144 @@ __kernel void dwc_3x3_reshaped_qasymm8_dot8_stride1_nhwc( #endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) #endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER)) + +#if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT) +/** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped + * + * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2) + * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1) + * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112) + * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80) + * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5) + * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5) + * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. 
-DCONV_PAD_TOP=1) + * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1) + * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1) + * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1) + * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu + * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively + * + * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8 + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) + * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) + * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor + * @param[in] dst_ptr Pointer to the destination tensor. 
Supported data types: same as src_ptr + * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) + * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) + * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor + * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8 + * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) + * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) + * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) + * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor + * @param[in] biases_ptr (Optional) Pointer to the biases vector. 
Supported data types: same as src_ptr + * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) + * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector + */ +__kernel void dwc_MxN_native_quantized8_nhwc( + TENSOR4D_DECLARATION(src), + TENSOR4D_DECLARATION(dst), + TENSOR3D_DECLARATION(weights), +#if defined(HAS_BIAS) + VECTOR_DECLARATION(biases) +#endif // defined(HAS_BIAS) +) +{ + int x = get_global_id(0); // channels + int y = get_global_id(1); // spatial coordinate x +#if defined(DST_DEPTH) + int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y + int b = get_global_id(2) / (int)DST_DEPTH; // batch +#else // defined(DST_DEPTH) + int z = get_global_id(2); // spatial coordinate y +#endif // defined(DST_DEPTH) + + __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(uchar) * (int)N0; + + __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(uchar) * (int)DEPTH_MULTIPLIER * (int)N0 + y * dst_stride_y + z * dst_stride_z; + + __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x * sizeof(uchar) * (int)DEPTH_MULTIPLIER * (int)N0; + +#if defined(HAS_BIAS) + __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int) * (int)DEPTH_MULTIPLIER * (int)N0; +#endif // defined(HAS_BIAS) + +#if defined(DST_DEPTH) + s_addr += b * src_stride_w; + d_addr += b * dst_stride_w; +#endif // defined(DST_DEPTH) + +#if DEPTH_MULTIPLIER > 1 + for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d) + { +#endif // DEPTH_MULTIPLIER > 1 + // Each work-item computes N0x1x1 elements + VEC_SHORT res = 0; + + int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT; + int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP; + + for(int yk = 0; yk < KERNEL_HEIGHT; ++yk) + { + 
if(y_coord >= 0 && y_coord < SRC_DIM2) + { + int x_coord_tmp = x_coord; + + for(int xk = 0; xk < KERNEL_WIDTH; ++xk) + { + if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1) + { + int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z; + int w_offset = xk * weights_stride_y + yk * weights_stride_z; + + // Load input and weights values + VEC_SHORT i = CONVERT(VLOAD(N0)(0, (__global uchar *)(s_addr + s_offset)), VEC_SHORT); + VEC_SHORT w = CONVERT(VLOAD(N0)(0, (__global uchar *)(w_addr + w_offset)), VEC_SHORT); + + res += (i + (VEC_SHORT)INPUT_OFFSET) * (w + (VEC_SHORT)WEIGHTS_OFFSET); + } + x_coord_tmp += DILATION_X; + } + } + y_coord += DILATION_Y; + } + +#if defined(HAS_BIAS) + VEC_SHORT bias = CONVERT(VLOAD(N0)(0, (__global int *)(b_addr)), VEC_SHORT); + res += bias; +#endif // defined(HAS_BIAS) + + res = CONVERT(ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(CONVERT(res, VEC_INT), OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0), VEC_SHORT); + res += (VEC_SHORT)OUTPUT_OFFSET; + + VEC_UCHAR res1 = CONVERT_SAT(res, VEC_UCHAR); + + VSTORE(N0) + (ACTIVATION_FUNC(res1), 0, (__global uchar *)(d_addr)); + +#if DEPTH_MULTIPLIER > 1 + w_addr += sizeof(uchar); + d_addr += sizeof(uchar); +#if defined(HAS_BIAS) + b_addr += sizeof(int); +#endif // defined(HAS_BIAS) + } +#endif // DEPTH_MULTIPLIER > 1 +} +#endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT) diff --git a/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl b/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl index 83da76785b..5ad9afb23c 100644 --- a/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl +++ 
b/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -247,79 +247,3 @@ __kernel void direct_convolution_1x1_3x3_5x5_quantized( vstore8(convert_uchar8_sat(pixels0), 0, (__global uchar *)dst.ptr); } #endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) - -#if defined(VEC_SIZE) - -#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE) -#define CONVERT_SAT_UCHAR_STR(x, size) (convert_uchar##size##_sat((x))) -#define CONVERT_SAT_UCHAR(x, size) CONVERT_SAT_UCHAR_STR(x, size) - -/** This function computes the output stage of a depthwise convolution. - * - * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8 - * @param[in] src_stride_x Stride of the source image in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_ptr Pointer to the destination tensor. 
Supported data types: QASYMM8 - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - * @param[in] bias_ptr (Optional) Pointer to the biases vector. Supported data types: S32 - * @param[in] bias_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) - * @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector - * @param[in] output_offset Quantized offset of zero point of the output tensor data range - * @param[in] output_multiplier Output scale multiplier - * @param[in] output_shift Output scale divisor exponent - */ -__kernel void output_stage_quantized( - TENSOR3D_DECLARATION(src), - TENSOR3D_DECLARATION(dst), -#if defined(HAS_BIAS) - VECTOR_DECLARATION(bias), -#endif //defined(HAS_BIAS) - int output_offset, - int output_multiplier, - int output_shift) -{ - Image src = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src); - Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst); -#if defined(HAS_BIAS) - Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias); -#endif //defined(HAS_BIAS) - - // Load input - VEC_INT vals = VLOAD(VEC_SIZE)(0, (__global int *)(src.ptr)); - -#if defined(HAS_BIAS) - // Load and add bias -#if defined(NCHW) - int bias_value = *((__global int *)(vector_offset(&bias, 
get_global_id(2)))); -#else // defined(NCHW) - VEC_INT bias_value = VLOAD(VEC_SIZE)(0, ((__global int *)(vector_offset(&bias, get_global_id(0) * VEC_SIZE)))); -#endif // defined(NCHW) - - vals += (VEC_INT)(bias_value); -#endif //defined(HAS_BIAS) - - vals = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(vals, output_multiplier, output_shift, VEC_SIZE); - vals = vals + output_offset; - - // Store result in dst - VSTORE(VEC_SIZE) - (CONVERT_SAT_UCHAR(vals, VEC_SIZE), 0, (__global uchar *)dst.ptr); -} - -#undef VEC_INT -#undef CONVERT_SAT_UCHAR_STR -#undef CONVERT_SAT_UCHAR - -#endif // defined(VEC_SIZE) diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h index f7f208529a..8117c1e519 100644 --- a/src/core/CL/cl_kernels/helpers.h +++ b/src/core/CL/cl_kernels/helpers.h @@ -150,6 +150,28 @@ #define convert_half8_sat convert_half8 #define convert_half16_sat convert_half16 +#define convert_float1 convert_float +#define convert_half1 convert_half +#define convert_char1 convert_char +#define convert_uchar1 convert_uchar +#define convert_short1 convert_short +#define convert_ushort1 convert_ushort +#define convert_int1 convert_int +#define convert_uint1 convert_uint +#define convert_long1 convert_long +#define convert_ulong1 convert_ulong +#define convert_double1 convert_double + +#define convert_char1_sat convert_char_sat +#define convert_uchar1_sat convert_uchar_sat +#define convert_short1_sat convert_short_sat +#define convert_ushort1_sat convert_ushort_sat +#define convert_int1_sat convert_int_sat +#define convert_uint1_sat convert_uint_sat +#define convert_long1_sat convert_long_sat +#define convert_ulong1_sat convert_ulong_sat +#define convert_double1_sat convert_double_sat + #define VEC_DATA_TYPE_STR(type, size) type##size #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h index 53e6719cd7..57ecccc2b2 100644 --- 
a/src/core/CL/cl_kernels/helpers_asymm.h +++ b/src/core/CL/cl_kernels/helpers_asymm.h @@ -381,11 +381,13 @@ DEQUANTIZE_IMPL(uchar, 4) DEQUANTIZE_IMPL(ushort, 4) DEQUANTIZE_IMPL(short, 4) +ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(1) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8) ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16) +ASYMM_MULT_IMPL(1) ASYMM_MULT_IMPL(2) ASYMM_MULT_IMPL(4) ASYMM_MULT_IMPL(8) diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp index 8b624bb2cb..2115fc614d 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.cpp @@ -35,6 +35,7 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/core/utils/quantization/AsymmHelpers.h" namespace arm_compute { @@ -46,7 +47,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, ARM_COMPUTE_UNUSED(dwc_info); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1 && dwc_weights_info.n0 != 1); ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1); @@ -59,8 +60,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, if(biases != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); 
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); + + if(is_data_type_quantized(input->data_type())) + { + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32); + } + else + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); + } } if(output->total_size() != 0) @@ -137,6 +146,7 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const ICLTensor *input, const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); const size_t weights_width = weights->info()->dimension(idx_w); const size_t weights_height = weights->info()->dimension(idx_h); + const bool is_quantized = is_data_type_quantized(input->info()->data_type()); CLBuildOptions build_opts; build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS"); @@ -155,10 +165,46 @@ void CLDepthwiseConvolutionLayerNativeKernel::configure(const ICLTensor *input, build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second)); build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x())); build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y())); - build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.a())); - build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.b())); - std::string kernel_name("dwc_MxN_native_fp_nhwc"); + std::string kernel_name = (is_quantized) ? 
"dwc_MxN_native_quantized8_nhwc" : "dwc_MxN_native_fp_nhwc"; + + if(is_quantized) + { + const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform(); + const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform(); + const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform(); + + float multiplier = iq_info.scale * wq_info.scale / oq_info.scale; + int output_multiplier = 0; + int output_shift = 0; + quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); + + build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset)); + build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset)); + build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset)); + build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier)); + build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift)); + + if(dwc_info.activation_info.enabled()) + { + const int a_val = quantize_qasymm8(dwc_info.activation_info.a(), oq_info); + const int b_val = quantize_qasymm8(dwc_info.activation_info.b(), oq_info); + const int o1 = oq_info.offset; + + build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val)); + build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val)); + build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1)); + + const float s1 = iq_info.scale; + build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1)); + build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1)); + } + } + else + { + build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.a())); + build_opts.add_option_if(dwc_info.activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(dwc_info.activation_info.b())); + } 
ICLKernel::configure_internal(win_config.second); _kernel = static_cast(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options())); diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp deleted file mode 100644 index 582c600c61..0000000000 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" -#include "arm_compute/core/CL/CLValidate.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "support/ToolchainSupport.h" - -using namespace arm_compute; - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases) -{ - const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); - const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); - - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(input->data_type()) && (biases != nullptr)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_c) != output->dimension(1)); - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (input->dimension(idx_w) * input->dimension(idx_h) + ((biases != nullptr) ? 
1 : 0))); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - - if(biases != nullptr) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases); - ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != input->dimension(idx_c)); - ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); - } - - return Status{}; -} -} // namespace - -CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel() - : _input(nullptr), _biases(nullptr), _output(nullptr) -{ -} - -void CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *biases) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), (biases != nullptr) ? biases->info() : nullptr)); - - _input = input; - _biases = biases; - _output = output; - - const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); - - // Create kernel - std::set build_opts; - - build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.emplace("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_w))); - build_opts.emplace("-D" + string_from_data_layout(input->info()->data_layout())); - if(_biases != nullptr) - { - build_opts.emplace("-DHAS_BIAS"); - } - - _kernel = static_cast(CLKernelLibrary::get().create_kernel("depthwise_convolution_reshape_weights_generic", build_opts)); - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps()); - // The CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel doesn't need padding so update_window_and_padding() can be skipped - output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -Status 
CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, biases)); - return Status{}; -} - -void CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); - - Window slice = window.first_slice_window_3D(); - Window slice_out = window.first_slice_window_2D(); - - const size_t idx_w = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::HEIGHT); - const size_t idx_c = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::CHANNEL); - - // Setup slice - slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(idx_w), _input->info()->dimension(idx_w))); - slice.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(idx_h), 1)); - slice.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(idx_c), 1)); - - // Setup output slice - // The first two dimensions of the output are increased by the inner loops - slice_out.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimY, Window::Dimension(0, 0, 0)); - - // Set biases - if(_biases != nullptr) - { - unsigned int idx = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor(); - Window slice_biases; - slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape()); - add_1D_tensor_argument(idx, _biases, slice_biases); - } - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice); - add_2D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice, lws_hint()); - } - while(window.slide_window_slice_3D(slice) && 
window.slide_window_slice_2D(slice_out)); -} diff --git a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp deleted file mode 100644 index 0312a57664..0000000000 --- a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "arm_compute/core/CL/kernels/CLDepthwiseIm2ColKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" -#include "arm_compute/core/CL/CLValidate.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "support/ToolchainSupport.h" - -#include - -using namespace arm_compute; - -CLDepthwiseIm2ColKernel::CLDepthwiseIm2ColKernel() - : _input(nullptr), _output(nullptr) -{ -} - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, - const Size2D &dilation) -{ - const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); - - ARM_COMPUTE_UNUSED(conv_info); - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(input->data_type()) && has_bias); - ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(idx_c) * depth_multiplier) != output->dimension(2)); - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (kernel_dims.width * kernel_dims.height + ((has_bias) ? 
1 : 0))); - ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || dilation.y() < 1); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - - return Status{}; -} -} // namespace - -void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, - const Size2D &dilation) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier, dilation)); - - _input = input; - _output = output; - - const DataLayout data_layout = input->info()->data_layout(); - const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform(); - - // Create kernel - CLBuildOptions build_opts; - - build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(conv_info.stride().first)); - build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second)); - build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left())); - build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top())); - build_opts.add_option("-DPAD_RIGHT=" + support::cpp11::to_string(conv_info.pad_right())); - build_opts.add_option("-DPAD_BOTTOM=" + support::cpp11::to_string(conv_info.pad_bottom())); - build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_w))); - build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(idx_h))); - build_opts.add_option("-DKERNEL_WIDTH=" + support::cpp11::to_string(kernel_dims.width)); - 
build_opts.add_option("-DKERNEL_HEIGHT=" + support::cpp11::to_string(kernel_dims.height)); - build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier)); - build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x())); - build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y())); - build_opts.add_option("-D" + string_from_data_layout(input->info()->data_layout())); - build_opts.add_option_if(has_bias, "-DHAS_BIAS"); - build_opts.add_option_if_else(is_data_type_quantized_asymmetric(input->info()->data_type()), - "-DPAD_VALUE=" + support::cpp11::to_string(qinfo.offset), - "-DPAD_VALUE=0"); - - _kernel = static_cast(CLKernelLibrary::get().create_kernel("depthwise_im2col", build_opts.options())); - - // Configure kernel window - Window win = calculate_max_window(*output->info(), Steps()); - // CLDepthwiseIm2ColKernel doesn't need padding so update_window_and_padding() can be skipped - output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -Status CLDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier, - const Size2D &dilation) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier, dilation)); - - return Status{}; -} - -void CLDepthwiseIm2ColKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); - - Window slice = window.first_slice_window_3D(); - Window slice_in = window.first_slice_window_3D(); - - // Setup slice - slice.set(Window::DimX, Window::Dimension(0, _output->info()->dimension(0), _output->info()->dimension(0))); - slice.set(Window::DimY, Window::Dimension(0, 
_output->info()->dimension(1), 1)); - slice.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(2), 1)); - - // Setup input slice - // The first three dimensions of the input are increased by the inner loops - slice_in.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_in.set(Window::DimY, Window::Dimension(0, 0, 0)); - slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0)); - - do - { - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice_in); - add_3D_tensor_argument(idx, _output, slice); - enqueue(queue, *this, slice, lws_hint()); - } - while(window.slide_window_slice_3D(slice) && window.slide_window_slice_3D(slice_in)); -} diff --git a/src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp b/src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp deleted file mode 100644 index 0f029fda74..0000000000 --- a/src/core/CL/kernels/CLDepthwiseVectorToTensorKernel.cpp +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2017-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/core/CL/kernels/CLDepthwiseVectorToTensorKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" -#include "arm_compute/core/CL/CLValidate.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "support/ToolchainSupport.h" - -using namespace arm_compute; -using namespace arm_compute::misc::shape_calculator; - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, size_t conv_w, size_t conv_h) -{ - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::S32, DataType::F16, DataType::F32); - - if(output->total_size() != 0) - { - TensorShape output_shape = compute_vector_to_tensor_output_shape(input->tensor_shape(), conv_w, conv_h, output->data_layout()); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); - } - - return Status{}; -} -} // namespace - -CLDepthwiseVectorToTensorKernel::CLDepthwiseVectorToTensorKernel() - : _input(nullptr), _output(nullptr) -{ -} - -void CLDepthwiseVectorToTensorKernel::configure(const ICLTensor *input, ICLTensor *output, size_t conv_w, size_t conv_h) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - - // Output auto inizialitation if not yet initialized - TensorShape 
output_shape = compute_vector_to_tensor_output_shape(input->info()->tensor_shape(), conv_w, conv_h, output->info()->data_layout()); - auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape)); - - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), conv_w, conv_h)); - - _input = input; - _output = output; - - // Create kernel - CLBuildOptions build_opts; - build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); - build_opts.add_option("-DCONV_WIDTH=" + support::cpp11::to_string(conv_w)); - build_opts.add_option("-DCONV_HEIGHT=" + support::cpp11::to_string(conv_h)); - build_opts.add_option("-D" + string_from_data_layout(output->info()->data_layout())); - - _kernel = static_cast(CLKernelLibrary::get().create_kernel("depthwise_vector_to_tensor", build_opts.options())); - - // Configure kernel window - Window win = calculate_max_window(*input->info(), Steps()); - // The CLDepthwisevectorToTensorKernel doesn't need padding so update_window_and_padding() can be skipped - output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape())); - - ICLKernel::configure_internal(win); -} - -Status CLDepthwiseVectorToTensorKernel::validate(const ITensorInfo *input, const ITensorInfo *output, size_t conv_w, size_t conv_h) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, conv_w, conv_h)); - return Status{}; -} - -void CLDepthwiseVectorToTensorKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); - - Window slice = window.first_slice_window_1D(); - Window slice_out = window.first_slice_window_3D(); - - // Setup slice - slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(0), 1)); - - // Setup output slice - // The first three dimensions of the output are increased by the inner loops - 
slice_out.set(Window::DimX, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimY, Window::Dimension(0, 0, 0)); - slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0)); - - do - { - unsigned int idx = 0; - add_1D_tensor_argument(idx, _input, slice); - add_3D_tensor_argument(idx, _output, slice_out); - enqueue(queue, *this, slice, lws_hint()); - } - while(window.slide_window_slice_1D(slice) && window.slide_window_slice_3D(slice_out)); -} diff --git a/src/core/CL/kernels/CLDirectConvolutionOutputStageKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionOutputStageKernel.cpp deleted file mode 100644 index 515321cdfc..0000000000 --- a/src/core/CL/kernels/CLDirectConvolutionOutputStageKernel.cpp +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) 2018-2019 ARM Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h" - -#include "arm_compute/core/AccessWindowStatic.h" -#include "arm_compute/core/CL/CLValidate.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/Window.h" - -#include -#include - -namespace arm_compute -{ -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input); - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32, DataType::F16, - DataType::F32); - - if(bias != nullptr) - { - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(bias); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32, DataType::F16, DataType::F32); - - if(is_data_type_quantized_asymmetric(input->data_type())) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, bias); - } - - ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()), - "Calling output stage kernel with floating point arguments"); - } - - // Checks performed on output - if(input->data_type() == DataType::S32) - { - // Quantized configuration checks - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8); - } - else - { - // In case of out-of-place computation (supported for non-quantized configurations) - if((output != nullptr) && (output->total_size() != 0)) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - } - } - - return Status{}; -} - -std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *bias, ITensorInfo *output) -{ - bool window_changed 
= false; - const unsigned int num_elems_processed_per_iteration = 16 / element_size_from_data_type(input->data_type()); - - // Configure kernel window - Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration)); - - // Input window - AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration); - window_changed = window_changed || update_window_and_padding(win, input_access); - - // Bias window - if(bias != nullptr) - { - AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->dimension(1)); - window_changed = window_changed || update_window_and_padding(win, bias_access); - } - - // Output window - if(output != nullptr && (output->total_size() != 0)) - { - AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration); - window_changed = window_changed || update_window_and_padding(win, output_access); - output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); - } - else - { - input_access.set_valid_region(win, ValidRegion(Coordinates(), input->tensor_shape())); - } - - Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; - return std::make_pair(err, win); -} -} // namespace - -CLDirectConvolutionLayerOutputStageKernel::CLDirectConvolutionLayerOutputStageKernel() - : _input(nullptr), _bias(nullptr), _output(nullptr), _result_fixedpoint_multiplier(0), _result_shift(0), _result_offset_after_shift(0) -{ -} - -void CLDirectConvolutionLayerOutputStageKernel::configure(ICLTensor *input, const ICLTensor *bias, ICLTensor *output, - int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input); - - // Auto-initialize output if required - if(output != nullptr) - { - // Work out expected output data type - const DataType output_dt = (input->info()->data_type() == DataType::S32) ? 
DataType::QASYMM8 : input->info()->data_type(); - // Output tensor auto initialization if not yet initialized - auto_init_if_empty(*output->info(), input->info()->clone()->set_data_type(output_dt)); - } - - // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (bias == nullptr) ? nullptr : bias->info(), (output == nullptr) ? nullptr : output->info())); - - _bias = bias; - _input = input; - _output = output; - _result_fixedpoint_multiplier = result_fixedpoint_multiplier; - _result_shift = result_shift; - _result_offset_after_shift = result_offset_after_shift; - - const unsigned int num_elems_accessed_per_iteration = 16 / element_size_from_data_type(input->info()->data_type()); - - // Create kernel - CLBuildOptions build_opts; - build_opts.add_option_if(bias != nullptr, "-DHAS_BIAS"); - build_opts.add_option("-D" + string_from_data_layout(input->info()->data_layout())); - build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_accessed_per_iteration)); - _kernel = static_cast(CLKernelLibrary::get().create_kernel("output_stage_quantized", build_opts.options())); - - // Set static kernel arguments - int idx = 2 * num_arguments_per_3D_tensor() + ((bias != nullptr) ? num_arguments_per_1D_tensor() : 0); - _kernel.setArg(idx++, _result_offset_after_shift); - _kernel.setArg(idx++, _result_fixedpoint_multiplier); - _kernel.setArg(idx++, _result_shift); - - // Configure kernel window - auto win_config = validate_and_configure_window(input->info(), (bias == nullptr) ? nullptr : bias->info(), (output == nullptr) ? 
nullptr : output->info()); - ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - ICLKernel::configure_internal(win_config.second); -} - -Status CLDirectConvolutionLayerOutputStageKernel::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output) -{ - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, bias, output)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), bias == nullptr ? nullptr : bias->clone().get(), output == nullptr ? nullptr : output->clone().get()).first); - - return Status{}; -} - -void CLDirectConvolutionLayerOutputStageKernel::run(const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); - - Window slice = window.first_slice_window_3D(); - - // Set bias vector - if(_bias != nullptr) - { - unsigned int idx1 = 2 * num_arguments_per_3D_tensor(); - Window slice_biases; - slice_biases.use_tensor_dimensions(_bias->info()->tensor_shape()); - add_1D_tensor_argument(idx1, _bias, slice_biases); - } - - // Run kernel - do - { - // Set arguments - unsigned int idx = 0; - add_3D_tensor_argument(idx, _input, slice); - add_3D_tensor_argument(idx, _output, slice); - enqueue(queue, *this, slice, lws_hint()); - } - while(window.slide_window_slice_3D(slice)); -} -} // namespace arm_compute diff --git a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp index 2967a73866..ae096f295c 100644 --- a/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp +++ b/src/core/CL/kernels/CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel.cpp @@ -35,8 +35,6 @@ #include "support/ToolchainSupport.h" -using namespace arm_compute; - namespace arm_compute { namespace @@ -101,8 +99,6 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen } // namespace class Coordinates; -} 
// namespace arm_compute - CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel() : _input(nullptr), _bias(nullptr), _output(nullptr) { @@ -177,3 +173,4 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::run(const Window &win } while(collapsed.slide_window_slice_3D(slice)); } +} // namespace arm_compute diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp index f01b58a8b3..d9c21150df 100644 --- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp @@ -246,28 +246,44 @@ void CLDepthwiseConvolutionLayer3x3::prepare() } } -CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer() - : _im2col_kernel(), _weights_reshape_kernel(), _v2mm_kernel(), _vector_to_tensor_kernel(), _output_stage_kernel(), _activationlayer_function(), _v2mm_input_fill_border(), _v2mm_weights_fill_border(), - _input_reshaped(), _weights_reshaped(), _v2mm_output(), _output_reshaped(), _is_prepared(false), _is_quantized(false), _is_activationlayer_enabled(false), _original_weights(nullptr), - _optimised_function(nullptr) +CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayer(std::shared_ptr memory_manager) + : _memory_group(std::move(memory_manager)), + _optimised_function(nullptr), + _dwc_native_kernel(), + _permute_input_to_nhwc(), + _permute_weights_to_nhwc(), + _permute_output_to_nchw(), + _permuted_input(), + _permuted_weights(), + _permuted_output(), + _original_weights(), + _needs_permute(false), + _is_prepared(false) { } void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation) { - ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); - 
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); + ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); + ARM_COMPUTE_ERROR_THROW_ON(CLDepthwiseConvolutionLayer::validate(input->info(), + weights->info(), + biases != nullptr ? biases->info() : nullptr, + output->info(), + conv_info, + depth_multiplier, + act_info, + dilation)); const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT); - ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_w) + (weights->info()->dimension(idx_w) - 1) * (dilation.x() - 1) > input->info()->dimension(idx_w) + conv_info.pad_left() + conv_info.pad_right()); - ARM_COMPUTE_ERROR_ON(weights->info()->dimension(idx_h) + (weights->info()->dimension(idx_h) - 1) * (dilation.y() - 1) > input->info()->dimension(idx_h) + conv_info.pad_top() + conv_info.pad_bottom()); - const bool can_run_optimised_3x3_kernel = (weights->info()->dimension(idx_w) == 3) && (weights->info()->dimension(idx_h) == 3); + _needs_permute = false; + _is_prepared = false; + _original_weights = weights; + if(bool(can_run_optimised_3x3_kernel)) { auto f = arm_compute::support::cpp14::make_unique(); @@ -276,103 +292,46 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w } else { - const size_t idx_c = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL); - - const size_t weights_w = weights->info()->dimension(idx_w); - const size_t weights_h = weights->info()->dimension(idx_h); - const size_t weights_z = weights->info()->dimension(idx_c); - - _is_prepared = false; - _original_weights = weights; - _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type()); - - bool append_bias = (biases != nullptr) && !_is_quantized; - const GPUTarget gpu_target = 
CLScheduler::get().target(); - - // Calculate output shape - TensorShape output_shape = shape_calculator::compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier, dilation); - - // Output auto inizialitation if not yet initialized - auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape)); - ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape); - - // Output width and height - const unsigned int conv_w = output_shape[idx_w]; - const unsigned int conv_h = output_shape[idx_h]; - - // Set up intermediate tensors - const size_t patch_size = weights_w * weights_h + ((append_bias) ? 1 : 0); - const size_t conv_size = conv_w * conv_h; - - const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform(); - const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform(); - const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform(); - - // Im2Col configuration - TensorShape shape_im2col = input->info()->tensor_shape(); - shape_im2col.set(0, patch_size); - shape_im2col.set(1, conv_size); - shape_im2col.set(2, weights_z); - _input_reshaped.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col)); - _im2col_kernel.set_target(gpu_target); - _im2col_kernel.configure(input, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier, dilation); - CLScheduler::get().tune_kernel_static(_im2col_kernel); - - // Weights reshape configuration - const TensorShape shape_weights_reshape(patch_size, weights_z); - _weights_reshaped.allocator()->init(weights->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_weights_reshape)); - _weights_reshape_kernel.configure(weights, &_weights_reshaped, append_bias ? 
biases : nullptr); - - // GEMV configuration - DataType v2mm_dt = (input->info()->data_type() == DataType::QASYMM8) ? DataType::S32 : input->info()->data_type(); - TensorShape shape_v2mm_out = input->info()->tensor_shape(); - shape_v2mm_out.set(0, conv_size * weights_z); - shape_v2mm_out.set(1, 1); - shape_v2mm_out.set(2, 1); - _v2mm_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(v2mm_dt).set_tensor_shape(shape_v2mm_out)); - _v2mm_kernel.set_target(gpu_target); - _v2mm_kernel.configure(&_input_reshaped, &_weights_reshaped, &_v2mm_output); - CLScheduler::get().tune_kernel_static(_v2mm_kernel); - _output_reshaped.allocator()->init(_v2mm_output.info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape)); - _vector_to_tensor_kernel.configure(&_v2mm_output, (_is_quantized) ? &_output_reshaped : output, conv_w, conv_h); - - // Output staged configuration - if(_is_quantized) - { - const UniformQuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? 
iq_info : oq_info; - - int output_multiplier = 0; - int output_shift = 0; - const float multiplier = iq_info.scale * wq_info.scale / output_quant_info.scale; - quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift); - _output_stage_kernel.configure(&_output_reshaped, biases, output, output_multiplier, output_shift, output_quant_info.offset); - _output_reshaped.allocator()->allocate(); - } + _needs_permute = input->info()->data_layout() == DataLayout::NCHW; - // Fill borders on inputs - PixelValue zero_in(static_cast(0)); - PixelValue zero_w(static_cast(0)); - if(_is_quantized) + ICLTensor *input_to_use = input; + const ICLTensor *weights_to_use = weights; + ICLTensor *output_to_use = output; + if(_needs_permute) { - zero_in = PixelValue(static_cast(iq_info.offset)); - zero_w = PixelValue(static_cast(wq_info.offset)); - } - BorderSize border_size = _v2mm_kernel.border_size(); - _v2mm_input_fill_border.configure(&_input_reshaped, border_size, BorderMode::CONSTANT, zero_in); + _memory_group.manage(&_permuted_input); + _memory_group.manage(&_permuted_output); - border_size.bottom = 0; - _v2mm_weights_fill_border.configure(&_weights_reshaped, border_size, BorderMode::CONSTANT, zero_w); + // Configure the function to transform the input tensor from NCHW -> NHWC + _permute_input_to_nhwc.configure(input, &_permuted_input, PermutationVector(2U, 0U, 1U)); + _permuted_input.info()->set_data_layout(DataLayout::NHWC); - // Allocate intermediate tensors - _input_reshaped.allocator()->allocate(); - _v2mm_output.allocator()->allocate(); + // Configure the function to transform the weights tensor from IHW -> HWI + _permute_weights_to_nhwc.configure(weights, &_permuted_weights, PermutationVector(2U, 0U, 1U)); + _permuted_weights.info()->set_data_layout(DataLayout::NHWC); + + // Set output quantization info before dwc kernel configure + _permuted_output.info()->set_quantization_info(output->info()->quantization_info()); + + 
input_to_use = &_permuted_input; + weights_to_use = &_permuted_weights; + output_to_use = &_permuted_output; + } - //Configure Activation Layer - _is_activationlayer_enabled = act_info.enabled(); + DWCWeightsKernelInfo dwc_weights_info; + dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1; + DWCKernelInfo dwc_info; + dwc_info.activation_info = act_info; + _dwc_native_kernel.configure(input_to_use, weights_to_use, biases, output_to_use, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation); - if(_is_activationlayer_enabled) + if(_needs_permute) { - _activationlayer_function.configure(output, nullptr, act_info); + _permuted_input.allocator()->allocate(); + + // Configure the function to transform the convoluted output to NCHW format + _permuted_output.info()->set_data_layout(DataLayout::NCHW); + _permute_output_to_nchw.configure(&_permuted_output, output, PermutationVector(1U, 2U, 0U)); + _permuted_output.allocator()->allocate(); } } } @@ -380,6 +339,8 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation) { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output); + const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); @@ -390,60 +351,36 @@ Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe if(!can_run_optimised_3x3_kernel) { - const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); - - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output); - ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(idx_c) * 
depth_multiplier) != weights->dimension(idx_c)); - - const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type()); - const bool append_bias = (biases != nullptr) && !is_quantized; - const TensorShape output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); - const size_t weights_w = weights->dimension(idx_w); - const size_t weights_h = weights->dimension(idx_h); - const size_t weights_z = weights->dimension(idx_c); - const unsigned int conv_w = output_shape[idx_w]; - const unsigned int conv_h = output_shape[idx_h]; - const size_t patch_size = weights_w * weights_h + ((append_bias) ? 1 : 0); - const size_t conv_size = conv_w * conv_h; - - TensorShape shape_im2col = input->tensor_shape(); - shape_im2col.set(0, patch_size); - shape_im2col.set(1, conv_size); - shape_im2col.set(2, weights_z); - TensorInfo input_reshaped(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col)); - ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseIm2ColKernel::validate(input, &input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier, dilation)); - - const TensorShape shape_weights_reshape(patch_size, weights_z); - TensorInfo weights_reshaped(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_weights_reshape)); - ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::validate(weights, &weights_reshaped, append_bias ? biases : nullptr)); - - DataType v2mm_dt = (input->data_type() == DataType::QASYMM8) ? 
DataType::S32 : input->data_type(); - TensorShape shape_v2mm_out = input->tensor_shape(); - shape_v2mm_out.set(0, conv_size * weights_z); - shape_v2mm_out.set(1, 1); - shape_v2mm_out.set(2, 1); - TensorInfo v2mm_output(input->clone()->set_is_resizable(true).reset_padding().set_data_type(v2mm_dt).set_tensor_shape(shape_v2mm_out)); - ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMMatrixVectorMultiplyKernel::validate(&input_reshaped, &weights_reshaped, &v2mm_output)); - - TensorInfo output_reshaped(v2mm_output.clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape)); - ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseVectorToTensorKernel::validate(&v2mm_output, (is_quantized) ? &output_reshaped : output, conv_w, conv_h)); - - if(is_quantized) + DWCWeightsKernelInfo dwc_weights_info; + dwc_weights_info.n0 = (depth_multiplier == 1) ? 8 : 1; + DWCKernelInfo dwc_info; + dwc_info.activation_info = act_info; + + const bool needs_permute = input->data_layout() == DataLayout::NCHW; + + if(needs_permute) { - const UniformQuantizationInfo iq_info = input->quantization_info().uniform(); - const UniformQuantizationInfo wq_info = weights->quantization_info().uniform(); - const UniformQuantizationInfo oq_info = (output->total_size() == 0) ? 
iq_info : output->quantization_info().uniform(); - - const float multiplier = iq_info.scale * wq_info.scale / oq_info.scale; - ARM_COMPUTE_UNUSED(multiplier); - ARM_COMPUTE_RETURN_ERROR_ON(multiplier > 1.0f); - ARM_COMPUTE_RETURN_ON_ERROR(CLDirectConvolutionLayerOutputStageKernel::validate(&output_reshaped, biases, output)); + TensorShape permuted_input_shape = input->tensor_shape(); + TensorShape permuted_weights_shape = weights->tensor_shape(); + TensorShape permuted_output_shape = shape_calculator::compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); + + permute(permuted_input_shape, PermutationVector(2U, 0U, 1U)); + permute(permuted_weights_shape, PermutationVector(2U, 0U, 1U)); + permute(permuted_output_shape, PermutationVector(2U, 0U, 1U)); + + const TensorInfo permuted_input = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_input_shape).set_data_layout(DataLayout::NHWC); + const TensorInfo permuted_weights = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_weights_shape).set_data_layout(DataLayout::NHWC); + const TensorInfo permuted_output = output->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(permuted_output_shape).set_data_layout(DataLayout::NHWC); + + ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &permuted_input, PermutationVector(2U, 0U, 1U))); + ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(weights, &permuted_weights, PermutationVector(2U, 0U, 1U))); + ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(&permuted_input, &permuted_weights, biases, &permuted_output, dwc_weights_info, + dwc_info, conv_info, depth_multiplier, dilation)); + ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&permuted_output, output, PermutationVector(1U, 2U, 0U))); } - - // Validate Activation Layer - if(act_info.enabled()) + else { - ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, 
act_info)); + ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerNativeKernel::validate(input, weights, biases, output, dwc_weights_info, dwc_info, conv_info, depth_multiplier, dilation)); } } else @@ -457,23 +394,22 @@ void CLDepthwiseConvolutionLayer::run() { prepare(); + MemoryGroupResourceScope scope_mg(_memory_group); + if(_optimised_function != nullptr) { _optimised_function->run(); } else { - CLScheduler::get().enqueue(_im2col_kernel); - CLScheduler::get().enqueue(_v2mm_input_fill_border); - CLScheduler::get().enqueue(_v2mm_kernel); - CLScheduler::get().enqueue(_vector_to_tensor_kernel); - if(_is_quantized) + if(_needs_permute) { - CLScheduler::get().enqueue(_output_stage_kernel); + _permute_input_to_nhwc.run(); } - if(_is_activationlayer_enabled) + CLScheduler::get().enqueue(_dwc_native_kernel); + if(_needs_permute) { - _activationlayer_function.run(); + _permute_output_to_nchw.run(); } } } @@ -484,21 +420,17 @@ void CLDepthwiseConvolutionLayer::prepare() { _optimised_function->prepare(); } - else + else if(!_is_prepared) { - if(!_is_prepared) + if(_needs_permute) { ARM_COMPUTE_ERROR_ON(!_original_weights->is_used()); - // Run weights reshaping and mark original weights tensor as unused - _weights_reshaped.allocator()->allocate(); - CLScheduler::get().enqueue(_weights_reshape_kernel); - CLScheduler::get().enqueue(_v2mm_weights_fill_border); + _permuted_weights.allocator()->allocate(); + _permute_weights_to_nhwc.run(); _original_weights->mark_as_unused(); - - CLScheduler::get().queue().finish(); - _is_prepared = true; } + _is_prepared = true; } } } // namespace arm_compute diff --git a/src/runtime/CL/tuners/BifrostTuner.cpp b/src/runtime/CL/tuners/BifrostTuner.cpp index 187f52fcf7..5b23baaed3 100644 --- a/src/runtime/CL/tuners/BifrostTuner.cpp +++ b/src/runtime/CL/tuners/BifrostTuner.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 ARM Limited. + * Copyright (c) 2018-2019 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -171,24 +171,6 @@ void tune_im2col_kernel(CLIm2ColKernel &k) k.set_lws_hint(lws_hint); } -void tune_depthwise_im2col_kernel(CLDepthwiseIm2ColKernel &k) -{ - cl::NDRange lws_hint = k.lws_hint(); - const GPUTarget gpu_target = k.get_target(); - - // Configure the local work size for Bifrost with a value obtained - // via exhaustive autotuning for the MobileNets tensor shapes. - if(gpu_target_is_in(gpu_target, - GPUTarget::G71, GPUTarget::G72, GPUTarget::G76, - GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, - GPUTarget::G52, GPUTarget::G52LIT)) - { - lws_hint = cl::NDRange(1, 2, 1); - } - - k.set_lws_hint(lws_hint); -} - void tune_gemv_kernel(CLGEMMMatrixVectorMultiplyKernel &k) { cl::NDRange lws_hint = k.lws_hint(); @@ -311,10 +293,6 @@ void BifrostTuner::tune_kernel_static(ICLKernel &kernel) { tune_im2col_kernel(*utils::cast::polymorphic_downcast(&kernel)); } - else if(dynamic_cast(&kernel) != nullptr) - { - tune_depthwise_im2col_kernel(*utils::cast::polymorphic_downcast(&kernel)); - } else if(dynamic_cast(&kernel) != nullptr) { tune_gemv_kernel(*utils::cast::polymorphic_downcast(&kernel)); diff --git a/tests/datasets/DepthwiseConvolutionLayerDataset.h b/tests/datasets/DepthwiseConvolutionLayerDataset.h index d0617275c0..2990b135d2 100644 --- a/tests/datasets/DepthwiseConvolutionLayerDataset.h +++ b/tests/datasets/DepthwiseConvolutionLayerDataset.h @@ -120,7 +120,7 @@ class SmallDepthwiseConvolutionLayerDataset final : public DepthwiseConvolutionL public: SmallDepthwiseConvolutionLayerDataset() { - add_config(TensorShape(7U, 7U, 1U), Size2D(3U, 3U), PadStrideInfo(1, 1, 0, 0)); + add_config(TensorShape(7U, 7U, 1U), Size2D(1U, 1U), PadStrideInfo(1, 1, 0, 0)); add_config(TensorShape(23U, 27U, 5U), Size2D(3U, 5U), PadStrideInfo(2, 1, 0, 0)); add_config(TensorShape(33U, 27U, 7U), Size2D(7U, 3U), PadStrideInfo(3, 2, 1, 0)); // Asymmetric padding @@ -135,16 +135,16 @@ class LargeDepthwiseConvolutionLayerDataset final : public 
DepthwiseConvolutionL public: LargeDepthwiseConvolutionLayerDataset() { - add_config(TensorShape(33U, 27U, 11U), Size2D(3U, 3U), PadStrideInfo(1, 2, 0, 1)); + add_config(TensorShape(33U, 27U, 11U), Size2D(3U, 4U), PadStrideInfo(1, 2, 0, 1)); add_config(TensorShape(17U, 31U, 2U), Size2D(5U, 9U), PadStrideInfo(1, 2, 1, 1)); add_config(TensorShape(23U, 27U, 5U), Size2D(11U, 3U), PadStrideInfo(1, 2, 0, 0)); add_config(TensorShape(17U, 31U, 2U, 3U), Size2D(5U, 9U), PadStrideInfo(1, 2, 1, 1)); - add_config(TensorShape(233U, 277U, 55U), Size2D(3U, 3U), PadStrideInfo(2, 1, 0, 0)); - add_config(TensorShape(333U, 277U, 77U), Size2D(3U, 3U), PadStrideInfo(3, 2, 1, 0)); - add_config(TensorShape(177U, 311U, 22U), Size2D(3U, 3U), PadStrideInfo(1, 2, 1, 1)); - add_config(TensorShape(233U, 277U, 55U), Size2D(3U, 3U), PadStrideInfo(1, 2, 0, 0)); - add_config(TensorShape(333U, 277U, 77U), Size2D(3U, 3U), PadStrideInfo(2, 3, 0, 1)); - add_config(TensorShape(177U, 311U, 22U), Size2D(3U, 3U), PadStrideInfo(2, 1, 1, 1)); + add_config(TensorShape(233U, 277U, 55U), Size2D(1U, 1U), PadStrideInfo(2, 1, 0, 0)); + add_config(TensorShape(333U, 277U, 77U), Size2D(1U, 1U), PadStrideInfo(3, 2, 1, 0)); + add_config(TensorShape(177U, 311U, 22U), Size2D(3U, 4U), PadStrideInfo(1, 2, 1, 1)); + add_config(TensorShape(233U, 277U, 55U), Size2D(3U, 4U), PadStrideInfo(1, 2, 0, 0)); + add_config(TensorShape(333U, 277U, 77U), Size2D(3U, 4U), PadStrideInfo(2, 3, 0, 1)); + add_config(TensorShape(177U, 311U, 22U), Size2D(3U, 4U), PadStrideInfo(2, 1, 1, 1)); // Asymmetric padding add_config(TensorShape(33U, 27U, 7U), Size2D(5U, 7U), PadStrideInfo(3, 2, 2, 1, 2, 0, DimensionRoundingType::FLOOR)); add_config(TensorShape(33U, 27U, 7U), Size2D(5U, 7U), PadStrideInfo(3, 2, 1, 3, 0, 2, DimensionRoundingType::FLOOR)); -- cgit v1.2.1