Diffstat (limited to 'src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h')
-rw-r--r--  src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h  114
1 file changed, 57 insertions(+), 57 deletions(-)
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
index 325f4e7067..d34a662966 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,9 +24,10 @@
 #ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H
 #define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H
 
-#include "src/core/CL/ICLKernel.h"
-
 #include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/function_info/ConvolutionInfo.h"
+
+#include "src/core/CL/ICLKernel.h"
 
 namespace arm_compute
 {
@@ -47,85 +48,84 @@ public:
     CLDepthwiseConvolutionLayerNativeKernel(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
     /** Allow instances of this class to be moved */
     CLDepthwiseConvolutionLayerNativeKernel &operator=(CLDepthwiseConvolutionLayerNativeKernel &&) = default;
+
     /** Initialize the function's source, destination and parameters
      *
+     * @param[in]  compile_context    The compile context to be used.
      * @param[in]  input              Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
      * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [IFM, N, M].
      *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
      * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
      *                                Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
-     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  dwc_weights_info   Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
+     * @param[out] output             Destination tensor. Pass in nullptr or @p input for in-place operation. Data type supported: Same as @p input.
      * @param[in]  dwc_info           Depthwise convolution layer info
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
+     * @param[in]  conv_info          Convolution info (padding, stride, dilation, ...)
      * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
      *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
      * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
      *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+     *
+     * @note: In-place is only supported when
+     *        * data layout: NHWC
+     *        * filter: 1x1
+     *        * @p depth_multiplier: 1
+     *        * strides: 1
+     *        * dilation: 1
+     *        * no padding
+     *        * no change of data layout after configure
      */
-    void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
-                   const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
-                   const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
-    /** Initialize the function's source, destination and parameters
+    void configure(const CLCompileContext     &compile_context,
+                   ICLTensor                  *input,
+                   const ICLTensor            *weights,
+                   const ICLTensor            *biases,
+                   ICLTensor                  *output,
+                   const DWCComputeKernelInfo &dwc_info,
+                   const ConvolutionInfo      &conv_info,
+                   const ICLTensor            *output_multipliers = nullptr,
+                   const ICLTensor            *output_shifts      = nullptr);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
      *
-     * @param[in]  compile_context    The compile context to be used.
-     * @param[in]  input              Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
-     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [IFM, N, M].
-     *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
-     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                                Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
-     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  dwc_weights_info   Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
-     * @param[in]  dwc_info           Depthwise convolution layer info
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+     * Similar to @ref CLDepthwiseConvolutionLayerNativeKernel::configure()
      */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const DWCWeightsKernelInfo &dwc_weights_info,
-                   const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
-                   const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr);
+    void configure(ICLTensor                  *input,
+                   const ICLTensor            *weights,
+                   const ICLTensor            *biases,
+                   ICLTensor                  *output,
+                   const DWCComputeKernelInfo &dwc_info,
+                   const ConvolutionInfo      &conv_info,
+                   const ICLTensor            *output_multipliers = nullptr,
+                   const ICLTensor            *output_shifts      = nullptr);
+
     /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayerNativeKernel
      *
-     * @param[in]  input              Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/FP32/FP16. Data layout supported: NHWC
-     * @param[in]  weights            Weights tensor info. A 3D tensor with dimensions [IFM, N, M].
-     *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8.
-     * @param[in]  biases             Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                                Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
-     * @param[in]  output             Destination tensor info. Data type supported: Same as @p input.
-     * @param[in]  dwc_weights_info   Depthwise convolution layer weights info to retrieve the number of output elements processed by each thread
-     * @param[in]  dwc_info           Depthwise convolution layer info
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+     * Similar to @ref CLDepthwiseConvolutionLayerNativeKernel::configure()
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const DWCWeightsKernelInfo &dwc_weights_info,
-                           const DWCKernelInfo &dwc_info, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, const Size2D &dilation = Size2D(1U, 1U),
-                           const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
+    static Status validate(const ITensorInfo          *input,
+                           const ITensorInfo          *weights,
+                           const ITensorInfo          *biases,
+                           const ITensorInfo          *output,
+                           const DWCComputeKernelInfo &dwc_info,
+                           const ConvolutionInfo      &conv_info,
+                           const ITensorInfo          *output_multipliers = nullptr,
+                           const ITensorInfo          *output_shifts      = nullptr);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
 
 private:
-    const ICLTensor *_input;
-    const ICLTensor *_weights;
-    const ICLTensor *_biases;
-    ICLTensor       *_output;
-    unsigned int     _depth_multiplier;
-    const ICLTensor *_output_multipliers;
-    const ICLTensor *_output_shifts;
-    bool             _is_quantized;
+    const ICLTensor *_input{};
+    const ICLTensor *_weights{};
+    const ICLTensor *_biases{};
+    ICLTensor       *_output{};
+    unsigned int     _depth_multiplier{0};
+    const ICLTensor *_output_multipliers{};
+    const ICLTensor *_output_shifts{};
+    bool             _export_input_to_cl_image{false};
+    bool             _export_weights_to_cl_image{true};
+    bool             _is_quantized{false};
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERNATIVEKERNEL_H */
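For reference, below is a minimal sketch of how the reworked interface might be exercised after this change, targeting the static validate() shown above. The field names used on DWCComputeKernelInfo (n0, m0) and ConvolutionInfo (pad_stride_info, depth_multiplier, dilation) are assumptions based on the headers this file now includes and may differ between library versions; the kernel itself is an internal class under src/, so the snippet is illustrative rather than public API. Shapes and tile sizes are arbitrary.

// Sketch only: assumes DWCComputeKernelInfo{n0, m0, ...} and
// ConvolutionInfo{pad_stride_info, depth_multiplier, act_info, dilation}.
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/function_info/ConvolutionInfo.h"

#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h"

using namespace arm_compute;

Status check_dwc_native_config()
{
    // NHWC tensor infos; shapes are illustrative (channels-first storage order: [C, W, H, N]).
    TensorInfo input(TensorShape(32U, 16U, 16U, 1U), 1, DataType::F32);
    TensorInfo weights(TensorShape(32U, 3U, 3U), 1, DataType::F32);
    TensorInfo biases(TensorShape(32U), 1, DataType::F32);
    TensorInfo output(TensorShape(32U, 16U, 16U, 1U), 1, DataType::F32);
    input.set_data_layout(DataLayout::NHWC);
    weights.set_data_layout(DataLayout::NHWC);
    output.set_data_layout(DataLayout::NHWC);

    // Per-work-item tiling hints that replace the old DWCWeightsKernelInfo/DWCKernelInfo pair.
    DWCComputeKernelInfo dwc_info{};
    dwc_info.n0 = 4; // channels processed per work-item (assumed field name)
    dwc_info.m0 = 1; // rows processed per work-item (assumed field name)

    // Padding/stride, depth multiplier and dilation are now carried by a single ConvolutionInfo.
    ConvolutionInfo conv_info{};
    conv_info.pad_stride_info  = PadStrideInfo(1, 1, 1, 1); // stride 1, pad 1: same-size output for a 3x3 filter
    conv_info.depth_multiplier = 1;
    conv_info.dilation         = Size2D(1U, 1U);

    // output_multipliers/output_shifts keep their nullptr defaults (float path).
    return CLDepthwiseConvolutionLayerNativeKernel::validate(&input, &weights, &biases, &output,
                                                             dwc_info, conv_info);
}

Both configure() overloads take the same dwc_info/conv_info pair; the old dwc_weights_info, depth_multiplier and dilation arguments are gone, folded into these two descriptors.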