From 76335eb8d8733b0bbc0110546797211540870c50 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 17 Nov 2022 11:03:39 +0000
Subject: Implement the OpenCL kernel to compute the indirect convolution

- Implement indirect convolution kernel
- Add operator support
- Add test

Resolves COMPMID-5709

Change-Id: I9272304163471a5a40da7fdec204599f3c1d8e32
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8701
Comments-Addressed: Arm Jenkins
Reviewed-by: Gunes Bayir
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 .../CL/functions/CLIndirectConvolutionLayer.h | 120 +++++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h

diff --git a/arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h
new file mode 100644
index 0000000000..8185f8df78
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CLINDIRECTCONVOLUTIONLAYER_H
+#define ARM_COMPUTE_CLINDIRECTCONVOLUTIONLAYER_H
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class CLCompileContext;
+class ICLTensor;
+class ITensorInfo;
+
+/** Basic function to run the indirect convolution function
+ */
+class CLIndirectConvolutionLayer : public IFunction
+{
+public:
+    /** Constructor */
+    CLIndirectConvolutionLayer();
+    /** Destructor */
+    ~CLIndirectConvolutionLayer();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLIndirectConvolutionLayer(const CLIndirectConvolutionLayer &) = delete;
+    /** Default move constructor */
+    CLIndirectConvolutionLayer(CLIndirectConvolutionLayer &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLIndirectConvolutionLayer &operator=(const CLIndirectConvolutionLayer &) = delete;
+    /** Default move assignment operator */
+    CLIndirectConvolutionLayer &operator=(CLIndirectConvolutionLayer &&);
+    /** Set the input and output tensors.
+     *
+     * Valid data layouts:
+     * - NHWC
+     *
+     * Valid data type configurations:
+     * |src0           |src1           |src2   |dst            |
+     * |:--------------|:--------------|:------|:--------------|
+     * |F16            |F16            |F16    |F16            |
+     * |F32            |F32            |F32    |F32            |
+     *
+     * @param[in]  input     Source tensor. 3 lower dimensions represent a single input,
+     *                       while every optional dimension from 4 and above represents a batch of inputs.
+     *                       Data types supported: F16/F32.
+     * @param[in]  weights   Weights tensor. Weights are a 4D tensor. Data type supported: Same as @p input.
+     * @param[in]  biases    Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
+     *                       Data type supported: Should match @p input data type.
+     * @param[out] output    Destination tensor. 3 lower dimensions represent a single output, while the rest represent a batch of outputs.
+     *                       Data types supported: Same as @p input.
+     * @param[in]  conv_info Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in]  act_info  (Optional) Activation layer information in case of a fused activation.
+     */
+    void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Set the input and output tensors.
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  input           Source tensor. 3 lower dimensions represent a single input,
+     *                             while every optional dimension from 4 and above represents a batch of inputs.
+     *                             Data types supported: F16/F32.
+     * @param[in]  weights         Weights tensor. Weights are a 4D tensor. Data type supported: Same as @p input.
+     * @param[in]  biases          Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
+     *                             Data type supported: Should match @p input data type.
+     * @param[out] output          Destination tensor. 3 lower dimensions represent a single output, while the rest represent a batch of outputs.
+     *                             Data types supported: Same as @p input.
+     * @param[in]  conv_info       Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in]  act_info        (Optional) Activation layer information in case of a fused activation.
+     */
+    void configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
+                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if the given info will lead to a valid configuration of @ref CLIndirectConvolutionLayer
+     *
+     * @param[in] input     Source tensor. 3 lower dimensions represent a single input,
+     *                      while every optional dimension from 4 and above represents a batch of inputs.
+     *                      Data types supported: F16/F32.
+     * @param[in] weights   Weights tensor. Weights are a 4D tensor. Data type supported: Same as @p input.
+     * @param[in] biases    Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
+     *                      Data type supported: Should match @p input data type.
+     * @param[in] output    Destination tensor. 3 lower dimensions represent a single output, while the rest represent a batch of outputs.
+     *                      Data types supported: Same as @p input.
+     * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in] act_info  (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
+                           const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CLINDIRECTCONVOLUTIONLAYER_H */
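For context, below is a minimal usage sketch of the new function. It is not part of the patch: the tensor shapes, the 3x3 kernel, the stride/padding values and the fused RELU activation are illustrative assumptions, and the surrounding CLTensor/CLScheduler calls follow the library's usual runtime pattern rather than anything specified by this commit.

// Minimal usage sketch (not part of the patch). Shapes, strides and the fused
// activation are illustrative assumptions, not values taken from the commit.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLIndirectConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Create the default OpenCL context and command queue for the CL runtime.
    CLScheduler::get().default_init();

    // NHWC tensors: TensorShape lists the fastest-changing dimension first,
    // so an NHWC tensor is declared as (C, W, H, N).
    CLTensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(64U, 56U, 56U, 1U), 1, DataType::F32, DataLayout::NHWC));     // 1x56x56x64 input
    weights.allocator()->init(TensorInfo(TensorShape(64U, 3U, 3U, 128U), 1, DataType::F32, DataLayout::NHWC)); // 128 filters of 3x3x64
    biases.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));                                 // one bias per OFM
    dst.allocator()->init(TensorInfo(TensorShape(128U, 56U, 56U, 1U), 1, DataType::F32, DataLayout::NHWC));    // same spatial size (pad 1, stride 1)

    const PadStrideInfo       conv_info(1, 1, 1, 1); // stride 1x1, padding 1x1
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

    // Check the configuration up front with the static validate() helper.
    const Status status = CLIndirectConvolutionLayer::validate(src.info(), weights.info(), biases.info(), dst.info(), conv_info, act_info);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1; // configuration not supported
    }

    CLIndirectConvolutionLayer conv;
    conv.configure(&src, &weights, &biases, &dst, conv_info, act_info);

    // Allocate the backing CL buffers (filling them is omitted here), then run.
    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    conv.run();
    CLScheduler::get().sync(); // wait for the command queue to finish
    return 0;
}

As with other CL functions in the library, configure() is called before the tensors' backing memory is allocated, so the function can adjust tensor padding requirements before the buffers are created.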