From ad7515d231acb075a9585e52f257373b1a1b5d1f Mon Sep 17 00:00:00 2001 From: Michalis Spyrou Date: Fri, 24 Jul 2020 00:02:23 +0100 Subject: COMPMID-3385: Async support to CLArithmetic* kernels/functions Pt.1 Signed-off-by: Michalis Spyrou Change-Id: I94007565e688f8a0aead4f14c9fc30bfd9f9f7eb Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3613 Tested-by: Arm Jenkins Reviewed-by: Georgios Pinitas --- .../core/CL/kernels/CLElementwiseOperationKernel.h | 56 +-- arm_compute/core/CL/kernels/CLFillBorderKernel.h | 12 +- .../runtime/CL/functions/CLElementwiseOperations.h | 500 ++++++++++++++++++++- arm_compute/runtime/CL/functions/CLLSTMLayer.h | 169 ++++--- arm_compute/runtime/CL/functions/CLPReluLayer.h | 65 ++- arm_compute/runtime/CL/functions/CLQLSTMLayer.h | 137 +++--- arm_compute/runtime/CL/functions/CLRNNLayer.h | 22 +- .../CL/kernels/CLElementwiseOperationKernel.cpp | 45 +- src/core/CL/kernels/CLFillBorderKernel.cpp | 55 ++- src/runtime/CL/CLOperator.cpp | 2 +- .../CL/functions/CLElementwiseOperations.cpp | 428 ++++++++++++++++-- src/runtime/CL/functions/CLLSTMLayer.cpp | 30 +- src/runtime/CL/functions/CLPReluLayer.cpp | 83 +++- src/runtime/CL/functions/CLQLSTMLayer.cpp | 68 +-- src/runtime/CL/functions/CLRNNLayer.cpp | 6 +- 15 files changed, 1354 insertions(+), 324 deletions(-) diff --git a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h index 1995aed7b6..76bc879638 100644 --- a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h +++ b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h @@ -54,7 +54,7 @@ public: ~CLElementwiseOperationKernel() = default; // Inherited methods overridden: - void run(const Window &window, cl::CommandQueue &queue) override; + void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, cl::CommandQueue &queue) override; BorderSize border_size() const override; @@ -64,9 +64,9 @@ protected: /** Initialise the kernel's output. * - * @param[in] input1 First tensor input. Data types supported: U8/S8/QASYMM8/QASYMM8_SIGNED/U16/S16/F16/U32/S32/F32. - * @param[in] input2 Second tensor input. Data types supported: Same as @p input1. - * @param[in] output Output tensor. Data types supported: Same as @p input1. + * @param[in] input1 First tensor input info. Data types supported: U8/S8/QASYMM8/QASYMM8_SIGNED/U16/S16/F16/U32/S32/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. 
* * @return a pair of Status and Window */ @@ -87,18 +87,18 @@ protected: /** Common configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff) * */ - void configure_common(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output); + void configure_common(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output); /** Common configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff) * */ - void configure_common(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output); + void configure_common(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output); ActivationLayerInfo _act_info; private: - const ICLTensor *_input1; /**< Source tensor 1 */ - const ICLTensor *_input2; /**< Source tensor 2 */ - ICLTensor *_output; /**< Destination tensor */ + const ITensorInfo *_input1; /**< Source tensor info 1 */ + const ITensorInfo *_input2; /**< Source tensor info 2 */ + ITensorInfo *_output; /**< Destination tensor info */ }; /** Addition operation */ @@ -113,32 +113,32 @@ public: /** Initialise the kernel's inputs, output and conversion policy. * * @param[in] op Arithmetic operation to be executed. - * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] input2 Second tensor input. Data types supported: Same as @p input1. - * @param[in] output Output tensor. Data types supported: Same as @p input1. + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. * @param[in] policy Policy to use to handle overflow. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Initialise the kernel's inputs, output and conversion policy. * * @param[in] compile_context The compile context to be used. * @param[in] op Arithmetic operation to be executed. - * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] input2 Second tensor input. Data types supported: Same as @p input1. - * @param[in] output Output tensor. Data types supported: Same as @p input1. + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. * @param[in] policy Policy to use to handle overflow. * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/ - void configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy, + void configure(const CLCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel * * @param[in] op Arithmetic operation to be executed. - * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. - * @param[in] output Output tensor info. Data types supported: Same as @p input1. + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. * @param[in] policy Policy to use to handle overflow. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. * @@ -170,22 +170,22 @@ public: /** Initialise the kernel's inputs, output. * * @param[in] op Arithmetic operation to be executed. - * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] input2 Second tensor input. Data types supported: Same as @p input1. - * @param[in] output Output tensor. Data types supported: Same as @p input1. + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Initialise the kernel's inputs, output. * * @param[in] compile_context The compile context to be used. * @param[in] op Arithmetic operation to be executed. - * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. - * @param[in] input2 Second tensor input. Data types supported: Same as @p input1. - * @param[in] output Output tensor. Data types supported: Same as @p input1. + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/S32/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
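The practical effect of the ICLTensor to ITensorInfo migration in these signatures is that a kernel is configured from tensor metadata alone and no longer captures the tensors it will run on; the buffers are bound later, at execution time. A minimal sketch of the new configuration flow, assuming the library's usual TensorInfo helpers (shapes, data types and the free function are illustrative, not part of this patch):

#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
#include "arm_compute/core/TensorInfo.h"

using namespace arm_compute;

void configure_add_kernel_sketch()
{
    // Operands are described by metadata only; no OpenCL buffer exists yet
    TensorInfo src0(TensorShape(16U, 16U), 1, DataType::F32);
    TensorInfo src1(TensorShape(16U, 16U), 1, DataType::F32);
    TensorInfo dst(TensorShape(16U, 16U), 1, DataType::F32);

    CLSaturatedArithmeticOperationKernel kernel;
    kernel.configure(ArithmeticOperation::ADD, &src0, &src1, &dst, ConvertPolicy::SATURATE);
    // The concrete tensors are supplied later through run_op()'s tensor maps
}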
*/ - void configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, + void configure(const CLCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel diff --git a/arm_compute/core/CL/kernels/CLFillBorderKernel.h b/arm_compute/core/CL/kernels/CLFillBorderKernel.h index 0a4de25ac3..8cad68dc1a 100644 --- a/arm_compute/core/CL/kernels/CLFillBorderKernel.h +++ b/arm_compute/core/CL/kernels/CLFillBorderKernel.h @@ -49,6 +49,15 @@ public: /** Default destructor */ ~CLFillBorderKernel() = default; + /** Initialise the kernel's input, output and border mode. + * + * @param[in] compile_context The compile context to be used. + * @param[in,out] tensor Tensor to process. Data types supported: U8/QASYMM8/S8/QASYMM8_SIGNED/U16/S16/U32/S32/F16/F32. + * @param[in] border_size Size of the border to fill in elements. + * @param[in] border_mode Border mode to use for the convolution. + * @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT. + */ + void configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue()); /** Initialise the kernel's input, output and border mode. * * @param[in,out] tensor Tensor to process. Data types supported: U8/QASYMM8/S8/QASYMM8_SIGNED/U16/S16/U32/S32/F16/F32. @@ -65,7 +74,7 @@ public: * @param[in] border_size Size of the border to fill in elements. * @param[in] border_mode Border mode to use for the convolution. * @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT. */ - void configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue()); + void configure(const CLCompileContext &compile_context, ITensorInfo *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value = PixelValue()); /** Function to set the constant value on fill border kernel depending on type.
* @@ -76,6 +85,7 @@ public: void set_constant_border(unsigned int idx, const PixelValue &constant_border_value); // Inherited methods overridden: + void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, cl::CommandQueue &queue) override; void run(const Window &window, cl::CommandQueue &queue) override; bool is_parallelisable() const override; diff --git a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h index 9cd3c150cc..5af24c90ac 100644 --- a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h +++ b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h @@ -24,21 +24,372 @@ #ifndef ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H #define ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" +#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h" +#include "arm_compute/runtime/CL/ICLOperator.h" +#include "arm_compute/runtime/IFunction.h" namespace arm_compute { class ICLTensor; +namespace experimental +{ +/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition + * + * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @note The function performs an arithmetic addition between two tensors. + */ +class CLArithmeticAddition : public ICLOperator +{ +public: + /** Default Constructor */ + CLArithmeticAddition(); + /** Initialise the kernel's inputs, output and conversion policy. + * + * Valid configurations (Input1,Input2) -> Output : + * + * - (U8,U8) -> U8 + * - (U8,U8) -> S16 + * - (S16,U8) -> S16 + * - (U8,S16) -> S16 + * - (S16,S16) -> S16 + * - (S32,S32) -> S32 + * - (F16,F16) -> F16 + * - (F32,F32) -> F32 + * - (QASYMM8,QASYMM8) -> QASYMM8 + * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED + * - (QSYMM16,QSYMM16) -> QSYMM16 + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] policy Policy to use to handle overflow. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition + * + * Valid configurations (Input1,Input2) -> Output : + * + * - (U8,U8) -> U8 + * - (U8,U8) -> S16 + * - (S16,U8) -> S16 + * - (U8,S16) -> S16 + * - (S16,S16) -> S16 + * - (S32,S32) -> S32 + * - (F16,F16) -> F16 + * - (F32,F32) -> F32 + * - (QASYMM8,QASYMM8) -> QASYMM8 + * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED + * - (QSYMM16,QSYMM16) -> QSYMM16 + * + * @param[in] input1 First tensor input info. 
Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] policy Policy to use to handle overflow. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; + +/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction + * + * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32. + * @note The function performs an arithmetic subtraction between two tensors. + */ +class CLArithmeticSubtraction : public ICLOperator +{ +public: + /** Default Constructor */ + CLArithmeticSubtraction(); + /** Initialise the kernel's inputs, output and conversion policy. + * + * Valid configurations (Input1,Input2) -> Output : + * + * - (U8,U8) -> U8 + * - (U8,U8) -> S16 + * - (S16,U8) -> S16 + * - (U8,S16) -> S16 + * - (S16,S16) -> S16 + * - (S32,S32) -> S32 + * - (F16,F16) -> F16 + * - (F32,F32) -> F32 + * - (QASYMM8,QASYMM8) -> QASYMM8 + * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED + * - (QSYMM16,QSYMM16) -> QSYMM16 + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] policy Policy to use to handle overflow. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction + * + * Valid configurations (Input1,Input2) -> Output : + * + * - (U8,U8) -> U8 + * - (U8,U8) -> S16 + * - (S16,U8) -> S16 + * - (U8,S16) -> S16 + * - (S16,S16) -> S16 + * - (S32,S32) -> S32 + * - (F16,F16) -> F16 + * - (F32,F32) -> F32 + * - (QASYMM8,QASYMM8) -> QASYMM8 + * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED + * - (QSYMM16,QSYMM16) -> QSYMM16 + * + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] output Output tensor info. 
Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. + * @param[in] policy Policy to use to handle overflow. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; + +/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for division + * + * @note The tensor data type for the inputs must be F16/F32. + * @note The function performs an arithmetic division between two tensors. + */ +class CLArithmeticDivision : public ICLOperator +{ +public: + /** Default Constructor */ + CLArithmeticDivision(); + /** Initialise the kernel's inputs, output. + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Same as @p input1. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: Same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision + * + * @param[in] input1 First tensor input info. Data types supported: F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. + * @param[in] output Output tensor info. Data types supported: Same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; + +/** Basic function to run @ref CLArithmeticOperationKernel for max + * + * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32. + * @note The function performs a max operation between two tensors. + */ +class CLElementwiseMax : public ICLOperator +{ +public: + /** Default Constructor */ + CLElementwiseMax(); + /** Initialise the kernel's inputs, output and conversion policy. + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1. 
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max + * + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: same as @p input1. + * @param[in] output Output tensor info. Data types supported: same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; + +/** Basic function to run @ref CLArithmeticOperationKernel for min + * + * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32. + * @note The function performs a min operation between two tensors. + */ +class CLElementwiseMin : public ICLOperator +{ +public: + /** Default Constructor */ + CLElementwiseMin(); + /** Initialise the kernel's inputs, output and conversion policy. + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min + * + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: same as @p input1. + * @param[in] output Output tensor info. Data types supported: same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
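All of these experimental operators share one execution model: configure() sees only ITensorInfo descriptors, and the concrete tensors reach run() through the tensor maps visible in the overrides above. A sketch of that flow for the max operator; the map key constants (ACL_SRC_0, ACL_SRC_1, ACL_DST) are an assumption based on the library's TensorType enum, as they do not appear in this patch:

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

using namespace arm_compute;

void run_max_sketch(CLTensor &a, CLTensor &b, CLTensor &out)
{
    // Configure once against metadata; the operator itself holds no tensor state
    experimental::CLElementwiseMax op;
    op.configure(CLKernelLibrary::get().get_compile_context(), a.info(), b.info(), out.info());

    // Bind the actual tensors only at execution time
    InputTensorMap  inputs  = { { ACL_SRC_0, &a }, { ACL_SRC_1, &b } }; // assumed key constants
    OutputTensorMap outputs = { { ACL_DST, &out } };
    op.run(inputs, outputs, {});
}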
+ * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; + +/** Basic function to run @ref CLArithmeticOperationKernel for squared difference + * + * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32. + * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2) + */ +class CLElementwiseSquaredDiff : public ICLOperator +{ +public: + /** Default Constructor */ + CLElementwiseSquaredDiff(); + /** Initialise the kernel's inputs, output and conversion policy. + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Data types supported: same as @p input1. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference + * + * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: same as @p input1. + * @param[in] output Output tensor info. Data types supported: same as @p input1. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; + +/** Basic function to run @ref CLArithmeticOperationKernel for power + * + * @note The tensor data type for the inputs must be F16/F32. + * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i]) + */ +class CLElementwisePower : public ICLOperator +{ +public: + /** Default Constructor */ + CLElementwisePower(); + /** Initialise the kernel's inputs, output and conversion policy. + * + * @param[in] compile_context The compile context to be used. + * @param[in, out] input1 First tensor input. Data types supported: F16/F32. + * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[in, out] input2 Second tensor input. Data types supported: F16/F32.
+ * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. + * @param[out] output Output tensor. Data types supported: F16/F32. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for power + * + * @param[in] input1 First tensor input info. Data types supported: F16/F32. + * @param[in] input2 Second tensor input info. Data types supported: F16/F32. + * @param[in] output Output tensor info. Data types supported: F16/F32. + * @param[in] act_info (Optional) Activation layer information in case of a fused activation. + * + * @return a status + */ + static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; +} // namespace experimental + /** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition * * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32. * @note The function performs an arithmetic addition between two tensors. */ -class CLArithmeticAddition : public ICLSimpleFunction +class CLArithmeticAddition : public IFunction { public: + /** Default Constructor */ + CLArithmeticAddition(); + /** Default Destructor */ + ~CLArithmeticAddition(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLArithmeticAddition(const CLArithmeticAddition &) = delete; + /** Default move constructor */ + CLArithmeticAddition(CLArithmeticAddition &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLArithmeticAddition &operator=(const CLArithmeticAddition &) = delete; + /** Default move assignment operator */ + CLArithmeticAddition &operator=(CLArithmeticAddition &&); /** Initialise the kernel's inputs, output and conversion policy. * * Valid configurations (Input1,Input2) -> Output : @@ -89,7 +440,8 @@ public: * @param[in] policy Policy to use to handle overflow. * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/ - void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition * * Valid configurations (Input1,Input2) -> Output : @@ -115,6 +467,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; /** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction * * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32. * @note The function performs an arithmetic subtraction between two tensors. */ -class CLArithmeticSubtraction : public ICLSimpleFunction +class CLArithmeticSubtraction : public IFunction { public: + /** Default Constructor */ + CLArithmeticSubtraction(); + /** Default Destructor */ + ~CLArithmeticSubtraction(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLArithmeticSubtraction(const CLArithmeticSubtraction &) = delete; + /** Default move constructor */ + CLArithmeticSubtraction(CLArithmeticSubtraction &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLArithmeticSubtraction &operator=(const CLArithmeticSubtraction &) = delete; + /** Default move assignment operator */ + CLArithmeticSubtraction &operator=(CLArithmeticSubtraction &&); /** Initialise the kernel's inputs, output and conversion policy. * * Valid configurations (Input1,Input2) -> Output : * @@ -149,7 +520,7 @@ public: * @param[in] policy Policy to use to handle overflow. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Initialise the kernel's inputs, output and conversion policy. * * Valid configurations (Input1,Input2) -> Output : * @@ -175,7 +546,8 @@ public: * @param[in] policy Policy to use to handle overflow. * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
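At the function level the public signatures keep their ICLTensor arguments (at most gaining const), so existing call sites compile unchanged; only the internals now route through the experimental operator. A minimal usage sketch assuming the library's usual CLScheduler/CLTensor setup, with illustrative shapes:

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // create context and queue

    CLTensor a, b, out;
    const TensorInfo info(TensorShape(8U, 8U), 1, DataType::F32);
    a.allocator()->init(info);
    b.allocator()->init(info);
    out.allocator()->init(info);
    a.allocator()->allocate();
    b.allocator()->allocate();
    out.allocator()->allocate();

    // ... fill a and b ...

    CLArithmeticAddition add;
    add.configure(&a, &b, &out, ConvertPolicy::SATURATE);
    add.run();                 // internally binds the tensors and runs the operator
    CLScheduler::get().sync(); // wait for the OpenCL queue to drain
    return 0;
}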
*/ - void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction * * Valid configurations (Input1,Input2) -> Output : @@ -201,6 +573,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; /** Basic function to run @ref CLSaturatedArithmeticOperationKernel for division * * @note The tensor data type for the inputs must be F16/F32. * @note The function performs an arithmetic division between two tensors. */ -class CLArithmeticDivision : public ICLSimpleFunction +class CLArithmeticDivision : public IFunction { public: + /** Default Constructor */ + CLArithmeticDivision(); + /** Default Destructor */ + ~CLArithmeticDivision(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLArithmeticDivision(const CLArithmeticDivision &) = delete; + /** Default move constructor */ + CLArithmeticDivision(CLArithmeticDivision &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLArithmeticDivision &operator=(const CLArithmeticDivision &) = delete; + /** Default move assignment operator */ + CLArithmeticDivision &operator=(CLArithmeticDivision &&); /** Initialise the kernel's inputs, output. * * @param[in, out] input1 First tensor input. Data types supported: F16/F32. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. * @param[in, out] input2 Second tensor input. Same as @p input1. * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0. * @param[out] output Output tensor. Data types supported: Same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. */ - void configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision * * @param[in] input1 First tensor input info. Data types supported: F16/F32. * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1. * @param[in] output Output tensor info. Data types supported: Same as @p input1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. * * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; /** Basic function to run @ref CLArithmeticOperationKernel for max * * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32. * @note The function performs a max operation between two tensors.
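Because each validate() mirrors its configure() but takes only ITensorInfo descriptors, a configuration can be rejected before any OpenCL buffer is allocated. A small sketch (the helper function is illustrative, not part of the library):

#include <iostream>
#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

using namespace arm_compute;

bool max_config_is_valid(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *out)
{
    const Status s = CLElementwiseMax::validate(a, b, out);
    if(s.error_code() != ErrorCode::OK)
    {
        std::cerr << s.error_description() << std::endl; // reason the configuration was rejected
        return false;
    }
    return true;
}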
*/ -class CLElementwiseMax : public ICLSimpleFunction +class CLElementwiseMax : public IFunction { public: + /** Default Constructor */ + CLElementwiseMax(); + /** Default Destructor */ + ~CLElementwiseMax(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwiseMax(const CLElementwiseMax &) = delete; + /** Default move constructor */ + CLElementwiseMax(CLElementwiseMax &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwiseMax &operator=(const CLElementwiseMax &) = delete; + /** Default move assignment operator */ + CLElementwiseMax &operator=(CLElementwiseMax &&); /** Initialise the kernel's inputs, output and conversion policy. * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. @@ -283,6 +693,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; /** Basic function to run @ref CLArithmeticOperationKernel for min * * @note The tensor data type for the inputs must be U8/QASYMM8/S16/QSYMM16/S32/U32/F16/F32. * @note The function performs a min operation between two tensors. */ -class CLElementwiseMin : public ICLSimpleFunction +class CLElementwiseMin : public IFunction { public: + /** Default Constructor */ + CLElementwiseMin(); + /** Default Destructor */ + ~CLElementwiseMin(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwiseMin(const CLElementwiseMin &) = delete; + /** Default move constructor */ + CLElementwiseMin(CLElementwiseMin &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwiseMin &operator=(const CLElementwiseMin &) = delete; + /** Default move assignment operator */ + CLElementwiseMin &operator=(CLElementwiseMin &&); /** Initialise the kernel's inputs, output and conversion policy. * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/U32/F16/F32. @@ -324,6 +753,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; /** Basic function to run @ref CLArithmeticOperationKernel for squared difference * * @note The tensor data type for the inputs must be QASYMM8/U8/S16/QSYMM16/F16/F32.
* @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2) */ -class CLElementwiseSquaredDiff : public ICLSimpleFunction +class CLElementwiseSquaredDiff : public IFunction { public: + /** Default Constructor */ + CLElementwiseSquaredDiff(); + /** Default Destructor */ + ~CLElementwiseSquaredDiff(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwiseSquaredDiff(const CLElementwiseSquaredDiff &) = delete; + /** Default move constructor */ + CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwiseSquaredDiff &operator=(const CLElementwiseSquaredDiff &) = delete; + /** Default move assignment operator */ + CLElementwiseSquaredDiff &operator=(CLElementwiseSquaredDiff &&); /** Initialise the kernel's inputs, output and conversion policy. * * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32. @@ -365,6 +813,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; /** Basic function to run @ref CLArithmeticOperationKernel for power * * @note The tensor data type for the inputs must be F16/F32. * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i]) */ -class CLElementwisePower : public ICLSimpleFunction +class CLElementwisePower : public IFunction { public: + /** Default Constructor */ + CLElementwisePower(); + /** Default Destructor */ + ~CLElementwisePower(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwisePower(const CLElementwisePower &) = delete; + /** Default move constructor */ + CLElementwisePower(CLElementwisePower &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLElementwisePower &operator=(const CLElementwisePower &) = delete; + /** Default move assignment operator */ + CLElementwisePower &operator=(CLElementwisePower &&); /** Initialise the kernel's inputs, output and conversion policy. * * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
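The struct Impl; plus std::unique_ptr<Impl> _impl; pair repeated across these classes is a pimpl: the header no longer names the experimental operator or any kernel type, which keeps kernel headers out of the public include graph and is also why copies are deleted while moves are merely declared. Based on the pattern this patch applies in the corresponding .cpp files, the implementation struct typically aggregates the borrowed tensors and the operator, roughly as in this sketch (member names are an assumption, not copied from the patch):

struct CLElementwisePower::Impl
{
    const ICLTensor                                   *src_0{ nullptr }; // borrowed at configure() time, not owned
    const ICLTensor                                   *src_1{ nullptr };
    ICLTensor                                         *dst{ nullptr };
    std::unique_ptr<experimental::CLElementwisePower>  op{ nullptr };
};

The function's run() then packs src_0, src_1 and dst into the input and output tensor maps and forwards to op->run().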
@@ -406,6 +873,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo()); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; } // namespace arm_compute #endif /* ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H */ diff --git a/arm_compute/runtime/CL/functions/CLLSTMLayer.h b/arm_compute/runtime/CL/functions/CLLSTMLayer.h index e5733cd784..abfcc3a62f 100644 --- a/arm_compute/runtime/CL/functions/CLLSTMLayer.h +++ b/arm_compute/runtime/CL/functions/CLLSTMLayer.h @@ -27,7 +27,6 @@ #include "arm_compute/runtime/IFunction.h" #include "arm_compute/core/CL/kernels/CLCopyKernel.h" -#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" #include "arm_compute/core/CL/kernels/CLMemsetKernel.h" #include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h" #include "arm_compute/core/Types.h" @@ -201,90 +200,90 @@ public: void prepare() override; private: - MemoryGroup _memory_group; - CLFullyConnectedLayer _fully_connected_input_gate; - CLArithmeticAddition _accum_input_gate1; - CLSaturatedArithmeticOperationKernel _subtract_input_gate; - CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate; - CLActivationLayer _activation_input_gate; - CLFullyConnectedLayer _fully_connected_forget_gate; - CLArithmeticAddition _accum_forget_gate1; - CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate; - CLActivationLayer _activation_forget_gate; - CLFullyConnectedLayer _fully_connected_cell_state; - CLGEMM _gemm_cell_state1; - CLTransposeKernel _transpose_cell_state; - CLSaturatedArithmeticOperationKernel _accum_cell_state1; - CLSaturatedArithmeticOperationKernel _accum_cell_state2; - CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state1; - CLActivationLayer _activation_cell_state; - CLActivationLayer _cell_clip; - CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state2; - CLFullyConnectedLayer _fully_connected_output; - CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state1; - CLArithmeticAddition _accum_output1; - CLActivationLayer _activation_output; - CLActivationLayer _activation_output_state; - CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state2; - CLFullyConnectedLayer _fully_connected_output_state; - CLActivationLayer _projection_clip; - CLCopyKernel _copy_cell_state; - CLCopyKernel _copy_output; - CLConcatenateLayer _concat_scratch_buffer; - CLConcatenateLayer _concat_inputs_forget_gate; - CLConcatenateLayer _concat_weights_forget_gate; - CLConcatenateLayer _concat_weights_input_gate; - CLConcatenateLayer _concat_weights_output; - CLMemsetKernel _ones_memset_kernel; - CLMeanStdDevNormalizationLayer _mean_std_norm_input_gate; - CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate_coeff; - CLSaturatedArithmeticOperationKernel _accum_input_gate_bias; - CLMeanStdDevNormalizationLayer _mean_std_norm_forget_gate; - CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate_coeff; - CLSaturatedArithmeticOperationKernel _accum_forget_gate_bias; - CLMeanStdDevNormalizationLayer _mean_std_norm_cell_gate; - CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_gate_coeff; - CLSaturatedArithmeticOperationKernel _accum_cell_gate_bias; - CLMeanStdDevNormalizationLayer _mean_std_norm_output_gate; - CLPixelWiseMultiplicationKernel _pixelwise_mul_output_gate_coeff; - CLSaturatedArithmeticOperationKernel _accum_output_gate_bias; - CLTensor _input_gate_out1; - CLTensor
_input_gate_out2; - CLTensor _input_gate_out3; - CLTensor _input_gate_out4; - CLTensor _forget_gate_out1; - CLTensor _forget_gate_out2; - CLTensor _forget_gate_out3; - CLTensor _forget_gate_out4; - CLTensor _forget_gate_out5; - CLTensor _forget_gate_out6; - CLTensor _cell_state_out1; - CLTensor _cell_state_out2; - CLTensor _cell_state_out3; - CLTensor _cell_state_out4; - CLTensor _cell_state_out5; - CLTensor _output1; - CLTensor _output2; - CLTensor _output3; - CLTensor _output4; - CLTensor _cell_state_activation; - CLTensor _output_state1; - CLTensor _ones; - CLTensor _input_layer_norm_out1; - CLTensor _input_layer_norm_out2; - CLTensor _forget_layer_norm_out1; - CLTensor _forget_layer_norm_out2; - CLTensor _cell_layer_norm_out1; - CLTensor _cell_layer_norm_out2; - CLTensor _output_layer_norm_out1; - CLTensor _output_layer_norm_out2; - bool _run_peephole_opt; - bool _run_cifg_opt; - bool _perform_cell_clipping; - bool _has_projection_weights; - bool _perform_projection_clipping; - bool _is_prepared; - bool _is_layer_norm_lstm; + MemoryGroup _memory_group; + CLFullyConnectedLayer _fully_connected_input_gate; + CLArithmeticAddition _accum_input_gate1; + CLArithmeticSubtraction _subtract_input_gate; + CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate; + CLActivationLayer _activation_input_gate; + CLFullyConnectedLayer _fully_connected_forget_gate; + CLArithmeticAddition _accum_forget_gate1; + CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate; + CLActivationLayer _activation_forget_gate; + CLFullyConnectedLayer _fully_connected_cell_state; + CLGEMM _gemm_cell_state1; + CLTransposeKernel _transpose_cell_state; + CLArithmeticAddition _accum_cell_state1; + CLArithmeticAddition _accum_cell_state2; + CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state1; + CLActivationLayer _activation_cell_state; + CLActivationLayer _cell_clip; + CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state2; + CLFullyConnectedLayer _fully_connected_output; + CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state1; + CLArithmeticAddition _accum_output1; + CLActivationLayer _activation_output; + CLActivationLayer _activation_output_state; + CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state2; + CLFullyConnectedLayer _fully_connected_output_state; + CLActivationLayer _projection_clip; + CLCopyKernel _copy_cell_state; + CLCopyKernel _copy_output; + CLConcatenateLayer _concat_scratch_buffer; + CLConcatenateLayer _concat_inputs_forget_gate; + CLConcatenateLayer _concat_weights_forget_gate; + CLConcatenateLayer _concat_weights_input_gate; + CLConcatenateLayer _concat_weights_output; + CLMemsetKernel _ones_memset_kernel; + CLMeanStdDevNormalizationLayer _mean_std_norm_input_gate; + CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate_coeff; + CLArithmeticAddition _accum_input_gate_bias; + CLMeanStdDevNormalizationLayer _mean_std_norm_forget_gate; + CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate_coeff; + CLArithmeticAddition _accum_forget_gate_bias; + CLMeanStdDevNormalizationLayer _mean_std_norm_cell_gate; + CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_gate_coeff; + CLArithmeticAddition _accum_cell_gate_bias; + CLMeanStdDevNormalizationLayer _mean_std_norm_output_gate; + CLPixelWiseMultiplicationKernel _pixelwise_mul_output_gate_coeff; + CLArithmeticAddition _accum_output_gate_bias; + CLTensor _input_gate_out1; + CLTensor _input_gate_out2; + CLTensor _input_gate_out3; + CLTensor _input_gate_out4; + CLTensor _forget_gate_out1; + CLTensor _forget_gate_out2; + 
CLTensor _forget_gate_out3; + CLTensor _forget_gate_out4; + CLTensor _forget_gate_out5; + CLTensor _forget_gate_out6; + CLTensor _cell_state_out1; + CLTensor _cell_state_out2; + CLTensor _cell_state_out3; + CLTensor _cell_state_out4; + CLTensor _cell_state_out5; + CLTensor _output1; + CLTensor _output2; + CLTensor _output3; + CLTensor _output4; + CLTensor _cell_state_activation; + CLTensor _output_state1; + CLTensor _ones; + CLTensor _input_layer_norm_out1; + CLTensor _input_layer_norm_out2; + CLTensor _forget_layer_norm_out1; + CLTensor _forget_layer_norm_out2; + CLTensor _cell_layer_norm_out1; + CLTensor _cell_layer_norm_out2; + CLTensor _output_layer_norm_out1; + CLTensor _output_layer_norm_out2; + bool _run_peephole_opt; + bool _run_cifg_opt; + bool _perform_cell_clipping; + bool _has_projection_weights; + bool _perform_projection_clipping; + bool _is_prepared; + bool _is_layer_norm_lstm; }; } // namespace arm_compute #endif /* ARM_COMPUTE_CLLSTMLAYER_H */ diff --git a/arm_compute/runtime/CL/functions/CLPReluLayer.h b/arm_compute/runtime/CL/functions/CLPReluLayer.h index eb3d3be3e3..08567cccfb 100644 --- a/arm_compute/runtime/CL/functions/CLPReluLayer.h +++ b/arm_compute/runtime/CL/functions/CLPReluLayer.h @@ -24,20 +24,72 @@ #ifndef ARM_COMPUTE_CLPRELULAYER_H #define ARM_COMPUTE_CLPRELULAYER_H -#include "arm_compute/core/Types.h" -#include "arm_compute/runtime/CL/ICLSimpleFunction.h" +#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h" +#include "arm_compute/runtime/CL/ICLOperator.h" +#include "arm_compute/runtime/IFunction.h" namespace arm_compute { class ICLTensor; +namespace experimental +{ /** Basic function to run @ref CLArithmeticOperationKernel for PRELU * * @note The function implements an activation layer with the PRELU activation function. */ -class CLPReluLayer : public ICLSimpleFunction +class CLPReluLayer : public ICLOperator { public: + /** Default Constructor */ + CLPReluLayer(); + /** Set the input and output tensor. + * + * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place + * + * @param[in] compile_context The compile context to be used. + * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[in] alpha PRelu layer parameters. Data types supported: same as @p input. + * @param[out] output Destination tensor. Data type supported: same as @p input + */ + void configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *alpha, ITensorInfo *output); + /** Static function to check if given info will lead to a valid configuration of @ref CLPReluLayer + * + * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. + * @param[in] alpha PRelu layer parameters. Data types supported: same as @p input. + * @param[in] output Destination tensor info. Data type supported: same as @p input + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output); + + // Inherited methods overridden: + void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override; + +private: + CLFillBorderKernel _border_handler; +}; +} // namespace experimental + +/** Basic function to run @ref CLArithmeticOperationKernel for PRELU + * + * @note The function implements an activation layer with the PRELU activation function.
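As with the arithmetic functions, the public CLPReluLayer below keeps its ICLTensor interface and forwards to the experimental operator internally. A minimal usage sketch, assuming the tensors were initialised and allocated as in the earlier addition example:

#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPReluLayer.h"

using namespace arm_compute;

void prelu_sketch(CLTensor &input, CLTensor &alpha, CLTensor &output)
{
    CLPReluLayer prelu;
    prelu.configure(&input, &alpha, &output); // public signature is unchanged
    prelu.run();                              // dispatches to experimental::CLPReluLayer
}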
+ */ +class CLPReluLayer : public IFunction +{ +public: + /** Default Constructor */ + CLPReluLayer(); + /** Default Destructor */ + ~CLPReluLayer(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLPReluLayer(const CLPReluLayer &) = delete; + /** Default move constructor */ + CLPReluLayer(CLPReluLayer &&); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLPReluLayer &operator=(const CLPReluLayer &) = delete; + /** Default move assignment operator */ + CLPReluLayer &operator=(CLPReluLayer &&); /** Set the input and output tensor. * * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place @@ -66,6 +118,13 @@ public: * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output); + + // Inherited methods overridden: + void run() override; + +private: + struct Impl; + std::unique_ptr<Impl> _impl; }; } // namespace arm_compute #endif /* ARM_COMPUTE_CLPRELULAYER_H */ diff --git a/arm_compute/runtime/CL/functions/CLQLSTMLayer.h b/arm_compute/runtime/CL/functions/CLQLSTMLayer.h index 97ae9878ea..0aea91ae8e 100644 --- a/arm_compute/runtime/CL/functions/CLQLSTMLayer.h +++ b/arm_compute/runtime/CL/functions/CLQLSTMLayer.h @@ -25,12 +25,12 @@ #define ARM_COMPUTE_CLQLSTMLAYER_H #include "arm_compute/core/CL/kernels/CLCopyKernel.h" -#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" #include "arm_compute/core/CL/kernels/CLGEMMLowpReductionKernel.h" #include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h" #include "arm_compute/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h" #include "arm_compute/core/Types.h" #include "arm_compute/runtime/CL/functions/CLActivationLayer.h" +#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h" #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h" #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h" #include "arm_compute/runtime/CL/functions/CLTranspose.h" @@ -48,7 +48,7 @@ class ICLTensor; * * -# @ref CLActivationLayer Activation functions (tanh and logistic) * -# @ref CLCopyKernel Copy kernel for copying output_state_out to output - * -# @ref CLSaturatedArithmeticOperationKernel Elementwise addition and subtraction + * -# @ref CLArithmeticAddition Elementwise addition and subtraction * -# @ref CLGEMMLowpMatrixMultiplyCore Quantized matrix multiplication core.
Accumulators are 32-bit integers * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint Convert 32-bit integers into QSYMM16 * -# @ref CLGEMMLowpMatrixAReductionKernel For precomputing effective biases to use @@ -285,70 +285,70 @@ private: }; // Functions used - CLTranspose _transpose_input_to_forget_weights{}; - CLTranspose _transpose_input_to_cell_weights{}; - CLTranspose _transpose_input_to_output_weights{}; - CLTranspose _transpose_input_to_input_weights{}; - CLTranspose _transpose_recurrent_to_forget_weights{}; - CLTranspose _transpose_recurrent_to_cell_weights{}; - CLTranspose _transpose_recurrent_to_output_weights{}; - CLTranspose _transpose_recurrent_to_input_weights{}; - CLTranspose _transpose_projection_weights{}; - CLGEMMLowpMatrixAReductionKernel _input_to_input_reduction{}; - CLGEMMLowpMatrixAReductionKernel _recurrent_to_input_reduction{}; - CLGEMMLowpMatrixAReductionKernel _input_to_forget_reduction{}; - CLGEMMLowpMatrixAReductionKernel _recurrent_to_forget_reduction{}; - CLGEMMLowpMatrixAReductionKernel _input_to_cell_reduction{}; - CLGEMMLowpMatrixAReductionKernel _recurrent_to_cell_reduction{}; - CLGEMMLowpMatrixAReductionKernel _input_to_output_reduction{}; - CLGEMMLowpMatrixAReductionKernel _recurrent_to_output_reduction{}; - CLGEMMLowpMatrixAReductionKernel _projection_reduction{}; - CLSaturatedArithmeticOperationKernel _projection_bias_add{}; - CLGEMMLowpMatrixMultiplyCore _mm_input_to_forget{}; - CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_forget{}; - CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_to_forget{}; - CLGEMMLowpOutputStage _input_to_forget_outstage{}; - CLGEMMLowpOutputStage _recurrent_to_forget_outstage{}; - CLGEMMLowpOutputStage _cell_to_forget_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_input_recurrent_forget{}; - CLSaturatedArithmeticOperationKernel _accumulate_cell_forget{}; - CLActivationLayer _forget_gate_sigmoid{}; - CLGEMMLowpMatrixMultiplyCore _mm_input_to_cell{}; - CLGEMMLowpOutputStage _input_to_cell_outstage{}; - CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_cell{}; - CLGEMMLowpOutputStage _recurrent_to_cell_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_input_recurrent_modulation{}; - CLActivationLayer _cell_gate_tanh{}; - CLSaturatedArithmeticOperationKernel _input_gate_sub{}; - CLGEMMLowpMatrixMultiplyCore _mm_input_to_input{}; - CLGEMMLowpOutputStage _input_to_input_outstage{}; - CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_input{}; - CLGEMMLowpOutputStage _recurrent_to_input_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_input_recurrent_input{}; - CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_to_input{}; - CLGEMMLowpOutputStage _cell_to_input_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_cell_input{}; - CLActivationLayer _input_gate_sigmoid{}; - CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_cell{}; - CLPixelWiseMultiplicationKernel _pixelwise_mul_input_cell{}; - CLSaturatedArithmeticOperationKernel _add_forget_cell{}; - CLActivationLayer _cell_clip{}; - CLGEMMLowpMatrixMultiplyCore _mm_input_to_output{}; - CLGEMMLowpOutputStage _input_to_output_outstage{}; - CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_output{}; - CLGEMMLowpOutputStage _recurrent_to_output_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_input_recurrent_output{}; - CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_to_output{}; - CLGEMMLowpOutputStage _cell_to_output_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_cell_to_output{}; - 
CLActivationLayer _output_gate_sigmoid{}; - CLActivationLayer _hidden_tanh{}; - CLPixelWiseMultiplicationKernel _pixelwise_mul_hidden{}; - CLGEMMLowpOutputStage _hidden_outstage{}; - CLGEMMLowpMatrixMultiplyCore _mm_projection{}; - CLGEMMLowpOutputStage _projection_outstage{}; - CLSaturatedArithmeticOperationKernel _accumulate_projection{}; - CLActivationLayer _projection_clip{}; + CLTranspose _transpose_input_to_forget_weights{}; + CLTranspose _transpose_input_to_cell_weights{}; + CLTranspose _transpose_input_to_output_weights{}; + CLTranspose _transpose_input_to_input_weights{}; + CLTranspose _transpose_recurrent_to_forget_weights{}; + CLTranspose _transpose_recurrent_to_cell_weights{}; + CLTranspose _transpose_recurrent_to_output_weights{}; + CLTranspose _transpose_recurrent_to_input_weights{}; + CLTranspose _transpose_projection_weights{}; + CLGEMMLowpMatrixAReductionKernel _input_to_input_reduction{}; + CLGEMMLowpMatrixAReductionKernel _recurrent_to_input_reduction{}; + CLGEMMLowpMatrixAReductionKernel _input_to_forget_reduction{}; + CLGEMMLowpMatrixAReductionKernel _recurrent_to_forget_reduction{}; + CLGEMMLowpMatrixAReductionKernel _input_to_cell_reduction{}; + CLGEMMLowpMatrixAReductionKernel _recurrent_to_cell_reduction{}; + CLGEMMLowpMatrixAReductionKernel _input_to_output_reduction{}; + CLGEMMLowpMatrixAReductionKernel _recurrent_to_output_reduction{}; + CLGEMMLowpMatrixAReductionKernel _projection_reduction{}; + CLArithmeticAddition _projection_bias_add{}; + CLGEMMLowpMatrixMultiplyCore _mm_input_to_forget{}; + CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_forget{}; + CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_to_forget{}; + CLGEMMLowpOutputStage _input_to_forget_outstage{}; + CLGEMMLowpOutputStage _recurrent_to_forget_outstage{}; + CLGEMMLowpOutputStage _cell_to_forget_outstage{}; + CLArithmeticAddition _accumulate_input_recurrent_forget{}; + CLArithmeticAddition _accumulate_cell_forget{}; + CLActivationLayer _forget_gate_sigmoid{}; + CLGEMMLowpMatrixMultiplyCore _mm_input_to_cell{}; + CLGEMMLowpOutputStage _input_to_cell_outstage{}; + CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_cell{}; + CLGEMMLowpOutputStage _recurrent_to_cell_outstage{}; + CLArithmeticAddition _accumulate_input_recurrent_modulation{}; + CLActivationLayer _cell_gate_tanh{}; + CLArithmeticSubtraction _input_gate_sub{}; + CLGEMMLowpMatrixMultiplyCore _mm_input_to_input{}; + CLGEMMLowpOutputStage _input_to_input_outstage{}; + CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_input{}; + CLGEMMLowpOutputStage _recurrent_to_input_outstage{}; + CLArithmeticAddition _accumulate_input_recurrent_input{}; + CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_to_input{}; + CLGEMMLowpOutputStage _cell_to_input_outstage{}; + CLArithmeticAddition _accumulate_cell_input{}; + CLActivationLayer _input_gate_sigmoid{}; + CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_cell{}; + CLPixelWiseMultiplicationKernel _pixelwise_mul_input_cell{}; + CLArithmeticAddition _add_forget_cell{}; + CLActivationLayer _cell_clip{}; + CLGEMMLowpMatrixMultiplyCore _mm_input_to_output{}; + CLGEMMLowpOutputStage _input_to_output_outstage{}; + CLGEMMLowpMatrixMultiplyCore _mm_recurrent_to_output{}; + CLGEMMLowpOutputStage _recurrent_to_output_outstage{}; + CLArithmeticAddition _accumulate_input_recurrent_output{}; + CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_to_output{}; + CLGEMMLowpOutputStage _cell_to_output_outstage{}; + CLArithmeticAddition _accumulate_cell_to_output{}; + CLActivationLayer _output_gate_sigmoid{}; + 
CLActivationLayer _hidden_tanh{}; + CLPixelWiseMultiplicationKernel _pixelwise_mul_hidden{}; + CLGEMMLowpOutputStage _hidden_outstage{}; + CLGEMMLowpMatrixMultiplyCore _mm_projection{}; + CLGEMMLowpOutputStage _projection_outstage{}; + CLArithmeticAddition _accumulate_projection{}; + CLActivationLayer _projection_clip{}; std::array<CLQLSTMLayerNormalizationKernel, _layer_norm_count> _layer_norms{ {} }; CLCopyKernel _copy_output{}; @@ -358,7 +358,10 @@ private: TensorCopyKernel _hidden_to_output_copy{}; // Tensor pointers - const ICLTensor *_input_to_input_weights{ nullptr }; + const ICLTensor *_input_to_input_weights + { + nullptr + }; const ICLTensor *_recurrent_to_input_weights{ nullptr }; const ICLTensor *_projection_bias{ nullptr }; const ICLTensor *_input_to_forget_weights{ nullptr }; diff --git a/arm_compute/runtime/CL/functions/CLRNNLayer.h b/arm_compute/runtime/CL/functions/CLRNNLayer.h index 81f7810edd..9d1cb1a724 100644 --- a/arm_compute/runtime/CL/functions/CLRNNLayer.h +++ b/arm_compute/runtime/CL/functions/CLRNNLayer.h @@ -25,9 +25,9 @@ #define ARM_COMPUTE_CLRNN_LAYER_H #include "arm_compute/core/CL/kernels/CLCopyKernel.h" -#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" #include "arm_compute/runtime/CL/ICLSimpleFunction.h" #include "arm_compute/runtime/CL/functions/CLActivationLayer.h" +#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h" #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h" #include "arm_compute/runtime/CL/functions/CLGEMM.h" @@ -85,16 +85,16 @@ public: void prepare() override; private: - MemoryGroup _memory_group; - CLGEMM _gemm_state_f; - CLSaturatedArithmeticOperationKernel _add_kernel; - CLActivationLayer _activation; - CLFullyConnectedLayer _fully_connected_kernel; - CLCopyKernel _copy_kernel; - CLTensor _fully_connected_out; - CLTensor _gemm_output; - CLTensor _add_output; - bool _is_prepared; + MemoryGroup _memory_group; + CLGEMM _gemm_state_f; + CLArithmeticAddition _add_kernel; + CLActivationLayer _activation; + CLFullyConnectedLayer _fully_connected_kernel; + CLCopyKernel _copy_kernel; + CLTensor _fully_connected_out; + CLTensor _gemm_output; + CLTensor _add_output; + bool _is_prepared; }; } #endif /* ARM_COMPUTE_CLRNN_LAYER_H */
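
With the members above, CLRNNLayer owns the addition as a CLArithmeticAddition function rather than as a raw CLSaturatedArithmeticOperationKernel, so the layer configures and executes it through the function interface instead of enqueueing the kernel itself. A minimal sketch of the corresponding call sites, assuming the member names above and a compile_context in scope:

    // configure(): same tensor arguments as before, minus the ArithmeticOperation tag
    _add_kernel.configure(compile_context, &_fully_connected_out, &_gemm_output, &_add_output, ConvertPolicy::SATURATE);

    // run(): a plain function call replaces CLScheduler::get().enqueue(_add_kernel)
    _add_kernel.run();
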
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp index ecac1e0c86..7cc6fb38b1 100644 --- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp +++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp @@ -26,6 +26,7 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/CL/CLValidate.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/utils/misc/Cast.h" #include "support/StringSupport.h" #include <map> @@ -241,15 +242,15 @@ CLElementwiseOperationKernel::CLElementwiseOperationKernel() { } -void CLElementwiseOperationKernel::configure_common(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output) +void CLElementwiseOperationKernel::configure_common(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output) { configure_common(CLKernelLibrary::get().get_compile_context(), input1, input2, output); } -void CLElementwiseOperationKernel::configure_common(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output) +void CLElementwiseOperationKernel::configure_common(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output) { // Configure kernel window - auto win_config = validate_and_configure_window(*input1->info(), *input2->info(), *output->info()); + auto win_config = validate_and_configure_window(*input1, *input2, *output); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); _input1 = input1; @@ -257,13 +258,13 @@ void CLElementwiseOperationKernel::configure_common(const CLCompileContext &comp _output = output; std::string kernel_name = "elementwise_operation_" + name(); - if(is_data_type_quantized(input1->info()->data_type())) + if(is_data_type_quantized(input1->data_type())) { kernel_name += "_quantized"; } // Set kernel build options - CLBuildOptions build_opts = generate_build_options(*input1->info(), *input2->info(), *output->info()); + CLBuildOptions build_opts = generate_build_options(*input1, *input2, *output); if(_act_info.enabled()) { build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(_act_info.activation()))); @@ -276,17 +277,21 @@ void CLElementwiseOperationKernel::configure_common(const CLCompileContext &comp ICLKernel::configure_internal(win_config.second); - _config_id = generate_id_for_tuning(kernel_name, *input1->info(), *output->info()); + _config_id = generate_id_for_tuning(kernel_name, *input1, *output); } -void CLElementwiseOperationKernel::run(const Window &window, cl::CommandQueue &queue) +void CLElementwiseOperationKernel::run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, cl::CommandQueue &queue) { ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - const TensorShape &in_shape1 = _input1->info()->tensor_shape(); - const TensorShape &in_shape2 = _input2->info()->tensor_shape(); - const TensorShape &out_shape = _output->info()->tensor_shape(); + const auto src_0 = utils::cast::polymorphic_downcast<const ICLTensor *>(inputs.at(TensorType::ACL_SRC_0)); + const auto src_1 = utils::cast::polymorphic_downcast<const ICLTensor *>(inputs.at(TensorType::ACL_SRC_1)); + auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(outputs.at(TensorType::ACL_DST)); + + const TensorShape &in_shape1 = src_0->info()->tensor_shape(); + const TensorShape &in_shape2 = src_1->info()->tensor_shape(); + const TensorShape &out_shape = dst->info()->tensor_shape(); bool can_collapse = true; const bool is_vector = in_shape1.num_dimensions() == 1 || in_shape2.num_dimensions() == 1; @@ -313,9 +318,9 @@ void CLElementwiseOperationKernel::run(const Window &window, cl::CommandQueue &q { unsigned int idx = 0; - add_3D_tensor_argument(idx, _input1, slice_input1); - add_3D_tensor_argument(idx, _input2, slice_input2); - add_3D_tensor_argument(idx, _output, slice); + add_3D_tensor_argument(idx, src_0, slice_input1); + add_3D_tensor_argument(idx, src_1, slice_input2); + add_3D_tensor_argument(idx, dst, slice); enqueue(queue, *this, slice, lws_hint()); @@ -327,25 +332,25 @@ void CLElementwiseOperationKernel::run(const Window &window, cl::CommandQueue &q BorderSize CLElementwiseOperationKernel::border_size() const { - const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0)); + const unsigned int replicateSize = _output->dimension(0) - std::min(_input1->dimension(0), _input2->dimension(0)); const unsigned int border = std::min(num_elems_processed_per_iteration - 1U, replicateSize); return BorderSize{ 0, border, 0, 0 }; } /** Arithmetic operations with saturation*/ -void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, 
const ConvertPolicy &policy, +void CLSaturatedArithmeticOperationKernel::configure(ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info) { configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, policy, act_info); } -void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, +void CLSaturatedArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ConvertPolicy &policy, const ActivationLayerInfo &act_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output); - ARM_COMPUTE_ERROR_THROW_ON(CLSaturatedArithmeticOperationKernel::validate(op, input1->info(), input2->info(), output->info(), policy, act_info)); + ARM_COMPUTE_ERROR_THROW_ON(CLSaturatedArithmeticOperationKernel::validate(op, input1, input2, output, policy, act_info)); _policy = policy; _op = op; @@ -392,16 +397,16 @@ std::string CLSaturatedArithmeticOperationKernel::name() /** Arithmetic operations*/ -void CLArithmeticOperationKernel::configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLArithmeticOperationKernel::configure(ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { configure(CLKernelLibrary::get().get_compile_context(), op, input1, input2, output, act_info); } -void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, +void CLArithmeticOperationKernel::configure(const CLCompileContext &compile_context, ArithmeticOperation op, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output); - ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticOperationKernel::validate(op, input1->info(), input2->info(), output->info(), act_info)); + ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticOperationKernel::validate(op, input1, input2, output, act_info)); _op = op; _act_info = act_info; diff --git a/src/core/CL/kernels/CLFillBorderKernel.cpp b/src/core/CL/kernels/CLFillBorderKernel.cpp index 67dac3280e..1fca646129 100644 --- a/src/core/CL/kernels/CLFillBorderKernel.cpp +++ b/src/core/CL/kernels/CLFillBorderKernel.cpp @@ -33,6 +33,7 @@ #include "arm_compute/core/Utils.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/Window.h" +#include "arm_compute/core/utils/misc/Cast.h" #include "support/StringSupport.h" namespace arm_compute @@ -61,11 +62,17 @@ void CLFillBorderKernel::configure(ICLTensor *tensor, BorderSize border_size, Bo } void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLTensor *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value) +{ + _tensor = tensor; + configure(compile_context, tensor->info(), border_size, border_mode, constant_border_value); +} + +void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ITensorInfo *tensor, BorderSize border_size, BorderMode border_mode, const PixelValue &constant_border_value) { ARM_COMPUTE_ERROR_ON(tensor == nullptr); - 
ARM_COMPUTE_ERROR_ON(tensor->info()->num_channels() != 1); + ARM_COMPUTE_ERROR_ON(tensor->num_channels() != 1); - border_size.limit(tensor->info()->padding()); + border_size.limit(tensor->padding()); // If there is no border: early exit if(border_size.empty() || border_mode == BorderMode::UNDEFINED) @@ -76,7 +83,7 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLT // Select appropriate kernel std::string kernel_name = "fill_image_borders_" + lower_string(string_from_border_mode(border_mode)); - const DataType dt = tensor->info()->data_type(); + const DataType dt = tensor->data_type(); // Define build options CLBuildOptions build_opts; @@ -88,16 +95,15 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLT // Create kernel _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); - _tensor = tensor; // Create static kernel arguments - const unsigned int valid_width = tensor->info()->valid_region().shape[0]; - const unsigned int valid_height = tensor->info()->valid_region().shape[1]; + const unsigned int valid_width = tensor->valid_region().shape[0]; + const unsigned int valid_height = tensor->valid_region().shape[1]; const cl_int2 valid_region_coords = { { - static_cast<cl_int>(tensor->info()->valid_region().anchor[0]), - static_cast<cl_int>(tensor->info()->valid_region().anchor[1]), + static_cast<cl_int>(tensor->valid_region().anchor[0]), + static_cast<cl_int>(tensor->valid_region().anchor[1]), } }; const unsigned int total_valid_width = border_size.left + valid_width + border_size.right; @@ -149,7 +155,7 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLT Window win; win.set(Window::DimX, Window::Dimension(0, total_valid_width + valid_height)); win.set(Window::DimY, Window::Dimension(0, 1, 1)); - win.use_tensor_dimensions(tensor->info()->tensor_shape(), Window::DimZ); + win.use_tensor_dimensions(tensor->tensor_shape(), Window::DimZ); ICLKernel::configure_internal(win); // Set config_id for enabling LWS tuning @@ -157,13 +163,40 @@ void CLFillBorderKernel::configure(const CLCompileContext &compile_context, ICLT _config_id += "_"; _config_id += lower_string(string_from_data_type(dt)); _config_id += "_"; - _config_id += support::cpp11::to_string(tensor->info()->dimension(0)); + _config_id += support::cpp11::to_string(tensor->dimension(0)); _config_id += "_"; - _config_id += support::cpp11::to_string(tensor->info()->dimension(1)); + _config_id += support::cpp11::to_string(tensor->dimension(1)); _config_id += "_"; _config_id += lower_string(string_from_border_mode(border_mode)); } +void CLFillBorderKernel::run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_UNUSED(outputs); + + // Border mode undefined or border width == 0 + if(_kernel() == nullptr) + { + return; + } + + const auto tensor = utils::cast::polymorphic_downcast<const ICLTensor *>(inputs.at(TensorType::ACL_SRC)); + + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window); + + Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); + Window slice = collapsed.first_slice_window_3D(); + + do + { + unsigned int idx = 0; + add_3D_tensor_argument(idx, tensor, slice); + enqueue(queue, *this, slice, lws_hint()); + } + while(collapsed.slide_window_slice_3D(slice)); +} + void CLFillBorderKernel::run(const Window &window, cl::CommandQueue &queue) { // Border mode undefined or border width == 0
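
Both kernels above now receive their tensors at execution time through the run_op() tensor maps instead of caching ICLTensor pointers at configure() time, which is what lets a single configured kernel be reused across different tensors by the operators below. A minimal sketch of how a caller binds tensors when dispatching such a kernel, assuming an already-configured kernel object and allocated CLTensor objects src0, src1 and dst:

    InputTensorMap  inputs{ { TensorType::ACL_SRC_0, &src0 }, { TensorType::ACL_SRC_1, &src1 } };
    OutputTensorMap outputs{ { TensorType::ACL_DST, &dst } };
    CLScheduler::get().enqueue_op(kernel, inputs, outputs); // tensors are looked up inside run_op()
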
diff --git a/src/runtime/CL/CLOperator.cpp b/src/runtime/CL/CLOperator.cpp index 11ee30eae9..c41454e933 100644 --- a/src/runtime/CL/CLOperator.cpp +++ b/src/runtime/CL/CLOperator.cpp @@ -37,7 +37,7 @@ void ICLOperator::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTe { ARM_COMPUTE_UNUSED(workspace); - if(inputs.empty() || outputs.empty()) + if(inputs.empty()) { ARM_COMPUTE_ERROR("No inputs provided"); } diff --git a/src/runtime/CL/functions/CLElementwiseOperations.cpp b/src/runtime/CL/functions/CLElementwiseOperations.cpp index 670e605b4d..e66e4bf526 100644 --- a/src/runtime/CL/functions/CLElementwiseOperations.cpp +++ b/src/runtime/CL/functions/CLElementwiseOperations.cpp @@ -25,6 +25,7 @@ #include "arm_compute/core/CL/ICLTensor.h" #include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" +#include "arm_compute/runtime/CL/CLScheduler.h" #include "support/MemorySupport.h" #include <utility> @@ -33,26 +34,43 @@ namespace arm_compute { namespace { -void configure_border_handler(const CLCompileContext &compile_context, CLFillBorderKernel &border_handler, BorderSize border_size, ICLTensor *input1, ICLTensor *input2, const ICLTensor *output) +void configure_border_handler(const CLCompileContext &compile_context, CLFillBorderKernel &border_handler, BorderSize border_size, ITensorInfo *input1, ITensorInfo *input2, const ITensorInfo *output) { - if(output->info()->dimension(0) > 1) + if(output->dimension(0) > 1) { - ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2; + ITensorInfo *broadcasted_info = (input1->dimension(0) == 1) ? input1 : input2; - if(broadcasted_info->info()->dimension(0) == 1) + if(broadcasted_info->dimension(0) == 1) { border_handler.configure(compile_context, broadcasted_info, border_size, BorderMode::REPLICATE); } } } + +void select_border_input(InputTensorMap &tensor_map, InputTensorMap &inputs, OutputTensorMap &outputs) +{ + if(outputs.at(TensorType::ACL_DST)->info()->dimension(0) > 1) + { + if(inputs.at(TensorType::ACL_SRC_1)->info()->dimension(0) == 1) + { + tensor_map[TensorType::ACL_SRC] = inputs.at(TensorType::ACL_SRC_1); + } + else + { + tensor_map[TensorType::ACL_SRC] = inputs.at(TensorType::ACL_SRC_0); + } + } +} } // namespace -void CLArithmeticAddition::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +namespace experimental +{ +CLArithmeticAddition::CLArithmeticAddition() + : _border_handler() { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, policy, act_info); } -void CLArithmeticAddition::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +void CLArithmeticAddition::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLSaturatedArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::ADD, input1, input2, output, policy, act_info); @@ -65,12 +83,20 @@ Status CLArithmeticAddition::validate(const ITensorIn return CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, input1, input2, output, policy, act_info); }
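
select_border_input() above is the run-time counterpart of configure_border_handler(): when the destination is wider than one element in dimension 0, the input being broadcast (dimension 0 equal to 1) must have its replicated border filled before the elementwise kernel reads past its valid region, so that input is remapped to ACL_SRC for the border-handler enqueue. An illustrative case, assuming CL tensors wide of shape [8, 4], broadcast of shape [1, 4] and dst of shape [8, 4]:

    InputTensorMap  inputs{ { TensorType::ACL_SRC_0, &wide }, { TensorType::ACL_SRC_1, &broadcast } };
    OutputTensorMap outputs{ { TensorType::ACL_DST, &dst } };
    InputTensorMap  border_src;
    select_border_input(border_src, inputs, outputs); // border_src[ACL_SRC] == &broadcast
    // With two full-width inputs ACL_SRC_0 is mapped instead, and the enqueue is harmless
    // because an unconfigured border handler early-exits in run_op() on a null kernel.
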
-void CLArithmeticSubtraction::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +void CLArithmeticAddition::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, policy, act_info); + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); } -void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, +CLArithmeticSubtraction::CLArithmeticSubtraction() + : _border_handler() +{ +} +void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLSaturatedArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::SUB, input1, input2, output, policy, act_info); @@ -84,12 +110,20 @@ Status CLArithmeticSubtraction::validate(const ITenso return CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::SUB, input1, input2, output, policy, act_info); } -void CLArithmeticDivision::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLArithmeticSubtraction::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) +{ + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); +} + +CLArithmeticDivision::CLArithmeticDivision() + : _border_handler() { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); } -void CLArithmeticDivision::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLArithmeticDivision::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::DIV, input1, input2, output, act_info); @@ -102,12 +136,20 @@ Status CLArithmeticDivision::validate(const ITensorIn return CLArithmeticOperationKernel::validate(ArithmeticOperation::DIV, input1, input2, output, act_info); } -void CLElementwiseMax::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLArithmeticDivision::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); }
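
Every experimental operator here overrides run() with the same three steps; a consolidated sketch of the pattern repeated in the hunks above and below:

    void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override
    {
        InputTensorMap border_src;
        select_border_input(border_src, inputs, outputs);               // set only when dimension 0 broadcasts
        CLScheduler::get().enqueue_op(_border_handler, border_src, {}); // fills the replicated border first
        ICLOperator::run(inputs, outputs, workspace);                   // then enqueues the elementwise kernel
    }
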
-void CLElementwiseMax::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +CLElementwiseMax::CLElementwiseMax() + : _border_handler() +{ +} + +void CLElementwiseMax::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::MAX, input1, input2, output, act_info); @@ -120,12 +162,20 @@ Status CLElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo * return CLArithmeticOperationKernel::validate(ArithmeticOperation::MAX, input1, input2, output, act_info); } -void CLElementwiseMin::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLElementwiseMax::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); } -void CLElementwiseMin::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +CLElementwiseMin::CLElementwiseMin() + : _border_handler() +{ +} + +void CLElementwiseMin::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::MIN, input1, input2, output, act_info); @@ -138,12 +188,20 @@ Status CLElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo * return CLArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output, act_info); } -void CLElementwiseSquaredDiff::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLElementwiseMin::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); } -void CLElementwiseSquaredDiff::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +CLElementwiseSquaredDiff::CLElementwiseSquaredDiff() + : _border_handler() +{ +} + +void CLElementwiseSquaredDiff::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::SQUARED_DIFF, input1, input2, output, act_info); @@ -156,12 +214,20 @@ Status CLElementwiseSquaredDiff::validate(const ITens return CLArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output, act_info); } -void CLElementwisePower::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +void CLElementwiseSquaredDiff::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) { - configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); } -void CLElementwisePower::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo 
&act_info) +CLElementwisePower::CLElementwisePower() + : _border_handler() +{ +} + +void CLElementwisePower::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info) { auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::POWER, input1, input2, output, act_info); @@ -174,4 +240,316 @@ Status CLElementwisePower::validate(const ITensorInfo return CLArithmeticOperationKernel::validate(ArithmeticOperation::POWER, input1, input2, output, act_info); } +void CLElementwisePower::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) +{ + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); +} +} // namespace experimental + +struct CLArithmeticAddition::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLArithmeticAddition> op{ nullptr }; +}; + +CLArithmeticAddition::CLArithmeticAddition() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLArithmeticAddition::CLArithmeticAddition(CLArithmeticAddition &&) = default; +CLArithmeticAddition &CLArithmeticAddition::operator=(CLArithmeticAddition &&) = default; +CLArithmeticAddition::~CLArithmeticAddition() = default; + +void CLArithmeticAddition::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, policy, act_info); +} + +void CLArithmeticAddition::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info) +{ + _impl->src_0 = input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLArithmeticAddition>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), policy, act_info); +} + +Status CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +{ + return experimental::CLArithmeticAddition::validate(input1, input2, output, policy, act_info); +} + +void CLArithmeticAddition::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +}
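
The public CLArithmeticAddition is now a thin pimpl facade over the experimental operator: configure() caches the ICLTensor pointers in Impl and forwards only the ITensorInfo objects, while run() rebuilds the tensor maps on every call. User-side code is unchanged by the refactor; a sketch, with tensor initialisation and allocation elided:

    CLTensor a, b, out;
    // ... init tensor infos and allocate a, b, out ...
    CLArithmeticAddition add;
    add.configure(&a, &b, &out, ConvertPolicy::SATURATE); // pointers cached in Impl; operator configured on infos
    add.run();                                            // builds { ACL_SRC_0, ACL_SRC_1 } / { ACL_DST } maps and runs
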
+struct CLArithmeticSubtraction::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLArithmeticSubtraction> op{ nullptr }; +}; + +CLArithmeticSubtraction::CLArithmeticSubtraction() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLArithmeticSubtraction::CLArithmeticSubtraction(CLArithmeticSubtraction &&) = default; +CLArithmeticSubtraction &CLArithmeticSubtraction::operator=(CLArithmeticSubtraction &&) = default; +CLArithmeticSubtraction::~CLArithmeticSubtraction() = default; + +void CLArithmeticSubtraction::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, policy, act_info); +} + +void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy, + const ActivationLayerInfo &act_info) +{ + _impl->src_0 = input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLArithmeticSubtraction>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), policy, act_info); +} + +Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info) +{ + return experimental::CLArithmeticSubtraction::validate(input1, input2, output, policy, act_info); +} + +void CLArithmeticSubtraction::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} + +struct CLArithmeticDivision::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLArithmeticDivision> op{ nullptr }; +}; + +CLArithmeticDivision::CLArithmeticDivision() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLArithmeticDivision::CLArithmeticDivision(CLArithmeticDivision &&) = default; +CLArithmeticDivision &CLArithmeticDivision::operator=(CLArithmeticDivision &&) = default; +CLArithmeticDivision::~CLArithmeticDivision() = default; + +void CLArithmeticDivision::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); +} + +void CLArithmeticDivision::configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + _impl->src_0 = input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLArithmeticDivision>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info); +} + +Status CLArithmeticDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info) +{ + return experimental::CLArithmeticDivision::validate(input1, input2, output, act_info); +} + +void CLArithmeticDivision::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} + +struct CLElementwiseMax::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLElementwiseMax> op{ nullptr }; +}; + +CLElementwiseMax::CLElementwiseMax() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLElementwiseMax::CLElementwiseMax(CLElementwiseMax &&) = default; +CLElementwiseMax &CLElementwiseMax::operator=(CLElementwiseMax &&) = default; +CLElementwiseMax::~CLElementwiseMax() = default; + +void CLElementwiseMax::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); +} + +void CLElementwiseMax::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + _impl->src_0 = 
input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwiseMax>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info); +} + +Status CLElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info) +{ + return experimental::CLElementwiseMax::validate(input1, input2, output, act_info); +} + +void CLElementwiseMax::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} + +struct CLElementwiseMin::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLElementwiseMin> op{ nullptr }; +}; + +CLElementwiseMin::CLElementwiseMin() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLElementwiseMin::CLElementwiseMin(CLElementwiseMin &&) = default; +CLElementwiseMin &CLElementwiseMin::operator=(CLElementwiseMin &&) = default; +CLElementwiseMin::~CLElementwiseMin() = default; + +void CLElementwiseMin::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); +} + +void CLElementwiseMin::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + _impl->src_0 = input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwiseMin>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info); +} + +Status CLElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info) +{ + return experimental::CLElementwiseMin::validate(input1, input2, output, act_info); +} + +void CLElementwiseMin::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} + +struct CLElementwiseSquaredDiff::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLElementwiseSquaredDiff> op{ nullptr }; + std::unique_ptr<CLFillBorderKernel> _border_handler{ nullptr }; +}; + +CLElementwiseSquaredDiff::CLElementwiseSquaredDiff() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLElementwiseSquaredDiff::CLElementwiseSquaredDiff(CLElementwiseSquaredDiff &&) = default; +CLElementwiseSquaredDiff &CLElementwiseSquaredDiff::operator=(CLElementwiseSquaredDiff &&) = default; +CLElementwiseSquaredDiff::~CLElementwiseSquaredDiff() = default; + +void CLElementwiseSquaredDiff::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); +} + +void CLElementwiseSquaredDiff::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + _impl->src_0 = input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwiseSquaredDiff>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info); +}
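
The Impl boilerplate for Subtraction, Division, Max, Min, SquaredDiff and Power is structurally identical to the Addition case; only the wrapped operator type differs (SquaredDiff additionally carries a border-handler member the others do not). The shared shape could be expressed once, for example (a design sketch only, not part of the patch):

    template <typename OperatorType>
    struct ElementwiseImpl
    {
        const ICLTensor              *src_0{ nullptr };
        const ICLTensor              *src_1{ nullptr };
        ICLTensor                    *dst{ nullptr };
        std::unique_ptr<OperatorType> op{ nullptr };

        void run()
        {
            // Rebuild the maps on each call so the same function can be re-run safely
            const InputTensorMap  src{ { TensorType::ACL_SRC_0, src_0 }, { TensorType::ACL_SRC_1, src_1 } };
            const OutputTensorMap dst_map{ { TensorType::ACL_DST, dst } };
            op->run(src, dst_map, {});
        }
    };
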
+ +Status CLElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info) +{ + return experimental::CLElementwiseSquaredDiff::validate(input1, input2, output, act_info); +} + +void CLElementwiseSquaredDiff::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} + +struct CLElementwisePower::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLElementwisePower> op{ nullptr }; +}; + +CLElementwisePower::CLElementwisePower() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLElementwisePower::CLElementwisePower(CLElementwisePower &&) = default; +CLElementwisePower &CLElementwisePower::operator=(CLElementwisePower &&) = default; +CLElementwisePower::~CLElementwisePower() = default; + +void CLElementwisePower::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + configure(CLKernelLibrary::get().get_compile_context(), input1, input2, output, act_info); +} + +void CLElementwisePower::configure(const CLCompileContext &compile_context, ICLTensor *input1, ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info) +{ + _impl->src_0 = input1; + _impl->src_1 = input2; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLElementwisePower>(); + _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), act_info); +} + +Status CLElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info) +{ + return experimental::CLElementwisePower::validate(input1, input2, output, act_info); +} + +void CLElementwisePower::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} } // namespace arm_compute diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp index 1b46baaf5e..a1c412401b 100644 --- a/src/runtime/CL/functions/CLLSTMLayer.cpp +++ b/src/runtime/CL/functions/CLLSTMLayer.cpp @@ -155,7 +155,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe RoundingPolicy::TO_NEAREST_EVEN); // forget_gate_out is going to be reassigned, so allocate the tensor that it was assigned to before forget_gate_out->allocator()->allocate(); - _accum_forget_gate_bias.configure(compile_context, ArithmeticOperation::ADD, &_forget_layer_norm_out1, forget_gate_bias, &_forget_layer_norm_out2, ConvertPolicy::SATURATE); + _accum_forget_gate_bias.configure(compile_context, &_forget_layer_norm_out1, forget_gate_bias, &_forget_layer_norm_out2, ConvertPolicy::SATURATE); _forget_layer_norm_out1.allocator()->allocate(); forget_gate_out = &_forget_layer_norm_out2; } @@ -173,7 +173,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe _memory_group.manage(&_input_gate_out1); _ones.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type())); _ones_memset_kernel.configure(compile_context, &_ones, PixelValue(1, _ones.info()->data_type())); - _subtract_input_gate.configure(compile_context, ArithmeticOperation::SUB, &_ones, forget_gate_out, &_input_gate_out1, 
ConvertPolicy::SATURATE); + _subtract_input_gate.configure(compile_context, &_ones, forget_gate_out, &_input_gate_out1, ConvertPolicy::SATURATE); _ones.allocator()->allocate(); _run_cifg_opt = true; } @@ -222,7 +222,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe RoundingPolicy::TO_NEAREST_EVEN); // input_gate_out is going to be reassigned, so allocate the tensor that it was assigned to before input_gate_out->allocator()->allocate(); - _accum_input_gate_bias.configure(compile_context, ArithmeticOperation::ADD, &_input_layer_norm_out1, lstm_params.input_gate_bias(), &_input_layer_norm_out2, ConvertPolicy::SATURATE); + _accum_input_gate_bias.configure(compile_context, &_input_layer_norm_out1, lstm_params.input_gate_bias(), &_input_layer_norm_out2, ConvertPolicy::SATURATE); _input_layer_norm_out1.allocator()->allocate(); input_gate_out = &_input_layer_norm_out2; } @@ -246,7 +246,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe _gemm_cell_state1.configure(compile_context, output_state_in, &_cell_state_out2, nullptr, &_cell_state_out3, 1.f, 0.f); _cell_state_out2.allocator()->allocate(); _memory_group.manage(&_cell_state_out4); - _accum_cell_state1.configure(compile_context, ArithmeticOperation::ADD, &_cell_state_out1, &_cell_state_out3, &_cell_state_out4, ConvertPolicy::SATURATE); + _accum_cell_state1.configure(compile_context, &_cell_state_out1, &_cell_state_out3, &_cell_state_out4, ConvertPolicy::SATURATE); CLTensor *cell_state_out_ptr = &_cell_state_out4; if(_is_layer_norm_lstm) { @@ -259,7 +259,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe RoundingPolicy::TO_NEAREST_EVEN); // cell_state_out_ptr is going to be reassigned, so allocate the tensor that it was assigned to before cell_state_out_ptr->allocator()->allocate(); - _accum_cell_gate_bias.configure(compile_context, ArithmeticOperation::ADD, &_cell_layer_norm_out1, cell_bias, &_cell_layer_norm_out2, ConvertPolicy::SATURATE); + _accum_cell_gate_bias.configure(compile_context, &_cell_layer_norm_out1, cell_bias, &_cell_layer_norm_out2, ConvertPolicy::SATURATE); _cell_layer_norm_out1.allocator()->allocate(); cell_state_out_ptr = &_cell_layer_norm_out2; } @@ -268,7 +268,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe _pixelwise_mul_cell_state1.configure(compile_context, cell_state_out_ptr, input_gate_out, &_cell_state_out5, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN); cell_state_out_ptr->allocator()->allocate(); _pixelwise_mul_cell_state2.configure(compile_context, forget_gate_out, cell_state_in, &_cell_state_out3, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN); - _accum_cell_state2.configure(compile_context, ArithmeticOperation::ADD, &_cell_state_out5, &_cell_state_out3, &_cell_state_out1, ConvertPolicy::SATURATE); + _accum_cell_state2.configure(compile_context, &_cell_state_out5, &_cell_state_out3, &_cell_state_out1, ConvertPolicy::SATURATE); _cell_state_out3.allocator()->allocate(); _cell_state_out5.allocator()->allocate(); // Perform clipping @@ -329,7 +329,7 @@ void CLLSTMLayer::configure(const CLCompileContext &compile_context, const ICLTe RoundingPolicy::TO_NEAREST_EVEN); // output_gate_out is going to be reassigned, so allocate the tensor that it was assigned to before output_gate_out->allocator()->allocate(); - _accum_output_gate_bias.configure(compile_context, ArithmeticOperation::ADD, &_output_layer_norm_out1, output_gate_bias, 
&_output_layer_norm_out2, ConvertPolicy::SATURATE); + _accum_output_gate_bias.configure(compile_context, &_output_layer_norm_out1, output_gate_bias, &_output_layer_norm_out2, ConvertPolicy::SATURATE); _output_layer_norm_out1.allocator()->allocate(); output_gate_out = &_output_layer_norm_out2; } @@ -538,7 +538,7 @@ Status CLLSTMLayer::validate(const ITensorInfo *input, } else { - ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::SUB, &forget_gate, &forget_gate, &forget_gate, ConvertPolicy::SATURATE)); + ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticSubtraction::validate(&forget_gate, &forget_gate, &forget_gate, ConvertPolicy::SATURATE)); } // Validate cell state @@ -636,14 +636,14 @@ void CLLSTMLayer::run() { _mean_std_norm_forget_gate.run(); CLScheduler::get().enqueue(_pixelwise_mul_forget_gate_coeff); - CLScheduler::get().enqueue(_accum_forget_gate_bias); + _accum_forget_gate_bias.run(); } _activation_forget_gate.run(); if(_run_cifg_opt) { CLScheduler::get().enqueue(_ones_memset_kernel); - CLScheduler::get().enqueue(_subtract_input_gate); + _subtract_input_gate.run(); } else { @@ -659,7 +659,7 @@ void CLLSTMLayer::run() { _mean_std_norm_input_gate.run(); CLScheduler::get().enqueue(_pixelwise_mul_input_gate_coeff); - CLScheduler::get().enqueue(_accum_input_gate_bias); + _accum_input_gate_bias.run(); } _activation_input_gate.run(); } @@ -667,17 +667,17 @@ void CLLSTMLayer::run() _fully_connected_cell_state.run(); CLScheduler::get().enqueue(_transpose_cell_state); _gemm_cell_state1.run(); - CLScheduler::get().enqueue(_accum_cell_state1); + _accum_cell_state1.run(); if(_is_layer_norm_lstm) { _mean_std_norm_cell_gate.run(); CLScheduler::get().enqueue(_pixelwise_mul_cell_gate_coeff); - CLScheduler::get().enqueue(_accum_cell_gate_bias); + _accum_cell_gate_bias.run(); } _activation_cell_state.run(); CLScheduler::get().enqueue(_pixelwise_mul_cell_state1); CLScheduler::get().enqueue(_pixelwise_mul_cell_state2); - CLScheduler::get().enqueue(_accum_cell_state2); + _accum_cell_state2.run(); if(_perform_cell_clipping) { @@ -695,7 +695,7 @@ void CLLSTMLayer::run() { _mean_std_norm_output_gate.run(); CLScheduler::get().enqueue(_pixelwise_mul_output_gate_coeff); - CLScheduler::get().enqueue(_accum_output_gate_bias); + _accum_output_gate_bias.run(); } _activation_output.run(); diff --git a/src/runtime/CL/functions/CLPReluLayer.cpp b/src/runtime/CL/functions/CLPReluLayer.cpp index b1b97381c8..fbb466acc8 100644 --- a/src/runtime/CL/functions/CLPReluLayer.cpp +++ b/src/runtime/CL/functions/CLPReluLayer.cpp @@ -24,6 +24,7 @@ #include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h" #include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/runtime/CL/CLScheduler.h" #include "arm_compute/runtime/CL/functions/CLPReluLayer.h" #include "support/MemorySupport.h" @@ -31,26 +32,42 @@ namespace arm_compute { namespace { -void configure_border_handler(const CLCompileContext &compile_context, CLFillBorderKernel &border_handler, BorderSize border_size, ICLTensor *input1, ICLTensor *input2, const ICLTensor *output) +void configure_border_handler(const CLCompileContext &compile_context, CLFillBorderKernel &border_handler, BorderSize border_size, ITensorInfo *input1, ITensorInfo *input2, const ITensorInfo *output) { - if(output->info()->dimension(0) > 1) + if(output->dimension(0) > 1) { - ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2; + ITensorInfo *broadcasted_info = (input1->dimension(0) == 1) ? 
input1 : input2; - if(broadcasted_info->info()->dimension(0) == 1) + if(broadcasted_info->dimension(0) == 1) { border_handler.configure(compile_context, broadcasted_info, border_size, BorderMode::REPLICATE); } } } +void select_border_input(InputTensorMap &tensor_map, InputTensorMap &inputs, OutputTensorMap &outputs) +{ + if(outputs.at(TensorType::ACL_DST)->info()->dimension(0) > 1) + { + if(inputs.at(TensorType::ACL_SRC_1)->info()->dimension(0) == 1) + { + tensor_map[TensorType::ACL_SRC] = inputs.at(TensorType::ACL_SRC_1); + } + else + { + tensor_map[TensorType::ACL_SRC] = inputs.at(TensorType::ACL_SRC_0); + } + } +} } // namespace -void CLPReluLayer::configure(ICLTensor *input, ICLTensor *alpha, ICLTensor *output) +namespace experimental +{ +CLPReluLayer::CLPReluLayer() + : _border_handler() { - configure(CLKernelLibrary::get().get_compile_context(), input, alpha, output); } -void CLPReluLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *alpha, ICLTensor *output) +void CLPReluLayer::configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *alpha, ITensorInfo *output) { auto k = arm_compute::support::cpp14::make_unique<CLArithmeticOperationKernel>(); k->configure(compile_context, ArithmeticOperation::PRELU, input, alpha, output); @@ -62,4 +79,56 @@ Status CLPReluLayer::validate(const ITensorInfo *input, const ITensorInfo *alpha { return CLArithmeticOperationKernel::validate(ArithmeticOperation::PRELU, input, alpha, output); } + +void CLPReluLayer::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) +{ + InputTensorMap src; + select_border_input(src, inputs, outputs); + CLScheduler::get().enqueue_op(_border_handler, src, {}); + ICLOperator::run(inputs, outputs, workspace); +} +} // namespace experimental + +struct CLPReluLayer::Impl +{ + const ICLTensor *src_0{ nullptr }; + const ICLTensor *src_1{ nullptr }; + ICLTensor *dst{ nullptr }; + std::unique_ptr<experimental::CLPReluLayer> op{ nullptr }; +}; + +CLPReluLayer::CLPReluLayer() + : _impl(support::cpp14::make_unique<Impl>()) +{ +} +CLPReluLayer::CLPReluLayer(CLPReluLayer &&) = default; +CLPReluLayer &CLPReluLayer::operator=(CLPReluLayer &&) = default; +CLPReluLayer::~CLPReluLayer() = default; + +void CLPReluLayer::configure(ICLTensor *input, ICLTensor *alpha, ICLTensor *output) +{ + configure(CLKernelLibrary::get().get_compile_context(), input, alpha, output); +} + +void CLPReluLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *alpha, ICLTensor *output) +{ + _impl->src_0 = input; + _impl->src_1 = alpha; + _impl->dst = output; + _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLPReluLayer>(); + _impl->op->configure(compile_context, input->info(), alpha->info(), output->info()); +} + +Status CLPReluLayer::validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output) +{ + return experimental::CLPReluLayer::validate(input, alpha, output); +} + +void CLPReluLayer::run() +{ + const InputTensorMap src{ { TensorType::ACL_SRC_0, _impl->src_0 }, { TensorType::ACL_SRC_1, _impl->src_1 } }; + const OutputTensorMap dst{ { TensorType::ACL_DST, _impl->dst } }; + + _impl->op->run(src, dst, {}); +} } // namespace arm_compute diff --git a/src/runtime/CL/functions/CLQLSTMLayer.cpp b/src/runtime/CL/functions/CLQLSTMLayer.cpp index 8c45a98935..c5c4aa3dfa 100644 --- a/src/runtime/CL/functions/CLQLSTMLayer.cpp +++ b/src/runtime/CL/functions/CLQLSTMLayer.cpp @@ -213,7 +213,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT 
_projection_reduction.configure(compile_context, _projection_weights, &_projection_eff_bias, GEMMLowpReductionKernelInfo(output_size, false, lstm_params.hidden_state_zero(), true)); if(_projection_bias != nullptr) { - _projection_bias_add.configure(compile_context, ArithmeticOperation::ADD, _projection_bias, &_projection_eff_bias, &_projection_eff_bias, ConvertPolicy::SATURATE); + _projection_bias_add.configure(compile_context, _projection_bias, &_projection_eff_bias, &_projection_eff_bias, ConvertPolicy::SATURATE); } } @@ -255,7 +255,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT &_mm_recurrent_to_forget_res, &_recurrent_to_forget_outstage_res, recurrent_to_forget_scale, mm_out_info, forget_gate_outstage_info); - _accumulate_input_recurrent_forget.configure(compile_context, ArithmeticOperation::ADD, &_input_to_forget_outstage_res, &_recurrent_to_forget_outstage_res, &_recurrent_to_forget_outstage_res, + _accumulate_input_recurrent_forget.configure(compile_context, &_input_to_forget_outstage_res, &_recurrent_to_forget_outstage_res, &_recurrent_to_forget_outstage_res, ConvertPolicy::SATURATE); _input_to_forget_outstage_res.allocator()->allocate(); @@ -270,7 +270,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT quantization::calculate_quantized_multiplier(cell_to_forget_scale, &gemmlowp_info.gemmlowp_multiplier, &gemmlowp_info.gemmlowp_shift); _cell_to_forget_outstage.configure(compile_context, &_mul_cell_to_forget_res, nullptr, &_cell_to_forget_outstage_res, gemmlowp_info); _mul_cell_to_forget_res.allocator()->allocate(); - _accumulate_cell_forget.configure(compile_context, ArithmeticOperation::ADD, &_recurrent_to_forget_outstage_res, &_cell_to_forget_outstage_res, &_recurrent_to_forget_outstage_res, + _accumulate_cell_forget.configure(compile_context, &_recurrent_to_forget_outstage_res, &_cell_to_forget_outstage_res, &_recurrent_to_forget_outstage_res, ConvertPolicy::SATURATE); _cell_to_forget_outstage_res.allocator()->allocate(); } @@ -307,7 +307,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT &_mm_recurrent_to_cell_res, &_recurrent_to_cell_outstage_res, recurrent_to_cell_scale, mm_out_info, cell_outstage_info); - _accumulate_input_recurrent_modulation.configure(compile_context, ArithmeticOperation::ADD, &_input_to_cell_outstage_res, &_recurrent_to_cell_outstage_res, &_recurrent_to_cell_outstage_res, + _accumulate_input_recurrent_modulation.configure(compile_context, &_input_to_cell_outstage_res, &_recurrent_to_cell_outstage_res, &_recurrent_to_cell_outstage_res, ConvertPolicy::SATURATE); _input_to_cell_outstage_res.allocator()->allocate(); @@ -333,7 +333,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT if(_has_cifg) { _ones.allocator()->init(*_forget_gate.info()); - _input_gate_sub.configure(compile_context, ArithmeticOperation::SUB, &_ones, &_forget_gate, &_input_gate, ConvertPolicy::SATURATE); + _input_gate_sub.configure(compile_context, &_ones, &_forget_gate, &_input_gate, ConvertPolicy::SATURATE); _ones.allocator()->allocate(); } else @@ -350,7 +350,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT output_state_in, &_recurrent_to_input_weights_transposed, &_recurrent_to_input_eff_bias, &_mm_recurrent_to_input_res, &_recurrent_to_input_outstage_res, recurrent_to_input_scale, mm_out_info, input_outstage_info); - _accumulate_input_recurrent_input.configure(compile_context, ArithmeticOperation::ADD, 
&_input_to_input_outstage_res, &_recurrent_to_input_outstage_res, &_recurrent_to_input_outstage_res, + _accumulate_input_recurrent_input.configure(compile_context, &_input_to_input_outstage_res, &_recurrent_to_input_outstage_res, &_recurrent_to_input_outstage_res, ConvertPolicy::SATURATE); _input_to_input_outstage_res.allocator()->allocate(); @@ -365,7 +365,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT _memory_group.manage(&_cell_to_input_outstage_res); _cell_to_input_outstage.configure(compile_context, &_mul_cell_to_input_res, nullptr, &_cell_to_input_outstage_res, gemmlowp_info); _mul_cell_to_input_res.allocator()->allocate(); - _accumulate_cell_input.configure(ArithmeticOperation::ADD, &_recurrent_to_input_outstage_res, &_cell_to_input_outstage_res, &_recurrent_to_input_outstage_res, ConvertPolicy::SATURATE); + _accumulate_cell_input.configure(&_recurrent_to_input_outstage_res, &_cell_to_input_outstage_res, &_recurrent_to_input_outstage_res, ConvertPolicy::SATURATE); _cell_to_input_outstage_res.allocator()->allocate(); } @@ -391,7 +391,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT _mul_input_cell_res.allocator()->init(mul_input_cell_info); _pixelwise_mul_input_cell.configure(compile_context, &_input_gate, &_cell_gate, &_mul_input_cell_res, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO); _cell_gate.allocator()->allocate(); - _add_forget_cell.configure(compile_context, ArithmeticOperation::ADD, &_forget_gate, &_mul_input_cell_res, cell_state_out, ConvertPolicy::SATURATE); + _add_forget_cell.configure(compile_context, &_forget_gate, &_mul_input_cell_res, cell_state_out, ConvertPolicy::SATURATE); _mul_input_cell_res.allocator()->allocate(); _forget_gate.allocator()->allocate(); if(_has_cell_clipping) @@ -412,7 +412,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT &_mm_recurrent_to_output_res, &_recurrent_to_output_outstage_res, recurrent_to_output_scale, mm_out_info, output_outstage_info); - _accumulate_input_recurrent_output.configure(compile_context, ArithmeticOperation::ADD, &_recurrent_to_output_outstage_res, &_input_to_output_outstage_res, &_recurrent_to_output_outstage_res, + _accumulate_input_recurrent_output.configure(compile_context, &_recurrent_to_output_outstage_res, &_input_to_output_outstage_res, &_recurrent_to_output_outstage_res, ConvertPolicy::SATURATE); _input_to_output_outstage_res.allocator()->allocate(); @@ -431,7 +431,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT _cell_to_output_outstage.configure(compile_context, &_mul_cell_to_output_res, nullptr, &_cell_to_output_outstage_res, gemmlowp_info); _mul_cell_to_output_res.allocator()->allocate(); - _accumulate_cell_to_output.configure(compile_context, ArithmeticOperation::ADD, &_recurrent_to_output_outstage_res, &_cell_to_output_outstage_res, &_recurrent_to_output_outstage_res, + _accumulate_cell_to_output.configure(compile_context, &_recurrent_to_output_outstage_res, &_cell_to_output_outstage_res, &_recurrent_to_output_outstage_res, ConvertPolicy::SATURATE); _cell_to_output_outstage_res.allocator()->allocate(); } @@ -510,7 +510,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT accumulate_destination = &_projection_accumulate_res; } - _accumulate_projection.configure(compile_context, ArithmeticOperation::ADD, &_projection_outstage_res, accumulate_destination, accumulate_destination, ConvertPolicy::SATURATE); + 
@@ -510,7 +510,7 @@ void CLQLSTMLayer::configure(const CLCompileContext &compile_context, const ICLT
             accumulate_destination = &_projection_accumulate_res;
         }
-        _accumulate_projection.configure(compile_context, ArithmeticOperation::ADD, &_projection_outstage_res, accumulate_destination, accumulate_destination, ConvertPolicy::SATURATE);
+        _accumulate_projection.configure(compile_context, &_projection_outstage_res, accumulate_destination, accumulate_destination, ConvertPolicy::SATURATE);
         _projection_outstage_res.allocator()->allocate();
         if(_projection_tensor_copy_required)
@@ -647,8 +647,8 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
         if(lstm_params.projection_bias() != nullptr)
         {
             ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lstm_params.projection_bias(), 1, DataType::S32);
-            ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, lstm_params.projection_bias(), &projection_eff_bias_info,
-                                                                                       &projection_eff_bias_info, ConvertPolicy::SATURATE));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(lstm_params.projection_bias(), &projection_eff_bias_info,
+                                                                       &projection_eff_bias_info, ConvertPolicy::SATURATE));
         }
     }
@@ -691,7 +691,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
     const float recurrent_to_forget_scale = recurrent_to_forget_weights->quantization_info().uniform().scale * qoutput_state_in.scale / lstm_params.forget_intermediate_scale();
     ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemmlowp_info, output_state_in, &recurrent_weights_transposed, &eff_bias_info, recurrent_to_forget_scale, &mm_out_info, &forget_outstage_info));
-    ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &forget_outstage_info, &forget_outstage_info, &forget_outstage_info, ConvertPolicy::SATURATE));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&forget_outstage_info, &forget_outstage_info, &forget_outstage_info, ConvertPolicy::SATURATE));
     if(lstm_params.has_peephole_opt())
     {
@@ -701,7 +701,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
         const float cell_to_forget_scale = std::pow(2, cell_shift) * lstm_params.cell_to_forget_weights()->quantization_info().uniform().scale / lstm_params.forget_intermediate_scale();
         ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(cell_to_forget_scale, &gemmlowp_info.gemmlowp_multiplier, &gemmlowp_info.gemmlowp_shift));
         ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&mm_out_info, nullptr, &forget_outstage_info, gemmlowp_info));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &forget_outstage_info, &forget_outstage_info, &forget_outstage_info, ConvertPolicy::SATURATE));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&forget_outstage_info, &forget_outstage_info, &forget_outstage_info, ConvertPolicy::SATURATE));
     }
     if(has_layer_norm)
@@ -726,7 +726,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
     const float recurrent_to_cell_scale = recurrent_to_cell_weights->quantization_info().uniform().scale * qoutput_state_in.scale / lstm_params.cell_intermediate_scale();
     ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemmlowp_info, output_state_in, &input_weights_transposed, &eff_bias_info, recurrent_to_cell_scale, &mm_out_info, &cell_outstage_info));
-    ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &cell_outstage_info, &cell_outstage_info, &cell_outstage_info, ConvertPolicy::SATURATE));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&cell_outstage_info, &cell_outstage_info, &cell_outstage_info, ConvertPolicy::SATURATE));
     if(has_layer_norm)
     {
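The validate() migrations here all follow one template: kernel-level validation keyed on an operation enum becomes function-level validation. A standalone sketch; the shapes and data type are made up for illustration and only mirror the bias-accumulation hunks above in spirit:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    Status validate_saturated_add_example()
    {
        // Hypothetical S32 accumulator tensors.
        const TensorInfo src1(TensorShape(32U, 16U), 1, DataType::S32);
        const TensorInfo src2(TensorShape(32U, 16U), 1, DataType::S32);
        const TensorInfo dst(TensorShape(32U, 16U), 1, DataType::S32);

        // Replaces: CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, ...)
        return CLArithmeticAddition::validate(&src1, &src2, &dst, ConvertPolicy::SATURATE);
    }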
@@ -743,7 +743,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
     if(lstm_params.has_cifg_opt())
     {
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(lstm_params.input_gate_bias() != nullptr, "Input gate bias must not be present when CIFG is used");
-        ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::SUB, &input_gate_info, &forget_gate_info, &forget_gate_info, ConvertPolicy::SATURATE));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticSubtraction::validate(&input_gate_info, &forget_gate_info, &forget_gate_info, ConvertPolicy::SATURATE));
     }
     else
     {
@@ -762,7 +762,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
         const float recurrent_to_input_scale = lstm_params.recurrent_to_input_weights()->quantization_info().uniform().scale * qoutput_state_in.scale / lstm_params.input_intermediate_scale();
         ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemmlowp_info, output_state_in, &recurrent_weights_transposed, &eff_bias_info, recurrent_to_input_scale, &mm_out_info, &input_outstage_info));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &input_outstage_info, &input_outstage_info, &input_outstage_info, ConvertPolicy::SATURATE));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&input_outstage_info, &input_outstage_info, &input_outstage_info, ConvertPolicy::SATURATE));
         if(lstm_params.has_peephole_opt())
         {
@@ -771,7 +771,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
             const float cell_to_input_scale = std::pow(2, cell_shift) * lstm_params.cell_to_input_weights()->quantization_info().uniform().scale / lstm_params.input_intermediate_scale();
             ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(cell_to_input_scale, &gemmlowp_info.gemmlowp_multiplier, &gemmlowp_info.gemmlowp_shift));
             ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&mm_out_info, &eff_bias_info, &input_outstage_info, gemmlowp_info));
-            ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &input_outstage_info, &input_outstage_info, &input_outstage_info, ConvertPolicy::SATURATE));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&input_outstage_info, &input_outstage_info, &input_outstage_info, ConvertPolicy::SATURATE));
         }
         if(has_layer_norm)
@@ -786,7 +786,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
     // Cell.
     ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(&forget_gate_info, cell_state_in, &forget_gate_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
     ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(&input_gate_info, cell_state_in, &cell_gate_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
-    ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &forget_gate_info, &cell_gate_info, cell_state_out, ConvertPolicy::SATURATE));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&forget_gate_info, &cell_gate_info, cell_state_out, ConvertPolicy::SATURATE));
     if(quantized_cell_clip > 0)
     {
         ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(cell_state_out, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -quantized_cell_clip,
@@ -801,7 +801,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
     const float recurrent_to_output_scale = recurrent_to_output_weights->quantization_info().uniform().scale * qoutput_state_in.scale / lstm_params.output_intermediate_scale();
     ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(gemmlowp_info, output_state_in, &recurrent_weights_transposed, &eff_bias_info, recurrent_to_output_scale, &mm_out_info, &output_outstage_info));
-    ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &output_outstage_info, &output_outstage_info, &output_outstage_info, ConvertPolicy::SATURATE));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&output_outstage_info, &output_outstage_info, &output_outstage_info, ConvertPolicy::SATURATE));
     if(lstm_params.has_peephole_opt())
     {
         ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lstm_params.cell_to_output_weights(), 1, DataType::QSYMM16);
@@ -811,7 +811,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
         // ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(cell_to_output_scale, &gemmlowp_info.gemmlowp_multiplier, &gemmlowp_info.gemmlowp_shift));
         ARM_COMPUTE_RETURN_ON_ERROR(CLPixelWiseMultiplicationKernel::validate(cell_state_out, lstm_params.cell_to_output_weights(), &output_outstage_info, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &output_outstage_info, &output_outstage_info, &output_outstage_info, ConvertPolicy::SATURATE));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&output_outstage_info, &output_outstage_info, &output_outstage_info, ConvertPolicy::SATURATE));
     }
     if(has_layer_norm)
@@ -866,7 +866,7 @@ Status CLQLSTMLayer::validate(const ITensorInfo *input,
         ARM_COMPUTE_RETURN_ON_ERROR(CLQLSTMLayer::TensorCopyKernel::validate(*output_state_out, projection_outstage_info));
     }
-    ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, output_state_out, output_state_out, output_state_out, ConvertPolicy::SATURATE));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(output_state_out, output_state_out, output_state_out, ConvertPolicy::SATURATE));
     if(projection_tensor_copy_required)
     {
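From here the patch rewrites CLQLSTMLayer::run(): members that used to be bare kernels had to be handed to CLScheduler, while the new function objects launch their own kernels. Roughly, with an illustrative member name:

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    struct RunPatternExample
    {
        CLArithmeticAddition _accumulate{}; // previously a CLSaturatedArithmeticOperationKernel

        void run()
        {
            // Before: the raw kernel had to be enqueued by hand:
            //     CLScheduler::get().enqueue(_accumulate);
            // After: the function wraps the kernel and enqueues it internally.
            _accumulate.run();
        }
    };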
@@ -922,13 +922,13 @@ void CLQLSTMLayer::run()
     _mm_recurrent_to_forget.run();
     _recurrent_to_forget_outstage.run();
-    CLScheduler::get().enqueue(_accumulate_input_recurrent_forget);
+    _accumulate_input_recurrent_forget.run();
     if(_has_peephole)
     {
         CLScheduler::get().enqueue(_pixelwise_mul_cell_to_forget);
         _cell_to_forget_outstage.run();
-        CLScheduler::get().enqueue(_accumulate_cell_forget);
+        _accumulate_cell_forget.run();
     }
     if(_has_layer_norm)
@@ -944,7 +944,7 @@ void CLQLSTMLayer::run()
     _mm_recurrent_to_cell.run();
     _recurrent_to_cell_outstage.run();
-    CLScheduler::get().enqueue(_accumulate_input_recurrent_modulation);
+    _accumulate_input_recurrent_modulation.run();
     if(_has_layer_norm)
     {
@@ -956,7 +956,7 @@ void CLQLSTMLayer::run()
     // Input gate
     if(_has_cifg)
     {
-        CLScheduler::get().enqueue(_input_gate_sub);
+        _input_gate_sub.run();
     }
     else
     {
@@ -964,13 +964,13 @@ void CLQLSTMLayer::run()
         _input_to_input_outstage.run();
         _mm_recurrent_to_input.run();
         _recurrent_to_input_outstage.run();
-        CLScheduler::get().enqueue(_accumulate_input_recurrent_input);
+        _accumulate_input_recurrent_input.run();
         if(_has_peephole)
         {
             CLScheduler::get().enqueue(_pixelwise_mul_cell_to_input);
             _cell_to_input_outstage.run();
-            CLScheduler::get().enqueue(_accumulate_cell_input);
+            _accumulate_cell_input.run();
         }
         if(_has_layer_norm)
@@ -984,7 +984,7 @@ void CLQLSTMLayer::run()
     // Cell.
     CLScheduler::get().enqueue(_pixelwise_mul_forget_cell);
     CLScheduler::get().enqueue(_pixelwise_mul_input_cell);
-    CLScheduler::get().enqueue(_add_forget_cell);
+    _add_forget_cell.run();
     if(_has_cell_clipping)
     {
         _cell_clip.run();
     }
@@ -995,12 +995,12 @@ void CLQLSTMLayer::run()
     _input_to_output_outstage.run();
     _mm_recurrent_to_output.run();
     _recurrent_to_output_outstage.run();
-    CLScheduler::get().enqueue(_accumulate_input_recurrent_output);
+    _accumulate_input_recurrent_output.run();
     if(_has_peephole)
     {
         CLScheduler::get().enqueue(_pixelwise_mul_cell_to_output);
         _cell_to_output_outstage.run();
-        CLScheduler::get().enqueue(_accumulate_cell_to_output);
+        _accumulate_cell_to_output.run();
     }
     if(_has_layer_norm)
@@ -1026,7 +1026,7 @@ void CLQLSTMLayer::run()
         _projection_output_to_accumulate_copy.run();
     }
-    CLScheduler::get().enqueue(_accumulate_projection);
+    _accumulate_projection.run();
     if(_projection_tensor_copy_required)
     {
@@ -1108,7 +1108,7 @@ void CLQLSTMLayer::prepare()
         CLScheduler::get().enqueue(_projection_reduction);
         if(_projection_bias != nullptr)
         {
-            CLScheduler::get().enqueue(_projection_bias_add);
+            _projection_bias_add.run();
             _projection_bias->mark_as_unused();
         }
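None of this changes how callers drive the public functions. End-to-end use of CLArithmeticAddition still looks like the following sketch; the shape and data type are illustrative only:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        CLTensor a, b, sum;
        const TensorInfo info(TensorShape(16U), 1, DataType::F32);
        a.allocator()->init(info);
        b.allocator()->init(info);
        sum.allocator()->init(info);

        // Configure before allocating, as usual for Compute Library functions.
        CLArithmeticAddition add;
        add.configure(&a, &b, &sum, ConvertPolicy::SATURATE);

        a.allocator()->allocate();
        b.allocator()->allocate();
        sum.allocator()->allocate();

        add.run();                 // enqueues the kernel internally
        CLScheduler::get().sync(); // wait for completion
        return 0;
    }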
diff --git a/src/runtime/CL/functions/CLRNNLayer.cpp b/src/runtime/CL/functions/CLRNNLayer.cpp
index 666e7b0786..94e7f9440c 100644
--- a/src/runtime/CL/functions/CLRNNLayer.cpp
+++ b/src/runtime/CL/functions/CLRNNLayer.cpp
@@ -62,7 +62,7 @@ Status CLRNNLayer::validate(const ITensorInfo *input, const ITensorInfo *weights
     ARM_COMPUTE_RETURN_ON_ERROR(CLFullyConnectedLayer::validate(input, weights, bias, &shape_info));
     ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(hidden_state, recurrent_weights, nullptr, &shape_info, 1.f, 0.f));
-    ARM_COMPUTE_RETURN_ON_ERROR(CLSaturatedArithmeticOperationKernel::validate(ArithmeticOperation::ADD, &shape_info, &shape_info, &shape_info, ConvertPolicy::SATURATE));
+    ARM_COMPUTE_RETURN_ON_ERROR(CLArithmeticAddition::validate(&shape_info, &shape_info, &shape_info, ConvertPolicy::SATURATE));
     ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(&shape_info, &shape_info, info));
     return Status{};
@@ -99,7 +99,7 @@ void CLRNNLayer::configure(const CLCompileContext &compile_context, const ICLTen
     _add_output.allocator()->init(TensorInfo(shape, 1, input->info()->data_type()));
     _memory_group.manage(&_add_output);
-    _add_kernel.configure(compile_context, ArithmeticOperation::ADD, &_fully_connected_out, &_gemm_output, &_add_output, ConvertPolicy::SATURATE);
+    _add_kernel.configure(compile_context, &_fully_connected_out, &_gemm_output, &_add_output, ConvertPolicy::SATURATE);
     _fully_connected_out.allocator()->allocate();
     _gemm_output.allocator()->allocate();
@@ -118,7 +118,7 @@ void CLRNNLayer::run()
     _fully_connected_kernel.run();
     _gemm_state_f.run();
-    CLScheduler::get().enqueue(_add_kernel);
+    _add_kernel.run();
     _activation.run();
     // copy hidden out to output
-- 
cgit v1.2.1
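What the series buys is the operator layer underneath: configuration runs on ITensorInfo metadata only, and concrete tensors are bound at execution time through tensor maps. A heavily hedged sketch, assuming the experimental::CLArithmeticAddition operator, the InputTensorMap/OutputTensorMap/OperatorTensorMap aliases, and the TensorType::ACL_SRC_0/ACL_SRC_1/ACL_DST identifiers this series introduces; exact signatures may differ:

    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Configure against metadata only; no backing memory exists yet.
        TensorInfo info(TensorShape(16U), 1, DataType::F32);
        experimental::CLArithmeticAddition op; // assumed operator class from this series
        op.configure(CLKernelLibrary::get().get_compile_context(), &info, &info, &info, ConvertPolicy::SATURATE);

        // Allocate concrete tensors afterwards and bind them at run time.
        CLTensor a, b, sum;
        a.allocator()->init(info);
        b.allocator()->init(info);
        sum.allocator()->init(info);
        a.allocator()->allocate();
        b.allocator()->allocate();
        sum.allocator()->allocate();

        // Tensors are passed per-execution rather than captured at configure time,
        // which is what makes the operator reusable across different tensors.
        InputTensorMap    inputs{ { TensorType::ACL_SRC_0, &a }, { TensorType::ACL_SRC_1, &b } };
        OutputTensorMap   outputs{ { TensorType::ACL_DST, &sum } };
        OperatorTensorMap workspace{};
        op.run(inputs, outputs, workspace);

        CLScheduler::get().sync();
        return 0;
    }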