From ce0c67559cf03965acc8f212263a9f53205a0a3f Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Thu, 18 Jun 2020 10:14:57 +0100
Subject: COMPMID-3377: Async support to NEElementwiseUnaryLayerKernel
 kernels/functions

Signed-off-by: Michalis Spyrou
Change-Id: I208287b44ece051e95f891d43a691cb0ac6e56c5
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3419
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 arm_compute/core/CPP/ICPPKernel.h                  |   2 +-
 .../core/NEON/kernels/NEActivationLayerKernel.h    |   2 +-
 .../NEON/kernels/NEElementwiseOperationKernel.h    |  47 +--
 .../core/NEON/kernels/NEReshapeLayerKernel.h       |   2 +-
 arm_compute/core/experimental/Types.h              |  26 +-
 arm_compute/runtime/CPP/CPPScheduler.h             |   4 +-
 arm_compute/runtime/IOperator.h                    |   4 +-
 arm_compute/runtime/IScheduler.h                   |   2 +-
 arm_compute/runtime/NEON/INEOperator.h             |   4 +-
 .../NEON/functions/NEElementwiseOperations.h       | 383 ++++++++++++++++++++-
 arm_compute/runtime/NEON/functions/NEPReluLayer.h  |  57 ++-
 arm_compute/runtime/OMP/OMPScheduler.h             |   2 +-
 arm_compute/runtime/SingleThreadScheduler.h        |   2 +-
 13 files changed, 468 insertions(+), 69 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/CPP/ICPPKernel.h b/arm_compute/core/CPP/ICPPKernel.h
index 3ec54756a0..45c7b52af4 100644
--- a/arm_compute/core/CPP/ICPPKernel.h
+++ b/arm_compute/core/CPP/ICPPKernel.h
@@ -84,7 +84,7 @@ public:
      * @param[in] window Region on which to execute the kernel. (Must be a region of the window returned by window())
      * @param[in] info   Info about executing thread and CPU.
      */
-    virtual void run_op(const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs, const Window &window, const ThreadInfo &info)
+    virtual void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, const ThreadInfo &info)
     {
         ARM_COMPUTE_UNUSED(inputs, outputs, window, info);
     }
diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
index 399afa63c6..7064e3dc7c 100644
--- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
@@ -76,7 +76,7 @@ public:
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
 
     // Inherited methods overridden:
-    void run_op(const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs,
+    void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs,
                 const Window &window, const ThreadInfo &info) override;
 
 private:
diff --git a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
index 61c25e1a2a..b109ddd0f8 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
@@ -57,18 +57,19 @@ public:
     /** Default destructor */
     ~NEElementwiseOperationKernel() = default;
 
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
     /** Common signature for all the specialised arithmetic functions
      *
-     * @param[in] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
-     * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
-     * @param[in] output Output tensor. Data types supported: Dependent on subclass.
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Dependent on subclass.
      * @param[in] window Region on which to execute the kernel.
      */
     using ElementwiseFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
 
+    // Inherited methods overridden:
+    void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs,
+                const Window &window, const ThreadInfo &info) override;
+
 protected:
     /** Validate the arguments passed to the kernel
      *
      */
     /** Common configure function for element-wise operators with no additional options (e.g. Min, Max, SquaredDiff)
      *
      */
-    void configure_common(const ITensor *input1, const ITensor *input2, ITensor *output);
+    void configure_common(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
 
     /** Function to use for the particular tensor types passed to configure() */
    std::function<ElementwiseFunction> _function;
 
 public:
     /** Initialise the kernel's inputs and output
      *
      * @param[in] op     Arithmetic operation to be executed.
-     * @param[in] input1 First tensor input. Data types supported: QASYMM8/S16/F16/S32/F32.
-     * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
-     * @param[in] output Output tensor. Data types supported: Same as @p input1.
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
      */
-    void configure(ArithmeticOperation op, const ITensor *input1, const ITensor *input2, ITensor *output);
+    void configure(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
      *
     /** Initialise the kernel's inputs and output
      *
-     * @param[in] input1 First tensor input. Data types supported: F16/F32.
-     * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
-     * @param[in] output Output tensor. Data types supported: Same as @p input1.
+     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
      */
-    void configure(const ITensor *input1, const ITensor *input2, ITensor *output);
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
      *
     /** Initialise the kernel's inputs and output
      *
-     * @param[in]  input1 First tensor input. Data types supported: F16/F32.
-     * @param[in]  input2 Second tensor input. Data types supported: Same as @p input1.
-     * @param[out] output Output tensor. Data types supported: Same as @p input1.
+     * @param[in]  input1 First tensor input info. Data types supported: F16/F32.
+     * @param[in]  input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out] output Output tensor info. Data types supported: Same as @p input1.
      */
-    void configure(const ITensor *input1, const ITensor *input2, ITensor *output);
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
      *
     /** Initialise the kernel's inputs and output
      *
      * @param[in] op     Comparison operation to be executed.
-     * @param[in] input1 First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
-     * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
-     * @param[in] output Output tensor. Data types supported: U16/U32.
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: U16/U32.
      */
-    void configure(ComparisonOperation op, const ITensor *input1, const ITensor *input2, ITensor *output);
+    void configure(ComparisonOperation op, const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
      *
diff --git a/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h b/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h
index 7a4dce128d..1ed3554db2 100644
--- a/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h
@@ -57,7 +57,7 @@ public:
     static Status validate(const ITensorInfo *input, const ITensorInfo *output);
 
     // Inherited methods overridden:
-    void run_op(const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs,
+    void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs,
                 const Window &window, const ThreadInfo &info) override;
 };
 } // namespace arm_compute
diff --git a/arm_compute/core/experimental/Types.h b/arm_compute/core/experimental/Types.h
index 2b5591872a..62dd6ff305 100644
--- a/arm_compute/core/experimental/Types.h
+++ b/arm_compute/core/experimental/Types.h
@@ -50,29 +50,9 @@ enum class TensorType
     ACL_INT_2 = 52
 };
 
-/** Input tensor aggregate */
-struct InputTensor
-{
-    InputTensor(TensorType type, const ITensor *tensor)
-        : type(type), tensor(tensor)
-    {
-    }
-
-    TensorType     type{ TensorType::ACL_UNKNOWN };
-    const ITensor *tensor{ nullptr };
-};
-/** Output tensor aggregate */
-struct OutputTensor
-{
-    OutputTensor(TensorType type, ITensor *tensor)
-        : type(type), tensor(tensor)
-    {
-    }
-
-    TensorType type{ TensorType::ACL_UNKNOWN };
-    ITensor   *tensor{ nullptr };
-};
-using OperatorTensor = OutputTensor;
+using InputTensorMap    = std::map<TensorType, const ITensor *>;
+using OutputTensorMap   = std::map<TensorType, ITensor *>;
+using OperatorTensorMap = OutputTensorMap;
 
 namespace experimental
 {
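With the aggregates replaced by maps keyed on TensorType, callers bind concrete tensors per call rather than at configure time. A minimal sketch of how a caller might drive a configured kernel through the scheduler under the new signatures (the dispatch() helper and the src0/src1/dst tensors are illustrative, not part of this patch):

    #include "arm_compute/core/CPP/ICPPKernel.h"
    #include "arm_compute/core/Window.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/NEON/NEScheduler.h"

    using namespace arm_compute;

    // Hypothetical helper: the kernel no longer caches ITensor pointers, so the
    // tensors to operate on are supplied per invocation, keyed by their role.
    void dispatch(ICPPKernel *kernel, const ITensor *src0, const ITensor *src1, ITensor *dst)
    {
        const InputTensorMap  inputs  = { { TensorType::ACL_SRC_0, src0 },
                                          { TensorType::ACL_SRC_1, src1 } };
        const OutputTensorMap outputs = { { TensorType::ACL_DST, dst } };

        // Split the work along the y-dimension; every worker thread receives the same maps.
        NEScheduler::get().schedule_op(kernel, IScheduler::Hints(Window::DimY), inputs, outputs);
    }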
diff --git a/arm_compute/runtime/CPP/CPPScheduler.h b/arm_compute/runtime/CPP/CPPScheduler.h
index 2ccb094fdb..2f7951eb59 100644
--- a/arm_compute/runtime/CPP/CPPScheduler.h
+++ b/arm_compute/runtime/CPP/CPPScheduler.h
@@ -77,7 +77,7 @@ public:
      * @param[in] inputs  Map containing the input tensors.
      * @param[in] outputs Map containing the output tensors.
      */
-    void schedule_op(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs) override;
+    void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
 
 protected:
     /** Will run the workloads in parallel using num_threads
      *
      * @param[in] workloads Array of workloads to run
      */
     void run_workloads(std::vector<Workload> &workloads) override;
 
 private:
-    void schedule_common(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs);
+    void schedule_common(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs);
     struct Impl;
     std::unique_ptr<Impl> _impl;
 };
diff --git a/arm_compute/runtime/IOperator.h b/arm_compute/runtime/IOperator.h
index cf3c8b05a1..887bed4de2 100644
--- a/arm_compute/runtime/IOperator.h
+++ b/arm_compute/runtime/IOperator.h
@@ -46,7 +46,7 @@ public:
      * @param[in] workspace Map containing the workspace tensors.
      *
      */
-    virtual void run(std::vector<InputTensor> inputs, std::vector<OutputTensor> outputs, std::vector<OperatorTensor> workspace) = 0;
+    virtual void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) = 0;
     /** Prepare the function for executing
      *
      * Any one-off pre-processing step required by the function is handled here
      *
      * @note Prepare stage might not need all the function's buffers' backing memory to be available in order to execute
      */
-    virtual void prepare(std::vector<OperatorTensor> constants) = 0;
+    virtual void prepare(OperatorTensorMap constants) = 0;
 
     /** Return the memory requirements required by the workspace
      */
diff --git a/arm_compute/runtime/IScheduler.h b/arm_compute/runtime/IScheduler.h
index 40da86fd10..29135f42c0 100644
--- a/arm_compute/runtime/IScheduler.h
+++ b/arm_compute/runtime/IScheduler.h
@@ -157,7 +157,7 @@ public:
      * @param[in] inputs  Map containing the input tensors.
      * @param[in] outputs Map containing the output tensors.
      */
-    virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs) = 0;
+    virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) = 0;
 
     /** Execute all the passed workloads
      *
diff --git a/arm_compute/runtime/NEON/INEOperator.h b/arm_compute/runtime/NEON/INEOperator.h
index 2f6e18048d..2e8f8f3d42 100644
--- a/arm_compute/runtime/NEON/INEOperator.h
+++ b/arm_compute/runtime/NEON/INEOperator.h
@@ -54,8 +54,8 @@ public:
     INEOperator &operator=(INEOperator &&) = default;
 
     // Inherited methods overridden:
-    void run(std::vector<InputTensor> inputs, std::vector<OutputTensor> outputs, std::vector<OperatorTensor> workspace) override final;
-    void prepare(std::vector<OperatorTensor> constants) override final;
+    void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override final;
+    void prepare(OperatorTensorMap constants) override final;
 
 protected:
     std::unique_ptr<INEKernel> _kernel;
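With run() and prepare() now taking tensor maps, an INEOperator holds no tensor memory of its own: it is configured once from ITensorInfo descriptors and can be handed different tensors on every invocation. A sketch of the intended call pattern, using the experimental::NEElementwiseMax operator declared further down in this patch (tensor names are illustrative):

    #include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void max_once(ITensor *a, ITensor *b, ITensor *out)
    {
        // Configure from metadata only; no tensor memory is captured here.
        experimental::NEElementwiseMax op;
        op.configure(a->info(), b->info(), out->info());

        // Bind the concrete tensors at run time through the maps.
        const InputTensorMap  inputs  = { { TensorType::ACL_SRC_0, a },
                                          { TensorType::ACL_SRC_1, b } };
        const OutputTensorMap outputs = { { TensorType::ACL_DST, out } };
        op.run(inputs, outputs, {}); // empty workspace: workspace() reports no buffers for this operator
    }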
diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
index cac105cdb9..08f798ec6e 100644
--- a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
+++ b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
@@ -25,7 +25,8 @@
 #define ARM_COMPUTE_NEELEMENTWISEOPERATIONS_H
 
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/NEON/INEOperator.h"
 
 namespace arm_compute
 {
 class ITensor;
 
 /** Basic function to run @ref NEArithmeticOperationKernel for max
  *
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a max operation between two tensors.
  */
-class NEElementwiseMax : public INESimpleFunction
+class NEElementwiseMax : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseMax();
+    /** Default Destructor */
+    ~NEElementwiseMax();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMax(const NEElementwiseMax &) = delete;
+    /** Default move constructor */
+    NEElementwiseMax(NEElementwiseMax &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMax &operator=(const NEElementwiseMax &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseMax &operator=(NEElementwiseMax &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in, out] input2   Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for max
      *
      * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for min
  *
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a min operation between two tensors.
  */
-class NEElementwiseMin : public INESimpleFunction
+class NEElementwiseMin : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseMin();
+    /** Default Destructor */
+    ~NEElementwiseMin();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMin(const NEElementwiseMin &) = delete;
+    /** Default move constructor */
+    NEElementwiseMin(NEElementwiseMin &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseMin &operator=(const NEElementwiseMin &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseMin &operator=(NEElementwiseMin &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in, out] input2   Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for min
      *
      * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for squared difference
  *
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
  */
-class NEElementwiseSquaredDiff : public INESimpleFunction
+class NEElementwiseSquaredDiff : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseSquaredDiff();
+    /** Default Destructor */
+    ~NEElementwiseSquaredDiff();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseSquaredDiff(const NEElementwiseSquaredDiff &) = delete;
+    /** Default move constructor */
+    NEElementwiseSquaredDiff(NEElementwiseSquaredDiff &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseSquaredDiff &operator=(const NEElementwiseSquaredDiff &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseSquaredDiff &operator=(NEElementwiseSquaredDiff &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in, out] input2   Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for squared difference
      *
      * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for division
  *
  * @note The tensor data type for the inputs must be F16/F32.
  * @note The function performs a division operation between two tensors (i.e., out[i] = in1[i] / in2[i])
  */
-class NEElementwiseDivision : public INESimpleFunction
+class NEElementwiseDivision : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseDivision();
+    /** Default Destructor */
+    ~NEElementwiseDivision();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseDivision(const NEElementwiseDivision &) = delete;
+    /** Default move constructor */
+    NEElementwiseDivision(NEElementwiseDivision &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseDivision &operator=(const NEElementwiseDivision &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseDivision &operator=(NEElementwiseDivision &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
      * @param[in, out] input2   Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for division
      *
      * @param[in] input1   First tensor input info. Data types supported: F16/F32.
      * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEArithmeticOperationKernel for power
  *
  * @note The tensor data type for the inputs must be F16/F32.
  * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
  * @note For an exponent that is a float, this function will only work with a positive base.
  */
-class NEElementwisePower : public INESimpleFunction
+class NEElementwisePower : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwisePower();
+    /** Default Destructor */
+    ~NEElementwisePower();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwisePower(const NEElementwisePower &) = delete;
+    /** Default move constructor */
+    NEElementwisePower(NEElementwisePower &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwisePower &operator=(const NEElementwisePower &) = delete;
+    /** Default move assignment operator */
+    NEElementwisePower &operator=(NEElementwisePower &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1   First tensor input. Data types supported: F16/F32.
      * @param[in, out] input2   Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output   Output tensor. Data types supported: Same as @p input1.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for power
      *
      * @param[in] input1   First tensor input info. Data types supported: F16/F32.
      * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEComparisonOperationKernel.
  *
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a comparison operation between two tensors.
  */
-class NEElementwiseComparison : public INESimpleFunction
+class NEElementwiseComparison : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseComparison();
+    /** Default Destructor */
+    ~NEElementwiseComparison();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparison(const NEElementwiseComparison &) = delete;
+    /** Default move constructor */
+    NEElementwiseComparison(NEElementwiseComparison &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparison &operator=(const NEElementwiseComparison &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseComparison &operator=(NEElementwiseComparison &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in, out] input2 Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output Output tensor. Data types supported: U16/U32.
      * @param[in]      op     Comparison Operation to be performed.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, ComparisonOperation op);
     /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
      *
      * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output Output tensor info. Data types supported: U16/U32.
      * @param[in] op     Comparison Operation to be performed.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 
 /** Basic function to run @ref NEComparisonOperationKernel
  *
  * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
  * @note The function performs a comparison operation between two tensors.
  */
 template <ComparisonOperation op>
-class NEElementwiseComparisonStatic : public INESimpleFunction
+class NEElementwiseComparisonStatic : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEElementwiseComparisonStatic();
+    /** Default Destructor */
+    ~NEElementwiseComparisonStatic();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparisonStatic(const NEElementwiseComparisonStatic &) = delete;
+    /** Default move constructor */
+    NEElementwiseComparisonStatic(NEElementwiseComparisonStatic &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEElementwiseComparisonStatic &operator=(const NEElementwiseComparisonStatic &) = delete;
+    /** Default move assignment operator */
+    NEElementwiseComparisonStatic &operator=(NEElementwiseComparisonStatic &&);
     /** Initialise the kernel's inputs, output and conversion policy.
      *
      * @param[in, out] input1 First tensor input. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in, out] input2 Second tensor input. Data types supported: Same as @p input1.
      * @param[out]     output Output tensor. Data types supported: U16/U32.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output);
     /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
      *
      * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
      * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
      * @param[in] output Output tensor info. Data types supported: U16/U32.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
+};
+
+/** Basic function to run equal comparison. */
+using NEEqual = NEElementwiseComparisonStatic<ComparisonOperation::Equal>;
+/** Basic function to run not equal comparison. */
+using NENotEqual = NEElementwiseComparisonStatic<ComparisonOperation::NotEqual>;
+/** Basic function to run greater comparison. */
+using NEGreater = NEElementwiseComparisonStatic<ComparisonOperation::Greater>;
+/** Basic function to run greater-equal comparison. */
+using NEGreaterEqual = NEElementwiseComparisonStatic<ComparisonOperation::GreaterEqual>;
+/** Basic function to run less comparison. */
+using NELess = NEElementwiseComparisonStatic<ComparisonOperation::Less>;
+/** Basic function to run less-equal comparison. */
+using NELessEqual = NEElementwiseComparisonStatic<ComparisonOperation::LessEqual>;
+
+namespace experimental
+{
+/** Basic function to run @ref NEArithmeticOperationKernel for max
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a max operation between two tensors.
+ */
+class NEElementwiseMax : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for max
+     *
+     * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for min
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a min operation between two tensors.
+ */
+class NEElementwiseMin : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for min
+     *
+     * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for squared difference
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2)
+ */
+class NEElementwiseSquaredDiff : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for squared difference
+     *
+     * @param[in] input1   First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for division
+ *
+ * @note The tensor data type for the inputs must be F16/F32.
+ * @note The function performs a division operation between two tensors (i.e., out[i] = in1[i] / in2[i])
+ */
+class NEElementwiseDivision : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for division
+     *
+     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEArithmeticOperationKernel for power
+ *
+ * @note The tensor data type for the inputs must be F16/F32.
+ * @note The function performs an elementwise power of in1 to in2 (i.e., out[i] = in1[i] ^ in2[i])
+ * @note For an exponent that is a float, this function will only work with a positive base.
+ */
+class NEElementwisePower : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in, out] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for power
+     *
+     * @param[in] input1   First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2   Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output   Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEComparisonOperationKernel.
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a comparison operation between two tensors.
+ */
+class NEElementwiseComparison : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output Output tensor info. Data types supported: U16/U32.
+     * @param[in]      op     Comparison Operation to be performed.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ComparisonOperation op);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: U16/U32.
+     * @param[in] op     Comparison Operation to be performed.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op);
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+
+/** Basic function to run @ref NEComparisonOperationKernel
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @note The function performs a comparison operation between two tensors.
+ */
+template <ComparisonOperation op>
+class NEElementwiseComparisonStatic : public INEOperator
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[out]     output Output tensor info. Data types supported: U16/U32.
+     */
+    void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: U16/U32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
 };
 
 /** Basic function to run equal comparison. */
 using NEEqual = NEElementwiseComparisonStatic<ComparisonOperation::Equal>;
 /** Basic function to run not equal comparison. */
 using NENotEqual = NEElementwiseComparisonStatic<ComparisonOperation::NotEqual>;
 /** Basic function to run greater comparison. */
 using NEGreater = NEElementwiseComparisonStatic<ComparisonOperation::Greater>;
 /** Basic function to run greater-equal comparison. */
 using NEGreaterEqual = NEElementwiseComparisonStatic<ComparisonOperation::GreaterEqual>;
 /** Basic function to run less comparison. */
 using NELess = NEElementwiseComparisonStatic<ComparisonOperation::Less>;
 /** Basic function to run less-equal comparison. */
 using NELessEqual = NEElementwiseComparisonStatic<ComparisonOperation::LessEqual>;
+} // namespace experimental
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEELEMENTWISEOPERATIONS_H */
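Each IFunction wrapper above hides an Impl that pairs the user's tensors with one of the experimental operators. The implementation files live under src/ and are outside this header-only diff, so the following is a plausible sketch of what run() forwards to, assuming the Impl layout suggested by the headers:

    // Sketch only: the real definitions may differ in detail.
    struct NEElementwiseMax::Impl
    {
        const ITensor                                   *src_0{ nullptr };
        const ITensor                                   *src_1{ nullptr };
        ITensor                                         *dst{ nullptr };
        std::unique_ptr<experimental::NEElementwiseMax>  op{ nullptr };
    };

    void NEElementwiseMax::run()
    {
        // Re-pack the tensors captured at configure() time into the map form
        // expected by the experimental operator.
        const InputTensorMap  src = { { TensorType::ACL_SRC_0, _impl->src_0 },
                                      { TensorType::ACL_SRC_1, _impl->src_1 } };
        const OutputTensorMap dst = { { TensorType::ACL_DST, _impl->dst } };
        _impl->op->run(src, dst, {});
    }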
diff --git a/arm_compute/runtime/NEON/functions/NEPReluLayer.h b/arm_compute/runtime/NEON/functions/NEPReluLayer.h
index 102a165383..9229a842e8 100644
--- a/arm_compute/runtime/NEON/functions/NEPReluLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPReluLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 #define ARM_COMPUTE_NEPRELULAYER_H
 
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/NEON/INEOperator.h"
 
 namespace arm_compute
 {
 class ITensor;
 
+namespace experimental
+{
 /** Basic function to run @ref NEArithmeticOperationKernel for PRELU
  *
  * @note The function implements an activation layer with the PRELU activation function.
  */
-class NEPReluLayer : public INESimpleFunction
+class NEPReluLayer : public INEOperator
 {
 public:
+    /** Set the input and output tensor.
+     *
+     * @param[in]  input  Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in]  alpha  Source alpha tensor info. Data types supported: same as @p input.
+     * @param[out] output Destination tensor info. Data type supported: same as @p input
+     */
+    void configure(const ITensorInfo *input, const ITensorInfo *alpha, ITensorInfo *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for PRELU
+     *
+     * @param[in] input  Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] alpha  Source alpha tensor info. Data types supported: same as @p input.
+     * @param[in] output Destination tensor info. Data type supported: same as @p input
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    MemoryRequirements workspace() const override;
+};
+} // namespace experimental
+
+/** Basic function to run @ref NEArithmeticOperationKernel for PRELU
+ *
+ * @note The function implements an activation layer with the PRELU activation function.
+ */
+class NEPReluLayer : public IFunction
+{
+public:
+    /** Default Constructor */
+    NEPReluLayer();
+    /** Default Destructor */
+    ~NEPReluLayer();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPReluLayer(const NEPReluLayer &) = delete;
+    /** Default move constructor */
+    NEPReluLayer(NEPReluLayer &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPReluLayer &operator=(const NEPReluLayer &) = delete;
+    /** Default move assignment operator */
+    NEPReluLayer &operator=(NEPReluLayer &&);
     /** Set the input and output tensor.
      *
      * @param[in]  input  Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[in]  alpha  Source alpha tensor. Data types supported: same as @p input.
      * @param[out] output Destination tensor. Data type supported: same as @p input
      */
     void configure(const ITensor *input, const ITensor *alpha, ITensor *output);
     /** Static function to check if given info will lead to a valid configuration of @ref NEPReluLayer
      *
      * @param[in] input  Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[in] alpha  Source alpha tensor info. Data types supported: same as @p input.
      * @param[in] output Destination tensor info. Data type supported: same as @p input
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output);
+
+    // Inherited methods overridden:
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEPRELULAYER_H */
diff --git a/arm_compute/runtime/OMP/OMPScheduler.h b/arm_compute/runtime/OMP/OMPScheduler.h
index b7c186a838..1742e95263 100644
--- a/arm_compute/runtime/OMP/OMPScheduler.h
+++ b/arm_compute/runtime/OMP/OMPScheduler.h
@@ -66,7 +66,7 @@ public:
      * @param[in] inputs  Map containing the input tensors.
      * @param[in] outputs Map containing the output tensors.
      */
-    void schedule_op(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs) override;
+    void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
 
 protected:
     /** Execute all the passed workloads
diff --git a/arm_compute/runtime/SingleThreadScheduler.h b/arm_compute/runtime/SingleThreadScheduler.h
index 8a69a5be15..0d576b93eb 100644
--- a/arm_compute/runtime/SingleThreadScheduler.h
+++ b/arm_compute/runtime/SingleThreadScheduler.h
@@ -57,7 +57,7 @@ public:
      * @param[in] inputs  Map containing the input tensors.
      * @param[in] outputs Map containing the output tensors.
      */
-    void schedule_op(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs) override;
+    void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
 
 protected:
     /** Will run the workloads sequentially and in order.
--
cgit v1.2.1
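For callers, the function-level API is unchanged by this patch: configure() still takes ITensor pointers and run() takes no arguments, with the map plumbing handled internally. A minimal usage example (shape and data type chosen arbitrarily):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor a, b, out;
        a.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        out.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        NEElementwiseMax max;
        max.configure(&a, &b, &out); // same call sites as before this change

        a.allocator()->allocate();
        b.allocator()->allocate();
        out.allocator()->allocate();

        max.run(); // internally forwards to the experimental operator via tensor maps
        return 0;
    }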