Diffstat (limited to 'arm_compute/core')
-rw-r--r-- | arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h | 58
1 file changed, 56 insertions, 2 deletions
diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index 1e079cbb06..675c462c95 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -26,6 +26,8 @@
 
 #include "arm_compute/core/CL/ICLSimple3DKernel.h"
 
+#include <tuple>
+
 namespace arm_compute
 {
 class ICLTensor;
@@ -42,7 +44,7 @@ public:
     void configure(const ICLTensor *input, ICLTensor *output);
 };
 
-/** Interface for shifting the logits values around the max value and exponentiating the result */
+/** Interface for shifting, exponentiating and summing the logits */
 class CLLogits1DShiftExpSumKernel : public ICLKernel
 {
 public:
@@ -60,9 +62,9 @@ public:
      *
      * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32
      * @param[in]  max    Max values tensor. Data types supported: same as @p input
-     * @param[in]  beta   A scaling factor for the exponent.
      * @param[out] output Destination tensor. Data types supported: same as @p input
      * @param[out] sum    Sum of 1D logits tensor. Data types supported: same as @p input
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
      */
     void configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
 
@@ -76,6 +78,58 @@ private:
     ICLTensor       *_sum;
 };
 
+/** Interface for max, shifting, exponentiating and summing the logits */
+class CLLogits1DMaxShiftExpSumKernel : public ICLKernel
+{
+public:
+    using ParallelReductionInfo = std::tuple<bool, unsigned int>;
+
+public:
+    /** Default constructor */
+    CLLogits1DMaxShiftExpSumKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLLogits1DMaxShiftExpSumKernel(const CLLogits1DMaxShiftExpSumKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLLogits1DMaxShiftExpSumKernel &operator=(const CLLogits1DMaxShiftExpSumKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLLogits1DMaxShiftExpSumKernel(CLLogits1DMaxShiftExpSumKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
+    /** Set the input and output tensors.
+     *
+     * @param[in]     input  Source tensor. Data types supported: QS8/QS16/F16/F32
+     * @param[in,out] max    Max values tensor. Data types supported: same as @p input
+     * @param[out]    output Destination tensor. Data types supported: same as @p input
+     * @param[out]    sum    Sum of 1D logits tensor. Data types supported: same as @p input
+     * @param[in]     beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
+     */
+    void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
+    /** Checks if the given size is eligible for parallel reduction
+     *
+     * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
+     * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
+     *
+     * @param[in] size Size to check
+     *
+     * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
+     *         while the second element is the vector size of the execution.
+     */
+    static ParallelReductionInfo is_parallel_reduction(size_t size);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    const ICLTensor *_input;
+    ICLTensor       *_max;
+    ICLTensor       *_output;
+    ICLTensor       *_sum;
+
+private:
+    static const unsigned int _grid_size;
+    static const unsigned int _serial_vector_size;
+    static const unsigned int _parallel_vector_size;
+};
 /** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
 class CLLogits1DNormKernel : public ICLKernel
 {
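
To illustrate the contract of the new is_parallel_reduction() helper, the standalone C++ sketch below mirrors the dispatch rule stated in its Doxygen comment. Only the threshold rule itself comes from the documentation: the concrete values of grid_size and serial_vector_size, and the vector size used on the serial path, are illustrative assumptions standing in for the kernel's private statics.

#include <cstddef>
#include <iostream>
#include <tuple>

using ParallelReductionInfo = std::tuple<bool, unsigned int>;

// Hypothetical stand-ins for the kernel's private statics
// _grid_size and _serial_vector_size; the values are assumed.
constexpr unsigned int grid_size          = 64;
constexpr unsigned int serial_vector_size = 8;

// Mirrors the documented rule: a parallel reduction is chosen for
// width >= grid_size * serial_vector_size, with the vector size forced
// to 4; otherwise a serial reduction runs (its vector size is assumed
// to be serial_vector_size here).
ParallelReductionInfo is_parallel_reduction(std::size_t size)
{
    const bool         is_parallel = (size >= static_cast<std::size_t>(grid_size) * serial_vector_size);
    const unsigned int vector_size = is_parallel ? 4U : serial_vector_size;
    return std::make_tuple(is_parallel, vector_size);
}

int main()
{
    // With the assumed constants the threshold is 64 * 8 = 512 elements.
    for(std::size_t width : { 128, 512, 4096 })
    {
        bool         parallel = false;
        unsigned int vec      = 0;
        std::tie(parallel, vec) = is_parallel_reduction(width);
        std::cout << "width " << width << ": "
                  << (parallel ? "parallel" : "serial")
                  << " reduction, vector size " << vec << '\n';
    }
    return 0;
}

Returning the decision and the vector size as one tuple keeps the host-side dispatch and the OpenCL build options (which depend on the vector size) derived from a single call, so the two cannot drift apart.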