From 62eeb53a5eee9d388a6074553175909fd1b441b5 Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Tue, 29 Oct 2019 13:13:19 +0000
Subject: COMPMID-2266: [CL] add support for Log Softmax

Change-Id: I4a8f3519328553e24cbb4fe45a8ca4d47c90975d
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/2182
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h | 11 ++++++-----
 arm_compute/core/KernelDescriptors.h               |  7 +++++++
 arm_compute/runtime/CL/functions/CLSoftmaxLayer.h  | 13 ++++++++++---
 3 files changed, 23 insertions(+), 8 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index b272878fe7..04d94c041e 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,6 +25,7 @@
 #define __ARM_COMPUTE_CLSOFTMAXLAYERKERNEL_H__

 #include "arm_compute/core/CL/ICLSimple3DKernel.h"
+#include "arm_compute/core/KernelDescriptors.h"

 #include <tuple>

@@ -120,9 +121,9 @@ public:
      * @param[in,out] max    Max values tensor. Data types supported: same as @p input
      * @param[out]    output Destination tensor. Data types supported: same as @p input
      * @param[out]    sum    Sum of 1D logits tensor. Data types supported: same as @p input
-     * @param[in]     beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
+     * @param[in]     info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
      */
-    void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
+    void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel
      *
      * @param[in] input  Source tensor. Data types supported: F16/F32
@@ -178,9 +179,9 @@ public:
      * @param[in]  input  Source tensor. Data types supported: S32/F16/F32
      * @param[in]  sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
      * @param[out] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
-     * @param[in]  beta   (Optional) A scaling factor for the exponent. (Default = 1.0)
+     * @param[in]  info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
      */
-    void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, float beta = 1.0f);
+    void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel
      *
      * @param[in] input  Source tensor. Data types supported: S32/F16/F32
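
For illustration, a minimal sketch of how a caller might drive the updated kernel
interface. Only SoftmaxKernelInfo and the new configure() signature come from this
patch; the helper function, its name, and the tensor pointers are hypothetical.

    #include "arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h"
    #include "arm_compute/core/KernelDescriptors.h"

    using namespace arm_compute;

    // Hypothetical helper: the tensors are assumed to be initialised elsewhere.
    void configure_max_shift_exp_sum(CLLogits1DMaxShiftExpSumKernel &kernel,
                                     const ICLTensor *input, ICLTensor *max,
                                     ICLTensor *output, ICLTensor *sum)
    {
        // Before this patch the scaling factor was a bare float parameter:
        //   kernel.configure(input, max, output, sum, 1.0f);
        // It now travels inside a descriptor, together with the new log flag.
        SoftmaxKernelInfo info{};
        info.beta   = 1.0f; // same default the removed float parameter had
        info.is_log = true; // request Log Softmax instead of plain Softmax

        kernel.configure(input, max, output, sum, info);
    }
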
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index 215f0f1651..905401bbda 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -75,5 +75,12 @@ struct DWCWeightsKernelInfo
 {
     unsigned int n0{ 0 }; /**< Number of columns processed by each thread */
 };
+
+/** Descriptor used by the softmax kernels */
+struct SoftmaxKernelInfo
+{
+    float beta{ 1.f };     /**< A scaling factor for the exponent with default value 1.0 */
+    bool  is_log{ false }; /**< Flag used to perform Log Softmax operation */
+};
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
index 407827087c..e3feebb762 100644
--- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
@@ -43,16 +43,20 @@ class ICLTensor;
  * Softmax is calculated by :
  * @f[ out = exp((x - max(x)) * beta) / sum(exp((x - max(x)) * beta)) @f]
  *
+ * Log Softmax is calculated by :
+ * @f[ out = (x - max(x)) * beta - \log\left(\sum{e^{(x - max(x)) * beta}}\right) @f]
+ *
  * This function runs the following kernels:
  * -# @ref CLLogits1DMaxKernel
  * -# @ref CLLogits1DShiftExpSumKernel
  * -# @ref CLLogits1DNormKernel
  */
-class CLSoftmaxLayer : public IFunction
+template <bool IS_LOG = false>
+class CLSoftmaxLayerGeneric : public IFunction
 {
 public:
     /** Constructor */
-    CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
     /** Set the input and output tensors.
      *
      * @param[in]  input  Source tensor. Data types supported: QASYMM8/F16/F32
@@ -106,5 +110,8 @@ private:
     CLTensor _output_flattened;
     bool     _needs_flattening;
 };
-}
+
+using CLSoftmaxLayer    = CLSoftmaxLayerGeneric<false>;
+using CLLogSoftmaxLayer = CLSoftmaxLayerGeneric<true>;
+} // namespace arm_compute
 #endif /* __ARM_COMPUTE_CLSOFTMAXLAYER_H__ */
-- 
cgit v1.2.1
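
To sanity-check the Log Softmax formula documented above, here is a small scalar
reference in plain C++ (no OpenCL). It is an illustration of the math only, not
code from this patch or the library.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Reference 1D log-softmax matching the documented formula:
    //   out_i = (x_i - max(x)) * beta - log( sum_j exp((x_j - max(x)) * beta) )
    // Shifting by max(x) before exponentiating avoids overflow in exp(),
    // which is why the kernels compute a max tensor first.
    std::vector<float> log_softmax_ref(const std::vector<float> &x, float beta = 1.0f)
    {
        // Assumes a non-empty input vector.
        const float max_val = *std::max_element(x.begin(), x.end());

        float sum = 0.0f;
        for (float v : x)
        {
            sum += std::exp((v - max_val) * beta);
        }
        const float log_sum = std::log(sum);

        std::vector<float> out(x.size());
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            out[i] = (x[i] - max_val) * beta - log_sum;
        }
        return out;
    }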
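
Finally, a usage sketch for the new type aliases at the function level. Only the
CLSoftmaxLayer and CLLogSoftmaxLayer aliases come from this patch; the tensor
shape, the data type, and the two-argument configure() call (which relies on the
interface's pre-existing defaults) are assumptions for illustration.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init(); // create the CL context and queue

        // Hypothetical 1D input of 128 F32 logits.
        CLTensor input{}, output{};
        input.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));

        // The bool template argument picks the behaviour at compile time:
        //   CLSoftmaxLayer    == CLSoftmaxLayerGeneric<false> (plain Softmax)
        //   CLLogSoftmaxLayer == CLSoftmaxLayerGeneric<true>  (Log Softmax)
        CLLogSoftmaxLayer log_softmax;
        log_softmax.configure(&input, &output); // assumes beta keeps its 1.0f default

        input.allocator()->allocate();
        output.allocator()->allocate();
        // (filling the input buffer via map()/unmap() is omitted in this sketch)

        log_softmax.run();
        CLScheduler::get().sync();
        return 0;
    }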