From 0779fecbf897fe85c5e13da52b129e439c4cc75d Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Wed, 13 Nov 2019 17:08:12 +0000
Subject: COMPMID-2763 [CL] add support for QASYMM8_SIGNED to SoftmaxLayer

Change-Id: I4556bde3aa51eb874a4e674dbbd575fa4491c088
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/2375
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
---
 arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h | 3 ++-
 arm_compute/core/KernelDescriptors.h               | 5 +++--
 arm_compute/core/Utils.h                           | 9 +++++++++
 3 files changed, 14 insertions(+), 3 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index 93e403e257..f64739ae32 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -187,10 +187,11 @@ public:
      * @param[in] input  Source tensor. Data types supported: S32/F16/F32
      * @param[in] sum    Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
      * @param[in] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
+     * @param[in] info   Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index 2aa076246e..f358153b0d 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -79,8 +79,9 @@ struct DWCWeightsKernelInfo
 /** Descriptor used by the softmax kernels */
 struct SoftmaxKernelInfo
 {
-    float beta{ 1.f };     /**< A scaling factor for the exponent with default value 1.0 */
-    bool  is_log{ false }; /**< Flag used to perform Log Softmax operation */
+    float    beta{ 1.f };                          /**< A scaling factor for the exponent with default value 1.0 */
+    bool     is_log{ false };                      /**< Flag used to perform Log Softmax operation */
+    DataType input_data_type{ DataType::UNKNOWN }; /**< Input tensor data type */
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H */
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 18c5471f8f..0a7eeefded 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -958,6 +958,15 @@ std::pair scaled_dimensions(unsigned int width, unsi
  */
 bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis);
 
+/** Returns output quantization information for softmax layer
+ *
+ * @param[in] input_type The data type of the input tensor
+ * @param[in] is_log     True for log softmax
+ *
+ * @return Quantization information for the output tensor
+ */
+QuantizationInfo get_softmax_output_quantization_info(DataType input_type, bool is_log);
+
 /** Convert a tensor format into a string.
  *
  * @param[in] format @ref Format to be translated to string.
-- 
cgit v1.2.1
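
Note (appended for clarity, not part of the patch): the new SoftmaxKernelInfo::input_data_type field lets the quantized softmax kernels derive an output QuantizationInfo through the get_softmax_output_quantization_info() helper declared above; the helper's definition (presumably in src/core/Utils.cpp) is outside this arm_compute-limited diff. The sketch below is a hedged illustration only: the function name softmax_output_qinfo_sketch and the concrete scale/offset values (1/256 with offset 0 or -128, 16/256 with offset 127 for signed log-softmax) are assumptions, not taken from this commit.

    #include "arm_compute/core/Types.h" // DataType, QuantizationInfo

    using namespace arm_compute;

    // Hypothetical stand-in for get_softmax_output_quantization_info().
    // Softmax outputs always lie in [0, 1] (log-softmax in a fixed negative range),
    // so the output quantization can be fixed per input data type instead of being
    // derived from the input tensor's quantization.
    QuantizationInfo softmax_output_qinfo_sketch(DataType input_type, bool is_log)
    {
        if(input_type == DataType::QASYMM8_SIGNED)
        {
            // Assumed mapping of the output range onto signed 8-bit values.
            return is_log ? QuantizationInfo(16.f / 256.f, 127) : QuantizationInfo(1.f / 256.f, -128);
        }
        // QASYMM8 and, in this sketch, any other data type.
        return QuantizationInfo(1.f / 256.f, 0);
    }

    // A caller might populate the extended descriptor like this before passing it
    // to the kernel's configure()/validate() shown in the diff above:
    //
    //     SoftmaxKernelInfo info{};
    //     info.beta            = 1.f;
    //     info.is_log          = false;
    //     info.input_data_type = input->info()->data_type();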