diff options
Diffstat (limited to 'arm_compute')
-rw-r--r-- | arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h | 12 | ||||
-rw-r--r-- | arm_compute/runtime/NEON/functions/NESoftmaxLayer.h | 44 |
2 files changed, 36 insertions, 20 deletions
diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h index 25c3196e34..fb650794fa 100644 --- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -69,12 +69,20 @@ private: }; /** Interface for softmax computation for QASYMM8 with pre-computed max. */ +template <bool IS_LOG = false> class NELogits1DSoftmaxKernel : public INEKernel { public: const char *name() const override { - return "NELogits1DSoftmaxKernel"; + if(IS_LOG) + { + return "NELogits1DLogSoftmaxKernel"; + } + else + { + return "NELogits1DSoftmaxKernel"; + } } /** Default constructor */ NELogits1DSoftmaxKernel(); diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h index 4932aeff5a..9cc7088ae2 100644 --- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h +++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h @@ -36,29 +36,33 @@ namespace arm_compute { class ITensor; -/** Basic function to compute a SoftmaxLayer. +/** Basic function to compute a SoftmaxLayer and a Log SoftmaxLayer. 
* * Softmax is calculated by : * @f[ out = \frac{e^{x - max(x)}}{\sum{e^{x - max(x)}}} @f] * + * Log Softmax is calculated by : + * @f[ out = (x - max(x)) - log(\sum{e^{x - max(x)}}) @f] + * * This function runs the following kernels: * -# @ref NEFillBorderKernel * -# @ref NELogits1DMaxKernel * -# @ref NELogits1DSoftmaxKernel */ -class NESoftmaxLayer : public IFunction +template <bool IS_LOG = false> +class NESoftmaxLayerGeneric : public IFunction { public: /** Constructor */ - NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr); + NESoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager = nullptr); /** Prevent instances of this class from being copied (As this class contains pointers) */ - NESoftmaxLayer(const NESoftmaxLayer &) = delete; + NESoftmaxLayerGeneric(const NESoftmaxLayerGeneric &) = delete; /** Default move constructor */ - NESoftmaxLayer(NESoftmaxLayer &&) = default; + NESoftmaxLayerGeneric(NESoftmaxLayerGeneric &&) = default; /** Prevent instances of this class from being copied (As this class contains pointers) */ - NESoftmaxLayer &operator=(const NESoftmaxLayer &) = delete; + NESoftmaxLayerGeneric &operator=(const NESoftmaxLayerGeneric &) = delete; /** Default move assignment operator */ - NESoftmaxLayer &operator=(NESoftmaxLayer &&) = default; + NESoftmaxLayerGeneric &operator=(NESoftmaxLayerGeneric &&) = default; /** Set the input and output tensors. * * @param[in,out] input Source tensor. Data types supported: QASYMM8/F16/F32. 
If the width is not a @@ -103,17 +107,21 @@ private: */ void configure_reshape_input_kernel(const ITensor *input, const ITensor *output, size_t axis); - MemoryGroup _memory_group; - NELogits1DMaxKernel _max_kernel; - NELogits1DSoftmaxKernel _softmax_kernel; - std::unique_ptr<INEKernel> _flat_or_reshape_kernel_ptr; - NEFillBorderKernel _fill_border_kernel; - NEReshapeLayerKernel _reshape_kernel; - Tensor _max; - Tensor _tmp; - Tensor _input_flattened; - Tensor _output_flattened; - bool _needs_flattening; + MemoryGroup _memory_group; + NELogits1DMaxKernel _max_kernel; + NELogits1DSoftmaxKernel<IS_LOG> _softmax_kernel; + std::unique_ptr<INEKernel> _flat_or_reshape_kernel_ptr; + NEFillBorderKernel _fill_border_kernel; + NEReshapeLayerKernel _reshape_kernel; + Tensor _max; + Tensor _tmp; + Tensor _input_flattened; + Tensor _output_flattened; + bool _needs_flattening; }; + +using NESoftmaxLayer = NESoftmaxLayerGeneric<false>; +using NELogSoftmaxLayer = NESoftmaxLayerGeneric<true>; + } // namespace arm_compute #endif /* __ARM_COMPUTE_NESOFTMAXLAYER_H__ */ |