From a2b89ca5407532257a959ad1852f29187e1be4ac Mon Sep 17 00:00:00 2001
From: Pablo Palmier
Date: Thu, 5 Oct 2017 15:01:34 +0100
Subject: IVGCVSW-631 Neon support for Softmax beta parameter (F32 only)

Change-Id: Ibf6f038b39f1a4e557f5d04feb08e3d5ef54e223
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112019
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com
Reviewed-by: Anthony Barbier
Reviewed-by: Georgios Pinitas
---
 arm_compute/core/Helpers.h                           | 2 +-
 arm_compute/core/Helpers.inl                         | 2 +-
 arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h | 6 ++++--
 3 files changed, 6 insertions(+), 4 deletions(-)

(limited to 'arm_compute/core')

diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index 1be24e1841..fdbb46fc78 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -501,7 +501,7 @@ bool auto_init_if_empty(ITensorInfo &info,
  *
  * @return True if the tensor info has been initialized
  */
-bool auto_init_if_empty(ITensorInfo &info_sink, ITensorInfo &info_source);
+bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source);
 
 /* Set the shape to the specified value if the current assignment is empty.
  *
diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index 1e565344b7..3672692814 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -217,7 +217,7 @@ inline bool auto_init_if_empty(ITensorInfo &info,
     return false;
 }
 
-inline bool auto_init_if_empty(ITensorInfo &info_sink, ITensorInfo &info_source)
+inline bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source)
 {
     if(info_sink.tensor_shape().total_size() == 0)
     {
diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
index cce21569d9..c3e25181b6 100644
--- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
@@ -78,14 +78,15 @@ public:
      * @param[in]  max    Max values tensor. Data types supported: same as @p input.
      * @param[out] output Destination tensor. Data types supported: same as @p input.
      * @param[out] sum    Sum of 1D logits tensor. Data types supported: same as @p input.
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
      */
-    void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum);
+    void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum, float beta = 1.0f);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
 private:
-    using Logits1DShiftExpSumFunction = void(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window);
+    using Logits1DShiftExpSumFunction = void(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window, float beta);
 
 private:
     Logits1DShiftExpSumFunction *_func;
@@ -93,6 +94,7 @@ private:
     const ITensor *_max;
     ITensor       *_output;
    ITensor       *_sum;
+    float          _beta;
 };
 
 /** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
-- 
cgit v1.2.1
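
For context, the formula below sketches what the new beta parameter controls. This is an assumption based on the parameter description ("a scaling factor for the exponent") and on the kernel's inputs and outputs, not something spelled out in the patch itself; the kernel above computes the shifted, beta-scaled exponentials and their sum, while the division by that sum is handled by the later normalisation kernel whose comment appears at the end of the hunk. A beta of 1.0f reproduces the previous behaviour.

% Beta-scaled softmax (assumed form); the shift by max_j x_j is for numerical stability.
\[
  \mathrm{softmax}(x_i) \;=\;
  \frac{e^{\beta\,(x_i - \max_j x_j)}}{\sum_k e^{\beta\,(x_k - \max_j x_j)}}
\]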