author      Pablo Palmier <Pablo.Palmier@arm.com>        2017-10-05 15:01:34 +0100
committer   Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:42:17 +0000
commit      a2b89ca5407532257a959ad1852f29187e1be4ac (patch)
tree        a202070aea45a81ec1ea8a86fa4047035eb2d567 /arm_compute
parent      5948634bb97e05934e9eea180ba41dcddf874416 (diff)
download    ComputeLibrary-a2b89ca5407532257a959ad1852f29187e1be4ac.tar.gz
IVGCVSW-631 Neon support for Softmax beta parameter (F32 only)
Change-Id: Ibf6f038b39f1a4e557f5d04feb08e3d5ef54e223
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112019
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
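The beta parameter scales the logits before they are exponentiated, so for F32 the layer now computes softmax(x)_i = exp(beta * (x_i - max(x))) / sum_j exp(beta * (x_j - max(x))). As a point of reference only (a minimal scalar sketch, not the NEON code added by this patch), the computation follows the same max / shifted-exp-and-sum / normalise split used by the kernels:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Scalar reference of softmax with a beta scaling factor on the exponent.
std::vector<float> softmax_beta_reference(const std::vector<float> &x, float beta)
{
    const float max_val = *std::max_element(x.begin(), x.end());

    std::vector<float> out(x.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        // Subtracting the max keeps the exponent numerically stable.
        out[i] = std::exp(beta * (x[i] - max_val));
        sum += out[i];
    }
    for(float &v : out)
    {
        v /= sum; // Normalise so the outputs sum to 1.
    }
    return out;
}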
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/Helpers.h                                    |  2
-rw-r--r--  arm_compute/core/Helpers.inl                                  |  2
-rw-r--r--  arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h          |  6
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h   |  3
-rw-r--r--  arm_compute/runtime/NEON/functions/NESoftmaxLayer.h           |  3
5 files changed, 10 insertions, 6 deletions
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index 1be24e1841..fdbb46fc78 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -501,7 +501,7 @@ bool auto_init_if_empty(ITensorInfo &info,
*
* @return True if the tensor info has been initialized
*/
-bool auto_init_if_empty(ITensorInfo &info_sink, ITensorInfo &info_source);
+bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source);
/** Set the shape to the specified value if the current assignment is empty.
*
diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index 1e565344b7..3672692814 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -217,7 +217,7 @@ inline bool auto_init_if_empty(ITensorInfo &info,
return false;
}
-inline bool auto_init_if_empty(ITensorInfo &info_sink, ITensorInfo &info_source)
+inline bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source)
{
if(info_sink.tensor_shape().total_size() == 0)
{
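The const qualification on info_source means the source tensor info can now be passed as a read-only reference when auto-initialising an empty destination. A hedged usage sketch (the shapes and data type below are chosen purely for illustration):

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

using namespace arm_compute;

void auto_init_example()
{
    // Destination info left empty on purpose; shape and type will be inferred.
    TensorInfo dst_info{};
    // Source info is const: with this change it can still drive auto-initialisation.
    const TensorInfo src_info(TensorShape(32U, 16U), 1, DataType::F32);

    // Copies shape, data type, etc. into dst_info only if dst_info is still empty.
    const bool initialised = auto_init_if_empty(dst_info, src_info);
    (void)initialised;
}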
diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
index cce21569d9..c3e25181b6 100644
--- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
@@ -78,14 +78,15 @@ public:
* @param[in] max Max values tensor. Data types supported: same as @p input.
* @param[out] output Destination tensor. Data types supported: same as @p input.
* @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input.
+ * @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
*/
- void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum);
+ void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum, float beta = 1.0f);
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
private:
- using Logits1DShiftExpSumFunction = void(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window);
+ using Logits1DShiftExpSumFunction = void(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window, float beta);
private:
Logits1DShiftExpSumFunction *_func;
@@ -93,6 +94,7 @@ private:
const ITensor *_max;
ITensor *_output;
ITensor *_sum;
+ float _beta;
};
/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h
index 19bfb83eca..e7f8d5053a 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h
@@ -51,8 +51,9 @@ public:
*
* @param[in] input Source tensor. Data types supported: F16/F32
* @param[out] output Destination tensor. Data types supported: same as @p input
+ * @param[in] beta (Optional) A scaling factor for the exponent. Only beta = 1 is supported.
*/
- void configure(const IGCTensor *input, IGCTensor *output);
+ void configure(const IGCTensor *input, IGCTensor *output, float beta = 1.0f);
// Inherited methods overridden:
void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index a265f70043..38a0f2116f 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -53,8 +53,9 @@ public:
*
* @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32.
* @param[out] output Destination tensor. Data types supported: same as @p input.
+ * @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
*/
- void configure(ITensor *input, ITensor *output);
+ void configure(ITensor *input, ITensor *output, float beta = 1.0f);
// Inherited methods overridden:
void run() override;
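With the new optional parameter in place, a caller can request a scaled softmax on F32 tensors through the runtime function. A minimal usage sketch (the shapes and beta value are illustrative assumptions, and error handling is omitted):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input;
    Tensor output;
    input.allocator()->init(TensorInfo(TensorShape(1000U, 8U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(1000U, 8U), 1, DataType::F32));

    NESoftmaxLayer softmax;
    // beta defaults to 1.0f; other values are only meaningful for F32 after this patch.
    softmax.configure(&input, &output, 0.5f);

    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill the input tensor here ...

    softmax.run();
    return 0;
}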