author     Sheri Zhang <sheri.zhang@arm.com>    2020-05-05 11:47:36 +0100
committer  Sheri Zhang <sheri.zhang@arm.com>    2020-05-12 13:12:58 +0000
commit     1f567afcdfb2919fab417f0060155deda7132df8 (patch)
tree       79631c4b121b89ff261156c41d1cc217afd891fc /arm_compute
parent     c630e94d143ac5f46381f53a4994b29ea7ef2ac0 (diff)
download   ComputeLibrary-1f567afcdfb2919fab417f0060155deda7132df8.tar.gz
COMPMID-3442: Add support of negative axis in NESoftmaxLayer and reference code
Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I285cc3b74ac0a45f0ad5830baed5237cea568f15
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3147
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
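For illustration only, a minimal usage sketch of the updated interface with a negative reduction axis; the tensor shapes and surrounding setup are placeholders and not part of this patch:

// Hypothetical usage sketch (not from this patch): softmax over the axis
// selected with the new negative-axis convention (-1 = last axis).
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor src, dst;
    // Arbitrary example shape: 128 elements per row, 32 rows, F32 data.
    src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));

    NESoftmaxLayer softmax;
    // beta = 1.0f; axis = -1 selects the last axis, as documented in the hunks below.
    softmax.configure(&src, &dst, 1.0f, -1);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with input data ...
    softmax.run();
    return 0;
}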
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h | 10
-rw-r--r--  arm_compute/runtime/NEON/functions/NESoftmaxLayer.h  | 21
2 files changed, 17 insertions, 14 deletions
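The documentation updated in the hunks below states that a negative axis counts from the end and must lie in [-input_num_dimensions, input_num_dimensions). A rough sketch of that wrapping convention, using a hypothetical helper name rather than the library's internal code:

// Hypothetical helper illustrating the documented negative-axis convention;
// not code from this patch.
#include <cstddef>
#include <cstdint>
#include <stdexcept>

size_t wrap_axis(int32_t axis, size_t num_dimensions)
{
    const int32_t n = static_cast<int32_t>(num_dimensions);
    // Documented valid range: [-num_dimensions, num_dimensions).
    if(axis < -n || axis >= n)
    {
        throw std::out_of_range("axis out of range");
    }
    // A negative index counts from the end: -1 maps to num_dimensions - 1.
    return static_cast<size_t>(axis < 0 ? axis + n : axis);
}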
diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
index dfcc2ffe21..0e0be7936b 100644
--- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,13 +43,13 @@ public:
NELogits1DMaxKernel();
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[out] output Destination tensor. Data types supported: same as @p input
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NELogits1DMaxKernel
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor. Data types supported: same as @p input
*
* @return a status
@@ -98,7 +98,7 @@ public:
~NELogits1DSoftmaxKernel() = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] max Max values tensor. Same shape as input with dimension 0 set to 1.
* Data types supported: same as @p input.
* @param[out] output Destination tensor. Data types supported: same as @p input.
@@ -109,7 +109,7 @@ public:
void configure(const ITensor *input, const ITensor *max, ITensor *output, const float beta, ITensor *tmp);
/** Static function to check if given info will lead to a valid configuration of @ref NELogits1DSoftmaxKernel
*
- * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] max Max values tensor info. Same shape as input with dimension 0 set to 1.
* Data types supported: same as @p input.
* @param[in] output Destination tensor info. Data types supported: same as @p input.
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index f89add71ee..b80ceaf25c 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,28 +65,30 @@ public:
NESoftmaxLayerGeneric &operator=(NESoftmaxLayerGeneric &&) = default;
/** Set the input and output tensors.
*
- * @param[in,out] input Source tensor. Data types supported: QASYMM8/F16/F32. If the width is not a
+ * @param[in,out] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. If the width is not a
* multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
* last value of each row to the nearest multiple.
* @param[out] output Destination tensor. Data types supported: same as @p input.
* @param[in] beta (Optional) A scaling factor for the exponent.
- * @param[in] axis (Optional) Reduction axis. Defaults to 1. Must be in range [1, input_num_dimensions).
+ * @param[in] axis (Optional) Reduction axis. Defaults to -1.
+ * Negative index is used to specify axis from the end (e.g. -1 for the last axis). Must be in range [-input_num_dimensions, input_num_dimensions).
* It has the purpose of squashing the first @p axis dimensions together. For instance, given a [4x4x4x4] image,
* when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
*/
- void configure(ITensor *input, ITensor *output, float beta = 1.0f, size_t axis = 1);
+ void configure(ITensor *input, ITensor *output, float beta = 1.0f, int32_t axis = -1);
/** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
*
- * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor info. Data types supported: same as @p input
* @param[in] beta (Optional) A scaling factor for the exponent.
- * @param[in] axis (Optional) Reduction axis. Defaults to 1. Must be in range [1, input_num_dimensions).
+ * @param[in] axis (Optional) Reduction axis. Defaults to -1.
+ * Negative index is used to specify axis from the end (e.g. -1 for the last axis). Must be in range [-input_num_dimensions, input_num_dimensions).
* It has the purpose of squashing the first @p axis dimensions together. For instance, given a [4x4x4x4] image,
* when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, size_t axis = 1);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, int32_t axis = -1);
// Inherited methods overridden:
void run() override;
@@ -101,11 +103,12 @@ private:
*
* @param[in] input Original source tensor.
* @param[in] output Original destination tensor.
- * @param[in] axis (Optional) Reduction axis. Defaults to 1. Must be in range [1, input_num_dimensions).
+ * @param[in] axis (Optional) Reduction axis. Defaults to -1.
+ * Negative index is used to specify axis from the end (e.g. -1 for the last axis). Must be in range [-input_num_dimensions, input_num_dimensions).
* It has the purpose of squashing the first @p axis dimensions together. For instance, given a [4x4x4x4] image,
* when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
*/
- void configure_reshape_input_kernel(const ITensor *input, const ITensor *output, size_t axis);
+ void configure_reshape_input_kernel(const ITensor *input, const ITensor *output, int32_t axis);
MemoryGroup _memory_group;
NELogits1DMaxKernel _max_kernel;