aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
diff options
context:
space:
mode:
authorgiuros01 <giuseppe.rossini@arm.com>2018-09-03 09:53:53 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:54 +0000
commitefbf6c8fd54159b26eda43eea7a12fce491ca13a (patch)
treef24f63d73703ddcb5fe0ea3ccef101660a9eb9a4 /arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
parent477531c258801caf3cce44eb3e43df611b42fc6d (diff)
downloadComputeLibrary-efbf6c8fd54159b26eda43eea7a12fce491ca13a.tar.gz
[COMPMID-386] Github: Support SoftmaxLayer on different number of dimensions?
Change-Id: I7422b977538ff29930a90f078badc2edee78af93
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146638
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/runtime/CL/functions/CLSoftmaxLayer.h')
-rw-r--r--arm_compute/runtime/CL/functions/CLSoftmaxLayer.h | 23
1 file changed, 16 insertions, 7 deletions
diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
index 90c99d6569..8d2c03f930 100644
--- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
@@ -58,16 +58,22 @@ public:
* @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[out] output Destination tensor. Data types supported: same as @p input
* @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.f
+ * @param[in] axis (Optional) Reduction axis. It has the purpose of squashing the first @p axis
+ * dimensions together. For instance, given a [4x4x4x4] image,
+ * when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
*/
- void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f);
+ void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f, size_t axis = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLSoftmaxLayer
*
* @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[in] output Destination tensor. Data types supported: same as @p input
- *
+ * @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.f
+ * @param[in] axis (Optional) Reduction axis. It has the purpose of squashing the first @p axis
+ * dimensions together. For instance, given a [4x4x4x4] image,
+ * when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
* @return a status
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta = 1.0f, size_t axis = 1);
// Inherited methods overridden:
void run() override;
@@ -82,19 +88,22 @@ private:
*
* @param[in] input Original source tensor.
* @param[in] output Original destination tensor.
+ * @param[in] axis (Optional) Reduction axis. It has the purpose of squashing the first @p axis
+ * dimensions together. For instance, given a [4x4x4x4] image,
+ * when @p axis is 2, the Softmax reduction will be applied on each of the [4x4] planes of the input image.
*/
- void configure_flatten_kernel(const ICLTensor *input, const ICLTensor *output);
+ void configure_reshape_input_kernel(const ICLTensor *input, const ICLTensor *output, size_t axis);
CLMemoryGroup _memory_group;
CLLogits1DMaxShiftExpSumKernel _max_shift_exp_sum_kernel;
CLLogits1DNormKernel _norm_kernel;
- CLFlattenLayerKernel _flatten_kernel;
+ std::unique_ptr<ICLKernel> _flatten_kernel_ptr;
CLReshapeLayerKernel _reshape_kernel;
CLTensor _max;
CLTensor _sum;
CLTensor _tmp;
- CLTensor _input_flat;
- CLTensor _output_flat;
+ CLTensor _input_flattened;
+ CLTensor _output_flattened;
bool _needs_flattening;
};
}