author    Manuel Bottini <manuel.bottini@arm.com>    2019-07-17 16:11:53 +0100
committer Manuel Bottini <manuel.bottini@arm.com>    2019-07-22 14:06:55 +0000
commit    10c53f1ef317095ddcd9143bf759cc68ecb0e721 (patch)
tree      644954b909692f1d6b4e20194e81708503a62c2b /arm_compute
parent    d176d54b94c5337c97bd87671ce390804da8c10b (diff)
download  ComputeLibrary-10c53f1ef317095ddcd9143bf759cc68ecb0e721.tar.gz
COMPMID-2307: QUANTIZED_16BIT_LSTM operator for CL
Change-Id: I1b52df359f1a368d585fac43a08496544dd2f86f
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1568
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h  |   4
-rw-r--r--  arm_compute/core/CL/kernels/CLStridedSliceKernel.h          |   6
-rw-r--r--  arm_compute/core/QuantizationInfo.h                         |  12
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h                        |   1
-rw-r--r--  arm_compute/runtime/CL/functions/CLConcatenateLayer.h       |  10
-rw-r--r--  arm_compute/runtime/CL/functions/CLDequantizationLayer.h   |   4
-rw-r--r--  arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h    | 203
-rw-r--r--  arm_compute/runtime/CL/functions/CLStridedSlice.h          |   6
-rw-r--r--  arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h  |   2
9 files changed, 236 insertions, 12 deletions
diff --git a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
index 6d37f6a1a5..0ee5a13638 100644
--- a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
@@ -48,13 +48,13 @@ public:
~CLDequantizationLayerKernel() = default;
/** Set the input, output, min and max.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8/QSYMM16.
* @param[out] output Destination tensor. Data types supported: F16/F32.
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
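For reference, the kernel follows the library's usual validate-then-configure pattern; a minimal sketch of checking the new QSYMM16 path up front (shape and scale below are illustrative, not from this patch):

    #include "arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h"
    #include "arm_compute/core/TensorInfo.h"

    using namespace arm_compute;

    // Returns true if the kernel accepts a QSYMM16 -> F32 dequantization
    // for these (made-up) tensor shapes.
    bool can_dequantize_qsymm16()
    {
        const TensorInfo src(TensorShape(32U, 16U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 2048.f));
        const TensorInfo dst(TensorShape(32U, 16U), 1, DataType::F32);
        return bool(CLDequantizationLayerKernel::validate(&src, &dst));
    }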
diff --git a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h
index e104dcfdd7..5b69b3fd16 100644
--- a/arm_compute/core/CL/kernels/CLStridedSliceKernel.h
+++ b/arm_compute/core/CL/kernels/CLStridedSliceKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,7 +54,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
@@ -72,7 +72,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
* @param[in] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index 587a380d63..79afca0714 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -300,6 +300,18 @@ inline float dequantize(int8_t value, float scale)
return value * scale;
}
+/** Dequantize a value given a symmetric quantization scheme
+ *
+ * @param[in] value Value to dequantize
+ * @param[in] scale Scale to use for dequantization
+ *
+ * @return Dequantized value
+ */
+inline float dequantize(int16_t value, float scale)
+{
+ return value * scale;
+}
+
/** Quantize a value given a 16-bit symmetric quantization scheme
*
* @param[in] value Value to quantize
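The new overload mirrors its 8-bit siblings: a symmetric scheme has no offset, so dequantization is a plain multiply by the scale. A self-contained round-trip sketch (quantize_symm16 below is a hand-rolled illustration, not a library function):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hand-rolled symmetric 16-bit quantizer, for illustration only.
    inline int16_t quantize_symm16(float value, float scale)
    {
        long q = std::lround(value / scale);
        q      = std::max(-32768L, std::min(q, 32767L)); // saturate to int16_t
        return static_cast<int16_t>(q);
    }

    // Matches the inline dequantize(int16_t, float) added above.
    inline float dequantize_symm16(int16_t value, float scale)
    {
        return value * scale;
    }

    // Round trip: with scale = 1/2048, 0.8f quantizes to 1638 and
    // dequantizes back to ~0.7998f; the error is bounded by scale/2.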
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 8c154f2059..922fb6acd9 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -94,6 +94,7 @@
#include "arm_compute/runtime/CL/functions/CLIntegralImage.h"
#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "arm_compute/runtime/CL/functions/CLLSTMLayer.h"
+#include "arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h"
#include "arm_compute/runtime/CL/functions/CLLaplacianPyramid.h"
#include "arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h"
#include "arm_compute/runtime/CL/functions/CLLocallyConnectedLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index b69930c7d3..fb9724d167 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -60,7 +60,8 @@ public:
* @param[out] output Output tensor. Data types supported: Same as @p input.
* @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3.
*/
- void configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis);
+ void configure(std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis);
+ void configure(std::vector<const ICLTensor *> &inputs_vector, ICLTensor *output, size_t axis);
/** Static function to check if given info will lead to a valid configuration of @ref CLConcatenateLayer
*
* @note Preconditions on the input and output tensor dimensions differ depending on the concatenation axis.
@@ -73,11 +74,18 @@ public:
* @return a status
*/
static Status validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
+ static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
void run() override;
private:
+ template <typename TensorType>
+ void configure_internal(std::vector<TensorType *> &&inputs_vector, ICLTensor *output, size_t axis);
+
+ template <typename TensorInfoType>
+ static Status validate_internal(const std::vector<TensorInfoType *> &inputs_vector, const ITensorInfo *output, size_t axis);
+
std::vector<std::unique_ptr<ICLKernel>> _concat_kernels;
unsigned int _num_inputs;
unsigned int _axis;
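The const overloads matter for callers such as CLLSTMLayerQuantized below, which holds only const pointers to its weight tensors. A hypothetical usage sketch (tensor names are made up):

    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
    #include <vector>

    using namespace arm_compute;

    // Concatenate four already-initialised gate weight matrices along
    // dimension 1; only const pointers to the inputs are required.
    void concat_gate_weights(const CLTensor &w_i, const CLTensor &w_f,
                             const CLTensor &w_c, const CLTensor &w_o, CLTensor &out)
    {
        std::vector<const ICLTensor *> inputs{ &w_i, &w_f, &w_c, &w_o };
        CLConcatenateLayer concat;
        concat.configure(inputs, &out, 1);
    }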
diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index 2f7af01a84..ade589d79e 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -40,13 +40,13 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.
- * Data types supported: QASYMM8/QSYMM8.
+ * Data types supported: QASYMM8/QSYMM8/QSYMM16.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data type supported: F16/F32.
*
* @return a status
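A minimal end-to-end sketch of the QSYMM16 path at the function level (context initialisation, shape and scale are illustrative):

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDequantizationLayer.h"

    using namespace arm_compute;

    void dequantize_qsymm16_to_f32()
    {
        CLScheduler::get().default_init(); // create CL context and queue

        // 3D source, as the documentation requires; scale chosen arbitrarily.
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(64U, 8U, 3U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 2048.f)));
        dst.allocator()->init(TensorInfo(TensorShape(64U, 8U, 3U), 1, DataType::F32));

        CLDequantizationLayer dequant;
        dequant.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... upload quantized data into src ...
        dequant.run();
    }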
diff --git a/arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h b/arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h
new file mode 100644
index 0000000000..e2d164c395
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLLSTMLAYERQUANTIZED_H__
+#define __ARM_COMPUTE_CLLSTMLAYERQUANTIZED_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
+#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
+#include "arm_compute/runtime/CL/functions/CLDequantizationLayer.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"
+#include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h"
+#include "arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h"
+#include "arm_compute/runtime/CL/functions/CLQuantizationLayer.h"
+#include "arm_compute/runtime/CL/functions/CLSlice.h"
+#include "arm_compute/runtime/CL/functions/CLTranspose.h"
+
+#include "arm_compute/runtime/common/LSTMParams.h"
+
+namespace arm_compute
+{
+// Forward declarations
+class ICLTensor;
+
+/** Basic function to run @ref CLLSTMLayerQuantized
+ *
+ * This function calls the following CL functions/kernels:
+ *
+ * -# @ref CLGEMMLowpMatrixMultiplyCore Quantized matrix multiplication core. Accumulators are 32-bit integers
+ * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint Convert 32-bit integers into QSYMM16
+ * -# @ref CLTranspose Matrix transpose
+ * -# @ref CLConcatenateLayer Tensor concatenation
+ * -# @ref CLActivationLayer Activation functions (tanh and logistic)
+ * -# @ref CLArithmeticAddition Elementwise addition
+ * -# @ref CLPixelWiseMultiplication Elementwise multiplication
+ * -# @ref CLSlice Tensor slicing
+ * -# @ref CLDequantizationLayer Dequantize into float
+ * -# @ref CLQuantizationLayer Quantize from float
+ */
+class CLLSTMLayerQuantized : public IFunction
+{
+public:
+ /** Default constructor */
+ CLLSTMLayerQuantized(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLLSTMLayerQuantized(const CLLSTMLayerQuantized &) = delete;
+ /** Default move constructor */
+ CLLSTMLayerQuantized(CLLSTMLayerQuantized &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLLSTMLayerQuantized &operator=(const CLLSTMLayerQuantized &) = delete;
+ /** Default move assignment operator */
+ CLLSTMLayerQuantized &operator=(CLLSTMLayerQuantized &&) = default;
+ /** Initialize function's tensors.
+ *
+ * @param[in] input Source tensor. Input is a 2D tensor with dimensions [input_size, batch_size]. Data types supported: QASYMM8.
+ * @param[in] input_to_input_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_forget_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_cell_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_output_weights 2D weights tensor with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_input_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_forget_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_cell_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_output_weights 2D weights tensor with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_gate_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] forget_gate_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] output_gate_bias 1D weights tensor with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_state_in 2D tensor with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[in] output_state_in 2D tensor with dimensions [output_size, batch_size]. Data type supported: Same as @p input.
+ * @param[out] cell_state_out Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[out] output_state_out Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size]. Data types supported: Same as @p input.
+ */
+ void configure(const ICLTensor *input,
+ const ICLTensor *input_to_input_weights, const ICLTensor *input_to_forget_weights, const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
+ const ICLTensor *recurrent_to_input_weights, const ICLTensor *recurrent_to_forget_weights, const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
+ const ICLTensor *input_gate_bias, const ICLTensor *forget_gate_bias, const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
+ ICLTensor *cell_state_in, const ICLTensor *output_state_in,
+ ICLTensor *cell_state_out, ICLTensor *output_state_out);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLLSTMLayerQuantized
+ *
+ * @param[in] input Source tensor info. Input is a 2D tensor info with dimensions [input_size, batch_size]. Data types supported: QASYMM8.
+ * @param[in] input_to_input_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_forget_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_cell_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_to_output_weights 2D weights tensor info with dimensions [input_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_input_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_forget_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_cell_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] recurrent_to_output_weights 2D weights tensor info with dimensions [output_size, output_size]. Data type supported: Same as @p input.
+ * @param[in] input_gate_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] forget_gate_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] output_gate_bias 1D weights tensor info with dimensions [output_size]. Data type supported: S32.
+ * @param[in] cell_state_in 2D tensor info with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[in] output_state_in 2D tensor info with dimensions [output_size, batch_size]. Data type supported: Same as @p input.
+ * @param[out] cell_state_out Destination tensor info. Output is a 2D tensor info with dimensions [output_size, batch_size]. Data type supported: QSYMM16.
+ * @param[out] output_state_out Destination tensor info. Output is a 2D tensor info with dimensions [output_size, batch_size]. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input,
+ const ITensorInfo *input_to_input_weights, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights,
+ const ITensorInfo *recurrent_to_input_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights,
+ const ITensorInfo *input_gate_bias, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias,
+ const ITensorInfo *cell_state_in, const ITensorInfo *output_state_in,
+ const ITensorInfo *cell_state_out, const ITensorInfo *output_state_out);
+
+ // Inherited methods overridden:
+ void run() override;
+ void prepare() override;
+
+private:
+ CLMemoryGroup _memory_group;
+
+ // Functions used
+ CLGEMMLowpMatrixMultiplyCore _gemmlowp;
+ CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint _output_stage;
+ CLTranspose _transpose_weights;
+ CLConcatenateLayer _concat_input_weights;
+ CLConcatenateLayer _concat_recurrent_weights;
+ CLConcatenateLayer _concat_weights;
+ CLConcatenateLayer _concat_inputs;
+ CLConcatenateLayer _concat_bias;
+ CLActivationLayer _sigmoid_forget_gate;
+ CLActivationLayer _sigmoid_input_gate;
+ CLActivationLayer _sigmoid_output_gate;
+ CLActivationLayer _tanh_modulation_gate;
+ CLActivationLayer _tanh_output_state;
+ CLArithmeticAddition _add_cell_state_tmps;
+ CLArithmeticAddition _add2;
+ CLPixelWiseMultiplication _mul_forget_gate_cell_state;
+ CLPixelWiseMultiplication _mul_input_gate_input_mod_gate;
+ CLPixelWiseMultiplication _mul_output_state_tmp_output_gate;
+ CLSlice _slice_input_tensor;
+ CLSlice _slice_forget_tensor;
+ CLSlice _slice_cell_tensor;
+ CLSlice _slice_output_tensor;
+ CLDequantizationLayer _dequantize;
+ CLQuantizationLayer _quantize;
+
+ // Tensor pointers
+ const ICLTensor *_input_to_input_weights;
+ const ICLTensor *_input_to_forget_weights;
+ const ICLTensor *_input_to_cell_weights;
+ const ICLTensor *_input_to_output_weights;
+ const ICLTensor *_recurrent_to_input_weights;
+ const ICLTensor *_recurrent_to_forget_weights;
+ const ICLTensor *_recurrent_to_cell_weights;
+ const ICLTensor *_recurrent_to_output_weights;
+ const ICLTensor *_input_gate_bias;
+ const ICLTensor *_forget_gate_bias;
+ const ICLTensor *_cell_bias;
+ const ICLTensor *_output_gate_bias;
+
+ // Temporary tensors
+ CLTensor _recurrent_weights;
+ CLTensor _input_weights;
+ CLTensor _weights;
+ CLTensor _input;
+ CLTensor _weights_transposed;
+ CLTensor _output_highp;
+ CLTensor _output_lowp;
+ CLTensor _bias;
+ CLTensor _forget_gate_input;
+ CLTensor _input_gate_input;
+ CLTensor _output_gate_input;
+ CLTensor _input_modulation_gate_input;
+ CLTensor _forget_gate_output;
+ CLTensor _input_gate_output;
+ CLTensor _output_gate_output;
+ CLTensor _input_modulation_gate_output;
+ CLTensor _cell_state_tmp1;
+ CLTensor _cell_state_tmp2;
+ CLTensor _output_state_tmp;
+ CLTensor _output_state_out_symm;
+ CLTensor _output_state_out_f32;
+
+ bool _is_prepared;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLLSTMLAYERQUANTIZED_H__ */
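For orientation, a hedged sketch of wiring the new function with exactly the configure() signature above; all tensors are assumed to be initialised elsewhere with the documented shapes and data types (input and weights QASYMM8, biases S32, cell state QSYMM16):

    #include "arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h"

    using namespace arm_compute;

    void run_quantized_lstm_step(const ICLTensor *input,
                                 const ICLTensor *input_to_input_weights, const ICLTensor *input_to_forget_weights,
                                 const ICLTensor *input_to_cell_weights, const ICLTensor *input_to_output_weights,
                                 const ICLTensor *recurrent_to_input_weights, const ICLTensor *recurrent_to_forget_weights,
                                 const ICLTensor *recurrent_to_cell_weights, const ICLTensor *recurrent_to_output_weights,
                                 const ICLTensor *input_gate_bias, const ICLTensor *forget_gate_bias,
                                 const ICLTensor *cell_bias, const ICLTensor *output_gate_bias,
                                 ICLTensor *cell_state_in, const ICLTensor *output_state_in,
                                 ICLTensor *cell_state_out, ICLTensor *output_state_out)
    {
        CLLSTMLayerQuantized lstm;
        lstm.configure(input,
                       input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights,
                       recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights,
                       input_gate_bias, forget_gate_bias, cell_bias, output_gate_bias,
                       cell_state_in, output_state_in,
                       cell_state_out, output_state_out);

        // One run() computes a single time step; for a sequence, feed
        // cell_state_out/output_state_out back as the next step's state inputs.
        lstm.run();
    }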
diff --git a/arm_compute/runtime/CL/functions/CLStridedSlice.h b/arm_compute/runtime/CL/functions/CLStridedSlice.h
index 4a336f6fdc..bb97b17fea 100644
--- a/arm_compute/runtime/CL/functions/CLStridedSlice.h
+++ b/arm_compute/runtime/CL/functions/CLStridedSlice.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
@@ -57,7 +57,7 @@ public:
*
* @note Supported tensor rank: up to 4
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/QSYMM16/U32/S32/F16/F32
* @param[in] output Destination tensor. Data type supported: Same as @p input
* @param[in] starts The starts of the dimensions of the input tensor to be sliced. The length must be of rank(input).
* @param[in] ends The ends of the dimensions of the input tensor to be sliced. The length must be of rank(input).
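And a hedged sketch of slicing a QSYMM16 tensor now that the data type is supported, assuming the Coordinates/BiStrides-based configure() overload (shape and ranges are made up):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLStridedSlice.h"

    using namespace arm_compute;

    void slice_qsymm16_example()
    {
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 4096.f)));

        CLStridedSlice slice;
        slice.configure(&src, &dst,
                        Coordinates(0, 0), // starts
                        Coordinates(8, 4), // ends (exclusive)
                        BiStrides(1, 1));  // unit strides
        // dst's info is auto-initialised by configure(); allocate and run as usual.
    }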
diff --git a/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h b/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h
index b45d714990..7f02988c19 100644
--- a/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h
+++ b/arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h
@@ -53,7 +53,7 @@ class ITensor;
* -# @ref NEGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint Convert 32-bit integers into QSYMM16
* -# @ref NETranspose Matrix transpose
* -# @ref NEConcatenateLayer Tensor concatenation
- * -# @ref NEActivationLayer Activation functions (tanh and logistig)
+ * -# @ref NEActivationLayer Activation functions (tanh and logistic)
* -# @ref NEArithmeticAddition Elementwise addition
* -# @ref NEPixelWiseMultiplication Elementwise multiplication
* -# @ref NESlice Tensor slicing