Diffstat (limited to 'arm_compute/core/CL/kernels')
-rw-r--r--  arm_compute/core/CL/kernels/CLActivationLayerKernel.h                     |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h                  | 12
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h               | 14
-rw-r--r--  arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h             |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h                 |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLCol2ImKernel.h                              |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h        |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h               |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h                   |  9
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h              |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h   |  8
-rw-r--r--  arm_compute/core/CL/kernels/CLFillBorderKernel.h                          |  2
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h                   |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h          |  6
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h                  |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h                  |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h                    |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLIm2ColKernel.h                              |  6
-rw-r--r--  arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h                  |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLPermuteKernel.h                             |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h             | 12
-rw-r--r--  arm_compute/core/CL/kernels/CLPoolingLayerKernel.h                        |  5
-rw-r--r--  arm_compute/core/CL/kernels/CLReshapeLayerKernel.h                        |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h                        | 16
-rw-r--r--  arm_compute/core/CL/kernels/CLTransposeKernel.h                           |  6
-rw-r--r--  arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h                      |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h               |  4
27 files changed, 78 insertions(+), 82 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index c6d8f96a87..12d00de7e8 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -51,7 +51,7 @@ public:
* @note If the output tensor is a nullptr, the activation function will be performed in-place
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * of the activation function. Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*/
@@ -59,7 +59,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * of the activation function. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor info. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*
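For context, the validate()/configure() contract documented in this header is typically driven as follows — a minimal sketch, not part of the patch, assuming a context initialised through CLScheduler; the tensor shape and RELU activation are illustrative choices:

#include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // create CL context and command queue

    // Illustrative F32 tensors; passing nullptr as output would run the activation in-place
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));

    const ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::RELU);

    // Check the configuration first, then configure the kernel
    ARM_COMPUTE_ERROR_THROW_ON(CLActivationLayerKernel::validate(src.info(), dst.info(), act));
    CLActivationLayerKernel kernel;
    kernel.configure(&src, &dst, act);

    // Allocate after configure() so any padding requested by the kernel is honoured
    src.allocator()->allocate();
    dst.allocator()->allocate();
    CLScheduler::get().enqueue(kernel);
    CLScheduler::get().sync();
    return 0;
}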
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
index a33cbf321f..f4275f4153 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
@@ -53,17 +53,17 @@ public:
~CLArithmeticAdditionKernel() = default;
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QASYMM8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8 (only if @p input1 is QS8), QASYMM8 (only if @p input1 is QASYMM8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8),QASYMM8 (only if @p input1 is QASYMM8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
+ * @param[in] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+ * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAdditionKernel
*
- * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QASYMM8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QASYMM8 (only if @p input1 is QASYMM8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QASYMM8 (only if both inputs are QASYMM8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
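A usage sketch for the overload shown above (illustrative, not from the patch): SATURATE clamps rather than wraps when the S16 sum would overflow.

#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative shapes: both inputs and the output are S16
    CLTensor a, b, sum;
    for(CLTensor *t : { &a, &b, &sum })
    {
        t->allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::S16));
    }

    ARM_COMPUTE_ERROR_THROW_ON(CLArithmeticAdditionKernel::validate(a.info(), b.info(), sum.info(), ConvertPolicy::SATURATE));
    CLArithmeticAdditionKernel add;
    add.configure(&a, &b, &sum, ConvertPolicy::SATURATE);

    for(CLTensor *t : { &a, &b, &sum })
    {
        t->allocator()->allocate();
    }
    CLScheduler::get().enqueue(add);
    CLScheduler::get().sync();
    return 0;
}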
diff --git a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
index c5f862a61f..35b918fe4b 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,17 +55,17 @@ public:
~CLArithmeticSubtractionKernel() = default;
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32.
+ * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32.
+ * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtractionKernel
*
- * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input info. Data types supported: U8/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 8015f08d1b..9c8d02532a 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -54,7 +54,7 @@ public:
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * The rest are optional and used for representing batches. Data types supported: F16/F32.
* @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
@@ -69,7 +69,7 @@ public:
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * The rest are optional and used for representing batches. Data types supported: F16/F32.
* @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
diff --git a/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h b/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
index 684a0e5027..f7bd205ec7 100644
--- a/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h
@@ -48,14 +48,14 @@ public:
~CLChannelShuffleLayerKernel() = default;
/** Configure function's inputs and outputs.
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*/
void configure(const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
/** Static function to check if given info will lead to a valid configuration of @ref CLChannelShuffleLayerKernel
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*
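To make the num_groups constraint concrete, a hedged sketch with illustrative shapes: 12 channels shuffled in 4 groups satisfies both requirements documented above.

#include "arm_compute/core/CL/kernels/CLChannelShuffleLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // [width, height, channels]; 12 channels is a multiple of num_groups = 4, and 4 > 1
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 12U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 8U, 12U), 1, DataType::F32));

    ARM_COMPUTE_ERROR_THROW_ON(CLChannelShuffleLayerKernel::validate(src.info(), dst.info(), 4));
    CLChannelShuffleLayerKernel shuffle;
    shuffle.configure(&src, &dst, 4);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    CLScheduler::get().enqueue(shuffle);
    CLScheduler::get().sync();
    return 0;
}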
diff --git a/arm_compute/core/CL/kernels/CLCol2ImKernel.h b/arm_compute/core/CL/kernels/CLCol2ImKernel.h
index 3779325efe..94f21b1ebc 100644
--- a/arm_compute/core/CL/kernels/CLCol2ImKernel.h
+++ b/arm_compute/core/CL/kernels/CLCol2ImKernel.h
@@ -66,7 +66,7 @@ public:
~CLCol2ImKernel() = default;
/** Set the input and output of the kernel.
*
- * @param[in] input The input tensor to convert. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * @param[in] input The input tensor to convert. Data types supported: QASYMM8/F16/F32
* @param[out] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
* while the rest represent batch of outputs. Data types supported: Same as @p input
* @param[in] convolved_dims Output convolved dimensions.
@@ -74,7 +74,7 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, std::pair<unsigned int, unsigned int> convolved_dims);
/** Static function to check if given info will lead to a valid configuration of @ref CLCol2ImKernel
*
- * @param[in] input The input tensor to convert. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * @param[in] input The input tensor to convert. Data types supported: QASYMM8/F16/F32
* @param[in] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
* while the rest represent batch of outputs. Data types supported: Same as @p input
* @param[in] convolved_dims Output convolved dimensions.
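A sketch of the shape relationship — illustrative, assuming the row-per-feature-map layout this kernel implements: each input row of convolved_w * convolved_h values becomes one [4, 4] plane of the output.

#include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

#include <utility>

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative: 8 rows of 16 elements -> 8 feature maps of 4x4 (convolved_dims = {4, 4})
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(4U, 4U, 8U), 1, DataType::F32));

    const std::pair<unsigned int, unsigned int> convolved_dims = std::make_pair(4U, 4U);
    ARM_COMPUTE_ERROR_THROW_ON(CLCol2ImKernel::validate(src.info(), dst.info(), convolved_dims));
    CLCol2ImKernel col2im;
    col2im.configure(&src, &dst, convolved_dims);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    CLScheduler::get().enqueue(col2im);
    CLScheduler::get().sync();
    return 0;
}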
diff --git a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
index fe24aa9d8c..f5e2f0de89 100644
--- a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
@@ -55,7 +55,7 @@ public:
~CLConvertFullyConnectedWeightsKernel() = default;
/** Set the input and output tensor.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -63,7 +63,7 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeightsKernel
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
diff --git a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
index 467bdfab3b..cbcab8f554 100644
--- a/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConcatenateLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ public:
~CLDepthConcatenateLayerKernel() = default;
/** Initialise the kernel's inputs and output
*
- * @param[in] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] input Input tensor. Data types supported: F16/F32.
* @param[in] depth_offset The offset on the Z axis.
* @param[in,out] output Output tensor. Data types supported: Same as @p input.
*
diff --git a/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
index 3a6310d69e..7e795c607a 100644
--- a/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,17 +43,14 @@ public:
*
* Valid conversions Input -> Output :
*
- * - QS8 -> F32
- * - QS16 -> F32
* - U8 -> U16, S16, U32, S32
* - U16 -> U8, U32, S32
* - S16 -> U8, U32, S32
* - U32 -> U8, U16, S16
* - S32 -> U8, U16, S16
- * - F32 -> QS8, QS16
*
- * @param[in] input The input tensor to convert. Data types supported: U8/QS8/U16/S16/QS16/U32/S32/F32.
- * @param[out] output The output tensor. Data types supported: U8/QS8/U16/S16/QS16/U32/S32/F32.
+ * @param[in] input The input tensor to convert. Data types supported: U8/U16/S16/U32/S32/F32.
+ * @param[out] output The output tensor. Data types supported: U8/U16/S16/U32/S32/F32.
* @param[in] policy Conversion policy
* @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
*/
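The conversion table above maps to calls like the following — a sketch assuming the configure(input, output, policy, shift) overload this header declares; U8 -> S16 is one of the listed up-conversions, and shift must stay in [0, 8):

#include "arm_compute/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative shapes: widen U8 pixels to S16
    CLTensor u8_in, s16_out;
    u8_in.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));
    s16_out.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::S16));

    CLDepthConvertLayerKernel convert;
    convert.configure(&u8_in, &s16_out, ConvertPolicy::SATURATE, 0 /* shift */);

    u8_in.allocator()->allocate();
    s16_out.allocator()->allocate();
    CLScheduler::get().enqueue(convert);
    CLScheduler::get().sync();
    return 0;
}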
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
index eb1bf58b1b..bd37e35334 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h
@@ -56,7 +56,7 @@ public:
* 5x5 convolution with stride_x = 1/2, stride_y = 1/2
*
* @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/QS8/QS16/F16/F32.
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
* The 3rd dimension must be the same as the input's volume 3rd dimension.
* Data type supported: Same as @p input.
@@ -70,7 +70,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerKernel
*
* @param[in] input The input tensor to convolve. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
* The 3rd dimension must be the same as the input's volume 3rd dimension.
* Data type supported: Same as @p input.
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
index 9340e9a8d8..1947a98ba3 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
@@ -51,11 +51,11 @@ public:
/** Set the accumulate buffer and the biases of the kernel.
*
* @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: S32/QS16/QS32/F16/F32
+ * Data type supported: S32/QS32/F16/F32
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
* Required parameter if output is of QASYMM8 type.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * Data types supported: QASYMM8/F16/F32
* @param[in] result_fixedpoint_multiplier (Optional) Fixed point value to be multiplied with each element of the input matrix once the result_offset has been added
* @param[in] result_shift (Optional) Integer value used to divide the result by a power of two, rounding to nearest, after the fixed point multiplication
* @param[in] result_offset_after_shift (Optional) Offset to be applied to the result before converting it back to QASYMM8
@@ -65,10 +65,10 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerOutputStageKernel
*
* @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: QS16/QS32/F16/F32
+ * Data type supported: QS32/F16/F32
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
- * Data type supported: QS8/QS16/F16/F32
+ * Data type supported: F16/F32
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *bias = nullptr, const ITensorInfo *output = nullptr);
diff --git a/arm_compute/core/CL/kernels/CLFillBorderKernel.h b/arm_compute/core/CL/kernels/CLFillBorderKernel.h
index 18031c7e7e..20e872eccb 100644
--- a/arm_compute/core/CL/kernels/CLFillBorderKernel.h
+++ b/arm_compute/core/CL/kernels/CLFillBorderKernel.h
@@ -51,7 +51,7 @@ public:
~CLFillBorderKernel() = default;
/** Initialise the kernel's input, output and border mode.
*
- * @param[in,out] tensor Tensor to process Data types supported: U8/QS8/S16/QS16/S32/F16/F32.
+ * @param[in,out] tensor Tensor to process. Data types supported: U8/S16/S32/F16/F32.
* @param[in] border_size Size of the border to fill in elements.
* @param[in] border_mode Border mode to use for the convolution.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
diff --git a/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h b/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
index c0fef45afe..7f8e766f1a 100644
--- a/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h
@@ -64,14 +64,14 @@ public:
CLGEMMInterleave4x4Kernel &operator=(CLGEMMInterleave4x4Kernel &&) = default;
/** Initialise the kernel's input and output.
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
* @param[in] mult_interleave4x4_height (Optional) Multiplication factor for the height of the 4x4 interleave block
*/
void configure(const ICLTensor *input, ICLTensor *output, int mult_interleave4x4_height = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMInterleave4x4Kernel
*
- * @param[in] input Input tensor info. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output Output tensor info which stores the interleaved matrix. Data type supported: same as @p input.
* @param[in] mult_interleave4x4_height Multiplication factor for the height of the 4x4 interleave block
*
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h
index 2956f93cdc..f201af0d5e 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixAccumulateBiasesKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,13 +46,13 @@ public:
CLGEMMMatrixAccumulateBiasesKernel &operator=(CLGEMMMatrixAccumulateBiasesKernel &&) = default;
/** Set the accumulate buffer and the biases of the kernel.
*
- * @param[in, out] accum The accumulate tensor to convert. Data types supported: QS8/QS16/F16/F32
+ * @param[in, out] accum The accumulate tensor to convert. Data types supported: F16/F32
* @param[in] biases The shared biases tensor to append. It must be 1D tensor. Data types supported: Same as @p input
*/
void configure(ICLTensor *accum, const ICLTensor *biases);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixAccumulateBiasesKernel
*
- * @param[in] accum The accumulate tensor to convert. Data types supported: QS8/QS16/F16/F32
+ * @param[in] accum The accumulate tensor to convert. Data types supported: F16/F32
* @param[in] biases The shared biases tensor to append. It must be 1D tensor. Data types supported: Same as @p input
* @param[in] gpu_target GPU target
*
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
index 3755d943c5..bf8e1d4b17 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixAdditionKernel.h
@@ -52,14 +52,14 @@ public:
*
* @note The input and output tensors must have the same dimensions
*
- * @param[in] input Input tensor (Matrix C). Data types supported: QS8/QS16/F16/F32
+ * @param[in] input Input tensor (Matrix C). Data types supported: F16/F32
* @param[in, out] output Output tensor. If this kernel is used to finalize the GEMM result (alpha * AB + beta * C), output must contain the result obtained by @ref CLGEMMMatrixMultiplyKernel. Data type supported: same as @p input
* @param[in] beta Weight of matrix C
*/
void configure(const ICLTensor *input, ICLTensor *output, float beta);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixAdditionKernel.
*
- * @param[in] input Input tensor (Matrix C). Data types supported: QS8/QS16/F16/F32
+ * @param[in] input Input tensor (Matrix C). Data types supported: F16/F32
* @param[in] output Output tensor. If this kernel is used to finalize the GEMM result (alpha * AB + beta * C), output must contain the result obtained by @ref CLGEMMMatrixMultiplyKernel. Data type supported: same as @p input
* @param[in] beta Weight of matrix C
*
diff --git a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
index 15bba0cd0f..1b6a0c87a9 100644
--- a/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h
@@ -53,7 +53,7 @@ public:
CLGEMMMatrixMultiplyKernel &operator=(CLGEMMMatrixMultiplyKernel &&) = default;
/** Initialise the kernel's input, output and alpha
*
- * @param[in] input0 Input tensor containing the Matrix A. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input0 Input tensor containing the Matrix A. Data types supported: F16/F32
* @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
* @param[out] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
* @param[in] alpha Weight of the matrix product
@@ -64,7 +64,7 @@ public:
void configure(const ICLTensor *input0, const ICLTensor *input1, ICLTensor *output, float alpha, bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyKernel
*
- * @param[in] input0 Input tensor containing the Matrix A. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input0 Input tensor containing the Matrix A. Data types supported: F16/F32
* @param[in] input1 Input tensor containing the Matrix B. Data type supported: same as @p input0
* @param[in] output Output tensor to store the result of matrix multiplication. Data type supported: same as @p input0
* @param[in] alpha Weight of the matrix product
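For the simple path, a hedged sketch: with is_interleaved_transposed = false the kernel multiplies A and B directly, so plain (K, M) and (N, K) shapes can be used; when the flag is true the inputs are expected to come from @ref CLGEMMInterleave4x4Kernel and @ref CLGEMMTranspose1xWKernel first. Shapes and alpha below are illustrative:

#include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // A: M=4 rows of K=8; B: K=8 rows of N=16; C: M=4 rows of N=16 (shapes are (x, y))
    CLTensor a, b, c;
    a.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::F32));
    b.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
    c.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::F32));

    CLGEMMMatrixMultiplyKernel mm;
    mm.configure(&a, &b, &c, 1.0f, false /* is_interleaved_transposed */);

    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();
    CLScheduler::get().enqueue(mm);
    CLScheduler::get().sync();
    return 0;
}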
diff --git a/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h b/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h
index 9a3069eab6..47a4ad515b 100644
--- a/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h
+++ b/arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h
@@ -70,14 +70,14 @@ class CLGEMMTranspose1xWKernel : public ICLSimple2DKernel
public:
/** Initialise the kernel's input and output.
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
* @param[in] mult_transpose1xW_width (Optional) Multiplication factor for the width of the 1xW transposed block
*/
void configure(const ICLTensor *input, ICLTensor *output, int mult_transpose1xW_width = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMTranspose1xWKernel
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output Output tensor. Data type supported: same as @p input.
* @param[in] mult_transpose1xW_width Multiplication factor for the width of the 1xW transposed block
*
diff --git a/arm_compute/core/CL/kernels/CLIm2ColKernel.h b/arm_compute/core/CL/kernels/CLIm2ColKernel.h
index 7e119a32a8..fc930abcbe 100644
--- a/arm_compute/core/CL/kernels/CLIm2ColKernel.h
+++ b/arm_compute/core/CL/kernels/CLIm2ColKernel.h
@@ -69,7 +69,7 @@ public:
/** Set the input and output of the kernel.
*
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
* @param[out] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
* while every dimension above represents a batch. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
@@ -81,7 +81,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLIm2ColKernel
*
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
* @param[in] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
* while every dimension above represents a batch. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
@@ -113,7 +113,7 @@ private:
/** Chooses and configure the right kernel for the given input arguments.
*
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
* @param[in] output The output tensor. First 2 lower dimensions represent a transform of each 3D input,
* while every dimension above represents a batch. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
diff --git a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
index ef00e59e5c..f2d37a781c 100644
--- a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
@@ -48,7 +48,7 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32.
+ * and an optional 4th dimension for batch of inputs. Data types supported: F16/F32.
* @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data types supported: same as @p input.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
@@ -56,7 +56,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLNormalizationLayerKernel
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32.
+ * and an optional 4th dimension for batch of inputs. Data types supported: F16/F32.
* @param[in] output Destination tensor. Output will have the same number of dimensions as input. Data types supported: same as @p input.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*
diff --git a/arm_compute/core/CL/kernels/CLPermuteKernel.h b/arm_compute/core/CL/kernels/CLPermuteKernel.h
index b01df64ebd..21da141c0d 100644
--- a/arm_compute/core/CL/kernels/CLPermuteKernel.h
+++ b/arm_compute/core/CL/kernels/CLPermuteKernel.h
@@ -49,14 +49,14 @@ public:
CLPermuteKernel &operator=(CLPermuteKernel &&) = default;
/** Set the input and output of the kernel.
*
- * @param[in] input The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*/
void configure(const ICLTensor *input, ICLTensor *output, const PermutationVector &perm);
/** Static function to check if given info will lead to a valid configuration of @ref CLPermuteKernel
*
- * @param[in] input First tensor input info. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32.
+ * @param[in] input First tensor input info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
* @param[in] output Output tensor info. Data types supported: same as @p input.
* @param[in] perm Permutation vector
*
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index fcabb614df..b835aa701b 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -49,11 +49,11 @@ public:
CLPixelWiseMultiplicationKernel &operator=(CLPixelWiseMultiplicationKernel &&) = default;
/** Initialise the kernel's input, output and border mode.
*
- * @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input1 An input tensor. Data types supported: U8/S16/F16/F32.
* @param[in] input2 An input tensor. Data types supported: same as @p input1.
- * @param[out] output The output tensor, Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[out] output The output tensor. Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*/
@@ -61,11 +61,11 @@ public:
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
*
- * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/S16/F16/F32.
* @param[in] input2 An input tensor info. Data types supported: same as @p input1.
- * @param[in] output The output tensor info, Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[in] output The output tensor info. Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*
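The scale constraint above in practice: 1/255 is the usual normalisation factor for U8 data and satisfies the "either 1/255 or 1/2^n" rule. A hedged sketch with illustrative shapes:

#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    CLTensor in1, in2, out;
    for(CLTensor *t : { &in1, &in2, &out })
    {
        t->allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::U8));
    }

    const float scale = 1.f / 255.f; // allowed: 1/255 or 1/2^n with 0 <= n <= 15
    ARM_COMPUTE_ERROR_THROW_ON(CLPixelWiseMultiplicationKernel::validate(
        in1.info(), in2.info(), out.info(), scale, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN));
    CLPixelWiseMultiplicationKernel mul;
    mul.configure(&in1, &in2, &out, scale, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);

    for(CLTensor *t : { &in1, &in2, &out })
    {
        t->allocator()->allocate();
    }
    CLScheduler::get().enqueue(mul);
    CLScheduler::get().sync();
    return 0;
}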
diff --git a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
index c13507785b..db1a756229 100644
--- a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
@@ -51,16 +51,15 @@ public:
~CLPoolingLayerKernel() = default;
/** Set the input and output tensors.
*
- * @note QS8 and QS16 are supported only for pool sizes 3, 5 and 7
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
void configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayerKernel
*
- * @param[in] input Source tensor info. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
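A sketch of the PoolingLayerInfo plumbing referenced above (values illustrative): 2x2 max pooling with stride 2 and no padding halves each spatial dimension.

#include "arm_compute/core/CL/kernels/CLPoolingLayerKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // 8x8x3 input -> 4x4x3 output with a 2x2 pool, stride 2, pad 0
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(4U, 4U, 3U), 1, DataType::F32));

    const PoolingLayerInfo pool_info(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0));
    ARM_COMPUTE_ERROR_THROW_ON(CLPoolingLayerKernel::validate(src.info(), dst.info(), pool_info));
    CLPoolingLayerKernel pool;
    pool.configure(&src, &dst, pool_info);

    // Allocate after configure() so the kernel's border/padding requirements are applied
    src.allocator()->allocate();
    dst.allocator()->allocate();
    CLScheduler::get().enqueue(pool);
    CLScheduler::get().sync();
    return 0;
}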
diff --git a/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h b/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h
index 044b5e7006..b253d66f4f 100644
--- a/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLReshapeLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ public:
~CLReshapeLayerKernel() = default;
/** Set the input and output of the kernel
*
- * @param[in] input Source tensor. Data type supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
+ * @param[in] input Source tensor. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index c562565175..b272878fe7 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -38,13 +38,13 @@ class CLLogits1DMaxKernel : public ICLSimple3DKernel
public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[out] output Destination tensor. Data types supported: same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxKernel
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[in] output Destination tensor. Data types supported: same as @p input
*
* @return a status
@@ -68,7 +68,7 @@ public:
CLLogits1DShiftExpSumKernel &operator=(CLLogits1DShiftExpSumKernel &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[in] max Max values tensor. Data types supported: same as @p input
* @param[out] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
* @param[out] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
@@ -77,7 +77,7 @@ public:
void configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
/** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DShiftExpSumKernel
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[in] max Max values tensor. Data types supported: same as @p input
* @param[in] output Destination tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
* @param[in] sum Sum of 1D logits tensor. Data types supported: S32 for QASYMM8 @p input, or same as @p input
@@ -116,7 +116,7 @@ public:
CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: F16/F32
* @param[in,out] max Max values tensor. Data types supported: same as @p input
* @param[out] output Destination tensor. Data types supported: same as @p input
* @param[out] sum Sum of 1D logits tensor. Data types supported: same as @p input
@@ -125,7 +125,7 @@ public:
void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
/** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DMaxShiftExpSumKernel
*
- * @param[in] input Source tensor. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: F16/F32
* @param[in] max Max values tensor. Data types supported: same as @p input
* @param[in] output Destination tensor. Data types supported: same as @p input
* @param[in] sum Sum of 1D logits tensor. Data types supported: same as @p input
@@ -175,7 +175,7 @@ public:
CLLogits1DNormKernel &operator=(CLLogits1DNormKernel &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QS8/QS16/S32/F16/F32
+ * @param[in] input Source tensor. Data types supported: S32/F16/F32
* @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
* @param[out] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
* @param[in] beta (Optional) A scaling factor for the exponent. (Default = 1.0)
@@ -183,7 +183,7 @@ public:
void configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, float beta = 1.0f);
/** Static function to check if given info will lead to a valid configuration of @ref CLLogits1DNormKernel
*
- * @param[in] input Source tensor. Data types supported: QS8/QS16/S32/F16/F32
+ * @param[in] input Source tensor. Data types supported: S32/F16/F32
* @param[in] sum Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
* @param[in] output Destination tensor. Data types supported: QASYMM8 for S32 @p input, or same as @p input
*
diff --git a/arm_compute/core/CL/kernels/CLTransposeKernel.h b/arm_compute/core/CL/kernels/CLTransposeKernel.h
index 2e1b481d3f..09d7a8a430 100644
--- a/arm_compute/core/CL/kernels/CLTransposeKernel.h
+++ b/arm_compute/core/CL/kernels/CLTransposeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,13 +40,13 @@ class CLTransposeKernel : public ICLSimple2DKernel
public:
/** Initialise the kernel's input and output.
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLTransposeKernel
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output Output tensor. Data type supported: Same as @p input
*
* @return a status
diff --git a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
index 7a54284199..664fc3c216 100644
--- a/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
+++ b/arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h
@@ -69,7 +69,7 @@ public:
/** Set the input and output of the kernel.
*
* @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QASYMM8/F16/F32
* @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
* dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
* @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
@@ -79,7 +79,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLWeightsReshapeKernel
*
* @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
- * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared. Data types supported: QASYMM8/F16/F32
* @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
* dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
* @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
diff --git a/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h b/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h
index 5b8a318320..d206eb0da7 100644
--- a/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h
@@ -52,7 +52,7 @@ public:
~CLWidthConcatenateLayerKernel() = default;
/** Initialise the kernel's inputs and output
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] width_offset The offset on the X axis.
* @param[in,out] output Output tensor. Data types supported: Same as @p input.
*
@@ -60,7 +60,7 @@ public:
void configure(const ICLTensor *input, unsigned int width_offset, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLWidthConcatenateLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] width_offset The offset on the X axis.
* @param[in] output Output tensor info. Data types supported: Same as @p input.
*
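Finally, width concatenation is done by running this kernel once per input, each with its own width_offset into the shared output — a hedged sketch with illustrative shapes:

#include "arm_compute/core/CL/kernels/CLWidthConcatenateLayerKernel.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Concatenate a 4-wide and a 6-wide tensor into a 10-wide output
    CLTensor in0, in1, out;
    in0.allocator()->init(TensorInfo(TensorShape(4U, 8U), 1, DataType::F32));
    in1.allocator()->init(TensorInfo(TensorShape(6U, 8U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(10U, 8U), 1, DataType::F32));

    CLWidthConcatenateLayerKernel concat0, concat1;
    concat0.configure(&in0, 0U, &out); // writes columns [0, 4)
    concat1.configure(&in1, 4U, &out); // writes columns [4, 10)

    for(CLTensor *t : { &in0, &in1, &out })
    {
        t->allocator()->allocate();
    }
    CLScheduler::get().enqueue(concat0, false);
    CLScheduler::get().enqueue(concat1);
    CLScheduler::get().sync();
    return 0;
}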