Diffstat (limited to 'arm_compute/runtime')
-rw-r--r--  arm_compute/runtime/CL/functions/CLActivationLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLArithmeticAddition.h | 14
-rw-r--r--  arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h | 16
-rw-r--r--  arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvolutionLayer.h | 6
-rw-r--r--  arm_compute/runtime/CL/functions/CLCopy.h | 2
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLDepthConvertLayer.h | 10
-rw-r--r--  arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLFillBorder.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLFlattenLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h | 8
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMM.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h | 12
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLNormalizationLayer.h | 6
-rw-r--r--  arm_compute/runtime/CL/functions/CLPermute.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h | 12
-rw-r--r--  arm_compute/runtime/CL/functions/CLPoolingLayer.h | 6
-rw-r--r--  arm_compute/runtime/CL/functions/CLReshapeLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLSoftmaxLayer.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLTranspose.h | 6
-rw-r--r--  arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h | 4
-rw-r--r--  arm_compute/runtime/CPP/functions/CPPPermute.h | 6
-rw-r--r--  arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEActivationLayer.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticAddition.h | 12
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h | 14
-rw-r--r--  arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NECol2Im.h | 6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvolutionLayer.h | 6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h | 14
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h | 13
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFillBorder.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFlattenLayer.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h | 8
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMM.h | 2
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h | 10
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEIm2Col.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NENormalizationLayer.h | 6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPermute.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h | 12
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPoolingLayer.h | 8
-rw-r--r--  arm_compute/runtime/NEON/functions/NEReshapeLayer.h | 2
-rw-r--r--  arm_compute/runtime/NEON/functions/NESoftmaxLayer.h | 8
-rw-r--r--  arm_compute/runtime/NEON/functions/NETranspose.h | 6
53 files changed, 161 insertions(+), 174 deletions(-)
diff --git a/arm_compute/runtime/CL/functions/CLActivationLayer.h b/arm_compute/runtime/CL/functions/CLActivationLayer.h
index 7100eb4d84..e98fa4bf48 100644
--- a/arm_compute/runtime/CL/functions/CLActivationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLActivationLayer.h
@@ -44,7 +44,7 @@ public:
* @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * of the activation function. Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] act_info Activation layer parameters.
*/
@@ -52,7 +52,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayer
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * of the activation function. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor info. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*
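
Illustrative usage (not part of the patch): a minimal, self-contained sketch of driving the updated CLActivationLayer interface end to end. The shape, F32 data type and RELU choice are assumptions for the example; per the note above, passing nullptr as the output would run the activation in-place. The later sketches on this page reuse these includes and the scheduler setup.

    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init(); // create context, device and queue

        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::F32));

        CLActivationLayer act;
        act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        src.allocator()->allocate();
        dst.allocator()->allocate();

        act.run();                 // enqueue the kernel
        CLScheduler::get().sync(); // wait for completion
        return 0;
    }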
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
index 9eea846d97..5aba60ad01 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
@@ -33,7 +33,7 @@ class ICLTensor;
/** Basic function to run @ref CLArithmeticAdditionKernel
*
- * @note The tensor data type for the inputs must be U8/QS8/QS16/S16/F16/F32.
+ * @note The tensor data type for the inputs must be U8/S16/F16/F32.
* @note The function performs an arithmetic addition between two tensors.
*/
class CLArithmeticAddition : public ICLSimpleFunction
@@ -41,19 +41,19 @@ class CLArithmeticAddition : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input. Data types supported: U8/QS8/QASYMM8/QS16/S16/F16/F32.
+ * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[in, out] input2 Second tensor input. Data types supported: U8, QS8 (only if @p input1 is QS8), QASYMM8 (only if @p input1 is QASYMM8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+ * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QASYMM8 (only if both inputs are QASYMM8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*/
void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAddition
*
- * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QASYMM8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QASYMM8 (only if @p input1 is QASYMM8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QASYMM8 ( only if both inputs are QASYMM8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
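
A sketch of how the narrowed type list plays out at call time, reusing the setup above; U8 tensors and the saturating policy are assumed for illustration. QS8/QS16 tensor infos would now fail the validate() step.

    CLTensor a, b, sum;
    a.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));
    b.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));
    sum.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));

    // Check the configuration before committing to it.
    Status status = CLArithmeticAddition::validate(a.info(), b.info(), sum.info(), ConvertPolicy::SATURATE);
    if(status.error_code() == ErrorCode::OK)
    {
        CLArithmeticAddition add;
        add.configure(&a, &b, &sum, ConvertPolicy::SATURATE);
    }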
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
index 0d3f5bce6a..b9690806d7 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@ class ICLTensor;
/** Basic function to run @ref CLArithmeticSubtractionKernel
*
- * @note The tensor data type for the inputs must be U8/QS8/QS16/S16/F16/F32.
+ * @note The tensor data type for the inputs must be U8/S16/F16/F32.
* @note The function performs an arithmetic subtraction between two tensors.
*/
class CLArithmeticSubtraction : public ICLSimpleFunction
@@ -42,17 +42,17 @@ class CLArithmeticSubtraction : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32.
+ * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32.
+ * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtraction
*
- * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
- * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+ * @param[in] input1 First tensor input info. Data types supported: U8/S16/F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: U8/S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
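
Same pattern for the subtraction function; S16 tensors and a wrapping overflow policy are an assumed example.

    CLTensor x, y, diff;
    x.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::S16));
    y.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::S16));
    diff.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::S16));

    CLArithmeticSubtraction sub;
    sub.configure(&x, &y, &diff, ConvertPolicy::WRAP); // wrap on overflow rather than saturate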
diff --git a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
index 9386a86ae5..215046ae7e 100644
--- a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
@@ -50,7 +50,7 @@ public:
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * The rest are optional and used for representing batches. Data types supported: F16/F32.
* @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
@@ -65,7 +65,7 @@ public:
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * The rest are optional and used for representing batches. Data types supported: F16/F32.
* @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
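
A hedged sketch of configuring the layer with the per-feature-map statistics described above; shapes and epsilon are assumptions, and the trailing beta/gamma/epsilon parameters follow the documentation outside this hunk.

    // Input [width, height, FM]; statistics are 1D tensors of size FM.
    CLTensor in, out, mean, var, beta, gamma;
    in.allocator()->init(TensorInfo(TensorShape(28U, 28U, 64U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(28U, 28U, 64U), 1, DataType::F32));
    const TensorInfo fm_info(TensorShape(64U), 1, DataType::F32);
    mean.allocator()->init(fm_info);
    var.allocator()->init(fm_info);
    beta.allocator()->init(fm_info);
    gamma.allocator()->init(fm_info);

    CLBatchNormalizationLayer bn;
    bn.configure(&in, &out, &mean, &var, &beta, &gamma, 0.001f);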
diff --git a/arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h b/arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h
index b632e3b2e3..0bcbfd6276 100644
--- a/arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h
+++ b/arm_compute/runtime/CL/functions/CLChannelShuffleLayer.h
@@ -41,14 +41,14 @@ class CLChannelShuffleLayer : public ICLSimpleFunction
public:
/** Initialize the function
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*/
void configure(const ICLTensor *input, ICLTensor *output, unsigned int num_groups);
/** Static function to check if given info will lead to a valid configuration of @ref CLChannelShuffleLayerKernel
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
* @param[in] num_groups Number of groups. Must be greater than 1 and the number of channels of the tensors must be a multiple of the number of groups.
*
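
A short sketch of the call; shuffling 64 channels in 2 groups is an assumed example (num_groups must be greater than 1 and divide the channel count, as documented above).

    CLTensor in, out;
    in.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));

    CLChannelShuffleLayer shuffle;
    shuffle.configure(&in, &out, 2 /* num_groups */);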
diff --git a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
index d8eef34c62..ae0c9d6459 100644
--- a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
@@ -37,7 +37,7 @@ class CLConvertFullyConnectedWeights : public ICLSimpleFunction
public:
/** Initialize the function.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -45,7 +45,7 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeights
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
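
A sketch of converting 2D fully connected weights between layouts; the original input shape, weight sizes and layout are assumptions.

    CLTensor w_in, w_out;
    // 2D weights whose producing layer saw a 7x7x512 NCHW input (values assumed).
    w_in.allocator()->init(TensorInfo(TensorShape(25088U, 4096U), 1, DataType::F32));
    w_out.allocator()->init(TensorInfo(TensorShape(25088U, 4096U), 1, DataType::F32));

    CLConvertFullyConnectedWeights convert;
    convert.configure(&w_in, &w_out, TensorShape(7U, 7U, 512U), DataLayout::NCHW);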
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index 82bb71ca6c..c9a74f2a4c 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -49,7 +49,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -68,7 +68,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported:Same as @p input.
* @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
@@ -88,7 +88,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
* @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
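
A sketch of the QASYMM8 path called out above, where biases must be S32; all shapes, strides and quantization parameters are assumptions.

    CLTensor in, w, b, out;
    in.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 120)));
    w.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.02f, 110)));
    b.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::S32)); // S32 biases for QASYMM8 input
    out.allocator()->init(TensorInfo(TensorShape(224U, 224U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128)));

    CLConvolutionLayer conv;
    conv.configure(&in, &w, &b, &out, PadStrideInfo(1, 1, 1, 1)); // stride 1, pad 1 keeps 224x224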
diff --git a/arm_compute/runtime/CL/functions/CLCopy.h b/arm_compute/runtime/CL/functions/CLCopy.h
index d76f0702af..170dc9a613 100644
--- a/arm_compute/runtime/CL/functions/CLCopy.h
+++ b/arm_compute/runtime/CL/functions/CLCopy.h
@@ -38,7 +38,7 @@ class CLCopy : public ICLSimpleFunction
public:
/** Initialise the function's source and destination.
*
- * @param[in] input Source tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32.
+ * @param[in] input Source tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
* @param[out] output Output tensor. Data types supported: Same as @p input.
*
*/
diff --git a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
index 00b3b66c97..d505814e73 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ public:
CLDepthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
- * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QS8/QS16/F16/F32.
+ * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: F16/F32.
* @param[out] output Output tensor. Data types supported: Same as @p input.
*/
void configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output);
diff --git a/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h b/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
index c84dc15508..00fa0a686d 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConvertLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,20 +43,16 @@ public:
*
* Valid conversions Input -> Output :
*
- * - QS8 -> F32
- * - QS16 -> F32
* - U8 -> U16, S16, U32, S32
* - U16 -> U8, U32, S32
* - S16 -> U8, U32, S32
* - U32 -> U8, U16, S16
* - S32 -> U8, U16, S16
- * - F32 -> QS8, QS16
*
- * @param[in] input The input tensor to convert. Data types supported: QS8/U8/U16/S16/Q16/U32/S32/F32.
- * @param[out] output The output tensor. Data types supported: QS8/U8/U16/S16/QS16/U32/S32/F32.
+ * @param[in] input The input tensor to convert. Data types supported: U8/U16/S16/U32/S32/F32.
+ * @param[out] output The output tensor. Data types supported: U8/U16/S16/U32/S32/F32.
* @param[in] policy Conversion policy.
* @param[in] shift Value for down/up conversions. Must be 0 <= shift < 8.
- * It is not used on fixed point conversion.
*/
void configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
};
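
A sketch of an up-conversion from the table above; for U8 -> U16 the overflow policy is effectively moot and the shift is left at 0 (both choices assumed).

    CLTensor u8_in, u16_out;
    u8_in.allocator()->init(TensorInfo(TensorShape(128U, 128U), 1, DataType::U8));
    u16_out.allocator()->init(TensorInfo(TensorShape(128U, 128U), 1, DataType::U16));

    CLDepthConvertLayer convert;
    convert.configure(&u8_in, &u16_out, ConvertPolicy::SATURATE, 0 /* shift */);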
diff --git a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
index 4363224a08..31683c51f9 100644
--- a/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h
@@ -47,7 +47,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QASYMM8/QS8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -61,7 +61,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported:Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported:Same as @p input.
* @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
diff --git a/arm_compute/runtime/CL/functions/CLFillBorder.h b/arm_compute/runtime/CL/functions/CLFillBorder.h
index 80a8cf20e3..f4515b6700 100644
--- a/arm_compute/runtime/CL/functions/CLFillBorder.h
+++ b/arm_compute/runtime/CL/functions/CLFillBorder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ class CLFillBorder : public ICLSimpleFunction
public:
/** Initialize the function
*
- * @param[in,out] tensor Source tensor. Data types supported: QS8/U8/S16/QS16
+ * @param[in,out] tensor Source tensor. Data types supported: U8/S16
* @param[in] border_width The border width
* @param[in] border_mode Strategy to use for borders.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
diff --git a/arm_compute/runtime/CL/functions/CLFlattenLayer.h b/arm_compute/runtime/CL/functions/CLFlattenLayer.h
index e19b0e465f..88df4a7f96 100644
--- a/arm_compute/runtime/CL/functions/CLFlattenLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFlattenLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,7 @@ class CLFlattenLayer : public ICLSimpleFunction
public:
/** Initialise the kernel's input and output.
*
- * @param[in] input First input tensor to flatten with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input First input tensor to flatten with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F16/F32
* @param[out] output Output tensor with shape [w*h*d, input_batches] where:
* w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input
*/
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 127d8acf10..3357868968 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -48,13 +48,13 @@ class CLFullyConnectedLayerReshapeWeights : public ICLSimpleFunction
public:
/** Set the input and output tensors.
*
- * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor which stores the transposed input tensor. Data type supported: Same as @p input.
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayerReshapeWeights
*
- * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor which stores the transposed input tensor. Data type supported: Same as @p input.
*
* @return a status
@@ -86,7 +86,7 @@ public:
CLFullyConnectedLayer &operator=(CLFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
* @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
* @param[out] output Destination tensor. Data type supported: Same as @p input.
@@ -99,7 +99,7 @@ public:
bool retain_internal_weights = false);
/** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
*
- * @param[in] input Source tensor. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input
* @param[in] biases Bias tensor. It can be nullptr. Data type supported:Same as @p input.
* @param[in] output Destination tensor. Data type supported: Same as @p input.
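
A sketch of the main configure() overload with the default weight handling; the 128-input/10-output sizes and the weight layout are assumptions.

    CLTensor in, w, b, out;
    in.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    w.allocator()->init(TensorInfo(TensorShape(128U, 10U), 1, DataType::F32)); // 2D weights (layout assumed)
    b.allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(10U), 1, DataType::F32));

    CLFullyConnectedLayer fc;
    fc.configure(&in, &w, &b, &out); // transpose_weights etc. left at their defaults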
diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 41c7467a3f..c4513f29d9 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -69,7 +69,7 @@ public:
*
* @note Whilst the first input tensor can be a vector, the second input tensor must be at least a matrix
*
- * @param[in] a First input tensor (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
+ * @param[in] a First input tensor (Matrix or Vector A). Data types supported: F16/F32
* @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a.
* @param[in] c Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
* @param[out] output Output tensor. Data type supported: same as @p a
@@ -82,7 +82,7 @@ public:
void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMM.
*
- * @param[in] a First input tensor info (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
+ * @param[in] a First input tensor info (Matrix or Vector A). Data types supported: F16/F32
* @param[in] b Second input tensor info (Matrix B). Data type supported: same as @p a.
* @param[in] c Third input tensor info (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
* @param[out] output Output tensor info. Data type supported: same as @p a
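
A sketch of a plain matrix product through the F16/F32-only interface; C = alpha*A*B + beta*C collapses to A*B with the values below (shapes assumed, width-first as usual for TensorShape).

    CLTensor a, b, out;
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));   // A: 32x64
    b.allocator()->init(TensorInfo(TensorShape(16U, 64U), 1, DataType::F32));   // B: 64x16
    out.allocator()->init(TensorInfo(TensorShape(16U, 32U), 1, DataType::F32)); // C: 32x16

    CLGEMM gemm;
    gemm.configure(&a, &b, nullptr /* no matrix C */, &out, 1.0f /* alpha */, 0.0f /* beta */);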
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index 2c1f7a9d5e..09daa5f568 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -59,7 +59,7 @@ public:
/** Set the input and output tensors.
*
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data type supported: QASYMM8/F16/F32.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
* @param[out] output Destination tensor. Data types supported: Same as @p weights.
*/
@@ -67,7 +67,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLConvolutionLayerReshapeWeights
*
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data type supported: QASYMM8/F16/F32.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
* @param[in] output Destination tensor. Data types supported: Same as @p weights.
*
@@ -116,7 +116,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -134,7 +134,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -158,7 +158,7 @@ public:
private:
/** Configures the appropriate matrix multiply routine
*
- * @param[in] input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Data type supported: Same as @p input.
* @param[in, out] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
@@ -167,7 +167,7 @@ private:
void configure_mm(const ICLTensor *input, const ICLTensor *weights, ICLTensor *output, int gemm_3d_depth = 1);
/** Static function to check if given info will lead to a valid configuration of @ref CLGEMMConvolutionLayer matrix multiply routines
*
- * @param[in] input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Data type supported: Same as @p input.
* @param[in] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
diff --git a/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h b/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h
index ae05b0fd9c..48bb6ccb22 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMInterleave4x4.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,7 @@ class CLGEMMInterleave4x4 : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output
*
- * @param[in] input First input tensor. Data types supported: U8/S8/QS8/QASYMM8/QS16/U16/S16/F16/U32/S32/F32
+ * @param[in] input First input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h b/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h
index ae56548c27..13e643a77d 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ class CLGEMMTranspose1xW : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output
*
- * @param[in] input First input tensor. Data type supported: U8/S8/QS8/QASYMM8/QS16/U16/S16/F16/U32/S32/F32
+ * @param[in] input First input tensor. Data type supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/runtime/CL/functions/CLNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
index 93925778d7..89e20d20f6 100644
--- a/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,7 +51,7 @@ public:
/** Set the input and output tensors.
*
* @param[in, out] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32 (Written to by the border handler)
+ * and an optional 4th dimension for batch of inputs. Data types supported: F16/F32 (Written to by the border handler)
* @param[out] output Destination tensor. Dimensions, data type and number of channels must match the input ones.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
@@ -59,7 +59,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLNormalizationLayer
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32
+ * and an optional 4th dimension for batch of inputs. Data types supported: F16/F32
* @param[in] output Destination tensor. Dimensions, data type and number of channels must match the input ones.
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*
diff --git a/arm_compute/runtime/CL/functions/CLPermute.h b/arm_compute/runtime/CL/functions/CLPermute.h
index a41567570a..638207fc48 100644
--- a/arm_compute/runtime/CL/functions/CLPermute.h
+++ b/arm_compute/runtime/CL/functions/CLPermute.h
@@ -39,14 +39,14 @@ class CLPermute : public ICLSimpleFunction
public:
/** Set the input and output tensors.
*
- * @param[in] input The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*/
void configure(const ICLTensor *input, ICLTensor *output, const PermutationVector &perm);
/** Static function to check if given info will lead to a valid configuration of @ref CLPermute.
*
- * @param[in] input First tensor input info. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32.
+ * @param[in] input First tensor input info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
* @param[in] output Output tensor info. Data types supported: same as @p input.
* @param[in] perm Permutation vector
*
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index 75b67cd17c..a59fb4aba8 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -37,13 +37,13 @@ class CLPixelWiseMultiplication : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in, out] input1 An input tensor. Data types supported: U8/S16/F16/F32.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
* @param[in, out] input2 An input tensor. Data types supported: same as @p input1.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[out] output The output tensor, Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[out] output The output tensor, Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*/
@@ -51,11 +51,11 @@ public:
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
*
- * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/S16/F16/F32.
* @param[in] input2 An input tensor info. Data types supported: same as @p input1.
- * @param[in] output The output tensor info, Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+ * @param[in] output The output tensor info, Data types supported: same as @p input1. Note: U8 requires both inputs to be U8.
* @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
*
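
A sketch with one of the documented scale values; 1/255 keeps a U8*U8 product in the U8 range, and both policy choices are assumptions.

    CLTensor a, b, prod;
    a.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));
    b.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));
    prod.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::U8));

    CLPixelWiseMultiplication mul;
    mul.configure(&a, &b, &prod, 1.f / 255.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);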
diff --git a/arm_compute/runtime/CL/functions/CLPoolingLayer.h b/arm_compute/runtime/CL/functions/CLPoolingLayer.h
index a8bdabad98..f7571c1d2d 100644
--- a/arm_compute/runtime/CL/functions/CLPoolingLayer.h
+++ b/arm_compute/runtime/CL/functions/CLPoolingLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,14 +43,14 @@ class CLPoolingLayer : public ICLSimpleFunction
public:
/** Set the input and output tensors.
*
- * @param[in,out] input Source tensor. (Written to only when padding != 0) Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in,out] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
void configure(ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayer
*
- * @param[in] input Source tensor info. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor info. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
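
A sketch of a common pooling configuration; 2x2 max pooling with stride 2 and no padding is an assumed example (with padding, the input border would be written to, per the note above).

    CLTensor in, out;
    in.allocator()->init(TensorInfo(TensorShape(112U, 112U, 64U), 1, DataType::F32));
    out.allocator()->init(TensorInfo(TensorShape(56U, 56U, 64U), 1, DataType::F32));

    CLPoolingLayer pool;
    pool.configure(&in, &out, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));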
diff --git a/arm_compute/runtime/CL/functions/CLReshapeLayer.h b/arm_compute/runtime/CL/functions/CLReshapeLayer.h
index 411e751290..cf5f7e5e15 100644
--- a/arm_compute/runtime/CL/functions/CLReshapeLayer.h
+++ b/arm_compute/runtime/CL/functions/CLReshapeLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,7 +36,7 @@ class CLReshapeLayer : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs and outputs
*
- * @param[in] input First tensor input. Data type supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
+ * @param[in] input First tensor input. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
index 8ab6c160d1..34349ed52b 100644
--- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
@@ -53,14 +53,14 @@ public:
CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[out] output Destination tensor. Data types supported: same as @p input
* @param[in] beta (Optional) A scaling factor for the exponent. Defaults to 1.f
*/
void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f);
/** Static function to check if given info will lead to a valid configuration of @ref CLSoftmaxLayer
*
- * @param[in] input Source tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32
* @param[in] output Destination tensor. Data types supported: same as @p input
*
* @return a status
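
A sketch with beta left at its default of 1.f; the 1000-class logits shape is an assumption.

    CLTensor logits, probs;
    logits.allocator()->init(TensorInfo(TensorShape(1000U), 1, DataType::F32));
    probs.allocator()->init(TensorInfo(TensorShape(1000U), 1, DataType::F32));

    CLSoftmaxLayer softmax;
    softmax.configure(&logits, &probs); // beta defaults to 1.0f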
diff --git a/arm_compute/runtime/CL/functions/CLTranspose.h b/arm_compute/runtime/CL/functions/CLTranspose.h
index 89a2022e76..1e9bb95114 100644
--- a/arm_compute/runtime/CL/functions/CLTranspose.h
+++ b/arm_compute/runtime/CL/functions/CLTranspose.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,13 +40,13 @@ class CLTranspose : public ICLSimpleFunction
public:
/** Initialise the kernel's inputs and output
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLTranspose
*
- * @param[in] input The input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input The input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. Data types supported: Same as @p input
*
* @return a status
diff --git a/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
index bcda05274b..289191e030 100644
--- a/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
@@ -50,13 +50,13 @@ public:
CLWidthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
- * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QASYMM8/F16/F32.
* @param[out] output Output tensor. Data types supported: Same as @p input.
*/
void configure(std::vector<ICLTensor *> inputs_vector, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDepthConcatenateLayerKernel
*
- * @param[in] inputs_vector The vectors containing all the tensors info to concatenate. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] inputs_vector The vectors containing all the tensors info to concatenate. Data types supported: QASYMM8/F16/F32.
* @param[in] output Output tensor info. Data types supported: Same as @p input.
*
* @return a status
diff --git a/arm_compute/runtime/CPP/functions/CPPPermute.h b/arm_compute/runtime/CPP/functions/CPPPermute.h
index 0094576da6..40d6830425 100644
--- a/arm_compute/runtime/CPP/functions/CPPPermute.h
+++ b/arm_compute/runtime/CPP/functions/CPPPermute.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,14 +38,14 @@ class CPPPermute : public ICPPSimpleFunction
public:
/** Configure the permute CPP kernel
*
- * @param[in] input The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*/
void configure(const ITensor *input, ITensor *output, const PermutationVector &perm);
/** Static function to check if given info will lead to a valid configuration of @ref CPPPermute
*
- * @param[in] input The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
index 45a883948c..421150e18e 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.h
@@ -108,7 +108,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -139,7 +139,7 @@ private:
void configure_mm(const IGCTensor *input, const IGCTensor *weights, IGCTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref GCGEMMConvolutionLayer matrix multiply routines
*
- * @param[in] input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Data type supported: Same as @p input.
* @param[in] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
diff --git a/arm_compute/runtime/NEON/functions/NEActivationLayer.h b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
index 59f5802d2a..a65146d461 100644
--- a/arm_compute/runtime/NEON/functions/NEActivationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
@@ -44,7 +44,7 @@ public:
* @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * of the activation function. Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] activation_info Activation layer parameters.
*/
@@ -52,7 +52,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEActivationLayer
*
* @param[in] input Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
- * of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * of the activation function. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor info. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index c72d0b6d61..c29646397c 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -37,17 +37,17 @@ class NEArithmeticAddition : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[out] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
+ * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*/
void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAddition
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
+ * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
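
The NEON side mirrors the CL calls but uses plain Tensors and needs no scheduler init before run(); S16 tensors and the saturating policy are assumed. The same pattern applies to the other NEON functions touched by this patch.

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    Tensor a, b, sum;
    a.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::S16));
    b.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::S16));
    sum.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::S16));

    NEArithmeticAddition add;
    add.configure(&a, &b, &sum, ConvertPolicy::SATURATE);

    a.allocator()->allocate();
    b.allocator()->allocate();
    sum.allocator()->allocate();
    add.run(); // executes on the CPU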
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
index 751ed1adf1..9b460c1031 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,17 +37,17 @@ class NEArithmeticSubtraction : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[out] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
+ * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*/
void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtraction
*
- * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
- * @param[in] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+ * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
+ * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
+ * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*
* @return a status
diff --git a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
index feb2087aa0..77f06129a3 100644
--- a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
@@ -50,7 +50,7 @@ public:
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * The rest are optional and used for representing batches. Data types supported: F16/F32.
* @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
@@ -65,7 +65,7 @@ public:
*
* @param[in] input Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
* 3 lower dimensions represent a single input with dimensions [width, height, FM].
- * The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+ * The rest are optional and used for representing batches. Data types supported: F16/F32.
* @param[in] output Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
* @param[in] mean Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
* @param[in] var Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
diff --git a/arm_compute/runtime/NEON/functions/NECol2Im.h b/arm_compute/runtime/NEON/functions/NECol2Im.h
index 9b05bd4513..42876a8aec 100644
--- a/arm_compute/runtime/NEON/functions/NECol2Im.h
+++ b/arm_compute/runtime/NEON/functions/NECol2Im.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,7 @@ class NECol2Im : public INESimpleFunction
public:
/** Configure the col2im NEON kernel
*
- * @param[in] input The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to convert. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
* while the rest represent batch of outputs. Data types supported: Same as @p input
* @param[in] convolved_dims Output convolved dimensions.
@@ -47,7 +47,7 @@ public:
void configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims);
/** Static function to check if given info will lead to a valid configuration of @ref NECol2Im
*
- * @param[in] input The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to convert. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
* while the rest represent batch of outputs. Data types supported: Same as @p input
* @param[in] convolved_dims Output convolved dimensions.
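
[Editor's note] A sketch of the call shape (the helper name and dimensions are hypothetical): col2im rebuilds the [width, height, OFM] image from the column matrix produced by a GEMM-based convolution.

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    // Hypothetical helper: cols is the GEMM output, img the reassembled feature map.
    void run_col2im(const ITensor *cols, ITensor *img, unsigned int out_w, unsigned int out_h)
    {
        NECol2Im col2im;
        col2im.configure(cols, img, Size2D(out_w, out_h)); // convolved output dimensions
        col2im.run();
    }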
diff --git a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
index bdb157f30b..3ec0390124 100644
--- a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
@@ -40,7 +40,7 @@ public:
NEConvertFullyConnectedWeights();
/** Initialize the function.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -48,7 +48,7 @@ public:
void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref NEConvertFullyConnectedWeights
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
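
[Editor's note] A short sketch, assuming the weights were trained against NCHW input and must now be reordered; names are illustrative:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void convert_fc_weights(const ITensor *trained, ITensor *converted, const TensorShape &fc_input_shape)
    {
        NEConvertFullyConnectedWeights convert;
        // fc_input_shape: shape of the tensor that originally entered the FC layer (NCHW).
        convert.configure(trained, converted, fc_input_shape, DataLayout::NCHW);
        convert.run();
    }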
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index e143814a4e..c4226cbc5d 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -52,7 +52,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -72,7 +72,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -94,7 +94,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] output Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p input.
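
[Editor's note] A minimal F32 sketch of the configure call; shapes are illustrative, and for QASYMM8 inputs the bias would instead need to be S32 as noted above:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"
    using namespace arm_compute;

    int main()
    {
        Tensor src, weights, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 3U), 1, DataType::F32));       // [W, H, IFM]
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::F32)); // [kx, ky, IFM, OFM]
        bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));                // [OFM]
        dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));       // same-padding output

        NEConvolutionLayer conv;
        conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1)); // stride 1, pad 1

        for(Tensor *t : { &src, &weights, &bias, &dst })
            t->allocator()->allocate();
        conv.run();
        return 0;
    }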
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index 5b63b70634..eefb5fa362 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ public:
NEDepthConcatenateLayer();
/** Initialise the kernel's inputs vector and output.
*
- * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported: QS8/QS16/F16/F32.
+ * @param[in,out] inputs_vector The vector containing all the tensors to concatenate. Data types supported: F16/F32.
* @param[out] output Output tensor. Data types supported: Same as @p inputs_vector.
*/
void configure(std::vector<ITensor *> inputs_vector, ITensor *output);

diff --git a/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
index b235e87b4a..eedadc242d 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,22 +46,14 @@ public:
/** Initialize the function's source, destination
*
* Valid conversions Input -> Output :
- * QS8 -> QS8, F32
* U8 -> U16, S16, S32
* U16 -> U8, U32
* S16 -> U8, S32
- * QS16 -> QS16, F32
- * F32 -> QS8, QS16
*
- * @warning In case of in-place fixed point position conversion make sure that configure has been called
- * before the updated tensor is used in other functions, as the TensorInfo of the tensor will be
- * altered. In-place is only supported for QS8 -> QS8, QS16 -> QS16.
- *
- * @param[in, out] input The input tensor to convert (Written in case of in-place computation). Data types supported: U8/QS8/U16/S16/F32.
- * @param[out] output The output tensor. Can be null in case of in-place computation. Data types supported: U8/QS8/U16/S16/U32/S32/F32.
+ * @param[in, out] input The input tensor to convert (Written in case of in-place computation). Data types supported: U8/U16/S16/F32.
+ * @param[out] output The output tensor. Can be null in case of in-place computation. Data types supported: U8/U16/S16/U32/S32/F32.
* @param[in] policy Conversion policy.
* @param[in] shift (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
- * In case of fixed point position conversion, it specifies the new fixed point position, if operation is in-place.
*/
void configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
};
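
[Editor's note] With the fixed-point paths gone, a representative up-conversion (U8 to S16, one of the valid pairs listed above) looks like this; shapes are illustrative and shift must stay below 8:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"
    using namespace arm_compute;

    int main()
    {
        Tensor u8_src, s16_dst;
        u8_src.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::U8));
        s16_dst.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::S16));

        NEDepthConvertLayer convert;
        convert.configure(&u8_src, &s16_dst, ConvertPolicy::SATURATE, 0 /* shift */);

        u8_src.allocator()->allocate();
        s16_dst.allocator()->allocate();
        convert.run();
        return 0;
    }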
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index ae384ffa56..a4a55d10f8 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -54,11 +54,11 @@ public:
/** Set the input, weights, biases and output tensors.
*
* @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
- * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
* 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
*
- * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in, out] input Input tensor. Data types supported: F16/F32.
* @param[in] weights Set of kernels to convolve the input volume.
* Supported sizes: 1x1, 3x3 and 5x5.
* The 3rd dimension must be the same as the input's volume 3rd dimension.
@@ -73,11 +73,11 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayer
*
* @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
- * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
* 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
*
- * @param[in] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] input Input tensor. Data types supported: F16/F32.
* @param[in] weights Set of kernels to convolve the input volume.
* Supported sizes: 1x1, 3x3 and 5x5.
* The 3rd dimension must be the same as the input's volume 3rd dimension.
@@ -104,7 +104,6 @@ private:
NEActivationLayer _activationlayer_function;
Tensor _accumulator;
bool _has_bias;
- bool _is_fixed_point;
bool _is_activationlayer_enabled;
unsigned int _dim_split;
};
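
[Editor's note] A sketch of one of the supported configurations from the table above (3x3, stride 1, F32); the wrapper name is hypothetical:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void direct_conv_3x3(ITensor *src, ITensor *weights, ITensor *bias, ITensor *dst)
    {
        // 3x3 kernel, stride 1, no padding: a supported F32 configuration.
        NEDirectConvolutionLayer conv;
        conv.configure(src, weights, bias, dst, PadStrideInfo(1, 1, 0, 0));
        conv.run();
    }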
diff --git a/arm_compute/runtime/NEON/functions/NEFillBorder.h b/arm_compute/runtime/NEON/functions/NEFillBorder.h
index b6b7e77471..27a9eea9af 100644
--- a/arm_compute/runtime/NEON/functions/NEFillBorder.h
+++ b/arm_compute/runtime/NEON/functions/NEFillBorder.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,7 @@ public:
*
* @note This function fills the borders within the XY-planes.
*
- * @param[in, out] input Source tensor. Data type supported: U8/QS8/S16/S32/F32
+ * @param[in, out] input Source tensor. Data type supported: U8/S16/S32/F32
* @param[in] border_width Width of the tensor border in pixels.
* @param[in] border_mode Strategy to use for borders.
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
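
[Editor's note] A minimal sketch of filling a constant border, assuming a float tensor so PixelValue carries an F32 fill value:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void fill_border_constant(ITensor *tensor)
    {
        NEFillBorder fill;
        // Pad a 1-pixel XY border with zeros (CONSTANT mode); REPLICATE is the other common choice.
        fill.configure(tensor, 1U, BorderMode::CONSTANT, PixelValue(0.f));
        fill.run();
    }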
diff --git a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
index e9c8e27d57..2c259fa178 100644
--- a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,7 @@ class NEFlattenLayer : public INESimpleFunction
public:
/** Initialise the kernel's input and output.
*
- * @param[in] input First input tensor to flatten with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: QS8/QS16/F16/F32
+ * @param[in] input First input tensor to flatten with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F16/F32
* @param[out] output Output tensor with shape [w*h*d, input_batches] where:
* w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input
*/
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index 42c9e2d3e9..d4166b3830 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -51,7 +51,7 @@ public:
NEFullyConnectedLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/QS16/F32.
+ * @param[in] input Weights tensor. The weights must be 2 dimensional. Data types supported: F32.
* @param[out] output Destination tensor. Data type supported: Same as @p input.
* @param[in] transpose_weights True if the weights must be transposed. Data types supported: Same as @p weights.
* @param[in] is_batched_fc_layer True if it is a batched fully connected layer
@@ -59,7 +59,7 @@ public:
void configure(const ITensor *input, ITensor *output, bool transpose_weights, bool is_batched_fc_layer);
/** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayerReshapeWeights
*
- * @param[in] input Weights tensor info. The weights must be 2 dimensional. Data types supported: QS8/QS16/F32.
+ * @param[in] input Weights tensor info. The weights must be 2 dimensional. Data types supported: F32.
* @param[in] output Destination tensor info. Data type supported: Same as @p input.
* @param[in] transpose_weights True if the weights must be transposed. Data types supported: Same as @p weights.
* @param[in] is_batched_fc_layer True if it is a batched fully connected layer
@@ -104,7 +104,7 @@ public:
NEFullyConnectedLayer &operator=(NEFullyConnectedLayer &&) = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data type supported: QS8/QS16/F16/F32.
+ * @param[in] input Source tensor. Data type supported: F16/F32.
* @param[in] weights Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input.
* @param[in] biases Bias tensor. Can be nullptr. Data type supported: Same as @p input.
* @param[out] output Destination tensor. Data type supported: Same as @p input.
@@ -114,7 +114,7 @@ public:
void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
/** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayer
*
- * @param[in] input Source tensor info. Data type supported: QS8/QS16/F16/F32.
+ * @param[in] input Source tensor info. Data type supported: F16/F32.
* @param[in] weights Weights tensor info. The weights must be 2 dimensional. Data type supported: Same as @p input
* @param[in] biases Bias tensor info. It can be nullptr. Data type supported: Same as @p input.
* @param[in] output Destination tensor info. Data type supported: Same as @p input.
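
[Editor's note] A hedged sketch of the runtime call; the tensors are assumed to be initialised elsewhere, and the two trailing flags keep their defaults:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void fully_connected(ITensor *src, const ITensor *weights, const ITensor *bias, ITensor *dst)
    {
        NEFullyConnectedLayer fc;
        // transpose_weights defaults to true; pass are_weights_reshaped = true to skip the
        // internal reshape when the weights were prepared ahead of time.
        fc.configure(src, weights, bias, dst);
        fc.run();
    }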
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index 5d108b2c14..cf059e5c4d 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -66,7 +66,7 @@ public:
* @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
* @note GEMM: The tensors a, b, c, d must have the same data type. You should not mix data types when calling this function.
*
- * @param[in] a First input tensor (Matrix A or Vector A). Data type supported: QS8/QS16/F16/F32
+ * @param[in] a First input tensor (Matrix A or Vector A). Data type supported: F16/F32
* @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
* @param[in] c Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
* @param[out] d Output tensor. Data type supported: same as @p a
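
[Editor's note] A small F32 sketch of d = alpha * a * b + beta * c; note TensorShape is (columns, rows), so the dimensions below encode a 2x4 times 4x3 product. Values are illustrative:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"
    using namespace arm_compute;

    int main()
    {
        Tensor a, b, c, d; // a: 2x4, b: 4x3, c and d: 2x3 (rows x cols)
        a.allocator()->init(TensorInfo(TensorShape(4U, 2U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(3U, 4U), 1, DataType::F32));
        c.allocator()->init(TensorInfo(TensorShape(3U, 2U), 1, DataType::F32));
        d.allocator()->init(TensorInfo(TensorShape(3U, 2U), 1, DataType::F32));

        NEGEMM gemm;
        gemm.configure(&a, &b, &c, &d, 1.0f /* alpha */, 0.5f /* beta */);

        for(Tensor *t : { &a, &b, &c, &d })
            t->allocator()->allocate();
        gemm.run();
        return 0;
    }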
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index 7075becf75..68e1145e35 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -60,7 +60,7 @@ public:
NEConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QASYMM8/QS16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QASYMM8/F32.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
* @param[out] output Destination tensor. Data types supported: Same as @p weights.
* @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
@@ -69,7 +69,7 @@ public:
void configure(const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose1xW);
/** Static function to check if given info will lead to a valid configuration of @ref NEConvolutionLayerReshapeWeights
*
- * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QASYMM8/F16/F32.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
* @param[in] output Destination tensor. Data types supported: Same as @p weights.
* @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
@@ -116,7 +116,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F32.
+ * Data types supported: QASYMM8/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -134,7 +134,7 @@ public:
*
* @param[in] input Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
- * Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
* @param[in] biases Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -158,7 +158,7 @@ public:
private:
/** Configures the appropriate matrix multiply routine
*
- * @param[in] input Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Input tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] weights Weights tensor. Data type supported: Same as @p input.
* @param[out] output Output tensor. Data types supported: Same as @p input,
* except for input of QASYMM8 type where output should be of S32 type.
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h b/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h
index b911fd064f..4a6bec03e6 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,7 @@ class NEGEMMInterleave4x4 : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output
*
- * @param[in] input First input tensor. Data types supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32
+ * @param[in] input First input tensor. Data types supported: U8/S8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ITensor *input, ITensor *output);
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h b/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
index 8b9ad136b4..3f8e731d01 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
@@ -38,13 +38,13 @@ class NEGEMMTranspose1xW : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output
*
- * @param[in] input First input tensor. Data type supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32/
+ * @param[in] input First input tensor. Data type supported: U8/S8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: same as @p input
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xW
*
- * @param[in] input First input tensor. Data type supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32/
+ * @param[in] input First input tensor. Data type supported: U8/S8/U16/S16/F16/U32/S32/F32
* @param[in] output Output tensor. Data type supported: same as @p input
*
* @return a status
diff --git a/arm_compute/runtime/NEON/functions/NEIm2Col.h b/arm_compute/runtime/NEON/functions/NEIm2Col.h
index caa8a011f6..d888b7e8f5 100644
--- a/arm_compute/runtime/NEON/functions/NEIm2Col.h
+++ b/arm_compute/runtime/NEON/functions/NEIm2Col.h
@@ -43,7 +43,7 @@ public:
/** Configure the im2col NEON kernel
*
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
* Note: QASYMM8 works only for has_bias = false
* @param[out] output The output tensor. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
@@ -56,7 +56,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEIm2Col
*
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
- * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
+ * while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
* Note: QASYMM8 works only for has_bias = false
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
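
[Editor's note] Assuming the configure overload documented above (kernel dims, convolution info, has_bias flag), a sketch; per the note, has_bias must be false for QASYMM8 inputs:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void run_im2col(const ITensor *src, ITensor *cols)
    {
        NEIm2Col im2col;
        // 3x3 patches, stride 1, pad 1; has_bias = false (required for QASYMM8).
        im2col.configure(src, cols, Size2D(3U, 3U), PadStrideInfo(1, 1, 1, 1), false);
        im2col.run();
    }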
diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 4b5ad28706..4f1f32fba5 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,7 +55,7 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data type supported: QS8/QS16/F16/F32
+ * and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
* @param[out] output Destination with the same dimensions, data type and number of channels of @p input
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
@@ -63,7 +63,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayer
*
* @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data type supported: QS8/QS16/F16/F32
+ * and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
* @param[in] output Destination with the same dimensions, data type and number of channels of @p input
* @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*
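
[Editor's note] A sketch of an LRN-style configuration; the 5-wide cross-map window is illustrative, and NormType::IN_MAP_1D is the in-map alternative:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void cross_map_normalize(ITensor *src, ITensor *dst)
    {
        NENormalizationLayer norm;
        norm.configure(src, dst, NormalizationLayerInfo(NormType::CROSS_MAP, 5));
        norm.run();
    }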
diff --git a/arm_compute/runtime/NEON/functions/NEPermute.h b/arm_compute/runtime/NEON/functions/NEPermute.h
index 58626cd2f2..580d24e415 100644
--- a/arm_compute/runtime/NEON/functions/NEPermute.h
+++ b/arm_compute/runtime/NEON/functions/NEPermute.h
@@ -40,7 +40,7 @@ public:
*
* @note Supported permutation vectors : [2, 0, 1], [1, 2, 0]
*
- * @param[in] input The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*/
@@ -49,7 +49,7 @@ public:
*
* @note Supported permutation vectors : [2, 0, 1], [1, 2, 0]
*
- * @param[in] input The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+ * @param[in] input The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] perm Permutation vector
*
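
[Editor's note] A sketch using one of the two supported permutation vectors noted above; the wrapper name is hypothetical:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void permute_example(const ITensor *src, ITensor *dst)
    {
        NEPermute permute;
        permute.configure(src, dst, PermutationVector(2U, 0U, 1U)); // [2, 0, 1]; [1, 2, 0] is the other supported vector
        permute.run();
    }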
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index ba96ae6cfa..371bb2e13e 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -37,22 +37,22 @@ class NEPixelWiseMultiplication : public INESimpleFunction
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 An input tensor. Data types supported: U8/QS8/S16/F16/F32.
+ * @param[in, out] input1 An input tensor. Data types supported: U8/S16/F16/F32.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
* @param[in, out] input2 An input tensor. Data types supported: same as @p input1.
* The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[out] output Output tensor. Data types supported: U8/QS8/S16/F16/F32.
+ * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32.
* @param[in] scale Scale to apply after multiplication.
- * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+ * Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy.
* @param[in] rounding_policy Rounding policy.
*/
void configure(ITensor *input1, ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
*
- * @param[in] input1 First tensor info input. Data types supported: U8/QS8/S16/F16/F32.
- * @param[in] input2 Second tensor info input. Data types supported: U8/QS8/S16/F16/F32.
- * @param[in] output Output tensor info. Data types supported: U8/QS8/S16/F16/F32.
+ * @param[in] input1 First tensor info input. Data types supported: U8/S16/F16/F32.
+ * @param[in] input2 Second tensor info input. Data types supported: U8/S16/F16/F32.
+ * @param[in] output Output tensor info. Data types supported: U8/S16/F16/F32.
* @param[in] scale Scale to apply after multiplication. Must be positive.
* @param[in] overflow_policy Overflow policy.
* @param[in] rounding_policy Rounding policy.
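
[Editor's note] A U8 sketch using the documented 1/255 scale, which keeps the U8*U8 product inside the U8 range; shapes and policies are illustrative:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"
    using namespace arm_compute;

    int main()
    {
        Tensor a, b, out;
        const TensorInfo info(TensorShape(16U, 16U), 1, DataType::U8);
        a.allocator()->init(info);
        b.allocator()->init(info);
        out.allocator()->init(info);

        NEPixelWiseMultiplication mul;
        mul.configure(&a, &b, &out, 1.f / 255.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP);

        for(Tensor *t : { &a, &b, &out })
            t->allocator()->allocate();
        mul.run();
        return 0;
    }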
diff --git a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
index 4224f75c77..26858d5cde 100644
--- a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
@@ -46,18 +46,18 @@ public:
NEPoolingLayer();
/** Set the input and output tensors.
*
- * @note QS8, QS16 and F16 are supported for pool sizes 2 and 3 only
+ * @note F16 is supported for pool sizes 2 and 3 only
*
- * @param[in, out] input Source tensor. (Written to only when padding != 0) Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in, out] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*/
void configure(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref NEPoolingLayer
*
- * @note QS8, QS16 and F16 are supported for pool sizes 2 and 3 only
+ * @note F16 is supported for pool sizes 2 and 3 only
*
- * @param[in] input Source tensor. (Written to only when padding != 0) Data types supported: QS8/QASYMM8/QS16/F16/F32.
+ * @param[in] input Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
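
[Editor's note] A minimal sketch of a 2x2 max pool with stride 2, a pool size that is also valid for F16 per the note above:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void max_pool_2x2(ITensor *src, ITensor *dst)
    {
        NEPoolingLayer pool;
        pool.configure(src, dst, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
        pool.run();
    }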
diff --git a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
index 0bab534ebc..a77a5f30dc 100644
--- a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
@@ -37,7 +37,7 @@ class NEReshapeLayer : public INESimpleFunction
public:
/** Initialise the kernel's inputs and outputs
*
- * @param[in] input First tensor input. Data type supported: U8/S8/QS8/QASYMM8//U16/S16/QS16/U32/S32/F16/F32
+ * @param[in] input First tensor input. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
*/
void configure(const ITensor *input, ITensor *output);
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index 3d981b6f75..61f46004d6 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -51,18 +51,18 @@ public:
NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in,out] input Source tensor. Data types supported: QASYMM8/QS8/QS16/F16/F32. If the width is not a
+ * @param[in,out] input Source tensor. Data types supported: QASYMM8/F16/F32. If the width is not a
* multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
* last value of each row to the nearest multiple.
* @param[out] output Destination tensor. Data types supported: same as @p input.
- * @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
+ * @param[in] beta (Optional) A scaling factor for the exponent.
*/
void configure(ITensor *input, ITensor *output, float beta = 1.0f);
/** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/QS8/QS16/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor. Data types supported: same as @p input
- * @param[in] beta (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
+ * @param[in] beta (Optional) A scaling factor for the exponent.
*
* @return a status
*/
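
[Editor's note] A minimal sketch; the memory manager argument is left at its nullptr default, and beta = 1.0f is the plain softmax:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    using namespace arm_compute;

    void softmax(ITensor *src, ITensor *dst)
    {
        NESoftmaxLayer sm; // optionally pass an IMemoryManager to share scratch buffers
        sm.configure(src, dst, 1.0f); // beta scales the exponent
        sm.run();
    }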
diff --git a/arm_compute/runtime/NEON/functions/NETranspose.h b/arm_compute/runtime/NEON/functions/NETranspose.h
index 6d1e107084..0234288b4b 100644
--- a/arm_compute/runtime/NEON/functions/NETranspose.h
+++ b/arm_compute/runtime/NEON/functions/NETranspose.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,13 +41,13 @@ class NETranspose : public INESimpleFunction
public:
/** Initialise the kernel's inputs and output
*
- * @param[in] input Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[out] output Output tensor. Data type supported: Same as @p input
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NETranspose
*
- * @param[in] input The input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input The input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
* @param[in] output The output tensor. Data types supported: Same as @p input
*
* @return a status
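
[Editor's note] Finally, a sketch of the validate-then-configure pattern these static functions enable; the transposed output swaps the (columns, rows) extents, and error handling here is illustrative:

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"
    #include <iostream>
    using namespace arm_compute;

    int main()
    {
        const TensorInfo in_info(TensorShape(27U, 64U), 1, DataType::F32);
        const TensorInfo out_info(TensorShape(64U, 27U), 1, DataType::F32);

        Status status = NETranspose::validate(&in_info, &out_info);
        if(!bool(status))
        {
            std::cerr << status.error_description() << "\n";
            return 1;
        }

        Tensor src, dst;
        src.allocator()->init(in_info);
        dst.allocator()->init(out_info);

        NETranspose transpose;
        transpose.configure(&src, &dst);
        src.allocator()->allocate();
        dst.allocator()->allocate();
        transpose.run();
        return 0;
    }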