author     Michele Di Giorgio <michele.digiorgio@arm.com>  2020-07-03 13:34:52 +0100
committer  Michele Di Giorgio <michele.digiorgio@arm.com>  2020-07-07 15:25:46 +0000
commit     f9b595adbdc3f6f51ffa2c1f2aa70d0262d0db2d (patch)
tree       382f3ab76355de2ed5114a5e4bfc7e8f7c0422f4 /arm_compute/core/NEON
parent     0cc50ed757f06f4f076e261cb7253dd67264dec6 (diff)
download   ComputeLibrary-f9b595adbdc3f6f51ffa2c1f2aa70d0262d0db2d.tar.gz
COMPMID-3532: Align data type support between doxygen and implementation - NEON
Change-Id: I70662cfb43890873b706b3f22b348f5d8cdd63ca
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3506
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-by: Sheri Zhang <sheri.zhang@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core/NEON')
-rw-r--r-- arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h   | 12
-rw-r--r-- arm_compute/core/NEON/kernels/NEChannelExtractKernel.h          |  6
-rw-r--r-- arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h     |  6
-rw-r--r-- arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h    | 38
-rw-r--r-- arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h        |  4
-rw-r--r-- arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h       | 10
-rw-r--r-- arm_compute/core/NEON/kernels/NEGaussian3x3Kernel.h             |  4
-rw-r--r-- arm_compute/core/NEON/kernels/NEIm2ColKernel.h                  |  4
-rw-r--r-- arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h       | 11
-rw-r--r-- arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h | 56
-rw-r--r-- arm_compute/core/NEON/kernels/NEReductionOperationKernel.h      |  4
-rw-r--r-- arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h           |  4
-rw-r--r-- arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h          |  4
13 files changed, 91 insertions, 72 deletions
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
index dfd08d9b06..4872edd90f 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h
@@ -74,6 +74,18 @@ public:
void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtractionKernel
*
+ * Valid configurations (Input1,Input2) -> Output:
+ *
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (QASYMM8,QASYMM8) -> QASYMM8
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+ * - (S16,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,S16) -> S16
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
+ *
* @note Convert policy cannot be WRAP if datatype is QASYMM8
*
* @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/QSYMM16/S16/F16/F32
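As a quick sanity check of the new table, here is a minimal validate-time sketch for the widening (U8,U8) -> S16 case; the shapes are illustrative and the snippet is not part of the patch:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // Both inputs U8, output widened to S16, as listed in the table above.
    const TensorInfo input1(TensorShape(16U, 16U), 1, DataType::U8);
    const TensorInfo input2(TensorShape(16U, 16U), 1, DataType::U8);
    const TensorInfo output(TensorShape(16U, 16U), 1, DataType::S16);
    // WRAP is rejected for quantized inputs (see the @note above); SATURATE is safe here.
    const Status s = NEArithmeticSubtractionKernel::validate(&input1, &input2, &output, ConvertPolicy::SATURATE);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
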
diff --git a/arm_compute/core/NEON/kernels/NEChannelExtractKernel.h b/arm_compute/core/NEON/kernels/NEChannelExtractKernel.h
index 8d62016fe5..d953ff33ed 100644
--- a/arm_compute/core/NEON/kernels/NEChannelExtractKernel.h
+++ b/arm_compute/core/NEON/kernels/NEChannelExtractKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -27,8 +27,6 @@
#include "arm_compute/core/NEON/INESimpleKernel.h"
#include "arm_compute/core/Types.h"
-#include <cstdint>
-
namespace arm_compute
{
class IMultiImage;
@@ -60,7 +58,7 @@ public:
*
* @param[in] input Source tensor. Formats supported: RGB888/RGBA8888/YUYV422/UYVY422
* @param[in] channel Channel to extract.
- * @param[out] output Destination tensor. Format supported: u8
+ * @param[out] output Destination tensor. Format supported: U8
*/
void configure(const ITensor *input, Channel channel, ITensor *output);
/** Set the input and output of the kernel
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
index 3792fb3bd7..2a85da28de 100644
--- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,13 +52,13 @@ public:
~NEDequantizationLayerKernel() = default;
/** Set input, output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
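The newly documented QASYMM8_SIGNED path can be exercised the same way; a hedged sketch in which the shape and quantization parameters are made up for illustration:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h"
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // Signed 8-bit asymmetric input, dequantized to F32.
    const TensorInfo input(TensorShape(32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, -10));
    const TensorInfo output(TensorShape(32U), 1, DataType::F32);
    const Status s = NEDequantizationLayerKernel::validate(&input, &output);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
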
diff --git a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
index b109ddd0f8..12d7fb50a9 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h
@@ -59,10 +59,10 @@ public:
/** Common signature for all the specialised arithmetic functions
*
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Dependent on subclass.
- * @param[in] window Region on which to execute the kernel.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Dependent on subclass.
+ * @param[in] window Region on which to execute the kernel.
*/
using ElementwiseFunction = void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window);
@@ -100,10 +100,10 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
*
- * @param[in] op Arithmetic operation to be executed.
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
+ * @param[in] op Arithmetic operation to be executed.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
void configure(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
@@ -131,9 +131,9 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
*
- * @param[in] input1 First tensor input info. Data types supported: F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
+ * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
@@ -168,9 +168,9 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel
*
- * @param[in] input1 First tensor input info. Data types supported: F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[out] output Output tensor info. Data types supported: Same as @p input1.
+ * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
* @return a Status
*/
@@ -189,10 +189,10 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEComparisonOperationKernel
*
- * @param[in] op Comparison operation to be executed.
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: U16/U32.
+ * @param[in] op Comparison operation to be executed.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: U8.
*/
void configure(ComparisonOperation op, const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
@@ -201,7 +201,7 @@ public:
* @param[in] op Comparison operation to be executed.
* @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
* @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: U16/U32.
+ * @param[in] output Output tensor info. Data types supported: U8.
*
* @return a Status
*/
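The corrected U8 output type for comparisons can be checked with a small validate-time sketch (shapes illustrative, not part of the patch):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    const TensorInfo input1(TensorShape(8U, 8U), 1, DataType::F32);
    const TensorInfo input2(TensorShape(8U, 8U), 1, DataType::F32);
    // Comparison kernels now document a U8 output, not U16/U32.
    const TensorInfo output(TensorShape(8U, 8U), 1, DataType::U8);
    const Status s = NEComparisonOperationKernel::validate(ComparisonOperation::Greater, &input1, &input2, &output);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
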
diff --git a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
index 02c390b6ba..c63618c256 100644
--- a/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
+++ b/arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h
@@ -60,7 +60,7 @@ public:
/** Function to configure the @ref NEElementwiseUnaryKernel
*
* @param[in] op Arithmetic operation to be executed.
- * @param[in] input First tensor input. Data types supported: F16/F32.
+ * @param[in] input First tensor input. Data types supported: F16/F32 (F16/F32/S32 for NEG/ABS operations).
* @param[out] output Output tensor. Data types supported: Same as @p input.
*/
void configure(ElementWiseUnary op, const ITensor *input, ITensor *output);
@@ -68,7 +68,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEElementwiseUnaryKernel
*
* @param[in] op Arithmetic operation to be executed.
- * @param[in] input First tensor input info. Data types supported: F16/F32.
+ * @param[in] input First tensor input info. Data types supported: F16/F32 (F16/F32/S32 for NEG/ABS operations).
* @param[in] output Output tensor info. Data types supported: Same as @p input.
*
* @return a Status
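Per the clarified documentation, S32 is accepted only for NEG/ABS; a minimal sketch of the NEG case (shape illustrative):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEElementwiseUnaryKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // S32 input/output is valid for NEG (and ABS); the other unary ops need F16/F32.
    const TensorInfo input(TensorShape(64U), 1, DataType::S32);
    const TensorInfo output(TensorShape(64U), 1, DataType::S32);
    const Status s = NEElementwiseUnaryKernel::validate(ElementWiseUnary::NEG, &input, &output);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
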
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
index 1e472f5252..dcee3da2d5 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h
@@ -49,7 +49,7 @@ public:
/** Initialise the kernel's input and output.
*
- * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+ * @param[in] input Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
* @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
* @param[in] info Kernel metadata:
* - k Number of matrix columns/rows depending on the type of reduction.
@@ -82,7 +82,7 @@ public:
}
/** Initialise the kernel's input and output.
*
- * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+ * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
* @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
* @param[in] info Kernel metadata:
* - k (num_mtx_a_cols) Number of matrix A columns
@@ -93,7 +93,7 @@ public:
void configure(const ITensor *mtx_a, ITensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override;
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixAReductionKernel
*
- * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
+ * @param[in] mtx_a Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
* @param[in] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
* @param[in] info Kernel metadata:
* - k (num_mtx_a_cols) Number of matrix A columns
@@ -131,7 +131,7 @@ public:
}
/** Initialise the kernel's input and output.
*
- * @param[in] mtx_b Input tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED
+ * @param[in] mtx_b Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
* @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
* @param[in] info Kernel metadata:
* - k (num_mtx_b_rows) Number of matrix B rows.
@@ -142,7 +142,7 @@ public:
void configure(const ITensor *mtx_b, ITensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override;
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixBReductionKernel
*
- * @param[in] mtx_b Input tensor. Data type supported: Data type supported: QASYMM8/QASYMM8_SIGNED
+ * @param[in] mtx_b Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
* @param[in] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
* @param[in] info Kernel metadata:
* - k (num_mtx_b_rows) Number of matrix B rows.
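A hedged sketch of the matrix-A reduction with one of the newly documented types; it assumes the GEMMLowpReductionKernelInfo fields (k, is_reshaped, scalar, mul_by_scalar) from arm_compute/core/KernelDescriptors.h, and the shapes are illustrative:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // A 4x16 QSYMM8 matrix A: shape is (columns, rows) in ACL.
    const TensorInfo mtx_a(TensorShape(16U, 4U), 1, DataType::QSYMM8);
    // One S32 sum per row of matrix A.
    const TensorInfo vector_sum_row(TensorShape(4U), 1, DataType::S32);
    GEMMLowpReductionKernelInfo info{};
    info.k             = 16;    // number of matrix A columns
    info.is_reshaped   = false; // matrix A has not been interleaved
    info.scalar        = 0;     // no extra scalar multiplier
    info.mul_by_scalar = false;
    const Status s = NEGEMMLowpMatrixAReductionKernel::validate(&mtx_a, &vector_sum_row, info);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
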
diff --git a/arm_compute/core/NEON/kernels/NEGaussian3x3Kernel.h b/arm_compute/core/NEON/kernels/NEGaussian3x3Kernel.h
index fa92eef1b7..099b226d2f 100644
--- a/arm_compute/core/NEON/kernels/NEGaussian3x3Kernel.h
+++ b/arm_compute/core/NEON/kernels/NEGaussian3x3Kernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019 ARM Limited.
+ * Copyright (c) 2016-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,7 @@ public:
/** Set the source, destination and border mode of the kernel
*
* @param[in] input Source tensor. Data type supported: U8
- * @param[out] output Destination tensor. Data type supported: S16
+ * @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] border_undefined True if the border mode is undefined. False if it's replicate or constant.
*/
void configure(const ITensor *input, ITensor *output, bool border_undefined);
diff --git a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h
index 1c358b379d..97cdfb4958 100644
--- a/arm_compute/core/NEON/kernels/NEIm2ColKernel.h
+++ b/arm_compute/core/NEON/kernels/NEIm2ColKernel.h
@@ -79,7 +79,7 @@ public:
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
* Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32
- * Note: QASYMM8 works only for has_bias = false
+ * Note: QASYMM8/QASYMM8_SIGNED works only for has_bias = false
* @param[out] output The output tensor. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
@@ -94,7 +94,7 @@ public:
* @param[in] input The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
* while every optional dimension from 4 and above represent a batch of inputs.
* Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32
- * Note: QASYMM8 works only for has_bias = false
+ * Note: QASYMM8/QASYMM8_SIGNED works only for has_bias = false
* @param[in] output The output tensor. Data types supported: Same as @p input
* @param[in] kernel_dims The kernel dimensions (width and height).
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
diff --git a/arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h b/arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h
index 269317b6c1..7160d5d328 100644
--- a/arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h
@@ -54,7 +54,7 @@ public:
*
* @note Output shape must be equal to the shape of the original input to pool.
*
- * @param[in] input Source tensor. Data types supported: F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[out] indices The indices of the maximal values. Data type supported: U32.
* @param[out] output Destination tensor. Data types supported: Same as @p input.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
@@ -62,9 +62,9 @@ public:
void configure(const ITensor *input, const ITensor *indices, ITensor *output, const PoolingLayerInfo &pool_info);
/** Static function to check if given info will lead to a valid configuration of @ref NEMaxUnpoolingLayerKernel
*
- * @param[in] input Source tensor. Data types supported: F16/F32.
- * @param[in] output Destination tensor. Data types supported: Same as @p input.
- * @param[in] indices The indices of the maximal values. Data type supported: U32.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] output Destination tensor info. Data types supported: Same as @p input.
+ * @param[in] indices Tensor info of the indices of the maximal values. Data type supported: U32.
* @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
*
* @return a status
@@ -89,9 +89,6 @@ private:
const ITensor *_input;
ITensor *_output;
const ITensor *_indices;
- PoolingLayerInfo _pool_info;
- DataLayout _data_layout;
- unsigned int _num_elems_processed_per_iteration;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_NEMAXUNPOOLINGLAYERKERNEL_H */
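A hedged sketch of the new quantized path, following the parameter order documented above; the PoolingLayerInfo constructor used here (pool type, pool size, data layout, pad/stride) and all shapes and quantization values are assumptions for illustration:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h"
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // A 4x4 max-pooled QASYMM8 map is unpooled back to the original 8x8 shape.
    const TensorInfo input(TensorShape(4U, 4U, 1U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128));
    const TensorInfo indices(TensorShape(4U, 4U, 1U), 1, DataType::U32);
    const TensorInfo output(TensorShape(8U, 8U, 1U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128));
    // Pool size 2, stride 2, no padding: the setup whose max indices are being undone.
    const PoolingLayerInfo pool_info(PoolingType::MAX, 2, DataLayout::NCHW, PadStrideInfo(2, 2, 0, 0));
    const Status s = NEMaxUnpoolingLayerKernel::validate(&input, &output, &indices, pool_info);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
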
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index 5483fae565..2263e480a9 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -53,20 +53,26 @@ public:
~NEPixelWiseMultiplicationKernel() = default;
/** Initialise the kernel's input, output and border mode.
*
+ * Valid configurations (Input1,Input2) -> Output:
+ *
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,U8) -> S16
+ * - (S16,S16) -> S16
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
+ * - (QASYMM8,QASYMM8) -> QASYMM8
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+ * - (QSYMM16,QSYMM16) -> QSYMM16
+ * - (QSYMM16,QSYMM16) -> S32
+ *
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
*
- * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
- * @param[in] input2 An input tensor. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, QSYMM16 (only if @p input1 is QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
- * @param[out] output Output tensor. Data types supported:
- * - U8, only if both inputs are U8.
- * - QASYMM8, only if both inputs are QASYMM8.
- * - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
- * - S16.
- * - QSYMM16, only if both inputs are QSYMM16.
- * - S32, only if both inputs are QSYMM16.
- * - F16, only if @p input1 is F16.
- * - F32, only if both inputs are F32.
+ * @param[in] input1 First input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
+ * @param[in] input2 Second input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
+ * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if datatype is QASYMM8, QASYMM8_SIGNED or QSYMM16.
@@ -75,20 +81,26 @@ public:
void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplicationKernel
*
+ * Valid configurations (Input1,Input2) -> Output:
+ *
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,U8) -> S16
+ * - (S16,S16) -> S16
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
+ * - (QASYMM8,QASYMM8) -> QASYMM8
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+ * - (QSYMM16,QSYMM16) -> QSYMM16
+ * - (QSYMM16,QSYMM16) -> S32
+ *
* @note For @p scale equal to 1/255 only round to nearest even (implemented as round half up) is supported.
* For all other scale values only round to zero (implemented as round towards minus infinity) is supported.
*
- * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/QSYMM16/S16/F16/F32
- * @param[in] input2 An input tensor info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), QASYMM8_SIGNED (only if @p input1 is QASYMM8_SIGNED), S16, QSYMM16 (only if @p input1 is QSYMM16), F16 (only if @p input1 is F16), F32 (only if @p input1 is F32).
- * @param[in] output Output tensor info. Data types supported:
- * - U8, only if both inputs are U8.
- * - QASYMM8, only if both inputs are QASYMM8.
- * - QASYMM8_SIGNED, only if @p input1 is QASYMM8_SIGNED.
- * - S16.
- * - QSYMM16, only if both inputs are QSYMM16.
- * - S32, only if both inputs are QSYMM16.
- * - F16, only if @p input1 is F16.
- * - F32, only if both inputs are F32.
+ * @param[in] input1 First input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
+ * @param[in] input2 Second input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32
+ * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32/S32
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. ConvertPolicy cannot be WRAP if datatype is QASYMM8, QASYMM8_SIGNED or QSYMM16.
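One row of the table as a validate-time sketch; scale 1.0 is a legal 1/2^n value and, per the note above, requires round-to-zero (shapes illustrative, not part of the patch):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // (U8,U8) -> S16 with scale 1.0 (i.e. 1/2^0) and round-to-zero.
    const TensorInfo input1(TensorShape(16U, 16U), 1, DataType::U8);
    const TensorInfo input2(TensorShape(16U, 16U), 1, DataType::U8);
    const TensorInfo output(TensorShape(16U, 16U), 1, DataType::S16);
    const Status s = NEPixelWiseMultiplicationKernel::validate(&input1, &input2, &output, 1.f,
                                                               ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
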
diff --git a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
index 28cca4987b..523c812f7d 100644
--- a/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEReductionOperationKernel.h
@@ -59,7 +59,7 @@ public:
/** Set the source, destination of the kernel
*
- * @param[in] input Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW.
+ * @param[in] input Source tensor. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32/S32. Data layouts supported: NCHW.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input, S32 for ARG_MIN/ARG_MAX.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0
@@ -69,7 +69,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEReductionOperationKernel.
*
- * @param[in] input Source tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32. Data layouts supported: NCHW.
+ * @param[in] input Source tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/F16/F32/S32. Data layouts supported: NCHW.
* @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input, S32 for ARG_MIN/ARG_MAX.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0
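The new S32 support can be checked with a SUM reduction along axis 0; a minimal sketch with illustrative shapes:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEReductionOperationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // Reduce an 8x4 S32 tensor along axis 0; the output keeps the same rank.
    const TensorInfo input(TensorShape(8U, 4U), 1, DataType::S32);
    const TensorInfo output(TensorShape(1U, 4U), 1, DataType::S32);
    const Status s = NEReductionOperationKernel::validate(&input, &output, 0, ReductionOperation::SUM);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
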
diff --git a/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h b/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h
index 9038eda9b2..5bd702aef6 100644
--- a/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h
@@ -52,7 +52,7 @@ public:
~NEUpsampleLayerKernel() = default;
/** Set the input output tensors.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[out] output Destination tensor. Data types supported: same as @p input.
* @param[in] info Contains stride information described in @ref Size2D.
* @param[in] policy Defines the policy to fill the intermediate pixels.
@@ -61,7 +61,7 @@ public:
void configure(const ITensor *input, ITensor *output, const Size2D &info, const InterpolationPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEUpsampleLayerKernel
*
- * @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
+ * @param[in] input Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] output Destination tensor info. Data types supported: same as @p input.
* @param[in] info Contains stride information described in @ref Size2D.
* @param[in] policy Defines the policy to fill the intermediate pixels.
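A sketch of the newly documented QASYMM8_SIGNED path, assuming a 2x2 upsampling stride and nearest-neighbour interpolation; shapes and quantization parameters are illustrative:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEUpsampleLayerKernel.h"
#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"

int main()
{
    using namespace arm_compute;
    // 8x8 input upsampled to 16x16; input and output share the quantization info.
    const TensorInfo input(TensorShape(8U, 8U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, 3));
    const TensorInfo output(TensorShape(16U, 16U, 2U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, 3));
    const Status s = NEUpsampleLayerKernel::validate(&input, &output, Size2D(2U, 2U),
                                                     InterpolationPolicy::NEAREST_NEIGHBOR);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}
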
diff --git a/arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h b/arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h
index b68cb50c7b..c6e4053293 100644
--- a/arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h
@@ -76,7 +76,7 @@ public:
*
* @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
* and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared.
- * Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/FP16/F32
+ * Data types supported: All
* @param[in] bias The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
* dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
* @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
@@ -87,7 +87,7 @@ public:
*
* @param[in] input The input tensor to convert. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM] if shared,
* and 5D tensor with dimensions [kernel_x, kernel_y, IFM, OFM, num_patches] if unshared.
- * Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32
+ * Data types supported: All
* @param[in] biases The shared biases tensor to append. Bias is 1D tensor with dimensions [OFM] if shared and 2D tensor with
* dimensions [OFM, num_patches] if unshared. Data types supported: Same as @p input
* @warning Appending biases to weights reshaped matrix is not supported for quantized asymmetric types.
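Since the type list is now "All", a hedged sketch with F32 weights; it assumes compute_weights_reshaped_shape() from arm_compute/core/utils/misc/ShapeCalculator.h to derive the output shape, and passes nullptr for the optional bias:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/NEON/kernels/NEWeightsReshapeKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

int main()
{
    using namespace arm_compute;
    // Shared 3x3 weights: [kernel_x, kernel_y, IFM, OFM].
    const TensorInfo weights(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32);
    // Let the library compute the reshaped output shape (no bias appended).
    const TensorInfo reshaped(misc::shape_calculator::compute_weights_reshaped_shape(weights, false), 1, DataType::F32);
    const Status s = NEWeightsReshapeKernel::validate(&weights, nullptr, &reshaped);
    return (s.error_code() == ErrorCode::OK) ? 0 : 1;
}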