aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arm_compute/core/CL/CLKernelLibrary.h4
-rw-r--r--arm_compute/core/CL/ICLMultiHOG.h4
-rw-r--r--arm_compute/core/CL/ICLMultiImage.h4
-rw-r--r--arm_compute/core/FixedPoint.h126
-rw-r--r--arm_compute/core/GLES_COMPUTE/IGCKernel.h2
-rw-r--r--arm_compute/core/IArray.h2
-rw-r--r--arm_compute/core/IMultiHOG.h4
-rw-r--r--arm_compute/core/IMultiImage.h4
-rw-r--r--arm_compute/core/ITensorInfo.h10
-rw-r--r--arm_compute/core/NEON/NEFixedPoint.h20
-rw-r--r--arm_compute/core/NEON/kernels/NEActivationLayerKernel.h8
-rw-r--r--arm_compute/core/NEON/kernels/NEDerivativeKernel.h6
-rw-r--r--arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h34
-rw-r--r--arm_compute/core/NEON/kernels/NEHistogramKernel.h20
-rw-r--r--arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h12
-rw-r--r--arm_compute/core/NEON/kernels/NEWarpKernel.h6
-rw-r--r--arm_compute/core/Utils.h16
-rw-r--r--arm_compute/core/Validate.h346
-rw-r--r--arm_compute/runtime/CL/CLMultiImage.h20
-rw-r--r--arm_compute/runtime/CL/functions/CLGEMMLowp.h40
-rw-r--r--arm_compute/runtime/CL/functions/CLIntegralImage.h8
-rw-r--r--arm_compute/runtime/MultiImage.h28
-rw-r--r--arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h24
-rw-r--r--arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h32
-rw-r--r--arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h10
-rw-r--r--arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h24
-rw-r--r--arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h42
-rw-r--r--arm_compute/runtime/NEON/functions/NEIntegralImage.h8
-rwxr-xr-xscripts/format_doxygen.py45
-rw-r--r--src/core/CL/cl_kernels/canny.cl20
-rw-r--r--src/core/CL/cl_kernels/depthwise_convolution.cl60
-rw-r--r--src/core/CL/cl_kernels/depthwise_convolution_quantized.cl70
-rw-r--r--src/core/CL/cl_kernels/fixed_point.h150
-rw-r--r--src/core/CL/cl_kernels/warp_helpers.h6
-rw-r--r--src/core/CL/cl_kernels/warp_perspective.cl2
-rw-r--r--tests/IArrayAccessor.h2
-rw-r--r--tests/validation/Helpers.h4
37 files changed, 617 insertions, 606 deletions
diff --git a/arm_compute/core/CL/CLKernelLibrary.h b/arm_compute/core/CL/CLKernelLibrary.h
index 8e2bb66684..25c7f75ba6 100644
--- a/arm_compute/core/CL/CLKernelLibrary.h
+++ b/arm_compute/core/CL/CLKernelLibrary.h
@@ -227,8 +227,8 @@ public:
return _kernel_path;
};
/** Gets the source of the selected program
- *
- * @param[in] program_name Program name.
+ *
+ * @param[in] program_name Program name.
*/
std::string get_program_source(const std::string &program_name);
/** Sets the CL context used to create programs.
diff --git a/arm_compute/core/CL/ICLMultiHOG.h b/arm_compute/core/CL/ICLMultiHOG.h
index 9f3c775230..90082a611f 100644
--- a/arm_compute/core/CL/ICLMultiHOG.h
+++ b/arm_compute/core/CL/ICLMultiHOG.h
@@ -35,14 +35,14 @@ class ICLMultiHOG : public IMultiHOG
public:
/** Return a pointer to the requested OpenCL HOG model
*
- * @param[in] index The index of the wanted OpenCL HOG model.
+ * @param[in] index The index of the wanted OpenCL HOG model.
*
* @return A pointer pointed to the HOG model
*/
virtual ICLHOG *cl_model(size_t index) = 0;
/** Return a constant pointer to the requested OpenCL HOG model
*
- * @param[in] index The index of the wanted OpenCL HOG model.
+ * @param[in] index The index of the wanted OpenCL HOG model.
*
* @return A constant pointer pointed to the OpenCL HOG model
*/
diff --git a/arm_compute/core/CL/ICLMultiImage.h b/arm_compute/core/CL/ICLMultiImage.h
index e8705b1824..774175607b 100644
--- a/arm_compute/core/CL/ICLMultiImage.h
+++ b/arm_compute/core/CL/ICLMultiImage.h
@@ -37,14 +37,14 @@ class ICLMultiImage : public IMultiImage
public:
/** Return a pointer to the requested OpenCL plane of the image.
*
- * @param[in] index The index of the wanted planed.
+ * @param[in] index The index of the wanted planed.
*
* @return A pointer pointed to the OpenCL plane
*/
virtual ICLImage *cl_plane(unsigned int index) = 0;
/** Return a constant pointer to the requested OpenCL plane of the image.
*
- * @param[in] index The index of the wanted planed.
+ * @param[in] index The index of the wanted planed.
*
* @return A constant pointer pointed to the OpenCL plane
*/
diff --git a/arm_compute/core/FixedPoint.h b/arm_compute/core/FixedPoint.h
index 82c2d3347e..6e00500b10 100644
--- a/arm_compute/core/FixedPoint.h
+++ b/arm_compute/core/FixedPoint.h
@@ -225,96 +225,96 @@ qint16_t sqmull_qs8(qint8_t a, qint8_t b, int fixed_point_position);
qint32_t sqmull_qs16(qint16_t a, qint16_t b, int fixed_point_position);
/** 16 bit fixed point scalar saturating multiply
-*
-* @param[in] a First 16 bit fixed point input
-* @param[in] b Second 16 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 16 bit fixed point multiplication. The result is saturated in case of overflow
-*/
+ *
+ * @param[in] a First 16 bit fixed point input
+ * @param[in] b Second 16 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 16 bit fixed point multiplication. The result is saturated in case of overflow
+ */
qint16_t sqmul_qs16(qint16_t a, qint16_t b, int fixed_point_position);
/** 8 bit fixed point scalar inverse square root
-*
-* @param[in] a 8 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 8 bit fixed point inverse square root.
-*/
+ *
+ * @param[in] a 8 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 8 bit fixed point inverse square root.
+ */
qint8_t sinvsqrt_qs8(qint8_t a, int fixed_point_position);
/** 16 bit fixed point scalar inverse square root
-*
-* @param[in] a 16 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 16 bit fixed point inverse square root.
-*/
+ *
+ * @param[in] a 16 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 16 bit fixed point inverse square root.
+ */
qint16_t sinvsqrt_qs16(qint16_t a, int fixed_point_position);
/** 8 bit fixed point scalar division
-*
-* @param[in] a First 8 bit fixed point input
-* @param[in] b Second 8 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 8 bit fixed point division.
-*/
+ *
+ * @param[in] a First 8 bit fixed point input
+ * @param[in] b Second 8 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 8 bit fixed point division.
+ */
qint8_t sdiv_qs8(qint8_t a, qint8_t b, int fixed_point_position);
/** 16 bit fixed point scalar division
-*
-* @param[in] a First 16 bit fixed point input
-* @param[in] b Second 16 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 16 bit fixed point division.
-*/
+ *
+ * @param[in] a First 16 bit fixed point input
+ * @param[in] b Second 16 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 16 bit fixed point division.
+ */
qint16_t sdiv_qs16(qint16_t a, qint16_t b, int fixed_point_position);
/** 8 bit fixed point scalar exponential
-*
-* @param[in] a 8 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 8 bit fixed point exponential.
-*/
+ *
+ * @param[in] a 8 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 8 bit fixed point exponential.
+ */
qint8_t sqexp_qs8(qint8_t a, int fixed_point_position);
/** 16 bit fixed point scalar exponential
-*
-* @param[in] a 16 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 16 bit fixed point exponential.
-*/
+ *
+ * @param[in] a 16 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 16 bit fixed point exponential.
+ */
qint16_t sqexp_qs16(qint16_t a, int fixed_point_position);
/** 16 bit fixed point scalar exponential
-*
-* @param[in] a 16 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 16 bit fixed point exponential.
-*/
+ *
+ * @param[in] a 16 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 16 bit fixed point exponential.
+ */
qint16_t sexp_qs16(qint16_t a, int fixed_point_position);
/** 8 bit fixed point scalar logarithm
-*
-* @param[in] a 8 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 8 bit fixed point logarithm.
-*/
+ *
+ * @param[in] a 8 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 8 bit fixed point logarithm.
+ */
qint8_t slog_qs8(qint8_t a, int fixed_point_position);
/** 16 bit fixed point scalar logarithm
-*
-* @param[in] a 16 bit fixed point input
-* @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
-*
-* @return The result of the 16 bit fixed point logarithm.
-*/
+ *
+ * @param[in] a 16 bit fixed point input
+ * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+ *
+ * @return The result of the 16 bit fixed point logarithm.
+ */
qint16_t slog_qs16(qint16_t a, int fixed_point_position);
/** Convert an 8 bit fixed point to float
diff --git a/arm_compute/core/GLES_COMPUTE/IGCKernel.h b/arm_compute/core/GLES_COMPUTE/IGCKernel.h
index 0d3bfb30fd..11b2b17e51 100644
--- a/arm_compute/core/GLES_COMPUTE/IGCKernel.h
+++ b/arm_compute/core/GLES_COMPUTE/IGCKernel.h
@@ -58,7 +58,7 @@ public:
*
* @param[in] binding Tensor's binding point.
* @param[in] shift Number of bits to be shift for offset calculation
- */
+ */
BufferParam(const unsigned int binding, const unsigned int shift)
: binding_point(binding), buffer_data_type_shift(shift)
{
diff --git a/arm_compute/core/IArray.h b/arm_compute/core/IArray.h
index 960e18f3df..bc01df9817 100644
--- a/arm_compute/core/IArray.h
+++ b/arm_compute/core/IArray.h
@@ -124,7 +124,7 @@ public:
/** Resizes the array to contain "num" elements. If "num" is smaller than the maximum array size, the content is reduced to its first "num" elements,
* "num" elements can't be bigger than the maximum number of values which can be stored in this array.
*
- * @param[in] num The new array size in number of elements
+ * @param[in] num The new array size in number of elements
*/
void resize(size_t num)
{
diff --git a/arm_compute/core/IMultiHOG.h b/arm_compute/core/IMultiHOG.h
index e91da75398..5e9ee3a4ff 100644
--- a/arm_compute/core/IMultiHOG.h
+++ b/arm_compute/core/IMultiHOG.h
@@ -43,14 +43,14 @@ public:
virtual size_t num_models() const = 0;
/** Return a pointer to the requested HOG model
*
- * @param[in] index The index of the wanted HOG model.
+ * @param[in] index The index of the wanted HOG model.
*
* @return A pointer pointed to the HOG model
*/
virtual IHOG *model(size_t index) = 0;
/** Return a const pointer to the requested HOG model
*
- * @param[in] index The index of the wanted HOG model.
+ * @param[in] index The index of the wanted HOG model.
*
* @return A const pointer pointed to the HOG model
*/
diff --git a/arm_compute/core/IMultiImage.h b/arm_compute/core/IMultiImage.h
index 6ed3c785ca..0d11c2c6b8 100644
--- a/arm_compute/core/IMultiImage.h
+++ b/arm_compute/core/IMultiImage.h
@@ -43,14 +43,14 @@ public:
virtual const MultiImageInfo *info() const = 0;
/** Return a pointer to the requested plane of the image.
*
- * @param[in] index The index of the wanted planed.
+ * @param[in] index The index of the wanted planed.
*
* @return A pointer pointed to the plane
*/
virtual IImage *plane(unsigned int index) = 0;
/** Return a constant pointer to the requested plane of the image.
*
- * @param[in] index The index of the wanted planed.
+ * @param[in] index The index of the wanted planed.
*
* @return A constant pointer pointed to the plane
*/
diff --git a/arm_compute/core/ITensorInfo.h b/arm_compute/core/ITensorInfo.h
index 4f69442b48..9a67712f3d 100644
--- a/arm_compute/core/ITensorInfo.h
+++ b/arm_compute/core/ITensorInfo.h
@@ -90,11 +90,11 @@ public:
*/
virtual ITensorInfo &set_fixed_point_position(int fixed_point_position) = 0;
/** Set the quantization settings (scale and offset) of the tensor.
- *
- * @param[in] quantization_info QuantizationInfo containing the scale and offset
- *
- * @return Reference to this ITensorInfo object
- */
+ *
+ * @param[in] quantization_info QuantizationInfo containing the scale and offset
+ *
+ * @return Reference to this ITensorInfo object
+ */
virtual ITensorInfo &set_quantization_info(QuantizationInfo quantization_info) = 0;
/** Resets the padding settings of the tensor.
*
diff --git a/arm_compute/core/NEON/NEFixedPoint.h b/arm_compute/core/NEON/NEFixedPoint.h
index f8579e08b5..5719b63618 100644
--- a/arm_compute/core/NEON/NEFixedPoint.h
+++ b/arm_compute/core/NEON/NEFixedPoint.h
@@ -179,19 +179,19 @@ void vst1_qs16(qint16_t *addr, qint16x4_t b);
void vst1q_qs8(qint8_t *addr, qint8x16_t b);
/** Store a single 16 bit fixed point vector to memory (8 elements)
-*
-* @param[in] addr Memory address where the 16 bit fixed point vector should be stored
-* @param[in] b 16 bit fixed point vector to store
-*
-*/
+ *
+ * @param[in] addr Memory address where the 16 bit fixed point vector should be stored
+ * @param[in] b 16 bit fixed point vector to store
+ *
+ */
void vst1q_qs16(qint16_t *addr, qint16x8_t b);
/** Store two 16 bit fixed point vector to memory (8x2 elements)
-*
-* @param[in] addr Memory address where the 16 bit fixed point vectors should be stored
-* @param[in] b 16 bit fixed point vectors to store
-*
-*/
+ *
+ * @param[in] addr Memory address where the 16 bit fixed point vectors should be stored
+ * @param[in] b 16 bit fixed point vectors to store
+ *
+ */
void vst2q_qs16(qint16_t *addr, qint16x8x2_t b);
/** 16 bit fixed point vector saturating narrow (8 elements)
diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
index a3fd3feb58..ef51cbe841 100644
--- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
@@ -72,27 +72,27 @@ private:
using ActivationFunctionExecutorPtr = void (NEActivationLayerKernel::*)(const Window &window);
/** Function to apply an activation function on a tensor.
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
template <ActivationLayerInfo::ActivationFunction F, typename T>
typename std::enable_if<std::is_same<T, float>::value, void>::type activation(const Window &window);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Function to apply an activation function on a tensor.
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
template <ActivationLayerInfo::ActivationFunction F, typename T>
typename std::enable_if<std::is_same<T, float16_t>::value, void>::type activation(const Window &window);
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
/** Function to apply an activation function on a tensor.
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
template <ActivationLayerInfo::ActivationFunction F, typename T>
typename std::enable_if<std::is_same<T, qint8_t>::value, void>::type activation(const Window &window);
/** Function to apply an activation function on a tensor.
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
template <ActivationLayerInfo::ActivationFunction F, typename T>
typename std::enable_if<std::is_same<T, qint16_t>::value, void>::type activation(const Window &window);
diff --git a/arm_compute/core/NEON/kernels/NEDerivativeKernel.h b/arm_compute/core/NEON/kernels/NEDerivativeKernel.h
index 7613b586d0..5d46516f68 100644
--- a/arm_compute/core/NEON/kernels/NEDerivativeKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDerivativeKernel.h
@@ -64,17 +64,17 @@ public:
private:
/** Function to perform derivative along the X direction on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void derivative_x(const Window &window);
/** Function to perform derivative along the Y direction on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void derivative_y(const Window &window);
/** Function to perform derivative along the X and Y direction on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void derivative_xy(const Window &window);
/** Common signature for all the specialised derivative functions
diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
index 654dee21af..7684350c0f 100644
--- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h
@@ -58,27 +58,27 @@ public:
NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default;
/** Initialise the kernel's input and output.
*
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8
- * @param[in] result_offset Offset to be added to each element of the input matrix
- * @param[in] result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
- * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ * @param[in] input Input tensor. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8
+ * @param[in] result_offset Offset to be added to each element of the input matrix
+ * @param[in] result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
+ * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
*/
void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
*
- * @param[in] input Input tensor. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ * @param[in] input Input tensor. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
*/
static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
diff --git a/arm_compute/core/NEON/kernels/NEHistogramKernel.h b/arm_compute/core/NEON/kernels/NEHistogramKernel.h
index 0fa911dbf0..672472e082 100644
--- a/arm_compute/core/NEON/kernels/NEHistogramKernel.h
+++ b/arm_compute/core/NEON/kernels/NEHistogramKernel.h
@@ -82,28 +82,28 @@ public:
private:
/** Function to merge multiple partial histograms.
*
- * @param[out] global_hist Pointer to the final histogram.
- * @param[in] local_hist Pointer to the partial histograms.
- * @param[in] bins Number of bins.
+ * @param[out] global_hist Pointer to the final histogram.
+ * @param[in] local_hist Pointer to the partial histograms.
+ * @param[in] bins Number of bins.
*/
void merge_histogram(uint32_t *global_hist, const uint32_t *local_hist, size_t bins);
/** Function to merge multiple minimum values of partial histograms.
*
- * @param[out] global_min Pointer to the global min value.
- * @param[in] local_min Local min value.
+ * @param[out] global_min Pointer to the global min value.
+ * @param[in] local_min Local min value.
*/
void merge_min(uint8_t *global_min, const uint8_t &local_min);
/** Function to perform histogram on the given window
- *
- * @param[in] win Region on which to execute the kernel
- * @param[in] info Info about the executing thread
+ *
+ * @param[in] win Region on which to execute the kernel
+ * @param[in] info Info about the executing thread
*/
void histogram_U8(Window win, const ThreadInfo &info);
/** Function to perform histogram on the given window where histogram is
* of fixed size 256 without ranges and offsets.
*
- * @param[in] win Region on which to execute the kernel
- * @param[in] info Info about the executing thread
+ * @param[in] win Region on which to execute the kernel
+ * @param[in] info Info about the executing thread
*/
void histogram_fixed_U8(Window win, const ThreadInfo &info);
/** Pre-calculate the pixel windowing for every possible pixel
diff --git a/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h b/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h
index 46b2a8ddb4..76c6163607 100644
--- a/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h
+++ b/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h
@@ -66,17 +66,17 @@ public:
private:
/** Function to perform magnitude on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void magnitude(const Window &window);
/** Function to perform phase on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void phase(const Window &window);
/** Function to perform magnitude and phase on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void magnitude_phase(const Window &window);
@@ -130,17 +130,17 @@ public:
private:
/** Function to perform magnitude on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void magnitude(const Window &window);
/** Function to perform phase on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void phase(const Window &window);
/** Function to perform magnitude and phase on the given window
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
void magnitude_phase(const Window &window);
diff --git a/arm_compute/core/NEON/kernels/NEWarpKernel.h b/arm_compute/core/NEON/kernels/NEWarpKernel.h
index 3a1cab1585..d7cb82f27e 100644
--- a/arm_compute/core/NEON/kernels/NEWarpKernel.h
+++ b/arm_compute/core/NEON/kernels/NEWarpKernel.h
@@ -66,17 +66,17 @@ public:
protected:
/** function to perform warp affine or warp perspective on the given window when border mode == UNDEFINED
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
virtual void warp_undefined(const Window &window) = 0;
/** function to perform warp affine or warp perspective on the given window when border mode == CONSTANT
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
virtual void warp_constant(const Window &window) = 0;
/** function to perform warp affine or warp perspective on the given window when border mode == REPLICATE
*
- * @param[in] window Region on which to execute the kernel
+ * @param[in] window Region on which to execute the kernel
*/
virtual void warp_replicate(const Window &window) = 0;
/** Common signature for all the specialised warp functions
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index b8c5b34e5a..af9cf23548 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -384,14 +384,14 @@ inline DataType get_promoted_data_type(DataType dt)
}
/** Separate a 2D convolution into two 1D convolutions
-*
-* @param[in] conv 2D convolution
-* @param[out] conv_col 1D vertical convolution
-* @param[out] conv_row 1D horizontal convolution
-* @param[in] size Size of the 2D convolution
-*
-* @return true if the separation was successful
-*/
+ *
+ * @param[in] conv 2D convolution
+ * @param[out] conv_col 1D vertical convolution
+ * @param[out] conv_row 1D horizontal convolution
+ * @param[in] size Size of the 2D convolution
+ *
+ * @return true if the separation was successful
+ */
inline bool separate_matrix(const int16_t *conv, int16_t *conv_col, int16_t *conv_row, uint8_t size)
{
int32_t min_col = -1;
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 4f3b6102f5..227c3e7d69 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -123,10 +123,10 @@ struct get_tensor_info_t<ITensorInfo *>
/** Create an error if one of the pointers is a nullptr.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] pointers Pointers to check against nullptr.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] pointers Pointers to check against nullptr.
*
* @return Error
*/
@@ -153,11 +153,11 @@ inline arm_compute::Error error_on_nullptr(const char *function, const char *fil
* - Its dimensions don't match the full window's ones
* - The step for each of its dimension is not identical to the corresponding one of the full window.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] full Full size window
- * @param[in] win Window to validate.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] full Full size window
+ * @param[in] win Window to validate.
*
* @return Error
*/
@@ -175,11 +175,11 @@ arm_compute::Error error_on_mismatching_windows(const char *function, const char
* - It is not fully contained inside the full window
* - The step for each of its dimension is not identical to the corresponding one of the full window.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] full Full size window
- * @param[in] sub Sub-window to validate.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] full Full size window
+ * @param[in] sub Sub-window to validate.
*
* @return Error
*/
@@ -194,12 +194,12 @@ arm_compute::Error error_on_invalid_subwindow(const char *function, const char *
*
* The window cannot be collapsed if the given dimension not equal to the full window's dimension or not start from 0.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] full Full size window
- * @param[in] window Window to be collapsed.
- * @param[in] dim Dimension need to be checked.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] full Full size window
+ * @param[in] window Window to be collapsed.
+ * @param[in] dim Dimension need to be checked.
*
* @return Error
*/
@@ -214,11 +214,11 @@ arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *func
*
* The coordinates have too many dimensions if any of the dimensions greater or equal to max_dim is different from 0.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] pos Coordinates to validate
- * @param[in] max_dim Maximum number of dimensions allowed.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] pos Coordinates to validate
+ * @param[in] max_dim Maximum number of dimensions allowed.
*
* @return Error
*/
@@ -233,11 +233,11 @@ arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, con
*
* The window has too many dimensions if any of the dimension greater or equal to max_dim is different from 0.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] win Window to validate
- * @param[in] max_dim Maximum number of dimensions allowed.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] win Window to validate
+ * @param[in] max_dim Maximum number of dimensions allowed.
*
* @return Error
*/
@@ -250,12 +250,12 @@ arm_compute::Error error_on_window_dimensions_gte(const char *function, const ch
/** Return an error if the passed dimension objects differ.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] dim1 The first object to be compared.
- * @param[in] dim2 The second object to be compared.
- * @param[in] dims (Optional) Further allowed objects.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] dim1 The first object to be compared.
+ * @param[in] dim2 The second object to be compared.
+ * @param[in] dims (Optional) Further allowed objects.
*
* @return Error
*/
@@ -273,12 +273,12 @@ arm_compute::Error error_on_mismatching_dimensions(const char *function, const c
/** Return an error if the passed two tensor infos have different shapes from the given dimension
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info_1 The first tensor info to be compared.
- * @param[in] tensor_info_2 The second tensor info to be compared.
- * @param[in] tensor_infos (Optional) Further allowed tensor infos.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info_1 The first tensor info to be compared.
+ * @param[in] tensor_info_2 The second tensor info to be compared.
+ * @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
* @return Error
*/
@@ -290,12 +290,12 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
}
/** Return an error if the passed two tensors have different shapes from the given dimension
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_1 The first tensor to be compared.
- * @param[in] tensor_2 The second tensor to be compared.
- * @param[in] tensors (Optional) Further allowed tensors.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_1 The first tensor to be compared.
+ * @param[in] tensor_2 The second tensor to be compared.
+ * @param[in] tensors (Optional) Further allowed tensors.
*
* @return Error
*/
@@ -307,13 +307,13 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
}
/** Return an error if the passed two tensors have different shapes from the given dimension
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] upper_dim The dimension from which to check.
- * @param[in] tensor_info_1 The first tensor info to be compared.
- * @param[in] tensor_info_2 The second tensor info to be compared.
- * @param[in] tensor_infos (Optional) Further allowed tensor infos.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] upper_dim The dimension from which to check.
+ * @param[in] tensor_info_1 The first tensor info to be compared.
+ * @param[in] tensor_info_2 The second tensor info to be compared.
+ * @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
* @return Error
*/
@@ -335,13 +335,13 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
}
/** Return an error if the passed two tensors have different shapes from the given dimension
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] upper_dim The dimension from which to check.
- * @param[in] tensor_1 The first tensor to be compared.
- * @param[in] tensor_2 The second tensor to be compared.
- * @param[in] tensors (Optional) Further allowed tensors.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] upper_dim The dimension from which to check.
+ * @param[in] tensor_1 The first tensor to be compared.
+ * @param[in] tensor_2 The second tensor to be compared.
+ * @param[in] tensors (Optional) Further allowed tensors.
*
* @return Error
*/
@@ -363,11 +363,11 @@ inline arm_compute::Error error_on_mismatching_shapes(const char *function, cons
/** Return an error if the passed two tensor infos have different data types
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info The first tensor info to be compared.
- * @param[in] tensor_infos (Optional) Further allowed tensor infos.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info The first tensor info to be compared.
+ * @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
* @return Error
*/
@@ -389,11 +389,11 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
}
/** Return an error if the passed two tensors have different data types
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor The first tensor to be compared.
- * @param[in] tensors (Optional) Further allowed tensors.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor The first tensor to be compared.
+ * @param[in] tensors (Optional) Further allowed tensors.
*
* @return Error
*/
@@ -416,12 +416,12 @@ inline arm_compute::Error error_on_mismatching_data_types(const char *function,
*
* @note: If the first tensor doesn't have fixed point data type, the function returns without throwing an error
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info_1 The first tensor info to be compared.
- * @param[in] tensor_info_2 The second tensor info to be compared.
- * @param[in] tensor_infos (Optional) Further allowed tensor infos.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info_1 The first tensor info to be compared.
+ * @param[in] tensor_info_2 The second tensor info to be compared.
+ * @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
* @return Error
*/
@@ -455,12 +455,12 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
*
* @note: If the first tensor doesn't have fixed point data type, the function returns without throwing an error
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_1 The first tensor to be compared.
- * @param[in] tensor_2 The second tensor to be compared.
- * @param[in] tensors (Optional) Further allowed tensors.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_1 The first tensor to be compared.
+ * @param[in] tensor_2 The second tensor to be compared.
+ * @param[in] tensors (Optional) Further allowed tensors.
*
* @return Error
*/
@@ -481,12 +481,12 @@ inline arm_compute::Error error_on_mismatching_fixed_point(const char *function,
*
* @note: If the first tensor info doesn't have asymmetric quantized data type, the function returns without throwing an error
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info_1 The first tensor info to be compared.
- * @param[in] tensor_info_2 The second tensor info to be compared.
- * @param[in] tensor_infos (Optional) Further allowed tensor infos.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info_1 The first tensor info to be compared.
+ * @param[in] tensor_info_2 The second tensor info to be compared.
+ * @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
* @return Error
*/
@@ -520,12 +520,12 @@ inline arm_compute::Error error_on_mismatching_quantization_info(const char *fun
*
* @note: If the first tensor doesn't have asymmetric quantized data type, the function returns without throwing an error
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_1 The first tensor to be compared.
- * @param[in] tensor_2 The second tensor to be compared.
- * @param[in] tensors (Optional) Further allowed tensors.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_1 The first tensor to be compared.
+ * @param[in] tensor_2 The second tensor to be compared.
+ * @param[in] tensors (Optional) Further allowed tensors.
*
* @return Error
*/
@@ -544,12 +544,12 @@ inline arm_compute::Error error_on_mismatching_quantization_info(const char *fun
/** Throw an error if the format of the passed tensor/multi-image does not match any of the formats provided.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] object Tensor/multi-image to validate.
- * @param[in] format First format allowed.
- * @param[in] formats (Optional) Further allowed formats.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] object Tensor/multi-image to validate.
+ * @param[in] format First format allowed.
+ * @param[in] formats (Optional) Further allowed formats.
*/
template <typename T, typename F, typename... Fs>
void error_on_format_not_in(const char *function, const char *file, const int line,
@@ -575,12 +575,12 @@ void error_on_format_not_in(const char *function, const char *file, const int li
/** Return an error if the data type of the passed tensor info does not match any of the data types provided.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info Tensor info to validate.
- * @param[in] dt First data type allowed.
- * @param[in] dts (Optional) Further allowed data types.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info Tensor info to validate.
+ * @param[in] dt First data type allowed.
+ * @param[in] dts (Optional) Further allowed data types.
*
* @return Error
*/
@@ -603,12 +603,12 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
}
/** Return an error if the data type of the passed tensor does not match any of the data types provided.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor Tensor to validate.
- * @param[in] dt First data type allowed.
- * @param[in] dts (Optional) Further allowed data types.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor Tensor to validate.
+ * @param[in] dt First data type allowed.
+ * @param[in] dts (Optional) Further allowed data types.
*
* @return Error
*/
@@ -627,13 +627,13 @@ inline arm_compute::Error error_on_data_type_not_in(const char *function, const
/** Return an error if the data type or the number of channels of the passed tensor info does not match any of the data types and number of channels provided.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info Tensor info to validate.
- * @param[in] num_channels Number of channels to check
- * @param[in] dt First data type allowed.
- * @param[in] dts (Optional) Further allowed data types.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info Tensor info to validate.
+ * @param[in] num_channels Number of channels to check
+ * @param[in] dt First data type allowed.
+ * @param[in] dts (Optional) Further allowed data types.
*
* @return Error
*/
@@ -648,13 +648,13 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
}
/** Return an error if the data type or the number of channels of the passed tensor does not match any of the data types and number of channels provided.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor Tensor to validate.
- * @param[in] num_channels Number of channels to check
- * @param[in] dt First data type allowed.
- * @param[in] dts (Optional) Further allowed data types.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor Tensor to validate.
+ * @param[in] num_channels Number of channels to check
+ * @param[in] dt First data type allowed.
+ * @param[in] dts (Optional) Further allowed data types.
*
* @return Error
*/
@@ -673,10 +673,10 @@ inline arm_compute::Error error_on_data_type_channel_not_in(const char *function
/** Return an error if the tensor is not 2D.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor Tensor to validate.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor Tensor to validate.
*
* @return Error
*/
@@ -689,12 +689,12 @@ arm_compute::Error error_on_tensor_not_2d(const char *function, const char *file
/** Return an error if the channel is not in channels.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] cn Input channel
- * @param[in] channel First channel allowed.
- * @param[in] channels (Optional) Further allowed channels.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] cn Input channel
+ * @param[in] channel First channel allowed.
+ * @param[in] channels (Optional) Further allowed channels.
*
* @return Error
*/
@@ -719,11 +719,11 @@ inline arm_compute::Error error_on_channel_not_in(const char *function, const ch
/** Return an error if the channel is not in format.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] fmt Input channel
- * @param[in] cn First channel allowed.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] fmt Input channel
+ * @param[in] cn First channel allowed.
*
* @return Error
*/
@@ -742,10 +742,10 @@ arm_compute::Error error_on_channel_not_in_known_format(const char *function, co
* -# it doesn't contain models
* -# it doesn't have the HOG data objects with the same phase_type, normalization_type and l2_hyst_threshold (if normalization_type == L2HYS_NORM)
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] multi_hog IMultiHOG container to validate
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] multi_hog IMultiHOG container to validate
*
* @return Error
*/
@@ -758,10 +758,10 @@ arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *
/** Return an error if the kernel is not configured.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] kernel Kernel to validate.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] kernel Kernel to validate.
*/
arm_compute::Error error_on_unconfigured_kernel(const char *function, const char *file, const int line,
const IKernel *kernel);
@@ -807,12 +807,12 @@ arm_compute::Error error_on_invalid_subtensor_valid_region(const char *function,
/** Return an error if the input fixed-point positions are different.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_info_1 The first tensor info to be compared.
- * @param[in] tensor_info_2 The second tensor info to be compared.
- * @param[in] tensor_infos (Optional) Further allowed tensor infos.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_info_1 The first tensor info to be compared.
+ * @param[in] tensor_info_2 The second tensor info to be compared.
+ * @param[in] tensor_infos (Optional) Further allowed tensor infos.
*
* @return Error
*/
@@ -830,12 +830,12 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
}
/** Return an error if the input fixed-point positions are different.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] tensor_1 The first tensor to be compared.
- * @param[in] tensor_2 The second tensor to be compared.
- * @param[in] tensors (Optional) Further allowed tensors.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] tensor_1 The first tensor to be compared.
+ * @param[in] tensor_2 The second tensor to be compared.
+ * @param[in] tensors (Optional) Further allowed tensors.
*
* @return Error
*/
@@ -854,11 +854,11 @@ inline arm_compute::Error error_on_mismatching_fixed_point_position(const char *
/** Return an error if the fixed-point value is not representable in the specified Q format.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] value The floating point value to be checked.
- * @param[in] tensor_info Input tensor info that has information on data type and fixed-point position.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] value The floating point value to be checked.
+ * @param[in] tensor_info Input tensor info that has information on data type and fixed-point position.
*
* @return Error
*/
@@ -876,11 +876,11 @@ inline arm_compute::Error error_on_value_not_representable_in_fixed_point(const
}
/** Return an error an error if the fixed-point value is not representable in the specified Q format.
*
- * @param[in] function Function in which the error occurred.
- * @param[in] file Name of the file where the error occurred.
- * @param[in] line Line on which the error occurred.
- * @param[in] value The floating point value to be checked.
- * @param[in] tensor Input tensor that has information on data type and fixed-point position.
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file Name of the file where the error occurred.
+ * @param[in] line Line on which the error occurred.
+ * @param[in] value The floating point value to be checked.
+ * @param[in] tensor Input tensor that has information on data type and fixed-point position.
*
* @return Error
*/
diff --git a/arm_compute/runtime/CL/CLMultiImage.h b/arm_compute/runtime/CL/CLMultiImage.h
index f70929db07..2c2b4709b4 100644
--- a/arm_compute/runtime/CL/CLMultiImage.h
+++ b/arm_compute/runtime/CL/CLMultiImage.h
@@ -44,18 +44,18 @@ public:
CLMultiImage();
/** Init the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Heigth of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init(unsigned int width, unsigned int height, Format format);
/** Init the multi-planar image
*
* @note Uses conservative padding strategy which fits all kernels.
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init_auto_padding(unsigned int width, unsigned int height, Format format);
/** Allocated a previously initialised multi image
@@ -73,10 +73,10 @@ public:
private:
/** Init the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
- * @param[in] auto_padding Specifies whether the image uses auto padding
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
+ * @param[in] auto_padding Specifies whether the image uses auto padding
*/
void internal_init(unsigned int width, unsigned int height, Format format, bool auto_padding);
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowp.h b/arm_compute/runtime/CL/functions/CLGEMMLowp.h
index 613fcaa7e0..ffd997f6ec 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowp.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowp.h
@@ -52,26 +52,26 @@ public:
/** Constructor */
CLGEMMLowp(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @note GEMM_LOWP: low precision matrix multiply kernel
- * This kernel performs the following computation:
- *
- * -# Convert a values from uint8 to int32 and add a_offset to each of them.
- * -# Convert b values from uint8 to int32 and add b_offset to each of them.
- * -# Compute the int32 matrix product of the resulting a * b.
- * -# Add output_offset to each entry of the result.
- * -# Multiply each entry of the result and round to the nearest integer
- * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
- *
- * @param[in] a First input tensor (Matrix A). Data types supported: U8.
- * @param[in] b Second input tensor (Matrix B). Data types supported: same as @p a.
- * @param[out] output Output tensor. Data types supported: same as @p a.
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_offset Offset to be added to each element of the output matrix
- * @param[in] output_mult_int Multiplied with each element of the output matrix
- * @param[in] shift Number of bits to shift right the result.
- */
+ *
+ * @note GEMM_LOWP: low precision matrix multiply kernel
+ * This kernel performs the following computation:
+ *
+ * -# Convert a values from uint8 to int32 and add a_offset to each of them.
+ * -# Convert b values from uint8 to int32 and add b_offset to each of them.
+ * -# Compute the int32 matrix product of the resulting a * b.
+ * -# Add output_offset to each entry of the result.
+ * -# Multiply each entry of the result and round to the nearest integer
+ * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ *
+ * @param[in] a First input tensor (Matrix A). Data types supported: U8.
+ * @param[in] b Second input tensor (Matrix B). Data types supported: same as @p a.
+ * @param[out] output Output tensor. Data types supported: same as @p a.
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ * @param[in] output_offset Offset to be added to each element of the output matrix
+ * @param[in] output_mult_int Multiplied with each element of the output matrix
+ * @param[in] shift Number of bits to shift right the result.
+ */
void configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
// Inherited methods overridden:
diff --git a/arm_compute/runtime/CL/functions/CLIntegralImage.h b/arm_compute/runtime/CL/functions/CLIntegralImage.h
index 25fc549b29..71f6897d1b 100644
--- a/arm_compute/runtime/CL/functions/CLIntegralImage.h
+++ b/arm_compute/runtime/CL/functions/CLIntegralImage.h
@@ -43,10 +43,10 @@ public:
/** Default Constructor. */
CLIntegralImage();
/** Initialise the function's source, destinations and border mode.
- *
- * @param[in] input Source tensor. Data types supported: U8.
- * @param[out] output Destination tensor, Data types supported: U32.
- */
+ *
+ * @param[in] input Source tensor. Data types supported: U8.
+ * @param[out] output Destination tensor. Data types supported: U32.
+ */
void configure(const ICLTensor *input, ICLTensor *output);
// Inherited methods overridden:
diff --git a/arm_compute/runtime/MultiImage.h b/arm_compute/runtime/MultiImage.h
index 917e586ef8..30fa9b0256 100644
--- a/arm_compute/runtime/MultiImage.h
+++ b/arm_compute/runtime/MultiImage.h
@@ -45,18 +45,18 @@ public:
MultiImage();
/** Allocate the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init(unsigned int width, unsigned int height, Format format);
/** Allocate the multi-planar image
*
* @note Uses conservative padding strategy which fits all kernels.
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init_auto_padding(unsigned int width, unsigned int height, Format format);
/** Allocated a previously initialised multi image
@@ -67,10 +67,10 @@ public:
void allocate();
/** Create a subimage from an existing MultiImage.
*
- * @param[in] image Image to use backing memory from
- * @param[in] coords Starting coordinates of the new image. Should be within the parent image sizes
- * @param[in] width The width of the subimage
- * @param[in] height The height of the subimage
+ * @param[in] image Image to use backing memory from
+ * @param[in] coords Starting coordinates of the new image. Should be within the parent image sizes
+ * @param[in] width The width of the subimage
+ * @param[in] height The height of the subimage
*/
void create_subimage(MultiImage *image, const Coordinates &coords, unsigned int width, unsigned int height);
@@ -82,10 +82,10 @@ public:
private:
/** Init the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
- * @param[in] auto_padding Specifies whether the image uses auto padding
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
+ * @param[in] auto_padding Specifies whether the image uses auto padding
*/
void internal_init(unsigned int width, unsigned int height, Format format, bool auto_padding);
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 3433e77ba1..8757bc63aa 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -68,18 +68,18 @@ public:
/** Constructor */
NEDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input, weights, biases and output tensors.
- *
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
- * @param[in] weights The 4d weights with dimensions [width, height, OFM, IFM]. Data type supported: Same as @p input.
- * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
- * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
- * @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] ax The number of zeros added to right edge of the input.
- * @param[in] ay The number of zeros added to top edge of the input.
- * @param[in] upscalex How much to scale the X axis.
- * @param[in] upscaley How much to scale the Y axis.
- *
- */
+ *
+ * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in] weights The 4d weights with dimensions [width, height, OFM, IFM]. Data type supported: Same as @p input.
+ * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
+ * @param[in] info Contains padding and policies to be used in the deconvolution, this is described in @ref PadStrideInfo.
+ * @param[in] ax The number of zeros added to right edge of the input.
+ * @param[in] ay The number of zeros added to top edge of the input.
+ * @param[in] upscalex How much to scale the X axis.
+ * @param[in] upscaley How much to scale the Y axis.
+ *
+ */
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info,
unsigned int ax, unsigned int ay, float upscalex, float upscaley);
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index daaf18f297..c731bf278f 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -51,22 +51,22 @@ public:
/** Constructor */
NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input, weights, biases and output tensors.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
- * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
- * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
- *
- * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
- * @param[in] weights Set of kernels to convolve the input volume.
- * Supported sizes: 1x1, 3x3 and 5x5.
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported: Same as @p input.
- * @param[in] bias Set of biases. Data type supported: Same as @p input.
- * @param[out] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
+ *
+ * @note: DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+ * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
+ *
+ * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] weights Set of kernels to convolve the input volume.
+ * Supported sizes: 1x1, 3x3 and 5x5.
+ * The 3rd dimension must be the same as the input's volume 3rd dimension.
+ * Data type supported: Same as @p input.
+ * @param[in] bias Set of biases. Data type supported: Same as @p input.
+ * @param[out] output Output tensor.
+ * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ */
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info);
// Inherited methods overridden:
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 3b6aa1c7db..3d213a7668 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -46,11 +46,11 @@ public:
/** Constructor */
NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
- */
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *output);
// Inherited methods overridden:
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 598756e435..889bbca7f2 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -58,18 +58,18 @@ public:
/** Constructor */
NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @note GEMM_LOWP: low precision GEMM kernel
- * This kernel performs the following computations:
- *
- * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
- * -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
- * -# Compute the matrix product of the resulting a * b in int32.
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
- */
+ *
+ * @note GEMM_LOWP: low precision GEMM kernel
+ * This kernel performs the following computations:
+ *
+ * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
+ * -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
+ * -# Compute the matrix product of the resulting a * b in int32.
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
*
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
index 9270d5581f..533a41c888 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
@@ -60,29 +60,29 @@ class NEGEMMLowpQuantizeDownInt32ToUint8Scale : public INESimpleFunction
{
public:
/** Initialise the kernel's inputs, output
- *
- * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8
- * @param[in] result_offset Offset to be added to each element of the input matrix
- * @param[in] result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
- * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
- */
+ *
+ * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. Data type supported: QASYMM8
+ * @param[in] result_offset Offset to be added to each element of the input matrix
+ * @param[in] result_mult_int Value to be multiplied to each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8Scale
- *
- * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
- */
+ *
+ * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Output tensor. Data type supported: QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
};
}
diff --git a/arm_compute/runtime/NEON/functions/NEIntegralImage.h b/arm_compute/runtime/NEON/functions/NEIntegralImage.h
index 6d7dd697e8..1ac501c994 100644
--- a/arm_compute/runtime/NEON/functions/NEIntegralImage.h
+++ b/arm_compute/runtime/NEON/functions/NEIntegralImage.h
@@ -35,10 +35,10 @@ class NEIntegralImage : public INESimpleFunction
{
public:
/** Initialise the function's source, destinations and border mode.
- *
- * @param[in] input Source tensor. Data type supported: U8.
- * @param[out] output Destination tensor. Data type supported: U32.
- */
+ *
+ * @param[in] input Source tensor. Data type supported: U8.
+ * @param[out] output Destination tensor. Data type supported: U32.
+ */
void configure(const ITensor *input, ITensor *output);
};
}
diff --git a/scripts/format_doxygen.py b/scripts/format_doxygen.py
index 423c2bfbf9..e18f24eb52 100755
--- a/scripts/format_doxygen.py
+++ b/scripts/format_doxygen.py
@@ -12,32 +12,38 @@ def process_comment(fd, comment, first_param, last_param):
else:
params = list()
- # Copy the non param lines unmodified:
- fd.write("".join(comment[:first_param]))
-
# Measure the indentation of the first param and use that to create an empty comment line string:
- m = re.match(r" *\*", comment[first_param])
+ m = re.match(r" */", comment[0])
if not m:
- raise Exception("Not a comment ? '{}'".format(comment[first_param]))
+ raise Exception("{}: Not a comment ? '{}'".format(path,comment[first_param]))
+
+ line_prefix = " " * len(m.group(0)) + "*"
+ empty_line = line_prefix +"\n"
- empty_line = "{}\n".format(m.group(0))
+ fd.write(comment[0])
+ # Copy the non param lines with the correct indentation:
+ for comment_line in range(1,first_param):
+ line = comment[comment_line]
+ m = re.match(" *\*(.*)", line)
+ if not m:
+ raise Exception("{}:{}: Not a comment line ? ".format(path, n_line - len(comment) + comment_line + 1))
+ fd.write(line_prefix+ m.group(1)+"\n")
# For each param split the line into 3 columns: param, param_name, description
for param in range(first_param, last_param):
- m = re.match(r"([^@]+@param\[[^\]]+\]) +(\S+) +(.+)", comment[param])
+ m = re.match(r"[^@]+(@param\[[^\]]+\]) +(\S+) +(.+)", comment[param])
if m:
- params.append( (m.group(1), m.group(2), m.group(3)) )
+ params.append( (" "+m.group(1), m.group(2), m.group(3)) )
else:
# If it's not a match then it must be a multi-line param description:
- m = re.match("( *\*) +(.*)", comment[param])
-
+ m = re.match(" *\* +(.*)", comment[param])
if not m:
- raise Exception("Not a comment line ? ({})".format(n_line - len(comment) + param))
+ raise Exception("{}:{}: Not a comment line ? ".format(path, n_line - len(comment) + param + 1))
- params.append( (m.group(1), "", m.group(2)) )
+ params.append( ("", "", m.group(1)) )
# Now that we've got a list of params, find what is the longest string for each column:
max_len = [0, 0]
@@ -55,7 +61,7 @@ def process_comment(fd, comment, first_param, last_param):
# Write out the formatted list of params:
for p in params:
- fd.write("{}{} {}{} {}\n".format(
+ fd.write("{}{}{} {}{} {}\n".format( line_prefix,
p[0], " " * (max_len[0] - len(p[0])),
p[1], " " * (max_len[1] - len(p[1])),
p[2]))
@@ -66,8 +72,13 @@ def process_comment(fd, comment, first_param, last_param):
# insert empty line
fd.write(empty_line)
- # Copy the remaining of the comment unmodified:
- fd.write("".join(comment[last_param:]))
+ # Copy the remaining of the comment with the correct indentation:
+ for comment_line in range(last_param,len(comment)):
+ line = comment[comment_line]
+ m = re.match(" *\*(.*)", line)
+ if not m:
+ raise Exception("{}:{}: Not a comment line ? ".format(path, n_line - len(comment) + comment_line + 1))
+ fd.write(line_prefix+ m.group(1)+"\n")
if __name__ == "__main__":
n_file=0
@@ -111,7 +122,7 @@ if __name__ == "__main__":
#print("Start comment {}".format(n_line))
if len(comment) > 0:
- raise Exception("Already in a comment! ({})".format(n_line))
+ raise Exception("{}:{}: Already in a comment!".format(path,n_line))
comment.append(line)
@@ -146,7 +157,7 @@ if __name__ == "__main__":
#print("End comment {}".format(n_line))
if len(comment) < 1:
- raise Exception("Was not in a comment! ({})".format(n_line))
+ raise Exception("{}:{}: Was not in a comment! ".format(path, n_line))
#print("Process comment {} {}".format(first_param, last_param))
diff --git a/src/core/CL/cl_kernels/canny.cl b/src/core/CL/cl_kernels/canny.cl
index ec6719213c..166d681755 100644
--- a/src/core/CL/cl_kernels/canny.cl
+++ b/src/core/CL/cl_kernels/canny.cl
@@ -229,16 +229,16 @@ __kernel void suppress_non_maximum(
#define hysteresis_local_stack_L2 16 // The size of level 2 stack, adjust this can impact the match rate with VX implementation
/** Check whether pixel is valid
-*
-* Skip the pixel if the early_test fails.
-* Otherwise, it tries to add the pixel coordinate to the stack, and proceed to popping the stack instead if the stack is full
-*
-* @param[in] early_test Boolean condition based on the minv check and visited buffer check
-* @param[in] x_pos X-coordinate of pixel that is going to be recorded, has to be within the boundary
-* @param[in] y_pos Y-coordinate of pixel that is going to be recorded, has to be within the boundary
-* @param[in] x_cur X-coordinate of current central pixel
-* @param[in] y_cur Y-coordinate of current central pixel
-*/
+ *
+ * Skip the pixel if the early_test fails.
+ * Otherwise, it tries to add the pixel coordinate to the stack, and proceed to popping the stack instead if the stack is full
+ *
+ * @param[in] early_test Boolean condition based on the minv check and visited buffer check
+ * @param[in] x_pos X-coordinate of pixel that is going to be recorded, has to be within the boundary
+ * @param[in] y_pos Y-coordinate of pixel that is going to be recorded, has to be within the boundary
+ * @param[in] x_cur X-coordinate of current central pixel
+ * @param[in] y_cur Y-coordinate of current central pixel
+ */
#define check_pixel(early_test, x_pos, y_pos, x_cur, y_cur) \
{ \
if(!early_test) \
diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl
index 411e097dc8..89555a0cb6 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution.cl
@@ -145,36 +145,36 @@ inline float2 convolution3x3(
}
/** This function computes the horizontal integral of the image.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: U8
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F16/F32
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the biases vector
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: U8
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: F16/F32
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: F16/F32
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the biases vector
+ * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32
+ * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ */
__kernel void depthwise_convolution_3x3(
TENSOR3D_DECLARATION(src),
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
index 19a509bd0a..0cd4e7148e 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -176,41 +176,41 @@ inline uchar2 convolution3x3(
}
/** This function computes the horizontal integral of the image.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: QASYMM8
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: QASYMM8
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- * @param[in] input_offset Quantized offset of zero point of the input tensor data range
- * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
- * @param[in] output_offset Quantized offset of zero point of the output tensor data range
- * @param[in] output_multiplier Output scale multiplier
- * @param[in] output_shift Output scale divisor exponent
- */
+ *
+ * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8
+ * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: QASYMM8
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: QASYMM8
+ * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ * @param[in] input_offset Quantized offset of zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of zero point of the weights tensor data range
+ * @param[in] output_offset Quantized offset of zero point of the output tensor data range
+ * @param[in] output_multiplier Output scale multiplier
+ * @param[in] output_shift Output scale divisor exponent
+ */
__kernel void depthwise_convolution_3x3_quantized(
TENSOR3D_DECLARATION(src),
diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h
index b329118f14..d55346b532 100644
--- a/src/core/CL/cl_kernels/fixed_point.h
+++ b/src/core/CL/cl_kernels/fixed_point.h
@@ -103,11 +103,11 @@ TYPE_ALIAS(int, qs32)
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
/** Computes saturating absolute value of fixed point vector.
- *
- * @param[in] type the actual data type.
- *
- * @return The result of the fixed point absolute value.
- */
+ *
+ * @param[in] type the actual data type.
+ *
+ * @return The result of the fixed point absolute value.
+ */
#define ABSQ_SAT_IMPL(type) \
inline type abs_##type##_sat(type VopA) \
{ \
@@ -121,11 +121,11 @@ ABSQ_SAT_IMPL(qs16x8)
#define ABS_SAT_OP_EXPAND(a, type, size) ABS_SAT_OP_EXPAND_STR(a, type, size)
/** Computes max of fixed point types.
- *
- * @param[in] type the actual data type.
- *
- * @return The result of the fixed point maximum.
- */
+ *
+ * @param[in] type the actual data type.
+ *
+ * @return The result of the fixed point maximum.
+ */
#define MAXQ_IMPL(type) \
inline type max_##type(type VopA, type VopB) \
{ \
@@ -147,11 +147,11 @@ MAXQ_IMPL(qs16x16)
#define MAX_OP_EXPAND(a, b, type, size) MAX_OP_EXPAND_STR(a, b, type, size)
/** Computes saturated addition of fixed point types.
- *
- * @param[in] type the actual data type.
- *
- * @return The result of the fixed point addition. The result is saturated in case of overflow
- */
+ *
+ * @param[in] type the actual data type.
+ *
+ * @return The result of the fixed point addition. The result is saturated in case of overflow
+ */
#define ADDQ_SAT_IMPL(type) \
inline type add_sat_##type(type VopA, type VopB) \
{ \
@@ -178,11 +178,11 @@ ADDQ_SAT_IMPL(qs32x16)
#define ADD_SAT_OP_EXPAND(a, b, type, size) ADD_SAT_OP_EXPAND_STR(a, b, type, size)
/** Computes saturated subtraction of fixed point types.
- *
- * @param[in] type the actual data type.
- *
- * @return The result of the fixed point subtraction. The result is saturated in case of overflow
- */
+ *
+ * @param[in] type the actual data type.
+ *
+ * @return The result of the fixed point subtraction. The result is saturated in case of overflow
+ */
#define SUBQ_SAT_IMPL(type) \
inline type sub_sat_##type(type VopA, type VopB) \
{ \
@@ -258,12 +258,12 @@ MULQ_SAT_IMPL(qs16x16, qs32x16)
#define MUL_SAT_OP_EXPAND(a, b, type, size, position) MUL_SAT_OP_EXPAND_STR(a, b, type, size, position)
/** Saturate multiply-accumulate
- *
- * @param[in] type the actual data type.
- * @param[in] itype the intermediate data type.
- *
- * @return The result of the fixed point multiply-accumulate. The result is saturated in case of overflow
- */
+ *
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
+ *
+ * @return The result of the fixed point multiply-accumulate. The result is saturated in case of overflow
+ */
#define MLAQ_SAT_IMPL(type, itype) \
type mla_sat_##type(type VopA, type VopB, type VopC, int fixed_point_position) \
{ \
@@ -279,12 +279,12 @@ MLAQ_SAT_IMPL(qs16x8, qs32x8)
#define MLA_SAT_OP_EXPAND(a, b, c, type, size, position) MLA_SAT_OP_EXPAND_STR(a, b, c, type, size, position)
/** Saturate multiply-accumulate long
- *
- * @param[in] type the actual data type.
- * @param[in] itype the intermediate data type.
- *
- * @return The result of the fixed point multiply-accumulate long. The result is saturated in case of overflow
- */
+ *
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
+ *
+ * @return The result of the fixed point multiply-accumulate long. The result is saturated in case of overflow
+ */
#define MLALQ_SAT_IMPL(type, itype) \
itype mlal_sat_##type(itype VopA, type VopB, type VopC, int fixed_point_position) \
{ \
@@ -299,13 +299,13 @@ MLALQ_SAT_IMPL(qs16x8, qs32x8)
#define MLAL_SAT_OP_EXPAND(a, b, c, type, size, position) MLAL_SAT_OP_EXPAND_STR(a, b, c, type, size, position)
/** Saturate division of two fixed point vectors
- *
- * @param[in] stype the actual scalar data type.
- * @param[in] type the actual data type.
- * @param[in] itype the intermediate data type.
- *
- * @return The result of the fixed point division. The result is saturated in case of overflow
- */
+ *
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] itype the intermediate data type.
+ *
+ * @return The result of the fixed point division. The result is saturated in case of overflow
+ */
#define DIVQ_SAT_IMPL(stype, type, itype) \
inline type div_sat_##type(type VopA, type VopB, int fixed_point_position) \
{ \
@@ -329,15 +329,15 @@ DIVQ_SAT_IMPL(qs16, qs16, qs32)
#define DIV_SAT_OP_VEC_EXPAND(a, b, type, size, position) DIV_SAT_OP_VEC_EXPAND_STR(a, b, type, size, position)
/** Saturate exponential of a fixed point vector
- *
- * @note Implemented approach uses taylor polynomial to approximate the exponential function.
- *
- * @param[in] stype the actual scalar data type.
- * @param[in] type the actual data type.
- * @param[in] size the number of the calculated elements.
- *
- * @return The result of the fixed point exponential. The result is saturated in case of overflow
- */
+ *
+ * @note Implemented approach uses a Taylor polynomial to approximate the exponential function.
+ *
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] size the number of the calculated elements.
+ *
+ * @return The result of the fixed point exponential. The result is saturated in case of overflow
+ */
#define EXPQ_IMPL(stype, type, size) \
inline type exp_sat_##type(type VopA, int fixed_point_position) \
{ \
@@ -372,15 +372,15 @@ EXPQ_IMPL(qs16, qs16x16, 16)
#define EXP_OP_EXPAND(a, type, size, position) EXP_OP_EXPAND_STR(a, type, size, position)
/** Saturate logarithm of a fixed point vector
- *
- * @note Implemented approach uses taylor polynomial to approximate the logarithm function.
- *
- * @param[in] stype the actual scalar data type.
- * @param[in] type the actual data type.
- * @param[in] size the number of the calculated elements.
- *
- * @return The result of the fixed point logarithm. The result is saturated in case of overflow
- */
+ *
+ * @note Implemented approach uses a Taylor polynomial to approximate the logarithm function.
+ *
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] size the number of the calculated elements.
+ *
+ * @return The result of the fixed point logarithm. The result is saturated in case of overflow
+ */
#define LOGQ_IMPL(stype, type, size) \
inline type log_sat_##type(type VopA, int fixed_point_position) \
{ \
@@ -410,15 +410,15 @@ LOGQ_IMPL(qs16, qs16x16, 16)
#define LOG_OP_EXPAND(a, type, size, position) LOG_OP_EXPAND_STR(a, type, size, position)
/** Saturate inverse square root of a fixed point vector
- *
- * @note Implemented approach uses Newton's method to approximate the inverse square root function.
- *
- * @param[in] stype the actual scalar data type.
- * @param[in] type the actual data type.
- * @param[in] size the number of the calculated elements.
- *
- * @return The result of the fixed point inverse square root. The result is saturated in case of overflow
- */
+ *
+ * @note Implemented approach uses Newton's method to approximate the inverse square root function.
+ *
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] size the number of the calculated elements.
+ *
+ * @return The result of the fixed point inverse square root. The result is saturated in case of overflow
+ */
#define INVSQRTQ_IMPL(stype, type, size) \
inline type invsqrt_sat_##type(type VopA, int fixed_point_position) \
{ \
@@ -447,15 +447,15 @@ INVSQRTQ_IMPL(qs16, qs16x8, 8)
#define INVSQRT_OP_EXPAND(a, type, size, position) INVSQRT_OP_EXPAND_STR(a, type, size, position)
/** Saturate hyperbolic tangent of a fixed point vector
- *
- * tanh(x) = (e^2x - 1)/(e^2x + 1)
- *
- * @param[in] stype the actual scalar data type.
- * @param[in] type the actual data type.
- * @param[in] size the number of the calculated elements.
- *
- * @return The result of the fixed point hyperbolic tangent. The result is saturated in case of overflow
- */
+ *
+ * tanh(x) = (e^2x - 1)/(e^2x + 1)
+ *
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] size the number of the calculated elements.
+ *
+ * @return The result of the fixed point hyperbolic tangent. The result is saturated in case of overflow
+ */
#define TANHQ_IMPL(stype, type, size) \
inline type tanh_sat_##type(type VopA, int fixed_point_position) \
{ \
diff --git a/src/core/CL/cl_kernels/warp_helpers.h b/src/core/CL/cl_kernels/warp_helpers.h
index 76f0a4a104..9afec7d081 100644
--- a/src/core/CL/cl_kernels/warp_helpers.h
+++ b/src/core/CL/cl_kernels/warp_helpers.h
@@ -78,7 +78,7 @@ inline const float2 get_current_coords()
* @param[in] coord Input coordinates
*
* @return vector of 8 floats with the coordinates, even positions are x and odd y.
-*/
+ */
inline const float8 get_neighbour_coords(const float2 coord)
{
return (float8)(/*tl*/ coord.s0, coord.s1, /*tr*/ coord.s0 + 1, coord.s1, /*bl*/ coord.s0, coord.s1 + 1, /*br*/ coord.s0 + 1, coord.s1 + 1);
@@ -91,7 +91,7 @@ inline const float8 get_neighbour_coords(const float2 coord)
* @param[in] width Width of the image
* @param[in] height Height of the image
* @param[in] border_size Border size
-*/
+ */
inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_with_border(const Image *in, const float8 coords, const float width, const float height, const float border_size)
{
// If any of the 4 texels is out of the image's boundaries we use the border value (REPLICATE or CONSTANT) for any texel out of the image.
@@ -133,7 +133,7 @@ inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate_with_border(const
* @param[in] coords Vector of four 2D coordinates. Even pos is x and odd y.
* @param[in] width Width of the image
* @param[in] height Height of the image
-*/
+ */
inline const VEC_DATA_TYPE(DATA_TYPE, 4) bilinear_interpolate(const Image *in, const float8 coords, const float width, const float height)
{
return bilinear_interpolate_with_border(in, coords, width, height, 1);
diff --git a/src/core/CL/cl_kernels/warp_perspective.cl b/src/core/CL/cl_kernels/warp_perspective.cl
index d955e427c4..6ffb7e4a00 100644
--- a/src/core/CL/cl_kernels/warp_perspective.cl
+++ b/src/core/CL/cl_kernels/warp_perspective.cl
@@ -62,7 +62,7 @@ inline const float8 apply_perspective_transform(const float2 coord, const float1
* x0 = M[1][1] * x + M[1][2] * y + M[1][3]
* y0 = M[2][1] * x + M[2][2] * y + M[2][3]
* z0 = M[3][1] * x + M[3][2] * y + M[3][3]
-
+ *
* output(x,y) = input(x0/z0,y0/z0)
*
* @attention The matrix coefficients need to be passed at compile time:\n
diff --git a/tests/IArrayAccessor.h b/tests/IArrayAccessor.h
index f128d8f67e..8c73046cd9 100644
--- a/tests/IArrayAccessor.h
+++ b/tests/IArrayAccessor.h
@@ -51,7 +51,7 @@ public:
/** Resize array.
*
- * @param[in] num The new array size in number of elements
+ * @param[in] num The new array size in number of elements
*/
virtual void resize(size_t num) = 0;
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index b5e156c5f7..ab7dbd8a41 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -207,7 +207,7 @@ std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_positio
* @param[in] src Quantized tensor.
*
* @return Float tensor.
-*/
+ */
SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src);
/** Convert float simple tensor into quantized using specified quantization information.
@@ -216,7 +216,7 @@ SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src);
* @param[in] quantization_info Quantification information.
*
* @return Quantized tensor.
-*/
+ */
SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);
} // namespace validation
} // namespace test