author    Anthony Barbier <anthony.barbier@arm.com> 2017-11-23 18:02:04 +0000
committer Anthony Barbier <anthony.barbier@arm.com> 2018-11-02 16:41:04 +0000
commit    f202e50a8b89f143f74c393e33e0154817bd3c1d (patch)
tree      e9653958a6e343c1d7610840b66b9391d3fcf75b /arm_compute/runtime/NEON/functions
parent    dbfb31cdee063ec61e0ab1087f99f235c12d2e7e (diff)
COMPMID-556 Improved indentation and error handling in format_doxygen.py
Change-Id: I6f51ffe6c324d9da500716b52c97c344f2a2a164
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110486
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/runtime/NEON/functions')
-rw-r--r-- arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h                 | 24
-rw-r--r-- arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h             | 32
-rw-r--r-- arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h | 10
-rw-r--r-- arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h         | 24
-rw-r--r-- arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h                | 42
-rw-r--r-- arm_compute/runtime/NEON/functions/NEIntegralImage.h                      | 8
6 files changed, 70 insertions, 70 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 3433e77ba1..8757bc63aa 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -68,18 +68,18 @@ public:
/** Constructor */
NEDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input, weights, biases and output tensors.
- *
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
- * @param[in] weights The 4D weights with dimensions [width, height, OFM, IFM]. Data type supported: Same as @p input.
- * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
- * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
- * @param[in] info Contains padding and policies to be used in the deconvolution, as described in @ref PadStrideInfo.
- * @param[in] ax The number of zeros added to the right edge of the input.
- * @param[in] ay The number of zeros added to the top edge of the input.
- * @param[in] upscalex How much to scale the X axis.
- * @param[in] upscaley How much to scale the Y axis.
- *
- */
+ *
+ * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in] weights The 4D weights with dimensions [width, height, OFM, IFM]. Data type supported: Same as @p input.
+ * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
+ * @param[in] info Contains padding and policies to be used in the deconvolution, as described in @ref PadStrideInfo.
+ * @param[in] ax The number of zeros added to the right edge of the input.
+ * @param[in] ay The number of zeros added to the top edge of the input.
+ * @param[in] upscalex How much to scale the X axis.
+ * @param[in] upscaley How much to scale the Y axis.
+ *
+ */
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info,
unsigned int ax, unsigned int ay, float upscalex, float upscaley);
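For context, a minimal usage sketch of the configure() call documented above. The shapes, strides and upscale factors are invented for illustration (they are not part of this commit) and must satisfy the real deconvolution geometry:

    #include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor input, weights, bias, output;
        // Illustrative shapes: 8x8x3 input, 3x3 kernels laid out [width, height, OFM, IFM].
        input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 4U, 3U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(16U, 16U, 4U), 1, DataType::F32));

        NEDeconvolutionLayer deconv;
        // ax = ay = 0 (no extra zeros at the edges), upscale both axes by 2.
        deconv.configure(&input, &weights, &bias, &output, PadStrideInfo(1, 1, 0, 0), 0U, 0U, 2.f, 2.f);

        // Allocate backing memory only after configure(), then run.
        input.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        output.allocator()->allocate();
        deconv.run();
        return 0;
    }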
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index daaf18f297..c731bf278f 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -51,22 +51,22 @@ public:
/** Constructor */
NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input, weights, biases and output tensors.
- *
- * @note DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3, data type = QS8/QS16/F16/F32
- * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3, data type = QS8/F16/F32
- * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3, data type = F32
- *
- * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
- * @param[in] weights Set of kernels to convolve the input volume.
- * Supported sizes: 1x1, 3x3 and 5x5.
- * The 3rd dimension must be the same as the 3rd dimension of the input volume.
- * Data type supported: Same as @p input.
- * @param[in] bias Set of biases. Data type supported: Same as @p input.
- * @param[out] output Output tensor.
- * The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
+ *
+ * @note DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3, data type = QS8/QS16/F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3, data type = QS8/F16/F32
+ * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3, data type = F32
+ *
+ * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] weights Set of kernels to convolve the input volume.
+ * Supported sizes: 1x1, 3x3 and 5x5.
+ * The 3rd dimension must be the same as the 3rd dimension of the input volume.
+ * Data type supported: Same as @p input.
+ * @param[in] bias Set of biases. Data type supported: Same as @p input.
+ * @param[out] output Output tensor.
+ * The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ */
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info);
// Inherited methods overridden:
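A similar sketch for the direct convolution, picking one of the configurations listed in the note above (3x3 kernel, F32, stride 1); the shapes are again invented for illustration:

    #include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src, weights, bias, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
        // 8 kernels of 3x3x16: the 3rd dimension matches the input volume's 3rd dimension.
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 8U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));

        NEDirectConvolutionLayer conv;
        conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1)); // stride 1, pad 1

        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();
        conv.run();
        return 0;
    }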
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 3b6aa1c7db..3d213a7668 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -46,11 +46,11 @@ public:
/** Constructor */
NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: S32
- */
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *output);
// Inherited methods overridden:
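A minimal sketch of driving the assembly low-precision GEMM documented above. Matrix A is M x K, B is K x N, and the S32 output is M x N; TensorShape takes (width, height), so a 4x16 matrix A becomes TensorShape(16U, 4U). The sizes are illustrative:

    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor a, b, dst;
        a.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::U8));  // A: 4x16
        b.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::U8));  // B: 16x8
        dst.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::S32)); // C: 4x8

        NEGEMMLowpAssemblyMatrixMultiplyCore gemm;
        gemm.configure(&a, &b, &dst);

        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();
        gemm.run();
        return 0;
    }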
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 598756e435..889bbca7f2 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -58,18 +58,18 @@ public:
/** Constructor */
NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @note GEMM_LOWP: low precision GEMM kernel
- * This kernel performs the following computations:
- *
- * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
- * -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
- * -# Compute the matrix product of the resulting a * b in int32.
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: S32
- */
+ *
+ * @note GEMM_LOWP: low precision GEMM kernel
+ * This kernel performs the following computations:
+ *
+ * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
+ * -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
+ * -# Compute the matrix product of the resulting a * b in int32.
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
*
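A hedged sketch of the low-precision GEMM core above. The a_offset/b_offset mentioned in the note are assumed to be carried by each input's QuantizationInfo (that detail is not shown in this header), and all sizes and quantization values are invented:

    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor a, b, dst;
        TensorInfo a_info(TensorShape(16U, 4U), 1, DataType::QASYMM8);
        TensorInfo b_info(TensorShape(8U, 16U), 1, DataType::QASYMM8);
        // Assumption: the offsets added during the int32 conversion come from here.
        a_info.set_quantization_info(QuantizationInfo(0.5f, 10)); // scale, offset
        b_info.set_quantization_info(QuantizationInfo(0.25f, 3));
        a.allocator()->init(a_info);
        b.allocator()->init(b_info);
        dst.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::S32));

        NEGEMMLowpMatrixMultiplyCore gemm;
        gemm.configure(&a, &b, &dst);

        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();
        gemm.run();
        return 0;
    }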
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
index 9270d5581f..533a41c888 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
@@ -60,29 +60,29 @@ class NEGEMMLowpQuantizeDownInt32ToUint8Scale : public INESimpleFunction
{
public:
/** Initialise the kernel's inputs, output
- *
- * @param[in] input Input tensor. It is the output of the @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases are supported, and it can be nullptr if the addition of biases is not required.
- * Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data type supported: QASYMM8
- * @param[in] result_offset Offset to be added to each element of the input matrix
- * @param[in] result_mult_int Value to be multiplied with each element of the input matrix once the result_offset has been added
- * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
- */
+ *
+ * @param[in] input Input tensor. It is the output of the @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases are supported, and it can be nullptr if the addition of biases is not required.
+ * Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. Data type supported: QASYMM8
+ * @param[in] result_offset Offset to be added to each element of the input matrix
+ * @param[in] result_mult_int Value to be multiplied with each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8Scale
- *
- * @param[in] input Input tensor. It is the output of the @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases are supported, and it can be nullptr if the addition of biases is not required.
- * Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] output Output tensor. Data type supported: QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
- */
+ *
+ * @param[in] input Input tensor. It is the output of the @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases are supported, and it can be nullptr if the addition of biases is not required.
+ * Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Output tensor. Data type supported: QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
};
}
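Per the parameter docs above, the quantize-down stage computes roughly out = saturate_uint8(((in + result_offset) * result_mult_int) >> result_shift), optionally clamped to [min, max]. A minimal sketch with invented requantization values, using the validate() documented above:

    #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor gemm_result, quantized;
        gemm_result.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::S32));
        quantized.allocator()->init(TensorInfo(TensorShape(8U, 4U), 1, DataType::QASYMM8));

        // Optional up-front check of the configuration (no bias here, hence nullptr).
        Error err = NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(gemm_result.info(), nullptr, quantized.info());

        NEGEMMLowpQuantizeDownInt32ToUint8Scale output_stage;
        // result_offset = -10, result_mult_int = 3, result_shift = 8: invented values.
        output_stage.configure(&gemm_result, nullptr, &quantized, -10, 3, 8);

        gemm_result.allocator()->allocate();
        quantized.allocator()->allocate();
        output_stage.run();
        return 0;
    }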
diff --git a/arm_compute/runtime/NEON/functions/NEIntegralImage.h b/arm_compute/runtime/NEON/functions/NEIntegralImage.h
index 6d7dd697e8..1ac501c994 100644
--- a/arm_compute/runtime/NEON/functions/NEIntegralImage.h
+++ b/arm_compute/runtime/NEON/functions/NEIntegralImage.h
@@ -35,10 +35,10 @@ class NEIntegralImage : public INESimpleFunction
{
public:
/** Initialise the function's source and destination.
- *
- * @param[in] input Source tensor. Data type supported: U8.
- * @param[out] output Destination tensor. Data type supported: U32.
- */
+ *
+ * @param[in] input Source tensor. Data type supported: U8.
+ * @param[out] output Destination tensor. Data type supported: U32.
+ */
void configure(const ITensor *input, ITensor *output);
};
}
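An integral image accumulates dst(x, y) = sum of src(i, j) over all i <= x and j <= y, which is why a U8 source needs a U32 destination. A minimal end-to-end sketch with an illustrative 64x64 image:

    #include "arm_compute/runtime/NEON/functions/NEIntegralImage.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::U8));
        dst.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::U32));

        NEIntegralImage integral;
        integral.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // After run(): dst(x, y) holds the running sum of all pixels up to and including (x, y).
        integral.run();
        return 0;
    }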