path: root/arm_compute/runtime
author     Anthony Barbier <anthony.barbier@arm.com>   2017-11-23 18:02:04 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>   2018-11-02 16:41:04 +0000
commit     f202e50a8b89f143f74c393e33e0154817bd3c1d (patch)
tree       e9653958a6e343c1d7610840b66b9391d3fcf75b /arm_compute/runtime
parent     dbfb31cdee063ec61e0ab1087f99f235c12d2e7e (diff)
download   ComputeLibrary-f202e50a8b89f143f74c393e33e0154817bd3c1d.tar.gz
COMPMID-556 Improved indentation and error handling in format_doxygen.py
Change-Id: I6f51ffe6c324d9da500716b52c97c344f2a2a164
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110486
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'arm_compute/runtime')
-rw-r--r--  arm_compute/runtime/CL/CLMultiImage.h                                       20
-rw-r--r--  arm_compute/runtime/CL/functions/CLGEMMLowp.h                               40
-rw-r--r--  arm_compute/runtime/CL/functions/CLIntegralImage.h                           8
-rw-r--r--  arm_compute/runtime/MultiImage.h                                            28
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h                   24
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h               32
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h   10
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h           24
-rw-r--r--  arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h                  42
-rw-r--r--  arm_compute/runtime/NEON/functions/NEIntegralImage.h                         8
10 files changed, 118 insertions, 118 deletions
diff --git a/arm_compute/runtime/CL/CLMultiImage.h b/arm_compute/runtime/CL/CLMultiImage.h
index f70929db07..2c2b4709b4 100644
--- a/arm_compute/runtime/CL/CLMultiImage.h
+++ b/arm_compute/runtime/CL/CLMultiImage.h
@@ -44,18 +44,18 @@ public:
CLMultiImage();
/** Init the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Heigth of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init(unsigned int width, unsigned int height, Format format);
/** Init the multi-planar image
*
* @note Uses conservative padding strategy which fits all kernels.
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init_auto_padding(unsigned int width, unsigned int height, Format format);
/** Allocate a previously initialised multi image
@@ -73,10 +73,10 @@ public:
private:
/** Init the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
- * @param[in] auto_padding Specifies whether the image uses auto padding
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
+ * @param[in] auto_padding Specifies whether the image uses auto padding
*/
void internal_init(unsigned int width, unsigned int height, Format format, bool auto_padding);
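For reference, a minimal usage sketch of the init path documented in this header; the Format value is an assumed example, the allocate() call is inferred from the "Allocate a previously initialised multi image" comment above (mirroring MultiImage::allocate()), and CL runtime setup is omitted:

#include "arm_compute/runtime/CL/CLMultiImage.h"

using namespace arm_compute;

void prepare_cl_multi_image(CLMultiImage &img)
{
    // Conservative padding that fits all kernels, as documented for init_auto_padding().
    // Format::NV12 is an assumed example; any multi-planar Format is used the same way.
    img.init_auto_padding(640U, 480U, Format::NV12);
    img.allocate(); // allocate the previously initialised multi-planar image (assumed API)
}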
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowp.h b/arm_compute/runtime/CL/functions/CLGEMMLowp.h
index 613fcaa7e0..ffd997f6ec 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowp.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowp.h
@@ -52,26 +52,26 @@ public:
/** Constructor */
CLGEMMLowp(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @note GEMM_LOWP: low precision matrix multiply kernel
- * This kernel performs the following computation:
- *
- * -# Convert a values from uint8 to int32 and add a_offset to each of them.
- * -# Convert b values from uint8 to int32 and add b_offset to each of them.
- * -# Compute the int32 matrix product of the resulting a * b.
- * -# Add output_offset to each entry of the result.
- * -# Multiply each entry of the result and round to the nearest integer
- * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
- *
- * @param[in] a First input tensor (Matrix A). Data types supported: U8.
- * @param[in] b Second input tensor (Matrix B). Data types supported: same as @p a.
- * @param[out] output Output tensor. Data types supported: same as @p a.
- * @param[in] a_offset Offset to be added to each element of the matrix A.
- * @param[in] b_offset Offset to be added to each element of the matrix B.
- * @param[in] output_offset Offset to be added to each element of the output matrix
- * @param[in] output_mult_int Multiplied with each element of the output matrix
- * @param[in] shift Number of bits to shift right the result.
- */
+ *
+ * @note GEMM_LOWP: low precision matrix multiply kernel
+ * This kernel performs the following computation:
+ *
+ * -# Convert a values from uint8 to int32 and add a_offset to each of them.
+ * -# Convert b values from uint8 to int32 and add b_offset to each of them.
+ * -# Compute the int32 matrix product of the resulting a * b.
+ * -# Add output_offset to each entry of the result.
+ * -# Multiply each entry of the result by output_mult_int and round to the nearest integer
+ * -# Clamp the resulting int32 values to the [0..255] range and cast to uint8.
+ *
+ * @param[in] a First input tensor (Matrix A). Data types supported: U8.
+ * @param[in] b Second input tensor (Matrix B). Data types supported: same as @p a.
+ * @param[out] output Output tensor. Data types supported: same as @p a.
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ * @param[in] output_offset Offset to be added to each element of the output matrix
+ * @param[in] output_mult_int Multiplied with each element of the output matrix
+ * @param[in] shift Number of bits to shift right the result.
+ */
void configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift);
// Inherited methods overridden:
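To make the @note above concrete, a scalar sketch of the per-element post-processing it describes (add output_offset, multiply by output_mult_int, shift right, clamp to [0..255]); the exact rounding behaviour is an assumption and the OpenCL kernel remains authoritative:

#include <algorithm>
#include <cstdint>

inline uint8_t gemmlowp_requantize(int32_t acc, int32_t output_offset, int32_t output_mult_int, int32_t shift)
{
    int32_t v = (acc + output_offset) * output_mult_int; // add output_offset, then multiply
    v >>= shift;                                         // shift right by 'shift' bits
    return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, v))); // clamp to [0..255] and cast to uint8
}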
diff --git a/arm_compute/runtime/CL/functions/CLIntegralImage.h b/arm_compute/runtime/CL/functions/CLIntegralImage.h
index 25fc549b29..71f6897d1b 100644
--- a/arm_compute/runtime/CL/functions/CLIntegralImage.h
+++ b/arm_compute/runtime/CL/functions/CLIntegralImage.h
@@ -43,10 +43,10 @@ public:
/** Default Constructor. */
CLIntegralImage();
/** Initialise the function's source, destinations and border mode.
- *
- * @param[in] input Source tensor. Data types supported: U8.
- * @param[out] output Destination tensor, Data types supported: U32.
- */
+ *
+ * @param[in] input Source tensor. Data types supported: U8.
+ * @param[out] output Destination tensor. Data types supported: U32.
+ */
void configure(const ICLTensor *input, ICLTensor *output);
// Inherited methods overridden:
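As background for the U8-to-U32 conversion documented above, a scalar sketch of what an integral image computes (each output element is the sum of all input elements above and to the left of it, inclusive); the inclusivity/border convention is an assumption, not stated in this header:

#include <cstdint>
#include <vector>

std::vector<uint32_t> integral_image(const std::vector<uint8_t> &in, int width, int height)
{
    std::vector<uint32_t> out(in.size(), 0U);
    for(int y = 0; y < height; ++y)
    {
        uint32_t row_sum = 0U;
        for(int x = 0; x < width; ++x)
        {
            row_sum += in[y * width + x];                                            // running sum of the current row
            out[y * width + x] = row_sum + (y > 0 ? out[(y - 1) * width + x] : 0U);  // add the total of the column above
        }
    }
    return out;
}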
diff --git a/arm_compute/runtime/MultiImage.h b/arm_compute/runtime/MultiImage.h
index 917e586ef8..30fa9b0256 100644
--- a/arm_compute/runtime/MultiImage.h
+++ b/arm_compute/runtime/MultiImage.h
@@ -45,18 +45,18 @@ public:
MultiImage();
/** Allocate the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init(unsigned int width, unsigned int height, Format format);
/** Allocate the multi-planar image
*
* @note Uses conservative padding strategy which fits all kernels.
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
*/
void init_auto_padding(unsigned int width, unsigned int height, Format format);
/** Allocate a previously initialised multi image
@@ -67,10 +67,10 @@ public:
void allocate();
/** Create a subimage from an existing MultiImage.
*
- * @param[in] image Image to use backing memory from
- * @param[in] coords Starting coordinates of the new image. Should be within the parent image sizes
- * @param[in] width The width of the subimage
- * @param[in] height The height of the subimage
+ * @param[in] image Image to use backing memory from
+ * @param[in] coords Starting coordinates of the new image. Should be within the parent image sizes
+ * @param[in] width The width of the subimage
+ * @param[in] height The height of the subimage
*/
void create_subimage(MultiImage *image, const Coordinates &coords, unsigned int width, unsigned int height);
@@ -82,10 +82,10 @@ public:
private:
/** Init the multi-planar image
*
- * @param[in] width Width of the whole image
- * @param[in] height Height of the whole image
- * @param[in] format Format of the whole image
- * @param[in] auto_padding Specifies whether the image uses auto padding
+ * @param[in] width Width of the whole image
+ * @param[in] height Height of the whole image
+ * @param[in] format Format of the whole image
+ * @param[in] auto_padding Specifies whether the image uses auto padding
*/
void internal_init(unsigned int width, unsigned int height, Format format, bool auto_padding);
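A minimal usage sketch of the interface shown above, including create_subimage(); the sizes, coordinates and Format value are assumed examples:

#include "arm_compute/runtime/MultiImage.h"

using namespace arm_compute;

void make_parent_and_subimage(MultiImage &parent, MultiImage &sub)
{
    parent.init_auto_padding(1920U, 1080U, Format::NV12); // conservative padding that fits all kernels
    parent.allocate();                                    // allocate the previously initialised multi image
    sub.create_subimage(&parent, Coordinates(64, 64), 256U, 256U); // 256x256 window at (64,64), reusing the parent's backing memory
}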
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 3433e77ba1..8757bc63aa 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -68,18 +68,18 @@ public:
/** Constructor */
NEDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input, weights, biases and output tensors.
- *
- * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
- * @param[in] weights The 4d weights with dimensions [width, height, OFM, IFM]. Data type supported: Same as @p input.
- * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
- * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
- * @param[in] info Contains padding and policies to be used in the deconvolution, this is decribed in @ref PadStrideInfo.
- * @param[in] ax The number of zeros added to right edge of the input.
- * @param[in] ay The number of zeros added to top edge of the input.
- * @param[in] upscalex How much to scale the X axis.
- * @param[in] upscaley How much to scale the Y axis.
- *
- */
+ *
+ * @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: F32.
+ * @param[in] weights The 4d weights with dimensions [width, height, OFM, IFM]. Data type supported: Same as @p input.
+ * @param[in] bias Optional, ignored if NULL. The biases have one dimension. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. The output has the same number of dimensions as the @p input.
+ * @param[in] info Contains padding and policies to be used in the deconvolution, as described in @ref PadStrideInfo.
+ * @param[in] ax The number of zeros added to the right edge of the input.
+ * @param[in] ay The number of zeros added to the top edge of the input.
+ * @param[in] upscalex How much to scale the X axis.
+ * @param[in] upscaley How much to scale the Y axis.
+ *
+ */
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info,
unsigned int ax, unsigned int ay, float upscalex, float upscaley);
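A sketch of wiring up the configure() call shown above; tensor creation and allocation are omitted, and the PadStrideInfo value and the ax/ay/upscale numbers are assumed examples:

#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"

using namespace arm_compute;

void setup_deconvolution(NEDeconvolutionLayer &deconv, ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output)
{
    const PadStrideInfo info(1, 1, 0, 0); // stride 1x1, no padding (assumed example)
    deconv.configure(input, weights, bias, output, info,
                     0U /* ax */, 0U /* ay */, 2.f /* upscalex */, 2.f /* upscaley */);
}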
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index daaf18f297..c731bf278f 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -51,22 +51,22 @@ public:
/** Constructor */
NEDirectConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input, weights, biases and output tensors.
- *
- * @note: DirectConvolution only works in the following configurations:
- * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
- * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
- * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
- *
- * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
- * @param[in] weights Set of kernels to convolve the input volume.
- * Supported sizes: 1x1, 3x3 and 5x5.
- * The 3rd dimension must be the same as the input's volume 3rd dimension.
- * Data type supported: Same as @p input.
- * @param[in] bias Set of biases. Data type supported: Same as @p input.
- * @param[out] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: Same as @p input.
- * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
- */
+ *
+ * @note: DirectConvolution only works in the following configurations:
+ * 1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
+ * 3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+ * 5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
+ *
+ * @param[in, out] input Input tensor. Data types supported: QS8/QS16/F16/F32.
+ * @param[in] weights Set of kernels to convolve the input volume.
+ * Supported sizes: 1x1, 3x3 and 5x5.
+ * The 3rd dimension must be the same as the input's volume 3rd dimension.
+ * Data type supported: Same as @p input.
+ * @param[in] bias Set of biases. Data type supported: Same as @p input.
+ * @param[out] output Output tensor.
+ * The 3rd dimension must be equal to the 4th dimension of the @p weights tensor. Data types supported: Same as @p input.
+ * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
+ */
void configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &conv_info);
// Inherited methods overridden:
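A sketch of the configuration documented above, using one of the listed valid combinations (3x3 kernel, stride 1, F32 data); tensor setup is omitted and the PadStrideInfo value is an assumed example:

#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"

using namespace arm_compute;

void setup_direct_convolution(NEDirectConvolutionLayer &conv, ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output)
{
    const PadStrideInfo conv_info(1, 1, 1, 1); // stride_x = stride_y = 1 with 1-pixel padding
    conv.configure(input, weights, bias, output, conv_info);
}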
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 3b6aa1c7db..3d213a7668 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -46,11 +46,11 @@ public:
/** Constructor */
NEGEMMLowpAssemblyMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
- */
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: U8, S8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *output);
// Inherited methods overridden:
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index 598756e435..889bbca7f2 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -58,18 +58,18 @@ public:
/** Constructor */
NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Initialise the kernel's inputs, output
- *
- * @note GEMM_LOWP: low precision GEMM kernel
- * This kernel performs the following computations:
- *
- * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
- * -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
- * -# Compute the matrix product of the resulting a * b in int32.
- *
- * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
- * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
- * @param[out] output Output tensor. Data type supported: Data type supported: S32
- */
+ *
+ * @note GEMM_LOWP: low precision GEMM kernel
+ * This kernel performs the following computations:
+ *
+ * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
+ * -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
+ * -# Compute the matrix product of the resulting a * b in int32.
+ *
+ * @param[in] a First input tensor (Matrix A). Data type supported: QASYMM8.
+ * @param[in] b Second input tensor (Matrix B). Data type supported: same as @p a
+ * @param[out] output Output tensor. Data type supported: S32
+ */
void configure(const ITensor *a, const ITensor *b, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
*
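To illustrate the @note above, a scalar sketch of how one S32 output element is accumulated from QASYMM8 inputs; the contiguous row/column layout and the way the per-matrix offsets are supplied are simplifying assumptions, and the NEON kernels remain authoritative:

#include <cstdint>

int32_t gemmlowp_accumulate(const uint8_t *a_row, const uint8_t *b_col, int k, int32_t a_offset, int32_t b_offset)
{
    int32_t acc = 0;
    for(int i = 0; i < k; ++i)
    {
        // widen to int32, add the per-matrix offsets, then multiply-accumulate
        acc += (static_cast<int32_t>(a_row[i]) + a_offset) * (static_cast<int32_t>(b_col[i]) + b_offset);
    }
    return acc; // S32, matching the documented output data type
}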
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
index 9270d5581f..533a41c888 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h
@@ -60,29 +60,29 @@ class NEGEMMLowpQuantizeDownInt32ToUint8Scale : public INESimpleFunction
{
public:
/** Initialise the kernel's inputs, output
- *
- * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8
- * @param[in] result_offset Offset to be added to each element of the input matrix
- * @param[in] result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
- * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
- */
+ *
+ * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[out] output Output tensor. Data type supported: QASYMM8
+ * @param[in] result_offset Offset to be added to each element of the input matrix
+ * @param[in] result_mult_int Value to be multiplied with each element of the input matrix once the result_offset has been added
+ * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
/** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8Scale
- *
- * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
- * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
- * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
- * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8
- * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
- * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
- * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
- */
+ *
+ * @param[in] input Input tensor. It is the output of @ref NEGEMMLowpMatrixMultiplyCore function. Data type supported: S32
+ * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
+ * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
+ * @param[in] output Output tensor. Data type supported: QASYMM8
+ * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8
+ * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
+ * Along with @p min, this value can be used to implement "rectified linear unit" activation functions
+ */
static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);
};
}
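For context, a scalar sketch of the quantize-down step documented for NEGEMMLowpQuantizeDownInt32ToUint8Scale: add the bias and result_offset, multiply by result_mult_int, shift right by result_shift, then clamp. The exact rounding and treating min == max == 0 as "no extra saturation" are assumptions:

#include <algorithm>
#include <cstdint>

inline uint8_t quantize_down_to_uint8(int32_t input, int32_t bias, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0)
{
    int32_t v = ((input + bias + result_offset) * result_mult_int) >> result_shift;
    if(min != max)
    {
        v = std::max<int32_t>(min, std::min<int32_t>(max, v)); // optional saturation range (e.g. a bounded ReLU)
    }
    return static_cast<uint8_t>(std::max<int32_t>(0, std::min<int32_t>(255, v))); // final clamp to the QASYMM8 range
}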
diff --git a/arm_compute/runtime/NEON/functions/NEIntegralImage.h b/arm_compute/runtime/NEON/functions/NEIntegralImage.h
index 6d7dd697e8..1ac501c994 100644
--- a/arm_compute/runtime/NEON/functions/NEIntegralImage.h
+++ b/arm_compute/runtime/NEON/functions/NEIntegralImage.h
@@ -35,10 +35,10 @@ class NEIntegralImage : public INESimpleFunction
{
public:
/** Initialise the function's source, destinations and border mode.
- *
- * @param[in] input Source tensor. Data type supported: U8.
- * @param[out] output Destination tensor. Data type supported: U32.
- */
+ *
+ * @param[in] input Source tensor. Data type supported: U8.
+ * @param[out] output Destination tensor. Data type supported: U32.
+ */
void configure(const ITensor *input, ITensor *output);
};
}