diff options
Diffstat (limited to 'arm_compute/core/NEON')
7 files changed, 53 insertions, 53 deletions
diff --git a/arm_compute/core/NEON/NEFixedPoint.h b/arm_compute/core/NEON/NEFixedPoint.h index f8579e08b5..5719b63618 100644 --- a/arm_compute/core/NEON/NEFixedPoint.h +++ b/arm_compute/core/NEON/NEFixedPoint.h @@ -179,19 +179,19 @@ void vst1_qs16(qint16_t *addr, qint16x4_t b); void vst1q_qs8(qint8_t *addr, qint8x16_t b); /** Store a single 16 bit fixed point vector to memory (8 elements) -* -* @param[in] addr Memory address where the 16 bit fixed point vector should be stored -* @param[in] b 16 bit fixed point vector to store -* -*/ + * + * @param[in] addr Memory address where the 16 bit fixed point vector should be stored + * @param[in] b 16 bit fixed point vector to store + * + */ void vst1q_qs16(qint16_t *addr, qint16x8_t b); /** Store two 16 bit fixed point vector to memory (8x2 elements) -* -* @param[in] addr Memory address where the 16 bit fixed point vectors should be stored -* @param[in] b 16 bit fixed point vectors to store -* -*/ + * + * @param[in] addr Memory address where the 16 bit fixed point vectors should be stored + * @param[in] b 16 bit fixed point vectors to store + * + */ void vst2q_qs16(qint16_t *addr, qint16x8x2_t b); /** 16 bit fixed point vector saturating narrow (8 elements) diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h index a3fd3feb58..ef51cbe841 100644 --- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h +++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h @@ -72,27 +72,27 @@ private: using ActivationFunctionExecutorPtr = void (NEActivationLayerKernel::*)(const Window &window); /** Function to apply an activation function on a tensor. 
* - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ template <ActivationLayerInfo::ActivationFunction F, typename T> typename std::enable_if<std::is_same<T, float>::value, void>::type activation(const Window &window); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC /** Function to apply an activation function on a tensor. * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ template <ActivationLayerInfo::ActivationFunction F, typename T> typename std::enable_if<std::is_same<T, float16_t>::value, void>::type activation(const Window &window); #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */ /** Function to apply an activation function on a tensor. * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ template <ActivationLayerInfo::ActivationFunction F, typename T> typename std::enable_if<std::is_same<T, qint8_t>::value, void>::type activation(const Window &window); /** Function to apply an activation function on a tensor. 
* - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ template <ActivationLayerInfo::ActivationFunction F, typename T> typename std::enable_if<std::is_same<T, qint16_t>::value, void>::type activation(const Window &window); diff --git a/arm_compute/core/NEON/kernels/NEDerivativeKernel.h b/arm_compute/core/NEON/kernels/NEDerivativeKernel.h index 7613b586d0..5d46516f68 100644 --- a/arm_compute/core/NEON/kernels/NEDerivativeKernel.h +++ b/arm_compute/core/NEON/kernels/NEDerivativeKernel.h @@ -64,17 +64,17 @@ public: private: /** Function to perform derivative along the X direction on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void derivative_x(const Window &window); /** Function to perform derivative along the Y direction on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void derivative_y(const Window &window); /** Function to perform derivative along the X and Y direction on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void derivative_xy(const Window &window); /** Common signature for all the specialised derivative functions diff --git a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h index 654dee21af..7684350c0f 100644 --- a/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h +++ b/arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h @@ -58,27 +58,27 @@ public: NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default; /** Initialise the kernel's input and output. 
* - * @param[in] input Input tensor. Data type supported: S32 - * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. - * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. - * @param[out] output Output tensor. Data type supported: Data type supported: QASYMM8 - * @param[in] result_offset Offset to be added to each element of the input matrix - * @param[in] result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add - * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8 - * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8 - * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, - * Along with @p min, this value can be used to implement "rectified linear unit" activation functions + * @param[in] input Input tensor. Data type supported: S32 + * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. + * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. + * @param[out] output Output tensor. 
Data type supported: QASYMM8 + * @param[in] result_offset Offset to be added to each element of the input matrix + * @param[in] result_mult_int Value to be multiplied to each element of the input matrix once the result_offset has been added + * @param[in] result_shift Number of bits to shift right the result before converting back to QASYMM8 + * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8 + * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, + * Along with @p min, this value can be used to implement "rectified linear unit" activation functions */ void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0); /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel * - * @param[in] input Input tensor. Data type supported: S32 - * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. - * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. - * @param[in] output Output tensor. Data type supported: Data type supported: QASYMM8 - * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8 - * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, - * Along with @p min, this value can be used to implement "rectified linear unit" activation functions + * @param[in] input Input tensor. Data type supported: S32 + * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required. + * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input. 
+ * @param[in] output Output tensor. Data type supported: QASYMM8 + * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QASYMM8 + * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QASYMM8, + * Along with @p min, this value can be used to implement "rectified linear unit" activation functions */ static Error validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0); diff --git a/arm_compute/core/NEON/kernels/NEHistogramKernel.h b/arm_compute/core/NEON/kernels/NEHistogramKernel.h index 0fa911dbf0..672472e082 100644 --- a/arm_compute/core/NEON/kernels/NEHistogramKernel.h +++ b/arm_compute/core/NEON/kernels/NEHistogramKernel.h @@ -82,28 +82,28 @@ public: private: /** Function to merge multiple partial histograms. * - * @param[out] global_hist Pointer to the final histogram. - * @param[in] local_hist Pointer to the partial histograms. - * @param[in] bins Number of bins. + * @param[out] global_hist Pointer to the final histogram. + * @param[in] local_hist Pointer to the partial histograms. + * @param[in] bins Number of bins. */ void merge_histogram(uint32_t *global_hist, const uint32_t *local_hist, size_t bins); /** Function to merge multiple minimum values of partial histograms. * - * @param[out] global_min Pointer to the global min value. - * @param[in] local_min Local min value. + * @param[out] global_min Pointer to the global min value. + * @param[in] local_min Local min value. 
*/ void merge_min(uint8_t *global_min, const uint8_t &local_min); /** Function to perform histogram on the given window - * - * @param[in] win Region on which to execute the kernel - * @param[in] info Info about the executing thread + * + * @param[in] win Region on which to execute the kernel + * @param[in] info Info about the executing thread */ void histogram_U8(Window win, const ThreadInfo &info); /** Function to perform histogram on the given window where histogram is * of fixed size 256 without ranges and offsets. * - * @param[in] win Region on which to execute the kernel - * @param[in] info Info about the executing thread + * @param[in] win Region on which to execute the kernel + * @param[in] info Info about the executing thread */ void histogram_fixed_U8(Window win, const ThreadInfo &info); /** Pre-calculate the pixel windowing for every possible pixel diff --git a/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h b/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h index 46b2a8ddb4..76c6163607 100644 --- a/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h +++ b/arm_compute/core/NEON/kernels/NEMagnitudePhaseKernel.h @@ -66,17 +66,17 @@ public: private: /** Function to perform magnitude on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void magnitude(const Window &window); /** Function to perform phase on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void phase(const Window &window); /** Function to perform magnitude and phase on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void magnitude_phase(const Window &window); @@ -130,17 +130,17 @@ public: private: /** Function to perform magnitude on the given window * - * @param[in] window Region on which to execute the 
kernel + * @param[in] window Region on which to execute the kernel */ void magnitude(const Window &window); /** Function to perform phase on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void phase(const Window &window); /** Function to perform magnitude and phase on the given window * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ void magnitude_phase(const Window &window); diff --git a/arm_compute/core/NEON/kernels/NEWarpKernel.h b/arm_compute/core/NEON/kernels/NEWarpKernel.h index 3a1cab1585..d7cb82f27e 100644 --- a/arm_compute/core/NEON/kernels/NEWarpKernel.h +++ b/arm_compute/core/NEON/kernels/NEWarpKernel.h @@ -66,17 +66,17 @@ public: protected: /** function to perform warp affine or warp perspective on the given window when border mode == UNDEFINED * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ virtual void warp_undefined(const Window &window) = 0; /** function to perform warp affine or warp perspective on the given window when border mode == CONSTANT * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ virtual void warp_constant(const Window &window) = 0; /** function to perform warp affine or warp perspective on the given window when border mode == REPLICATE * - * @param[in] window Region on which to execute the kernel + * @param[in] window Region on which to execute the kernel */ virtual void warp_replicate(const Window &window) = 0; /** Common signature for all the specialised warp functions |