From f4cb81be294a1075ce3ce7d11dd60bdee5505ce9 Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Wed, 4 Jul 2018 15:13:14 +0100
Subject: COMPMID-970 : Remove QS8 / QS16 support

Removed QS32 references

Change-Id: Ic7df02c08ae7aa1b7dcae15bdda113321af851b8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138703
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h | 4 ++--
 arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h       | 2 +-
 .../core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h    | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'arm_compute/core/NEON/kernels')

diff --git a/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h b/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
index d5c9e3bbe9..1a276c353e 100644
--- a/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
@@ -59,7 +59,7 @@ public:
     ~NEConvertFullyConnectedWeightsKernel() = default;
     /** Set the input and output tensor.
      *
-     * @param[in]  input                Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+     * @param[in]  input                Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
      * @param[out] output               The converted weights tensor. Shape and Data Type: Same as @p input.
      * @param[in]  original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
      * @param[in]  data_layout          The data layout the weights have been trained in.
@@ -67,7 +67,7 @@ public:
     void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
     /** Static function to check if given info will lead to a valid configuration of @ref NEConvertFullyConnectedWeightsKernel
      *
-     * @param[in] input                Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+     * @param[in] input                Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
      * @param[in] output               The converted weights tensor info. Shape and Data Type: Same as @p input.
      * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
      * @param[in] data_layout          The data layout the weights have been trained in.
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
index 589725ab01..e9349a3197 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
@@ -74,7 +74,7 @@ public:
      *                      The 3rd dimension must be the same as the input's volume 3rd dimension.
      *                      Data type supported:Same as @p input.
      * @param[in] output    Output tensor.
-     *                      The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: QS32/F16/F32
+     *                      The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: F16/F32
      * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
      *
      * @return a status
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index 7fd1d70374..9af3de5ffe 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -55,7 +55,7 @@ public:
     /** Set the accumulate buffer and the biases of the kernel.
      *
      * @param[in, out] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
-     *                        Data type supported: QS32/F16/F32
+     *                        Data type supported: F16/F32
      * @param[in]      bias   (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
      * @param[out]     output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
      *                        Data type supported: F16/F32
@@ -68,7 +68,7 @@ public:
     /** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
      *
      * @param[in] input  Input to add the bias to. If @p output is not specified then accumulation is done in-place.
-     *                   Data type supported: QS32/F16/F32
+     *                   Data type supported: F16/F32
      * @param[in] bias   (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
      * @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
      *                   Data type supported: F16/F32
--
cgit v1.2.1
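
Note (not part of the patch): the doc comments above describe the configure()/validate() pair these kernels expose. Below is a minimal, hypothetical sketch of driving NEConvertFullyConnectedWeightsKernel with F32, one of the data types still listed after QS32 was removed. The tensor shapes are invented for illustration, and validate() is used as a guard precisely because it returns a Status instead of asserting.

// Hypothetical usage sketch, not part of this commit. It exercises the
// configure()/validate() pair documented in the hunks above.
#include "arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Shape of the original activation entering the fully connected layer,
    // in NCHW as the doc comment requires. Values here are illustrative.
    const TensorShape original_input_shape(4U, 4U, 6U); // 4*4*6 = 96 elements

    // 2D weights tensor and its converted counterpart (same shape and type).
    Tensor weights, converted;
    weights.allocator()->init(TensorInfo(TensorShape(16U, 96U), 1, DataType::F32));
    converted.allocator()->init(TensorInfo(TensorShape(16U, 96U), 1, DataType::F32));

    // The static validate() returns a Status rather than asserting, so it is
    // a safe pre-flight check before configure() is called.
    const Status status = NEConvertFullyConnectedWeightsKernel::validate(
        weights.info(), converted.info(), original_input_shape, DataLayout::NCHW);

    NEConvertFullyConnectedWeightsKernel kernel;
    if(bool(status))
    {
        kernel.configure(&weights, &converted, original_input_shape, DataLayout::NCHW);
        // Actually running the kernel would still require allocating the
        // tensors and dispatching it, e.g. via NEScheduler::get().schedule().
    }
    return bool(status) ? 0 : 1;
}

The sketch stops at configuration on purpose: configure() only needs tensor metadata, so no buffers have to be allocated to demonstrate the documented API surface.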