author    Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2018-07-04 15:13:14 +0100
committer Anthony Barbier <anthony.barbier@arm.com>                   2018-11-02 16:54:10 +0000
commit    f4cb81be294a1075ce3ce7d11dd60bdee5505ce9 (patch)
tree      33e903042fab84a7f469855078131bd3fd01df4e
parent    060c4b784b1e66f937369e420fa97c7fb71d5fd4 (diff)
download  ComputeLibrary-f4cb81be294a1075ce3ce7d11dd60bdee5505ce9.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed QS32 references

Change-Id: Ic7df02c08ae7aa1b7dcae15bdda113321af851b8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/138703
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--  arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h        |  4
-rw-r--r--  arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h   |  4
-rw-r--r--  arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h      |  4
-rw-r--r--  arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h            |  2
-rw-r--r--  arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h |  4
-rw-r--r--  arm_compute/core/Types.h                                                  |  1
-rw-r--r--  arm_compute/core/Utils.h                                                  |  3
-rw-r--r--  arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h         |  4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h       |  4
-rw-r--r--  src/core/CL/CLHelpers.cpp                                                 | 10
-rw-r--r--  src/core/CL/cl_kernels/convert_fc_weights.cl                              |  2
-rw-r--r--  src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp              |  2
-rw-r--r--  src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp            |  2
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp       |  4
14 files changed, 19 insertions(+), 31 deletions(-)
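
For callers of the library, the visible effect of this patch is that DataType::QS32 no longer exists: buffers that used the quantized, symmetric fixed-point 32-bit type now go through the plain S32 (or F16/F32) paths instead. A minimal migration sketch, assuming a user-side TensorInfo declaration (the tensor shape is illustrative, not taken from this patch):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    // Before this patch an accumulator could be declared as fixed-point 32-bit:
    //   TensorInfo acc_info(TensorShape(16U, 16U, 8U), 1, DataType::QS32); // no longer compiles
    // After this patch the same buffer uses the plain signed 32-bit type:
    TensorInfo acc_info(TensorShape(16U, 16U, 8U), 1, DataType::S32);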
diff --git a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
index f5e2f0de89..b85f93e992 100644
--- a/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.h
@@ -55,7 +55,7 @@ public:
~CLConvertFullyConnectedWeightsKernel() = default;
/** Set the input and output tensor.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -63,7 +63,7 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeightsKernel
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
diff --git a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
index 1947a98ba3..d90a2cf4b8 100644
--- a/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h
@@ -51,7 +51,7 @@ public:
/** Set the accumulate buffer and the biases of the kernel.
*
* @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: S32/QS32/F16/F32
+ * Data type supported: S32/F16/F32
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
* Required parameter if output is of QASYMM8 type.
@@ -65,7 +65,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLDirectConvolutionLayerOutputStageKernel
*
* @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: QS32/F16/F32
+ * Data type supported: F16/F32
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
* Data type supported: F16/F32
diff --git a/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h b/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
index d5c9e3bbe9..1a276c353e 100644
--- a/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
+++ b/arm_compute/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h
@@ -59,7 +59,7 @@ public:
~NEConvertFullyConnectedWeightsKernel() = default;
/** Set the input and output tensor.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -67,7 +67,7 @@ public:
void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref NEConvertFullyConnectedWeightsKernel
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
index 589725ab01..e9349a3197 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
@@ -74,7 +74,7 @@ public:
* The 3rd dimension must be the same as the input's volume 3rd dimension.
* Data type supported:Same as @p input.
* @param[in] output Output tensor.
- * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: QS32/F16/F32
+ * The 3rd dimensions must be equal to the 4th dimension of the @p kernels tensor. Data types supported: F16/F32
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
*
* @return a status
diff --git a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
index 7fd1d70374..9af3de5ffe 100644
--- a/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.h
@@ -55,7 +55,7 @@ public:
/** Set the accumulate buffer and the biases of the kernel.
*
* @param[in, out] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: QS32/F16/F32
+ * Data type supported: F16/F32
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[out] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
* Data type supported: F16/F32
@@ -68,7 +68,7 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayerOutputStageKernel
*
* @param[in] input Input to add the bias to. If @p output is not specified then accumulation is done in-place.
- * Data type supported: QS32/F16/F32
+ * Data type supported: F16/F32
* @param[in] bias (Optional) The shared bias tensor to add. It must be 1D Tensor. Data type supported: Same as @p input
* @param[in] output (Optional) If the output tensor is specified the accumulation is done out-of-place. (Defaults to nullptr)
* Data type supported: F16/F32
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 89fd4b8bb4..1363324e3b 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -79,7 +79,6 @@ enum class DataType
S16, /**< signed 16-bit number */
U32, /**< unsigned 32-bit number */
S32, /**< signed 32-bit number */
- QS32, /**< quantized, symmetric fixed-point 32-bit number */
U64, /**< unsigned 64-bit number */
S64, /**< signed 64-bit number */
F16, /**< 16-bit floating-point number */
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index cfebfa1506..729a46fe3f 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -119,7 +119,6 @@ inline size_t data_size_from_type(DataType data_type)
case DataType::F32:
case DataType::U32:
case DataType::S32:
- case DataType::QS32:
return 4;
case DataType::F64:
case DataType::U64:
@@ -192,7 +191,6 @@ inline size_t element_size_from_data_type(DataType dt)
case DataType::U32:
case DataType::S32:
case DataType::F32:
- case DataType::QS32:
return 4;
default:
ARM_COMPUTE_ERROR("Undefined element size for given data type");
@@ -527,7 +525,6 @@ inline DataType get_promoted_data_type(DataType dt)
case DataType::U32:
case DataType::S32:
case DataType::F32:
- case DataType::QS32:
ARM_COMPUTE_ERROR("Unsupported data type promotions!");
default:
ARM_COMPUTE_ERROR("Undefined data type!");
diff --git a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
index ae0c9d6459..77e9d15e7f 100644
--- a/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/CL/functions/CLConvertFullyConnectedWeights.h
@@ -37,7 +37,7 @@ class CLConvertFullyConnectedWeights : public ICLSimpleFunction
public:
/** Initialize the function.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -45,7 +45,7 @@ public:
void configure(const ICLTensor *input, ICLTensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref CLConvertFullyConnectedWeights
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
diff --git a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
index 3ec0390124..acbba28040 100644
--- a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
@@ -40,7 +40,7 @@ public:
NEConvertFullyConnectedWeights();
/** Initialize the function.
*
- * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[out] output The converted weights tensor. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
@@ -48,7 +48,7 @@ public:
void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
/** Static function to check if given info will lead to a valid configuration of @ref NEConvertFullyConnectedWeights
*
- * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/QS32/F16/F32.
+ * @param[in] input Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
* @param[in] output The converted weights tensor info. Shape and Data Type: Same as @p input.
* @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
* @param[in] data_layout The data layout the weights have been trained in.
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 07f8bd7bcd..55da5275df 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -50,8 +50,6 @@ std::string get_cl_type_from_data_type(const DataType &dt)
return "uint";
case DataType::S32:
return "int";
- case DataType::QS32:
- return "qs32";
case DataType::U64:
return "ulong";
case DataType::S64:
@@ -93,13 +91,7 @@ std::string get_data_size_from_data_type(const DataType &dt)
std::string get_underlying_cl_type_from_data_type(const DataType &dt)
{
- switch(dt)
- {
- case DataType::QS32:
- return "int";
- default:
- return get_cl_type_from_data_type(dt);
- }
+ return get_cl_type_from_data_type(dt);
}
GPUTarget get_target_from_device(cl::Device &device)
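
With the QS32 special case removed, get_underlying_cl_type_from_data_type above becomes a pure pass-through to get_cl_type_from_data_type: QS32 was the only case whose underlying OpenCL C type ("int") differed from its reported type ("qs32"), so the two lookups now agree for every remaining data type. A sketch of the now-identical results, assuming both helpers are declared in arm_compute/core/CL/CLHelpers.h at this revision:

    #include "arm_compute/core/CL/CLHelpers.h"
    #include "arm_compute/core/Types.h"

    #include <string>

    using namespace arm_compute;

    // Both calls resolve to the same OpenCL C type name after this patch.
    const std::string cl_type         = get_cl_type_from_data_type(DataType::S32);            // "int"
    const std::string underlying_type = get_underlying_cl_type_from_data_type(DataType::S32); // "int"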
diff --git a/src/core/CL/cl_kernels/convert_fc_weights.cl b/src/core/CL/cl_kernels/convert_fc_weights.cl
index 5aadfb36f9..d47b733acd 100644
--- a/src/core/CL/cl_kernels/convert_fc_weights.cl
+++ b/src/core/CL/cl_kernels/convert_fc_weights.cl
@@ -32,7 +32,7 @@
* @attention Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float
* @attention Original input tensor width*height and depth should be given as a preprocessor argument using -DFACTOR_1=size and -DFACTOR_2=size for NCHW and vice versa for NHWC. e.g. -DFACTOR_1=256 and -DFACTOR_2=128
*
- * @param[in] src_ptr Pointer to the source image. Supported data types: U8, S8, QASYMM8, U16, S16, U32, S32, QS32, F16, F32
+ * @param[in] src_ptr Pointer to the source image. Supported data types: U8, S8, QASYMM8, U16, S16, U32, S32, F16, F32
* @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
diff --git a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
index a39d1f4a0b..86858d0c03 100644
--- a/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/CL/kernels/CLConvertFullyConnectedWeightsKernel.cpp
@@ -76,7 +76,7 @@ Status CLConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *input,
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::U32, DataType::S32,
- DataType::QS32, DataType::F16, DataType::F32);
+ DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
diff --git a/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp b/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
index e581f221a3..be5e6436b3 100644
--- a/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
+++ b/src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp
@@ -66,7 +66,7 @@ Status NEConvertFullyConnectedWeightsKernel::validate(const ITensorInfo *input,
DataLayout data_layout)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::U32, DataType::S32,
- DataType::QS32, DataType::F16, DataType::F32);
+ DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() != 2);
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
index e4cd4d0465..6a373de1c3 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
@@ -47,11 +47,11 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *bias, con
ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8,
DataType::F16,
- DataType::QS32, DataType::S32, DataType::F32);
+ DataType::S32, DataType::F32);
if(bias != nullptr)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::F16, DataType::QS32, DataType::S32, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::F16, DataType::S32, DataType::F32);
if(is_data_type_quantized_asymmetric(input->data_type()))
{