about summary refs log tree commit diff
path: root/arm_compute/core
diff options
context:
space:
mode:
authorGeorgios Pinitas <georgios.pinitas@arm.com>2019-11-11 18:24:22 +0000
committerGeorgios Pinitas <georgios.pinitas@arm.com>2019-11-12 21:22:47 +0000
commit8217c8e4f488eb32733c481ab3a4d905069479f1 (patch)
treef54e10c459d5bc5b847ef0caba075a636d9f1df5 /arm_compute/core
parent94e0cf960ea6116eb57fa88d9b951f859b52c602 (diff)
downloadComputeLibrary-8217c8e4f488eb32733c481ab3a4d905069479f1.tar.gz
COMPMID-2895: Remove QASYMM8_PER_CHANNEL data type
Change-Id: I2d1b77370f8eceeaeae95306b4db5d90ababb76f
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2266
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/core')
-rw-r--r--arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h4
-rw-r--r--arm_compute/core/NEON/NEAsymm.h17
-rw-r--r--arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h4
-rw-r--r--arm_compute/core/QuantizationInfo.h15
-rw-r--r--arm_compute/core/Types.h39
-rw-r--r--arm_compute/core/Utils.h5
6 files changed, 31 insertions, 53 deletions
diff --git a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
index 830d7518ce..739e2d45d2 100644
--- a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
@@ -48,13 +48,13 @@ public:
~CLDequantizationLayerKernel() = default;
/** Set the input, output, min and max.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor. Data types supported: F16/F32.
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
diff --git a/arm_compute/core/NEON/NEAsymm.h b/arm_compute/core/NEON/NEAsymm.h
index a3bd7e28f0..c75a58046b 100644
--- a/arm_compute/core/NEON/NEAsymm.h
+++ b/arm_compute/core/NEON/NEAsymm.h
@@ -325,23 +325,22 @@ inline float32x4x4_t vdequantize(const uint8x16_t &qv, float scale, int32_t offs
return vdequantized_input;
}
-/** Dequantize following an asymmetric quantization scheme a neon vector holding 16 quantized values.
+/** Dequantize following symmetric quantization scheme a neon vector holding 16 quantized values.
*
- * @param[in] qv Input values to be dequantized.
- * @param[in] vscale Vector containing quantization scaling factors.
- * @param[in] voffset Vector containing quantization offset.
+ * @param[in] qv Input values to be dequantized.
+ * @param[in] vscale Vector containing quantization scaling factors.
*
* @return Dequantized values in a neon vector
*/
-inline float32x4x4_t vdequantize(const uint8x16_t &qv, const float32x4x4_t vscale, const int32x4x4_t voffset)
+inline float32x4x4_t vdequantize(const int8x16_t &qv, const float32x4x4_t vscale)
{
const float32x4x4_t vdequantized_input =
{
{
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(qv))))), voffset.val[0])), vscale.val[0]),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(qv))))), voffset.val[1])), vscale.val[1]),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(qv))))), voffset.val[2])), vscale.val[2]),
- vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(qv))))), voffset.val[3])), vscale.val[3]),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(qv))))), vscale.val[0]),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(qv))))), vscale.val[1]),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(qv))))), vscale.val[2]),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(qv))))), vscale.val[3]),
}
};
return vdequantized_input;
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
index 3e7feda650..7e65384677 100644
--- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
@@ -52,13 +52,13 @@ public:
~NEDequantizationLayerKernel() = default;
/** Set input, output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8_PER_CHANNEL/QSYMM8/QSYMM16.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index 949ee66b7c..ebd9b677da 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -265,21 +265,6 @@ inline int8_t quantize_qsymm8_per_channel(float value, const QuantizationInfo &q
return quantized;
}
-/** Quantize a value given a 8-bit asymmetric per channel quantization scheme
- *
- * @param[in] value Value to quantize
- * @param[in] qinfo Quantization information to use for quantizing
- * @param[in] channel_id channel index into the scale vector of quantization info
- *
- * @return Quantized value
- */
-inline int8_t quantize_qasymm8_per_channel(float value, const QuantizationInfo &qinfo, size_t channel_id = 0)
-{
- int quantized = arm_compute::round(value / qinfo.scale()[channel_id], RoundingPolicy::TO_NEAREST_UP);
- quantized = std::max(0, std::min(quantized, 255));
- return quantized;
-}
-
/** Dequantize a value given a 8-bit asymmetric quantization scheme
*
* @param[in] value Value to dequantize
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 9551cc6547..851292f1e1 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -73,26 +73,25 @@ enum class Format
/** Available data types */
enum class DataType
{
- UNKNOWN, /**< Unknown data type */
- U8, /**< unsigned 8-bit number */
- S8, /**< signed 8-bit number */
- QSYMM8, /**< quantized, symmetric fixed-point 8-bit number */
- QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number unsigned */
- QASYMM8_SIGNED, /**< quantized, asymmetric fixed-point 8-bit number signed */
- QSYMM8_PER_CHANNEL, /**< quantized, symmetric per channel fixed-point 8-bit number */
- QASYMM8_PER_CHANNEL, /**< quantized, asymmetric per channel fixed-point 8-bit number */
- U16, /**< unsigned 16-bit number */
- S16, /**< signed 16-bit number */
- QSYMM16, /**< quantized, symmetric fixed-point 16-bit number */
- QASYMM16, /**< quantized, asymmetric fixed-point 16-bit number */
- U32, /**< unsigned 32-bit number */
- S32, /**< signed 32-bit number */
- U64, /**< unsigned 64-bit number */
- S64, /**< signed 64-bit number */
- F16, /**< 16-bit floating-point number */
- F32, /**< 32-bit floating-point number */
- F64, /**< 64-bit floating-point number */
- SIZET /**< size_t */
+ UNKNOWN, /**< Unknown data type */
+ U8, /**< unsigned 8-bit number */
+ S8, /**< signed 8-bit number */
+ QSYMM8, /**< quantized, symmetric fixed-point 8-bit number */
+ QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number unsigned */
+ QASYMM8_SIGNED, /**< quantized, asymmetric fixed-point 8-bit number signed */
+ QSYMM8_PER_CHANNEL, /**< quantized, symmetric per channel fixed-point 8-bit number */
+ U16, /**< unsigned 16-bit number */
+ S16, /**< signed 16-bit number */
+ QSYMM16, /**< quantized, symmetric fixed-point 16-bit number */
+ QASYMM16, /**< quantized, asymmetric fixed-point 16-bit number */
+ U32, /**< unsigned 32-bit number */
+ S32, /**< signed 32-bit number */
+ U64, /**< unsigned 64-bit number */
+ S64, /**< signed 64-bit number */
+ F16, /**< 16-bit floating-point number */
+ F32, /**< 32-bit floating-point number */
+ F64, /**< 64-bit floating-point number */
+ SIZET /**< size_t */
};
/** Available Sampling Policies */
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index a6e1ea1a89..366d5dcc68 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -116,7 +116,6 @@ inline size_t data_size_from_type(DataType data_type)
case DataType::QASYMM8:
case DataType::QASYMM8_SIGNED:
case DataType::QSYMM8_PER_CHANNEL:
- case DataType::QASYMM8_PER_CHANNEL:
return 1;
case DataType::U16:
case DataType::S16:
@@ -537,7 +536,6 @@ inline DataType get_promoted_data_type(DataType dt)
case DataType::QASYMM8:
case DataType::QASYMM8_SIGNED:
case DataType::QSYMM8_PER_CHANNEL:
- case DataType::QASYMM8_PER_CHANNEL:
case DataType::QSYMM16:
case DataType::QASYMM16:
case DataType::F16:
@@ -1029,7 +1027,6 @@ inline bool is_data_type_quantized(DataType dt)
case DataType::QASYMM8:
case DataType::QASYMM8_SIGNED:
case DataType::QSYMM8_PER_CHANNEL:
- case DataType::QASYMM8_PER_CHANNEL:
case DataType::QSYMM16:
case DataType::QASYMM16:
return true;
@@ -1050,7 +1047,6 @@ inline bool is_data_type_quantized_asymmetric(DataType dt)
{
case DataType::QASYMM8:
case DataType::QASYMM8_SIGNED:
- case DataType::QASYMM8_PER_CHANNEL:
case DataType::QASYMM16:
return true;
default:
@@ -1088,7 +1084,6 @@ inline bool is_data_type_quantized_per_channel(DataType dt)
switch(dt)
{
case DataType::QSYMM8_PER_CHANNEL:
- case DataType::QASYMM8_PER_CHANNEL:
return true;
default:
return false;