 -rw-r--r--  arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h    |   4
 -rw-r--r--  arm_compute/core/NEON/NEAsymm.h                              |  46
 -rw-r--r--  arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h  |   4
 -rw-r--r--  arm_compute/core/QuantizationInfo.h                          |  71
 -rw-r--r--  arm_compute/runtime/CL/functions/CLDequantizationLayer.h     |   5
 -rw-r--r--  arm_compute/runtime/NEON/functions/NEDequantizationLayer.h   |   4
 -rw-r--r--  docs/00_introduction.dox                                     |   6
 -rw-r--r--  src/core/CL/CLHelpers.cpp                                    |  10
 -rw-r--r--  src/core/CL/cl_kernels/dequantization_layer.cl               |  15
 -rw-r--r--  src/core/CL/kernels/CLDequantizationLayerKernel.cpp          |  12
 -rw-r--r--  src/core/NEON/kernels/NEDequantizationLayerKernel.cpp        |  74
 -rw-r--r--  src/core/Utils.cpp                                           |   2
 -rw-r--r--  src/runtime/CL/CLTensorAllocator.cpp                         |   9
 -rw-r--r--  tests/AssetsLibrary.h                                        |   3
 -rw-r--r--  tests/datasets/DatatypeDataset.h                             |  53
 -rw-r--r--  tests/validation/CL/DequantizationLayer.cpp                  |  21
 -rw-r--r--  tests/validation/CL/UNIT/TensorAllocator.cpp                 |   6
 -rw-r--r--  tests/validation/NEON/DequantizationLayer.cpp                |  21
 -rw-r--r--  tests/validation/UNIT/TensorInfo.cpp                         |  20
 -rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h       |  55
 -rw-r--r--  tests/validation/reference/DequantizationLayer.cpp           |  74
 -rw-r--r--  tests/validation/reference/DequantizationLayer.h             |   4
 -rw-r--r--  utils/TypePrinter.h                                          |  11
 23 files changed, 413 insertions(+), 117 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
index 3dfb19b306..6d37f6a1a5 100644
--- a/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLDequantizationLayerKernel.h
@@ -48,13 +48,13 @@ public:
~CLDequantizationLayerKernel() = default;
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8.
* @param[out] output Destination tensor. Data types supported: F16/F32.
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
diff --git a/arm_compute/core/NEON/NEAsymm.h b/arm_compute/core/NEON/NEAsymm.h
index 2347c468ab..4c8f797360 100644
--- a/arm_compute/core/NEON/NEAsymm.h
+++ b/arm_compute/core/NEON/NEAsymm.h
@@ -223,6 +223,52 @@ inline float32x4x4_t vdequantize(const uint8x16_t &qv, const UniformQuantization
return vdequantized_input;
}
+/** Dequantize a neon vector holding 16 quantized values using an asymmetric quantization scheme.
+ *
+ * @param[in] qv Input values to be dequantized.
+ * @param[in] scale Quantization scaling factor.
+ * @param[in] offset Zero quantization offset.
+ *
+ * @return Dequantized values in a neon vector
+ */
+inline float32x4x4_t vdequantize(const uint8x16_t &qv, float scale, int32_t offset)
+{
+ const int32x4_t voffset = vdupq_n_s32(offset);
+ const float32x4_t vscale = vdupq_n_f32(scale);
+ const float32x4x4_t vdequantized_input =
+ {
+ {
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(qv))))), voffset)), vscale),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(qv))))), voffset)), vscale),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(qv))))), voffset)), vscale),
+ vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(qv))))), voffset)), vscale),
+ }
+ };
+ return vdequantized_input;
+}
+
+/** Dequantize a neon vector holding 16 quantized values using a symmetric quantization scheme.
+ *
+ * @param[in] qv Input values to be dequantized.
+ * @param[in] scale Quantization scaling factor.
+ *
+ * @return Dequantized values in a neon vector
+ */
+inline float32x4x4_t vdequantize(const int8x16_t &qv, float scale)
+{
+ const float32x4_t vscale = vdupq_n_f32(scale);
+ const float32x4x4_t vdequantized_input =
+ {
+ {
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(qv))))), vscale),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(qv))))), vscale),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(qv))))), vscale),
+ vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(qv))))), vscale),
+ }
+ };
+ return vdequantized_input;
+}
+
/** Quantize a neon vector holding 8 floating point values.
*
* @param[in] qv Input values to be quantized.
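A minimal sketch of how the two new overloads might be exercised (buffer names and quantization parameters below are illustrative, not taken from the patch):

#include <arm_neon.h>

#include "arm_compute/core/NEON/NEAsymm.h"

// Dequantize 16 QASYMM8 values and 16 QSYMM8 values with the new overloads.
void dequantize_example(const uint8_t *qasymm8_in, const int8_t *qsymm8_in, float *out_a, float *out_s)
{
    // Asymmetric scheme: f = (q - offset) * scale
    const uint8x16_t    va = vld1q_u8(qasymm8_in);
    const float32x4x4_t fa = arm_compute::vdequantize(va, 0.1f /* scale */, 128 /* offset */);

    // Symmetric scheme: f = q * scale (no zero offset)
    const int8x16_t     vs = vld1q_s8(qsymm8_in);
    const float32x4x4_t fs = arm_compute::vdequantize(vs, 0.0625f /* scale */);

    // Store the four float32x4_t lanes of each result back to memory
    for(int i = 0; i < 4; ++i)
    {
        vst1q_f32(out_a + 4 * i, fa.val[i]);
        vst1q_f32(out_s + 4 * i, fs.val[i]);
    }
}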
diff --git a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
index 7d215f5f7b..3320ba6889 100644
--- a/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDequantizationLayerKernel.h
@@ -52,13 +52,13 @@ public:
~NEDequantizationLayerKernel() = default;
/** Set input, output tensors.
*
- * @param[in] input Source tensor. Data type supported: QASYMM8.
+ * @param[in] input Source tensor. Data type supported: QASYMM8/QSYMM8.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayerKernel
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
* @param[in] output Output tensor info. Data types supported: F16/F32.
*
* @return a status
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index 06c9b61154..dcfdd6ba16 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -63,12 +63,13 @@ struct UniformQuantizationInfo
};
/** Quantization information */
-struct QuantizationInfo
+class QuantizationInfo
{
+public:
/** Default constructor */
QuantizationInfo() noexcept
- : scale(),
- offset()
+ : _scale(),
+ _offset()
{
}
/** Construct quantization info.
@@ -78,7 +79,7 @@ struct QuantizationInfo
* @param[in] scale Scale.
*/
QuantizationInfo(float scale)
- : scale(1, scale), offset()
+ : _scale(1, scale), _offset()
{
}
/** Construct quantization info.
@@ -89,7 +90,7 @@ struct QuantizationInfo
* @param[in] offset Offset.
*/
QuantizationInfo(float scale, int offset)
- : scale(1, scale), offset(1, offset)
+ : _scale(1, scale), _offset(1, offset)
{
}
/** Construct quantization info.
@@ -99,16 +100,32 @@ struct QuantizationInfo
* @param[in] scale Scale.
*/
QuantizationInfo(std::vector<float> scale)
- : scale(scale), offset()
+ : _scale(scale), _offset()
{
}
+ /** Scale vector accessor
+ *
+ * @return A reference to quantization scale metadata
+ */
+ const std::vector<float> &scale() const
+ {
+ return _scale;
+ }
+ /** Offset vector accessor
+ *
+ * @return A reference to quantization offset metadata
+ */
+ const std::vector<int32_t> &offset() const
+ {
+ return _offset;
+ }
/** Indicates whether this QuantizationInfo has valid settings or not
*
* @return True if this object holds no quantization settings.
*/
bool empty() const
{
- return scale.empty() && offset.empty();
+ return _scale.empty() && _offset.empty();
}
/** Return per layer quantization info
*
@@ -117,14 +134,15 @@ struct QuantizationInfo
UniformQuantizationInfo uniform() const
{
UniformQuantizationInfo uqinfo;
- uqinfo.scale = scale.empty() ? 0 : scale[0];
- uqinfo.offset = offset.empty() ? 0 : offset[0];
+ uqinfo.scale = _scale.empty() ? 0 : _scale[0];
+ uqinfo.offset = _offset.empty() ? 0 : _offset[0];
return uqinfo;
}
- std::vector<float> scale; /**< Vector containing scaling factors */
- std::vector<int32_t> offset; /**< Vector containing zero offsets */
+private:
+ std::vector<float> _scale; /**< Vector containing scaling factors */
+ std::vector<int32_t> _offset; /**< Vector containing zero offsets */
};
/** Check whether two quantization info are equal.
@@ -136,7 +154,7 @@ struct QuantizationInfo
*/
inline bool operator==(const QuantizationInfo &lhs, const QuantizationInfo &rhs)
{
- return (lhs.scale == rhs.scale) && (lhs.offset == rhs.offset);
+ return (lhs.scale() == rhs.scale()) && (lhs.offset() == rhs.offset());
}
/** Check whether two quantization info are not equal.
@@ -245,6 +263,19 @@ inline float dequantize_qasymm8(uint8_t value, const QuantizationInfo &qinfo)
return (static_cast<int>(value) - uqinfo.offset) * uqinfo.scale;
}
+/** Dequantize a value given an asymmetric quantization scheme
+ *
+ * @param[in] value Value to dequantize
+ * @param[in] scale Scale to use for dequantization
+ * @param[in] offset Zero-offset to use for dequantization
+ *
+ * @return Dequantized value
+ */
+inline float dequantize(uint8_t value, float scale, int32_t offset)
+{
+ return (static_cast<int>(value) - offset) * scale;
+}
+
/** Dequantize a value given a symmetric quantization scheme
*
* @param[in] value Value to dequantize
@@ -252,9 +283,21 @@ inline float dequantize_qasymm8(uint8_t value, const QuantizationInfo &qinfo)
*
* @return Dequantized value
*/
-inline float dequantize_qsymm8(int8_t value, const QuantizationInfo &qinfo)
+inline float dequantize_qsymm8(int8_t value, const UniformQuantizationInfo &qinfo)
+{
+ return value * qinfo.scale;
+}
+
+/** Dequantize a value given a symmetric quantization scheme
+ *
+ * @param[in] value Value to dequantize
+ * @param[in] scale Scale to use for dequantization
+ *
+ * @return Dequantized value
+ */
+inline float dequantize(int8_t value, float scale)
{
- return value * qinfo.uniform().scale;
+ return value * scale;
}
/** Quantize a value given a 16-bit symmetric quantization scheme
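A short sketch of the reworked interface (values are illustrative): scale and offset are now private and reached through the scale()/offset() accessors, and the new scalar helpers mirror the two schemes:

#include "arm_compute/core/QuantizationInfo.h"

using namespace arm_compute;

void quantization_info_example()
{
    const QuantizationInfo qasymm(0.1f, 128); // per-layer asymmetric: scale + offset
    const QuantizationInfo qsymm(0.0625f);    // per-layer symmetric: scale only

    // Members are private now; go through the accessors.
    const float   scale  = qasymm.scale()[0];
    const int32_t offset = qasymm.offset()[0];

    const float fa = dequantize(static_cast<uint8_t>(255), scale, offset);  // (255 - 128) * 0.1f = 12.7f
    const float fs = dequantize(static_cast<int8_t>(-64), qsymm.scale()[0]); // -64 * 0.0625f = -4.0f

    (void)fa;
    (void)fs;
}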
diff --git a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
index cf7c5761e4..2f7af01a84 100644
--- a/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDequantizationLayer.h
@@ -39,13 +39,14 @@ class CLDequantizationLayer : public ICLSimpleFunction
public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8.
+ * @param[in] input Source tensor with at least 3 dimensions. The dimensions over the third will be interpreted as batches.
+ * Data types supported: QASYMM8/QSYMM8.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLDequantizationLayer
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
* @param[in] output Output tensor info. Data type supported: F16/F32.
*
* @return a status
diff --git a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
index b7c5bac844..8c24b38cee 100644
--- a/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDequantizationLayer.h
@@ -39,13 +39,13 @@ class NEDequantizationLayer : public INESimpleFunctionNoBorder
public:
/** Configure the kernel.
*
- * @param[in] input Source tensor. Data types supported: QASYMM8.
+ * @param[in] input Source tensor. Data types supported: QASYMM8/QSYMM8.
* @param[out] output Destination tensor with the same dimensions of input. Data type supported: F16/F32.
*/
void configure(const ITensor *input, ITensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEDequantizationLayer
*
- * @param[in] input Input tensor info. Data types supported: QASYMM8.
+ * @param[in] input Input tensor info. Data types supported: QASYMM8/QSYMM8.
* @param[in] output Output tensor info. Data type supported: F16/F32.
*
* @return a status
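For context, an end-to-end sketch of the NEON runtime function with a QSYMM8 input (shape and scale are illustrative):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void dequantization_layer_example()
{
    Tensor src{};
    Tensor dst{};
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QSYMM8, QuantizationInfo(0.0625f)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    NEDequantizationLayer dequant;
    dequant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // Fill src with quantized data here, then:
    dequant.run(); // dst now holds src dequantized to F32
}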
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index 1e97b20499..8aa43201ad 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -236,6 +236,12 @@ If there is more than one release in a month then an extra sequential number is
@subsection S2_2_changelog Changelog
+v19.08 Public major release
+ - Various bug fixes.
+ - Various optimisations.
+ - Deprecated functions/interfaces
+ - Altered @ref QuantizationInfo interface to support per-channel quantization.
+
v19.05 Public major release
- Various bug fixes.
- Various optimisations.
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index f4ceca8200..2e6ceb4433 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -37,11 +37,11 @@ std::string get_cl_type_from_data_type(const DataType &dt)
switch(dt)
{
case DataType::U8:
+ case DataType::QASYMM8:
return "uchar";
case DataType::S8:
+ case DataType::QSYMM8:
return "char";
- case DataType::QASYMM8:
- return "uchar";
case DataType::U16:
return "ushort";
case DataType::S16:
@@ -69,11 +69,11 @@ std::string get_cl_select_type_from_data_type(const DataType &dt)
switch(dt)
{
case DataType::U8:
+ case DataType::QASYMM8:
return "uchar";
case DataType::S8:
+ case DataType::QSYMM8:
return "char";
- case DataType::QASYMM8:
- return "uchar";
case DataType::U16:
return "ushort";
case DataType::F16:
@@ -100,6 +100,7 @@ std::string get_data_size_from_data_type(const DataType &dt)
{
case DataType::U8:
case DataType::S8:
+ case DataType::QSYMM8:
case DataType::QASYMM8:
return "8";
case DataType::U16:
@@ -241,6 +242,7 @@ size_t preferred_vector_width(const cl::Device &device, const DataType dt)
case DataType::U8:
case DataType::S8:
case DataType::QASYMM8:
+ case DataType::QSYMM8:
return device.getInfo<CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR>();
case DataType::U16:
case DataType::S16:
diff --git a/src/core/CL/cl_kernels/dequantization_layer.cl b/src/core/CL/cl_kernels/dequantization_layer.cl
index 7307700473..ad3ed35480 100644
--- a/src/core/CL/cl_kernels/dequantization_layer.cl
+++ b/src/core/CL/cl_kernels/dequantization_layer.cl
@@ -23,16 +23,17 @@
*/
#include "helpers.h"
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(SCALE) && defined(OFFSET)
+#if defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) && defined(SCALE) && defined(OFFSET)
/** This performs the dequantization of 8-bit unsigned integers to floating point.
*
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note Source datatype should be given as a preprocessor argument using -DDATA_TYPE_SRC=type. e.g. -DDATA_TYPE_SRC=char
+ * @note Destination datatype should be given as a preprocessor argument using -DDATA_TYPE_DST=type. e.g. -DDATA_TYPE_DST=float
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
* @note Quantization scale of input tensor is passed in with -DSCALE=scale.
* @note Quantization offset of input tensor is passed in with -DOFFSET=offset.
*
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QSYMM8
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -66,7 +67,7 @@ __kernel void dequantization_layer(
// Load data
VEC_DATA_TYPE(int, VEC_SIZE)
- val = CONVERT(VLOAD(VEC_SIZE)(0, (__global uchar *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
+ val = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_SRC *)input.ptr), VEC_DATA_TYPE(int, VEC_SIZE));
// Create scale and offset vectors
const VEC_DATA_TYPE(float, VEC_SIZE)
@@ -81,10 +82,10 @@ __kernel void dequantization_layer(
// Store result
VSTORE(VEC_SIZE)
- (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), 0, (__global DATA_TYPE *)output.ptr);
+ (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_DST, VEC_SIZE)), 0, (__global DATA_TYPE_DST *)output.ptr);
#else // !defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
- *((__global DATA_TYPE *)(output.ptr)) = (DATA_TYPE)((float)((int)(*((__global uchar *)(input.ptr))) - (int)(OFFSET)) * (float)(SCALE));
+ *((__global DATA_TYPE_DST *)(output.ptr)) = (DATA_TYPE_DST)((float)((int)(*((__global DATA_TYPE_SRC *)(input.ptr))) - (int)(OFFSET)) * (float)(SCALE));
#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
}
-#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(SCALE) && defined(OFFSET)
\ No newline at end of file
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE_SRC) && defined(DATA_TYPE_DST) && defined(SCALE) && defined(OFFSET)
diff --git a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
index 0b066837a9..e383bc475d 100644
--- a/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDequantizationLayerKernel.cpp
@@ -40,7 +40,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8);
if(output->tensor_shape().total_size() > 0)
{
@@ -95,15 +95,19 @@ void CLDequantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *o
}
ICLKernel::configure_internal(win);
- const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo qinfo = input->info()->quantization_info().uniform();
+ const int qoffset = is_data_type_quantized_asymmetric(input->info()->data_type()) ? qinfo.offset : 0;
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qinfo.offset));
+ build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qoffset));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_SRC=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_DST=" + get_cl_type_from_data_type(output->info()->data_type()));
build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
+
+ // Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("dequantization_layer", build_opts.options()));
}
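As an illustration (assumed values, not from the patch): for a QSYMM8 input with scale 1/16, an F32 output and a vector size of 16, the build options assembled above would look roughly like

-DSCALE=0.0625 -DOFFSET=0 -DVEC_SIZE=16 -DDATA_TYPE_SRC=char -DDATA_TYPE_DST=float

plus -DLAST_ACCESSED_X=240 for an output row of 256 elements when multi-element access along X is enabled. OFFSET is pinned to 0 because QSYMM8 carries no zero offset; for a QASYMM8 input it would carry the tensor's offset and DATA_TYPE_SRC would be uchar.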
diff --git a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
index a6dc0977d2..bf0a2ca7bf 100644
--- a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
@@ -42,7 +42,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8);
if(output->tensor_shape().total_size() > 0)
{
@@ -95,9 +95,11 @@ inline void store_result<float16_t>(float16_t *ptr, const float32x4x4_t &v)
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
template <typename T>
-void run_dequantization(const ITensor *input, ITensor *output, const Window &window)
+void run_dequantization_qasymm8(const ITensor *input, ITensor *output, const Window &window)
{
- const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+ const float scale = qinfo.scale;
+ const int32_t offset = qinfo.offset;
const int window_step_x = 16;
const auto window_start_x = static_cast<int>(window.x().start());
@@ -120,7 +122,49 @@ void run_dequantization(const ITensor *input, ITensor *output, const Window &win
for(; x <= (window_end_x - window_step_x); x += window_step_x)
{
const auto vin = wrapper::vloadq(in_ptr + x);
- const auto vdeq = vdequantize(vin, qinfo);
+ const auto vdeq = vdequantize(vin, scale, offset);
+
+ store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ uint8_t val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<T>(dequantize(val, scale, offset));
+ }
+ },
+ in, out);
+}
+
+template <typename T>
+void run_dequantization_qsymm8(const ITensor *input, ITensor *output, const Window &window)
+{
+ const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+ const float scale = qinfo.scale;
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win_collapsed);
+ Iterator out(output, win_collapsed);
+
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize(vin, scale);
store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
}
@@ -129,11 +173,27 @@ void run_dequantization(const ITensor *input, ITensor *output, const Window &win
for(; x < window_end_x; ++x)
{
- uint8_t val = *(in_ptr + x);
+ int8_t val = *(in_ptr + x);
- *(out_ptr + x) = static_cast<T>(dequantize_qasymm8(val, qinfo));
+ *(out_ptr + x) = static_cast<T>(dequantize(val, scale));
}
},
in, out);
}
+
+template <typename T>
+void run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
+{
+ switch(input->info()->data_type())
+ {
+ case DataType::QASYMM8:
+ run_dequantization_qasymm8<T>(input, output, window);
+ break;
+ case DataType::QSYMM8:
+ run_dequantization_qsymm8<T>(input, output, window);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type.");
+ }
+}
} // namespace
NEDequantizationLayerKernel::NEDequantizationLayerKernel()
@@ -173,11 +233,11 @@ void NEDequantizationLayerKernel::run(const Window &window, const ThreadInfo &in
switch(_output->info()->data_type())
{
case DataType::F32:
- run_dequantization<float>(_input, _output, window);
+ run_dequantization_core<float>(_input, _output, window);
break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
- run_dequantization<float16_t>(_input, _output, window);
+ run_dequantization_core<float16_t>(_input, _output, window);
break;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
default:
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 22be0002ee..499a6c8b29 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -158,6 +158,8 @@ const std::string &arm_compute::string_from_data_type(DataType dt)
{ DataType::F32, "F32" },
{ DataType::F64, "F64" },
{ DataType::SIZET, "SIZET" },
+ { DataType::QSYMM8, "QSYMM8" },
+ { DataType::QSYMM8_PER_CHANNEL, "QSYMM8_PER_CHANNEL" },
{ DataType::QASYMM8, "QASYMM8" },
{ DataType::QSYMM16, "QSYMM16" },
};
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 63aa1ba9ea..f3f16cd8c0 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -87,11 +87,12 @@ void populate_quantization_info(CLFloatArray &scale, CLInt32Array &offset, const
clear_quantization_arrays(scale, offset);
// Create scale array
- const size_t num_elements = qinfo.scale.size();
- const size_t element_size = sizeof(decltype(qinfo.scale)::value_type);
- scale = CLFloatArray(num_elements + pad_size);
+ const std::vector<float> &qscale = qinfo.scale();
+ const size_t num_elements = qscale.size();
+ const size_t element_size = sizeof(std::remove_reference<decltype(qscale)>::type::value_type);
+ scale = CLFloatArray(num_elements + pad_size);
scale.resize(num_elements);
- CLScheduler::get().queue().enqueueWriteBuffer(scale.cl_buffer(), CL_TRUE, 0, num_elements * element_size, qinfo.scale.data());
+ CLScheduler::get().queue().enqueueWriteBuffer(scale.cl_buffer(), CL_TRUE, 0, num_elements * element_size, qinfo.scale().data());
}
} // namespace
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 5c8019bdff..2f2665f381 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -634,6 +634,7 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
break;
}
case DataType::S8:
+ case DataType::QSYMM8:
{
std::uniform_int_distribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
fill(tensor, distribution_s8, seed_offset);
@@ -728,6 +729,7 @@ void AssetsLibrary::fill_tensor_uniform_ranged(T
break;
}
case DataType::S8:
+ case DataType::QSYMM8:
{
const auto converted_pairs = detail::convert_range_pair<int8_t>(excluded_range_pairs);
RangedUniformDistribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(),
@@ -808,6 +810,7 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
break;
}
case DataType::S8:
+ case DataType::QSYMM8:
{
ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
std::uniform_int_distribution<int8_t> distribution_s8(low, high);
diff --git a/tests/datasets/DatatypeDataset.h b/tests/datasets/DatatypeDataset.h
new file mode 100644
index 0000000000..bb2774b4b3
--- /dev/null
+++ b/tests/datasets/DatatypeDataset.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_DATATYPE_DATASET_H__
+#define __ARM_COMPUTE_TEST_DATATYPE_DATASET_H__
+
+#include "arm_compute/core/Types.h"
+#include "tests/framework/datasets/ContainerDataset.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class QuantizedTypes final : public framework::dataset::ContainerDataset<std::vector<DataType>>
+{
+public:
+ QuantizedTypes()
+ : ContainerDataset("QuantizedTypes",
+ {
+ DataType::QSYMM8,
+ DataType::QASYMM8,
+ })
+ {
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_DATATYPE_DATASET_H__ */
diff --git a/tests/validation/CL/DequantizationLayer.cpp b/tests/validation/CL/DequantizationLayer.cpp
index b1b0d81c6d..2ef8c60998 100644
--- a/tests/validation/CL/DequantizationLayer.cpp
+++ b/tests/validation/CL/DequantizationLayer.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/runtime/CL/functions/CLDequantizationLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
+#include "tests/datasets/DatatypeDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
@@ -96,16 +97,14 @@ template <typename T>
using CLDequantizationLayerFixture = DequantizationValidationFixture<CLTensor, CLAccessor, CLDequantizationLayer, T>;
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F16)))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F16)))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -113,16 +112,14 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<half>, framework::
TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(CLAccessor(_target), _reference);
diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index 4b8e105240..d91f4dd022 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -249,9 +249,9 @@ TEST_CASE(Symm8PerChannelQuantizationInfo, framework::DatasetMode::ALL)
// Check quantization information
ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().scale.empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().scale.size() == scale.size(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().offset.empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().scale().empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().scale().size() == scale.size(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().offset().empty(), framework::LogLevel::ERRORS);
CLQuantization quantization = tensor.quantization();
ARM_COMPUTE_ASSERT(quantization.scale != nullptr);
diff --git a/tests/validation/NEON/DequantizationLayer.cpp b/tests/validation/NEON/DequantizationLayer.cpp
index 0ae20b7b5d..a4606fe8a0 100644
--- a/tests/validation/NEON/DequantizationLayer.cpp
+++ b/tests/validation/NEON/DequantizationLayer.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
+#include "tests/datasets/DatatypeDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
@@ -106,16 +107,14 @@ using NEDequantizationLayerFixture = DequantizationValidationFixture<Tensor, Acc
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F16)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F16)))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -124,16 +123,14 @@ TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), datasets::QuantizedTypes()),
+ framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(Accessor(_target), _reference);
diff --git a/tests/validation/UNIT/TensorInfo.cpp b/tests/validation/UNIT/TensorInfo.cpp
index 96d07da2b4..009c757925 100644
--- a/tests/validation/UNIT/TensorInfo.cpp
+++ b/tests/validation/UNIT/TensorInfo.cpp
@@ -141,9 +141,9 @@ TEST_CASE(SymmQuantizationInfo, framework::DatasetMode::ALL)
// Check quantization information
ARM_COMPUTE_EXPECT(!info.quantization_info().empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!info.quantization_info().scale.empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.quantization_info().scale.size() == 1, framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.quantization_info().offset.empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!info.quantization_info().scale().empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(info.quantization_info().scale().size() == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(info.quantization_info().offset().empty(), framework::LogLevel::ERRORS);
UniformQuantizationInfo qinfo = info.quantization_info().uniform();
ARM_COMPUTE_EXPECT(qinfo.scale == scale, framework::LogLevel::ERRORS);
@@ -160,10 +160,10 @@ TEST_CASE(AsymmQuantizationInfo, framework::DatasetMode::ALL)
// Check quantization information
ARM_COMPUTE_EXPECT(!info.quantization_info().empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!info.quantization_info().scale.empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.quantization_info().scale.size() == 1, framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!info.quantization_info().offset.empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.quantization_info().offset.size() == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!info.quantization_info().scale().empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(info.quantization_info().scale().size() == 1, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!info.quantization_info().offset().empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(info.quantization_info().offset().size() == 1, framework::LogLevel::ERRORS);
UniformQuantizationInfo qinfo = info.quantization_info().uniform();
ARM_COMPUTE_EXPECT(qinfo.scale == scale, framework::LogLevel::ERRORS);
@@ -179,9 +179,9 @@ TEST_CASE(SymmPerChannelQuantizationInfo, framework::DatasetMode::ALL)
// Check quantization information
ARM_COMPUTE_EXPECT(!info.quantization_info().empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!info.quantization_info().scale.empty(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.quantization_info().scale.size() == scale.size(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(info.quantization_info().offset.empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!info.quantization_info().scale().empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(info.quantization_info().scale().size() == scale.size(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(info.quantization_info().offset().empty(), framework::LogLevel::ERRORS);
}
TEST_SUITE_END() // TensorInfoValidation
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 2e3712dff2..15f3711189 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -47,10 +47,11 @@ class DequantizationValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type, QuantizationInfo qinfo)
+ void setup(TensorShape shape, DataType src_data_type, DataType dst_datatype)
{
- _target = compute_target(shape, data_type, qinfo);
- _reference = compute_reference(shape, data_type, qinfo);
+ _quantization_info = generate_quantization_info(src_data_type);
+ _target = compute_target(shape, src_data_type, dst_datatype);
+ _reference = compute_reference(shape, src_data_type);
}
protected:
@@ -60,11 +61,11 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo)
+ TensorType compute_target(const TensorShape &shape, DataType src_data_type, DataType dst_datatype)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, DataType::QASYMM8, 1, qinfo);
- TensorType dst = create_tensor<TensorType>(shape, data_type);
+ TensorType src = create_tensor<TensorType>(shape, src_data_type, 1, _quantization_info);
+ TensorType dst = create_tensor<TensorType>(shape, dst_datatype);
// Create and configure function
FunctionType dequantization_layer;
@@ -89,19 +90,43 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType src_data_type)
{
- // Create reference
- SimpleTensor<uint8_t> src{ shape, DataType::QASYMM8, 1, qinfo };
-
- // Fill reference
- fill(src);
+ if(is_data_type_quantized_asymmetric(src_data_type))
+ {
+ SimpleTensor<uint8_t> src{ shape, src_data_type, 1, _quantization_info };
+ fill(src);
+ return reference::dequantization_layer<T>(src);
+ }
+ else
+ {
+ SimpleTensor<int8_t> src{ shape, src_data_type, 1, _quantization_info };
+ fill(src);
+ return reference::dequantization_layer<T>(src);
+ }
+ }
- return reference::dequantization_layer<T>(src);
+protected:
+ QuantizationInfo generate_quantization_info(DataType data_type)
+ {
+ std::uniform_int_distribution<> distribution(1, 127);
+ std::mt19937 gen(library.get()->seed());
+
+ switch(data_type)
+ {
+ case DataType::QSYMM8:
+ return QuantizationInfo(1.f / distribution(gen));
+ case DataType::QASYMM8:
+ return QuantizationInfo(1.f / distribution(gen), distribution(gen));
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
}
- TensorType _target{};
- SimpleTensor<T> _reference{};
+protected:
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+ QuantizationInfo _quantization_info{};
};
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/DequantizationLayer.cpp b/tests/validation/reference/DequantizationLayer.cpp
index 286a609d79..d07371c883 100644
--- a/tests/validation/reference/DequantizationLayer.cpp
+++ b/tests/validation/reference/DequantizationLayer.cpp
@@ -23,6 +23,8 @@
*/
#include "DequantizationLayer.h"
+#include "Permute.h"
+
namespace arm_compute
{
namespace test
@@ -31,24 +33,82 @@ namespace validation
{
namespace reference
{
-template <typename T>
-SimpleTensor<T> dequantization_layer(const SimpleTensor<uint8_t> &src)
+namespace
+{
+template <typename TOut>
+TOut dequantize(int8_t val, const UniformQuantizationInfo qinfo)
+{
+ return static_cast<TOut>(dequantize_qsymm8(val, qinfo));
+}
+template <typename TOut>
+TOut dequantize(uint8_t val, const UniformQuantizationInfo qinfo)
+{
+ return static_cast<TOut>(dequantize_qasymm8(val, qinfo));
+}
+
+template <typename TOut, typename TIn>
+SimpleTensor<TOut> dequantization_layer_nchw(const SimpleTensor<TIn> &src)
{
- const DataType dst_data_type = std::is_same<T, float>::value ? DataType::F32 : DataType::F16;
- const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
+ const DataType src_data_type = src.data_type();
+ const DataType dst_data_type = std::is_same<TOut, float>::value ? DataType::F32 : DataType::F16;
- SimpleTensor<T> dst{ src.shape(), dst_data_type };
+ SimpleTensor<TOut> dst{ src.shape(), dst_data_type };
- for(int i = 0; i < src.num_elements(); ++i)
+ if(src_data_type == DataType::QSYMM8_PER_CHANNEL)
{
- dst[i] = static_cast<T>(dequantize_qasymm8(src[i], quantization_info));
+ const int WH = src.shape().x() * src.shape().y();
+ const int C = src.shape().z();
+ const int N = src.shape().total_size() / (WH * C);
+
+ const std::vector<float> qscales = src.quantization_info().scale();
+
+ for(int n = 0; n < N; ++n)
+ {
+ for(int c = 0; c < C; ++c)
+ {
+ const size_t idx = n * C * WH + c * WH;
+ const UniformQuantizationInfo channel_qinfo = { qscales[c], 0 };
+
+ // Dequantize slice
+ for(int s = 0; s < WH; ++s)
+ {
+ dst[idx + s] = dequantize<TOut>(src[idx + s], channel_qinfo);
+ }
+ }
+ }
+ }
+ else
+ {
+ const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
+ ARM_COMPUTE_ERROR_ON(quantization_info.offset != 0 && src_data_type == DataType::QSYMM8);
+
+ for(int i = 0; i < src.num_elements(); ++i)
+ {
+ dst[i] = static_cast<TOut>(dequantize<TOut>(src[i], quantization_info));
+ }
}
return dst;
}
+} // namespace
+template <typename TOut, typename TIn>
+SimpleTensor<TOut> dequantization_layer(const SimpleTensor<TIn> &src)
+{
+ if(src.data_layout() == DataLayout::NHWC && src.data_type() == DataType::QSYMM8_PER_CHANNEL)
+ {
+ SimpleTensor<TIn> src_nchw = reference::permute<TIn>(src, PermutationVector(1U, 2U, 0U));
+ return reference::permute<TOut>(dequantization_layer_nchw<TOut>(src_nchw), PermutationVector(2U, 0U, 1U));
+ }
+ else
+ {
+ return dequantization_layer_nchw<TOut>(src);
+ }
+}
template SimpleTensor<half> dequantization_layer(const SimpleTensor<uint8_t> &src);
template SimpleTensor<float> dequantization_layer(const SimpleTensor<uint8_t> &src);
+template SimpleTensor<half> dequantization_layer(const SimpleTensor<int8_t> &src);
+template SimpleTensor<float> dequantization_layer(const SimpleTensor<int8_t> &src);
} // namespace reference
} // namespace validation
} // namespace test
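To make the per-channel indexing above concrete, a toy case (hypothetical sizes and scales):

// Shape W=2, H=2, C=2, N=1 => WH = 4 and idx = n * C * WH + c * WH + s.
// With qscales = { 0.5f, 0.25f } (the offset is always 0 for QSYMM8_PER_CHANNEL):
//   dst[0..3] = src[0..3] * 0.5f;  // channel 0
//   dst[4..7] = src[4..7] * 0.25f; // channel 1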
diff --git a/tests/validation/reference/DequantizationLayer.h b/tests/validation/reference/DequantizationLayer.h
index 1d0e54b442..8c780849fd 100644
--- a/tests/validation/reference/DequantizationLayer.h
+++ b/tests/validation/reference/DequantizationLayer.h
@@ -35,8 +35,8 @@ namespace validation
{
namespace reference
{
-template <typename T>
-SimpleTensor<T> dequantization_layer(const SimpleTensor<uint8_t> &src);
+template <typename TOut, typename TIn>
+SimpleTensor<TOut> dequantization_layer(const SimpleTensor<TIn> &src);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 49edffbb3d..1f60537c2a 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -322,14 +322,9 @@ inline std::string to_string(const GenerateProposalsInfo &proposals_info)
*/
inline ::std::ostream &operator<<(::std::ostream &os, const QuantizationInfo &qinfo)
{
- if(!qinfo.scale.empty())
- {
- os << "Scale:" << qinfo.scale[0] << "~";
- }
- if(!qinfo.empty())
- {
- os << "Offset:" << qinfo.offset[0];
- }
+ const UniformQuantizationInfo uqinfo = qinfo.uniform();
+ os << "Scale:" << uqinfo.scale << "~";
+ os << "Offset:" << uqinfo.offset;
return os;
}