author     Michele Di Giorgio <michele.digiorgio@arm.com>  2019-09-10 10:42:27 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>     2019-09-16 14:43:54 +0000
commit     d87a7b297a5bfc2bad3ba78ea97754d7894e82ef (patch)
tree       c527ede747d386fe8dd1b39a877ea4e9a19a5fd3
parent     e874ef9b845424dceeac4211ca9dfec24949f03c (diff)
COMPMID-2650: Add support for QASYMM16 in CLQuantizationLayer
Change-Id: I51dda621975f522a65d770304bed0ff0f30d1235
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1902
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--  arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h   6
-rw-r--r--  arm_compute/core/utils/quantization/AsymmHelpers.h         8
-rw-r--r--  arm_compute/runtime/CL/functions/CLQuantizationLayer.h    6
-rw-r--r--  src/core/CL/cl_kernels/quantization_layer.cl              28
-rw-r--r--  src/core/CL/kernels/CLQuantizationLayerKernel.cpp         54
-rw-r--r--  src/core/utils/quantization/AsymmHelpers.cpp              59
-rw-r--r--  tests/validation/CL/QuantizationLayer.cpp                 54
-rw-r--r--  tests/validation/NEON/QuantizationLayer.cpp               36
-rw-r--r--  tests/validation/fixtures/QuantizationLayerFixture.h      24
-rw-r--r--  tests/validation/reference/QuantizationLayer.cpp          35
-rw-r--r--  tests/validation/reference/QuantizationLayer.h             4
11 files changed, 209 insertions, 105 deletions
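For orientation before the per-file changes: the operation this patch generalizes is the asymmetric quantization q = clamp(round(x / scale) + offset, min, max). Previously min/max were hard-coded to the QASYMM8 range [0, 255]; they now come from the output data type, so QASYMM16 clamps to [0, 65535] instead. A minimal standalone C++ sketch of that mapping (illustrative only; the CL kernel rounds to nearest even, whereas std::lround rounds ties away from zero):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    template <typename Q>
    Q quantize_asymm_sketch(float x, float scale, int offset, int min_q, int max_q)
    {
        // round(x / scale) + offset, clamped to the output type's range
        const int q = static_cast<int>(std::lround(x / scale)) + offset;
        return static_cast<Q>(std::min(std::max(q, min_q), max_q));
    }

    // quantize_asymm_sketch<uint8_t>(1.5f, 0.5f, 10, 0, 255)    == 13
    // quantize_asymm_sketch<uint16_t>(1.5f, 0.5f, 10, 0, 65535) == 13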
diff --git a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h
index d16ae546ff..ac113d93d8 100644
--- a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h
@@ -52,13 +52,15 @@ public:
/** Set the input and output.
*
* @param[in] input Source tensor. Data types supported: F32/F16.
- * @param[out] output Destination tensor with the same dimensions of input. Output data type must be QASYMM8.
+ * @param[out] output Destination tensor with the same dimensions as the input. Data types supported: QASYMM8/QASYMM16.
+ *
+ * @note Output auto initialization is not supported by this kernel
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayerKernel
*
* @param[in] input Input tensor info. Data types supported: F32/F16.
- * @param[in] output Output tensor info. Output data type must be QASYMM8.
+ * @param[in] output Destination tensor info with the same dimensions as the input. Data types supported: QASYMM8/QASYMM16.
*
* @return a status
*/
diff --git a/arm_compute/core/utils/quantization/AsymmHelpers.h b/arm_compute/core/utils/quantization/AsymmHelpers.h
index d9f20cee2b..a0efe120f2 100644
--- a/arm_compute/core/utils/quantization/AsymmHelpers.h
+++ b/arm_compute/core/utils/quantization/AsymmHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define __ARM_COMPUTE_QUANTIZATION_ASYMM_HELPERS_H__
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Types.h"
namespace arm_compute
{
@@ -48,6 +49,11 @@ arm_compute::Status calculate_quantized_multiplier_less_than_one(float multiplie
* @return a status
*/
arm_compute::Status calculate_quantized_multiplier_greater_than_one(float multiplier, int *quantized_multiplier, int *left_shift);
+/** Get minimum and maximum values for the given quantized data type
+ *
+ * @param[in] data_type Quantized data type
+ *
+ * @return minimum and maximum values for the given quantized data type
+ */
+std::pair<int, int> get_min_max_values_from_quantized_data_type(DataType data_type);
} // namespace quantization
} // namespace arm_compute
#endif /* __ARM_COMPUTE_QUANTIZATION_ASYMM_HELPERS_H__ */
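A usage sketch for the new helper, with the ranges implied by the switch added in AsymmHelpers.cpp further down (variable names are illustrative):

    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/utils/quantization/AsymmHelpers.h"

    using namespace arm_compute;

    const std::pair<int, int> u8_range  = quantization::get_min_max_values_from_quantized_data_type(DataType::QASYMM8);  // {0, 255}
    const std::pair<int, int> u16_range = quantization::get_min_max_values_from_quantized_data_type(DataType::QASYMM16); // {0, 65535}
    // QSYMM8 yields {-128, 127} and QSYMM16 {-32768, 32767}; any other
    // DataType triggers ARM_COMPUTE_ERROR("Unsupported data type").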
diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
index 104f8e2eb2..41a03fdc08 100644
--- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h
@@ -43,13 +43,15 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F16/F32.
- * @param[out] output Destination tensor with the same dimensions of input. Output data type must be QASYMM8.
+ * @param[out] output Destination tensor with the same dimensions as the input. Data types supported: QASYMM8/QASYMM16.
+ *
+ * @note Output auto initialization is not supported by this function
*/
void configure(const ICLTensor *input, ICLTensor *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer
*
* @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F16/F32.
- * @param[in] output Output tensor info. Output data type must be QASYMM8.
+ * @param[in] output Output tensor info. Data types supported: QASYMM8/QASYMM16.
*
* @return a status
*/
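A minimal usage sketch for the new QASYMM16 path (shapes and quantization parameters are illustrative). As noted above, output auto initialization is not supported, so the destination must be fully initialized before configure():

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLQuantizationLayer.h"

    using namespace arm_compute;

    void quantize_f32_to_qasymm16_sketch()
    {
        CLScheduler::get().default_init();

        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 2U), 1, DataType::F32));
        // Destination shape, data type and quantization info set explicitly:
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 2U), 1, DataType::QASYMM16, QuantizationInfo(0.5f, 10)));

        CLQuantizationLayer quant;
        quant.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src, then:
        quant.run();
    }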
diff --git a/src/core/CL/cl_kernels/quantization_layer.cl b/src/core/CL/cl_kernels/quantization_layer.cl
index 7ae34ef71a..41d9957150 100644
--- a/src/core/CL/cl_kernels/quantization_layer.cl
+++ b/src/core/CL/cl_kernels/quantization_layer.cl
@@ -27,10 +27,18 @@
#define CONVERT_RTE_VEC_STR(x, type, size) (convert_##type##size##_rte((x)))
#define CONVERT_RTE_VEC(x, type, size) CONVERT_RTE_VEC_STR(x, type, size)
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(SCALE) && defined(OFFSET)
+#if defined(VEC_SIZE) && defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) && defined(SCALE) && defined(OFFSET) && defined(MIN_QUANT_VAL) && defined(MAX_QUANT_VAL)
/** This performs the quantization of floating point inputs to 8-bit or 16-bit unsigned integers.
*
+ * @note Input data type should be given as a preprocessor argument using -DDATA_TYPE_IN=type. e.g. -DDATA_TYPE_IN=float
+ * @note Output data type should be given as a preprocessor argument using -DDATA_TYPE_OUT=type. e.g. -DDATA_TYPE_OUT=ushort
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Quantization scale should be given as a preprocessor argument using -DSCALE=scale. e.g. -DSCALE=0.125
+ * @note Quantization offset should be given as a preprocessor argument using -DOFFSET=offset. e.g. -DOFFSET=125
+ * @note Minimum value for quantized type should be given as a preprocessor argument using -DMIN_QUANT_VAL=value. e.g. -DMIN_QUANT_VAL=0
+ * @note Maximum value for quantized type should be given as a preprocessor argument using -DMAX_QUANT_VAL=value. e.g. -DMAX_QUANT_VAL=255
+ *
* @param[in] input_ptr Pointer to the source tensor. Supported data types: F32/F16
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
@@ -64,22 +72,22 @@ __kernel void quantization_layer(
output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x;
// Load data
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
+ VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE)
+ val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr);
// Create scale and offset vectors
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) vscale = SCALE;
- const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET;
+ const VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) vscale = SCALE;
+ const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET;
// Quantize
VEC_DATA_TYPE(int, VEC_SIZE)
- res = CLAMP(CONVERT_RTE_VEC(val / vscale, int, VEC_SIZE) + voffset, 0, 255);
+ res = CLAMP(CONVERT_RTE_VEC(val / vscale, int, VEC_SIZE) + voffset, MIN_QUANT_VAL, MAX_QUANT_VAL);
- //Store result
+ // Store result
VSTORE(VEC_SIZE)
- (CONVERT(res, VEC_DATA_TYPE(uchar, VEC_SIZE)), 0, (__global uchar *)output.ptr);
+ (CONVERT(res, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), 0, (__global DATA_TYPE_OUT *)output.ptr);
#else //!defined(VEC_SIZE) || !defined(LAST_ACCESSED_X)
- *((__global uchar *)(output.ptr)) = (uchar)CLAMP(CONVERT_RTE(((float) * (__global DATA_TYPE *)input.ptr) / ((float)SCALE), int) + (int)OFFSET, 0, 255);
+ *((__global DATA_TYPE_OUT *)(output.ptr)) = (DATA_TYPE_OUT)CLAMP(CONVERT_RTE(((float) * (__global DATA_TYPE_IN *)input.ptr) / ((float)SCALE), int) + (int)OFFSET, MIN_QUANT_VAL, MAX_QUANT_VAL);
#endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X)
}
-#endif //defined(VEC_SIZE) && defined(DATA_TYPE) && defined(SCALE) && defined(OFFSET)
+#endif //defined(VEC_SIZE) && defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) && defined(SCALE) && defined(OFFSET) && defined(MIN_QUANT_VAL) && defined(MAX_QUANT_VAL)
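To make the new defines concrete: for an F32 input quantized to QASYMM16 with QuantizationInfo(0.5f, 10), the host code in CLQuantizationLayerKernel.cpp below assembles build options along these lines (a sketch; SCALE is emitted with full precision and VEC_SIZE is 16 / sizeof(input element), here 16 / 4 = 4):

    -DSCALE=0.5 -DOFFSET=10 -DVEC_SIZE=4
    -DDATA_TYPE_IN=float -DDATA_TYPE_OUT=ushort
    -DMIN_QUANT_VAL=0 -DMAX_QUANT_VAL=65535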
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
index 493255f1cd..19aa51b5b5 100644
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
@@ -32,9 +32,10 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-using namespace arm_compute;
-
+namespace arm_compute
+{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
@@ -42,28 +43,34 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- if((output != nullptr) && (output->total_size() != 0))
- {
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
- }
+
+ // Output must always be initialized
+ ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape().total_size() == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM16);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
return Status{};
}
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
// Configure kernel window
Window win = calculate_max_window(*input, Steps());
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output, input->tensor_shape(), 1, DataType::QASYMM8);
+ const int vec_size_x = 16 / input->element_size();
+ const int input_width_x = input->tensor_shape().x();
+ const bool multi_access_x = (input_width_x / vec_size_x > 0);
+ if(multi_access_x)
+ {
+ win.set(Window::DimX,
+ Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+ }
Coordinates coord;
coord.set_num_dimensions(output->num_dimensions());
output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
- return std::make_tuple(Status{}, win);
+ return std::make_pair(Status{}, win);
}
} // namespace
@@ -84,31 +91,33 @@ void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *out
const int input_width_x = input->info()->tensor_shape().x();
const bool multi_access_x = (input_width_x / vec_size_x > 0);
- // Create and update the window (if needed)
- Window win = calculate_max_window(*input->info());
- if(multi_access_x)
- {
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
- }
- ICLKernel::configure_internal(win);
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
- const UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform();
+ const UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform();
+ const DataType output_data_type = output->info()->data_type();
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qinfo.offset));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output_data_type));
build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(input_width_x - vec_size_x, 0)));
+ std::pair<int, int> min_max_quant_values = quantization::get_min_max_values_from_quantized_data_type(output_data_type);
+ build_opts.add_option("-DMIN_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.first));
+ build_opts.add_option("-DMAX_QUANT_VAL=" + support::cpp11::to_string(min_max_quant_values.second));
+
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("quantization_layer", build_opts.options()));
}
Status CLQuantizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get())));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
return Status{};
}
@@ -130,3 +139,4 @@ void CLQuantizationLayerKernel::run(const Window &window, cl::CommandQueue &queu
}
while(window_collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
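The window logic now lives in validate_and_configure_window so that validate() exercises the same path as configure(). It rounds the X dimension up to a multiple of the vector size; a standalone sketch of that arithmetic (illustrative, mirroring the library's ceil_to_multiple):

    #include <cstddef>

    // With an F32 input, vec_size_x = 16 / sizeof(float) = 4; an X extent of
    // 17 is rounded up to 20, and LAST_ACCESSED_X shifts the final work-item
    // back so its vector load/store stays in bounds.
    size_t ceil_to_multiple_sketch(size_t value, size_t divisor)
    {
        return ((value + divisor - 1) / divisor) * divisor;
    }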
diff --git a/src/core/utils/quantization/AsymmHelpers.cpp b/src/core/utils/quantization/AsymmHelpers.cpp
index d606adba62..59052449af 100644
--- a/src/core/utils/quantization/AsymmHelpers.cpp
+++ b/src/core/utils/quantization/AsymmHelpers.cpp
@@ -27,14 +27,16 @@
#include <limits>
#include <numeric>
-using namespace arm_compute::quantization;
-
+namespace arm_compute
+{
+namespace quantization
+{
constexpr int64_t fixed_point_one_Q0 = (1LL << 31);
constexpr float epsilon = 0.00001f;
-arm_compute::Status arm_compute::quantization::calculate_quantized_multiplier_less_than_one(float multiplier,
- int *quant_multiplier,
- int *right_shift)
+Status calculate_quantized_multiplier_less_than_one(float multiplier,
+ int *quant_multiplier,
+ int *right_shift)
{
ARM_COMPUTE_RETURN_ERROR_ON(quant_multiplier == nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(right_shift == nullptr);
@@ -44,19 +46,19 @@ arm_compute::Status arm_compute::quantization::calculate_quantized_multiplier_le
{
*quant_multiplier = 1;
*right_shift = 0;
- return arm_compute::Status{};
+ return Status{};
}
if(std::fabs(0.0f - multiplier) < epsilon)
{
*quant_multiplier = 0;
*right_shift = 0;
- return arm_compute::Status{};
+ return Status{};
}
const double q = std::frexp(multiplier, right_shift);
*right_shift *= -1;
- auto q_fixed = static_cast<int64_t>(round(q * fixed_point_one_Q0));
+ auto q_fixed = static_cast<int64_t>(support::cpp11::round(q * fixed_point_one_Q0));
ARM_COMPUTE_RETURN_ERROR_ON(q_fixed > fixed_point_one_Q0);
if(q_fixed == fixed_point_one_Q0)
{
@@ -67,18 +69,18 @@ arm_compute::Status arm_compute::quantization::calculate_quantized_multiplier_le
ARM_COMPUTE_RETURN_ERROR_ON(q_fixed > std::numeric_limits<int32_t>::max());
*quant_multiplier = static_cast<int32_t>(q_fixed);
- return arm_compute::Status{};
+ return Status{};
}
-arm_compute::Status arm_compute::quantization::calculate_quantized_multiplier_greater_than_one(float multiplier,
- int *quantized_multiplier,
- int *left_shift)
+Status calculate_quantized_multiplier_greater_than_one(float multiplier,
+ int *quantized_multiplier,
+ int *left_shift)
{
ARM_COMPUTE_RETURN_ERROR_ON(quantized_multiplier == nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(left_shift == nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(multiplier < 1.f);
const double q = std::frexp(multiplier, left_shift);
- auto q_fixed = static_cast<int64_t>(round(q * fixed_point_one_Q0));
+ auto q_fixed = static_cast<int64_t>(support::cpp11::round(q * fixed_point_one_Q0));
ARM_COMPUTE_RETURN_ERROR_ON(q_fixed > fixed_point_one_Q0);
if(q_fixed == fixed_point_one_Q0)
{
@@ -89,5 +91,34 @@ arm_compute::Status arm_compute::quantization::calculate_quantized_multiplier_gr
ARM_COMPUTE_RETURN_ERROR_ON(q_fixed > std::numeric_limits<int32_t>::max());
*quantized_multiplier = static_cast<int32_t>(q_fixed);
- return arm_compute::Status{};
+ return Status{};
+}
+std::pair<int, int> get_min_max_values_from_quantized_data_type(DataType data_type)
+{
+ int min_quant_val = 0;
+ int max_quant_val = 0;
+ switch(data_type)
+ {
+ case DataType::QASYMM8:
+ min_quant_val = std::numeric_limits<uint8_t>::min();
+ max_quant_val = std::numeric_limits<uint8_t>::max();
+ break;
+ case DataType::QSYMM8:
+ min_quant_val = std::numeric_limits<int8_t>::min();
+ max_quant_val = std::numeric_limits<int8_t>::max();
+ break;
+ case DataType::QASYMM16:
+ min_quant_val = std::numeric_limits<uint16_t>::min();
+ max_quant_val = std::numeric_limits<uint16_t>::max();
+ break;
+ case DataType::QSYMM16:
+ min_quant_val = std::numeric_limits<int16_t>::min();
+ max_quant_val = std::numeric_limits<int16_t>::max();
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+ return std::make_pair(min_quant_val, max_quant_val);
}
+} // namespace quantization
+} // namespace arm_compute
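For context on the functions this file moves into the arm_compute::quantization namespace: calculate_quantized_multiplier_less_than_one splits a float multiplier into a Q0.31 fixed-point value and a right shift. A standalone sketch, omitting the error checks and the q_fixed == 2^31 corner case handled above:

    #include <cmath>
    #include <cstdint>

    void quantize_multiplier_sketch(float multiplier, int32_t &quant_multiplier, int &right_shift)
    {
        // multiplier == q * 2^(-right_shift), with q in [0.5, 1)
        const double q = std::frexp(multiplier, &right_shift);
        right_shift *= -1;
        quant_multiplier = static_cast<int32_t>(std::llround(q * (1LL << 31)));
    }

    // quantize_multiplier_sketch(0.25f, m, s) leaves m == (1 << 30), s == 1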
diff --git a/tests/validation/CL/QuantizationLayer.cpp b/tests/validation/CL/QuantizationLayer.cpp
index 26e030489c..0aa7a100dc 100644
--- a/tests/validation/CL/QuantizationLayer.cpp
+++ b/tests/validation/CL/QuantizationLayer.cpp
@@ -43,8 +43,8 @@ namespace validation
namespace
{
constexpr AbsoluteTolerance<float> tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-const auto QuantizationShapes = concat(datasets::Small3DShapes(),
- datasets::Small4DShapes());
+const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
+const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
} // namespace
TEST_SUITE(CL)
@@ -71,7 +71,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationShapes, framework::dataset::make("DataType", DataType::F32)), shape, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationSmallShapes, framework::dataset::make("DataType", DataType::F32)), shape, data_type)
{
// Create tensors
CLTensor src = create_tensor<CLTensor>(shape, data_type);
@@ -95,20 +95,40 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
}
template <typename T>
-using CLQuantizationLayerFixture = QuantizationValidationFixture<CLTensor, CLAccessor, CLQuantizationLayer, T>;
+using CLQuantizationLayerQASYMM8Fixture = QuantizationValidationFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM16Fixture = QuantizationValidationFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint16_t>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, CLQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataTypeIn", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, CLQuantizationLayerQASYMM16Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataTypeIn", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -116,16 +136,18 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizationLayerFixture<float>, framework::D
TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLQuantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataTypeIn", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, CLQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataTypeIn", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index 0b503c09b3..8d19c93761 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -45,8 +45,8 @@ namespace
/** Tolerance for quantization */
constexpr AbsoluteTolerance<uint8_t> tolerance_u8(1);
-const auto QuantizationShapes = concat(datasets::Small3DShapes(),
- datasets::Small4DShapes());
+const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
+const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
} // namespace
TEST_SUITE(NEON)
@@ -73,7 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationShapes, framework::dataset::make("DataType", DataType::F32)), shape, data_type)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationSmallShapes, framework::dataset::make("DataType", DataType::F32)), shape, data_type)
{
// Create tensors
Tensor src = create_tensor<Tensor>(shape, data_type);
@@ -97,20 +97,22 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
}
template <typename T>
-using NEQuantizationLayerFixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T>;
+using NEQuantizationLayerQASYMM8Fixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, uint8_t>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
@@ -120,16 +122,18 @@ TEST_SUITE_END() // Float
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(Half)
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunLargeQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 84d4d7a7b3..4ffc659027 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -42,15 +42,15 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename Tin, typename Tout>
class QuantizationValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type, QuantizationInfo quant_info)
+ void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
{
- _target = compute_target(shape, data_type, quant_info);
- _reference = compute_reference(shape, data_type, quant_info);
+ _target = compute_target(shape, data_type_in, data_type_out, qinfo);
+ _reference = compute_reference(shape, data_type_in, data_type_out, qinfo);
}
protected:
@@ -60,11 +60,11 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- TensorType compute_target(const TensorShape &shape, DataType data_type, QuantizationInfo quant_info)
+ TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, DataType::QASYMM8, 1, quant_info);
+ TensorType src = create_tensor<TensorType>(shape, data_type_in);
+ TensorType dst = create_tensor<TensorType>(shape, data_type_out, 1, qinfo);
// Create and configure function
FunctionType quantization_layer;
@@ -89,19 +89,19 @@ protected:
return dst;
}
- SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo quant_info)
+ SimpleTensor<Tout> compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type };
+ SimpleTensor<Tin> src{ shape, data_type_in };
// Fill reference
fill(src);
- return reference::quantization_layer<T>(src, quant_info);
+ return reference::quantization_layer<Tin, Tout>(src, data_type_out, qinfo);
}
- TensorType _target{};
- SimpleTensor<uint8_t> _reference{};
+ TensorType _target{};
+ SimpleTensor<Tout> _reference{};
};
} // namespace validation
diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp
index 182585abf9..ae23f7ec27 100644
--- a/tests/validation/reference/QuantizationLayer.cpp
+++ b/tests/validation/reference/QuantizationLayer.cpp
@@ -33,26 +33,45 @@ namespace validation
{
namespace reference
{
-template <typename T>
-SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src, const QuantizationInfo &quantization_info)
+template <typename Tin, typename Tout>
+SimpleTensor<Tout> quantization_layer(const SimpleTensor<Tin> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
{
// Create reference
- SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
+ SimpleTensor<Tout> dst{ src.shape(), output_data_type, 1, quantization_info };
const UniformQuantizationInfo qinfo = quantization_info.uniform();
- for(int i = 0; i < src.num_elements(); ++i)
+ switch(output_data_type)
{
+ case DataType::QASYMM8:
+ for(int i = 0; i < src.num_elements(); ++i)
+ {
#ifdef __aarch64__
- dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
+ dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
#else // __aarch64__
- dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_ZERO);
+ dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_ZERO);
#endif // __aarch64__
+ }
+ break;
+ case DataType::QASYMM16:
+ for(int i = 0; i < src.num_elements(); ++i)
+ {
+#ifdef __aarch64__
+ dst[i] = quantize_qasymm16((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
+#else // __aarch64__
+ dst[i] = quantize_qasymm16((src[i]), qinfo, RoundingPolicy::TO_ZERO);
+#endif // __aarch64__
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported output data type");
}
return dst;
}
-template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src, const QuantizationInfo &quantization_info);
-template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info);
+template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
+template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
+template SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
+template SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
} // namespace reference
} // namespace validation
} // namespace test
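The reference selects RoundingPolicy::TO_NEAREST_EVEN on __aarch64__ and TO_ZERO elsewhere. A standalone sketch of how the two policies differ (assuming the default FE_TONEAREST floating-point environment for std::nearbyint):

    #include <cmath>

    int round_to_nearest_even_sketch(float x)
    {
        return static_cast<int>(std::nearbyint(x)); // ties go to the even neighbour
    }

    int round_to_zero_sketch(float x)
    {
        return static_cast<int>(std::trunc(x)); // drops the fractional part
    }

    // round_to_nearest_even_sketch(2.5f) == 2, round_to_zero_sketch(2.5f) == 2
    // round_to_nearest_even_sketch(3.5f) == 4, round_to_zero_sketch(3.5f) == 3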
diff --git a/tests/validation/reference/QuantizationLayer.h b/tests/validation/reference/QuantizationLayer.h
index 462396f131..0e80b4906a 100644
--- a/tests/validation/reference/QuantizationLayer.h
+++ b/tests/validation/reference/QuantizationLayer.h
@@ -35,8 +35,8 @@ namespace validation
{
namespace reference
{
-template <typename T>
-SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src, const QuantizationInfo &quantization_info);
+template <typename Tin, typename Tout>
+SimpleTensor<Tout> quantization_layer(const SimpleTensor<Tin> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
} // namespace reference
} // namespace validation
} // namespace test