From 76259ca5a74a60ef2b80f29431ab53bbac9fdb63 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Thu, 12 Jul 2018 11:14:20 +0100
Subject: COMPMID-1385: Fixed QASYMM8 mismatches

Added support for different quantization info in the operands and the
output tensors in CLArithmeticAddition.

Change-Id: I7704baccc3b609fcc514f947f1b5b5331745ed9e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/139947
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
Reviewed-by: Giorgio Arena
---
 src/core/CL/cl_kernels/arithmetic_op_quantized.cl  | 21 +++++------
 src/core/CL/kernels/CLArithmeticAdditionKernel.cpp |  7 +++-
 tests/validation/CL/ArithmeticAddition.cpp         |  9 +++--
 .../fixtures/ArithmeticAdditionFixture.h           | 42 +++++++++++++---------
 tests/validation/reference/ArithmeticAddition.cpp  | 35 +++++++++++++-----
 tests/validation/reference/ArithmeticAddition.h    |  6 +++-
 6 files changed, 80 insertions(+), 40 deletions(-)

diff --git a/src/core/CL/cl_kernels/arithmetic_op_quantized.cl b/src/core/CL/cl_kernels/arithmetic_op_quantized.cl
index 82e92e32a8..082317ba11 100644
--- a/src/core/CL/cl_kernels/arithmetic_op_quantized.cl
+++ b/src/core/CL/cl_kernels/arithmetic_op_quantized.cl
@@ -31,11 +31,11 @@
 #define SUB(x, y) (x) - (y)
 #endif /* SATURATE */
 
-#if defined(OFFSET)
+#if defined(OFFSET_IN1)
 
 /** This function adds two tensors.
  *
- * @attention The quantization offset must be passed at compile time using -DOFFSET, i.e. -DOFFSET=10
+ * @attention The quantization offset must be passed at compile time using -DOFFSET_IN1, i.e. -DOFFSET_IN1=10
  * @attention To perform saturating operation -DSATURATE has to be passed to the compiler otherwise wrapping policy will be used.
  *
  * @param[in] in1_ptr Pointer to the source tensor. Supported data types: QASYMM8
@@ -73,17 +73,18 @@ __kernel void arithmetic_add_quantized(
     Tensor3D in2 = CONVERT_TO_TENSOR3D_STRUCT(in2);
     Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
 
-    // Load values
-    const short16 in_a = CONVERT(vload16(0, (__global uchar *)in1.ptr), short16);
-    const short16 in_b = CONVERT(vload16(0, (__global uchar *)in2.ptr), short16);
-    const short16 offset = OFFSET;
+    int16 in_a = CONVERT(vload16(0, (__global uchar *)in1.ptr), int16);
+    int16 in_b = CONVERT(vload16(0, (__global uchar *)in2.ptr), int16);
 
-    // Calculate result
-    short16 res = ADD(in_a, SUB(in_b, offset));
+    in_a = SUB(in_a, (int16)((int)OFFSET_IN1));
+    in_b = SUB(in_b, (int16)((int)OFFSET_IN2));
 
-    res = max((short16)0, min(res, (short16)255));
+    const float16 in1f32  = convert_float16(in_a) * (float16)((float)SCALE_IN1);
+    const float16 in2f32  = convert_float16(in_b) * (float16)((float)SCALE_IN2);
+    const float16 qresf32 = (in1f32 + in2f32) / ((float16)(float)SCALE_OUT) + ((float16)((float16)OFFSET_OUT));
+    const uchar16 res     = convert_uchar16_sat(convert_int16_rte(qresf32));
 
     // Store result
-    vstore16(CONVERT(res, uchar16), 0, (__global uchar *)out.ptr);
+    vstore16(res, 0, (__global uchar *)out.ptr);
 }
 #endif /* defined(OFFSET) */
diff --git a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
index 78651f8679..6d6cb6f98c 100644
--- a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
@@ -142,7 +142,12 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
     build_opts.emplace("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
     if(is_data_type_quantized_asymmetric(input1->info()->data_type()))
     {
-        build_opts.emplace("-DOFFSET=" + support::cpp11::to_string(input1->info()->quantization_info().offset));
+        build_opts.emplace("-DOFFSET_IN1=" + support::cpp11::to_string(input1->info()->quantization_info().offset));
+        build_opts.emplace("-DOFFSET_IN2=" + support::cpp11::to_string(input2->info()->quantization_info().offset));
+        build_opts.emplace("-DOFFSET_OUT=" + support::cpp11::to_string(output->info()->quantization_info().offset));
+        build_opts.emplace("-DSCALE_IN1=" + support::cpp11::to_string(input1->info()->quantization_info().scale));
+        build_opts.emplace("-DSCALE_IN2=" + support::cpp11::to_string(input2->info()->quantization_info().scale));
+        build_opts.emplace("-DSCALE_OUT=" + support::cpp11::to_string(output->info()->quantization_info().scale));
         kernel_name += "_quantized";
     }
 
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 256d93f7f5..bd27bc4043 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -152,9 +152,14 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
     validate(dst.info()->padding(), padding);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), ArithmeticAdditionQASYMM8Dataset),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticAdditionQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallShapes(),
+                       ArithmeticAdditionQASYMM8Dataset),
                        framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) }))
+
+                      )
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
diff --git a/tests/validation/fixtures/ArithmeticAdditionFixture.h b/tests/validation/fixtures/ArithmeticAdditionFixture.h
index 6d529a843c..8b14485aca 100644
--- a/tests/validation/fixtures/ArithmeticAdditionFixture.h
+++ b/tests/validation/fixtures/ArithmeticAdditionFixture.h
@@ -46,10 +46,10 @@ class ArithmeticAdditionGenericFixture : public framework::Fixture
 public:
     template <typename...>
     void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
-               QuantizationInfo quantization_info)
+               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
     {
-        _target    = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
-        _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
+        _target    = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
+        _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
     }
 
 protected:
@@ -60,12 +60,12 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
-                              QuantizationInfo quantization_info)
+                              QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
     {
         // Create tensors
-        TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, quantization_info);
-        TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, quantization_info);
-        TensorType dst      = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, quantization_info);
+        TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, qinfo0);
+        TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, qinfo1);
+        TensorType dst      = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out);
 
         // Create and configure function
         FunctionType add;
@@ -94,18 +94,20 @@ protected:
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
-                                      QuantizationInfo quantization_info)
+    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1,
+                                      DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+                                      QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
     {
         // Create reference
-        SimpleTensor<T> ref_src1{ shape0, data_type0, 1, quantization_info };
-        SimpleTensor<T> ref_src2{ shape1, data_type1, 1, quantization_info };
+        SimpleTensor<T> ref_src1{ shape0, data_type0, 1, qinfo0 };
+        SimpleTensor<T> ref_src2{ shape1, data_type1, 1, qinfo1 };
+        SimpleTensor<T> ref_dst{ TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, qinfo_out };
 
         // Fill reference
         fill(ref_src1, 0);
         fill(ref_src2, 1);
 
-        return reference::arithmetic_addition(ref_src1, ref_src2, output_data_type, convert_policy);
+        return reference::arithmetic_addition(ref_src1, ref_src2, ref_dst, convert_policy);
     }
 
     TensorType _target{};
@@ -119,7 +121,8 @@ public:
     template <typename...>
     void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1,
+                                                                                            output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
     }
 };
 
@@ -130,7 +133,8 @@ public:
     template <typename...>
     void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1,
+                                                                                            output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
     }
 };
 
@@ -141,7 +145,8 @@ public:
     template <typename...>
     void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, QuantizationInfo());
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1,
+                                                                                            output_data_type, convert_policy, QuantizationInfo(), QuantizationInfo(), QuantizationInfo());
     }
 };
 
@@ -161,9 +166,12 @@ class ArithmeticAdditionValidationQuantizedFixture : public ArithmeticAdditionGe
 {
 public:
     template <typename...>
-    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, QuantizationInfo quantization_info)
+    void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+               QuantizationInfo qinfo0, QuantizationInfo qinfo1, QuantizationInfo qinfo_out)
+
     {
-        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, quantization_info);
+        ArithmeticAdditionGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1,
+                                                                                            output_data_type, convert_policy, qinfo0, qinfo1, qinfo_out);
     }
 };
 } // namespace validation
diff --git a/tests/validation/reference/ArithmeticAddition.cpp b/tests/validation/reference/ArithmeticAddition.cpp
index 4569277103..f08f3f51e1 100644
--- a/tests/validation/reference/ArithmeticAddition.cpp
+++ b/tests/validation/reference/ArithmeticAddition.cpp
@@ -85,10 +85,8 @@ struct BroadcastUnroll<0>
 } // namespace
 
 template <typename T>
-SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, DataType dst_data_type, ConvertPolicy convert_policy)
+SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, SimpleTensor<T> &dst, ConvertPolicy convert_policy)
 {
-    SimpleTensor<T> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst_data_type);
-
     Coordinates id_src1, id_src2, id_dst;
 
     BroadcastUnroll<Coordinates::num_max_dimensions>::unroll(src1, src2, dst, convert_policy, id_src1, id_src2, id_dst);
@@ -97,26 +95,24 @@ SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTen
 }
 
 template <>
-SimpleTensor<uint8_t> arithmetic_addition(const SimpleTensor<uint8_t> &src1, const SimpleTensor<uint8_t> &src2, DataType dst_data_type, ConvertPolicy convert_policy)
+SimpleTensor<uint8_t> arithmetic_addition(const SimpleTensor<uint8_t> &src1, const SimpleTensor<uint8_t> &src2, SimpleTensor<uint8_t> &dst, ConvertPolicy convert_policy)
 {
-    if(dst_data_type == DataType::QASYMM8)
+    if(dst.data_type() == DataType::QASYMM8)
     {
         SimpleTensor<float> src1_tmp = convert_from_asymmetric(src1);
         SimpleTensor<float> src2_tmp = convert_from_asymmetric(src2);
-        SimpleTensor<float> dst_tmp(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst_data_type);
+        SimpleTensor<float> dst_tmp(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst.data_type());
 
         Coordinates id_src1, id_src2, id_dst;
 
         BroadcastUnroll<Coordinates::num_max_dimensions>::unroll(src1_tmp, src2_tmp, dst_tmp, convert_policy, id_src1, id_src2, id_dst);
 
-        SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, src1.quantization_info());
+        dst = convert_to_asymmetric(dst_tmp, dst.quantization_info());
         return dst;
     }
     else
     {
         // DataType::U8
-        SimpleTensor<uint8_t> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst_data_type);
-
         Coordinates id_src1, id_src2, id_dst;
 
         BroadcastUnroll<Coordinates::num_max_dimensions>::unroll(src1, src2, dst, convert_policy, id_src1, id_src2, id_dst);
@@ -125,10 +121,31 @@ SimpleTensor arithmetic_addition(const SimpleTensor &src1, con
     }
 }
 
+template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, SimpleTensor &dst, ConvertPolicy convert_policy);
+template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, SimpleTensor &dst, ConvertPolicy convert_policy);
+template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, SimpleTensor &dst, ConvertPolicy convert_policy);
+template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, SimpleTensor &dst, ConvertPolicy convert_policy);
+
+template <typename T>
+SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, DataType dst_data_type, ConvertPolicy convert_policy)
+{
+    SimpleTensor<T> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst_data_type);
+    arithmetic_addition(src1, src2, dst, convert_policy);
+    return dst;
+}
+
+template <>
+SimpleTensor<uint8_t> arithmetic_addition(const SimpleTensor<uint8_t> &src1, const SimpleTensor<uint8_t> &src2, DataType dst_data_type, ConvertPolicy convert_policy)
+{
+    SimpleTensor<uint8_t> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst_data_type);
+    return arithmetic_addition(src1, src2, dst, convert_policy);
+}
+
 template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, DataType dst_data_type, ConvertPolicy convert_policy);
 template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, DataType dst_data_type, ConvertPolicy convert_policy);
 template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, DataType dst_data_type, ConvertPolicy convert_policy);
 template SimpleTensor arithmetic_addition(const SimpleTensor &src1, const SimpleTensor &src2, DataType dst_data_type, ConvertPolicy convert_policy);
+
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/ArithmeticAddition.h b/tests/validation/reference/ArithmeticAddition.h
index 5902a6f529..faeabd7a6f 100644
--- a/tests/validation/reference/ArithmeticAddition.h
+++ b/tests/validation/reference/ArithmeticAddition.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,8 +35,12 @@ namespace validation
 {
 namespace reference
 {
+template <typename T>
+SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, SimpleTensor<T> &dst, ConvertPolicy convert_policy);
+
 template <typename T>
 SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, DataType dst_data_type, ConvertPolicy convert_policy);
+
 } // namespace reference
 } // namespace validation
 } // namespace test
-- 
cgit v1.2.1
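
A note on the math, separate from the patch itself: the reworked kernel follows the usual QASYMM8 dequantize-add-requantize scheme. Each input is shifted by its own offset and scaled by its own scale, the float sum is divided by the output scale and shifted by the output offset, then rounded and saturated to [0, 255]. The standalone C++ sketch below models that per-element computation on the host; QParams and add_qasymm8 are illustrative names only (not Compute Library API), and std::lround merely approximates the kernel's round-to-nearest-even conversion (convert_int16_rte).

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Illustrative quantization parameters, mirroring the OFFSET_* / SCALE_*
// build options that the kernel in this patch receives at compile time.
struct QParams
{
    float scale;
    int   offset;
};

// Per-element model of the quantized addition:
// dequantize each input with its own (scale, offset), add in float,
// requantize with the output (scale, offset), round and saturate to [0, 255].
static uint8_t add_qasymm8(uint8_t a, uint8_t b, QParams qa, QParams qb, QParams qo)
{
    const float fa  = static_cast<float>(static_cast<int>(a) - qa.offset) * qa.scale;
    const float fb  = static_cast<float>(static_cast<int>(b) - qb.offset) * qb.scale;
    const float res = (fa + fb) / qo.scale + static_cast<float>(qo.offset);
    const long  q   = std::lround(res); // approximation of the kernel's convert_int16_rte
    return static_cast<uint8_t>(std::min(255L, std::max(0L, q)));
}

int main()
{
    // Quantization infos taken from the new test dataset:
    // input1 (5/255, 20), input2 (2/255, 10), output (1/255, 5).
    const QParams q1{5.0f / 255.0f, 20};
    const QParams q2{2.0f / 255.0f, 10};
    const QParams qo{1.0f / 255.0f, 5};

    // (30 - 20) * 5/255 + (15 - 10) * 2/255 = 60/255, requantized to 60 + 5 = 65.
    std::printf("%d\n", static_cast<int>(add_qasymm8(30, 15, q1, q2, qo)));
    return 0;
}

For operands whose sum falls outside the output range, the final clamp plays the role of the kernel's convert_uchar16_sat, so saturation behaviour can be cross-checked the same way as in-range values.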