From fd472f05dc73005a89a5e6275940ab5c9a609485 Mon Sep 17 00:00:00 2001
From: Viet-Hoa Do
Date: Wed, 15 Mar 2023 14:05:06 +0000
Subject: Add quantized support for unary elementwise in CPU

* Add quantized unary elementwise in CPU using LUT.
* Widen the input data range of the test suite.
  - Fix CPU exponential function overflow/underflow range.
  - Fix saturation issue of CL round operator.

Resolves: COMPMID-5763
Signed-off-by: Viet-Hoa Do
Change-Id: I41445de2b4a33ec6b01e0ab701516c240c852d0b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9367
Tested-by: Arm Jenkins
Reviewed-by: Jakub Sujak
Reviewed-by: Pablo Marquez Tello
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
 .../validation/fixtures/ElementwiseUnaryFixture.h | 212 ++++++++++++++++++---
 1 file changed, 181 insertions(+), 31 deletions(-)

(limited to 'tests/validation/fixtures')

diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h
index 1dc4f03e99..9b40d34d2b 100644
--- a/tests/validation/fixtures/ElementwiseUnaryFixture.h
+++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h
@@ -24,8 +24,10 @@
 #ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
 #define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE

+#include "arm_compute/core/QuantizationInfo.h"
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
 #include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/IAccessor.h"
@@ -33,6 +35,11 @@
 #include "tests/framework/Fixture.h"
 #include "tests/validation/reference/ElementwiseUnary.h"

+#include
+#include
+#include
+#include
+
 namespace arm_compute
 {
 namespace test
@@ -64,67 +71,131 @@ protected:
         {
             case ElementWiseUnary::EXP:
             {
-                FloatDistributionType distribution{ FloatType(-1.0f), FloatType(1.0f) };
-                library->fill(tensor, distribution, i);
+                switch(data_type)
+                {
+                    case DataType::F32:
+                    {
+                        FloatDistributionType distribution{ FloatType(-86.63f), FloatType(88.36f) };
+                        library->fill(tensor, distribution, i);
+                        break;
+                    }
+
+                    case DataType::F16:
+                    {
+                        FloatDistributionType distribution{ FloatType(-9.00f), FloatType(10.73f) };
+                        library->fill(tensor, distribution, i);
+                        break;
+                    }
+
+                    case DataType::QASYMM8:
+                    case DataType::QASYMM8_SIGNED:
+                        library->fill_tensor_uniform(tensor, i);
+                        break;
+
+                    default:
+                        ARM_COMPUTE_ERROR("Not implemented");
+                }
+
                 break;
             }
             case ElementWiseUnary::RSQRT:
+            case ElementWiseUnary::LOG:
             {
-                if(data_type == DataType::F32 || data_type == DataType::F16)
+                // For floating-point data type, the chosen input range is all positive numbers
+                // (i.e. positive and negative zeros are excluded).
+                switch(data_type)
                 {
-                    FloatDistributionType distribution{ FloatType(1.0f), FloatType(2.0f) };
-                    library->fill(tensor, distribution, i);
+                    case DataType::F32:
+                    {
+                        FloatDistributionType distribution{ std::numeric_limits<float>::min(), std::numeric_limits<float>::max() };
+                        library->fill(tensor, distribution, i);
+                        break;
+                    }
+
+                    case DataType::F16:
+                    {
+                        FloatDistributionType distribution{ FloatType(0.00006103515625f), FloatType(65504.0f) };
+                        library->fill(tensor, distribution, i);
+                        break;
+                    }
+
+                    case DataType::QASYMM8:
+                    case DataType::QASYMM8_SIGNED:
+                        library->fill_tensor_uniform(tensor, i);
+                        break;
+
+                    default:
+                        ARM_COMPUTE_ERROR("Not implemented");
                 }
-                else
+
+                break;
+            }
+            case ElementWiseUnary::SIN:
+            {
+                switch(data_type)
                 {
-                    library->fill_tensor_uniform(tensor, i);
+                    case DataType::F32:
+                    case DataType::F16:
+                    {
+                        FloatDistributionType distribution{ FloatType(-100.0f), FloatType(100.0f) };
+                        library->fill(tensor, distribution, i);
+                        break;
+                    }
+
+                    case DataType::S32:
+                    {
+                        std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+                        library->fill(tensor, distribution, i);
+                        break;
+                    }
+
+                    case DataType::QASYMM8:
+                    case DataType::QASYMM8_SIGNED:
+                        library->fill_tensor_uniform(tensor, i);
+                        break;
+
+                    default:
+                        ARM_COMPUTE_ERROR("Not implemented");
                 }
+
                 break;
             }
             case ElementWiseUnary::ABS:
             case ElementWiseUnary::NEG:
+            case ElementWiseUnary::ROUND:
             {
                 switch(data_type)
                 {
-                    case DataType::F16:
+                    case DataType::F32:
                     {
-                        arm_compute::utils::uniform_real_distribution_16bit<T> distribution{ -2.0f, 2.0f };
+                        FloatDistributionType distribution{ std::numeric_limits<float>::lowest() / 2, std::numeric_limits<float>::max() / 2 };
                         library->fill(tensor, distribution, i);
                         break;
                     }
-                    case DataType::F32:
+
+                    case DataType::F16:
                     {
-                        FloatDistributionType distribution{ FloatType(-2.0f), FloatType(2.0f) };
+                        FloatDistributionType distribution{ FloatType(-65504.0f), FloatType(65504.0f) };
                         library->fill(tensor, distribution, i);
                         break;
                     }
+
                     case DataType::S32:
                     {
-                        std::uniform_int_distribution<int32_t> distribution(-100, 100);
+                        std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
                         library->fill(tensor, distribution, i);
                         break;
                     }
+
+                    case DataType::QASYMM8:
+                    case DataType::QASYMM8_SIGNED:
+                        library->fill_tensor_uniform(tensor, i);
+                        break;
+
                     default:
-                        ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented");
+                        ARM_COMPUTE_ERROR("Not implemented");
                 }
-                break;
-            }
-            case ElementWiseUnary::LOG:
-            {
-                FloatDistributionType distribution{ FloatType(0.0000001f), FloatType(100.0f) };
-                library->fill(tensor, distribution, i);
-                break;
-            }
-            case ElementWiseUnary::SIN:
-            {
-                FloatDistributionType distribution{ FloatType(-100.00f), FloatType(100.00f) };
-                library->fill(tensor, distribution, i);
-                break;
-            }
-            case ElementWiseUnary::ROUND:
-            {
-                FloatDistributionType distribution{ FloatType(100.0f), FloatType(-100.0f) };
-                library->fill(tensor, distribution, i);
+
                 break;
             }
             default:
@@ -199,6 +270,8 @@ protected:
     SimpleTensor<T> _reference{};
     ElementWiseUnary _op{};
     bool _use_dynamic_shape{ false };
+    QuantizationInfo _input_qinfo{};
+    QuantizationInfo _output_qinfo{};
 };
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class RsqrtQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
@@ -244,6 +317,17 @@ public:
     }
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ExpQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP, false, iq, oq);
+    }
+};
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
@@ -255,6 +339,17 @@ public:
     }
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG, false, iq, oq);
+    }
+};
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
@@ -266,6 +361,17 @@ public:
     }
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG, false, iq, oq);
+    }
+};
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
@@ -277,6 +383,17 @@ public:
     }
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG, false, iq, oq);
+    }
+};
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
@@ -288,6 +405,17 @@ public:
     }
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AbsQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS, false, iq, oq);
+    }
+};
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
@@ -299,6 +427,17 @@ public:
     }
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SinQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN, false, iq, oq);
+    }
+};
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
@@ -309,6 +448,17 @@ public:
         ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND);
     }
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RoundQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+    {
+        ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND, false, iq, oq);
+    }
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
--
cgit v1.2.1
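
Editor's note on the LUT approach the commit message refers to: the sketch below is an illustration only, not the Compute Library kernel or its API. It shows the general technique of implementing a quantized unary elementwise operator with a 256-entry lookup table: every representable 8-bit input is dequantized once, passed through the float function, and requantized, so the per-element work at run time reduces to a single table lookup. The names UniformQuantization, make_exp_lut and exp_qasymm8, and the scale/offset handling, are assumptions made for the sketch.

// Illustrative sketch only (hypothetical names, not Compute Library code):
// build a 256-entry LUT for a quantized unary op such as exp() on QASYMM8.
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>

struct UniformQuantization // assumed asymmetric quantization parameters (scale, zero point)
{
    float   scale;
    int32_t offset;
};

// Build the table once: dequantize each possible input byte, apply the float
// function, then requantize with saturation into the output byte.
std::array<uint8_t, 256> make_exp_lut(const UniformQuantization &in_q, const UniformQuantization &out_q)
{
    std::array<uint8_t, 256> lut{};
    for(int i = 0; i < 256; ++i)
    {
        const float x = (static_cast<float>(i) - static_cast<float>(in_q.offset)) * in_q.scale; // dequantize
        const float y = std::exp(x);                                                            // op in float
        const int   q = static_cast<int>(std::lround(y / out_q.scale)) + out_q.offset;          // requantize
        lut[static_cast<size_t>(i)] = static_cast<uint8_t>(std::clamp(q, 0, 255));              // saturate
    }
    return lut;
}

// The elementwise kernel then degenerates to one lookup per element.
void exp_qasymm8(const uint8_t *src, uint8_t *dst, size_t count, const std::array<uint8_t, 256> &lut)
{
    for(size_t i = 0; i < count; ++i)
    {
        dst[i] = lut[src[i]];
    }
}

Because the table enumerates the whole 8-bit input domain, the result matches the float reference up to requantization rounding regardless of the input values, which is consistent with the fixture above simply filling quantized inputs over their full range via fill_tensor_uniform.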