From fd472f05dc73005a89a5e6275940ab5c9a609485 Mon Sep 17 00:00:00 2001 From: Viet-Hoa Do Date: Wed, 15 Mar 2023 14:05:06 +0000 Subject: Add quantized support for unary elementwise in CPU * Add quantized unary elementwise in CPU using LUT. * Widen the input data range of the test suite. - Fix CPU exponential function overflow/underflow range. - Fix saturation issue of CL round operator. Resolves: COMPMID-5763 Signed-off-by: Viet-Hoa Do Change-Id: I41445de2b4a33ec6b01e0ab701516c240c852d0b Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9367 Tested-by: Arm Jenkins Reviewed-by: Jakub Sujak Reviewed-by: Pablo Marquez Tello Comments-Addressed: Arm Jenkins Benchmark: Arm Jenkins --- tests/validation/NEON/ElementwiseAbsoluteValue.cpp | 33 +++- tests/validation/NEON/ElementwiseExpLayer.cpp | 34 +++- tests/validation/NEON/ElementwiseLog.cpp | 34 +++- tests/validation/NEON/ElementwiseNegation.cpp | 33 +++- tests/validation/NEON/ElementwiseRound.cpp | 37 +++- tests/validation/NEON/ElementwiseRsqrtLayer.cpp | 33 +++- tests/validation/NEON/ElementwiseSin.cpp | 34 +++- .../validation/fixtures/ElementwiseUnaryFixture.h | 212 ++++++++++++++++++--- tests/validation/reference/ElementwiseUnary.cpp | 30 ++- 9 files changed, 440 insertions(+), 40 deletions(-) (limited to 'tests') diff --git a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp index ccde670034..7f6a6a5bb2 100644 --- a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp +++ b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,6 +46,8 @@ RelativeTolerance tolerance_fp32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC RelativeTolerance tolerance_fp16(0.01f); #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); } // namespace TEST_SUITE(NEON) @@ -53,6 +55,9 @@ TEST_SUITE(AbsLayer) template using NEAbsLayerFixture = AbsValidationFixture; +template +using NEAbsLayerQuantizedFixture = AbsQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -107,6 +112,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEAbsLayerFixture, framework::DatasetM TEST_SUITE_END() // S32 TEST_SUITE_END() // Integer +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized TEST_SUITE_END() // AbsLayer TEST_SUITE_END() 
// Neon } // namespace validation diff --git a/tests/validation/NEON/ElementwiseExpLayer.cpp b/tests/validation/NEON/ElementwiseExpLayer.cpp index f9e5f39989..e8940c5385 100644 --- a/tests/validation/NEON/ElementwiseExpLayer.cpp +++ b/tests/validation/NEON/ElementwiseExpLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -46,6 +46,8 @@ RelativeTolerance tolerance_fp32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC RelativeTolerance tolerance_fp16(0.01f); #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); } // namespace TEST_SUITE(NEON) TEST_SUITE(ExpLayer) @@ -53,6 +55,9 @@ TEST_SUITE(ExpLayer) template using NEExpLayerFixture = ExpValidationFixture; +template +using NEExpLayerQuantizedFixture = ExpQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -82,6 +87,33 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerFixture, framework::DatasetMod TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.01, 0) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.003, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.02, -1) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.002, -2) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized + TEST_SUITE_END() // ExpLayer TEST_SUITE_END() // Neon } // namespace validation diff --git a/tests/validation/NEON/ElementwiseLog.cpp b/tests/validation/NEON/ElementwiseLog.cpp index 3aa7fb3665..49a88ced1c 100644 --- a/tests/validation/NEON/ElementwiseLog.cpp +++ b/tests/validation/NEON/ElementwiseLog.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,6 +46,8 @@ RelativeTolerance tolerance_fp32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC RelativeTolerance tolerance_fp16(0.01f); #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); } // namespace TEST_SUITE(NEON) TEST_SUITE(LogLayer) @@ -53,6 +55,9 @@ TEST_SUITE(LogLayer) template using NELogLayerFixture = LogValidationFixture; +template +using NELogLayerQuantizedFixture = LogQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -88,6 +93,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NELogLayerFixture, framework::DatasetMod } TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NELogLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(10.5, 0) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(5, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NELogLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.75, -128) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(12.5, -2) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized TEST_SUITE_END() // LogLayer TEST_SUITE_END() // Neon } // namespace validation diff --git a/tests/validation/NEON/ElementwiseNegation.cpp b/tests/validation/NEON/ElementwiseNegation.cpp index 0b63588d8a..038058c70c 100644 --- a/tests/validation/NEON/ElementwiseNegation.cpp +++ b/tests/validation/NEON/ElementwiseNegation.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,6 +46,8 @@ RelativeTolerance tolerance_fp32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC RelativeTolerance tolerance_fp16(0.01f); #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); } // namespace TEST_SUITE(NEON) TEST_SUITE(NegLayer) @@ -53,6 +55,9 @@ TEST_SUITE(NegLayer) template using NENegLayerFixture = NegValidationInPlaceFixture; +template +using NENegLayerQuantizedFixture = NegQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -113,6 +118,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NENegLayerFixture, framework::DatasetM TEST_SUITE_END() // S32 TEST_SUITE_END() // Integer +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized TEST_SUITE_END() // NegLayer TEST_SUITE_END() // Neon } // namespace validation diff --git a/tests/validation/NEON/ElementwiseRound.cpp b/tests/validation/NEON/ElementwiseRound.cpp index d2f0b456a0..a6ff47c830 100644 --- a/tests/validation/NEON/ElementwiseRound.cpp +++ b/tests/validation/NEON/ElementwiseRound.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -40,12 +40,20 @@ namespace test { namespace validation { +namespace +{ +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); +} // namespace TEST_SUITE(NEON) TEST_SUITE(RoundLayer) template using NERoundLayerFixture = RoundValidationFixture; +template +using NERoundLayerQuantizedFixture = RoundQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -81,6 +89,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NERoundLayerFixture, framework::DatasetM } TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float + +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NERoundLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NERoundLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized TEST_SUITE_END() // RoundLayer TEST_SUITE_END() // Neon } // namespace validation diff --git a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp index 2d52183b15..1d291ac6dc 100644 --- a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp +++ b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,6 +46,8 @@ RelativeTolerance tolerance_fp32(0.000001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC RelativeTolerance tolerance_fp16(0.01f); #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); } // namespace TEST_SUITE(NEON) TEST_SUITE(RsqrtLayer) @@ -72,6 +74,9 @@ TEST_SUITE_END() // DynamicShape template using NERsqrtLayerFixture = RsqrtValidationFixture; +template +using NERsqrtLayerQuantizedFixture = RsqrtQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -102,6 +107,32 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerFixture, framework::DatasetM TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(20, 0) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(25, -128) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized TEST_SUITE_END() // RsqrtLayer TEST_SUITE_END() // Neon } // namespace validation diff --git a/tests/validation/NEON/ElementwiseSin.cpp b/tests/validation/NEON/ElementwiseSin.cpp index 06775c0690..76f4c50b46 100644 --- a/tests/validation/NEON/ElementwiseSin.cpp +++ b/tests/validation/NEON/ElementwiseSin.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021 Arm Limited. + * Copyright (c) 2019-2021, 2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -46,6 +46,8 @@ AbsoluteTolerance tolerance_fp32(0.00001f); #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC AbsoluteTolerance tolerance_fp16(0.0005f); #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_qasymm8(0); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(0); } // namespace TEST_SUITE(NEON) TEST_SUITE(SinLayer) @@ -53,6 +55,9 @@ TEST_SUITE(SinLayer) template using NESinLayerFixture = SinValidationFixture; +template +using NESinLayerQuantizedFixture = SinQuantizedValidationFixture; + TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) @@ -89,6 +94,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture, framework::DatasetMod TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float +TEST_SUITE(Quantized) +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(200, 10) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine( + datasets::SmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("InputQInfo", { QuantizationInfo(0.07, 6) })), + framework::dataset::make("OutputQInfo", { QuantizationInfo(123, -7) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() // QASYMM8_SIGNED + +TEST_SUITE_END() // Quantized + TEST_SUITE_END() // SinLayer TEST_SUITE_END() // Neon } // namespace validation diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h index 1dc4f03e99..9b40d34d2b 100644 --- a/tests/validation/fixtures/ElementwiseUnaryFixture.h +++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h @@ -24,8 +24,10 @@ #ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE #define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE +#include "arm_compute/core/QuantizationInfo.h" #include "arm_compute/core/TensorShape.h" #include "arm_compute/core/Types.h" +#include "arm_compute/core/Utils.h" #include "tests/AssetsLibrary.h" #include "tests/Globals.h" #include "tests/IAccessor.h" @@ -33,6 +35,11 @@ #include "tests/framework/Fixture.h" #include "tests/validation/reference/ElementwiseUnary.h" +#include +#include +#include +#include + namespace arm_compute { namespace test @@ -64,67 +71,131 @@ protected: { case ElementWiseUnary::EXP: { - FloatDistributionType distribution{ FloatType(-1.0f), FloatType(1.0f) }; - library->fill(tensor, distribution, i); + switch(data_type) + { + case DataType::F32: + { + FloatDistributionType distribution{ FloatType(-86.63f), FloatType(88.36f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(-9.00f), FloatType(10.73f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); + } + break; } case ElementWiseUnary::RSQRT: + case ElementWiseUnary::LOG: { - 
if(data_type == DataType::F32 || data_type == DataType::F16) + // For floating-point data type, the chosen input range is all positive numbers + // (i.e. positive and negative zeros are excluded). + switch(data_type) { - FloatDistributionType distribution{ FloatType(1.0f), FloatType(2.0f) }; - library->fill(tensor, distribution, i); + case DataType::F32: + { + FloatDistributionType distribution{ std::numeric_limits::min(), std::numeric_limits::max() }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(0.00006103515625f), FloatType(65504.0f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); } - else + + break; + } + case ElementWiseUnary::SIN: + { + switch(data_type) { - library->fill_tensor_uniform(tensor, i); + case DataType::F32: + case DataType::F16: + { + FloatDistributionType distribution{ FloatType(-100.0f), FloatType(100.0f) }; + library->fill(tensor, distribution, i); + break; + } + + case DataType::S32: + { + std::uniform_int_distribution distribution(std::numeric_limits::lowest(), std::numeric_limits::max()); + library->fill(tensor, distribution, i); + break; + } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + + default: + ARM_COMPUTE_ERROR("Not implemented"); } + break; } case ElementWiseUnary::ABS: case ElementWiseUnary::NEG: + case ElementWiseUnary::ROUND: { switch(data_type) { - case DataType::F16: + case DataType::F32: { - arm_compute::utils::uniform_real_distribution_16bit distribution{ -2.0f, 2.0f }; + FloatDistributionType distribution{ std::numeric_limits::lowest() / 2, std::numeric_limits::max() / 2 }; library->fill(tensor, distribution, i); break; } - case DataType::F32: + + case DataType::F16: { - FloatDistributionType distribution{ FloatType(-2.0f), FloatType(2.0f) }; + FloatDistributionType distribution{ FloatType(-65504.0f), FloatType(65504.0f) }; library->fill(tensor, distribution, i); break; } + case DataType::S32: { - std::uniform_int_distribution distribution(-100, 100); + std::uniform_int_distribution distribution(std::numeric_limits::lowest(), std::numeric_limits::max()); library->fill(tensor, distribution, i); break; } + + case DataType::QASYMM8: + case DataType::QASYMM8_SIGNED: + library->fill_tensor_uniform(tensor, i); + break; + default: - ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented"); + ARM_COMPUTE_ERROR("Not implemented"); } - break; - } - case ElementWiseUnary::LOG: - { - FloatDistributionType distribution{ FloatType(0.0000001f), FloatType(100.0f) }; - library->fill(tensor, distribution, i); - break; - } - case ElementWiseUnary::SIN: - { - FloatDistributionType distribution{ FloatType(-100.00f), FloatType(100.00f) }; - library->fill(tensor, distribution, i); - break; - } - case ElementWiseUnary::ROUND: - { - FloatDistributionType distribution{ FloatType(100.0f), FloatType(-100.0f) }; - library->fill(tensor, distribution, i); + break; } default: @@ -199,6 +270,8 @@ protected: SimpleTensor _reference{}; ElementWiseUnary _op{}; bool _use_dynamic_shape{ false }; + QuantizationInfo _input_qinfo{}; + QuantizationInfo _output_qinfo{}; }; template class RsqrtQuantizedValidationFixture : public ElementWiseUnaryValidationFixture @@ -244,6 +317,17 @@ public: } }; +template +class ExpQuantizedValidationFixture : public 
ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::EXP, false, iq, oq); + } +}; + template class NegValidationFixture : public ElementWiseUnaryValidationFixture { @@ -255,6 +339,17 @@ public: } }; +template +class NegQuantizedValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::NEG, false, iq, oq); + } +}; + template class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture { @@ -266,6 +361,17 @@ public: } }; +template +class NegQuantizedValidationInPlaceFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, in_place, ElementWiseUnary::NEG, false, iq, oq); + } +}; + template class LogValidationFixture : public ElementWiseUnaryValidationFixture { @@ -277,6 +383,17 @@ public: } }; +template +class LogQuantizedValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::LOG, false, iq, oq); + } +}; + template class AbsValidationFixture : public ElementWiseUnaryValidationFixture { @@ -288,6 +405,17 @@ public: } }; +template +class AbsQuantizedValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::ABS, false, iq, oq); + } +}; + template class SinValidationFixture : public ElementWiseUnaryValidationFixture { @@ -299,6 +427,17 @@ public: } }; +template +class SinQuantizedValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::SIN, false, iq, oq); + } +}; + template class RoundValidationFixture : public ElementWiseUnaryValidationFixture { @@ -309,6 +448,17 @@ public: ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::ROUND); } }; + +template +class RoundQuantizedValidationFixture : public ElementWiseUnaryValidationFixture +{ +public: + template + void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq) + { + ElementWiseUnaryValidationFixture::setup(shape, data_type, false, ElementWiseUnary::ROUND, false, iq, oq); + } +}; } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/reference/ElementwiseUnary.cpp b/tests/validation/reference/ElementwiseUnary.cpp index d5218d772d..558f9d24fc 100644 --- a/tests/validation/reference/ElementwiseUnary.cpp +++ b/tests/validation/reference/ElementwiseUnary.cpp @@ -88,8 +88,21 @@ SimpleTensor elementwise_unary(const SimpleTensor &src, SimpleTe dst_tmp[i] = (127.0f - 
dst.quantization_info().uniform().offset) * dst.quantization_info().uniform().scale ; } break; + + case ElementWiseUnary::LOG: + if(src_tmp[i] != 0) + { + dst_tmp[i] = std::log(src_tmp[i]); + } + else + { + dst_tmp[i] = (-128.0f - dst.quantization_info().uniform().offset) * dst.quantization_info().uniform().scale ; + } + break; + default: - ARM_COMPUTE_ERROR("Not implemented"); + elementwise_unary(src_tmp, dst_tmp, op); + break; } } dst = convert_to_asymmetric(dst_tmp, dst.quantization_info()); @@ -122,8 +135,21 @@ SimpleTensor elementwise_unary(const SimpleTensor &src, Simple dst_tmp[i] = (255.0f - dst.quantization_info().uniform().offset)* dst.quantization_info().uniform().scale; } break; + + case ElementWiseUnary::LOG: + if(src_tmp[i] != 0) + { + dst_tmp[i] = std::log(src_tmp[i]); + } + else + { + dst_tmp[i] = -dst.quantization_info().uniform().offset * dst.quantization_info().uniform().scale; + } + break; + default: - ARM_COMPUTE_ERROR("Not implemented"); + elementwise_unary(src_tmp, dst_tmp, op); + break; } } dst = convert_to_asymmetric(dst_tmp, dst.quantization_info()); -- cgit v1.2.1
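
Note (not part of the patch): the commit message states that the quantized unary elementwise operators are implemented in the CPU backend "using LUT", and the zero tolerances in the tests above rely on the kernel and the reference producing identical requantized values. The sketch below is a minimal, illustrative outline of that lookup-table idea for a QASYMM8 exponential — build a 256-entry table by dequantizing each possible input value, applying the op, and requantizing with saturation, then reduce the per-element kernel to a table lookup. The names (`UniformQuantInfo`, `build_exp_lut`, `exp_qasymm8`) are hypothetical and do not correspond to Compute Library APIs.

```cpp
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <vector>

// Hypothetical uniform quantization parameters (illustrative only).
struct UniformQuantInfo
{
    float   scale;
    int32_t offset;
};

// Build a 256-entry table mapping every possible QASYMM8 input value to its
// quantized output: dequantize -> apply the elementwise op -> requantize (saturating).
std::array<uint8_t, 256> build_exp_lut(UniformQuantInfo in_q, UniformQuantInfo out_q)
{
    std::array<uint8_t, 256> lut{};
    for(int v = 0; v < 256; ++v)
    {
        const float x = (v - in_q.offset) * in_q.scale;                       // dequantize
        const float y = std::exp(x);                                          // elementwise op
        const int   q = static_cast<int>(std::lround(y / out_q.scale)) + out_q.offset;
        lut[static_cast<size_t>(v)] = static_cast<uint8_t>(std::min(std::max(q, 0), 255)); // saturate
    }
    return lut;
}

// With the table precomputed, the per-element kernel is a single lookup.
void exp_qasymm8(const std::vector<uint8_t> &src, std::vector<uint8_t> &dst,
                 UniformQuantInfo in_q, UniformQuantInfo out_q)
{
    const auto lut = build_exp_lut(in_q, out_q);
    for(size_t i = 0; i < src.size(); ++i)
    {
        dst[i] = lut[src[i]];
    }
}
```

Because both such a kernel and the reference path in `tests/validation/reference/ElementwiseUnary.cpp` (dequantize, compute in float, `convert_to_asymmetric`) requantize the same real-valued result, the quantized test cases above can plausibly use an absolute tolerance of 0, assuming matching rounding behaviour.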