From bc0e1c969261f4ba65451dc5fe29acfb80671f68 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Wed, 29 Apr 2020 17:25:20 +0100
Subject: COMPMID-3069: Allow different quantization info in NEElementwiseMin/Max

Change-Id: I97f9f7e9f054b2a812a23b25cfb21033f76a9101
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3125
Reviewed-by: Manuel Bottini
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 src/core/NEON/kernels/NEElementwiseOperationKernel.cpp |  9 ---------
 src/runtime/NEON/functions/NEElementwiseOperators.cpp  | 10 ----------
 tests/validation/NEON/ElementwiseMax.cpp               | 18 +++++++-----------
 tests/validation/NEON/ElementwiseMin.cpp               | 16 +++++++---------
 4 files changed, 14 insertions(+), 39 deletions(-)

diff --git a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
index 444ee8e0d3..0579dc67f4 100644
--- a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
@@ -24,26 +24,17 @@
 #include "arm_compute/core/NEON/kernels/NEElementwiseOperationKernel.h"
 
 #include "arm_compute/core/CPP/Validate.h"
-#include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/NEON/NEAsymm.h"
 #include "arm_compute/core/NEON/NEFixedPoint.h"
 #include "arm_compute/core/NEON/wrapper/wrapper.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
 
-#include <algorithm>
 #include <arm_neon.h>
-#include <cstdint>
 #include <map>
-#include <string>
 
 namespace arm_compute
 {
-class Coordinates;
-
 namespace
 {
 float32x4x4_t load_quantized(const uint8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale)
diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
index 7451c6ff2b..926ae1fa21 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -43,11 +43,6 @@ void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *outp
 Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
-    if(input1->data_type() == DataType::QASYMM8_SIGNED)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input1, input2, output);
-    }
     return NEArithmeticOperationKernel::validate(ArithmeticOperation::MAX, input1, input2, output);
 }
 
@@ -62,11 +57,6 @@ void NEElementwiseMin::configure(ITensor *input1, ITensor *input2, ITensor *outp
 Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
-    if(input1->data_type() == DataType::QASYMM8_SIGNED)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input1, input2, output);
-    }
     return NEArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output);
 }
 
diff --git a/tests/validation/NEON/ElementwiseMax.cpp b/tests/validation/NEON/ElementwiseMax.cpp
index fc6bb9454d..bd61ba50a3 100644
--- a/tests/validation/NEON/ElementwiseMax.cpp
+++ b/tests/validation/NEON/ElementwiseMax.cpp
@@ -41,7 +41,8 @@ namespace validation
 {
 namespace
 {
-RelativeTolerance<float> tolerance_fp32(0.000001f);
+constexpr RelativeTolerance<float>  tolerance_fp32(0.000001f);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(1);
 
 /** Input data sets **/
 const auto ElementwiseMaxQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataType",
@@ -79,8 +80,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                 TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),          // Mismatching shapes
                                                 TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED), // OK
                                                 TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED), // Mismatching data types
-                                                TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.1f, 2) ), // Mismatching qinfo
-
                                        }),
     framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
@@ -89,8 +88,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8),
-                                            TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.4f, 5) ), // Mismatching qinfo
-
                                           })),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
@@ -99,10 +96,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED),
-                                            TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.3f,4 ) ), // Mismatching qinfo
 
                                           })),
-    framework::dataset::make("Expected", { true, true, true, false, false, true, false, false,false})),
+    framework::dataset::make("Expected", { true, true, true, false, false, true, false, false })),
     input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(NEElementwiseMax::validate(
@@ -162,12 +158,12 @@ TEST_SUITE_END()
 TEST_SUITE(QASYMM8_SIGNED)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseMaxQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                        ElementwiseMaxQASYMM8SignedDataset),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })))
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 20) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f, 0) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f, -27) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference);
+    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
 }
 TEST_SUITE_END()
 
diff --git a/tests/validation/NEON/ElementwiseMin.cpp b/tests/validation/NEON/ElementwiseMin.cpp
index 9b950a06a7..0fc6f5fead 100644
--- a/tests/validation/NEON/ElementwiseMin.cpp
+++ b/tests/validation/NEON/ElementwiseMin.cpp
@@ -41,7 +41,8 @@ namespace validation
 {
 namespace
 {
-RelativeTolerance<float> tolerance_fp32(0.000001f);
+constexpr RelativeTolerance<float>  tolerance_fp32(0.000001f);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(1);
 
 /** Input data sets **/
 const auto ElementwiseMinQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataType",
@@ -79,7 +80,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                 TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),          // Mismatching shapes
                                                 TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED), // Ok
                                                 TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED), // Mismatching types, cannot mix QASYMM8_SIGNED with QASYMM8
-                                                TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.1f, 2) ), // Mismatching qinfo
                                        }),
     framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
@@ -88,7 +88,6 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8),
-                                            TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.5f, 1) ),
                                           })),
     framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S32),
@@ -97,10 +96,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                             TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
                                             TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED),
                                             TensorInfo(TensorShape(4U, 4U, 2U), 1, DataType::QASYMM8_SIGNED),
-                                            TensorInfo(TensorShape(1U, 2U, 3U), 1, DataType::QASYMM8_SIGNED,QuantizationInfo(0.5f, 1) ),
                                           })),
     framework::dataset::make("Expected", { true, true, true, false,
-                                           false,true,false,false})),
+                                           false,true,false})),
     input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(NEElementwiseMin::validate(
@@ -161,12 +159,12 @@ TEST_SUITE_END()
 TEST_SUITE(QASYMM8_SIGNED)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEElementwiseMinQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                        ElementwiseMaxQASYMM8SignedDataset),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })),
-                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 2) })))
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(10.f, 20) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f, 0) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f, -27) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference);
+    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
 }
 TEST_SUITE_END()
 
-- 
cgit v1.2.1
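
Note (not part of the patch above): the change drops the requirement that QASYMM8_SIGNED inputs and output of NEElementwiseMin/NEElementwiseMax share the same QuantizationInfo. Below is a minimal usage sketch of what is now accepted, assuming the standard arm_compute runtime API; the shapes and quantization parameters are illustrative only and mirror the values used in the updated tests.

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor input1, input2, output;

        // Same data type, but each tensor carries its own QuantizationInfo
        input1.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(10.f, 20)));
        input2.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f, 0)));
        output.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(2.f, -27)));

        // Static validation: before this patch, mismatching quantization info on
        // QASYMM8_SIGNED tensors caused an error here; it is now accepted.
        const Status status = NEElementwiseMax::validate(input1.info(), input2.info(), output.info());
        ARM_COMPUTE_ERROR_THROW_ON(status);

        NEElementwiseMax max_op;
        max_op.configure(&input1, &input2, &output);

        input1.allocator()->allocate();
        input2.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input1 and input2 with quantized data ...

        // Each input is dequantized with its own scale/offset; the result is
        // requantized with the output tensor's QuantizationInfo.
        max_op.run();
        return 0;
    }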