From ee939fb58e3fc50ae7c92c895f8abd1dd9f20eb3 Mon Sep 17 00:00:00 2001 From: Luca Foschiani Date: Tue, 28 Jan 2020 10:38:07 +0000 Subject: COMPMID-2774: Add support for QASYMM8_SIGNED in NEReductionOperation, NEReduceMean and NEArgMinMaxLayer Signed-off-by: Luca Foschiani Change-Id: Icf198a983c8ce2c6cd8451a1190bb99115eac3af Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2652 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio Reviewed-by: Giorgio Arena Comments-Addressed: Arm Jenkins --- tests/validation/NEON/ArgMinMax.cpp | 24 +++++++++++++ tests/validation/NEON/ReduceMean.cpp | 29 +++++++++++++--- tests/validation/NEON/ReductionOperation.cpp | 39 +++++++++++++++++++--- tests/validation/fixtures/ArgMinMaxFixture.h | 10 +++++- .../fixtures/ReductionOperationFixture.h | 31 +++++++++++------ tests/validation/reference/ReductionOperation.cpp | 1 + 6 files changed, 113 insertions(+), 21 deletions(-) (limited to 'tests/validation') diff --git a/tests/validation/NEON/ArgMinMax.cpp b/tests/validation/NEON/ArgMinMax.cpp index ec90ab0db0..e7ab4a4bbf 100644 --- a/tests/validation/NEON/ArgMinMax.cpp +++ b/tests/validation/NEON/ArgMinMax.cpp @@ -163,6 +163,30 @@ FIXTURE_DATA_TEST_CASE(RunLarge, validate(Accessor(_target), _reference); } TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEArgMinMaxQuantizedValidationFixture, + framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), + framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 127.f, 20) }))) +{ + // Validate output + validate(Accessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, + NEArgMinMaxQuantizedValidationFixture, + framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), + framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 127.f, 20) }))) +{ + // Validate output + validate(Accessor(_target), _reference); +} +TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE_END() // ArgMinMax TEST_SUITE_END() // NEON } // namespace validation diff --git a/tests/validation/NEON/ReduceMean.cpp b/tests/validation/NEON/ReduceMean.cpp index 782b97220e..fade3613da 100644 --- a/tests/validation/NEON/ReduceMean.cpp +++ b/tests/validation/NEON/ReduceMean.cpp @@ -44,9 +44,9 @@ namespace { constexpr AbsoluteTolerance tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */ #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -constexpr AbsoluteTolerance tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */ -#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -constexpr AbsoluteTolerance tolerance_qasymm8(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */ +constexpr AbsoluteTolerance tolerance_f16(0.03f); /**< Tolerance value for comparing reference's output against implementation's output for 
16-bit floating-point type */ +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +constexpr AbsoluteTolerance tolerance_quantized(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */ const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }), framework::dataset::make("KeepDims", { true })); @@ -162,7 +162,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) }))) { // Validate output - validate(Accessor(_target), _reference, tolerance_qasymm8); + validate(Accessor(_target), _reference, tolerance_quantized); } FIXTURE_DATA_TEST_CASE(RunLarge, @@ -171,9 +171,28 @@ FIXTURE_DATA_TEST_CASE(RunLarge, combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 5) }))) { // Validate output - validate(Accessor(_target), _reference, tolerance_qasymm8); + validate(Accessor(_target), _reference, tolerance_quantized); } TEST_SUITE_END() // QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEReduceMeanQuantizedFixture, + framework::DatasetMode::PRECOMMIT, + combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, 0) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_quantized); +} +FIXTURE_DATA_TEST_CASE(RunLarge, + NEReduceMeanQuantizedFixture, + framework::DatasetMode::NIGHTLY, + combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 127, 0) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_quantized); +} +TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE_END() // Quantized TEST_SUITE_END() // ReduceMean TEST_SUITE_END() // NEON diff --git a/tests/validation/NEON/ReductionOperation.cpp b/tests/validation/NEON/ReductionOperation.cpp index 93f1a80735..cd96a6abcc 100644 --- a/tests/validation/NEON/ReductionOperation.cpp +++ b/tests/validation/NEON/ReductionOperation.cpp @@ -44,9 +44,11 @@ namespace { /** Tolerance for float operations */ AbsoluteTolerance tolerance_f32(0.0001f); -RelativeTolerance rel_tolerance_f32(0.00001f); +RelativeTolerance rel_tolerance_f32(0.0001f); +AbsoluteTolerance tolerance_f16(0.1f); +RelativeTolerance rel_tolerance_f16(0.1f); /** Tolerance for quantized operations */ -RelativeTolerance tolerance_qasymm8(1); +RelativeTolerance tolerance_quantized(1.f); const auto ReductionOperations = framework::dataset::make("ReductionOperation", { @@ -58,7 +60,7 @@ const auto ReductionOperations = framework::dataset::make("ReductionOperation", const auto QuantizationInfos = framework::dataset::make("QuantizationInfo", { - QuantizationInfo(1.f / 128, 10), + QuantizationInfo(1.f / 117, 10), // Numbers chosen so that the quantized values are in range of qasymm8_signed data type QuantizationInfo(1.f / 64, 5), QuantizationInfo(1.f / 32, 2) }); @@ 
-123,6 +125,23 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture, framework:: } TEST_SUITE_END() // FP32 +#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), Axises), ReductionOperations), KeepDims)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f16); +} +FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), Axises), ReductionOperations), KeepDims)) +{ + // Validate output + validate(Accessor(_target), _reference, rel_tolerance_f16, 0, tolerance_f16); +} +TEST_SUITE_END() // FP16 +#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + template using NEReductionOperationQuantizedFixture = ReductionOperationQuantizedFixture; @@ -134,10 +153,22 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationQuantizedFixture, KeepDims)) { // Validate output - validate(Accessor(_target), _reference, tolerance_qasymm8); + validate(Accessor(_target), _reference, tolerance_quantized); } TEST_SUITE_END() // QASYMM8 +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationQuantizedFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), Axises), + ReductionOperations), + QuantizationInfos), + KeepDims)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_quantized); +} +TEST_SUITE_END() // QASYMM8_SIGNED + TEST_SUITE_END() // ReductionOperation TEST_SUITE_END() // NEON } // namespace validation diff --git a/tests/validation/fixtures/ArgMinMaxFixture.h b/tests/validation/fixtures/ArgMinMaxFixture.h index a4d03fba02..2932ba4508 100644 --- a/tests/validation/fixtures/ArgMinMaxFixture.h +++ b/tests/validation/fixtures/ArgMinMaxFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 ARM Limited. + * Copyright (c) 2018-2020 ARM Limited. 
* * SPDX-License-Identifier: MIT * @@ -80,6 +80,14 @@ protected: library->fill(tensor, distribution, 0); break; } + case DataType::QASYMM8_SIGNED: + { + std::pair bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); + std::uniform_int_distribution distribution(bounds.first, bounds.second); + + library->fill(tensor, distribution, 0); + break; + } default: ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented"); } diff --git a/tests/validation/fixtures/ReductionOperationFixture.h b/tests/validation/fixtures/ReductionOperationFixture.h index 2802cd4c0a..a93bf49afd 100644 --- a/tests/validation/fixtures/ReductionOperationFixture.h +++ b/tests/validation/fixtures/ReductionOperationFixture.h @@ -61,22 +61,31 @@ protected: template void fill(U &&tensor) { - if(tensor.data_type() == DataType::QASYMM8) + if(!is_data_type_quantized(tensor.data_type())) { - std::pair bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution distribution(bounds.first, bounds.second); - library->fill(tensor, distribution, 0); - } - else if(tensor.data_type() == DataType::QASYMM8_SIGNED) - { - std::pair bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); - std::uniform_int_distribution distribution(bounds.first, bounds.second); + std::uniform_real_distribution<> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, 0); } else { - std::uniform_real_distribution<> distribution(-1.0f, 1.0f); - library->fill(tensor, distribution, 0); + if(tensor.data_type() == DataType::QASYMM8) + { + std::pair bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); + std::uniform_int_distribution distribution(bounds.first, bounds.second); + + library->fill(tensor, distribution, 0); + } + else if(tensor.data_type() == DataType::QASYMM8_SIGNED) + { + std::pair bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f); + std::uniform_int_distribution distribution(bounds.first, bounds.second); + + library->fill(tensor, distribution, 0); + } + else + { + ARM_COMPUTE_ERROR("Not supported"); + } } } diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp index a0b6a8eaed..68352cc645 100644 --- a/tests/validation/reference/ReductionOperation.cpp +++ b/tests/validation/reference/ReductionOperation.cpp @@ -327,6 +327,7 @@ template SimpleTensor reduction_operation(const SimpleTensor &sr template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op); template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op); template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op); +template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op); } // namespace reference } // namespace validation -- cgit v1.2.1
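For reference, below is a minimal standalone sketch of the kind of bound computation the new QASYMM8_SIGNED fill paths in ArgMinMaxFixture.h and ReductionOperationFixture.h rely on: the fixtures call get_quantized_qasymm8_signed_bounds(quantization_info, -1.0f, 1.0f) and feed the result into a std::uniform_int_distribution. The helper names SimpleQuantInfo and qasymm8_signed_bounds here are illustrative stand-ins, not the library's API, and the rounding/clamping shown is an assumption about what the test utility does rather than a copy of it.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <utility>

// Hypothetical stand-in for arm_compute::QuantizationInfo: uniform scale and zero-point only.
struct SimpleQuantInfo
{
    float   scale;
    int32_t offset;
};

// Sketch of how fill bounds for a QASYMM8_SIGNED tensor could be derived:
// quantize the float limits with the tensor's scale/offset and clamp to the int8_t range.
std::pair<int8_t, int8_t> qasymm8_signed_bounds(const SimpleQuantInfo &qinfo, float min, float max)
{
    auto quantize = [&](float v) -> int8_t
    {
        const int32_t q = static_cast<int32_t>(std::lround(v / qinfo.scale)) + qinfo.offset;
        return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, q)));
    };
    return { quantize(min), quantize(max) };
}

int main()
{
    // Same parameters as the ArgMinMax QASYMM8_SIGNED tests above: scale = 5/127, offset = 20.
    const SimpleQuantInfo qinfo{ 5.f / 127.f, 20 };
    const std::pair<int8_t, int8_t> bounds = qasymm8_signed_bounds(qinfo, -1.0f, 1.0f);
    std::cout << "fill range: [" << int(bounds.first) << ", " << int(bounds.second) << "]\n";
    return 0;
}

With those test parameters this sketch maps the float interval [-1, 1] to the int8 fill range [-5, 45], which is why the chosen QuantizationInfo values keep the quantized test data comfortably inside the qasymm8_signed range, as the comment in ReductionOperation.cpp notes for the 1/117 scale.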