From c85edf1a5a3ad0e6906c0e90c18cc0080d71501d Mon Sep 17 00:00:00 2001
From: Viet-Hoa Do
Date: Fri, 1 Sep 2023 16:48:17 +0100
Subject: Make zip and combine variadic

* Illustrate the benefit by writing the CPU MatMul test dataset in a
  more readable way.

Part of: COMPMID-6353

Signed-off-by: Viet-Hoa Do
Change-Id: Id5dbc13a051709237bbcc4dd88716d0b24ecfd5d
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10227
Tested-by: Arm Jenkins
Reviewed-by: Jakub Sujak
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/framework/datasets/CartesianProductDataset.h |  16 +-
 tests/framework/datasets/ZipDataset.h              |  16 +-
 tests/validation/NEON/MatMul.cpp                   | 315 +++++++++++----------
 3 files changed, 201 insertions(+), 146 deletions(-)

diff --git a/tests/framework/datasets/CartesianProductDataset.h b/tests/framework/datasets/CartesianProductDataset.h
index 19ac4f6666..7b3ff12047 100644
--- a/tests/framework/datasets/CartesianProductDataset.h
+++ b/tests/framework/datasets/CartesianProductDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -182,6 +182,20 @@ CartesianProductDataset<T, U> combine(T &&dataset1, U &&dataset2)
     return CartesianProductDataset<T, U>(std::forward<T>(dataset1), std::forward<U>(dataset2));
 }
 
+/** Helper function to create a @ref CartesianProductDataset.
+ *
+ * @param[in] dataset1 First dataset.
+ * @param[in] dataset2 Second dataset.
+ * @param[in] datasets Subsequent datasets.
+ *
+ * @return A grid dataset.
+ */
+template <typename T1, typename T2, typename... Ts>
+auto combine(T1 &&dataset1, T2 &&dataset2, Ts &&... datasets) -> decltype(combine(std::forward<T1>(dataset1), combine(std::forward<T2>(dataset2), std::forward<Ts>(datasets)...)))
+{
+    return combine(std::forward<T1>(dataset1), combine(std::forward<T2>(dataset2), std::forward<Ts>(datasets)...));
+}
+
 /** Helper function to create a @ref CartesianProductDataset.
  *
  * @param[in] dataset1 First dataset.
diff --git a/tests/framework/datasets/ZipDataset.h b/tests/framework/datasets/ZipDataset.h
index ce1bb37cab..0b963484c5 100644
--- a/tests/framework/datasets/ZipDataset.h
+++ b/tests/framework/datasets/ZipDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -150,6 +150,20 @@ ZipDataset<T, U> zip(T &&dataset1, U &&dataset2)
 {
     return ZipDataset<T, U>(std::forward<T>(dataset1), std::forward<U>(dataset2));
 }
+
+/** Helper function to create a @ref ZipDataset.
+ *
+ * @param[in] dataset1 First dataset.
+ * @param[in] dataset2 Second dataset.
+ * @param[in] datasets Subsequent datasets.
+ *
+ * @return A zip dataset.
+ */
+template <typename T1, typename T2, typename... Ts>
+auto zip(T1 &&dataset1, T2 &&dataset2, Ts &&... datasets) -> decltype(zip(std::forward<T1>(dataset1), zip(std::forward<T2>(dataset2), std::forward<Ts>(datasets)...)))
+{
+    return zip(std::forward<T1>(dataset1), zip(std::forward<T2>(dataset2), std::forward<Ts>(datasets)...));
+}
 } // namespace dataset
 } // namespace framework
 } // namespace test
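
Both new overloads follow the same C++11 recipe: keep the existing binary function as the base case, and add a recursive variadic overload whose trailing decltype return type spells out the right fold, so zip(a, b, c) expands to zip(a, zip(b, c)) and combine nests the same way. The sketch below is an editorial illustration of that pattern, not code from the patch; the names Pair and pair_up and the example values are hypothetical.

// Minimal standalone analogue of the variadic zip()/combine() pattern.
#include <iostream>
#include <string>
#include <utility>

template <typename T, typename U>
struct Pair
{
    T first;
    U second;
};

// Binary base case, analogous to zip(dataset1, dataset2).
template <typename T, typename U>
Pair<T, U> pair_up(T &&first, U &&second)
{
    return Pair<T, U>{ std::forward<T>(first), std::forward<U>(second) };
}

// Variadic overload, analogous to the new zip()/combine(): fold the argument
// list from the right, so pair_up(a, b, c) becomes pair_up(a, pair_up(b, c)).
template <typename T1, typename T2, typename... Ts>
auto pair_up(T1 &&first, T2 &&second, Ts &&... rest)
    -> decltype(pair_up(std::forward<T1>(first), pair_up(std::forward<T2>(second), std::forward<Ts>(rest)...)))
{
    return pair_up(std::forward<T1>(first), pair_up(std::forward<T2>(second), std::forward<Ts>(rest)...));
}

int main()
{
    // Nests as Pair<int, Pair<std::string, double>>, mirroring how
    // zip(a, b, c) yields a ZipDataset of a dataset and a nested ZipDataset.
    auto p = pair_up(1, std::string("two"), 3.0);
    std::cout << p.first << " " << p.second.first << " " << p.second.second << "\n";
    return 0;
}

Spelling the return type as a trailing decltype keeps the overload usable without C++14 return-type deduction, and removes it from overload resolution (via SFINAE) whenever the nested call would be ill-formed.
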
diff --git a/tests/validation/NEON/MatMul.cpp b/tests/validation/NEON/MatMul.cpp
index 0a20d18490..8cc20211f2 100644
--- a/tests/validation/NEON/MatMul.cpp
+++ b/tests/validation/NEON/MatMul.cpp
@@ -40,6 +40,8 @@ namespace test
 {
 namespace validation
 {
+using framework::dataset::make;
+
 TEST_SUITE(NEON)
 TEST_SUITE(MatMul)
 
@@ -53,42 +55,46 @@ constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
 // clang-format off
 // *INDENT-OFF*
 // Validation Tests
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
-    framework::dataset::make("InputAInfo", { TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),    // Mismatching datatype
-                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::S32),    // Unsupported datatypes
-                                             TensorInfo(TensorShape(9U, 6U, 2U), 1, DataType::F32),    // Broadcasting in batch dimension not supported
-                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),    // Invalid shape for multiplication
-                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),
-                                             TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32),
-                                             TensorInfo(TensorShape(9U, 6U , 12U) , 1 , DataType::F32),    // Tensors are not dynamic
-                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8),
-                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED),
-                                             TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED),    // Mismatching data type
-    }),
-    framework::dataset::make("InputBInfo",{ TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
-                                            TensorInfo(TensorShape(5U, 9U), 1, DataType::S32),
-                                            TensorInfo(TensorShape(5U, 9U, 1U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 12U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
-                                            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
-                                            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
-    })),
-    framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 6U), 1, DataType::S32),
-                                            TensorInfo(TensorShape(5U, 6U, 2U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 6U, 12U) , 1, DataType::F32),
-                                            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
-                                            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8_SIGNED),
-                                            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
-    })),
-    framework::dataset::make( "TensorIsConst", {false, false, false, false, false , false, true, false, false, false} )),
-    framework::dataset::make("Expected", { false, false, false, false, true, true, false, true, true, false })),
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL,
+    zip(
+        make("InputAInfo", {
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),      // Mismatching datatype
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::S32),      // Unsupported datatypes
+            TensorInfo(TensorShape(9U, 6U, 2U), 1, DataType::F32),  // Broadcasting in batch dimension not supported
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),      // Invalid shape for multiplication
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::F32),
+            TensorInfo(TensorShape(9U, 6U, 12U), 1, DataType::F32),
+            TensorInfo(TensorShape(9U, 6U, 12U), 1, DataType::F32), // Tensors are not dynamic
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8),
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED),
+            TensorInfo(TensorShape(9U, 6U), 1, DataType::QASYMM8_SIGNED), // Mismatching data type
+        }),
+        make("InputBInfo", {
+            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
+            TensorInfo(TensorShape(5U, 9U), 1, DataType::S32),
+            TensorInfo(TensorShape(5U, 9U, 1U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 12U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 9U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 9U, 12U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8),
+            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
+            TensorInfo(TensorShape(5U, 9U), 1, DataType::QASYMM8_SIGNED),
+        }),
+        make("OutputInfo", {
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::S32),
+            TensorInfo(TensorShape(5U, 6U, 2U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 6U, 12U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 6U, 12U), 1, DataType::F32),
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8_SIGNED),
+            TensorInfo(TensorShape(5U, 6U), 1, DataType::QASYMM8),
+        }),
+        make("TensorIsConst", { false, false, false, false, false, false, true, false, false, false }),
+        make("Expected", { false, false, false, false, true, true, false, true, true, false })),
     a_info, b_info, output_info, are_tensors_const, expected)
 {
     TensorInfo a{a_info};
@@ -121,40 +127,48 @@ using NEQuantizedMatMulFixture = QuantizedMatMulValidationFixture<Tensor, Acces
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
-                                                                                             framework::dataset::make("TransposeA", { false, true })),
-                                                                                             framework::dataset::make("TransposeB", { false, true })),
-                                                                                             framework::dataset::make("DataType", DataType::F32)),
-                                                                                             framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT,
+    combine(
+        datasets::SmallMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F32),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
-                                                                                           framework::dataset::make("TransposeA", { false, true })),
-                                                                                           framework::dataset::make("TransposeB", { false, true })),
-                                                                                           framework::dataset::make("DataType", DataType::F32)),
-                                                                                           framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY,
+    combine(
+        datasets::LargeMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F32),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
 }
-FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::HighDimensionalMatMulDataset(),
-                                                                                                    framework::dataset::make("TransposeA", { false, true })),
-                                                                                                    framework::dataset::make("TransposeB", { false, true })),
-                                                                                                    framework::dataset::make("DataType", DataType::F32)),
-                                                                                                    framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY,
+    combine(
+        datasets::HighDimensionalMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F32),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
 }
-FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
-                                                                                                         framework::dataset::make("TransposeA", { false, true })),
-                                                                                                         framework::dataset::make("TransposeB", { false, true })),
-                                                                                                         framework::dataset::make("DataType", DataType::F32)),
-                                                                                                         framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
-                                                                                                         framework::dataset::make("NumberOfRuns", 5)))
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<float>, framework::DatasetMode::PRECOMMIT,
+    combine(
+        datasets::SmallMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F32),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
+        make("NumberOfRuns", 5)))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
@@ -165,17 +179,18 @@ TEST_SUITE_END() // FP32
 
 /* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */
 constexpr AbsoluteTolerance<float> tolerance_bf16(0.001f);
 TEST_SUITE(BF16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                                                                  datasets::SmallMatMulDataset(),
-                                                                  framework::dataset::make("TransposeA", { false, true })),
-                                                                  framework::dataset::make("TransposeB", { false, true })),
-                                                                  framework::dataset::make("DataType", DataType::F32)),
-                                                                  framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })),
-                                                                  framework::dataset::make("RunTimes", { 0 })),
-                                                                  framework::dataset::make("Settings", { CpuMatMulSettings().fast_math(true) })),
-                                                                  framework::dataset::make("LhsQInfo", { QuantizationInfo() })),
-                                                                  framework::dataset::make("RhsQInfo", { QuantizationInfo() })),
-                                                                  framework::dataset::make("OutQInfo", { QuantizationInfo() }))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture<float>, framework::DatasetMode::PRECOMMIT,
+    combine(
+        datasets::SmallMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F32),
+        make("ActivationInfo", { ActivationLayerInfo() }),
+        make("RunTimes", { 0 }),
+        make("Settings", { CpuMatMulSettings().fast_math(true) }),
+        make("LhsQInfo", { QuantizationInfo() }),
+        make("RhsQInfo", { QuantizationInfo() }),
+        make("OutQInfo", { QuantizationInfo() }))
 )
 {
     // Validate output
@@ -186,30 +201,36 @@ TEST_SUITE_END() // BF16
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallMatMulDataset(),
-                                                                                            framework::dataset::make("TransposeA", { false, true })),
-                                                                                            framework::dataset::make("TransposeB", { false, true })),
-                                                                                            framework::dataset::make("DataType", DataType::F16)),
-                                                                                            framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<half>, framework::DatasetMode::PRECOMMIT,
+    combine(
+        datasets::SmallMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F16),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp16);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeMatMulDataset(),
-                                                                                          framework::dataset::make("TransposeA", { false, true })),
-                                                                                          framework::dataset::make("TransposeB", { false, true })),
-                                                                                          framework::dataset::make("DataType", DataType::F16)),
-                                                                                          framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<half>, framework::DatasetMode::NIGHTLY,
+    combine(
+        datasets::LargeMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F16),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp16);
 }
-FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallMatMulDataset(),
-                                                                                                        framework::dataset::make("TransposeA", { false, true })),
-                                                                                                        framework::dataset::make("TransposeB", { false, true })),
-                                                                                                        framework::dataset::make("DataType", DataType::F16)),
-                                                                                                        framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })),
-                                                                                                        framework::dataset::make("NumberOfRuns", 5)))
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<half>, framework::DatasetMode::PRECOMMIT,
+    combine(
+        datasets::SmallMatMulDataset(),
+        make("TransposeA", { false, true }),
+        make("TransposeB", { false, true }),
+        make("DataType", DataType::F16),
+        make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
+        make("NumberOfRuns", 5)))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp16);
framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })), - framework::dataset::make("NumberOfExtraRuns", { 0, 1 })), - framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) })), - framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) })), - framework::dataset::make("OutQInfo", { QuantizationInfo(1.f, 2) })) +FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture, framework::DatasetMode::PRECOMMIT, + combine( + datasets::SmallMatMulDataset(), + make("TransposeA", { false, true }), + make("TransposeB", { false, true }), + make("DataType", DataType::QASYMM8), + make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), + make("NumberOfExtraRuns", { 0, 1 }), + make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }), + make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }), + make("OutQInfo", { QuantizationInfo(1.f, 2) })) ) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine( - datasets::SmallerMatMulDataset(), - framework::dataset::make("TransposeA", { false, true })), - framework::dataset::make("TransposeB", { false, true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) })), - framework::dataset::make("NumberOfExtraRuns", { 0, 1 })), - framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) })), - framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) })), - framework::dataset::make("OutQInfo", { QuantizationInfo(1.f, 2) })) +FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, + combine( + datasets::SmallerMatMulDataset(), + make("TransposeA", { false, true }), + make("TransposeB", { false, true }), + make("DataType", DataType::QASYMM8), + make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }), + make("NumberOfExtraRuns", { 0, 1 }), + make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }), + make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }), + make("OutQInfo", { QuantizationInfo(1.f, 2) })) ) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine( - datasets::LargeMatMulDataset(), - framework::dataset::make("TransposeA", { false, true })), - framework::dataset::make("TransposeB", { false, true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })), - framework::dataset::make("NumberOfExtraRuns", { 0, 1 })), - framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })), - framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })), - framework::dataset::make("OutQInfo", { 
QuantizationInfo(1.f, 2) })) +FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, + combine( + datasets::LargeMatMulDataset(), + make("TransposeA", { false, true }), + make("TransposeB", { false, true }), + make("DataType", DataType::QASYMM8), + make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), + make("NumberOfExtraRuns", { 0, 1 }), + make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) }), + make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) }), + make("OutQInfo", { QuantizationInfo(1.f, 2) })) ) { // Validate output @@ -276,48 +300,51 @@ TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) -FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(combine( - datasets::SmallMatMulDataset(), - framework::dataset::make("TransposeA", { false, true })), - framework::dataset::make("TransposeB", { false, true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })), - framework::dataset::make("NumberOfExtraRuns", { 0, 1 })), - framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) })), - framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) })), - framework::dataset::make("OutQInfo", { QuantizationInfo(1.f, 1) })) +FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture, framework::DatasetMode::PRECOMMIT, + combine( + datasets::SmallMatMulDataset(), + make("TransposeA", { false, true }), + make("TransposeB", { false, true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), + make("NumberOfExtraRuns", { 0, 1 }), + make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }), + make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }), + make("OutQInfo", { QuantizationInfo(1.f, 1) })) ) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_signed); } -FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine( - datasets::SmallerMatMulDataset(), - framework::dataset::make("TransposeA", { false, true })), - framework::dataset::make("TransposeB", { false, true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) })), - framework::dataset::make("NumberOfExtraRuns", { 0, 1 })), - framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) })), - framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) })), - framework::dataset::make("OutQInfo", { QuantizationInfo(1.f, 1) })) +FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, + combine( + datasets::SmallerMatMulDataset(), + make("TransposeA", { false, true }), + make("TransposeB", { false, true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }), + 
make("NumberOfExtraRuns", { 0, 1 }), + make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }), + make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }), + make("OutQInfo", { QuantizationInfo(1.f, 1) })) ) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_signed); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine( - datasets::LargeMatMulDataset(), - framework::dataset::make("TransposeA", { false, true })), - framework::dataset::make("TransposeB", { false, true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })), - framework::dataset::make("NumberOfExtraRuns", { 0, 1 })), - framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 150, -2) })), - framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 250, 1) })), - framework::dataset::make("OutQInfo", { QuantizationInfo(1.f, 1) })) +FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, + combine( + datasets::LargeMatMulDataset(), + make("TransposeA", { false, true }), + make("TransposeB", { false, true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), + make("NumberOfExtraRuns", { 0, 1 }), + make("LhsQInfo", { QuantizationInfo(1.f / 150, -2) }), + make("RhsQInfo", { QuantizationInfo(1.f / 250, 1) }), + make("OutQInfo", { QuantizationInfo(1.f, 1) })) ) { // Validate output -- cgit v1.2.1