From 93581a524a8e66ed29ace892bc5cb297287802af Mon Sep 17 00:00:00 2001
From: Pablo Marquez Tello
Date: Thu, 21 Jul 2022 13:55:27 +0100
Subject: [ONCPUML-970] Fast math mode for fixed format kernels

Minor tweaks and a test for running fixed format kernels with BF16 operations when specified by the user.

Change-Id: Ic8167f67b86b1298da65e46cfebed9f3b86940e4
Signed-off-by: Milos Puzovic
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8000
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/validation/NEON/ConvolutionLayer.cpp | 168 +++++++++++++++++++++--------
 1 file changed, 123 insertions(+), 45 deletions(-)

diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 0194220e1a..67f7c8896f 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -509,23 +509,45 @@ TEST_SUITE(VariableWeightUtils)
 // UC2_1_* tests: the user requests a specific fixed format, but there is no kernel that supports it.
-FIXTURE_DATA_TEST_CASE(UC2_1_CpuGemmConv2d, HasOptImplFixture, framework::DatasetMode::ALL,
+template
+using HasOptImplFixtureNoFastMath = HasOptImplFixture;
+
+template
+using HasOptImplFixtureFastMath = HasOptImplFixture;
+
+// UC2_1
+
+FIXTURE_DATA_TEST_CASE(UC2_1_CpuGemmConv2d, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::F32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo2 })))
+{
+    ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
+}
+FIXTURE_DATA_TEST_CASE(UC2_1_NEGEMMConvolutionLayer, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::F32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo2 })))
 {
     ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
 }
-FIXTURE_DATA_TEST_CASE(UC2_1_NEGEMMConvolutionLayer, HasOptImplFixture, framework::DatasetMode::ALL,
+
+FIXTURE_DATA_TEST_CASE(UC2_1_CpuGemmConv2d_FastMath, HasOptImplFixtureFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::F32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo2 })))
 {
     ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
 }
-// UC2_1_* tests: the user requests a specific fixed format, and a
+FIXTURE_DATA_TEST_CASE(UC2_1_NEGEMMConvolutionLayer_FastMath, HasOptImplFixtureFastMath, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::F32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo2 })))
+{
+    ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
+}
+
+// UC2_2_* tests: the user requests a specific fixed format, and a
 // kernel that support that fixed format is found.
-FIXTURE_DATA_TEST_CASE(UC2_2_CpuGemmConv2d, HasOptImplFixture, framework::DatasetMode::ALL,
+FIXTURE_DATA_TEST_CASE(UC2_2_CpuGemmConv2d, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::F32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo4 })))
 {
@@ -533,7 +555,7 @@ FIXTURE_DATA_TEST_CASE(UC2_2_CpuGemmConv2d, HasOptImplFixture, framework::DatasetMode::ALL,
+FIXTURE_DATA_TEST_CASE(UC2_2_NEGEMMConvolutionLayer, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::F32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo4 })))
 {
@@ -541,19 +563,49 @@ FIXTURE_DATA_TEST_CASE(UC2_2_NEGEMMConvolutionLayer, HasOptImplFixture, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::F32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo8i4_bf16 })))
+{
+    ARM_COMPUTE_EXPECT(_kernel_found, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT_EQUAL(_computed_weight_format, arm_compute::WeightFormat::OHWIo8i4_bf16, framework::LogLevel::ERRORS);
+}
+
+FIXTURE_DATA_TEST_CASE(UC2_2_NEGEMMConvolutionLayer_FastMath, HasOptImplFixtureFastMath, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::F32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::OHWIo8i4_bf16 })))
+{
+    ARM_COMPUTE_EXPECT(_kernel_found, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(_computed_weight_format == arm_compute::WeightFormat::OHWIo8i4_bf16, framework::LogLevel::ERRORS);
+}
+
 // UC3_1_* tests: the user queries for ANY fixed format, but there is
 // no kernel that support the use case specified by the user (for
 // example, there is no fixed format kernel for the datatype of the
 // problem).
-FIXTURE_DATA_TEST_CASE(UC3_1_CpuGemmConv2d, HasOptImplFixture, framework::DatasetMode::ALL,
+FIXTURE_DATA_TEST_CASE(UC3_1_CpuGemmConv2d, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::S32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
 {
     ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
 }
-FIXTURE_DATA_TEST_CASE(UC3_1_NEGEMMConvolutionLayer, HasOptImplFixture, framework::DatasetMode::ALL,
+FIXTURE_DATA_TEST_CASE(UC3_1_NEGEMMConvolutionLayer, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::S32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
+{
+    ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
+}
+
+FIXTURE_DATA_TEST_CASE(UC3_1_CpuGemmConv2d_FastMath, HasOptImplFixtureFastMath, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::S32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
+{
+    ARM_COMPUTE_EXPECT(!_kernel_found, framework::LogLevel::ERRORS);
+}
+
+FIXTURE_DATA_TEST_CASE(UC3_1_NEGEMMConvolutionLayer_FastMath, HasOptImplFixtureFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::S32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
 {
@@ -570,7 +622,7 @@ FIXTURE_DATA_TEST_CASE(UC3_1_NEGEMMConvolutionLayer, HasOptImplFixture, framework::DatasetMode::ALL,
+FIXTURE_DATA_TEST_CASE(UC3_2_CpuGemmConv2d, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::F32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
 {
@@ -579,7 +631,7 @@ FIXTURE_DATA_TEST_CASE(UC3_2_CpuGemmConv2d, HasOptImplFixture, framework::DatasetMode::ALL,
+FIXTURE_DATA_TEST_CASE(UC3_2_NEGEMMConvolutionLayer, HasOptImplFixtureNoFastMath, framework::DatasetMode::ALL,
                        combine(framework::dataset::make("DataType", { DataType::F32 }),
                                framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
 {
@@ -587,6 +639,26 @@ FIXTURE_DATA_TEST_CASE(UC3_2_NEGEMMConvolutionLayer, HasOptImplFixture, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::F32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
+{
+    ARM_COMPUTE_EXPECT(_kernel_found, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(_computed_weight_format != arm_compute::WeightFormat::ANY, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(_computed_weight_format != arm_compute::WeightFormat::UNSPECIFIED, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format_fast_math(_computed_weight_format), framework::LogLevel::ERRORS);
+}
+
+FIXTURE_DATA_TEST_CASE(UC3_2_NEGEMMConvolutionLayer_FastMath, HasOptImplFixtureFastMath, framework::DatasetMode::ALL,
+                       combine(framework::dataset::make("DataType", { DataType::F32 }),
+                               framework::dataset::make("QueryWeightFormat", { arm_compute::WeightFormat::ANY })))
+{
+    ARM_COMPUTE_EXPECT(_kernel_found, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(_computed_weight_format != arm_compute::WeightFormat::ANY, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(_computed_weight_format != arm_compute::WeightFormat::UNSPECIFIED, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(arm_compute::is_fixed_format_fast_math(_computed_weight_format), framework::LogLevel::ERRORS);
+}
+
 namespace
 {
 using TestCaseType = std::tuple;
@@ -669,7 +741,7 @@ TEST_SUITE_END() // VariableWeightUtils
 TEST_SUITE(ExperimentalCpuAPIVariableWeightWithFixtures)
 template
-using VarWidth = VariableWeightsFixture;
+using VarWidth = VariableWeightsFixture;
 FIXTURE_DATA_TEST_CASE(RunSmallFloat, VarWidth, framework::DatasetMode::ALL,
                        combine(combine(datasets::SmallConvolutionLayerDataset(),
@@ -689,12 +761,26 @@ FIXTURE_DATA_TEST_CASE(RunSmallHalf, VarWidth, framework::DatasetMode::ALL
     validate(Accessor(_target), _reference, rel_tolerance_f16, 0.f, half(abs_tolerance_f16));
 }
 
+#if defined(ARM_COMPUTE_ENABLE_BF16)
+template
+using VarWidthFastMath = VariableWeightsFixture;
+
+FIXTURE_DATA_TEST_CASE(RunSmallFloatFastMath, VarWidthFastMath, framework::DatasetMode::ALL,
+                       combine(combine(datasets::SmallConvolutionLayerDataset(),
+                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                               framework::dataset::make("ACL Scalar type", { DataType::F32 })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
+}
+#endif // ARM_COMPUTE_ENABLE_BF16
+
 TEST_SUITE_END() // ExperimentalCpuAPIVariableWeightWithFixtures
 
 TEST_SUITE(ExperimentalNEAPIVariableWeightWithFixtures)
 template
-using NEGEMMVarWidth = VariableWeightsFixtureNEInterface;
+using NEGEMMVarWidth = VariableWeightsFixtureNEInterface;
 FIXTURE_DATA_TEST_CASE(NEGEMMRunSmallFloat, NEGEMMVarWidth, framework::DatasetMode::ALL,
                        combine(combine(datasets::SmallConvolutionLayerDataset(),
@@ -714,6 +800,20 @@ FIXTURE_DATA_TEST_CASE(NEGEMMRunSmallHalf, NEGEMMVarWidth, framework::Data
     validate(Accessor(_target), _reference, rel_tolerance_f16, 0.f, half(abs_tolerance_f16));
 }
 
+#if defined(ARM_COMPUTE_ENABLE_BF16)
+template
+using NEGEMMVarWidthFastMath = VariableWeightsFixtureNEInterface;
+
+FIXTURE_DATA_TEST_CASE(NEGEMMRunSmallFloatFastMath, NEGEMMVarWidthFastMath, framework::DatasetMode::ALL,
+                       combine(combine(datasets::SmallConvolutionLayerDataset(),
+                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                               framework::dataset::make("ACL Scalar type", { DataType::F32 })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
+}
+#endif // ARM_COMPUTE_ENABLE_BF16
+
 TEST_SUITE_END() // ExperimentalNEAPIVariableWeightWithFixtures
 
 #endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
@@ -823,9 +923,7 @@ TEST_SUITE(Float)
 #if defined(ARM_COMPUTE_ENABLE_BF16)
 TEST_SUITE(BFLOAT16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })),
-                       framework::dataset::make("DataType", DataType::BFLOAT16)),
-                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                       framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::BFLOAT16)), framework::dataset::make("DataLayout", { DataLayout::NHWC })),
                        ActivationFunctionsDataset))
 {
     // Validate output
@@ -837,10 +935,7 @@ TEST_SUITE_END() // BFLOAT16
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })),
framework::dataset::make("DataType", DataType::F16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), - ActivationFunctionsDataset)) + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsDataset)) { // Validate output validate(Accessor(_target), _reference, rel_tolerance_f16, tolerance_num, abs_tolerance_f16); @@ -850,9 +945,7 @@ TEST_SUITE_END() // FP16 TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsDataset)) { // Validate output @@ -894,11 +987,8 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), - QuantizedActivationFunctionsDataset)) + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), QuantizedActivationFunctionsDataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -924,11 +1014,8 @@ TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })), - QuantizedActivationFunctionsDataset)) + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })), QuantizedActivationFunctionsDataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -1082,10 +1169,7 @@ TEST_CASE(MultipleExecutionWithConfigure, framework::DatasetMode::ALL) TEST_SUITE(Float) TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - 
framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NHWC })), - ActivationFunctionsDataset)) + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NHWC })), ActivationFunctionsDataset)) { // Validate output validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32)); @@ -1109,11 +1193,8 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataLayout", { DataLayout::NHWC })), - framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), - QuantizedActivationFunctionsDataset)) + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), QuantizedActivationFunctionsDataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -1122,11 +1203,8 @@ TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectGEMMConv2dLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("DataLayout", { DataLayout::NHWC })), - framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })), - QuantizedActivationFunctionsDataset)) + framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.01f, -10) })), QuantizedActivationFunctionsDataset)) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); -- cgit v1.2.1