From 36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38 Mon Sep 17 00:00:00 2001
From: Renato Arantes
Date: Fri, 26 Jan 2024 17:31:18 +0000
Subject: [ONCPUML-1451] Add matmul kernel to enable bf16 to bf16 operations
 via PyTorch® autocast() function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The full range of tests will be added under the [MLINFSW-482] epic, due to
the lack of reordering kernels implemented in ACL.

Co-Authored-By: David Mansell
Change-Id: I820d316295a1ec94fdc89c37e4144a268f914c36
Signed-off-by: Renato Arantes
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11169
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/validation/NEON/MatMul.cpp | 402 +++++++++++++++++++++++----------------
 1 file changed, 243 insertions(+), 159 deletions(-)

diff --git a/tests/validation/NEON/MatMul.cpp b/tests/validation/NEON/MatMul.cpp
index f91dea1b4f..02f0bfda1e 100644
--- a/tests/validation/NEON/MatMul.cpp
+++ b/tests/validation/NEON/MatMul.cpp
@@ -24,15 +24,14 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/NEON/functions/NEMatMul.h"
 
-#include "tests/NEON/Accessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Macros.h"
-#include "tests/framework/datasets/Datasets.h"
-#include "tests/validation/Validation.h"
-
 #include "tests/datasets/LargeMatMulDataset.h"
 #include "tests/datasets/SmallMatMulDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/framework/Macros.h"
+#include "tests/NEON/Accessor.h"
 #include "tests/validation/fixtures/MatMulFixture.h"
+#include "tests/validation/Validation.h"
 
 namespace arm_compute
 {
@@ -45,8 +44,9 @@ using framework::dataset::make;
 TEST_SUITE(NEON)
 TEST_SUITE(MatMul)
 
-constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
-const AbsoluteTolerance<half>      tolerance_fp16(half(0.1f));
+constexpr AbsoluteTolerance<float> tolerance_fp32(
+    0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */
+const AbsoluteTolerance<half> tolerance_fp16(half(0.1f));
 #ifdef __aarch64__
 constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8(1);
 constexpr AbsoluteTolerance<int32_t> tolerance_qasymm8_signed(1);
@@ -120,55 +120,79 @@ template <typename T>
 using NEMatMulFastMathFixture = MatMulGenericValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
 
 template <typename T>
-using NEMatMulDynamicTensorsFixture = MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+using NEMatMulFixedFormatFixture = MatMulFixedFormatFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
+
+template <typename T>
+using NEMatMulDynamicTensorsFixture =
+    MatMulValidationWithDynamicTensorsFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
 
 template <typename T>
 using NEQuantizedMatMulFixture = QuantizedMatMulValidationFixture<Tensor, Accessor, NEMatMul, CpuMatMulSettings, T>;
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F32),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEMatMulFixture<float>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F32),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::LargeMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F32),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEMatMulFixture<float>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F32),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
 }
-FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::HighDimensionalMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F32),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunHighDimensions,
+                       NEMatMulFixture<float>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::HighDimensionalMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F32),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
 }
-FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F32),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
-                           make("NumberOfRuns", 5)))
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
+                       NEMatMulDynamicTensorsFixture<float>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F32),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    }),
+                               make("NumberOfRuns", 5)))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp32);
@@ -179,37 +203,58 @@ TEST_SUITE_END() // FP32
 /* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */
 constexpr AbsoluteTolerance<float> tolerance_bf16(0.02f);
 TEST_SUITE(BF16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F32),
-                           make("ActivationInfo", { ActivationLayerInfo() }),
-                           make("RunTimes", { 0 }),
-                           make("Settings", { CpuMatMulSettings().fast_math(true) }),
-                           make("LhsQInfo", { QuantizationInfo() }),
-                           make("RhsQInfo", { QuantizationInfo() }),
-                           make("OutQInfo", { QuantizationInfo() }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEMatMulFastMathFixture<float>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F32),
+                               make("ActivationInfo", {ActivationLayerInfo()}),
+                               make("RunTimes", {0}),
+                               make("Settings", {CpuMatMulSettings().fast_math(true)}),
+                               make("LhsQInfo", {QuantizationInfo()}),
+                               make("RhsQInfo", {QuantizationInfo()}),
+                               make("OutQInfo", {QuantizationInfo()})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_bf16);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFastMathFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::LargeMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F32),
-                           make("ActivationInfo", { ActivationLayerInfo() }),
-                           make("RunTimes", { 0 }),
-                           make("Settings", { CpuMatMulSettings().fast_math(true) }),
-                           make("LhsQInfo", { QuantizationInfo() }),
-                           make("RhsQInfo", { QuantizationInfo() }),
-                           make("OutQInfo", { QuantizationInfo() }))
-)
+FIXTURE_DATA_TEST_CASE(RunTinyFixedFormat,
+                       NEMatMulFixedFormatFixture<bfloat16>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::TinyMatMulDataset(),
+                               make("TransposeA", {false}),
+                               make("TransposeB", {false}),
+                               make("DataType", DataType::BFLOAT16),
+                               make("ActivationInfo", {ActivationLayerInfo()}),
+                               make("RunTimes", {0}),
+                               make("Settings", {CpuMatMulSettings().fast_math(true).fixed_format(true)}),
+                               make("LhsQInfo", {QuantizationInfo()}),
+                               make("RhsQInfo", {QuantizationInfo()}),
+                               make("OutQInfo", {QuantizationInfo()})))
+{
+    if (CPUInfo::get().has_bf16())
+    {
+        // Validate output
+        validate(Accessor(_target), _reference, tolerance_bf16);
+    }
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEMatMulFastMathFixture<float>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F32),
+                               make("ActivationInfo", {ActivationLayerInfo()}),
+                               make("RunTimes", {0}),
+                               make("Settings", {CpuMatMulSettings().fast_math(true)}),
+                               make("LhsQInfo", {QuantizationInfo()}),
+                               make("RhsQInfo", {QuantizationInfo()}),
+                               make("OutQInfo", {QuantizationInfo()})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_bf16, 0.01 /* tolerance_num */);
@@ -219,36 +264,51 @@ TEST_SUITE_END() // BF16
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture<half>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F16),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEMatMulFixture<half>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F16),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp16);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::LargeMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F16),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) })))
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEMatMulFixture<half>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F16),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp16);
 }
-FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture<half>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::F16),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
-                           make("NumberOfRuns", 5)))
+FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors,
+                       NEMatMulDynamicTensorsFixture<half>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::F16),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    }),
+                               make("NumberOfRuns", 5)))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_fp16);
@@ -263,52 +323,64 @@ TEST_SUITE(Quantized)
 
 TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::QASYMM8),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
-                           make("NumberOfExtraRuns", { 0, 1 }),
-                           make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
-                           make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }),
-                           make("OutQInfo", { QuantizationInfo(1.f, 2) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEQuantizedMatMulFixture<uint8_t>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::QASYMM8),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    }),
+                               make("NumberOfExtraRuns", {0, 1}),
+                               make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+                               make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
+                               make("OutQInfo", {QuantizationInfo(1.f, 2)})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
-FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::SmallerMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::QASYMM8),
-                           make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }),
-                           make("NumberOfExtraRuns", { 0, 1 }),
-                           make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
-                           make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }),
-                           make("OutQInfo", { QuantizationInfo(1.f, 2) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
+                       NEQuantizedMatMulFixture<uint8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::SmallerMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::QASYMM8),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+                                    }),
+                               make("NumberOfExtraRuns", {0, 1}),
+                               make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+                               make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}),
+                               make("OutQInfo", {QuantizationInfo(1.f, 2)})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::LargeMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::QASYMM8),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
-                           make("NumberOfExtraRuns", { 0, 1 }),
-                           make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) }),
-                           make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) }),
-                           make("OutQInfo", { QuantizationInfo(1.f, 2) }))
-)
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEQuantizedMatMulFixture<uint8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::QASYMM8),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    }),
+                               make("NumberOfExtraRuns", {0, 1}),
+                               make("LhsQInfo", {QuantizationInfo(1.f / 100, 1)}),
+                               make("RhsQInfo", {QuantizationInfo(1.f / 200, -1)}),
+                               make("OutQInfo", {QuantizationInfo(1.f, 2)})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -318,52 +390,64 @@ TEST_SUITE_END() // QASYMM8
 
 TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
-                       combine(
-                           datasets::SmallMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::QASYMM8_SIGNED),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
-                           make("NumberOfExtraRuns", { 0, 1 }),
-                           make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }),
-                           make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
-                           make("OutQInfo", { QuantizationInfo(1.f, 1) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEQuantizedMatMulFixture<int8_t>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::QASYMM8_SIGNED),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    }),
+                               make("NumberOfExtraRuns", {0, 1}),
+                               make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
+                               make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+                               make("OutQInfo", {QuantizationInfo(1.f, 1)})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
 }
-FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture<int8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::SmallerMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::QASYMM8_SIGNED),
-                           make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }),
-                           make("NumberOfExtraRuns", { 0, 1 }),
-                           make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }),
-                           make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }),
-                           make("OutQInfo", { QuantizationInfo(1.f, 1) }))
-)
+FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation,
+                       NEQuantizedMatMulFixture<int8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::SmallerMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::QASYMM8_SIGNED),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+                                    }),
+                               make("NumberOfExtraRuns", {0, 1}),
+                               make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}),
+                               make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}),
+                               make("OutQInfo", {QuantizationInfo(1.f, 1)})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture<int8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                           datasets::LargeMatMulDataset(),
-                           make("TransposeA", { false, true }),
-                           make("TransposeB", { false, true }),
-                           make("DataType", DataType::QASYMM8_SIGNED),
-                           make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }),
-                           make("NumberOfExtraRuns", { 0, 1 }),
-                           make("LhsQInfo", { QuantizationInfo(1.f / 150, -2) }),
-                           make("RhsQInfo", { QuantizationInfo(1.f / 250, 1) }),
-                           make("OutQInfo", { QuantizationInfo(1.f, 1) }))
-)
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEQuantizedMatMulFixture<int8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeMatMulDataset(),
+                               make("TransposeA", {false, true}),
+                               make("TransposeB", {false, true}),
+                               make("DataType", DataType::QASYMM8_SIGNED),
+                               make("ActivationInfo",
+                                    {
+                                        ActivationLayerInfo(),
+                                        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)
+                                    }),
+                               make("NumberOfExtraRuns", {0, 1}),
+                               make("LhsQInfo", {QuantizationInfo(1.f / 150, -2)}),
+                               make("RhsQInfo", {QuantizationInfo(1.f / 250, 1)}),
+                               make("OutQInfo", {QuantizationInfo(1.f, 1)})))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
@@ -372,7 +456,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture<int8_t>, framework::Da
 
 TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE_END() // Quantized
-#endif // __aarch64__
+#endif // __aarch64__
 
 TEST_SUITE_END() // MatMul
 TEST_SUITE_END() // NEON
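
Postscript (not part of the patch): the BF16 note in the diff above ("MatMul BF16 is enabled by
specifying FP32 datatype and enabling the fast math setting") corresponds to a very small amount of
user-facing code. The sketch below is illustrative only; it assumes the public NEMatMul,
CpuMatMulSettings, and Tensor APIs that the fixtures in this file exercise, and the shapes and the
exact configure() overload shown are assumptions rather than part of this change.

    // Minimal sketch: run a 2x3 * 3x4 matmul with fast math enabled, so that
    // BF16 kernels may be selected internally on CPUs that support them.
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEMatMul.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // ACL tensor shapes are (width, height): lhs is KxM, rhs is NxK, dst is NxM.
        Tensor lhs, rhs, dst;
        lhs.allocator()->init(TensorInfo(TensorShape(3U, 2U), 1, DataType::F32));
        rhs.allocator()->init(TensorInfo(TensorShape(4U, 3U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(4U, 2U), 1, DataType::F32));

        // FP32 data type plus fast_math(true) is the combination the BF16 suite
        // validates against the looser tolerance_bf16 instead of tolerance_fp32.
        NEMatMul matmul;
        matmul.configure(&lhs, &rhs, &dst, MatMulInfo(), CpuMatMulSettings().fast_math(true));

        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        // Tensor contents are left uninitialised here; a real caller would fill
        // lhs and rhs before running.
        matmul.run();
        return 0;
    }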