| author | Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com> | 2023-06-30 15:43:29 +0100 |
|---|---|---|
| committer | Mohmun02 <MohammedSuhail.Munshi@arm.com> | 2023-07-06 09:49:03 +0000 |
| commit | c9eeee5c84ad817360a1719c538c6e6c0812ec13 (patch) | |
| tree | 6c80020617e83b0889e092d685940c7937f41d2c /tests/validation/CL | |
| parent | ce3c48c7af02555f81c0f5e7ef2677916cecef34 (diff) | |
| download | ComputeLibrary-c9eeee5c84ad817360a1719c538c6e6c0812ec13.tar.gz | |
Fix nightly failures in MatMulLowpNativeKernel when using bounded activation functions
- Added checks for the supported activation functions to the MatMulLowpNativeKernel validate step, so unsupported functions are rejected up front (see the illustrative sketch after this list)
- Replaced the incorrect float activation macro with the quantized implementation in the mat_mul_quantized OpenCL kernel
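
As an illustration of the first point, here is a minimal sketch of what such a validate-time guard could look like. It is an assumption, not the kernel's actual code: `Status`, `ActivationLayerInfo`, and `ARM_COMPUTE_RETURN_ERROR_ON_MSG` are real Compute Library symbols, but the helper name `validate_activation` and the exact allowed set are illustrative.

```cpp
// Hedged sketch only -- not the actual ClMatMulLowpNativeKernel::validate code.
// Status, ActivationLayerInfo and ARM_COMPUTE_RETURN_ERROR_ON_MSG are real
// Compute Library symbols; the helper name and the exact allowed set here are
// illustrative assumptions.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"

arm_compute::Status validate_activation(const arm_compute::ActivationLayerInfo &act_info)
{
    using F   = arm_compute::ActivationLayerInfo::ActivationFunction;
    const F f = act_info.activation();
    // Accept "no activation" plus the clamp-style functions a lowp matmul
    // kernel can fold into its quantized output stage; anything else fails
    // validation up front instead of silently computing wrong results.
    const bool supported = !act_info.enabled()
                           || f == F::RELU
                           || f == F::BOUNDED_RELU
                           || f == F::LU_BOUNDED_RELU;
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!supported, "Activation function not supported by the quantized MatMul kernel");
    return arm_compute::Status{};
}
```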
Resolves: COMPMID-6339
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: I15661f14877f1d3305644e6473feb5482a67e773
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/532858
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Comments-Addressed: bsgcomp <bsgcomp@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9855
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/CL')
-rw-r--r-- | tests/validation/CL/MatMul.cpp | 40
1 file changed, 11 insertions(+), 29 deletions(-)
diff --git a/tests/validation/CL/MatMul.cpp b/tests/validation/CL/MatMul.cpp
index 5a262a8e78..844597f3e9 100644
--- a/tests/validation/CL/MatMul.cpp
+++ b/tests/validation/CL/MatMul.cpp
@@ -69,30 +69,27 @@ using CLMatMulActivationAlphaBetaFixture = MatMulValidationWithActivationAlphaBe
 template <typename T>
 using CLQuantizedMatMulActivationFixture = QuantizedMatMulValidationWithActivationFixture<CLTensor, CLAccessor, CLMatMul, GpuMatMulSettings, T>;
 
-/* The main act functions matmul is expected to support */
+/* The main act functions matmul (float) is expected to support */
 const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
 {
     ActivationLayerInfo(),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f),
-    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH)
 });
 
+/* (Float datatype only) Larger activation functions dataset, used during some nightly tests. */
+const auto AllActivationsDataset = combine(datasets::ActivationFunctions(), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+// Alpha beta values should be integer values
+// This is for testing purposes with quantized datatypes and is not a limitation of the kernel.
+// To properly remove this restriction, dst_qinfo should be auto-initialised with consideration for alpha beta values
+// The main act functions quantized matmul kernels are expected to support
 const auto ActivationFunctionsQuantizedDataset = concat(concat(concat(
     framework::dataset::make("ActivationInfo", ActivationLayerInfo()),
     framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))),
-    framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f))),
-    framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.75f, 0.25f)));
-
-/* Larger activation functions dataset, used during some nightly tests. */
-const auto AllActivationsDataset = combine(datasets::ActivationFunctions(), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
-
-const auto AllQuantizedActivationsDataset = combine(concat(datasets::ActivationFunctionsQuantized(),
-                                                           framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::HARD_SWISH,
-                                                                                                            ActivationLayerInfo::ActivationFunction::LEAKY_RELU
-                                                           })),
-                                                    framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 1.f))),
+    framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 2.f, 1.f)));
 
 TEST_SUITE(CL)
 TEST_SUITE(MatMul)
@@ -218,22 +215,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLQuantizedMatMulFixture<int8_t>, framework::Da
                        framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
                        framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
                        framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
-                       framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
-{
-    // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_quant);
-}
-
-FIXTURE_DATA_TEST_CASE(RunAllActivations, CLQuantizedMatMulActivationFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(
-                       datasets::LargeMatMulDataset(),
-                       framework::dataset::make("TransposeA", { false })),
-                       framework::dataset::make("TransposeB", { true })),
-                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
-                       AllQuantizedActivationsDataset),
-                       framework::dataset::make("NumberOfExtraRuns", { 0, 1 })),
-                       framework::dataset::make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) })),
-                       framework::dataset::make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) })),
-                       framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 2) })))
+                       framework::dataset::make("DstQInfo", { QuantizationInfo(1.f, 50) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_quant);
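
The new in-diff comment ("Alpha beta values should be integer values ... dst_qinfo should be auto-initialised with consideration for alpha beta values") comes down to representability: with the test's DstQInfo of QuantizationInfo(1.f, 50), only integer clamp bounds survive quantization exactly, so the float reference and the quantized kernel clamp at the same value. Below is a self-contained sketch of that round-trip; the `quantize`/`dequantize` helpers are my own assumptions for the demo, not ACL's quantization routines.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <initializer_list>
#include <iostream>

// Hypothetical asymmetric-quantization helpers; ACL's real ones behave
// similarly, but these are assumptions written for this demonstration.
std::int8_t quantize(float v, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(v / scale)) + offset;
    return static_cast<std::int8_t>(std::clamp(q, -128, 127));
}

float dequantize(std::int8_t q, float scale, int offset)
{
    return scale * static_cast<float>(q - offset);
}

int main()
{
    const float scale  = 1.f; // DstQInfo from the test: QuantizationInfo(1.f, 50)
    const int   offset = 50;

    for(float bound : { 1.0f, 0.5f }) // integer vs non-integer clamp bound
    {
        const std::int8_t q  = quantize(bound, scale, offset);
        const float       rt = dequantize(q, scale, offset);
        std::cout << "bound " << bound << " -> q " << static_cast<int>(q)
                  << " -> roundtrip " << rt << (rt == bound ? " (exact)\n" : " (lossy)\n");
    }
    return 0;
}
```

A bound of 1.0f round-trips exactly (q = 51), while 0.5f comes back as 1.0f: the float reference would clamp at 0.5 but the quantized output stage at the nearest representable 1.0. Keeping the dataset's alpha/beta integral sidesteps that mismatch without changing the kernel, which is presumably why the comment calls it a testing restriction rather than a kernel limitation.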