From 93a77cd48609277a3849269c4cea5bf4b1ceb76a Mon Sep 17 00:00:00 2001
From: Gunes Bayir
Date: Fri, 13 Oct 2023 16:58:41 +0100
Subject: Use dynamic quantization in Convolution and Dilated Convolution tests

This patch calculates the output quantization info based on the inputs'
quantization information. The previous approach used the same quantization
information for input, weights and output.

This implementation does not cover the cases where we have a fused
activation function.

Resolves: COMPMID-6482
Change-Id: I4a9d87cfef8ad18ef241d457d23f44c8519a1389
Signed-off-by: SiCong Li
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10541
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
Benchmark: Arm Jenkins
---
 tests/validation/CL/ConvolutionLayer.cpp | 283 ++++++++++++++++++++-----------
 1 file changed, 181 insertions(+), 102 deletions(-)

diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 986d76708d..8820a6a31e 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -47,6 +47,7 @@ namespace test
 {
 namespace validation
 {
+using framework::dataset::make;
 namespace
 {
 class SmallConvolutionLayerDatasetCases final : public datasets::ConvolutionLayerDataset
@@ -65,7 +66,7 @@ constexpr AbsoluteTolerance tolerance_qasymm8(1); /**< T
 constexpr float tolerance_num = 0.07f; /**< Tolerance number */
 
 /** CNN data types */
-const auto CNNDataTypes = framework::dataset::make("DataType",
+const auto CNNDataTypes = make("DataType",
 {
     DataType::F16,
     DataType::F32,
@@ -74,20 +75,20 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
 });
 
 /** Grouped CNN data types */
-const auto GroupedCNNDataTypes = framework::dataset::make("DataType",
+const auto GroupedCNNDataTypes = make("DataType",
 {
     DataType::F16,
     DataType::F32
 });
 
-const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+const auto ActivationFunctionsDataset = make("ActivationInfo",
 {
     ActivationLayerInfo(),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
 });
-const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
+const auto ActivationFunctionsSmallDataset = make("ActivationInfo",
 {
     ActivationLayerInfo(),
     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
@@ -100,7 +101,7 @@ TEST_SUITE(ConvolutionLayer)
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
+                make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
                 TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
                 TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Select GEMM
                 TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD
@@ -110,7 +111,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
                 TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM
                 TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED), // Select GEMM
                 }),
-                framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
+                make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                 TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                 TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32),
                 TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32),
@@ -120,7 +121,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
                 TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32),
                 TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED),
                 })),
-                framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
+                make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                 TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32),
                 TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
                 TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32),
@@ -130,7 +131,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
                 TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32),
                 TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::QASYMM8_SIGNED),
                 })),
-                framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
+                make("ConvInfo", { PadStrideInfo(1, 2, 1, 1),
                 PadStrideInfo(1, 2, 1, 1),
                 PadStrideInfo(1, 1, 0, 0),
                 PadStrideInfo(1, 1, 0, 0),
@@ -140,7 +141,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
                 PadStrideInfo(1, 1, 2, 2),
                 PadStrideInfo(1, 1, 2, 2),
                 })),
-                framework::dataset::make("GpuTarget", { GPUTarget::BIFROST,
+                make("GpuTarget", { GPUTarget::BIFROST,
                 GPUTarget::MIDGARD,
                 GPUTarget::G71,
                 GPUTarget::G71,
@@ -150,7 +151,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
                 GPUTarget::BIFROST,
                 GPUTarget::BIFROST,
                 })),
-                framework::dataset::make("Dilation", { Size2D(1U, 1U),
+                make("Dilation", { Size2D(1U, 1U),
                 Size2D(1U, 1U),
                 Size2D(1U, 1U),
                 Size2D(1U, 1U),
@@ -160,8 +161,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z
                 Size2D(2U, 1U),
                 Size2D(2U, 1U),
                 })),
-                framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
-                framework::dataset::make("Expected",{ ConvolutionMethod::GEMM,
+                make("EnableFastMath", { false, false, false, false, false, false, true, true, true })),
+                make("Expected",{ ConvolutionMethod::GEMM,
                 ConvolutionMethod::GEMM,
                 ConvolutionMethod::GEMM,
                 ConvolutionMethod::WINOGRAD,
@@ -199,7 +200,7 @@ TEST_SUITE(Float)
 TEST_SUITE(FP16)
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       make("ReshapeWeights", { true })), make("DataType", DataType::F16)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                        ActivationFunctionsSmallDataset))
 {
     // Validate output
@@ -210,7 +211,7 @@ TEST_SUITE_END() // FP16
 TEST_SUITE(FP32)
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       make("ReshapeWeights", { true })), make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsSmallDataset)) { // Validate output @@ -218,15 +219,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture, framework } FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(combine(combine(combine( - framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), - framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), - framework::dataset::make("Bias", TensorShape(2U))), - framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), - framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), - framework::dataset::make("Dilation", Size2D(1, 1))), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U))), + make("Bias", TensorShape(2U))), + make("Output", TensorShape(11U, 25U, 2U))), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + make("Dilation", Size2D(1, 1))), + make("ReshapeWeights", { true })), + make("DataType", DataType::F32)), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsSmallDataset)) { // Validate output @@ -234,11 +235,11 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayout } FIXTURE_DATA_TEST_CASE(RunSmallWithPadding, CLConvolutionValidationWithPaddingFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerPrePaddingDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })), -framework::dataset::make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) }))) + make("ReshapeWeights", { true })), + make("DataType", DataType::F32)), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("ActivationInfo", { ActivationLayerInfo() })), +make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -254,64 +255,108 @@ using CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidat template using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture; -const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo", -{ - ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) -}); -const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo", +TEST_SUITE(Quantized) + +const auto QuantizationData = make("QuantizationInfo", { - ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) + QuantizationInfo(0.5f, 10), + QuantizationInfo(0.3f, 3), + QuantizationInfo(1.1f, 10), }); -TEST_SUITE(Quantized) +/// @note: Every asymmetric quantized test has a version with or without activation because the quantization info given +/// is ignored when there is no activation. 
+/// all the tensors, the fixture generates separate quantization info for the input, weights and output tensors.
+/// Once dynamic quantization is also supported in the presence of an activation, these two versions should be merged
+/// again, with the explicitly specified quantization info removed.
+const auto NoActivation = make("ActivationInfo", ActivationLayerInfo());
+
+const auto IgnoredQuantizationInfo = make("IgnoredQuantizationInfo", QuantizationInfo());
 
-const auto QuantizationData = framework::dataset::make("QuantizationInfo",
+const auto QuantizedActivationFunctionsSmallDataset = make("ActivationInfo",
 {
-    QuantizationInfo(0.5f, 10),
-    QuantizationInfo(0.3f, 3),
-    QuantizationInfo(1.1f, 10),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
 });
+
 TEST_SUITE(QASYMM8)
 FIXTURE_DATA_TEST_CASE(RunSmallCases, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(SmallConvolutionLayerDatasetCases(),
-                       framework::dataset::make("ReshapeWeights", { true })),
-                       framework::dataset::make("DataType", DataType::QASYMM8)),
-                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                       QuantizationData),
-                       QuantizedActivationFunctionsSmallDataset))
+                       combine(SmallConvolutionLayerDatasetCases(),
+                               make("ReshapeWeights", { true }),
+                               make("DataType", DataType::QASYMM8),
+                               make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+                               IgnoredQuantizationInfo,
+                               NoActivation))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmallCasesWithActivation, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL,
+                       combine(SmallConvolutionLayerDatasetCases(),
+                               make("ReshapeWeights", { true }),
+                               make("DataType", DataType::QASYMM8),
+                               make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+                               QuantizationData,
+                               QuantizedActivationFunctionsSmallDataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
 }
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })),
-                       framework::dataset::make("DataType", DataType::QASYMM8)),
-                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                       QuantizationData),
-                       QuantizedActivationFunctionsSmallDataset))
+                       combine(datasets::SmallConvolutionLayerDataset(),
+                               make("ReshapeWeights", { true }),
+                               make("DataType", DataType::QASYMM8),
+                               make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+                               IgnoredQuantizationInfo,
+                               NoActivation))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL,
+                       combine(datasets::SmallConvolutionLayerDataset(),
+                               make("ReshapeWeights", { true }),
+                               make("DataType", DataType::QASYMM8),
+                               make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+                               QuantizationData,
+                               QuantizedActivationFunctionsSmallDataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
 }
 FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL,
-                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
-                       framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), - framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), - framework::dataset::make("Bias", TensorShape(2U))), - framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), - framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), - framework::dataset::make("Dilation", Size2D(1, 1))), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - QuantizationData), - QuantizedActivationFunctionsSmallDataset)) + combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U)), + make("Bias", TensorShape(2U)), + make("Output", TensorShape(11U, 25U, 2U)), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)), + make("Dilation", Size2D(1, 1)), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL, + combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U)), + make("Bias", TensorShape(2U)), + make("Output", TensorShape(11U, 25U, 2U)), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)), + make("Dilation", Size2D(1, 1)), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -319,44 +364,78 @@ FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedD TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - QuantizationData), - QuantizedActivationFunctionsSmallDataset)) + combine(datasets::SmallConvolutionLayerDataset(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, + combine(datasets::SmallConvolutionLayerDataset(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - framework::dataset::make("Input", TensorShape(23U, 
-                       framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
-                       framework::dataset::make("Bias", TensorShape(2U))),
-                       framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
-                       framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
-                       framework::dataset::make("Dilation", Size2D(1, 1))),
-                       framework::dataset::make("ReshapeWeights", { true })),
-                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
-                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
-                       QuantizationData),
-                       QuantizedActivationFunctionsSmallDataset))
+                       combine(
+                               make("Input", TensorShape(23U, 27U, 5U)),
+                               make("Weights", TensorShape(3U, 3U, 5U, 2U)),
+                               make("Bias", TensorShape(2U)),
+                               make("Output", TensorShape(11U, 25U, 2U)),
+                               make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
+                               make("Dilation", Size2D(1, 1)),
+                               make("ReshapeWeights", { true }),
+                               make("DataType", DataType::QASYMM8_SIGNED),
+                               make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+                               IgnoredQuantizationInfo,
+                               NoActivation))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL,
+                       combine(
+                               make("Input", TensorShape(23U, 27U, 5U)),
+                               make("Weights", TensorShape(3U, 3U, 5U, 2U)),
+                               make("Bias", TensorShape(2U)),
+                               make("Output", TensorShape(11U, 25U, 2U)),
+                               make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)),
+                               make("Dilation", Size2D(1, 1)),
+                               make("ReshapeWeights", { true }),
+                               make("DataType", DataType::QASYMM8_SIGNED),
+                               make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }),
+                               QuantizationData,
+                               QuantizedActivationFunctionsSmallDataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
 }
 TEST_SUITE_END() // QASYMM8_SIGNED
 TEST_SUITE(QSYMM8_PER_CHANNEL)
+const auto QuantizedActivationFunctionsSmallPerChannelDataset = make("ActivationInfo",
+{
+    ActivationLayerInfo(),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f)
+});
+
 FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannelFixture, framework::DatasetMode::ALL,
                        combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })),
-                       framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })),
-                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                       make("ReshapeWeights", { true })),
+                       make("DataType", { DataType::QASYMM8_SIGNED })),
+                       make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
                        QuantizationData),
-                       QuantizedActivationFunctionsSmallDataset),
-                       framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
+                       QuantizedActivationFunctionsSmallPerChannelDataset),
+                       make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -364,12 +443,12 @@ FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannel
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedPerChannelFixture, framework::DatasetMode::ALL,
                        combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
-                       framework::dataset::make("ReshapeWeights", { true })),
-                       framework::dataset::make("DataType", { DataType::QASYMM8 })),
-                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
make("ReshapeWeights", { true })), + make("DataType", { DataType::QASYMM8 })), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), QuantizationData), - QuantizedActivationFunctionsSmallDataset), - framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) + QuantizedActivationFunctionsSmallPerChannelDataset), + make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -388,7 +467,7 @@ TEST_SUITE(Float) TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsSmallDataset)) { // Validate output @@ -397,9 +476,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture, fr FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), + make("DataType", DataType::F32)), + make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsDataset)) { // Validate output @@ -410,7 +489,7 @@ TEST_SUITE_END() // FP32 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), make("DataType", DataType::F16)), make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsSmallDataset)) { // Validate output @@ -419,9 +498,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture, fra FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), + make("DataType", DataType::F16)), + make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsDataset)) { // Validate output -- cgit v1.2.1