diff options
Diffstat (limited to 'tests/validation/CL/ConvolutionLayer.cpp')
-rw-r--r-- | tests/validation/CL/ConvolutionLayer.cpp | 306 |
1 file changed, 223 insertions, 83 deletions
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp index b66cfd97e7..8820a6a31e 100644 --- a/tests/validation/CL/ConvolutionLayer.cpp +++ b/tests/validation/CL/ConvolutionLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -22,6 +22,7 @@ * SOFTWARE. */ #include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" #include "arm_compute/runtime/CL/CLTensor.h" #include "arm_compute/runtime/CL/CLTensorAllocator.h" #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h" @@ -37,12 +38,16 @@ #include "tests/validation/Validation.h" #include "tests/validation/fixtures/ConvolutionLayerFixture.h" +/** Synced with tests/validation/dynamic_fusion/gpu/cl/DirectConv2d.cpp + * Please check there for any differences in the coverage + */ namespace arm_compute { namespace test { namespace validation { +using framework::dataset::make; namespace { class SmallConvolutionLayerDatasetCases final : public datasets::ConvolutionLayerDataset @@ -61,32 +66,32 @@ constexpr AbsoluteTolerance<float> tolerance_qasymm8(1); /**< T constexpr float tolerance_num = 0.07f; /**< Tolerance number */ /** CNN data types */ -const auto CNNDataTypes = framework::dataset::make("DataType", +const auto CNNDataTypes = make("DataType", { DataType::F16, - DataType::F32, - DataType::QASYMM8, - DataType::QASYMM8_SIGNED, + DataType::F32, + DataType::QASYMM8, + DataType::QASYMM8_SIGNED, }); /** Grouped CNN data types */ -const auto GroupedCNNDataTypes = framework::dataset::make("DataType", +const auto GroupedCNNDataTypes = make("DataType", { DataType::F16, - DataType::F32 + DataType::F32 }); -const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo", +const auto ActivationFunctionsDataset = make("ActivationInfo", { ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - 
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 0.5f), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) }); -const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo", +const auto ActivationFunctionsSmallDataset = make("ActivationInfo", { ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f) }); } // namespace @@ -96,7 +101,7 @@ TEST_SUITE(ConvolutionLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM + make("InputInfo", { TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Select GEMM TensorInfo(TensorShape(23U, 27U, 31U, 4U), 1, DataType::F32), // Select WINOGRAD @@ -106,7 +111,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F32), // Select GEMM TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::QASYMM8_SIGNED), // Select GEMM }), - framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), + make("WeightsInfo", { TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32), 
TensorInfo(TensorShape(3U, 3U, 31U, 21U), 1, DataType::F32), @@ -116,7 +121,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::F32), TensorInfo(TensorShape(5U, 5U, 2U, 19U), 1, DataType::QASYMM8_SIGNED), })), - framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), + make("OutputInfo", { TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), TensorInfo(TensorShape(15U, 15U, 19U), 1, DataType::F32), TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32), TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32), @@ -126,7 +131,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::F32), TensorInfo(TensorShape(17U, 31U, 19U), 1, DataType::QASYMM8_SIGNED), })), - framework::dataset::make("ConvInfo", { PadStrideInfo(1, 2, 1, 1), + make("ConvInfo", { PadStrideInfo(1, 2, 1, 1), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 1, 0, 0), @@ -136,7 +141,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z PadStrideInfo(1, 1, 2, 2), PadStrideInfo(1, 1, 2, 2), })), - framework::dataset::make("GpuTarget", { GPUTarget::BIFROST, + make("GpuTarget", { GPUTarget::BIFROST, GPUTarget::MIDGARD, GPUTarget::G71, GPUTarget::G71, @@ -146,7 +151,7 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z GPUTarget::BIFROST, GPUTarget::BIFROST, })), - framework::dataset::make("Dilation", { Size2D(1U, 1U), + make("Dilation", { Size2D(1U, 1U), Size2D(1U, 1U), Size2D(1U, 1U), Size2D(1U, 1U), @@ -156,8 +161,8 @@ DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(z Size2D(2U, 1U), Size2D(2U, 1U), })), - framework::dataset::make("EnableFastMath", { false, false, false, false, false, false, true, true, true })), - framework::dataset::make("Expected",{ 
ConvolutionMethod::GEMM, + make("EnableFastMath", { false, false, false, false, false, false, true, true, true })), + make("Expected",{ ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, @@ -186,15 +191,16 @@ TEST_SUITE_END() // ConvolutionLayer TEST_SUITE(GEMMConvolutionLayer) template <typename T> using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; +template <typename T> +using CLGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>; +template <typename T> +using CLConvolutionValidationWithPaddingFixture = ConvolutionValidationWithPaddingFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; TEST_SUITE(Float) TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::F16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("ReshapeWeights", { true })), make("DataType", DataType::F16)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsSmallDataset)) { // Validate output @@ -205,93 +211,231 @@ TEST_SUITE_END() // FP16 TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", - DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("ReshapeWeights", { true })), make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsSmallDataset)) { // 
Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U))), + make("Bias", TensorShape(2U))), + make("Output", TensorShape(11U, 25U, 2U))), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + make("Dilation", Size2D(1, 1))), + make("ReshapeWeights", { true })), + make("DataType", DataType::F32)), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} +FIXTURE_DATA_TEST_CASE(RunSmallWithPadding, CLConvolutionValidationWithPaddingFixture<float>, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerPrePaddingDataset(), + make("ReshapeWeights", { true })), + make("DataType", DataType::F32)), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("ActivationInfo", { ActivationLayerInfo() })), +make("PrePadLayer", { PaddingList({ { 1, 1 }, { 1, 1 } }) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} + TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float template <typename T> using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>; template <typename T> +using CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>; +template <typename T> using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t>; -const auto QuantizedActivationFunctionsDataset = 
framework::dataset::make("ActivationInfo", -{ - ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) -}); -const auto QuantizedActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo", -{ - ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) -}); - TEST_SUITE(Quantized) -const auto QuantizationData = framework::dataset::make("QuantizationInfo", +const auto QuantizationData = make("QuantizationInfo", { QuantizationInfo(0.5f, 10), QuantizationInfo(0.3f, 3), QuantizationInfo(1.1f, 10), }); + +/// @note: Every asymmetric quantized test has a version with or without activation because the quantization info given +/// is ignored when there is no activation. Instead of using the same quantization information for all the tensors, the +/// fixture generates separate quantization info for each input and the output tensor. 
+/// When we can also support dynamic quantization with the presence of activation, these two versions should be merged +/// again, with the explicitly specified quantization info removed +const auto NoActivation = make("ActivationInfo", ActivationLayerInfo()); + +const auto IgnoredQuantizationInfo = make("IgnoredQuantizationInfo", QuantizationInfo()); + +const auto QuantizedActivationFunctionsSmallDataset = make("ActivationInfo", +{ + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) +}); + TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmallCases, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(SmallConvolutionLayerDatasetCases(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - QuantizationData), - QuantizedActivationFunctionsSmallDataset)) + combine(SmallConvolutionLayerDatasetCases(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} + +FIXTURE_DATA_TEST_CASE(RunSmallCasesWithActivation, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, + combine(SmallConvolutionLayerDatasetCases(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - 
framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - QuantizationData), - QuantizedActivationFunctionsSmallDataset)) + combine(datasets::SmallConvolutionLayerDataset(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} + +FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, + combine(datasets::SmallConvolutionLayerDataset(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL, + combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U)), + make("Bias", TensorShape(2U)), + make("Output", TensorShape(11U, 25U, 2U)), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)), + make("Dilation", Size2D(1, 1)), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL, + combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U)), + 
make("Bias", TensorShape(2U)), + make("Output", TensorShape(11U, 25U, 2U)), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)), + make("Dilation", Size2D(1, 1)), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) - FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), - QuantizationData), - QuantizedActivationFunctionsSmallDataset)) + combine(datasets::SmallConvolutionLayerDataset(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunSmallWithActivation, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, + combine(datasets::SmallConvolutionLayerDataset(), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL, + combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", 
TensorShape(3U, 3U, 5U, 2U)), + make("Bias", TensorShape(2U)), + make("Output", TensorShape(11U, 25U, 2U)), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)), + make("Dilation", Size2D(1, 1)), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + IgnoredQuantizationInfo, + NoActivation)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunMixedDataLayoutWithActivation, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL, + combine( + make("Input", TensorShape(23U, 27U, 5U)), + make("Weights", TensorShape(3U, 3U, 5U, 2U)), + make("Bias", TensorShape(2U)), + make("Output", TensorShape(11U, 25U, 2U)), + make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0)), + make("Dilation", Size2D(1, 1)), + make("ReshapeWeights", { true }), + make("DataType", DataType::QASYMM8_SIGNED), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }), + QuantizationData, + QuantizedActivationFunctionsSmallDataset)) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE(QSYMM8_PER_CHANNEL) +const auto QuantizedActivationFunctionsSmallPerChannelDataset = make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f) +}); + FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannelFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", { DataType::QASYMM8_SIGNED })), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("ReshapeWeights", { true })), + make("DataType", { DataType::QASYMM8_SIGNED })), + make("DataLayout", { 
DataLayout::NCHW, DataLayout::NHWC })), QuantizationData), - QuantizedActivationFunctionsSmallDataset), - framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) + QuantizedActivationFunctionsSmallPerChannelDataset), + make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -299,12 +443,12 @@ FIXTURE_DATA_TEST_CASE(RunSmallSigned, CLGEMMConvolutionLayerQuantizedPerChannel FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", { DataType::QASYMM8 })), - framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + make("ReshapeWeights", { true })), + make("DataType", { DataType::QASYMM8 })), + make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), QuantizationData), - QuantizedActivationFunctionsSmallDataset), - framework::dataset::make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) + QuantizedActivationFunctionsSmallPerChannelDataset), + make("WeightsDataType", { DataType::QSYMM8_PER_CHANNEL }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -323,9 +467,7 @@ TEST_SUITE(Float) TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), make("DataType", DataType::F32)), make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsSmallDataset)) { // Validate output @@ -334,9 
+476,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<float>, fr FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F32)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), + make("DataType", DataType::F32)), + make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsDataset)) { // Validate output @@ -347,9 +489,7 @@ TEST_SUITE_END() // FP32 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SmallGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), make("DataType", DataType::F16)), make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsSmallDataset)) { // Validate output @@ -358,9 +498,9 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMGroupedConvolutionLayerFixture<half>, fra FIXTURE_DATA_TEST_CASE(RunLarge, CLGEMMGroupedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeGroupedConvolutionLayerDataset(), - framework::dataset::make("ReshapeWeights", { true })), - framework::dataset::make("DataType", DataType::F16)), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), + make("ReshapeWeights", { true })), + make("DataType", DataType::F16)), + make("DataLayout", { DataLayout::NCHW })), ActivationFunctionsDataset)) { // Validate output |