From ca62c6f53eb7244e6fed9f7e932608aa2496d9eb Mon Sep 17 00:00:00 2001
From: Manuel Bottini <manuel.bottini@arm.com>
Date: Tue, 23 Mar 2021 11:50:34 +0000
Subject: Mixed data-layout testing on high priority operators

Change data layouts after the configure in validation tests for:
 - Scale
 - Pooling
 - FullyConnected
 - DepthwiseConvolution
 - DirectConvolution
 - FFTConvolution
 - WinogradConvolution
 - GEMMConvolution (Indirect GEMM included)

Extending fixtures

Fixes for new mixed data layout tests

Resolves: COMPMID-4162
Change-Id: I2f2eb2075f7e24ab3872249d88cadb57b82c5dde
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5326
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
---
 tests/validation/NEON/ConvolutionLayer.cpp | 71 ++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)
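NOTE (editor's illustration, not part of the applied diff): the new
*MixedDataLayoutFixture aliases in the diff below reuse the existing
validation fixtures with an extra template flag that makes them switch the
tensors' data layout between configure() and run(). A minimal sketch of that
pattern, assuming hypothetical tensor objects `src`/`dst` and an
already-configured function `layer` (the real fixture internals live under
tests/validation/fixtures/ and may differ):

    // Flip NCHW <-> NHWC after configure() so that run() has to pick up
    // the layout change instead of relying on configure-time state
    const DataLayout original_layout = src.info()->data_layout();
    const DataLayout flipped_layout  = (original_layout == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
    src.info()->set_data_layout(flipped_layout);
    dst.info()->set_data_layout(flipped_layout);

    layer.run();

    // Restore the original layout so the comparison against the reference
    // implementation still sees consistent shapes
    src.info()->set_data_layout(original_layout);
    dst.info()->set_data_layout(original_layout);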
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 6b152c9b68..b435744cdc 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -150,6 +150,8 @@ TEST_SUITE_END() // ConvolutionLayer
 TEST_SUITE(WinogradLayer)
 template <typename T>
 using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
+template <typename T>
+using NEWinogradConvolutionLayerMixedDataLayoutFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, true, true>;
 template <typename T>
 using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false>;
 
@@ -166,6 +168,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, frame
     // Validate output
     validate(Accessor(_target), _reference, abs_tolerance_f32);
 }
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEWinogradConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(combine(combine(combine(
+                           framework::dataset::make("Input", TensorShape(8U, 8U, 32U)),
+                           framework::dataset::make("Weight", TensorShape(1U, 3U, 32U, 1U))),
+                           framework::dataset::make("Bias", TensorShape(1U))),
+                           framework::dataset::make("Output", TensorShape(8U, 6U, 1U))),
+                           framework::dataset::make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0))),
+                           framework::dataset::make("Dilation", Size2D(1U, 1U))),
+                           framework::dataset::make("DataType", { DataType::F32 })),
+                           ActivationFunctionsDataset),
+                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
 FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                        combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
                                                framework::dataset::make("DataType", { DataType::F32 })),
@@ -384,6 +401,8 @@ TEST_SUITE_END() // WinogradLayer
 TEST_SUITE(GEMMConvolutionLayer)
 template <typename T>
 using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
+template <typename T>
+using NEGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
 
 TEST_SUITE(Float)
 #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
@@ -424,11 +443,29 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework
     // Validate output
     validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
 }
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                           framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+                           framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+                           framework::dataset::make("Bias", TensorShape(2U))),
+                           framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+                           framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+                           framework::dataset::make("Dilation", Size2D(1, 1))),
+                           framework::dataset::make("ReshapeWeights", { true })),
+                           framework::dataset::make("DataType", DataType::F32)),
+                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                           ActivationFunctionsDataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
+}
 TEST_SUITE_END() // FP32
 TEST_SUITE_END() // Float
 
 template <typename T>
 using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;
+template <typename T>
+using NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
 template <typename T>
 using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEConvolutionLayer, T, int8_t>;
 
@@ -451,6 +488,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                           framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+                           framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+                           framework::dataset::make("Bias", TensorShape(2U))),
+                           framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+                           framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+                           framework::dataset::make("Dilation", Size2D(1, 1))),
+                           framework::dataset::make("ReshapeWeights", { true })),
+                           framework::dataset::make("DataType", DataType::QASYMM8)),
+                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                           framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+                           QuantizedActivationFunctionsDataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
 TEST_SUITE_END() // QASYMM8
 
 TEST_SUITE(QASYMM8_SIGNED)
@@ -464,6 +518,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>,
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+                           framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+                           framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+                           framework::dataset::make("Bias", TensorShape(2U))),
+                           framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+                           framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+                           framework::dataset::make("Dilation", Size2D(1, 1))),
+                           framework::dataset::make("ReshapeWeights", { true })),
+                           framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                           framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                           framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+                           QuantizedActivationFunctionsDataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
 TEST_SUITE_END() // QASYMM8_SIGNED
 
 TEST_SUITE(QSYMM8_PER_CHANNEL)
--
cgit v1.2.1
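NOTE (editor's illustration, not part of the patch): each RunMixedDataLayout
case above pins every dataset axis to a single value and then adds a
two-value "DataLayout" axis, so each fixture runs exactly once per layout.
Reduced to two axes, the combine()/make() idiom used throughout the diff
looks roughly like this (assuming the ComputeLibrary test framework headers
and namespaces are in scope):

    // Cartesian product of two named axes: the fixture receives one
    // (DataType, DataLayout) tuple per iteration, i.e. F32+NCHW and F32+NHWC
    const auto mixed_layout_dataset =
        combine(framework::dataset::make("DataType", { DataType::F32 }),
                framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }));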