From d93e263e70e3101422402c95946e520fef34c4c7 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Tue, 15 Oct 2019 11:09:33 +0100
Subject: COMPMID-2708 NEDepthwiseConvolution Generic: support for QUANT8_PER_CHANNEL_SYMM

COMPMID-2470 Implement a new and generic depthwise convolution for NEON QASYMM8 NHWC

COMPMID-2477 Enable FP16 data type for the new generic convolution on NEON for NHWC

COMPMID-2625 Remove old implementation files for the generic NEDepthwiseConvolution

Change-Id: I8f6deda4fc69dd7e472fba3228b1ed5dad172f3e
Signed-off-by: Giorgio Arena
Reviewed-on: https://review.mlplatform.org/c/2094
Comments-Addressed: Arm Jenkins
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 .../validation/NEON/DepthwiseConvolutionLayer.cpp | 68 ++++++++++++++++++----
 1 file changed, 56 insertions(+), 12 deletions(-)

(limited to 'tests/validation/NEON/DepthwiseConvolutionLayer.cpp')

diff --git a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
index 6392906037..0b482ff9b3 100644
--- a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
@@ -163,16 +163,16 @@ DATA_TEST_CASE(Validate3x3, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip
 }
 
 DATA_TEST_CASE(ValidateGeneric, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(
-                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching data type input/weights
-                                                        TensorInfo(TensorShape(27U, 13U, 3U), 1, DataType::F32),     // Mismatching input feature maps
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching depth multiplier
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid biases size
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid biases dimensions
-                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid output size
-                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),     // patch size bigger than input width
-                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),     // dilation < 1
-                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),
-                                                        TensorInfo(TensorShape(32U, 13U, 8U), 1, DataType::QASYMM8),
+                framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching data type input/weights
+                                                        TensorInfo(TensorShape(27U, 13U, 3U), 1, DataType::F32),     // Mismatching input feature maps
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching depth multiplier
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid biases size
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid biases dimensions
+                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid output size
+                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),     // Patch size bigger than input width
+                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),     // Dilation < 1
+                                                        TensorInfo(TensorShape(27U, 13U, 8U), 1, DataType::F32),     // Window shrinking
+                                                        TensorInfo(TensorShape(32U, 13U, 8U), 1, DataType::QASYMM8), // Window shrinking
 }),
 framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F16),
                                           TensorInfo(TensorShape(3U, 3U, 2U), 1, DataType::F32),
@@ -240,7 +240,7 @@ DATA_TEST_CASE(ValidateGeneric, framework::DatasetMode::ALL, zip(zip(zip(zip(zip
                                           Size2D(1U, 1U),
                                           Size2D(1U, 1U),
 })),
-                framework::dataset::make("Expected", { false, false, false, false, false, false,false, false, true, true })),
framework::dataset::make("Expected", { false, false, false, false, false, false,false, false, false, false })), input_info, weights_info, biases_info, output_info, conv_info, depth_multiplier,dilation, expected) { bool is_valid = bool(NEDepthwiseConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, depth_multiplier, ActivationLayerInfo(), dilation)); @@ -512,7 +512,8 @@ TEST_SUITE_END() // Float template using NEDepthwiseConvolutionLayerQuantizedFixtureOptimized = DepthwiseConvolutionLayerValidationQuantizedFixture; template -using NEDepthwiseConvolutionLayerQuantizedFixture = DepthwiseConvolutionLayerValidationQuantizedFixture; +using NEDepthwiseConvolutionLayerQuantizedFixture = DepthwiseConvolutionLayerValidationQuantizedFixture; +using NEDepthwiseConvolutionLayerQuantizedSymmetricPerChannelFixture = DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture; TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) @@ -642,6 +643,49 @@ FIXTURE_DATA_TEST_CASE(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixtureO } TEST_SUITE_END() // Optimized TEST_SUITE_END() // QASYMM8 +TEST_SUITE(QSYMM8_PER_CHANNEL) +TEST_SUITE(Generic) +FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedSymmetricPerChannelFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(), + depth_multipliers), + framework::dataset::make("InputDataType", DataType::QASYMM8)), + framework::dataset::make("WeightsDataType", DataType::QSYMM8_PER_CHANNEL)), + framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.3f, 10) })), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsDataset)) +{ + validate(Accessor(_target), _reference, tolerance_qasymm8); +} + +TEST_SUITE(Dilation) +FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthwiseConvolutionLayerQuantizedSymmetricPerChannelFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(), + depth_multipliers), + framework::dataset::make("InputDataType", DataType::QASYMM8)), + framework::dataset::make("WeightsDataType", DataType::QSYMM8_PER_CHANNEL)), + framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.3f, 10) })), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsDataset)) +{ + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthwiseConvolutionLayerQuantizedSymmetricPerChannelFixture, framework::DatasetMode::NIGHTLY, + combine(combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset(), + depth_multipliers), + framework::dataset::make("InputDataType", DataType::QASYMM8)), + framework::dataset::make("WeightsDataType", DataType::QSYMM8_PER_CHANNEL)), + framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.3f, 10) })), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsDataset)) +{ + 
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // Dilation
+TEST_SUITE_END() // Generic
+TEST_SUITE_END() // QSYMM8_PER_CHANNEL
 TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // DepthwiseConvLayer
--
cgit v1.2.1