author     Jan Eilers <jan.eilers@arm.com>    2021-04-21 16:58:28 +0100
committer  Jan Eilers <jan.eilers@arm.com>    2021-05-05 11:34:32 +0000
commit     ea835e7d199be0c89bf80611e1d88f47c271503b (patch)
tree       a369bc586642e224e896dd25441a2c74439e255e
parent     df9a32264780f0d478c0a5ad735296368a5b9edf (diff)
IVGCVSW-5826 Add per channel tests
* with Conv2D for tflite parser and delegate
* with TransposeConv for tflite parser

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I8d1ed1f3388c6eace436d0e6fd5869451c6e7830
-rw-r--r--  delegate/src/test/Convolution2dTest.cpp         95
-rw-r--r--  src/armnnTfLiteParser/test/Conv2D.cpp           290
-rw-r--r--  src/armnnTfLiteParser/test/TransposeConv.cpp    331
3 files changed, 716 insertions, 0 deletions
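For context on the per-channel quantization these tests exercise: each output channel of the filter tensor carries its own scale (with zero point 0), and the bias scale for a channel is the product of the input scale and that channel's filter scale. The standalone sketch below is not part of the patch; it only illustrates that relationship using the values from Conv2DPerChannelInt8Test in the diff.

```cpp
// Minimal sketch (not part of the patch): per-channel quantization as used by
// Conv2DPerChannelInt8Test. Each output channel c of the filter has its own
// scale, and the bias scale for that channel is inputScale * filterScale[c].
#include <cassert>
#include <cmath>
#include <vector>

int main()
{
    const float inputScale = 0.388235f; // from the test
    const std::vector<float> filterScales { 1.858268f, 2.0f, 1.992126f, 1.905512f };
    const std::vector<float> biasScales   { 0.721445f, 0.7764700055f, 0.773414f, 0.739787f };

    for (size_t c = 0; c < filterScales.size(); ++c)
    {
        // Per-channel dequantization is real = scale[c] * (quantized - zeroPoint),
        // with zeroPoint == 0 for the filter and bias tensors, so the bias scale
        // of channel c must equal inputScale * filterScales[c].
        const float derivedBiasScale = inputScale * filterScales[c];
        assert(std::fabs(derivedBiasScale - biasScales[c]) < 1e-4f);
    }
    return 0;
}
```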
diff --git a/delegate/src/test/Convolution2dTest.cpp b/delegate/src/test/Convolution2dTest.cpp
index 6f498ce22e..b2e5fad8df 100644
--- a/delegate/src/test/Convolution2dTest.cpp
+++ b/delegate/src/test/Convolution2dTest.cpp
@@ -222,6 +222,83 @@ void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
biasValues);
}
+
+void Conv2DPerChannelInt8Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1,4,4,2 };
+ std::vector<int32_t> filterShape { 4,2,2,2 };
+ std::vector<int32_t> biasShape { 4 };
+ std::vector<int32_t> outputShape { 1,4,4,4 };
+
+ static std::vector<int8_t> inputValues =
+ {
+ -11, 40,-26, 11,-28, 8, 0, -8,
+ -10, 34, 47, 0,-33,-14, 28, 35,
+ 6,-28,-26, 8, 13, 33,-31,-41,
+ 31,-20,-31,-16, 8,-18,-44, 0
+ };
+
+ std::vector<float> filterScales = { 1.858268, 2.0, 1.992126, 1.905512 };
+ int32_t filterQuantizationDim = 0;
+ std::vector<int8_t> filterValues =
+ {
+ 13,-44, 5,-14, 21,-45, 36,-25,
+ -42, -2, 24,-30,-31, 35, 43,-30,
+ -20, -5, 25, 17, 18, 20, 4,-46,
+ -49, 9, -3,-20, 46, 5, 7,-15
+ };
+
+ std::vector<int32_t> biasValues = { 0,0,0,0 };
+ std::vector<float> biasScales = { 0.721445, 0.7764700055, 0.773414, 0.739787 };
+
+ std::vector<int8_t> expectedOutputValues =
+ {
+ -1, 9, 3, 5, 1, -1, 5, 9,
+ 2, 7, -1, 2, 2, 4, 5, 6,
+ 1, 1, 4, 4, 2, 0, -4, -3,
+ 0, 6, 12, 6, 3, 0, -1, -2,
+ 7, -4, 4, 4, 3, 6, 6, 2,
+ 0, -3, -1, 4, 4, 8, 3, 1,
+ 5, 0, 0, 1, 4, 7, 4, 6,
+ 4, 0, 1, 2, 2, 7, 5, 7
+ };
+ float outputQuantScale = 401.960785f;
+ int outputQuantOffset = 3;
+ float inputQuantScale = 0.388235f;
+ int inputQuantOffset = 1;
+
+ tflite::Padding padding = tflite::Padding_SAME;
+
+ ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
+ ::tflite::TensorType_INT8,
+ 1, // strideX
+ 1, // strideY
+ 1, // dilationX
+ 1, // dilationY
+ padding,
+ tflite::ActivationFunctionType_NONE,
+ backends,
+ inputShape,
+ filterShape,
+ outputShape,
+ inputValues,
+ filterValues,
+ expectedOutputValues,
+ biasShape,
+ biasValues,
+ biasScales,
+ {0,0,0,0},
+ filterScales,
+ {0,0,0,0},
+ outputQuantScale,
+ outputQuantOffset,
+ inputQuantScale,
+ inputQuantOffset,
+ 1, // depth_multiplier is ignored for conv2d, so the value doesn't matter
+ filterQuantizationDim);
+}
+
TEST_SUITE("Convolution2dTest_CpuRefTests")
{
@@ -237,6 +314,12 @@ TEST_CASE ("Conv2DWithBiases_Int8_CpuRef_Test")
Conv2DWithBiasesInt8Test(backends);
}
+TEST_CASE ("Conv2DPerChannel_Int8_CpuRef_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ Conv2DPerChannelInt8Test(backends);
+}
+
} //End of TEST_SUITE("Convolution2dTest_CpuRef")
TEST_SUITE("Convolution2dTest_CpuAccTests")
@@ -254,6 +337,12 @@ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
Conv2DWithBiasesInt8Test(backends);
}
+TEST_CASE ("Conv2DPerChannel_Int8_CpuAcc_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ Conv2DPerChannelInt8Test(backends);
+}
+
} //End of TEST_SUITE("Convolution2dTest_CpuAcc")
TEST_SUITE("Convolution2dTest_GpuAccTests")
@@ -271,6 +360,12 @@ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
Conv2DWithBiasesInt8Test(backends);
}
+TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
+{
+ std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ Conv2DPerChannelInt8Test(backends);
+}
+
} //End of TEST_SUITE("Convolution2dTest_GpuAcc")
void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 8ef827ce7f..a480a4ec3d 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -374,4 +374,294 @@ BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixtu
});
}
+
+struct PerChannelConv2DFixture : public ParserFlatbuffersFixture
+{
+ explicit PerChannelConv2DFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [
+ {
+ "builtin_code": "CONV_2D",
+ "version": 3
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [
+ 1,
+ 4,
+ 4,
+ 2
+ ],
+ "type": "INT8",
+ "buffer": 1,
+ "name": "input",
+ "quantization": {
+ "min": [
+ -50.0
+ ],
+ "max": [
+ 49.0
+ ],
+ "scale": [
+ 0.388235
+ ],
+ "zero_point": [
+ 1
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": [
+ 4
+ ],
+ "type": "INT32",
+ "buffer": 2,
+ "name": "model/conv2d/Conv2D",
+ "quantization": {
+ "scale": [
+ 0.001523,
+ 0.001197,
+ 0.001517,
+ 0.001364
+ ],
+ "zero_point": [
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": [
+ 4,
+ 2,
+ 2,
+ 2
+ ],
+ "type": "INT8",
+ "buffer": 3,
+ "name": "model/conv2d/Conv2D1",
+ "quantization": {
+ "min": [
+ -0.498056,
+ -0.362561,
+ -0.307959,
+ -0.207799
+ ],
+ "max": [
+ 0.339136,
+ 0.391629,
+ 0.496193,
+ 0.446191
+ ],
+ "scale": [
+ 0.003922,
+ 0.003084,
+ 0.003907,
+ 0.003513
+ ],
+ "zero_point": [
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": [
+ 1,
+ 4,
+ 4,
+ 4
+ ],
+ "type": "INT8",
+ "buffer": 4,
+ "name": "Identity",
+ "quantization": {
+ "min": [
+ -66.578751
+ ],
+ "max": [
+ 70.137619
+ ],
+ "scale": [
+ 0.536143
+ ],
+ "zero_point": [
+ -4
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ }
+ ],
+ "inputs": [
+ 0
+ ],
+ "outputs": [
+ 3
+ ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [
+ 0,
+ 2,
+ 1
+ ],
+ "outputs": [
+ 3
+ ],
+ "builtin_options_type": "Conv2DOptions",
+ "builtin_options": {
+ "padding": "SAME",
+ "stride_w": 1,
+ "stride_h": 1,
+ "fused_activation_function": "NONE",
+ "dilation_w_factor": 1,
+ "dilation_h_factor": 1
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ "name": "main"
+ }
+ ],
+ "description": "MLIR Converted.",
+ "buffers": [
+ {
+ },
+ {
+ },
+ {
+ "data": [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ]
+ },
+ {
+ "data": [
+ 157,
+ 201,
+ 86,
+ 129,
+ 17,
+ 33,
+ 209,
+ 13,
+ 76,
+ 249,
+ 127,
+ 138,
+ 35,
+ 18,
+ 250,
+ 233,
+ 15,
+ 205,
+ 98,
+ 127,
+ 68,
+ 196,
+ 246,
+ 177,
+ 65,
+ 197,
+ 230,
+ 246,
+ 127,
+ 66,
+ 212,
+ 30
+ ]
+ },
+ {
+ },
+ {
+ "data": [
+ 49,
+ 46,
+ 53,
+ 46,
+ 48,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ]
+ }
+ ],
+ "metadata": [
+ {
+ "name": "min_runtime_version",
+ "buffer": 5
+ }
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("input", "Identity");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE( ParsePerChannelConv2D, PerChannelConv2DFixture )
+{
+ RunTest<4, armnn::DataType::QAsymmS8>(
+ 0,
+ {
+ -11, 40,-26, 11,-28, 8, 0, -8,
+ -10, 34, 47, 0,-33,-14, 28, 35,
+ 6,-28,-26, 8, 13, 33,-31,-41,
+ 31,-20,-31,-16, 8,-18,-44, 0
+ },
+ {
+ -21,-17,-23,-14, -1,-14, 1, 9,
+ 1,-12,-22,-23, 2, -1, -3, 12,
+ 7, 6, 8,-13,-21, -6,-31, 0,
+ 9, -6, 24, 0,-22, -4, -7,-22,
+ -7, -9, 9, 11,-11,-16, 9,-27,
+ -1, 0,-26, 0, 9,-12, -8,-18,
+ -11, -3,-15, 7, 16, -2, -8, -7,
+ -14,-15,-14, 3, 9,-12, -6,-11
+ });
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/TransposeConv.cpp b/src/armnnTfLiteParser/test/TransposeConv.cpp
index 94e42438e1..f990941ad9 100644
--- a/src/armnnTfLiteParser/test/TransposeConv.cpp
+++ b/src/armnnTfLiteParser/test/TransposeConv.cpp
@@ -270,4 +270,335 @@ BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConvWithBias, SimpleTransposeConvFi
});
}
+
+struct TransposeConvPerChannelFixture : public ParserFlatbuffersFixture
+{
+ explicit TransposeConvPerChannelFixture()
+ {
+ m_JsonString = R"(
+ {
+ "version": 3,
+ "operator_codes": [
+ {
+ "builtin_code": "TRANSPOSE_CONV",
+ "version": 2
+ }
+ ],
+ "subgraphs": [
+ {
+ "tensors": [
+ {
+ "shape": [
+ 1,
+ 4,
+ 4,
+ 2
+ ],
+ "type": "INT8",
+ "buffer": 1,
+ "name": "input",
+ "quantization": {
+ "min": [
+ -50.0
+ ],
+ "max": [
+ 49.0
+ ],
+ "scale": [
+ 0.388235
+ ],
+ "zero_point": [
+ 1
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": [
+ 4
+ ],
+ "type": "INT32",
+ "buffer": 2,
+ "name": "model/conv2d_transpose/stack",
+ "quantization": {
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": [
+ 8,
+ 2,
+ 2,
+ 2
+ ],
+ "type": "INT8",
+ "buffer": 3,
+ "name": "model/conv2d_transpose/conv2d_transpose",
+ "quantization": {
+ "min": [
+ -0.081948,
+ -0.379918,
+ -0.223632,
+ -0.098629,
+ -0.386369,
+ -0.351057,
+ -0.348749,
+ -0.264848
+ ],
+ "max": [
+ 0.35091,
+ 0.229681,
+ 0.368384,
+ 0.176761,
+ 0.353717,
+ 0.377565,
+ 0.373713,
+ 0.30141
+ ],
+ "scale": [
+ 0.002763,
+ 0.002991,
+ 0.002901,
+ 0.001392,
+ 0.003042,
+ 0.002973,
+ 0.002943,
+ 0.002373
+ ],
+ "zero_point": [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ },
+ {
+ "shape": [
+ 1,
+ 4,
+ 4,
+ 8
+ ],
+ "type": "INT8",
+ "buffer": 4,
+ "name": "Identity",
+ "quantization": {
+ "min": [
+ -63.578175
+ ],
+ "max": [
+ 69.305023
+ ],
+ "scale": [
+ 0.521111
+ ],
+ "zero_point": [
+ -6
+ ],
+ "details_type": "NONE",
+ "quantized_dimension": 0
+ },
+ "is_variable": false
+ }
+ ],
+ "inputs": [
+ 0
+ ],
+ "outputs": [
+ 3
+ ],
+ "operators": [
+ {
+ "opcode_index": 0,
+ "inputs": [
+ 1,
+ 2,
+ 0
+ ],
+ "outputs": [
+ 3
+ ],
+ "builtin_options_type": "TransposeConvOptions",
+ "builtin_options": {
+ "padding": "SAME",
+ "stride_w": 1,
+ "stride_h": 1
+ },
+ "custom_options_format": "FLEXBUFFERS"
+ }
+ ],
+ "name": "main"
+ }
+ ],
+ "description": "MLIR Converted.",
+ "buffers": [
+ {
+ },
+ {
+ },
+ {
+ "data": [
+ 1,
+ 0,
+ 0,
+ 0,
+ 4,
+ 0,
+ 0,
+ 0,
+ 4,
+ 0,
+ 0,
+ 0,
+ 8,
+ 0,
+ 0,
+ 0
+ ]
+ },
+ {
+ "data": [
+ 13,
+ 239,
+ 7,
+ 125,
+ 35,
+ 127,
+ 55,
+ 226,
+ 77,
+ 150,
+ 159,
+ 192,
+ 180,
+ 129,
+ 51,
+ 48,
+ 108,
+ 9,
+ 21,
+ 179,
+ 12,
+ 39,
+ 127,
+ 107,
+ 44,
+ 206,
+ 127,
+ 185,
+ 108,
+ 82,
+ 86,
+ 218,
+ 38,
+ 149,
+ 16,
+ 1,
+ 129,
+ 163,
+ 116,
+ 136,
+ 138,
+ 43,
+ 65,
+ 186,
+ 154,
+ 138,
+ 64,
+ 127,
+ 120,
+ 127,
+ 207,
+ 70,
+ 43,
+ 33,
+ 141,
+ 137,
+ 93,
+ 215,
+ 65,
+ 92,
+ 122,
+ 144,
+ 120,
+ 127
+ ]
+ },
+ {
+ },
+ {
+ "data": [
+ 49,
+ 46,
+ 57,
+ 46,
+ 48,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0
+ ]
+ }
+ ],
+ "metadata": [
+ {
+ "name": "min_runtime_version",
+ "buffer": 5
+ }
+ ]
+ }
+ )";
+ SetupSingleInputSingleOutput("input", "Identity");
+ }
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseTransposeConvPerChannel, TransposeConvPerChannelFixture )
+{
+ RunTest<4, armnn::DataType::QAsymmS8>(
+ 0,
+ {
+ -11, 40,-26, 11,-28, 8, 0, -8,
+ -10, 34, 47, 0,-33,-14, 28, 35,
+ 6,-28,-26, 8, 13, 33,-31,-41,
+ 31,-20,-31,-16, 8,-18,-44, 0
+ },
+ {
+ -8,-17, -8, -9,-16, 1, 2,-11,
+ 3,-16,-19,-12,-11, -6, -3, -6,
+ -5, -8,-16,-12,-11, -3, -7,-13,
+ -4, 1, -9,-10, -5,-12, -5, -8,
+ 2,-25, -5, -6,-20, -7, 2,-21,
+ 1, 4, 5,-13,-10,-12, 3, 4,
+ -10,-17,-17, -6, -7, 12,-22,-17,
+ -17, 0, -5,-14,-21,-12, 17,-13,
+ 3, -6, -3, -3, -2,-16,-11,-12,
+ -15,-14, -1, -2,-35, 5,-18, 0,
+ -6, 8, 5,-12, 12, 7, -6, -3,
+ 11,-28,-28, -3,-18,-29, -5,-13,
+ -12, 11, -2, -5, 6, -9, -6, 7,
+ -9,-11,-14, -2, 12, 5,-21,-23,
+ -4, -4, -6, -6,-21,-25, 0,-18,
+ -26, 10, -7,-13, 3, 39,-39, -4
+ });
+}
+
BOOST_AUTO_TEST_SUITE_END()
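One detail worth noting when reading the JSON fixtures above: the flatbuffer "buffers" hold raw unsigned bytes, so INT8 tensor data such as 157 or 201 is reinterpreted as two's-complement int8 (-99 and -55) when the model is parsed. A minimal sketch of that reinterpretation, not part of the patch and assuming two's-complement int8:

```cpp
// Minimal sketch (not part of the patch): flatbuffer buffers store raw bytes,
// so INT8 tensor data in the JSON fixtures is listed as unsigned values and
// reinterpreted as signed int8 by the parser.
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // First few filter bytes from the PerChannelConv2DFixture buffer.
    const std::vector<uint8_t> rawBytes { 157, 201, 86, 129 };
    for (uint8_t byte : rawBytes)
    {
        // Prints: -99 -55 86 -127
        std::cout << static_cast<int>(static_cast<int8_t>(byte)) << " ";
    }
    std::cout << std::endl;
    return 0;
}
```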