ArmNN
 21.05
DepthwiseConvolution2D.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <boost/test/unit_test.hpp>
8 #include "../TfLiteParser.hpp"
9 
10 #include <string>
11 #include <iostream>
12 
13 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
14 
15 struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
16 {
17  explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
18  const std::string& outputShape,
19  const std::string& filterShape,
20  const std::string& filterData,
21  const std::string& strides,
22  const std::string& paddingType,
23  const std::string biasShape = "",
24  const std::string biasData = "")
25  {
26  std::string inputTensors = "[ 0, 2 ]";
27  std::string biasTensor = "";
28  std::string biasBuffer = "";
29  if (biasShape.size() > 0 && biasData.size() > 0)
30  {
31  inputTensors = "[ 0, 2, 3 ]";
32  biasTensor = R"(
33  {
34  "shape": )" + biasShape + R"( ,
35  "type": "INT32",
36  "buffer": 3,
37  "name": "biasTensor",
38  "quantization": {
39  "min": [ 0.0 ],
40  "max": [ 255.0 ],
41  "scale": [ 1.0 ],
42  "zero_point": [ 0 ],
43  }
44  } )";
45  biasBuffer = R"(
46  { "data": )" + biasData + R"(, }, )";
47  }
48  m_JsonString = R"(
49  {
50  "version": 3,
51  "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
52  "subgraphs": [ {
53  "tensors": [
54  {
55  "shape": )" + inputShape + R"(,
56  "type": "UINT8",
57  "buffer": 0,
58  "name": "inputTensor",
59  "quantization": {
60  "min": [ 0.0 ],
61  "max": [ 255.0 ],
62  "scale": [ 1.0 ],
63  "zero_point": [ 0 ],
64  }
65  },
66  {
67  "shape": )" + outputShape + R"(,
68  "type": "UINT8",
69  "buffer": 1,
70  "name": "outputTensor",
71  "quantization": {
72  "min": [ 0.0 ],
73  "max": [ 511.0 ],
74  "scale": [ 2.0 ],
75  "zero_point": [ 0 ],
76  }
77  },
78  {
79  "shape": )" + filterShape + R"(,
80  "type": "UINT8",
81  "buffer": 2,
82  "name": "filterTensor",
83  "quantization": {
84  "min": [ 0.0 ],
85  "max": [ 255.0 ],
86  "scale": [ 1.0 ],
87  "zero_point": [ 0 ],
88  }
89  }, )" + biasTensor + R"(
90  ],
91  "inputs": [ 0 ],
92  "outputs": [ 1 ],
93  "operators": [
94  {
95  "opcode_index": 0,
96  "inputs": )" + inputTensors + R"(,
97  "outputs": [ 1 ],
98  "builtin_options_type": "DepthwiseConv2DOptions",
99  "builtin_options": {
100  "padding": ")" + paddingType + R"(",
101  "stride_w": )" + strides+ R"(,
102  "stride_h": )" + strides+ R"(,
103  "depth_multiplier": 1,
104  "fused_activation_function": "NONE"
105  },
106  "custom_options_format": "FLEXBUFFERS"
107  }
108  ],
109  } ],
110  "buffers" : [
111  { },
112  { },
113  { "data": )" + filterData + R"(, }, )"
114  + biasBuffer + R"(
115  ]
116  }
117  )";
118  SetupSingleInputSingleOutput("inputTensor", "outputTensor");
119  }
120 };
121 
122 struct DepthwiseConvolution2dSameFixture : DepthwiseConvolution2dFixture
123 {
124  DepthwiseConvolution2dSameFixture()
125  : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
126  "[ 1, 3, 3, 1 ]", // outputShape
127  "[ 1, 3, 3, 1 ]", // filterShape
128  "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
129  "1", // stride w and h
130  "SAME") // padding type
131  {}
132 };
133 
134 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
135 {
136  RunTest<4, armnn::DataType::QAsymmU8>(
137  0,
138  { 0, 1, 2,
139  3, 4, 5,
140  6, 7, 8 },
141  // the expected values were generated using the example python implementation at
142  // https://eli.thegreenplace.net/2018/depthwise-separable-convolutions-for-machine-learning/
143  // divide the expected values by the output scale, as it is not 1.0
144  { 14/2, 35/2, 38/2,
145  57/2, 120/2, 111/2,
146  110/2, 197/2, 158/2 });
147 }
148 
149 struct DepthwiseConvolution2dValidFixture : DepthwiseConvolution2dFixture
150 {
151  DepthwiseConvolution2dValidFixture ()
152  : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
153  "[ 1, 1, 1, 1 ]", // outputShape
154  "[ 1, 3, 3, 1 ]", // filterShape
155  "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
156  "1", // stride w and h
157  "VALID") // padding type
158  {}
159 };
160 
161 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
162 {
163  RunTest<4, armnn::DataType::QAsymmU8>(
164  0,
165  { 0, 1, 2,
166  3, 4, 5,
167  6, 7, 8 },
168  // divide the expected values by the output scale, as it is not 1.0
169  { 120/2 });
170 }
171 
172 struct DepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
173 {
174  DepthwiseConvolution2dSameBiasFixture()
175  : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
176  "[ 1, 3, 3, 1 ]", // outputShape
177  "[ 1, 3, 3, 1 ]", // filterShape
178  "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
179  "1", // stride w and h
180  "SAME", // padding type
181  "[ 1 ]", // biasShape
182  "[ 10, 0, 0, 0 ]") // biasData
183  {}
184 };
185 
186 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
187 {
188  RunTest<4, armnn::DataType::QAsymmU8>(
189  0,
190  { 0, 1, 2,
191  3, 4, 5,
192  6, 7, 8 },
193  // divide the expected values by the output scale, as it is not 1.0
194  { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
195  ( 57+10)/2, (120+10)/2, (111+10)/2,
196  (110+10)/2, (197+10)/2, (158+10)/2 });
197 }
198 
199 struct DynamicDepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
200 {
201  DynamicDepthwiseConvolution2dSameBiasFixture()
202  : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]", // inputShape
203  "[ ]", // outputShape
204  "[ 1, 3, 3, 1 ]", // filterShape
205  "[ 9,8,7, 6,5,4, 3,2,1 ]", // filterData
206  "1", // stride w and h
207  "SAME", // padding type
208  "[ 1 ]", // biasShape
209  "[ 10, 0, 0, 0 ]") // biasData
210  {}
211 };
212 
213 BOOST_FIXTURE_TEST_CASE(ParseDynamicDepthwiseConv2DSameBias, DynamicDepthwiseConvolution2dSameBiasFixture)
214 {
215  RunTest<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(0,
216  { { "inputTensor", { 0, 1, 2,
217  3, 4, 5,
218  6, 7, 8 } } },
219  { { "outputTensor", { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
220  ( 57+10)/2, (120+10)/2, (111+10)/2,
221  (110+10)/2, (197+10)/2, (158+10)/2 } } },
222  true);
223 }
224 
225 struct DepthwiseConvolution2dFixture2 : public ParserFlatbuffersFixture
226 {
227  explicit DepthwiseConvolution2dFixture2(const std::string& inputShape,
228  const std::string& outputShape,
229  const std::string& filterShape,
230  const std::string& filterData,
231  const std::string& strides,
232  const std::string& paddingType,
233  const std::string biasShape = "",
234  const std::string biasData = "",
235  const std::string filter_quant_min = "[ 0.0 ]",
236  const std::string filter_quant_max = "[ 255.0 ]",
237  const std::string filter_quant_scale = "[ 1.0 ]",
238  const std::string filter_quant_zero_point = "[ 0 ]",
239  const std::string filter_quant_axis = "",
240  const std::string output_scale = "[ 1.0 ]")
241  {
242  std::string inputTensors = "[ 0, 2 ]";
243  std::string biasTensor = "";
244  std::string biasBuffer = "";
245  if (biasShape.size() > 0 && biasData.size() > 0)
246  {
247  inputTensors = "[ 0, 2, 3 ]";
248  biasTensor = R"(
249  {
250  "shape": )" + biasShape + R"( ,
251  "type": "INT32",
252  "buffer": 3,
253  "name": "biasTensor",
254  "quantization": {
255  "min": [ 0.0 ],
256  "max": [ 255.0 ],
257  "scale": [ 1.0 ],
258  "zero_point": [ 0 ],
259  }
260  } )";
261  biasBuffer = R"(
262  { "data": )" + biasData + R"(, }, )";
263  }
264 
265  std::string filter_qantization =
266  R"(
267  "min": )" + filter_quant_min + R"(,
268  "max": )" + filter_quant_max + R"(,
269  "scale": )" + filter_quant_scale + R"(,
270  "zero_point": )" + filter_quant_zero_point;
271  // A given quantization axis indicates if per channel quantization is used for filters
272  if (filter_quant_axis.size() > 0)
273  {
274  filter_qantization +=
275  R"(,
276  "quantized_dimension": )" + filter_quant_axis;
277  }
278  m_JsonString = R"(
279  {
280  "version": 3,
281  "operator_codes": [ { "builtin_code": "DEPTHWISE_CONV_2D" } ],
282  "subgraphs": [ {
283  "tensors": [
284  {
285  "shape": )" + inputShape + R"(,
286  "type": "INT8",
287  "buffer": 0,
288  "name": "inputTensor",
289  "quantization": {
290  "min": [ 0.0 ],
291  "max": [ 255.0 ],
292  "scale": [ 1.0 ],
293  "zero_point": [ 0 ],
294  }
295  },
296  {
297  "shape": )" + outputShape + R"(,
298  "type": "INT8",
299  "buffer": 1,
300  "name": "outputTensor",
301  "quantization": {
302  "min": [ 0.0 ],
303  "max": [ 511.0 ],
304  "scale": )" + output_scale + R"(,
305  "zero_point": [ 0 ],
306  }
307  },
308  {
309  "shape": )" + filterShape + R"(,
310  "type": "INT8",
311  "buffer": 2,
312  "name": "filterTensor",
313  "quantization": {)" + filter_qantization + R"(
314  }
315  }, )" + biasTensor + R"(
316  ],
317  "inputs": [ 0 ],
318  "outputs": [ 1 ],
319  "operators": [
320  {
321  "opcode_index": 0,
322  "inputs": )" + inputTensors + R"(,
323  "outputs": [ 1 ],
324  "builtin_options_type": "DepthwiseConv2DOptions",
325  "builtin_options": {
326  "padding": ")" + paddingType + R"(",
327  "stride_w": )" + strides+ R"(,
328  "stride_h": )" + strides+ R"(,
329  "depth_multiplier": 1,
330  "fused_activation_function": "NONE"
331  },
332  "custom_options_format": "FLEXBUFFERS"
333  }
334  ],
335  } ],
336  "buffers" : [
337  { },
338  { },
339  { "data": )" + filterData + R"(, }, )"
340  + biasBuffer + R"(
341  ]
342  }
343  )";
344  SetupSingleInputSingleOutput("inputTensor", "outputTensor");
345  }
346 };
347 
348 
349 // No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
350 struct DepthwiseConvolution2dNoQuantFixture : DepthwiseConvolution2dFixture2
351 {
352  DepthwiseConvolution2dNoQuantFixture()
353  : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]", // inputShape
354  "[ 1, 3, 3, 3 ]", // outputShape
355  "[ 1, 3, 3, 3 ]", // filterShape
356  "[ 9,8,7, 6,5,4, 3,2,1, "
357  "9,8,7, 6,5,4, 3,2,1, "
358  "9,8,7, 6,5,4, 3,2,1 ]", // filterData
359  "1", // stride w and h
360  "SAME", // padding type
361  "", // bias shape
362  "" // bias data
363  )
364  {}
365 };
366 
367 // No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
368 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DNoQuant, DepthwiseConvolution2dNoQuantFixture)
369 {
370  RunTest<4, armnn::DataType::QAsymmS8>(
371  0,
372  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
373  { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
374  36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
375 }
376 
377 // Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
378 struct DepthwiseConvolution2dNoChannelQuantFixture : DepthwiseConvolution2dFixture2
379 {
380  DepthwiseConvolution2dNoChannelQuantFixture()
381  : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]", // inputShape
382  "[ 1, 3, 3, 3 ]", // outputShape
383  "[ 1, 3, 3, 3 ]", // filterShape
384  "[ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]", //filterData
385  "1", // stride w and h
386  "SAME", // padding type
387  "", // bias shape
388  "", // bias data
389  "[ 0.0 ]", // filter quantization min values
390  "[ 255.0 ]", // filter quantization max values
391  "[ 1.0, 1.0, 1.0]", // filter quantization scales
392  "[ 0, 0, 0]", // filter quantization zero-points
393  "3" // filter quantized axis
394  // (in case of per channel quantization)
395  )
396  {}
397 };
398 
399 // Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
400 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterNoChannelQuant, DepthwiseConvolution2dNoChannelQuantFixture)
401 {
402  RunTest<4, armnn::DataType::QAsymmS8>(
403  0,
404  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
405  { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
406  36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
407 }
408 
409 // Uses per channel quantization on weights but all scales are set to the same value
410 struct DepthwiseConvolution2dWeightsPerChannelQuantFixture : DepthwiseConvolution2dFixture2
411 {
412  DepthwiseConvolution2dWeightsPerChannelQuantFixture()
413  : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]", // inputShape
414  "[ 1, 3, 3, 3 ]", // outputShape
415  "[ 1, 3, 3, 3 ]", // filterShape
416  // filterData is [ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]
417  // quantized per channel with q_dim=3
418  "[36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, "
419  "20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12, 8, 4]",
420  "1", // stride w and h
421  "SAME", // padding type
422  "", // bias shape
423  "", // bias data
424  "[ 0.0 ]", // filter quantization min values
425  "[ 255.0 ]", // filter quantization max values
426  "[ 0.25, 0.25, 0.25]", // filter quantization scales
427  "[ 0, 0, 0]", // filter quantization zero-points
428  "3" // filter quantized axis
429  // (in case of per channel quantization)
430  )
431  {}
432 };
433 
434 // Weights are per channel quantized but all scales are set to the same value
435 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant,
436  DepthwiseConvolution2dWeightsPerChannelQuantFixture)
437 {
438  RunTest<4, armnn::DataType::QAsymmS8>(
439  0,
440  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
441  { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
442  36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
443 }
444 
445 // Uses per channel quantization on weights all scales are different in this test
446 struct DepthwiseConvolution2dWeightsPerChannelQuant1Fixture : DepthwiseConvolution2dFixture2
447 {
448  DepthwiseConvolution2dWeightsPerChannelQuant1Fixture()
449  : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]", // inputShape
450  "[ 1, 3, 3, 3 ]", // outputShape
451  "[ 1, 3, 3, 3 ]", // filterShape
452  // filterData is [ 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1, 9,8,7, 6,5,4, 3,2,1 ]
453  // quantized per channel with q_dim=3
454  "[36, 40, 70, 24, 25, 40, 12, 10, 10, 36, 40, 70, 24, "
455  "25, 40, 12, 10, 10, 36, 40, 70, 24, 25, 40, 12, 10, 10]",
456  "1", // stride w and h
457  "SAME", // padding type
458  "", // bias shape
459  "", // bias data
460  "[ 0.0 ]", // filter quantization min values
461  "[ 255.0 ]", // filter quantization max values
462  "[ 0.25, 0.2, 0.1]", // filter quantization scales
463  "[ 0, 0, 0]", // filter quantization zero-points
464  "3" // filter quantized axis
465  // (in case of per channel quantization)
466  )
467  {}
468 };
469 
470 // Uses per channel quantization on weights all scales are different in this test
471 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1,
472  DepthwiseConvolution2dWeightsPerChannelQuant1Fixture)
473 {
474  RunTest<4, armnn::DataType::QAsymmS8>(
475  0,
476  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
477  { 18, 14, 10, 36, 30, 24, 30, 26, 22, 27, 21, 15, 54, 45,
478  36, 45, 39, 33, 18, 14, 10, 36, 30, 24, 30, 26, 22});
479 }
480 
481 
482 // Uses per channel quantization on weights all scales are different in this test
483 // Uses different shape for weights and input compared to the other tests above
484 struct DepthwiseConvolution2dWeightsPerChannelQuant2Fixture : DepthwiseConvolution2dFixture2
485 {
486  DepthwiseConvolution2dWeightsPerChannelQuant2Fixture()
487  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
488  "[ 1, 4, 4, 4 ]", // outputShape
489  "[ 1, 2, 2, 4 ]", // filterShape
490  // filterData is [ 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3 ]
491  // quantized per channel with q_dim=3
492  "[36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10]",
493  "1", // stride w and h
494  "SAME", // padding type
495  "", // bias shape
496  "", // bias data
497  "[ 0.0 ]", // filter quantization min values
498  "[ 255.0 ]", // filter quantization max values
499  "[ 0.25, 0.2, 0.1, 0.3]", // filter quantization scales
500  "[ 0, 0, 0, 0]", // filter quantization zero-points
501  "3" // filter quantized axis
502  // (in case of per channel quantization)
503  )
504  {}
505 };
506 
507 // Uses per channel quantization on weights all scales are different in this test
508 // Uses different shape for weights and input compared to the other tests above
509 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant2,
510  DepthwiseConvolution2dWeightsPerChannelQuant2Fixture)
511 {
512  RunTest<4, armnn::DataType::QAsymmS8>(
513  0,
514  { 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
515  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
516  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
517  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1},
518  { 21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
519  21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
520  21, 26, 22, 18, 21, 26, 22, 18, 21, 26, 22, 18, 10, 17, 15, 13,
521  14, 12, 10, 8, 14, 12, 10, 8, 14, 12, 10, 8, 9, 8, 7, 6});
522 }
523 
524 // Test for depthwise_multiplier different to one (M > 1)
525 struct DepthwiseConvolution2dWeightsPerChannelQuant4Fixture : DepthwiseConvolution2dFixture2
526 {
527  DepthwiseConvolution2dWeightsPerChannelQuant4Fixture()
528  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
529  "[ 1, 4, 4, 16 ]", // outputShape
530  "[ 1, 2, 2, 16 ]", // filterShape
531  // filter data is [ 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
532  // 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
533  // 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3,
534  // 9,8,7,6, 5,4,3,2, 1,9,8,7, 6,5,4,3 ]
535  // quantized per channel with q_dim=3
536  "[36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
537  "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
538  "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10, "
539  "36, 40, 70, 20, 20, 20, 30, 6, 4, 45, 80, 23, 24, 25, 40, 10]",
540  "1", // stride w and h
541  "SAME", // padding type
542  "", // bias shape
543  "", // bias data
544  "[ 0.0 ]", // filter quantization min values
545  "[ 255.0 ]", // filter quantization max values
546  "[ 0.25, 0.2, 0.1, 0.3,"
547  "0.25, 0.2, 0.1, 0.3,"
548  "0.25, 0.2, 0.1, 0.3,"
549  "0.25, 0.2, 0.1, 0.3]", // filter quantization scales
550  "[ 0, 0, 0, 0]", // filter quantization zero-points
551  "3" // filter quantized axis
552  // (in case of per channel quantization)
553  )
554  {}
555 };
556 
557 // Test for depthwise_multiplier different to one (M > 1)
558 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4,
559  DepthwiseConvolution2dWeightsPerChannelQuant4Fixture)
560 {
561  RunTest<4, armnn::DataType::QAsymmS8>(
562  0,
563  { 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
564  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
565  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
566  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1},
567  { 36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
568  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
569  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
570  18, 16, 14, 12, 10, 8, 6, 4, 2, 18, 16, 14, 12, 10, 8, 6,
571  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
572  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
573  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
574  18, 16, 14, 12, 10, 8, 6, 4, 2, 18, 16, 14, 12, 10, 8, 6,
575  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
576  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
577  36, 32, 28, 24, 20, 16, 12, 8, 4, 36, 32, 28, 24, 20, 16, 12,
578  18, 16, 14, 12, 10, 8, 6, 4, 2, 18, 16, 14, 12, 10, 8, 6,
579  18, 16, 14, 12, 10, 8, 6, 4, 2, 18, 16, 14, 12, 10, 8, 6,
580  18, 16, 14, 12, 10, 8, 6, 4, 2, 18, 16, 14, 12, 10, 8, 6,
581  18, 16, 14, 12, 10, 8, 6, 4, 2, 18, 16, 14, 12, 10, 8, 6,
582  9, 8, 7, 6, 5, 4, 3, 2, 1, 9, 8, 7, 6, 5, 4, 3});
583 }
584 
585 
586 struct DepthwiseConvolution2dWeightsPerChannelQuant6Fixture : DepthwiseConvolution2dFixture2
587 {
588  DepthwiseConvolution2dWeightsPerChannelQuant6Fixture()
589  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
590  "[ 1, 4, 4, 16 ]", // outputShape
591  "[ 1, 2, 2, 16 ]", // filterShape
592  // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
593  // 2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
594  // 3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
595  // 1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0]
596  // quantized per channel with q_dim=3
597  "[12,20,10, 3, 4,15,30, 6, 4,20,30,12, 4,10,20,12,"
598  " 8, 0,30, 3, 0,10,40, 9,16,15, 0, 3,12,20,40, 3,"
599  " 12,15,20, 0, 0, 0,10, 9,12,10,40,12,12, 5,10, 9,"
600  " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
601  "1", // stride w and h
602  "SAME", // padding type
603  "", // bias shape
604  "", // bias data
605  "[ 0.0 ]", // filter quantization min values
606  "[ 255.0 ]", // filter quantization max values
607  "[ 0.25, 0.2, 0.1, 0.333333333,"
608  "0.25, 0.2, 0.1, 0.333333333,"
609  "0.25, 0.2, 0.1, 0.333333333,"
610  "0.25, 0.2, 0.1, 0.333333333]", // filter quantization scales
611  "[ 0, 0, 0, 0]", // filter quantization zero-points
612  "3" // filter quantized axis
613  // (in case of per channel quantization)
614  )
615  {}
616 };
617 
618 
619 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant6,
620  DepthwiseConvolution2dWeightsPerChannelQuant6Fixture)
621 {
622  RunTest<4, armnn::DataType::QAsymmS8>(
623  0,
624  { 1,0,1,2,0,4,4,0,2,1,2,0,1,3,3,0,
625  1,2,2,3,3,4,1,1,2,4,1,3,4,2,0,2,
626  0,3,1,3,4,3,2,0,1,2,3,3,0,2,4,2,
627  1,2,1,4,3,4,1,3,1,0,2,3,1,3,2,0},
628  { 9, 7, 3, 7,12, 8,22,22,27,22,13,17,13,10, 9,17,
629  15, 9,12, 6,16,14,24,27,19,26,18,23, 9,10, 7, 3,
630  18,14, 9,11, 7, 9,21,25,17,19,10,15,13, 9, 7, 9,
631  15,16, 9, 1, 3, 9,11,12, 3,12, 9,12, 6, 2, 2, 6,
632  13, 4,10,12,11,14,28,28,17,17,14,15,15,13,13,22,
633  26,24,17, 7,10,20,33,31,23,17,17,16,16,23,20, 7,
634  17,11,16, 6,10,16,24,22,26,18,23,20,22,23,21,23,
635  12,16, 4, 4, 2, 6, 8,10,12, 8,16,16, 8, 6, 6,14,
636  14, 3,14,10,15,15,27,25,16,14, 9,11,21,19,16,24,
637  24,25,13, 7, 3,13,21,24,25,23,14,17,24,24,21,12,
638  7, 7, 3, 3,11,10,17,13,33,32,21,26,18,17,17,23,
639  3, 3, 2, 0, 2, 6, 9,13,10,20,20,24, 2, 4, 4, 8,
640  9, 4,10, 4, 2,14,22,16, 5, 7, 3, 5,13,20,20,19,
641  11,12, 6, 4, 4,12,12, 8, 9,10, 3, 6,12,18,18,15,
642  5, 4, 4, 2, 0, 6,12, 9,10,14, 6,10, 3, 6, 6,12,
643  3, 4, 1, 1, 3, 9, 9, 6, 2, 8, 6, 8, 0, 0, 0, 0});
644 }
645 
646 
647 struct DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture : DepthwiseConvolution2dFixture2
648 {
649  DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture()
650  : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]", // inputShape
651  "[ 1, 3, 3, 3 ]", // outputShape
652  "[ 1, 3, 3, 3 ]", // filterShape
653  // filterData is [ 1,4,0,2,4,3,1,0,1,
654  // 3,0,4,0,1,3,4,2,4,
655  // 3,0,3,4,4,0,3,4,2]
656  // quantized per channel with q_dim=3
657  "[ 4,20, 0, 8,20,30, 4, 0,10,12,"
658  " 0,40, 0, 5,30,16,10,40,12, 0,"
659  "30,16,20, 0,12,20,20]",
660  "1", // stride w and h
661  "SAME", // padding type
662  "", // bias shape
663  "", // bias data
664  "[ 0.0 ]", // filter quantization min values
665  "[ 255.0 ]", // filter quantization max values
666  "[ 0.25, 0.2, 0.1]", // filter quantization scales
667  "[ 0, 0, 0]", // filter quantization zero-points
668  "3" // filter quantized axis
669  // (in case of per channel quantization)
670  )
671  {}
672 };
673 
674 
675 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1,
676  DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture)
677 {
678  RunTest<4, armnn::DataType::QAsymmS8>(
679  0,
680  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
681  { 11,11, 9,17,11,16,10, 5,10,
682  14,15,13,21,19,20,13,13,13,
683  7, 7,11,11,11,15, 6, 9,10});
684 }
685 
686 // Same with input different to 1
687 struct DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture : DepthwiseConvolution2dFixture2
688 {
689  DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture()
690  : DepthwiseConvolution2dFixture2("[ 1, 3, 3, 3 ]", // inputShape
691  "[ 1, 3, 3, 3 ]", // outputShape
692  "[ 1, 3, 3, 3 ]", // filterShape
693  // filterData is [ 1,4,0,2,4,3,1,0,1,
694  // 3,0,4,0,1,3,4,2,4,
695  // 3,0,3,4,4,0,3,4,2]
696  // quantized per channel with q_dim=3
697  "[ 4,20, 0, 8,20,30, 4, 0,10,12,"
698  " 0,40, 0, 5,30,16,10,40,12, 0,"
699  "30,16,20, 0,12,20,20]",
700  "1", // stride w and h
701  "SAME", // padding type
702  "", // bias shape
703  "", // bias data
704  "[ 0.0 ]", // filter quantization min values
705  "[ 255.0 ]", // filter quantization max values
706  "[ 0.25, 0.2, 0.1]", // filter quantization scales
707  "[ 0, 0, 0]", // filter quantization zero-points
708  "3" // filter quantized axis
709  // (in case of per channel quantization)
710  )
711  {}
712 };
713 
714 
715 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2,
716  DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture)
717 {
718  RunTest<4, armnn::DataType::QAsymmS8>(
719  0,
720  { 3,2,0,0,4,3,0,1,2,
721  0,1,3,0,4,2,2,2,3,
722  2,4,3,2,0,4,3,4,0},
723  { 0,30,16,15,30,32, 8, 9,24,
724  20,33,28,34,48,50,18,38,35,
725  8, 8,36,20,28,33,10,28,25});
726 }
727 
728 
729 struct DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture : DepthwiseConvolution2dFixture2
730 {
731  DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture()
732  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
733  "[ 1, 4, 4, 16 ]", // outputShape
734  "[ 1, 2, 2, 16 ]", // filterShape
735  // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
736  // 2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
737  // 3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
738  // 1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
739  // quantized per channel with q_dim=3
740  "[12,20,10, 3, 4,15,30, 6, 4,20,30,13, 4,10,20,13,"
741  " 8, 0,30, 3, 0,10,40,10,16,15, 0, 3,12,20,40, 3,"
742  " 12,15,20, 0, 0, 0,10,10,12,10,40,13,12, 5,10,10,"
743  " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
744  "1", // stride w and h
745  "SAME", // padding type
746  "", // bias shape
747  "", // bias data
748  "[ 0.0 ]", // filter quantization min values
749  "[ 255.0 ]", // filter quantization max values
750  "[ 0.25, 0.2, 0.1, 0.3,"
751  "0.25, 0.2, 0.1, 0.3,"
752  "0.25, 0.2, 0.1, 0.3,"
753  "0.25, 0.2, 0.1, 0.3]", // filter quantization scales
754  "[ 0, 0, 0, 0]", // filter quantization zero-points
755  "3" // filter quantized axis
756  // (in case of per channel quantization)
757  )
758  {}
759 };
760 
761 
762 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1,
763  DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture)
764 {
765  RunTest<4, armnn::DataType::QAsymmS8>(
766  0,
767  { 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
768  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
769  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1,
770  1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1},
771  { 9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
772  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
773  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
774  6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
775  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
776  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
777  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
778  6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
779  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
780  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
781  9, 7, 6, 4, 4, 5, 9, 9,12,11, 9,10, 9,10, 9, 8,
782  6, 7, 3, 1, 1, 3, 4, 5, 4, 6, 7, 8, 4, 3, 3, 7,
783  5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
784  5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
785  5, 4, 4, 2, 1, 5, 7, 5, 5, 7, 3, 5, 4, 6, 6, 5,
786  3, 4, 1, 1, 1, 3, 3, 2, 1, 4, 3, 4, 1, 2, 2, 4});
787 }
788 
789 
790 
791 struct DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture : DepthwiseConvolution2dFixture2
792 {
793  DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture()
794  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
795  "[ 1, 4, 4, 16 ]", // outputShape
796  "[ 1, 2, 2, 16 ]", // filterShape
797  // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
798  // 2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
799  // 3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
800  // 1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
801  // quantized per channel with q_dim=3
802  "[12,20,10, 3, 4,15,30, 6, 4,20,30,13, 4,10,20,13,"
803  " 8, 0,30, 3, 0,10,40,10,16,15, 0, 3,12,20,40, 3,"
804  " 12,15,20, 0, 0, 0,10,10,12,10,40,13,12, 5,10,10,"
805  " 4, 0, 0, 6,12, 0,10, 3,16,10,20, 3, 8,15,20, 0]",
806  "1", // stride w and h
807  "SAME", // padding type
808  "", // bias shape
809  "", // bias data
810  "[ 0.0 ]", // filter quantization min values
811  "[ 255.0 ]", // filter quantization max values
812  "[ 0.25, 0.2, 0.1, 0.3,"
813  "0.25, 0.2, 0.1, 0.3,"
814  "0.25, 0.2, 0.1, 0.3,"
815  "0.25, 0.2, 0.1, 0.3]", // filter quantization scales
816  "[ 0, 0, 0, 0]", // filter quantization zero-points
817  "3" // filter quantized axis
818  // (in case of per channel quantization)
819  )
820  {}
821 };
822 
823 
824 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2,
825  DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture)
826 {
827  RunTest<4, armnn::DataType::QAsymmS8>(
828  0,
829  { 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
830  3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
831  3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
832  4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2},
833  { 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
834  16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
835  12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
836  0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
837  20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
838  18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
839  27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
840  9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
841  26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
842  20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
843  28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
844  12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
845  14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
846  9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
847  11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
848  3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
849 }
850 
851 
852 struct DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture : DepthwiseConvolution2dFixture2
853 {
854  DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture()
855  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
856  "[ 1, 4, 4, 16 ]", // outputShape
857  "[ 1, 2, 2, 16 ]", // filterShape
858  // filter data is [ 1, 4, 9, 16, 25, 36,
859  // 49, 64, 81, 100, 121, 144,
860  // 169, 196, 225, 256, 17, 36,
861  // 57, 80, 105, 132, 161, 192,
862  // 225, 260, 297, 336, 377, 420,
863  // 465, 512, 33, 68, 105, 144,
864  // 185, 228, 273, 320, 369, 420,
865  // 473, 528, 585, 644, 705, 768,
866  // 49, 100, 153, 208, 265, 324,
867  // 385, 448, 513, 580, 649, 720,
868  // 793, 868, 945,1024 ]
869  // quantized per channel with q_dim=3
870  "[ 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,"
871  " 17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,"
872  " 33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,"
873  "49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64]",
874  "1", // stride w and h
875  "SAME", // padding type
876  "", // bias shape
877  "", // bias data
878  "[ 0.0 ]", // filter quantization min values
879  "[ 255.0 ]", // filter quantization max values
880  "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16]", // filter quantization scales
881  "[ 0, 0, 0, 0]", // filter quantization zero-points
882  "3", // filter quantized axis
883  // (in case of per channel quantization)
884  "[ 100.0 ]" // output scale
885  )
886  {}
887 };
888 
889 // Test for depthwise_multiplier different to one (M > 1)
890 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5,
891  DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture)
892 {
893  RunTest<4, armnn::DataType::QAsymmS8>(
894  0,
895  { 1,1,1,2,2,2,1,2,1,2,2,1,2,2,1,1,1,1,1,1,1,2,2,2,
896  1,2,2,2,1,1,1,2,1,1,1,1,2,1,2,1,2,1,1,2,1,2,1,1,
897  1,2,2,1,2,2,1,1,2,1,2,1,1,2,1,2},
898  { 1, 2, 3, 5, 9,11,14,16,17,19,21,24,32,36,39,43,
899  1, 2, 3, 4,11,14,17,20,22,26,29,33,34,38,42,46,
900  1, 2, 3, 5, 8,11,13,16,16,18,21,24,33,36,39,43,
901  0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6,13,14,16,17,
902  1, 3, 4, 6, 6, 8,10,12,19,22,24,27,23,25,28,30,
903  1, 3, 5, 8, 7, 8,10,12,18,21,24,27,32,36,39,43,
904  1, 2, 4, 5, 8,10,13,15,12,14,16,18,30,33,37,40,
905  0, 0, 1, 1, 3, 4, 5, 7, 4, 5, 5, 6, 9,10,11,12,
906  1, 3, 5, 7,10,12,15,17,17,20,23,25,19,21,23,25,
907  2, 4, 6, 8, 7, 9,11,13,17,20,23,25,23,25,28,30,
908  1, 2, 4, 6, 9,11,14,16,15,17,20,22,28,31,35,38,
909  0, 0, 1, 1, 4, 5, 6, 7, 4, 5, 5, 6,13,14,16,17,
910  0, 0, 1, 1, 2, 3, 4, 5, 3, 4, 5, 6, 5, 6, 6, 7,
911  0, 0, 1, 1, 1, 2, 2, 3, 5, 6, 7, 8, 5, 6, 6, 7,
912  0, 0, 0, 1, 2, 3, 3, 4, 3, 4, 5, 6, 9,10,11,12,
913  0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 3, 3, 4, 5});
914 }
915 
916 
917 struct DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture : DepthwiseConvolution2dFixture2
918 {
919  DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture()
920  : DepthwiseConvolution2dFixture2("[ 1, 4, 4, 4 ]", // inputShape
921  "[ 1, 4, 4, 16 ]", // outputShape
922  "[ 1, 2, 2, 16 ]", // filterShape
923  // filter data is [ 3,4,1,1,1,3,3,2,1,4,3,4,1,2,2,4,
924  // 2,0,3,1,0,2,4,3,4,3,0,1,3,4,4,1,
925  // 3,3,2,0,0,0,1,3,3,2,4,4,3,1,1,3,
926  // 1,0,0,2,3,0,1,1,4,2,2,1,2,3,2,0 ]
927  // quantized per channel with q_dim=3
928  "[12,20,10, 3, 2,24, 9,10, 5,16,30,12, 3,10, 4,32,"
929  " 8, 0,30, 3, 0,16,12,15,20,12, 0, 3, 9,20, 8, 8,"
930  " 12,15,20, 0, 0, 0, 3,15,15, 8,40,12, 9, 5, 2,24,"
931  " 4, 0, 0, 6, 6, 0, 3, 5,20, 8,20, 3, 6,15, 4, 0]",
932  "1", // stride w and h
933  "SAME", // padding type
934  "", // bias shape
935  "", // bias data
936  "[ 0.0 ]", // filter quantization min values
937  "[ 255.0 ]", // filter quantization max values
938  "[0.25, 0.2, 0.1, 0.3333333333, "
939  "0.5, 0.125, 0.33333333, 0.2, "
940  "0.2, 0.25, 0.1, 0.333333333, "
941  "0.3333333333, 0.2, 0.5, 0.125]", // filter quantization scales
942  "[ 0, 0, 0, 0]", // filter quantization zero-points
943  "3" // filter quantized axis
944  // (in case of per channel quantization)
945  )
946  {}
947 };
948 
949 // Test for depthwise_multiplier different to one (M > 1)
950 BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1,
951  DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture)
952 {
953  RunTest<4, armnn::DataType::QAsymmS8>(
954  0,
955  { 3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
956  3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
957  3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
958  4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2},
959  { 26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
960  16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
961  12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
962  0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
963  20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
964  18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
965  27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
966  9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
967  26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
968  20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
969  28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
970  12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
971  14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
972  9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
973  11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
974  3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
975 }
976 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
BOOST_AUTO_TEST_SUITE_END()
void SetupSingleInputSingleOutput(const std::string &inputName, const std::string &outputName)
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)