ArmNN 21.11
Conv2D.cpp File Reference
#include "ParserFlatbuffersFixture.hpp"
#include <sstream>


Functions

 TEST_SUITE ("TensorflowLiteParser_Conv2D")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "TensorflowLiteParser_Conv2D"  )

Definition at line 9 of file Conv2D.cpp.

References armnn::QAsymmU8, ParserFlatbuffersFixture::SetupSingleInputSingleOutput(), and TEST_CASE_FIXTURE().

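The expected output values in the test cases below all follow the same affine-quantization arithmetic: the integer accumulator (the sum of input times filter, with the filter zero point subtracted where it is non-zero, plus the bias) is rescaled by inputScale * filterScale / outputScale and offset by the output zero point, which is why the comments speak of taking half of the values when the output scale is 2. A minimal standalone sketch of that calculation, for orientation only; the helper name and signature are illustrative and are not part of the ArmNN or ParserFlatbuffersFixture API:

#include <algorithm>
#include <cstdint>

// Illustrative helper (not an ArmNN API): reproduces the arithmetic used to
// derive the expected outputs in the test cases below.
//   accumulator = sum(input[i] * (filter[i] - filterZeroPoint)) + bias
//   output      = accumulator * inputScale * filterScale / outputScale + outputZeroPoint
uint8_t RequantizeAccumulator(int32_t accumulator,
                              float inputScale,
                              float filterScale,
                              float outputScale,
                              int32_t outputZeroPoint)
{
    float rescaled = static_cast<float>(accumulator) * inputScale * filterScale / outputScale
                     + static_cast<float>(outputZeroPoint);
    // Clamp to the QAsymmU8 range before narrowing.
    rescaled = std::min(255.0f, std::max(0.0f, rescaled));
    return static_cast<uint8_t>(rescaled);
}

// For ParseSimpleConv2D (scales 1.0 / 1.0 / 2.0, all zero points 0) this yields 49,
// matching the single expected output in the test case:
// RequantizeAccumulator(1*2 + 2*1 + 3*0 + 4*6 + 5*2 + 6*1 + 7*4 + 8*1 + 9*2, 1.0f, 1.0f, 2.0f, 0);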
10 {
11 struct SimpleConv2DFixture : public ParserFlatbuffersFixture
12 {
13  explicit SimpleConv2DFixture()
14  {
15  m_JsonString = R"(
16  {
17  "version": 3,
18  "operator_codes": [ { "builtin_code": "CONV_2D" } ],
19  "subgraphs": [ {
20  "tensors": [
21  {
22  "shape": [ 1, 3, 3, 1 ],
23  "type": "UINT8",
24  "buffer": 0,
25  "name": "inputTensor",
26  "quantization": {
27  "min": [ 0.0 ],
28  "max": [ 255.0 ],
29  "scale": [ 1.0 ],
30  "zero_point": [ 0 ],
31  }
32  },
33  {
34  "shape": [ 1, 1, 1, 1 ],
35  "type": "UINT8",
36  "buffer": 1,
37  "name": "outputTensor",
38  "quantization": {
39  "min": [ 0.0 ],
40  "max": [ 511.0 ],
41  "scale": [ 2.0 ],
42  "zero_point": [ 0 ],
43  }
44  },
45  {
46  "shape": [ 1, 3, 3, 1 ],
47  "type": "UINT8",
48  "buffer": 2,
49  "name": "filterTensor",
50  "quantization": {
51  "min": [ 0.0 ],
52  "max": [ 255.0 ],
53  "scale": [ 1.0 ],
54  "zero_point": [ 0 ],
55  }
56  }
57  ],
58  "inputs": [ 0 ],
59  "outputs": [ 1 ],
60  "operators": [
61  {
62  "opcode_index": 0,
63  "inputs": [ 0, 2 ],
64  "outputs": [ 1 ],
65  "builtin_options_type": "Conv2DOptions",
66  "builtin_options": {
67  "padding": "VALID",
68  "stride_w": 1,
69  "stride_h": 1,
70  "fused_activation_function": "NONE"
71  },
72  "custom_options_format": "FLEXBUFFERS"
73  }
74  ],
75  } ],
76  "buffers" : [
77  { },
78  { },
79  { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
80  { },
81  ]
82  }
83  )";
84  SetupSingleInputSingleOutput("inputTensor", "outputTensor");
85  }
86 };
87 
88 TEST_CASE_FIXTURE(SimpleConv2DFixture, "ParseSimpleConv2D")
89 {
90  RunTest<4, armnn::DataType::QAsymmU8>(
91  0,
92  {
93  1, 2, 3,
94  4, 5, 6,
95  7, 8, 9,
96  },
97  // because of the output scaling we need to take half of the values
98  {
99  (1*2 + 2*1 + 3*0 +
100  4*6 + 5*2 + 6*1 +
101  7*4 + 8*1 + 9*2) /2
102  });
103 }
104 
105 struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
106 {
107  explicit Conv2DWithBiasesFixture(const std::string & inputShape,
108  const std::string & outputShape,
109  const std::string & filterShape,
110  const std::string & filterData,
111  const std::string & biasShape,
112  const std::string & biasData,
113  const std::string & strides,
114  const std::string & activation="NONE",
115  const std::string & filterScale="1.0",
116  const std::string & filterZeroPoint="0",
117  const std::string & outputScale="2.0",
118  const std::string & outputZeroPoint="0")
119  {
120  m_JsonString = R"(
121  {
122  "version": 3,
123  "operator_codes": [ { "builtin_code": "CONV_2D" } ],
124  "subgraphs": [ {
125  "tensors": [
126  {
127  "shape": )" + inputShape + R"(,
128  "type": "UINT8",
129  "buffer": 0,
130  "name": "inputTensor",
131  "quantization": {
132  "min": [ 0.0 ],
133  "max": [ 255.0 ],
134  "scale": [ 1.0 ],
135  "zero_point": [ 0 ],
136  }
137  },
138  {
139  "shape": )" + outputShape + R"(,
140  "type": "UINT8",
141  "buffer": 1,
142  "name": "outputTensor",
143  "quantization": {
144  "min": [ 0.0 ],
145  "max": [ 511.0 ],
146  "scale": [ )" + outputScale + R"( ],
147  "zero_point": [ )" + outputZeroPoint + R"( ],
148  }
149  },
150  {
151  "shape": )" + filterShape + R"( ,
152  "type": "UINT8",
153  "buffer": 2,
154  "name": "filterTensor",
155  "quantization": {
156  "min": [ 0.0 ],
157  "max": [ 255.0 ],
158  "scale": [ )" + filterScale + R"( ],
159  "zero_point": [ )" + filterZeroPoint + R"( ],
160  }
161  },
162  {
163  "shape": )" + biasShape + R"( ,
164  "type": "INT32",
165  "buffer": 3,
166  "name": "biasTensor",
167  "quantization": {
168  "min": [ 0.0 ],
169  "max": [ 255.0 ],
170  "scale": [ 1.0 ],
171  "zero_point": [ 0 ],
172  }
173  }
174  ],
175  "inputs": [ 0 ],
176  "outputs": [ 1 ],
177  "operators": [
178  {
179  "opcode_index": 0,
180  "inputs": [ 0, 2, 3 ],
181  "outputs": [ 1 ],
182  "builtin_options_type": "Conv2DOptions",
183  "builtin_options": {
184  "padding": "SAME",
185  "stride_w": )" + strides + R"(,
186  "stride_h": )" + strides + R"(,
187  "fused_activation_function": )" + activation + R"(
188  },
189  "custom_options_format": "FLEXBUFFERS"
190  }
191  ],
192  } ],
193  "buffers" : [
194  { },
195  { },
196  { "data": )" + filterData + R"(, },
197  { "data": )" + biasData + R"(, },
198  ]
199  }
200  )";
201  SetupSingleInputSingleOutput("inputTensor", "outputTensor");
202  }
203 };
204 
205 struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
206 {
207  SimpleConv2DWithBiasesFixture()
208  : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
209  "[ 1, 2, 2, 1 ]", // outputShape
210  "[ 1, 2, 2, 1 ]", // filterShape
211  "[ 2,1, 0,6 ]", // filterData
212  "[ 1 ]", // biasShape
213  "[ 10, 0, 0, 0 ]", // biasData
214  "1") // stride w and h
215  {}
216 };
217 
218 TEST_CASE_FIXTURE(SimpleConv2DWithBiasesFixture, "ParseConv2DWithBias")
219 {
220  RunTest<4, armnn::DataType::QAsymmU8>(
221  0,
222  {
223  1, 2,
224  3, 4,
225  },
226  // because of the output scaling we need to take half of the values
227  {
228  (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
229  (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
230  (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
231  (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
232  });
233 }
234 
235 struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
236 {
237  DynamicConv2DWithBiasesFixture()
238  : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
239  "[ ]", // outputShape
240  "[ 1, 2, 2, 1 ]", // filterShape
241  "[ 2,1, 0,6 ]", // filterData
242  "[ 1 ]", // biasShape
243  "[ 10, 0, 0, 0 ]", // biasData
244  "1") // stride w and h
245  {}
246 };
247 
248 TEST_CASE_FIXTURE(DynamicConv2DWithBiasesFixture, "ParseDynamicConv2DWithBias")
249 {
250  RunTest<4,
252  armnn::DataType::QAsymmU8>(0,
253  { { "inputTensor", { 1, 2, 3, 4, } } },
254  { { "outputTensor", { (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
255  (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
256  (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
257  (4*2 + 0*1 + 0*0 + 0*6 + 10)/2} } },
258  true);
259 }
260 
261 struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
262 {
263  static std::string GenerateInts(unsigned int n)
264  {
265  std::stringstream ss;
266  ss << " [ ";
267  for( unsigned int i=0; i<n; ++i ) {
268  if (i > 0 )
269  {
270  ss << " , ";
271  }
272  ss << " " << (i%256);
273  }
274  ss << " ] ";
275  return ss.str();
276  }
277 
278  Conv2DShapeTestFixture()
279  : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]", // inputShape
280  "[ 1, 112, 112, 32 ]", // outputShape
281  "[ 32, 3, 3, 3 ]", // filterShape
282  GenerateInts(32*3*3*3), // filterData
283  "[ 32 ]", // biasShape
284  GenerateInts(32*4), // biasData
285  "2") // stride w and h
286  {}
287 };
288 
289 TEST_CASE_FIXTURE(Conv2DShapeTestFixture, "ParseConv2D_112x112_out")
290 {
291 }
292 
293 struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
294 {
295  ReluConv2DWithBiasesFixture()
296  : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
297  "[ 1, 2, 2, 1 ]", // outputShape
298  "[ 1, 2, 2, 1 ]", // filterShape
299  "[ 2,1, 0,6 ]", // filterData
300  "[ 1 ]", // biasShape
301  "[ 16, 0, 0, 0 ]", // biasData
302  "1", // stride w and h
303  "RELU", // activation
304  "1.0", // filter scale
305  "4", // filter zero point
306  "2.0", // output scale
307  "20") // output zero point
308  {}
309 };
310 
311 TEST_CASE_FIXTURE(ReluConv2DWithBiasesFixture, "ParseConv2DAndReluWithBias")
312 {
313  uint8_t bias = 16;
314  uint8_t outZero = 20;
315  uint8_t fz = 4; // filter zero point
316 
317  RunTest<4, armnn::DataType::QAsymmU8>(
318  0,
319  {
320  1, 2,
321  4, 8,
322  },
323  // factors to consider:
324  // - the filter zero point is non zero, hence the (x-fz)
325  // - the output scale is 2 hence the /2
326  // - output zero point is non zero, hence the +outZero
327  // - RELU cuts negative values and then we add the output zero point
328  {
329  std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
330  std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
331  std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
332  std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
333  });
334 }
335 
336 struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
337 {
338  Relu6Conv2DWithBiasesFixture()
339  : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]", // inputShape
340  "[ 1, 2, 2, 1 ]", // outputShape
341  "[ 1, 2, 2, 1 ]", // filterShape
342  "[ 2,1, 0,6 ]", // filterData
343  "[ 1 ]", // biasShape
344  "[ 0, 0, 0, 0 ]", // biasData
345  "1", // stride w and h
346  "RELU6", // activation
347  "1.0", // filter scale
348  "0", // filter zero point
349  "2.0", // output scale
350  "0") // output zero point
351  {}
352 };
353 
354 TEST_CASE_FIXTURE(Relu6Conv2DWithBiasesFixture, "ParseConv2DAndRelu6WithBias")
355 {
356  uint8_t relu6Min = 6 / 2; // divide by output scale
357 
358  RunTest<4, armnn::DataType::QAsymmU8>(
359  0,
360  {
361  1, 2,
362  4, 1,
363  },
364  // factors to consider:
365  // - the output scale is 2 hence the /2
366  // - RELU6 cuts output values at +6
367  {
368  std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
369  std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
370  std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
371  std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
372  });
373 }
374 
375 
376 struct PerChannelConv2DFixture : public ParserFlatbuffersFixture
377 {
378  explicit PerChannelConv2DFixture()
379  {
380  m_JsonString = R"(
381  {
382  "version": 3,
383  "operator_codes": [
384  {
385  "builtin_code": "CONV_2D",
386  "version": 3
387  }
388  ],
389  "subgraphs": [
390  {
391  "tensors": [
392  {
393  "shape": [
394  1,
395  4,
396  4,
397  2
398  ],
399  "type": "INT8",
400  "buffer": 1,
401  "name": "input",
402  "quantization": {
403  "min": [
404  -50.0
405  ],
406  "max": [
407  49.0
408  ],
409  "scale": [
410  0.388235
411  ],
412  "zero_point": [
413  1
414  ],
415  "details_type": "NONE",
416  "quantized_dimension": 0
417  },
418  "is_variable": false
419  },
420  {
421  "shape": [
422  4
423  ],
424  "type": "INT32",
425  "buffer": 2,
426  "name": "model/conv2d/Conv2D",
427  "quantization": {
428  "scale": [
429  0.001523,
430  0.001197,
431  0.001517,
432  0.001364
433  ],
434  "zero_point": [
435  0,
436  0,
437  0,
438  0
439  ],
440  "details_type": "NONE",
441  "quantized_dimension": 0
442  },
443  "is_variable": false
444  },
445  {
446  "shape": [
447  4,
448  2,
449  2,
450  2
451  ],
452  "type": "INT8",
453  "buffer": 3,
454  "name": "model/conv2d/Conv2D1",
455  "quantization": {
456  "min": [
457  -0.498056,
458  -0.362561,
459  -0.307959,
460  -0.207799
461  ],
462  "max": [
463  0.339136,
464  0.391629,
465  0.496193,
466  0.446191
467  ],
468  "scale": [
469  0.003922,
470  0.003084,
471  0.003907,
472  0.003513
473  ],
474  "zero_point": [
475  0,
476  0,
477  0,
478  0
479  ],
480  "details_type": "NONE",
481  "quantized_dimension": 0
482  },
483  "is_variable": false
484  },
485  {
486  "shape": [
487  1,
488  4,
489  4,
490  4
491  ],
492  "type": "INT8",
493  "buffer": 4,
494  "name": "Identity",
495  "quantization": {
496  "min": [
497  -66.578751
498  ],
499  "max": [
500  70.137619
501  ],
502  "scale": [
503  0.536143
504  ],
505  "zero_point": [
506  -4
507  ],
508  "details_type": "NONE",
509  "quantized_dimension": 0
510  },
511  "is_variable": false
512  }
513  ],
514  "inputs": [
515  0
516  ],
517  "outputs": [
518  3
519  ],
520  "operators": [
521  {
522  "opcode_index": 0,
523  "inputs": [
524  0,
525  2,
526  1
527  ],
528  "outputs": [
529  3
530  ],
531  "builtin_options_type": "Conv2DOptions",
532  "builtin_options": {
533  "padding": "SAME",
534  "stride_w": 1,
535  "stride_h": 1,
536  "fused_activation_function": "NONE",
537  "dilation_w_factor": 1,
538  "dilation_h_factor": 1
539  },
540  "custom_options_format": "FLEXBUFFERS"
541  }
542  ],
543  "name": "main"
544  }
545  ],
546  "description": "MLIR Converted.",
547  "buffers": [
548  {
549  },
550  {
551  },
552  {
553  "data": [
554  0,
555  0,
556  0,
557  0,
558  0,
559  0,
560  0,
561  0,
562  0,
563  0,
564  0,
565  0,
566  0,
567  0,
568  0,
569  0
570  ]
571  },
572  {
573  "data": [
574  157,
575  201,
576  86,
577  129,
578  17,
579  33,
580  209,
581  13,
582  76,
583  249,
584  127,
585  138,
586  35,
587  18,
588  250,
589  233,
590  15,
591  205,
592  98,
593  127,
594  68,
595  196,
596  246,
597  177,
598  65,
599  197,
600  230,
601  246,
602  127,
603  66,
604  212,
605  30
606  ]
607  },
608  {
609  },
610  {
611  "data": [
612  49,
613  46,
614  53,
615  46,
616  48,
617  0,
618  0,
619  0,
620  0,
621  0,
622  0,
623  0,
624  0,
625  0,
626  0,
627  0
628  ]
629  }
630  ],
631  "metadata": [
632  {
633  "name": "min_runtime_version",
634  "buffer": 5
635  }
636  ]
637  }
638  )";
639  SetupSingleInputSingleOutput("input", "Identity");
640  }
641 };
642 
643 TEST_CASE_FIXTURE(PerChannelConv2DFixture, "ParsePerChannelConv2D")
644 {
645  RunTest<4, armnn::DataType::QAsymmS8>(
646  0,
647  {
648  -11, 40,-26, 11,-28, 8, 0, -8,
649  -10, 34, 47, 0,-33,-14, 28, 35,
650  6,-28,-26, 8, 13, 33,-31,-41,
651  31,-20,-31,-16, 8,-18,-44, 0
652  },
653  {
654  -21,-17,-23,-14, -1,-14, 1, 9,
655  1,-12,-22,-23, 2, -1, -3, 12,
656  7, 6, 8,-13,-21, -6,-31, 0,
657  9, -6, 24, 0,-22, -4, -7,-22,
658  -7, -9, 9, 11,-11,-16, 9,-27,
659  -1, 0,-26, 0, 9,-12, -8,-18,
660  -11, -3,-15, 7, 16, -2, -8, -7,
661  -14,-15,-14, 3, 9,-12, -6,-11
662  });
663 }
664 
665 }
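PerChannelConv2DFixture above exercises per-axis (per-channel) quantization: the INT8 filter tensor carries one scale per output channel (quantized_dimension 0), so each output channel is requantized with its own effective scale. A rough sketch of how such a per-channel scale vector enters the requantization step, using hypothetical names and a simplified float path rather than ArmNN's actual implementation:

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative only: each output channel c uses its own effective scale
// inputScale * filterScales[c] / outputScale when converting the INT32
// accumulator back to the QAsymmS8 output.
int8_t RequantizePerChannel(int32_t accumulator,
                            unsigned int channel,
                            float inputScale,
                            const std::vector<float>& filterScales, // one scale per output channel
                            float outputScale,
                            int32_t outputZeroPoint)
{
    const float effectiveScale = inputScale * filterScales[channel] / outputScale;
    float rescaled = static_cast<float>(accumulator) * effectiveScale
                     + static_cast<float>(outputZeroPoint);
    // Clamp to the QAsymmS8 range before narrowing.
    rescaled = std::min(127.0f, std::max(-128.0f, rescaled));
    return static_cast<int8_t>(rescaled);
}

For the fixture above, filterScales would be { 0.003922f, 0.003084f, 0.003907f, 0.003513f }, with inputScale 0.388235f, outputScale 0.536143f and outputZeroPoint -4, as declared in the tensors' quantization blocks.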