ArmNN 21.02
Conv2D.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
#include <sstream>

BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)

struct SimpleConv2DFixture : public ParserFlatbuffersFixture
{
    explicit SimpleConv2DFixture()
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 1, 1, 1 ],
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ 2.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": [ 1, 3, 3, 1 ],
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "VALID",
                                "stride_w": 1,
                                "stride_h": 1,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
                    { },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9,
        },
        // because of the output scaling we need to take half of the values
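        // worked out: 1*2 + 2*1 + 3*0 + 4*6 + 5*2 + 6*1 + 7*4 + 8*1 + 9*2 = 98, and 98/2 = 49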
        {
            (1*2 + 2*1 + 3*0 +
             4*6 + 5*2 + 6*1 +
             7*4 + 8*1 + 9*2) /2
        });
}

struct Conv2DWithBiasesFixture : public ParserFlatbuffersFixture
{
    explicit Conv2DWithBiasesFixture(const std::string & inputShape,
                                     const std::string & outputShape,
                                     const std::string & filterShape,
                                     const std::string & filterData,
                                     const std::string & biasShape,
                                     const std::string & biasData,
                                     const std::string & strides,
                                     const std::string & activation="NONE",
                                     const std::string & filterScale="1.0",
                                     const std::string & filterZeroPoint="0",
                                     const std::string & outputScale="2.0",
                                     const std::string & outputZeroPoint="0")
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "CONV_2D" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "UINT8",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"(,
                            "type": "UINT8",
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 511.0 ],
                                "scale": [ )" + outputScale + R"( ],
                                "zero_point": [ )" + outputZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + filterShape + R"( ,
                            "type": "UINT8",
                            "buffer": 2,
                            "name": "filterTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ )" + filterScale + R"( ],
                                "zero_point": [ )" + filterZeroPoint + R"( ],
                            }
                        },
                        {
                            "shape": )" + biasShape + R"( ,
                            "type": "INT32",
                            "buffer": 3,
                            "name": "biasTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0, 2, 3 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "Conv2DOptions",
                            "builtin_options": {
                                "padding": "SAME",
                                "stride_w": )" + strides + R"(,
                                "stride_h": )" + strides + R"(,
                                "fused_activation_function": )" + activation + R"(
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                    { "data": )" + filterData + R"(, },
                    { "data": )" + biasData + R"(, },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

struct SimpleConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    SimpleConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};

BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            3, 4,
        },
        // because of the output scaling we need to take half of the values
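        // worked out with bias 10: 38/2 = 19, 14/2 = 7, 20/2 = 10 and 18/2 = 9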
        {
            (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
            (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
            (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
            (4*2 + 0*1 + 0*0 + 0*6 + 10)/2
        });
}

struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    DynamicConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ ]",               // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 10, 0, 0, 0 ]",   // biasData
                              "1")                 // stride w and h
    {}
};

BOOST_FIXTURE_TEST_CASE( ParseDynamicConv2DWithBias, DynamicConv2DWithBiasesFixture )
{
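    // the fixture declares an empty outputShape ("[ ]"), so the parser has to infer the
    // output dimensions; the trailing 'true' argument runs RunTest in its dynamic-output mode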
    RunTest<4,
            armnn::DataType::QAsymmU8,
            armnn::DataType::QAsymmU8>(0,
                                       { { "inputTensor", { 1, 2, 3, 4, } } },
                                       { { "outputTensor", { (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
                                                             (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
                                                             (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
                                                             (4*2 + 0*1 + 0*0 + 0*6 + 10)/2} } },
                                       true);
}

struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
{
    static std::string GenerateInts(unsigned int n)
    {
        std::stringstream ss;
        ss << " [ ";
        for( unsigned int i=0; i<n; ++i ) {
            if (i > 0 )
            {
                ss << " , ";
            }
            ss << " " << (i%256);
        }
        ss << " ] ";
        return ss.str();
    }

    Conv2DShapeTestFixture()
    : Conv2DWithBiasesFixture("[ 1, 224, 224, 3 ]",     // inputShape
                              "[ 1, 112, 112, 32 ]",    // outputShape
                              "[ 32, 3, 3, 3 ]",        // filterShape
                              GenerateInts(32*3*3*3),   // filterData
                              "[ 32 ]",                 // biasShape
                              GenerateInts(32*4),       // biasData
                              "2")                      // stride w and h
    {}
};

BOOST_FIXTURE_TEST_CASE( ParseConv2D_112x112_out, Conv2DShapeTestFixture )
{
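    // the body is intentionally empty: the fixture constructor already parses the larger
    // 224x224x3, stride-2 model and calls SetupSingleInputSingleOutput, so the test
    // passes as long as that setup succeeds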
}

struct ReluConv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    ReluConv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 16, 0, 0, 0 ]",   // biasData
                              "1",                 // stride w and h
                              "RELU",              // activation
                              "1.0",               // filter scale
                              "4",                 // filter zero point
                              "2.0",               // output scale
                              "20")                // output zero point
    {}
};

BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture )
{
    uint8_t bias = 16;
    uint8_t outZero = 20;
    uint8_t fz = 4; // filter zero point

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 8,
        },
        // factors to consider:
        // - the filter zero point is non zero, hence the (x-fz)
        // - the output scale is 2 hence the /2
        // - output zero point is non zero, hence the +outZero
        // - RELU cuts negative values and then we add the output zero point
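        // worked out: the four accumulators are 8, -20, -16 and 0; after /2 and +outZero they
        // become 24, 10, 12 and 20, and the max() with outZero=20 gives 24, 20, 20, 20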
        {
            std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
        });
}

struct Relu6Conv2DWithBiasesFixture : Conv2DWithBiasesFixture
{
    Relu6Conv2DWithBiasesFixture()
    : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
                              "[ 1, 2, 2, 1 ]",    // outputShape
                              "[ 1, 2, 2, 1 ]",    // filterShape
                              "[ 2,1, 0,6 ]",      // filterData
                              "[ 1 ]",             // biasShape
                              "[ 0, 0, 0, 0 ]",    // biasData
                              "1",                 // stride w and h
                              "RELU6",             // activation
                              "1.0",               // filter scale
                              "0",                 // filter zero point
                              "2.0",               // output scale
                              "0")                 // output zero point
    {}
};

BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixture )
{
    uint8_t relu6Min = 6 / 2; // divide by output scale

    RunTest<4, armnn::DataType::QAsymmU8>(
        0,
        {
            1, 2,
            4, 1,
        },
        // factors to consider:
        // - the output scale is 2 hence the /2
        // - RELU6 cuts output values at +6
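        // worked out: the accumulators are 10, 4, 9 and 2; integer division by 2 gives
        // 5, 2, 4 and 1, and clamping at 6/2 = 3 yields the expected 3, 2, 3, 1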
        {
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 2*1 + 4*0 + 1*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((2*2 + 0*1 + 1*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((4*2 + 1*1 + 0*0 + 0*6)/2)),
            std::min(relu6Min, static_cast<uint8_t>((1*2 + 0*1 + 0*0 + 0*6)/2))
        });
}
