ArmNN 20.05
Quantize.cpp
//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"

#include <string>
#include <iostream>

BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)

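// QuantizeFixture builds the FlatBuffers JSON for a network containing a single
// QUANTIZE operator. The input tensor is always FLOAT32; the output tensor's shape
// and data type are supplied by the derived fixtures. The output quantization uses
// scale 1.5 and zero_point 0, so the expected outputs below correspond to
// value / 1.5, saturated to the range of the output data type.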
struct QuantizeFixture : public ParserFlatbuffersFixture
{
    explicit QuantizeFixture(const std::string & inputShape,
                             const std::string & outputShape,
                             const std::string & dataType)
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "QUANTIZE" } ],
                "subgraphs": [ {
                    "tensors": [
                        {
                            "shape": )" + inputShape + R"(,
                            "type": "FLOAT32",
                            "buffer": 0,
                            "name": "inputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.0 ],
                                "zero_point": [ 0 ],
                            }
                        },
                        {
                            "shape": )" + outputShape + R"( ,
                            "type": )" + dataType + R"(,
                            "buffer": 1,
                            "name": "outputTensor",
                            "quantization": {
                                "min": [ 0.0 ],
                                "max": [ 255.0 ],
                                "scale": [ 1.5 ],
                                "zero_point": [ 0 ],
                            }
                        }
                    ],
                    "inputs": [ 0 ],
                    "outputs": [ 1 ],
                    "operators": [
                        {
                            "opcode_index": 0,
                            "inputs": [ 0 ],
                            "outputs": [ 1 ],
                            "builtin_options_type": "QuantizeOptions",
                            "builtin_options": {
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        }
                    ],
                } ],
                "buffers" : [
                    { },
                    { },
                ]
            }
        )";
        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
    }
};

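// Each concrete fixture below fixes both tensor shapes to [ 1, 6 ] and selects the
// TFLite data type of the output tensor under test.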
struct SimpleQuantizeFixtureQAsymm8 : QuantizeFixture
{
    SimpleQuantizeFixtureQAsymm8() : QuantizeFixture("[ 1, 6 ]",
                                                     "[ 1, 6 ]",
                                                     "UINT8") {}
};

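// With scale 1.5 and zero_point 0, each float input maps to value / 1.5, saturated
// to the UINT8 range [0, 255]: e.g. 150.0f / 1.5 = 100 and 382.5f / 1.5 = 255.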
BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQAsymm8, SimpleQuantizeFixtureQAsymm8)
{
    RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedAsymm8>(
        0,
        {{"inputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}},
        {{"outputTensor", { 0u, 1u, 5u, 100u, 200u, 255u }}});
}

struct SimpleQuantizeFixtureQSymm16 : QuantizeFixture
{
    SimpleQuantizeFixtureQSymm16() : QuantizeFixture("[ 1, 6 ]",
                                                     "[ 1, 6 ]",
                                                     "INT16") {}
};

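// Same formula with an INT16 output: the extreme inputs map exactly onto the INT16
// limits, e.g. 49150.5f / 1.5 = 32767 and -49152.0f / 1.5 = -32768.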
BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQsymm16, SimpleQuantizeFixtureQSymm16)
{
    RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedSymm16>(
        0,
        {{"inputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f, -49152.0f }}},
        {{"outputTensor", { 0, 1, 5, 32767, -1, -32768 }}});
}

struct SimpleQuantizeFixtureQSymmS8 : QuantizeFixture
{
    SimpleQuantizeFixtureQSymmS8() : QuantizeFixture("[ 1, 6 ]",
                                                     "[ 1, 6 ]",
                                                     "INT8") {}
};

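// And with an INT8 output: the extreme inputs map exactly onto the INT8 limits,
// e.g. 190.5f / 1.5 = 127 and -192.0f / 1.5 = -128.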
BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQSymmS8, SimpleQuantizeFixtureQSymmS8)
{
    RunTest<2, armnn::DataType::Float32, armnn::DataType::QSymmS8>(
        0,
        {{"inputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}},
        {{"outputTensor", { 0, 1, 5, 127, -128, -1 }}});
}

BOOST_AUTO_TEST_SUITE_END()