ArmNN 21.02
GetInputsOutputs.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"

using armnnTfLiteParser::TfLiteParserImpl;

BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)

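// Builds a two-subgraph TfLite model description in JSON and keeps it in m_JsonString.
// Subgraph 0 contains a single pooling operator whose "inputs" and "outputs" arrays are
// injected through the constructor arguments; subgraph 1 contains an operator with two
// inputs (ConvInputTensor, filterTensor) and one output (ConvOutputTensor).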
struct GetInputsOutputsMainFixture : public ParserFlatbuffersFixture
{
    explicit GetInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
    {
        m_JsonString = R"(
            {
                "version": 3,
                "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" }, { "builtin_code": "CONV_2D" } ],
                "subgraphs": [
                    {
                        "tensors": [
                            {
                                "shape": [ 1, 1, 1, 1 ] ,
                                "type": "UINT8",
                                "buffer": 0,
                                "name": "OutputTensor",
                                "quantization": {
                                    "min": [ 0.0 ],
                                    "max": [ 255.0 ],
                                    "scale": [ 1.0 ],
                                    "zero_point": [ 0 ]
                                }
                            },
                            {
                                "shape": [ 1, 2, 2, 1 ] ,
                                "type": "UINT8",
                                "buffer": 1,
                                "name": "InputTensor",
                                "quantization": {
                                    "min": [ -1.2 ],
                                    "max": [ 25.5 ],
                                    "scale": [ 0.25 ],
                                    "zero_point": [ 10 ]
                                }
                            }
                        ],
                        "inputs": [ 1 ],
                        "outputs": [ 0 ],
                        "operators": [ {
                            "opcode_index": 0,
                            "inputs": )"
                            + inputs
                            + R"(,
                            "outputs": )"
                            + outputs
                            + R"(,
                            "builtin_options_type": "Pool2DOptions",
                            "builtin_options":
                            {
                                "padding": "VALID",
                                "stride_w": 2,
                                "stride_h": 2,
                                "filter_width": 2,
                                "filter_height": 2,
                                "fused_activation_function": "NONE"
                            },
                            "custom_options_format": "FLEXBUFFERS"
                        } ]
                    },
                    {
                        "tensors": [
                            {
                                "shape": [ 1, 3, 3, 1 ],
                                "type": "UINT8",
                                "buffer": 0,
                                "name": "ConvInputTensor",
                                "quantization": {
                                    "scale": [ 1.0 ],
                                    "zero_point": [ 0 ],
                                }
                            },
                            {
                                "shape": [ 1, 1, 1, 1 ],
                                "type": "UINT8",
                                "buffer": 1,
                                "name": "ConvOutputTensor",
                                "quantization": {
                                    "min": [ 0.0 ],
                                    "max": [ 511.0 ],
                                    "scale": [ 2.0 ],
                                    "zero_point": [ 0 ],
                                }
                            },
                            {
                                "shape": [ 1, 3, 3, 1 ],
                                "type": "UINT8",
                                "buffer": 2,
                                "name": "filterTensor",
                                "quantization": {
                                    "min": [ 0.0 ],
                                    "max": [ 255.0 ],
                                    "scale": [ 1.0 ],
                                    "zero_point": [ 0 ],
                                }
                            }
                        ],
                        "inputs": [ 0 ],
                        "outputs": [ 1 ],
                        "operators": [
                            {
                                "opcode_index": 0,
                                "inputs": [ 0, 2 ],
                                "outputs": [ 1 ],
                                "builtin_options_type": "Conv2DOptions",
                                "builtin_options": {
                                    "padding": "VALID",
                                    "stride_w": 1,
                                    "stride_h": 1,
                                    "fused_activation_function": "NONE"
                                },
                                "custom_options_format": "FLEXBUFFERS"
                            }
                        ],
                    }
                ],
                "description": "Test Subgraph Inputs Outputs",
                "buffers" : [
                    { },
                    { },
                    { "data": [ 2,1,0, 6,2,1, 4,1,2 ], },
                    { },
                ]
            })";
        // Serialise the JSON model above into the FlatBuffers binary (m_GraphBinary) used by the tests.
        ReadStringToBinary();
    }
};

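// Two model variants: one where the pooling operator declares no inputs or outputs at all,
// and one where it reads tensor 1 (InputTensor) and writes tensor 0 (OutputTensor).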
struct GetEmptyInputsOutputsFixture : GetInputsOutputsMainFixture
{
    GetEmptyInputsOutputsFixture() : GetInputsOutputsMainFixture("[ ]", "[ ]") {}
};

struct GetInputsOutputsFixture : GetInputsOutputsMainFixture
{
    GetInputsOutputsFixture() : GetInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
};

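// An operator with empty "inputs"/"outputs" arrays should yield empty tensor vectors.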
BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
    BOOST_CHECK_EQUAL(0, tensors.size());
}

BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
    BOOST_CHECK_EQUAL(0, tensors.size());
}

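// GetInputs/GetOutputs(model, subgraphIndex, operatorIndex) return the operator's tensors;
// CheckTensors compares shape, type, buffer index, name and quantization values against
// the expectations from the JSON model above.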
BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
    BOOST_CHECK_EQUAL(1, tensors.size());
    CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                 "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
}

BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
    BOOST_CHECK_EQUAL(1, tensors.size());
    CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                 "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
}

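// Subgraph 1's operator lists two inputs, so GetInputs returns both the data tensor and the
// filter tensor; ConvInputTensor carries no min/max, only scale and zero_point.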
BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 1, 0);
    BOOST_CHECK_EQUAL(2, tensors.size());
    CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                 "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
    CheckTensors(tensors[1], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 2,
                 "filterTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
}

BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 1, 0);
    BOOST_CHECK_EQUAL(1, tensors.size());
    CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                 "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
}

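// A null model pointer is rejected with armnn::ParseException.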
BOOST_AUTO_TEST_CASE(GetInputsNullModel)
{
    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
}

BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
{
    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
}

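// The model only defines subgraphs 0 and 1, so subgraph index 2 is out of range.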
BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
}

BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
}

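// Each subgraph holds a single operator, so operator index 1 is out of range.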
BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
}

BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
{
    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                             m_GraphBinary.size());
    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
}

BOOST_AUTO_TEST_SUITE_END()