ArmNN 21.02
GetTensorIds.cpp — armnnTfLiteParser unit tests for TfLiteParserImpl::GetInputTensorIds / GetOutputTensorIds.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
#include <boost/test/unit_test.hpp>

#include "ParserFlatbuffersFixture.hpp"
#include "../TfLiteParser.hpp"
8 
11 
12 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
13 
14 struct GetTensorIdsFixture : public ParserFlatbuffersFixture
15 {
16  explicit GetTensorIdsFixture(const std::string& inputs, const std::string& outputs)
17  {
18  m_JsonString = R"(
19  {
20  "version": 3,
21  "operator_codes": [ { "builtin_code": "AVERAGE_POOL_2D" } ],
22  "subgraphs": [
23  {
24  "tensors": [
25  {
26  "shape": [ 1, 1, 1, 1 ] ,
27  "type": "UINT8",
28  "buffer": 0,
29  "name": "OutputTensor",
30  "quantization": {
31  "min": [ 0.0 ],
32  "max": [ 255.0 ],
33  "scale": [ 1.0 ],
34  "zero_point": [ 0 ]
35  }
36  },
37  {
38  "shape": [ 1, 2, 2, 1 ] ,
39  "type": "UINT8",
40  "buffer": 1,
41  "name": "InputTensor",
42  "quantization": {
43  "min": [ 0.0 ],
44  "max": [ 255.0 ],
45  "scale": [ 1.0 ],
46  "zero_point": [ 0 ]
47  }
48  }
49  ],
50  "inputs": [ 1 ],
51  "outputs": [ 0 ],
52  "operators": [ {
53  "opcode_index": 0,
54  "inputs": )"
55  + inputs
56  + R"(,
57  "outputs": )"
58  + outputs
59  + R"(,
60  "builtin_options_type": "Pool2DOptions",
61  "builtin_options":
62  {
63  "padding": "VALID",
64  "stride_w": 2,
65  "stride_h": 2,
66  "filter_width": 2,
67  "filter_height": 2,
68  "fused_activation_function": "NONE"
69  },
70  "custom_options_format": "FLEXBUFFERS"
71  } ]
72  }
73  ],
74  "description": "Test loading a model",
75  "buffers" : [ {}, {} ]
76  })";
77 
79  }
80 };
81 
82 struct GetEmptyTensorIdsFixture : GetTensorIdsFixture
83 {
84  GetEmptyTensorIdsFixture() : GetTensorIdsFixture("[ ]", "[ ]") {}
85 };
86 
87 struct GetInputOutputTensorIdsFixture : GetTensorIdsFixture
88 {
89  GetInputOutputTensorIdsFixture() : GetTensorIdsFixture("[ 0, 1, 2 ]", "[ 3 ]") {}
90 };
91 
92 BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
93 {
94  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
95  m_GraphBinary.size());
96  std::vector<int32_t> expectedIds = { };
97  std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
98  BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
99  inputTensorIds.begin(), inputTensorIds.end());
100 }
101 
102 BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
103 {
104  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
105  m_GraphBinary.size());
106  std::vector<int32_t> expectedIds = { };
107  std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
108  BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
109  outputTensorIds.begin(), outputTensorIds.end());
110 }
111 
112 BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
113 {
114  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
115  m_GraphBinary.size());
116  std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
117  std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
118  BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
119  inputTensorIds.begin(), inputTensorIds.end());
120 }
121 
122 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
123 {
124  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
125  m_GraphBinary.size());
126  std::vector<int32_t> expectedOutputIds = { 3 };
127  std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
128  BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
129  outputTensorIds.begin(), outputTensorIds.end());
130 }
131 
132 BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
133 {
134  BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
135 }
136 
137 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
138 {
139  BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
140 }
141 
142 BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
143 {
144  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
145  m_GraphBinary.size());
146  BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
147 }
148 
149 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
150 {
151  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
152  m_GraphBinary.size());
153  BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
154 }
155 
156 BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
157 {
158  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
159  m_GraphBinary.size());
160  BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
161 }
162 
163 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
164 {
165  TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
166  m_GraphBinary.size());
167  BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
168 }
169 
// Documentation cross-reference residue (not part of the compiled source):
//   BOOST_AUTO_TEST_SUITE / BOOST_FIXTURE_TEST_CASE — boost/test/unit_test.hpp
//   TfLiteParserImpl::ModelPtr — armnnTfLiteParser TfLiteParser.hpp
//   (an unrelated onnx::ModelProto ModelPtr link was also captured here)
BOOST_AUTO_TEST_SUITE_END()