ArmNN 21.02
RefLayerSupportTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

// The hyperlinked #include lines are elided in this listing; the headers below are
// assumed from the identifiers the tests use.
#include <layers/ConvertBf16ToFp32Layer.hpp>
#include <layers/ConvertFp16ToFp32Layer.hpp>
#include <layers/ConvertFp32ToBf16Layer.hpp>
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>

#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefLayerSupport.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>

#include <boost/test/unit_test.hpp>

#include <string>

namespace
{

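// Walks the armnn::LayerType enumeration, starting from FirstLayer, through the
// LayerTypeMatchesTestImpl helper used by the IsLayerSupportedLayerTypeMatches case below.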
bool LayerTypeMatchesTest()
{
    return LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());
};

} // anonymous namespace

BOOST_AUTO_TEST_SUITE(RefLayerSupported)

BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches)
{
    LayerTypeMatchesTest();
}
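
// Broadcast addition of a {1,1,3,4} tensor and a {4} tensor should be reported as
// supported by the reference backend.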
BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition)
{
    armnn::TensorShape shape0 = {1,1,3,4};
    armnn::TensorShape shape1 = {4};
    armnn::TensorShape outShape = {1,1,3,4};
    // The TensorInfo declarations are elided in the listing; Float32 tensors are assumed here.
    armnn::TensorInfo in0(shape0, armnn::DataType::Float32);
    armnn::TensorInfo in1(shape1, armnn::DataType::Float32);
    armnn::TensorInfo out(outShape, armnn::DataType::Float32);

    armnn::RefLayerSupport supportChecker;
    std::string reasonNotSupported;
    BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedBFloat16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedInt8Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
}

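// The Convert* cases below check both the accepted input/output data type pairing and
// the rejected pairings, including the exact reason string the reference backend returns.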
BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
{
    std::string reasonIfUnsupported;

    // The data type template arguments are elided in the listing; Float16 input and
    // Float32 output follow from the test name.
    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
                                               armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
                                               armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input");
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
                                               armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output");
}

BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
                                               armnn::DataType::BFloat16, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedFp32InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
                                               armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n");
}

BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedBf16OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
                                               armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
                                               armnn::DataType::Float32, armnn::DataType::BFloat16>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedBf16InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
                                               armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedFp32OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
                                               armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
                                               armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
                                               armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
                                               armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output");
}

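// Mean is checked with a valid reduction and with an output tensor that has the wrong
// number of dimensions; the failing case must report a descriptive reason string.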
BOOST_AUTO_TEST_CASE(IsLayerSupportedMeanDimensionsReference)
{
    std::string reasonIfUnsupported;

    bool result = IsMeanLayerSupportedTests<armnn::RefWorkloadFactory,
                                            armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference)
{
    std::string reasonIfUnsupported;

    bool result = IsMeanLayerNotSupportedTests<armnn::RefWorkloadFactory,
                                               armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);

    BOOST_CHECK(reasonIfUnsupported.find(
        "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.")
        != std::string::npos);
}

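// Constant is supported for every reference data type exercised below except Boolean.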
BOOST_AUTO_TEST_CASE(IsConstantSupportedRef)
{
    std::string reasonIfUnsupported;

    bool result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                                armnn::DataType::Float16>(reasonIfUnsupported);
    BOOST_CHECK(result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::Float32>(reasonIfUnsupported);
    BOOST_CHECK(result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::QAsymmU8>(reasonIfUnsupported);
    BOOST_CHECK(result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::Boolean>(reasonIfUnsupported);
    BOOST_CHECK(!result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::QSymmS16>(reasonIfUnsupported);
    BOOST_CHECK(result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::QSymmS8>(reasonIfUnsupported);
    BOOST_CHECK(result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::QAsymmS8>(reasonIfUnsupported);
    BOOST_CHECK(result);

    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
                                           armnn::DataType::BFloat16>(reasonIfUnsupported);
    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_SUITE_END()