ArmNN 20.02
RefLayerSupportTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

// Headers for the converter layers, the reference backend, and the shared
// layer-support test helpers used below (include paths assumed).
#include <layers/ConvertFp16ToFp32Layer.hpp>
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>

#include <reference/RefLayerSupport.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>

#include <boost/test/unit_test.hpp>
#include <boost/algorithm/string/trim.hpp>

#include <string>

namespace
{

bool LayerTypeMatchesTest()
{
    return LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());
};

} // anonymous namespace

BOOST_AUTO_TEST_SUITE(RefLayerSupported)

BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches)
{
    LayerTypeMatchesTest();
}
BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition)
{
    armnn::TensorShape shape0 = {1,1,3,4};
    armnn::TensorShape shape1 = {4};
    armnn::TensorShape outShape = {1,1,3,4};
    // TensorInfos for the two inputs and the output; Float32 is assumed here
    // (any data type accepted by the reference addition would do).
    armnn::TensorInfo in0(shape0, armnn::DataType::Float32);
    armnn::TensorInfo in1(shape1, armnn::DataType::Float32);
    armnn::TensorInfo out(outShape, armnn::DataType::Float32);

    armnn::RefLayerSupport supportChecker;
    std::string reasonNotSupported;
    BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedBFloat16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedInt8Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
      armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input");
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
      armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
      armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
      armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output");
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedMeanDimensionsReference)
{
    std::string reasonIfUnsupported;

    bool result = IsMeanLayerSupportedTests<armnn::RefWorkloadFactory,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference)
{
    std::string reasonIfUnsupported;

    bool result = IsMeanLayerNotSupportedTests<armnn::RefWorkloadFactory,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);

    boost::algorithm::trim(reasonIfUnsupported);
    BOOST_CHECK_EQUAL(reasonIfUnsupported,
        "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.");
}

BOOST_AUTO_TEST_SUITE_END()
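
For context, the layer-support query exercised in IsLayerSupportedReferenceAddition can also be driven directly from application code. The following is a minimal sketch, not part of the test file, assuming ArmNN 20.02 and the same include paths as above; it shows how passing a std::string to the Optional<std::string&> reasonIfUnsupported parameter of IsAdditionSupported retrieves the failure reason when a configuration is rejected.

// Minimal sketch, assuming ArmNN 20.02 and the include paths used by the test above.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <reference/RefLayerSupport.hpp>

#include <iostream>
#include <string>

int main()
{
    // Same broadcastable shapes as the IsLayerSupportedReferenceAddition test.
    armnn::TensorShape shape0 = {1, 1, 3, 4};
    armnn::TensorShape shape1 = {4};
    armnn::TensorShape outShape = {1, 1, 3, 4};
    armnn::TensorInfo in0(shape0, armnn::DataType::Float32);
    armnn::TensorInfo in1(shape1, armnn::DataType::Float32);
    armnn::TensorInfo out(outShape, armnn::DataType::Float32);

    armnn::RefLayerSupport supportChecker;
    std::string reason;

    // IsAdditionSupported takes an Optional<std::string&>; a std::string lvalue binds to it,
    // so 'reason' is filled in with an explanation when the check fails.
    const bool supported = supportChecker.IsAdditionSupported(in0, in1, out, reason);

    if (supported)
    {
        std::cout << "Addition is supported by the reference backend" << std::endl;
    }
    else
    {
        std::cout << "Not supported: " << reason << std::endl;
    }
    return supported ? 0 : 1;
}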