RefLayerSupportTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

// Layer, reference-backend and layer-support test-helper headers; paths follow the layout of
// the ArmNN backends source tree.
#include <layers/ConvertFp16ToFp32Layer.hpp>
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefLayerSupport.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
#include <backendsCommon/test/LayerTests.hpp>

#include <boost/test/unit_test.hpp>
#include <boost/algorithm/string/trim.hpp>

#include <string>

namespace
{

bool LayerTypeMatchesTest()
{
    return LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());
};

} // anonymous namespace

BOOST_AUTO_TEST_SUITE(RefLayerSupported)

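// The cases below exercise layer-support queries on the reference backend in two ways:
// directly, by calling armnn::RefLayerSupport for a single layer configuration, and via the
// templated IsLayerSupportedTests / IsConvertLayerSupportedTests / IsMeanLayer*SupportedTests
// helpers from the shared backend test code, which query support for the given workload
// factory and data type.
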
BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches)
{
    LayerTypeMatchesTest();
}
BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition)
{
    // Shapes {1,1,3,4} + {4}: the second input is broadcast against the first, so this also
    // exercises the reference backend's broadcast handling for Addition.
    armnn::TensorShape shape0 = {1,1,3,4};
    armnn::TensorShape shape1 = {4};
    armnn::TensorShape outShape = {1,1,3,4};
    armnn::TensorInfo in0(shape0, armnn::DataType::Float32);
    armnn::TensorInfo in1(shape1, armnn::DataType::Float32);
    armnn::TensorInfo out(outShape, armnn::DataType::Float32);

    armnn::RefLayerSupport supportChecker;
    std::string reasonNotSupported;
    BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedInt8Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
{
    armnn::RefWorkloadFactory factory;
    IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
      armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input");
}

BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
      armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
      armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
      armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input");
}

BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference)
{
    std::string reasonIfUnsupported;

    bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);
    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output");
}

BOOST_AUTO_TEST_CASE(IsLayerSupportedMeanDimensionsReference)
{
    std::string reasonIfUnsupported;

    bool result = IsMeanLayerSupportedTests<armnn::RefWorkloadFactory,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(result);
}

BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference)
{
    std::string reasonIfUnsupported;

    bool result = IsMeanLayerNotSupportedTests<armnn::RefWorkloadFactory,
      armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);

    BOOST_CHECK(!result);

    boost::algorithm::trim(reasonIfUnsupported);
    BOOST_CHECK_EQUAL(reasonIfUnsupported,
      "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.");
}

BOOST_AUTO_TEST_SUITE_END()