17 #include <doctest/doctest.h> 24 bool LayerTypeMatchesTest()
26 return LayerTypeMatchesTestImpl<armnn::LayerType::FirstLayer>(Tag<armnn::LayerType::FirstLayer>());
33 TEST_CASE(
"IsLayerSupportedLayerTypeMatches")
35 LayerTypeMatchesTest();
38 TEST_CASE(
"IsLayerSupportedReferenceAddition")
48 std::string reasonNotSupported;
52 TEST_CASE(
"IsLayerSupportedBFloat16Reference")
55 IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
58 TEST_CASE(
"IsLayerSupportedFloat16Reference")
61 IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float16>(&factory);
64 TEST_CASE(
"IsLayerSupportedFloat32Reference")
67 IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
70 TEST_CASE(
"IsLayerSupportedUint8Reference")
73 IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
76 TEST_CASE(
"IsLayerSupportedInt8Reference")
79 IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
82 TEST_CASE(
"IsLayerSupportedInt16Reference")
85 IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
88 TEST_CASE(
"IsConvertFp16ToFp32SupportedReference")
90 std::string reasonIfUnsupported;
98 TEST_CASE(
"IsConvertFp16ToFp32SupportedFp32InputReference")
100 std::string reasonIfUnsupported;
106 CHECK_EQ(reasonIfUnsupported,
"Layer is not supported with float32 data type input");
109 TEST_CASE(
"IsConvertFp16ToFp32SupportedFp16OutputReference")
111 std::string reasonIfUnsupported;
117 CHECK_EQ(reasonIfUnsupported,
"Layer is not supported with float16 data type output");
120 TEST_CASE(
"IsConvertBf16ToFp32SupportedReference")
122 std::string reasonIfUnsupported;
130 TEST_CASE(
"IsConvertBf16ToFp32SupportedFp32InputReference")
132 std::string reasonIfUnsupported;
138 CHECK_EQ(reasonIfUnsupported,
"Reference for ConvertBf16ToFp32 layer: input type not supported\n");
141 TEST_CASE(
"IsConvertBf16ToFp32SupportedBf16OutputReference")
143 std::string reasonIfUnsupported;
149 CHECK_EQ(reasonIfUnsupported,
"Reference for ConvertBf16ToFp32 layer: output type not supported\n");
152 TEST_CASE(
"IsConvertFp32ToBf16SupportedReference")
154 std::string reasonIfUnsupported;
162 TEST_CASE(
"IsConvertFp32ToBf16SupportedBf16InputReference")
164 std::string reasonIfUnsupported;
170 CHECK_EQ(reasonIfUnsupported,
"Reference for ConvertFp32ToBf16 layer: input type not supported\n");
173 TEST_CASE(
"IsConvertFp32ToBf16SupportedFp32OutputReference")
175 std::string reasonIfUnsupported;
181 CHECK_EQ(reasonIfUnsupported,
"Reference for ConvertFp32ToBf16 layer: output type not supported\n");
184 TEST_CASE(
"IsConvertFp32ToFp16SupportedReference")
186 std::string reasonIfUnsupported;
194 TEST_CASE(
"IsConvertFp32ToFp16SupportedFp16InputReference")
196 std::string reasonIfUnsupported;
202 CHECK_EQ(reasonIfUnsupported,
"Layer is not supported with float16 data type input");
205 TEST_CASE(
"IsConvertFp32ToFp16SupportedFp32OutputReference")
207 std::string reasonIfUnsupported;
213 CHECK_EQ(reasonIfUnsupported,
"Layer is not supported with float32 data type output");
216 TEST_CASE(
"IsLayerSupportedMeanDimensionsReference")
218 std::string reasonIfUnsupported;
226 TEST_CASE(
"IsLayerNotSupportedMeanDimensionsReference")
228 std::string reasonIfUnsupported;
235 CHECK(reasonIfUnsupported.find(
236 "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.")
237 != std::string::npos);
240 TEST_CASE(
"IsConstantSupportedRef")
242 std::string reasonIfUnsupported;
248 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
252 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
256 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
260 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
264 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
268 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
272 result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
This layer converts data type Float16 to Float32.
This layer converts data type BFloat16 to Float32.
TEST_SUITE("RefLayerSupported")
bool IsAdditionSupported(const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, Optional< std::string &> reasonIfUnsupported=EmptyOptional()) const override
This layer converts data type Float32 to Float16.
This layer converts data type Float32 to BFloat16.