Diffstat (limited to 'src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp')
-rw-r--r-- | src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp | 77
1 file changed, 39 insertions, 38 deletions
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index f93fa77b0d..384b14c0cf 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -7,12 +7,13 @@
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
 {
     armnn::Graph graph;
 
@@ -31,18 +32,18 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
     input->GetOutputSlot().Connect(floor->GetInputSlot(0));
     floor->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                         &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                         &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
 {
     armnn::Graph graph;
 
@@ -82,37 +83,37 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
     input->GetOutputSlot().Connect(conv->GetInputSlot(0));
     conv->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                         &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                         &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
                         &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
     armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
-    BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+    CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
+    CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+    CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
 
     // Check whether data matches expected Bf16 data
     const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
-    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
-    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
-    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
-    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
-    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
-    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
-    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+    CHECK(data[0] == armnn::BFloat16(0.0f));
+    CHECK(data[1] == armnn::BFloat16(-1.0f));
+    CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+    CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+    CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+    CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
 }
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
 {
     armnn::Graph graph;
 
@@ -152,35 +153,35 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
     input->GetOutputSlot().Connect(fc->GetInputSlot(0));
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                         &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                         &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
                         &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
     armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
-    BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+    CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
+    CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+    CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
 
     // Check whether data matches expected Bf16 data
     const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
-    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
-    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
-    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
-    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
-    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
-    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
-    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+    CHECK(data[0] == armnn::BFloat16(0.0f));
+    CHECK(data[1] == armnn::BFloat16(-1.0f));
+    CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+    CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+    CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+    CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
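
Note on the expected weight values: the hex comments in the checks (e.g. // 0x4073 next to 3.796875f) are the BFloat16 bit patterns of the converted FP32 weights. BFloat16 keeps the sign bit, all 8 exponent bits, and the top 7 mantissa bits of the IEEE-754 binary32 encoding. The sketch below is purely illustrative, not part of this patch and not ArmNN's BFloat16 class (whose converter may also round); Fp32ToBf16Bits is a hypothetical helper name. For the weight values used in this test the discarded low bits are zero, so plain truncation reproduces the commented patterns.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical helper (not an ArmNN API): keep the upper 16 bits of the
// IEEE-754 binary32 encoding (sign + 8 exponent bits + top 7 mantissa bits).
std::uint16_t Fp32ToBf16Bits(float value)
{
    std::uint32_t bits = 0;
    std::memcpy(&bits, &value, sizeof(bits)); // bit-copy without violating aliasing rules
    return static_cast<std::uint16_t>(bits >> 16);
}

int main()
{
    // 3.796875f is 0x40730000 as binary32, so its BFloat16 pattern is 0x4073,
    // matching the "// 0x4073" comment in the test; negating only flips the sign bit.
    std::printf("0x%04X\n", static_cast<unsigned>(Fp32ToBf16Bits(3.796875f)));  // 0x4073
    std::printf("0x%04X\n", static_cast<unsigned>(Fp32ToBf16Bits(-3.796875f))); // 0xC073
    return 0;
}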