From 57ef0088d20dd708ff92222d244ea02f1e1e5216 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Date: Thu, 26 Mar 2020 09:20:43 +0000
Subject: IVGCVSW-4597 Modify BF16 optimizer to Convert only inputs and weights
 of Convolution2d and FullyConnected layers

* Add InsertConvertFp32ToBf16LayersBefore
* Add ConvertWeight to ConvertFp32NetworkToBf16Impl for Conv2d and FullyConnected
* Allow different input and output when input is BF16 and output is FP32
  Conv2d and FullyConnected layers
* Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ic8f92ff28edcae08a72a3114a28f50c4619f919b
---
 .../Fp32NetworkToBf16ConverterTests.cpp            | 148 ++++++++++++++++++++-
 1 file changed, 144 insertions(+), 4 deletions(-)

diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index 90a15487ac..b35f983434 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -12,13 +12,13 @@
 BOOST_AUTO_TEST_SUITE(Optimizer)
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationTest)
+BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
 {
     armnn::Graph graph;
 
     const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
 
-    // Create the simple test network
+    // Create the simple test network without Conv2D/FullyConnected.
     auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
     input->GetOutputSlot().SetTensorInfo(infoFP32);
 
@@ -38,8 +38,148 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationTest)
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
     BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::ConvertBf16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
+                             &IsLayerOfType<armnn::FloorLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
+
+    // Create const tensor fp32 data
+    unsigned int dims[] = { 4, 2, 1, 1 };
+    std::vector<float> floatWeights{ 0.0f, -1.0f,
+                                     3.8f,           // 0x40733333 Round down
+                                     3.1055E+29f,    // 0x707ADC3C Round up
+                                     9.149516E-10f,  // 0x307B7FFF Round down
+                                     -3.8f,          // 0xC0733333 Round down
+                                     -3.1055E+29f,   // 0xF07ADC3C Round up
+                                     -9.149516E-10f  // 0xB07B7FFF Round down
+                                   };
+    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+
+    // Create const bias fp32 data
+    unsigned int biasDims[] {4};
+    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
+    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
+
+    // A network with Convolution2d layer
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(infoFP32);
+
+    armnn::Convolution2dDescriptor descriptor;
+
+    auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
+    conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+    conv->GetOutputSlot().SetTensorInfo(infoFP32);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+    // Connect up the layers
+    input->GetOutputSlot().Connect(conv->GetInputSlot(0));
+    conv->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+    armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
+    BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
+    BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+    BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+
+    // Check whether data matches expected Bf16 data
+    armnn::BFloat16* data = conv->m_Weight->GetTensor<armnn::BFloat16>();
+    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
+    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
+    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f));      // 0x4073
+    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f));  // 0x707B
+    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f));  // 0x307B
+    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f));     // 0xC073
+    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+}
+
+BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
+
+    // Create const tensor fp32 data
+    unsigned int dims[] = { 4, 2, 1, 1 };
+    std::vector<float> floatWeights{ 0.0f, -1.0f,
+                                     3.8f,           // 0x40733333 Round down
+                                     3.1055E+29f,    // 0x707ADC3C Round up
+                                     9.149516E-10f,  // 0x307B7FFF Round down
+                                     -3.8f,          // 0xC0733333 Round down
+                                     -3.1055E+29f,   // 0xF07ADC3C Round up
+                                     -9.149516E-10f  // 0xB07B7FFF Round down
+                                   };
+    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+
+    // Create const bias fp32 data
+    unsigned int biasDims[] {4};
+    std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
+    armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
+
+    // A network with FullyConnected layer
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(infoFP32);
+
+    armnn::FullyConnectedDescriptor descriptor;
+
+    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
+    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
+    fc->GetOutputSlot().SetTensorInfo(infoFP32);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+    // Connect up the layers
+    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+    armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
+    BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
+    BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+    BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+
+    // Check whether data matches expected Bf16 data
+    armnn::BFloat16* data = fc->m_Weight->GetTensor<armnn::BFloat16>();
+    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
+    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
+    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f));      // 0x4073
+    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f));  // 0x707B
+    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f));  // 0x307B
+    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f));     // 0xC073
+    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
 }
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
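
Note on the expected BF16 values: the hex comments in the tests (e.g. 3.8f = 0x40733333
keeping 0x4073, 3.1055E+29f = 0x707ADC3C rounding up to 0x707B) imply that the optimizer
keeps the top 16 bits of the FP32 bit pattern and rounds to nearest on the discarded
lower 16 bits; the bias tensor is deliberately left in FP32, since the commit converts
only inputs and weights. Below is a minimal standalone sketch of that rounding for
checking the comments by hand; Fp32ToBf16 is a hypothetical helper written for
illustration, not armnn::BFloat16's internal API, and it ignores NaN/Inf edge cases.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Truncate an FP32 value to its top 16 bits, rounding to nearest on the
    // discarded lower 16 bits (ties to even) -- the behaviour the expected
    // hex values in the tests above imply. NaN/Inf are not handled.
    uint16_t Fp32ToBf16(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // safe type pun
        // Bias of 0x7FFF, plus 1 when the lowest kept bit is set, makes
        // halfway cases round towards an even mantissa.
        bits += 0x7FFFu + ((bits >> 16) & 1u);
        return static_cast<uint16_t>(bits >> 16);
    }

    int main()
    {
        assert(Fp32ToBf16(3.8f)          == 0x4073); // 0x40733333 rounds down
        assert(Fp32ToBf16(3.1055E+29f)   == 0x707B); // 0x707ADC3C rounds up
        assert(Fp32ToBf16(9.149516E-10f) == 0x307B); // 0x307B7FFF rounds down
        assert(Fp32ToBf16(-3.8f)         == 0xC073); // sign bit carried through
        return 0;
    }

Reading 0x4073 back as BF16 gives 2^1 * (1 + 0x73/128) = 3.796875, which is exactly the
value the tests compare against with armnn::BFloat16(3.796875f).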