author     Ryan OShea <ryan.oshea3@arm.com>    2022-11-07 16:20:48 +0000
committer  ryan.oshea3 <ryan.oshea3@arm.com>   2022-11-16 15:22:50 +0000
commit     31441595009182c985dacbedc70c41ee6664d070 (patch)
tree       248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
parent     bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
download   armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove Bf16 Conversion tests
* Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
* Provide comments to enable fast math in order to use bf16
* Update docs to inform users to enable fast math for bf16

Execute Network Changes
* Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
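For context, a minimal sketch of how a caller opts in to bf16 after this change, assuming the public armnn::OptimizerOptions and armnn::BackendOptions API; MakeBf16FriendlyOptions is a hypothetical helper name, and the exact exception raised for m_ReduceFp32ToBf16 is not quoted from this patch:

    #include <armnn/ArmNN.hpp>

    // Sketch only: bf16 is no longer requested via m_ReduceFp32ToBf16
    // (setting that option now throws, per this commit). Instead, the
    // backend's "FastMathEnabled" option lets it pick bf16 kernels itself.
    armnn::OptimizerOptions MakeBf16FriendlyOptions()
    {
        armnn::OptimizerOptions options;
        // options.m_ReduceFp32ToBf16 = true;  // now rejected with an exception

        // "FastMathEnabled" is the existing CpuAcc/GpuAcc backend option.
        armnn::BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });
        options.m_ModelOptions.push_back(cpuAcc);
        return options;
    }

Passing these options to armnn::Optimize() leaves the backend free to use bf16 arithmetic where it supports it, which is why ExecuteNetwork's bf16_turbo_mode now requires fast_math_enabled as well.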
Diffstat (limited to 'src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp')
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp | 229 ----------
1 file changed, 0 insertions(+), 229 deletions(-)
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
deleted file mode 100644
index 66893ce1f5..0000000000
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
-
- // Create the simple test network without Conv2D/FullyConnected.
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
- floor->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(floor->GetInputSlot(0));
- floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
- // Create const tensor fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create const bias fp32 data
- unsigned int biasDims[] {4};
- std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
- // A network with Convolution2d layer
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
- auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
- auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(conv->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
- biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
- conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
- Fp32NetworkToBf16Converter()));
-
- CHECK(7 == graph.GetNumLayers());
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo biasTensor = conv->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
- CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
- CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
- CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
- // Check whether data matches expected Bf16 data
- const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
- CHECK(data[0] == armnn::BFloat16(0.0f));
- CHECK(data[1] == armnn::BFloat16(-1.0f));
- CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
- // Create const tensor fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create const bias fp32 data
- unsigned int biasDims[] {4};
- std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
- // A network with FullyConnected layer
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- armnn::FullyConnectedDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
- auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
- biasLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
- Fp32NetworkToBf16Converter()));
-
- CHECK(7 == graph.GetNumLayers());
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo biasTensor = fc->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
- CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
- CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
- CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
- // Check whether data matches expected Bf16 data
- const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
- CHECK(data[0] == armnn::BFloat16(0.0f));
- CHECK(data[1] == armnn::BFloat16(-1.0f));
- CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-}
\ No newline at end of file
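The expected values in the deleted tests (e.g. 0x40733333 -> 0x4073 "Round down", 0x707ADC3C -> 0x707B "Round up") encode round-to-nearest-even truncation of fp32 to bf16. Below is a minimal standalone sketch of that conversion, not Arm NN's armnn::BFloat16 implementation, with NaN/Inf handling omitted for brevity:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // fp32 -> bf16: keep the top 16 bits of the IEEE-754 bit pattern,
    // rounding to nearest even (NaN/Inf handling omitted for brevity).
    static uint16_t Fp32ToBf16(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        const uint32_t lsb = (bits >> 16) & 1u; // lowest bit that survives truncation
        bits += 0x7FFFu + lsb;                  // round half to even
        return static_cast<uint16_t>(bits >> 16);
    }

    int main()
    {
        // Inputs and expected bf16 bit patterns taken from the deleted test data.
        std::printf("0x%04X\n", Fp32ToBf16(3.8f));          // 0x4073 (rounded down)
        std::printf("0x%04X\n", Fp32ToBf16(3.1055E+29f));   // 0x707B (rounded up)
        std::printf("0x%04X\n", Fp32ToBf16(9.149516E-10f)); // 0x307B (rounded down)
        return 0;
    }

Adding 0x7FFF plus the lowest kept bit before truncating implements ties-to-even: values exactly halfway between two bf16 patterns round to the one with an even low bit, matching the "Round up"/"Round down" annotations on the deleted weight data.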