author     Ryan OShea <ryan.oshea3@arm.com>    2022-11-07 16:20:48 +0000
committer  ryan.oshea3 <ryan.oshea3@arm.com>   2022-11-16 15:22:50 +0000
commit     31441595009182c985dacbedc70c41ee6664d070 (patch)
tree       248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/armnn/test
parent     bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
download   armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove Bf16 Conversion tests
* Throw exception if the m_ReduceFp32ToBf16 optimizer option is set to true
* Provide comments to enable fast math in order to use bf16
* Update docs to inform users to enable fast math for bf16

ExecuteNetwork changes:
* Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting of the m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
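For readers updating call sites, a minimal sketch (not part of this patch) of how BF16 execution is requested once m_ReduceFp32ToBf16 is no longer usable. It assumes the existing ArmNN OptimizerOptions/BackendOptions API; the "FastMathEnabled" backend option and m_ModelOptions member are the pre-existing mechanisms the commit message points to, and the choice of CpuAcc is purely illustrative.

// Sketch only - not introduced by this commit.
#include <armnn/ArmNN.hpp>

armnn::OptimizerOptions MakeBf16FriendlyOptimizerOptions()
{
    armnn::OptimizerOptions options;

    // Setting m_ReduceFp32ToBf16 to true now throws an exception. Instead,
    // enable fast math on the backend that will run the network, which is
    // what allows BF16 kernels to be selected.
    armnn::BackendOptions fastMath("CpuAcc", {{ "FastMathEnabled", true }});
    options.m_ModelOptions.push_back(fastMath);

    return options;
}

On the ExecuteNetwork side the same coupling is enforced at the command line: per the commit message, bf16_turbo_mode is only accepted when fast_math_enabled is also set.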
Diffstat (limited to 'src/armnn/test')
-rw-r--r--  src/armnn/test/FloatingPointConverterTest.cpp                          70
-rw-r--r--  src/armnn/test/ShapeInferenceTests.cpp                                 11
-rw-r--r--  src/armnn/test/UtilsTests.cpp                                          48
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp          128
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp      229
-rw-r--r--  src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp 151
6 files changed, 0 insertions, 637 deletions
diff --git a/src/armnn/test/FloatingPointConverterTest.cpp b/src/armnn/test/FloatingPointConverterTest.cpp
index 21a16a3cc0..81384cefae 100644
--- a/src/armnn/test/FloatingPointConverterTest.cpp
+++ b/src/armnn/test/FloatingPointConverterTest.cpp
@@ -5,7 +5,6 @@
#include <armnnUtils/FloatingPointConverter.hpp>
-#include <BFloat16.hpp>
#include <Half.hpp>
#include <vector>
@@ -55,73 +54,4 @@ TEST_CASE("TestConvertFp16ToFp32")
}
}
-TEST_CASE("TestConvertFloat32ToBFloat16")
-{
- float floatArray[] = { 1.704735E38f, // 0x7F004000 round down
- 0.0f, // 0x00000000 round down
- 2.2959E-41f, // 0x00004000 round down
- 1.7180272E38f, // 0x7F014000 round down
- 9.18355E-41f, // 0x00010000 round down
- 1.14794E-40f, // 0x00014000 round down
- 4.5918E-41f, // 0x00008000 round down
- -1.708058E38f, // 0xFF008000 round down
- -4.3033756E37f, // 0xFE018000 round up
- 1.60712E-40f, // 0x0001C000 round up
- -2.0234377f, // 0xC0018001 round up
- -1.1800863E-38f,// 0x80808001 round up
- 4.843037E-35f, // 0x0680C000 round up
- 3.9999998f, // 0x407FFFFF round up
- std::numeric_limits<float>::max(), // 0x7F7FFFFF max positive value
- std::numeric_limits<float>::lowest(), // 0xFF7FFFFF max negative value
- 1.1754942E-38f, // 0x007FFFFF min positive value
- -1.1754942E-38f // 0x807FFFFF min negative value
- };
- uint16_t expectedResult[] = { 0x7F00,
- 0x0000,
- 0x0000,
- 0x7F01,
- 0x0001,
- 0x0001,
- 0x0000,
- 0xFF00,
- 0xFE02,
- 0x0002,
- 0xC002,
- 0x8081,
- 0x0681,
- 0x4080,
- 0x7F80,
- 0xFF80,
- 0x0080,
- 0x8080
- };
- size_t numFloats = sizeof(floatArray) / sizeof(floatArray[0]);
-
- std::vector<armnn::BFloat16> convertedBuffer(numFloats);
-
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(floatArray, numFloats, convertedBuffer.data());
-
- for (size_t i = 0; i < numFloats; i++)
- {
- armnn::BFloat16 actual = convertedBuffer[i];
- CHECK_EQ(expectedResult[i], actual.Val());
- }
-}
-
-TEST_CASE("TestConvertBFloat16ToFloat32")
-{
- uint16_t bf16Array[] = { 16256, 16320, 38699, 16384, 49156, 32639 };
- size_t numFloats = sizeof(bf16Array) / sizeof(bf16Array[0]);
- float expectedResult[] = { 1.0f, 1.5f, -5.525308E-25f, 2.0f, -2.0625f, 3.3895314E38f };
- std::vector<float> convertedBuffer(numFloats, 0.0f);
-
- armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(bf16Array, numFloats, convertedBuffer.data());
-
- for (size_t i = 0; i < numFloats; i++)
- {
- float actual = convertedBuffer[i];
- CHECK_EQ(expectedResult[i], actual);
- }
-}
-
}
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index a3800ade09..1035a3b6fd 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -250,17 +250,6 @@ TEST_CASE("ConstantTest")
CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
}
-TEST_CASE("ConvertBf16ToFp32Test")
-{
- CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
-}
-
-TEST_CASE("ConvertFp16ToBf16Test")
-{
- const TensorShape tensorShape{5, 7, 6, 2};
- CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
-}
-
TEST_CASE("ConvertFp16ToFp32Test")
{
CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index 63884374b3..067c8612fe 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -123,54 +123,6 @@ TEST_CASE("BFloatType")
CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
}
-TEST_CASE("Float32ToBFloat16Test")
-{
- // LSB = 0, R = 0 -> round down
- armnn::BFloat16 roundDown0 = armnn::BFloat16::Float32ToBFloat16(1.704735E38f); // 0x7F004000
- CHECK_EQ(roundDown0.Val(), 0x7F00);
- // LSB = 1, R = 0 -> round down
- armnn::BFloat16 roundDown1 = armnn::BFloat16::Float32ToBFloat16(9.18355E-41f); // 0x00010000
- CHECK_EQ(roundDown1.Val(), 0x0001);
- // LSB = 0, R = 1 all 0 -> round down
- armnn::BFloat16 roundDown2 = armnn::BFloat16::Float32ToBFloat16(1.14794E-40f); // 0x00014000
- CHECK_EQ(roundDown2.Val(), 0x0001);
- // LSB = 1, R = 1 -> round up
- armnn::BFloat16 roundUp = armnn::BFloat16::Float32ToBFloat16(-2.0234377f); // 0xC0018001
- CHECK_EQ(roundUp.Val(), 0xC002);
- // LSB = 0, R = 1 -> round up
- armnn::BFloat16 roundUp1 = armnn::BFloat16::Float32ToBFloat16(4.843037E-35f); // 0x0680C000
- CHECK_EQ(roundUp1.Val(), 0x0681);
- // Max positive value -> infinity
- armnn::BFloat16 maxPositive = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::max()); // 0x7F7FFFFF
- CHECK_EQ(maxPositive, armnn::BFloat16::Inf());
- // Max negative value -> -infinity
- armnn::BFloat16 maxNeg = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::lowest()); // 0xFF7FFFFF
- CHECK_EQ(maxNeg.Val(), 0xFF80);
- // Min positive value
- armnn::BFloat16 minPositive = armnn::BFloat16::Float32ToBFloat16(1.1754942E-38f); // 0x007FFFFF
- CHECK_EQ(minPositive.Val(), 0x0080);
- // Min negative value
- armnn::BFloat16 minNeg = armnn::BFloat16::Float32ToBFloat16(-1.1754942E-38f); // 0x807FFFFF
- CHECK_EQ(minNeg.Val(), 0x8080);
-}
-
-TEST_CASE("BFloat16ToFloat32Test")
-{
- armnn::BFloat16 bf0(1.5f);
- CHECK_EQ(bf0.ToFloat32(), 1.5f);
- armnn::BFloat16 bf1(-5.525308E-25f);
- CHECK_EQ(bf1.ToFloat32(), -5.525308E-25f);
- armnn::BFloat16 bf2(-2.0625f);
- CHECK_EQ(bf2.ToFloat32(), -2.0625f);
- uint16_t v = 32639;
- armnn::BFloat16 bf3(v);
- CHECK_EQ(bf3.ToFloat32(), 3.3895314E38f);
- // Infinity
- CHECK_EQ(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
- // NaN
- CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
-}
-
TEST_CASE("GraphTopologicalSortSimpleTest")
{
std::map<int, std::vector<int>> graph;
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
deleted file mode 100644
index 4aacf7f4fe..0000000000
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <BFloat16.hpp>
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-using namespace armnn;
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("ConvertConstantsFloatToBFloatTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::BFloat16);
-
- // Create const tensor from fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create simple test network
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(info);
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
- fc->GetOutputSlot().SetTensorInfo(info);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- // Check tensor data type before conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
-
- // Check tensor data type after conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
- // Check whether data matches expected Bf16 data
- const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
- CHECK(data[0] == BFloat16(0.0f));
- CHECK(data[1] == BFloat16(-1.0f));
- CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("ConvertConstantsBFloatToFloatTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
-
- // Create the BFloat16 precision input data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> convWeightsData{ 0.f, -1.f,
- 3.796875f, // 0x4073
- 3.1072295E29f, // 0x707B
- 9.131327E-10f, // 0x307B
- -3.796875f, // 0xC073
- -3.1072295E29f, // 0xF07B
- -9.131327E-10f // 0xB07B
- };
- std::vector<uint16_t> bfWeights(8);
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
- bfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);
-
- //Create the simple test network
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(info);
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
- fc->GetOutputSlot().SetTensorInfo(info);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- //Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- //Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
-
- //Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
- // Now test the data matches float32 data
- const float* data = fc->m_Weight->GetConstTensor<float>();
- CHECK(data[0] == 0.0f);
- CHECK(data[1] == -1.0f);
- CHECK(data[2] == 3.796875f);
- CHECK(data[3] == 3.1072295E29f);
- CHECK(data[4] == 9.131327E-10f);
- CHECK(data[5] == -3.796875f);
- CHECK(data[6] == -3.1072295E29f);
- CHECK(data[7] == -9.131327E-10f);
-}
-
-}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
deleted file mode 100644
index 66893ce1f5..0000000000
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
-
- // Create the simple test network without Conv2D/FullyConnected.
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
- floor->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(floor->GetInputSlot(0));
- floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
- // Create const tensor fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create const bias fp32 data
- unsigned int biasDims[] {4};
- std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
- // A network with Convolution2d layer
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
- auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
- auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(conv->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
- biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
- conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
- Fp32NetworkToBf16Converter()));
-
- CHECK(7 == graph.GetNumLayers());
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo biasTensor = conv->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
- CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
- CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
- CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
- // Check whether data matches expected Bf16 data
- const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
- CHECK(data[0] == armnn::BFloat16(0.0f));
- CHECK(data[1] == armnn::BFloat16(-1.0f));
- CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
- // Create const tensor fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create const bias fp32 data
- unsigned int biasDims[] {4};
- std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
- // A network with FullyConnected layer
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- armnn::FullyConnectedDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
- auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
- biasLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
- Fp32NetworkToBf16Converter()));
-
- CHECK(7 == graph.GetNumLayers());
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo biasTensor = fc->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
- CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
- CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
- CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
- // Check whether data matches expected Bf16 data
- const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
- CHECK(data[0] == armnn::BFloat16(0.0f));
- CHECK(data[1] == armnn::BFloat16(-1.0f));
- CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp b/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
deleted file mode 100644
index 93d5948d61..0000000000
--- a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <LayersFwd.hpp>
-#include <Network.hpp>
-#include <NetworkUtils.hpp>
-#include <Optimizer.hpp>
-#include <TestUtils.hpp>
-
-#include <armnn/backends/TensorHandle.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn;
-using namespace armnn::optimizations;
-
-TEST_CASE("FuseConvertFp32Fp16intoConst")
-{
- Graph graph;
- const unsigned int shape[] = {1, 2, 2, 3};
-
- const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
- const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
- ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
- std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
- ConstTensor constTensor(constTensorInfo, constantValues.data());
- constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- ConvertFp32ToBf16Layer* convertLayer = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
- convertLayer->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
- OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up constant -> convert -> output
- constantLayer->GetOutputSlot().Connect(convertLayer->GetInputSlot(0));
- convertLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::Float32);
- };
- auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::BFloat16);
- };
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- checkConstantFloat32,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<OutputLayer>));
-
- armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- checkConstantBFloat16,
- &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("RevertConstantWeightsToFP32")
-{
- Graph graph;
- const unsigned int shape[] = {1, 2, 2, 3};
-
- const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
- const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
- TensorInfo inputInfo(4, shape, DataType::Float32);
- auto* input = graph.AddLayer<InputLayer>(0, "input0");
- input->GetOutputSlot().SetTensorInfo(inputInfo);
-
- auto* constantLayer = graph.AddLayer<ConstantLayer>("constant");
- std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
- ConstTensor constTensor(constTensorInfo, constantValues.data());
- constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- ConvertFp32ToBf16Layer* convertLayerInputs = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
- convertLayerInputs->GetOutputSlot().SetTensorInfo(outputConvertInfo);
- ConvertFp32ToBf16Layer* convertLayerWeights = graph.AddLayer<ConvertFp32ToBf16Layer>("convert2");
- convertLayerWeights->GetOutputSlot().SetTensorInfo(outputConvertInfo);
- ConvertFp32ToBf16Layer* convertLayerBiases = graph.AddLayer<ConvertFp32ToBf16Layer>("convert3");
- convertLayerBiases->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
- auto* biases = graph.AddLayer<armnn::ConstantLayer>("Biases");
- biases->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(constTensor);
- biases->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
- auto* conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
- conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto* output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up Input -> Convert ->
- // Constant -> Convert -> Conv2d -> Output
- // Constant -> Convert ->
- input->GetOutputSlot().Connect(convertLayerInputs->GetInputSlot(0));
- constantLayer->GetOutputSlot().Connect(convertLayerWeights->GetInputSlot(0));
- biases->GetOutputSlot().Connect(convertLayerBiases->GetInputSlot(0));
-
- convertLayerInputs->GetOutputSlot().Connect(conv->GetInputSlot(0));
- convertLayerWeights->GetOutputSlot().Connect(conv->GetInputSlot(1));
- convertLayerBiases->GetOutputSlot().Connect(conv->GetInputSlot(2));
-
- conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::Float32);
- };
- auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::BFloat16);
- };
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkConstantFloat32,
- checkConstantFloat32,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<Convolution2dLayer>,
- &IsLayerOfType<OutputLayer>));
-
- armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
- bool revert = RevertConstantWeightsToFP32(conv);
-
- // Erase unconnected layer as occurs during Topological Sort.
- graph.EraseLayer(convertLayerInputs);
-
- CHECK(revert);
- CHECK(constantLayer->GetDataType() == DataType::Float32);
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkConstantBFloat16,
- checkConstantFloat32,
- &IsLayerOfType<Convolution2dLayer>,
- &IsLayerOfType<OutputLayer>));
-}
-}