Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp           | 128
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp       | 229
-rw-r--r--  src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp | 151
3 files changed, 0 insertions, 508 deletions
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
deleted file mode 100644
index 4aacf7f4fe..0000000000
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <BFloat16.hpp>
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-using namespace armnn;
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("ConvertConstantsFloatToBFloatTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::BFloat16);
-
- // Create const tensor from fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create simple test network
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(info);
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
- fc->GetOutputSlot().SetTensorInfo(info);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- // Check tensor data type before conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
-
- // Check tensor data type after conversion
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
- // Check whether data matches expected Bf16 data
- const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
- CHECK(data[0] == BFloat16(0.0f));
- CHECK(data[1] == BFloat16(-1.0f));
- CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("ConvertConstantsBFloatToFloatTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
-
- // Create the BFloat16 precision input data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> convWeightsData{ 0.f, -1.f,
- 3.796875f, // 0x4073
- 3.1072295E29f, // 0x707B
- 9.131327E-10f, // 0x307B
- -3.796875f, // 0xC073
- -3.1072295E29f, // 0xF07B
- -9.131327E-10f // 0xB07B
- };
- std::vector<uint16_t> bfWeights(8);
- armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
- bfWeights.data());
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);
-
- //Create the simple test network
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(info);
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
- fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
- fc->GetOutputSlot().SetTensorInfo(info);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- //Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- //Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
-
- //Test the tensor info is correct.
- CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
- // Now test the data matches float32 data
- const float* data = fc->m_Weight->GetConstTensor<float>();
- CHECK(data[0] == 0.0f);
- CHECK(data[1] == -1.0f);
- CHECK(data[2] == 3.796875f);
- CHECK(data[3] == 3.1072295E29f);
- CHECK(data[4] == 9.131327E-10f);
- CHECK(data[5] == -3.796875f);
- CHECK(data[6] == -3.1072295E29f);
- CHECK(data[7] == -9.131327E-10f);
-}
-
-}
\ No newline at end of file
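
Note: the expected values in ConvertConstantsFloatToBFloatTest follow from round-to-nearest-even
truncation of the fp32 bit pattern to its top 16 bits. A minimal standalone sketch reproducing
the hex comments above (a hypothetical helper, not ArmNN's FloatingPointConverter, and NaN
handling is omitted):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Round-to-nearest-even narrowing of fp32 to bf16.
    static uint16_t Fp32ToBf16Bits(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits)); // reinterpret the fp32 bit pattern
        bits += 0x7FFFu + ((bits >> 16) & 1u);    // add the RNE rounding bias
        return static_cast<uint16_t>(bits >> 16); // keep the top 16 bits
    }

    int main()
    {
        std::printf("%#06x\n", static_cast<unsigned>(Fp32ToBf16Bits(3.8f)));          // 0x4073 (rounds down)
        std::printf("%#06x\n", static_cast<unsigned>(Fp32ToBf16Bits(3.1055e+29f)));   // 0x707b (rounds up)
        std::printf("%#06x\n", static_cast<unsigned>(Fp32ToBf16Bits(9.149516e-10f))); // 0x307b (rounds down)
        return 0;
    }

For example, 3.8f is 0x40733333: the discarded half 0x3333 is below the 0x8000 midpoint, so the
kept half stays 0x4073 (3.796875f), exactly the value the test asserts.
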
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
deleted file mode 100644
index 66893ce1f5..0000000000
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ /dev/null
@@ -1,229 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <TestUtils.hpp>
-
-#include <Optimizer.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn::optimizations;
-
-TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
-
- // Create the simple test network without Conv2D/FullyConnected.
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
- floor->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(floor->GetInputSlot(0));
- floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::FloorLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
- // Create const tensor fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create const bias fp32 data
- unsigned int biasDims[] {4};
- std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
- // A network with Convolution2d layer
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
- auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
- auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(conv->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(1));
- biasLayer->GetOutputSlot(0).Connect(conv->GetInputSlot(2));
- conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
- Fp32NetworkToBf16Converter()));
-
- CHECK(7 == graph.GetNumLayers());
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::Convolution2dLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo weightTensor = conv->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo biasTensor = conv->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
- CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
- CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
- CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
- // Check whether data matches expected Bf16 data
- const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
- CHECK(data[0] == armnn::BFloat16(0.0f));
- CHECK(data[1] == armnn::BFloat16(-1.0f));
- CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
-{
- armnn::Graph graph;
-
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
-
- // Create const tensor fp32 data
- unsigned int dims[] = { 4, 2, 1, 1 };
- std::vector<float> floatWeights{ 0.0f, -1.0f,
- 3.8f, // 0x40733333 Round down
- 3.1055E+29f, // 0x707ADC3C Round up
- 9.149516E-10f, // 0x307B7FFF Round down
- -3.8f, // 0xC0733333 Round down
- -3.1055E+29f, // 0xF07ADC3C Round up
- -9.149516E-10f // 0xB07B7FFF Round down
- };
- armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
-
- // Create const bias fp32 data
- unsigned int biasDims[] {4};
- std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
- armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32, 0.0f, 0, true), floatBias);
-
- // A network with FullyConnected layer
- auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
- input->GetOutputSlot().SetTensorInfo(infoFP32);
-
- armnn::FullyConnectedDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
-
- auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
- fc->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("Weights");
- weightsLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(weights);
- weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsLayer->m_LayerOutput->GetTensorInfo());
-
- auto biasLayer = graph.AddLayer<armnn::ConstantLayer>("Bias");
- biasLayer->m_LayerOutput = std::make_shared<armnn::ScopedTensorHandle>(bias);
- biasLayer->GetOutputSlot(0).SetTensorInfo(biasLayer->m_LayerOutput->GetTensorInfo());
-
- auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
- // Connect up the layers
- input->GetOutputSlot().Connect(fc->GetInputSlot(0));
- weightsLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(1));
- biasLayer->GetOutputSlot(0).Connect(fc->GetInputSlot(2));
- fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- // Run the optimizer
- armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs(),
- Fp32NetworkToBf16Converter()));
-
- CHECK(7 == graph.GetNumLayers());
- CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConstantLayer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>,
- &IsLayerOfType<armnn::FullyConnectedLayer>,
- &IsLayerOfType<armnn::OutputLayer>));
-
- armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo weightTensor = fc->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo biasTensor = fc->GetInputSlot(2).GetConnectedOutputSlot()->GetTensorInfo();
- armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
- CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
- CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
- CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
- CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((weightTensor.GetDataType() == armnn::DataType::BFloat16));
- CHECK((biasTensor.GetDataType() == armnn::DataType::Float32));
- CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
-
- // Check whether data matches expected Bf16 data
- const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
- CHECK(data[0] == armnn::BFloat16(0.0f));
- CHECK(data[1] == armnn::BFloat16(-1.0f));
- CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
- CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
- CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
- CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
- CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
- CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
-}
-
-}
\ No newline at end of file
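
Note: the data-type checks in these two tests pin down a mixed-precision contract rather than a
full bf16 pipeline: inputs and weights are narrowed to BFloat16, while the bias, the accumulator,
and the layer output remain Float32. A minimal sketch of that contract under those assumptions
(hypothetical helpers, not ArmNN API):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // bf16 is the top half of an fp32 bit pattern, so widening is exact.
    static float Bf16ToFp32(uint16_t bf16)
    {
        uint32_t bits = static_cast<uint32_t>(bf16) << 16;
        float value;
        std::memcpy(&value, &bits, sizeof(value));
        return value;
    }

    // Dot product with bf16 operands, fp32 bias, fp32 accumulation.
    static float DotBf16(const std::vector<uint16_t>& input,
                         const std::vector<uint16_t>& weights,
                         float bias)
    {
        float acc = bias; // bias and accumulator stay fp32
        for (size_t i = 0; i < input.size(); ++i)
        {
            acc += Bf16ToFp32(input[i]) * Bf16ToFp32(weights[i]);
        }
        return acc; // layer output stays fp32
    }

    int main()
    {
        // 1.0f and 2.0f are exactly representable in bf16: 0x3F80 and 0x4000.
        std::vector<uint16_t> in{ 0x3F80, 0x4000 };
        std::vector<uint16_t> w { 0x4000, 0x3F80 };
        return DotBf16(in, w, 0.0f) == 4.0f ? 0 : 1;
    }
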
diff --git a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp b/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
deleted file mode 100644
index 93d5948d61..0000000000
--- a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <LayersFwd.hpp>
-#include <Network.hpp>
-#include <NetworkUtils.hpp>
-#include <Optimizer.hpp>
-#include <TestUtils.hpp>
-
-#include <armnn/backends/TensorHandle.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn;
-using namespace armnn::optimizations;
-
-TEST_CASE("FuseConvertFp32Fp16intoConst")
-{
- Graph graph;
- const unsigned int shape[] = {1, 2, 2, 3};
-
- const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
- const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
- ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
- std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
- ConstTensor constTensor(constTensorInfo, constantValues.data());
- constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- ConvertFp32ToBf16Layer* convertLayer = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
- convertLayer->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
- OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up constant -> convert -> output
- constantLayer->GetOutputSlot().Connect(convertLayer->GetInputSlot(0));
- convertLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::Float32);
- };
- auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::BFloat16);
- };
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- checkConstantFloat32,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<OutputLayer>));
-
- armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- checkConstantBFloat16,
- &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("RevertConstantWeightsToFP32")
-{
- Graph graph;
- const unsigned int shape[] = {1, 2, 2, 3};
-
- const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
- const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
- TensorInfo inputInfo(4, shape, DataType::Float32);
- auto* input = graph.AddLayer<InputLayer>(0, "input0");
- input->GetOutputSlot().SetTensorInfo(inputInfo);
-
- auto* constantLayer = graph.AddLayer<ConstantLayer>("constant");
- std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
- ConstTensor constTensor(constTensorInfo, constantValues.data());
- constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- ConvertFp32ToBf16Layer* convertLayerInputs = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
- convertLayerInputs->GetOutputSlot().SetTensorInfo(outputConvertInfo);
- ConvertFp32ToBf16Layer* convertLayerWeights = graph.AddLayer<ConvertFp32ToBf16Layer>("convert2");
- convertLayerWeights->GetOutputSlot().SetTensorInfo(outputConvertInfo);
- ConvertFp32ToBf16Layer* convertLayerBiases = graph.AddLayer<ConvertFp32ToBf16Layer>("convert3");
- convertLayerBiases->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
- auto* biases = graph.AddLayer<armnn::ConstantLayer>("Biases");
- biases->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(constTensor);
- biases->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
- auto* conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
- conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto* output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up Input -> Convert ->
- // Constant -> Convert -> Conv2d -> Output
- // Constant -> Convert ->
- input->GetOutputSlot().Connect(convertLayerInputs->GetInputSlot(0));
- constantLayer->GetOutputSlot().Connect(convertLayerWeights->GetInputSlot(0));
- biases->GetOutputSlot().Connect(convertLayerBiases->GetInputSlot(0));
-
- convertLayerInputs->GetOutputSlot().Connect(conv->GetInputSlot(0));
- convertLayerWeights->GetOutputSlot().Connect(conv->GetInputSlot(1));
- convertLayerBiases->GetOutputSlot().Connect(conv->GetInputSlot(2));
-
- conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::Float32);
- };
- auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::BFloat16);
- };
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkConstantFloat32,
- checkConstantFloat32,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<Convolution2dLayer>,
- &IsLayerOfType<OutputLayer>));
-
- armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
- bool revert = RevertConstantWeightsToFP32(conv);
-
- // Erase unconnected layer as occurs during Topological Sort.
- graph.EraseLayer(convertLayerInputs);
-
- CHECK(revert);
- CHECK(constantLayer->GetDataType() == DataType::Float32);
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkConstantBFloat16,
- checkConstantFloat32,
- &IsLayerOfType<Convolution2dLayer>,
- &IsLayerOfType<OutputLayer>));
-}
-}
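
Note: RevertConstantWeightsToFP32 in the last test turns a BFloat16 constant back into Float32;
the widening direction of the conversion is exact, since every bf16 value is an fp32 value whose
low 16 mantissa bits are zero. A standalone sketch of that widening (a hypothetical helper, not
the ArmNN implementation):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Widen a bf16 payload back to fp32 by zero-padding the mantissa.
    static std::vector<float> WidenBf16ToFp32(const std::vector<uint16_t>& bf16Data)
    {
        std::vector<float> fp32Data;
        fp32Data.reserve(bf16Data.size());
        for (uint16_t half : bf16Data)
        {
            uint32_t bits = static_cast<uint32_t>(half) << 16; // pad with zeros
            float value;
            std::memcpy(&value, &bits, sizeof(value));
            fp32Data.push_back(value);
        }
        return fp32Data;
    }

    int main()
    {
        // 0x4073 is the bf16 produced from 3.8f in the tests above; widening
        // recovers 3.796875f, not the original 3.8f -- the narrowing loss is permanent.
        return WidenBf16ToFp32({ 0x4073 })[0] == 3.796875f ? 0 : 1;
    }
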