author     Ryan OShea <ryan.oshea3@arm.com>      2022-11-07 16:20:48 +0000
committer  ryan.oshea3 <ryan.oshea3@arm.com>     2022-11-16 15:22:50 +0000
commit     31441595009182c985dacbedc70c41ee6664d070 (patch)
tree       248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
parent     bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove Bf16 Conversion tests
* Throw an exception if the m_ReduceFp32ToBf16 optimizer option is set to true
* Provide comments explaining that fast math must be enabled in order to use bf16
* Update docs to inform users to enable fast math for bf16

Execute Network changes:
* Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting of the m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
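With the conversion layers gone, bf16 execution is opted into by enabling fast math on the backend rather than by setting m_ReduceFp32ToBf16. A minimal sketch of that flow through the public API follows; the function name, the "CpuAcc" backend choice, and the surrounding setup are illustrative assumptions, not part of this patch:

    #include <armnn/ArmNN.hpp>

    #include <vector>

    // Sketch (not from this patch): optimize a parsed network with fast math
    // enabled so the backend is free to use bf16 kernels internally.
    armnn::IOptimizedNetworkPtr OptimizeWithFastMath(armnn::INetwork& network,
                                                     armnn::IRuntime& runtime)
    {
        armnn::OptimizerOptions options;
        // After this change, setting options.m_ReduceFp32ToBf16 = true makes
        // Optimize() throw, so it is left at its default of false.

        // "CpuAcc" is an illustrative backend choice; FastMathEnabled is the
        // backend option that now gates bf16 usage.
        armnn::BackendOptions fastMath("CpuAcc", {{"FastMathEnabled", true}});
        options.m_ModelOptions.push_back(fastMath);

        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
        return armnn::Optimize(network, backends, runtime.GetDeviceSpec(), options);
    }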
Diffstat (limited to 'src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp')
-rw-r--r--  src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp | 151
1 file changed, 0 insertions, 151 deletions
diff --git a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp b/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
deleted file mode 100644
index 93d5948d61..0000000000
--- a/src/armnn/test/optimizations/FuseConvertF32BF16IntoConstLayerTests.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <LayersFwd.hpp>
-#include <Network.hpp>
-#include <NetworkUtils.hpp>
-#include <Optimizer.hpp>
-#include <TestUtils.hpp>
-
-#include <armnn/backends/TensorHandle.hpp>
-
-#include <doctest/doctest.h>
-
-TEST_SUITE("Optimizer")
-{
-using namespace armnn;
-using namespace armnn::optimizations;
-
-TEST_CASE("FuseConvertFp32Fp16intoConst")
-{
- Graph graph;
- const unsigned int shape[] = {1, 2, 2, 3};
-
- const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
- const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
- ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
- std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
- ConstTensor constTensor(constTensorInfo, constantValues.data());
- constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- ConvertFp32ToBf16Layer* convertLayer = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
- convertLayer->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
- OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up constant -> convert -> output
- constantLayer->GetOutputSlot().Connect(convertLayer->GetInputSlot(0));
- convertLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::Float32);
- };
- auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::BFloat16);
- };
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- checkConstantFloat32,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<OutputLayer>));
-
- armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- checkConstantBFloat16,
- &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("RevertConstantWeightsToFP32")
-{
- Graph graph;
- const unsigned int shape[] = {1, 2, 2, 3};
-
- const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
- const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);
-
- TensorInfo inputInfo(4, shape, DataType::Float32);
- auto* input = graph.AddLayer<InputLayer>(0, "input0");
- input->GetOutputSlot().SetTensorInfo(inputInfo);
-
- auto* constantLayer = graph.AddLayer<ConstantLayer>("constant");
- std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
- ConstTensor constTensor(constTensorInfo, constantValues.data());
- constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
- constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- ConvertFp32ToBf16Layer* convertLayerInputs = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
- convertLayerInputs->GetOutputSlot().SetTensorInfo(outputConvertInfo);
- ConvertFp32ToBf16Layer* convertLayerWeights = graph.AddLayer<ConvertFp32ToBf16Layer>("convert2");
- convertLayerWeights->GetOutputSlot().SetTensorInfo(outputConvertInfo);
- ConvertFp32ToBf16Layer* convertLayerBiases = graph.AddLayer<ConvertFp32ToBf16Layer>("convert3");
- convertLayerBiases->GetOutputSlot().SetTensorInfo(outputConvertInfo);
-
- auto* biases = graph.AddLayer<armnn::ConstantLayer>("Biases");
- biases->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(constTensor);
- biases->GetOutputSlot().SetTensorInfo(constTensorInfo);
-
- armnn::Convolution2dDescriptor descriptor;
- descriptor.m_BiasEnabled = true;
- auto* conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
- const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
- conv->GetOutputSlot().SetTensorInfo(infoFP32);
-
- auto* output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up Input -> Convert ->
- // Constant -> Convert -> Conv2d -> Output
- // Constant -> Convert ->
- input->GetOutputSlot().Connect(convertLayerInputs->GetInputSlot(0));
- constantLayer->GetOutputSlot().Connect(convertLayerWeights->GetInputSlot(0));
- biases->GetOutputSlot().Connect(convertLayerBiases->GetInputSlot(0));
-
- convertLayerInputs->GetOutputSlot().Connect(conv->GetInputSlot(0));
- convertLayerWeights->GetOutputSlot().Connect(conv->GetInputSlot(1));
- convertLayerBiases->GetOutputSlot().Connect(conv->GetInputSlot(2));
-
- conv->GetOutputSlot().Connect(output->GetInputSlot(0));
-
- auto checkConstantFloat32 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::Float32);
- };
- auto checkConstantBFloat16 = [](const armnn::Layer *const layer) -> bool {
- return IsLayerOfType<ConstantLayer>(layer) &&
- (layer->GetDataType() == DataType::BFloat16);
- };
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkConstantFloat32,
- checkConstantFloat32,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<ConvertFp32ToBf16Layer>,
- &IsLayerOfType<Convolution2dLayer>,
- &IsLayerOfType<OutputLayer>));
-
- armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));
-
- bool revert = RevertConstantWeightsToFP32(conv);
-
- // Erase unconnected layer as occurs during Topological Sort.
- graph.EraseLayer(convertLayerInputs);
-
- CHECK(revert);
- CHECK(constantLayer->GetDataType() == DataType::Float32);
-
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- checkConstantBFloat16,
- checkConstantFloat32,
- &IsLayerOfType<Convolution2dLayer>,
- &IsLayerOfType<OutputLayer>));
-}
-}