//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

// Note: the original include targets were lost; these are the headers this
// test plausibly needs (layer types, Optimizer, RevertConstantWeightsToFP32,
// ScopedTensorHandle, CheckSequence/IsLayerOfType and doctest).
#include <LayersFwd.hpp>
#include <Network.hpp>
#include <NetworkUtils.hpp>
#include <Optimizer.hpp>
#include <TestUtils.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <doctest/doctest.h>

TEST_SUITE("Optimizer")
{
using namespace armnn;
using namespace armnn::optimizations;

TEST_CASE("FuseConvertFp32Fp16intoConst")
{
    Graph graph;
    const unsigned int shape[] = {1, 2, 2, 3};

    const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
    const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);

    // Constant layer holding Float32 data.
    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
    ConstTensor constTensor(constTensorInfo, constantValues.data());
    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);

    ConvertFp32ToBf16Layer* convertLayer = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
    convertLayer->GetOutputSlot().SetTensorInfo(outputConvertInfo);

    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up constant -> convert -> output
    constantLayer->GetOutputSlot().Connect(convertLayer->GetInputSlot(0));
    convertLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkConstantFloat32 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float32);
    };
    auto checkConstantBFloat16 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::BFloat16);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat32,
                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));

    // The convert layer has been fused into the constant,
    // which now outputs BFloat16 directly.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantBFloat16,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("RevertConstantWeightsToFP32")
{
    Graph graph;
    const unsigned int shape[] = {1, 2, 2, 3};

    const TensorInfo constTensorInfo(4, shape, DataType::Float32, 1.0, 0, true);
    const TensorInfo outputConvertInfo(4, shape, DataType::BFloat16, 1.0, 0, true);

    TensorInfo inputInfo(4, shape, DataType::Float32);
    auto* input = graph.AddLayer<InputLayer>(0, "input0");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    auto* constantLayer = graph.AddLayer<ConstantLayer>("constant");
    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 3.1416f);
    ConstTensor constTensor(constTensorInfo, constantValues.data());
    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);

    ConvertFp32ToBf16Layer* convertLayerInputs = graph.AddLayer<ConvertFp32ToBf16Layer>("convert");
    convertLayerInputs->GetOutputSlot().SetTensorInfo(outputConvertInfo);
    ConvertFp32ToBf16Layer* convertLayerWeights = graph.AddLayer<ConvertFp32ToBf16Layer>("convert2");
    convertLayerWeights->GetOutputSlot().SetTensorInfo(outputConvertInfo);
    ConvertFp32ToBf16Layer* convertLayerBiases = graph.AddLayer<ConvertFp32ToBf16Layer>("convert3");
    convertLayerBiases->GetOutputSlot().SetTensorInfo(outputConvertInfo);

    auto* biases = graph.AddLayer<ConstantLayer>("Biases");
    biases->m_LayerOutput = std::make_unique<ScopedTensorHandle>(constTensor);
    biases->GetOutputSlot().SetTensorInfo(constTensorInfo);

    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_BiasEnabled = true;
    auto* conv = graph.AddLayer<Convolution2dLayer>(descriptor, "conv2d");
    const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
    conv->GetOutputSlot().SetTensorInfo(infoFP32);

    auto* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up Input    -> Convert ->
    //            Constant -> Convert -> Conv2d -> Output
    //            Constant -> Convert ->
    input->GetOutputSlot().Connect(convertLayerInputs->GetInputSlot(0));
    constantLayer->GetOutputSlot().Connect(convertLayerWeights->GetInputSlot(0));
    biases->GetOutputSlot().Connect(convertLayerBiases->GetInputSlot(0));

    convertLayerInputs->GetOutputSlot().Connect(conv->GetInputSlot(0));
    convertLayerWeights->GetOutputSlot().Connect(conv->GetInputSlot(1));
    convertLayerBiases->GetOutputSlot().Connect(conv->GetInputSlot(2));

    conv->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkConstantFloat32 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float32);
    };
    auto checkConstantBFloat16 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::BFloat16);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkConstantFloat32,
                        checkConstantFloat32,
                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<ConvertFp32ToBf16Layer>,
                        &IsLayerOfType<Convolution2dLayer>,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FuseConversionLayersIntoConstLayers()));

    // Revert the convolution's now-BFloat16 constant weights back to Float32.
    bool revert = RevertConstantWeightsToFP32(conv);

    // Erase unconnected layer as occurs during Topological Sort.
    graph.EraseLayer(convertLayerInputs);

    CHECK(revert);
    CHECK(constantLayer->GetDataType() == DataType::Float32);

    // The biases constant stays fused as BFloat16; the weights constant is Float32 again.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkConstantBFloat16,
                        checkConstantFloat32,
                        &IsLayerOfType<Convolution2dLayer>,
                        &IsLayerOfType<OutputLayer>));
}

}