//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "LayersFwd.hpp"

#include <Network.hpp>
#include <Optimizer.hpp>
#include <TestUtils.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <doctest/doctest.h>

TEST_SUITE("Optimizer")
{
using namespace armnn;
using namespace armnn::optimizations;

TEST_CASE("ConvertConstFloat16DequantizeToConstFloat32")
{
    Graph graph;
    const unsigned int shape[] = {1, 2, 2, 3};

    const TensorInfo constTensorInfo(4, shape, DataType::Float16, 1.0, 0, true);
    const TensorInfo outputDequantizeInfo(4, shape, DataType::Float32, 1.0, 0, true);

    // Constant layer backed by Float16 tensor data.
    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
    std::vector<float> constantValues(constTensorInfo.GetNumElements(), 4.5f);
    ConstTensor constTensor(constTensorInfo, constantValues.data());
    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);

    DequantizeLayer* dequantizeLayer = graph.AddLayer<DequantizeLayer>("dequantize");
    dequantizeLayer->GetOutputSlot().SetTensorInfo(outputDequantizeInfo);

    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up constant -> dequantize -> output
    constantLayer->GetOutputSlot().Connect(dequantizeLayer->GetInputSlot(0));
    dequantizeLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkConstantFloat16 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float16);
    };
    auto checkConstantFloat32 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float32);
    };

    // Before the optimization: Constant (Float16) -> Dequantize -> Output.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat16,
                        &IsLayerOfType<DequantizeLayer>,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(ConvertConstDequantisationLayersToConstLayers()));

    // After the optimization the Dequantize layer is folded away and the
    // constant layer now carries Float32 data: Constant (Float32) -> Output.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat32,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("ConvertConstInt8DequantizeToConstFloat32")
{
    Graph graph;
    const unsigned int shape[] = {1, 2, 2, 3};

    const TensorInfo constTensorInfo(4, shape, DataType::QAsymmS8, 1.0, 0, true);
    const TensorInfo outputDequantizeInfo(4, shape, DataType::Float32, 1.0, 0, true);

    // Constant layer backed by quantized QAsymmS8 tensor data.
    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
    std::vector<int8_t> constantValues(constTensorInfo.GetNumElements(), 5);
    ConstTensor constTensor(constTensorInfo, constantValues.data());
    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);

    DequantizeLayer* dequantizeLayer = graph.AddLayer<DequantizeLayer>("dequantize");
    dequantizeLayer->GetOutputSlot().SetTensorInfo(outputDequantizeInfo);

    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up constant -> dequantize -> output
    constantLayer->GetOutputSlot().Connect(dequantizeLayer->GetInputSlot(0));
    dequantizeLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkConstantQAsymmS8 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::QAsymmS8);
    };
    auto checkConstantFloat32 = [](const armnn::Layer* const layer) -> bool
    {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float32);
    };

    // Before the optimization: Constant (QAsymmS8) -> Dequantize -> Output.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantQAsymmS8,
                        &IsLayerOfType<DequantizeLayer>,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(ConvertConstDequantisationLayersToConstLayers()));

    // After the optimization the Dequantize layer is folded away and the
    // constant layer now carries Float32 data: Constant (Float32) -> Output.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat32,
                        &IsLayerOfType<OutputLayer>));
}

}