From ec67a0f08e0f96a5aebf3cac65331c67f6649f5e Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Fri, 25 Nov 2022 13:55:24 +0000
Subject: IVGCVSW-7209 Remove deprecated code due to be removed in 23.02

* Removed weights and bias from Convolution, DepthwiseConv & FullyConnected
  layers
* Removed the weight and bias ConstTensorHandles from the QueueDescriptors
* Updated Workloads to take tensors from WorkloadInfo rather than the
  QueueDescriptors
* Removed unused RedirectMembersToConstantInputs optimization and tests.

Signed-off-by: Teresa Charlin
Signed-off-by: Mike Kelly
Change-Id: I9ffcdc4a1c0dff725539dd69fc435b700bd98a56
---
 .../ConvertConstantsFloatToHalfTests.cpp      | 21 ++++--
 .../ConvertConstantsHalfToFloatTests.cpp      | 17 +++--
 .../Fp32NetworkToFp16ConverterTests.cpp       | 15 +++-
 .../test/optimizations/FuseBatchNormTests.cpp | 27 +++----
 .../RedirectMembersToConstantInputsTests.cpp  | 85 ----------------------
 5 files changed, 48 insertions(+), 117 deletions(-)
 delete mode 100644 src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
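The updated tests below all follow the same pattern: constant weights are
wired into a layer through an input slot (a ConstantLayer feeding slot 1)
instead of being stored on the layer as m_Weight/m_Bias. The sketch below
shows that pattern at the public INetwork API level. It is illustrative
only and not part of this patch; the function name BuildFcNetwork and the
tensor-info parameters are placeholders.

    // Sketch: weights supplied to FullyConnected as a ConstantLayer input.
    // Assumes Arm NN's public INetwork API; BuildFcNetwork is hypothetical.
    #include <armnn/ArmNN.hpp>

    armnn::INetworkPtr BuildFcNetwork(const armnn::ConstTensor& weights,
                                      const armnn::TensorInfo& inputInfo,
                                      const armnn::TensorInfo& outputInfo)
    {
        using namespace armnn;
        INetworkPtr network = INetwork::Create();

        FullyConnectedDescriptor desc;   // No weight members to set here:
        desc.m_BiasEnabled = false;      // weights arrive via input slot 1.

        IConnectableLayer* input  = network->AddInputLayer(0, "input");
        IConnectableLayer* constW = network->AddConstantLayer(weights, "weights");
        IConnectableLayer* fc     = network->AddFullyConnectedLayer(desc, "fc");
        IConnectableLayer* output = network->AddOutputLayer(0, "output");

        input->GetOutputSlot(0).SetTensorInfo(inputInfo);
        constW->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
        fc->GetOutputSlot(0).SetTensorInfo(outputInfo);

        input->GetOutputSlot(0).Connect(fc->GetInputSlot(0));  // slot 0: data
        constW->GetOutputSlot(0).Connect(fc->GetInputSlot(1)); // slot 1: weights
        fc->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        return network;
    }

On the workload side, per the commit message, backends read the weight and
bias TensorInfos from WorkloadInfo::m_InputTensorInfos (slots 1 and 2 for
FullyConnected) rather than from ConstTensorHandles held on the
QueueDescriptor.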
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 34e5f6d3b6..118907e703 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -1,12 +1,12 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include <TestUtils.hpp>
 
-#include <Optimizer.hpp>
 #include <Half.hpp>
+#include <Optimizer.hpp>
 
 #include <doctest/doctest.h>
 
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsFloatToHalfTest")
     // Create const tensor from fp32 data
     unsigned int dims[] = { 4, 1, 1, 1 };
     std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);
+    armnn::TensorInfo weightsInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true);
+    armnn::ConstTensor weights(weightsInfo, floatWeights);
 
     // Create simple test network
     auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
     input->GetOutputSlot().SetTensorInfo(info);
 
     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
     fc->GetOutputSlot().SetTensorInfo(info);
 
+    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+    weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+    weightsLayer->GetOutputSlot().SetTensorInfo(weightsInfo);
+
     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
 
     // Connect up the layers
     input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+    weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     // Check tensor data type before conversion
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+    CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
 
     // Check tensor data type after conversion
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 
     // Check whether data matches expected fp16 data
-    const Half* data = fc->m_Weight->GetConstTensor<Half>();
+    const Half* data = weightsLayer->m_LayerOutput->GetConstTensor<Half>();
     CHECK(data[0] == Half(1.0f));
     CHECK(data[1] == Half(2.0f));
     CHECK(data[2] == Half(3.0f));
@@ -100,12 +105,14 @@ TEST_CASE("ConvertConstantsFloatToHalfTest_constant")
     fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
     // Check tensor data type before conversion
+    CHECK(5 == graph.GetNumLayers());
     CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
 
     // Check tensor data type after conversion
+    CHECK(5 == graph.GetNumLayers());
     CHECK(weights->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 
     // Check whether weights data matches expected fp16 data
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 4c453cc799..778d7b0814 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -25,33 +25,38 @@ TEST_CASE("ConvertConstantsHalfToFloatTest")
     std::vector<uint16_t> halfWeights(4);
     armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
                                                            halfWeights.data());
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true), halfWeights);
+    armnn::TensorInfo weightInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true);
+    armnn::ConstTensor weights(weightInfo, halfWeights);
 
     //Create the simple test network
     auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
     input->GetOutputSlot().SetTensorInfo(info);
 
     auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
     fc->GetOutputSlot().SetTensorInfo(info);
 
+    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
+    weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
+    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);
+
     auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
 
     //Connect up the layers
     input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+    weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     //Test the tensor info is correct.
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
 
     //Test the tensor info is correct.
-    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+    CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Now test the data matches float32 data
-    const float* data = fc->m_Weight->GetConstTensor<float>();
+    const float* data = weightsLayer->m_LayerOutput->GetConstTensor<float>();
     CHECK(1.0f == data[0]);
     CHECK(2.0f == data[1]);
     CHECK(3.0f == data[2]);
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index bc8839948b..0a4a4fafde 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,14 +33,21 @@ TEST_CASE("Fp32NetworkToFp16OptimizationTest")
     floor->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
+                        &IsLayerOfType<armnn::FloorLayer>,
+                        &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
 
     CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
-                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
-                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
+                        &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+                        &IsLayerOfType<armnn::FloorLayer>,
+                        &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                        &IsLayerOfType<armnn::OutputLayer>));
+
+    CHECK(floor->GetDataType() == armnn::DataType::Float16);
+    CHECK(floor->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(floor->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 }
 
 }
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 54cbbce89f..5cbd17fb6a 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,13 +27,8 @@ public:
     static IConnectableLayer *AddConvolution(INetwork *network,
                                              const Convolution2dDescriptor &descriptor,
-                                             const ConstTensor &weights,
-                                             const Optional<ConstTensor> &biases,
                                              const char *name)
     {
-        IgnoreUnused(weights);
-        IgnoreUnused(biases);
-
         return network->AddConvolution2dLayer(descriptor, name);
     }
 
@@ -65,12 +60,8 @@ public:
     static IConnectableLayer* AddConvolution(INetwork* network,
                                              const DepthwiseConvolution2dDescriptor& descriptor,
-                                             const ConstTensor& weights,
-                                             const Optional<ConstTensor>& biases,
                                              const char* name)
     {
-        IgnoreUnused(weights);
-        IgnoreUnused(biases);
         return network->AddDepthwiseConvolution2dLayer(descriptor, name);
     }
 
@@ -171,8 +162,6 @@ INetworkPtr CreateNetwork(bool depthwise, bool preventFusing)
 
     IConnectableLayer* convLayer = Conv2dTest::AddConvolution(network.get(),
                                                               convolution2dDescriptor,
-                                                              weights,
-                                                              Optional<ConstTensor>(),
                                                               "convolution");
 
     IConnectableLayer* batchNormLayer = network->AddBatchNormalizationLayer(batchNormDescriptor,
@@ -243,13 +232,21 @@ void FuseBatchNormIntoConvTest(bool depthwise, float tolerance, armnn::Compute b
         return IsLayerOfType<ConvLayerType>(layer) &&
                (layer->GetNameStr() == "fused-batchNorm-into-convolution");
     };
-
+    auto checkConstant = [ ](const armnn::Layer* const layer) -> bool
+    {
+        const ConstantLayer* constLayer = PolymorphicDowncast<const ConstantLayer*>(layer);
+        auto tensor = ConstTensor(constLayer->m_LayerOutput->GetTensorInfo(),
+                                  constLayer->m_LayerOutput->Map(true));
+        const auto* buffer = static_cast<const float*>(tensor.GetMemoryArea());
+        std::vector<float> vector(buffer, buffer + tensor.GetNumElements());
+        return IsLayerOfType<ConstantLayer>(layer);
+    };
 
     CHECK(5 == graphFused.GetNumLayers());
     CHECK(CheckSequence(graphFused.cbegin(),
                         graphFused.cend(),
                         &IsLayerOfType<InputLayer>,
-                        &IsLayerOfType<ConstantLayer>,
-                        &IsLayerOfType<ConstantLayer>,
+                        checkConstant,
+                        checkConstant,
                         checkFusedConv2d,
                         &IsLayerOfType<OutputLayer>));
diff --git a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
deleted file mode 100644
index b3f9ed8780..0000000000
--- a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
@@ -// -// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include - -#include - -#include - -TEST_SUITE("Optimizer") -{ -using namespace armnn::optimizations; - -TEST_CASE("RedirectMembersToConstantInputsFullyConnectedTest") -{ - armnn::Graph graph; - - const armnn::TensorInfo inputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo outputInfo ({ 1, 2, 2, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 4 }, armnn::DataType::Float32, 0.0f, 0, true); - const armnn::TensorInfo biasesInfo ({ 2 }, armnn::DataType::Float32, 0.0f, 0, true); - - // Check if isConstant is enabled for weights and biases tensor info. - CHECK(weightsInfo.IsConstant()); - CHECK(biasesInfo.IsConstant()); - - armnn::FullyConnectedDescriptor desc; - desc.m_BiasEnabled = true; - desc.m_ConstantWeights = false; - - // Create the simple test network with Weights and Biases as inputs to a FullyConnected layer. - auto input = graph.AddLayer(0, "Input"); - auto weights = graph.AddLayer("Weights"); - auto biases = graph.AddLayer("Biases"); - auto fcLayer = graph.AddLayer(desc, "FullyConnected"); - auto output = graph.AddLayer(1, "Output"); - - float expectedWeightsData[] = { 1.0f, 1.0f, 1.0f, 1.0f }; - float expectedBiasesData[] = { 2.0f, 2.0f }; - - // Set the m_LayerOutput for the optimizer to point to. - armnn::ConstTensor weightsTensor(weightsInfo, &expectedWeightsData); - armnn::ConstTensor biasesTensor(biasesInfo, &expectedBiasesData); - weights->m_LayerOutput = std::make_unique(weightsTensor); - biases->m_LayerOutput = std::make_unique(biasesTensor); - - input->GetOutputSlot().SetTensorInfo(inputInfo); - weights->GetOutputSlot().SetTensorInfo(weightsInfo); - biases->GetOutputSlot().SetTensorInfo(biasesInfo); - fcLayer->GetOutputSlot().SetTensorInfo(outputInfo); - - // Connect up the layers - input->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(0)); - weights->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(1)); - biases->GetOutputSlot(0).Connect(fcLayer->GetInputSlot(2)); - fcLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0)); - - // Member variables should be null before optimization. - CHECK(fcLayer->m_Weight == nullptr); - CHECK(fcLayer->m_Bias == nullptr); - - // Run the optimizer - armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(RedirectMembersToConstantInputs())); - - // Check if member variables are not null and shape is set correctly. - CHECK(fcLayer->m_Weight != nullptr); - CHECK(fcLayer->m_Bias != nullptr); - CHECK(fcLayer->m_Weight->GetTensorInfo().GetShape() == weightsInfo.GetShape()); - CHECK(fcLayer->m_Bias->GetTensorInfo().GetShape() == biasesInfo.GetShape()); - - // Check whether data matches expected float data - const float* weightsData = fcLayer->m_Weight->GetConstTensor(); - CHECK(weightsData[0] == expectedWeightsData[0]); - CHECK(weightsData[1] == expectedWeightsData[1]); - CHECK(weightsData[2] == expectedWeightsData[2]); - CHECK(weightsData[3] == expectedWeightsData[3]); - - const float* biasesData = fcLayer->m_Bias->GetConstTensor(); - CHECK(biasesData[0] == expectedBiasesData[0]); - CHECK(biasesData[1] == expectedBiasesData[1]); -} - -} \ No newline at end of file -- cgit v1.2.1